Merge pull request #865 from CREDO23/sur-182-fix-ux-experience-for-composio-google-drive-connector

[Perf] Batch embedding, non-blocking search, chunks index & Google Drive UX fix
Rohan Verma 2026-03-10 12:52:16 -07:00 committed by GitHub
commit 547077e5b9
17 changed files with 183 additions and 108 deletions

View file

@ -0,0 +1,41 @@
"""104_add_chunks_document_id_index
Revision ID: 104
Revises: 103
Create Date: 2026-03-09
Adds a B-tree index on chunks.document_id to speed up chunk lookups
during hybrid search (both retrievers fetch chunks by document_id
after RRF ranking selects the top documents).
"""
from __future__ import annotations
from collections.abc import Sequence
from alembic import op
revision: str = "104"
down_revision: str | None = "103"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
op.execute(
"""
DO $$
BEGIN
IF NOT EXISTS (
SELECT 1 FROM pg_indexes
WHERE tablename = 'chunks' AND indexname = 'ix_chunks_document_id'
) THEN
CREATE INDEX ix_chunks_document_id ON chunks(document_id);
END IF;
END$$;
"""
)
def downgrade() -> None:
op.execute("DROP INDEX IF EXISTS ix_chunks_document_id")

View file

@ -8,6 +8,7 @@ The documentation is indexed at deployment time from MDX files and stored
in dedicated tables (surfsense_docs_documents, surfsense_docs_chunks).
"""
import asyncio
import json
from langchain_core.tools import tool
@ -100,7 +101,7 @@ async def search_surfsense_docs_async(
Formatted string with relevant documentation content
"""
# Get embedding for the query
query_embedding = embed_text(query)
query_embedding = await asyncio.to_thread(embed_text, query)
# Vector similarity search on chunks, joining with documents
stmt = (

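The asyncio.to_thread change here (and the identical ones in the memory tools and connector service below) keeps the synchronous embedding call from blocking the event loop during a request. A minimal self-contained sketch of the pattern, with a hypothetical blocking_embed standing in for the real model call:

import asyncio
import time

def blocking_embed(text: str) -> list[float]:
    # Hypothetical stand-in for a CPU-bound embedding call (e.g. SentenceTransformers).
    time.sleep(0.5)
    return [0.0] * 768

async def search(query: str) -> list[float]:
    # Run the blocking call in the default thread pool so other coroutines keep running.
    return await asyncio.to_thread(blocking_embed, query)

asyncio.run(search("example query"))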
View file

@ -1,5 +1,6 @@
"""Shared (team) memory backend for search-space-scoped AI context."""
import asyncio
import logging
from typing import Any
from uuid import UUID
@ -64,7 +65,7 @@ async def save_shared_memory(
count = await get_shared_memory_count(db_session, search_space_id)
if count >= MAX_MEMORIES_PER_SEARCH_SPACE:
await delete_oldest_shared_memory(db_session, search_space_id)
embedding = embed_text(content)
embedding = await asyncio.to_thread(embed_text, content)
row = SharedMemory(
search_space_id=search_space_id,
created_by_id=_to_uuid(created_by_id),
@ -108,7 +109,7 @@ async def recall_shared_memory(
if category and category in valid_categories:
stmt = stmt.where(SharedMemory.category == MemoryCategory(category))
if query:
query_embedding = embed_text(query)
query_embedding = await asyncio.to_thread(embed_text, query)
stmt = stmt.order_by(
SharedMemory.embedding.op("<=>")(query_embedding)
).limit(top_k)

View file

@ -9,6 +9,7 @@ Features:
- recall_memory: Retrieve relevant memories using semantic search
"""
import asyncio
import logging
from typing import Any
from uuid import UUID
@ -177,8 +178,7 @@ def create_save_memory_tool(
# Delete oldest memory to make room
await delete_oldest_memory(db_session, user_id, search_space_id)
# Generate embedding for the memory
embedding = embed_text(content)
embedding = await asyncio.to_thread(embed_text, content)
# Create new memory using ORM
# The pgvector Vector column type handles embedding conversion automatically
@ -267,8 +267,7 @@ def create_recall_memory_tool(
uuid_user_id = _to_uuid(user_id)
if query:
# Semantic search using embeddings
query_embedding = embed_text(query)
query_embedding = await asyncio.to_thread(embed_text, query)
# Build query with vector similarity
stmt = (

View file

@ -327,6 +327,7 @@ class Config:
EMBEDDING_MODEL,
**embedding_kwargs,
)
is_local_embedding_model = "://" not in (EMBEDDING_MODEL or "")
chunker_instance = RecursiveChunker(
chunk_size=getattr(embedding_model_instance, "max_seq_length", 512)
)
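The new is_local_embedding_model flag keys off whether the model string carries a provider scheme. An illustrative check (the model strings below are assumptions, not values taken from this repo):

def is_local(model: str | None) -> bool:
    # Mirrors the flag above: API-backed models are addressed as "provider://model",
    # while local SentenceTransformers models have no scheme.
    return "://" not in (model or "")

assert is_local("sentence-transformers/all-MiniLM-L6-v2")   # local -> True
assert not is_local("openai://text-embedding-3-large")       # API-backed -> False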

View file

@ -960,7 +960,7 @@ class Chunk(BaseModel, TimestampMixin):
embedding = Column(Vector(config.embedding_model_instance.dimension))
document_id = Column(
Integer, ForeignKey("documents.id", ondelete="CASCADE"), nullable=False
Integer, ForeignKey("documents.id", ondelete="CASCADE"), nullable=False, index=True
)
document = relationship("Document", back_populates="chunks")
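Note that index=True on the column only affects tables SQLAlchemy creates from scratch (e.g. via metadata.create_all); existing databases pick up the index through migration 104 above, which creates it idempotently.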

View file

@ -1,3 +1,3 @@
from app.utils.document_converters import embed_text
from app.utils.document_converters import embed_text, embed_texts
__all__ = ["embed_text"]
__all__ = ["embed_text", "embed_texts"]

View file

@ -9,7 +9,7 @@ from sqlalchemy.ext.asyncio import AsyncSession
from app.db import Chunk, Document, DocumentStatus
from app.indexing_pipeline.connector_document import ConnectorDocument
from app.indexing_pipeline.document_chunker import chunk_text
from app.indexing_pipeline.document_embedder import embed_text
from app.indexing_pipeline.document_embedder import embed_texts
from app.indexing_pipeline.document_hashing import (
compute_content_hash,
compute_unique_identifier_hash,
@ -195,25 +195,23 @@ class IndexingPipelineService:
else:
content = connector_doc.source_markdown
t_step = time.perf_counter()
embedding = embed_text(content)
perf.debug(
"[indexing] embed_text (summary) doc=%d in %.3fs",
document.id,
time.perf_counter() - t_step,
)
await self.session.execute(
delete(Chunk).where(Chunk.document_id == document.id)
)
t_step = time.perf_counter()
chunk_texts = chunk_text(
connector_doc.source_markdown,
use_code_chunker=connector_doc.should_use_code_chunker,
)
texts_to_embed = [content, *chunk_texts]
embeddings = embed_texts(texts_to_embed)
summary_embedding, *chunk_embeddings = embeddings
chunks = [
Chunk(content=text, embedding=embed_text(text))
for text in chunk_text(
connector_doc.source_markdown,
use_code_chunker=connector_doc.should_use_code_chunker,
)
Chunk(content=text, embedding=emb)
for text, emb in zip(chunk_texts, chunk_embeddings)
]
perf.info(
"[indexing] chunk+embed doc=%d chunks=%d in %.3fs",
@ -223,7 +221,7 @@ class IndexingPipelineService:
)
document.content = content
document.embedding = embedding
document.embedding = summary_embedding
attach_chunks_to_document(document, chunks)
document.updated_at = datetime.now(UTC)
document.status = DocumentStatus.ready()
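Worked example of the saving, assuming a document that chunks into 40 pieces: the old path issued 41 separate embed_text calls (one for the summary plus one per chunk), each a network round-trip for API-based models; the batched path issues a single embed_texts call with 41 texts and recovers the summary embedding as the first element of the result.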

View file

@ -264,7 +264,9 @@ class ConnectorService:
# Reuse caller-provided embedding or compute once for both retrievers.
if query_embedding is None:
t_embed = time.perf_counter()
query_embedding = config.embedding_model_instance.embed(query_text)
query_embedding = await asyncio.to_thread(
config.embedding_model_instance.embed, query_text
)
perf.info(
"[connector_svc] _combined_rrf embedding in %.3fs type=%s",
time.perf_counter() - t_embed,

View file

@ -55,6 +55,23 @@ def embed_text(text: str) -> np.ndarray:
return config.embedding_model_instance.embed(truncate_for_embedding(text))
def embed_texts(texts: list[str]) -> list[np.ndarray]:
"""Batch-embed multiple texts in a single call.
Each text is truncated to fit the model's context window before embedding.
For API-based models (``://`` in the model string) this uses
``embed_batch`` to collapse many network round-trips into one.
For local models (SentenceTransformers) it falls back to sequential
``embed`` calls to avoid padding overhead.
"""
if not texts:
return []
truncated = [truncate_for_embedding(t) for t in texts]
if config.is_local_embedding_model:
return [config.embedding_model_instance.embed(t) for t in truncated]
return config.embedding_model_instance.embed_batch(truncated)
def get_model_context_window(model_name: str) -> int:
"""Get the total context window size for a model (input + output tokens)."""
try:
@ -209,12 +226,11 @@ async def create_document_chunks(content: str) -> list[Chunk]:
Returns:
List of Chunk objects with embeddings
"""
chunk_texts = [c.text for c in config.chunker_instance.chunk(content)]
chunk_embeddings = embed_texts(chunk_texts)
return [
Chunk(
content=chunk.text,
embedding=embed_text(chunk.text),
)
for chunk in config.chunker_instance.chunk(content)
Chunk(content=text, embedding=emb)
for text, emb in zip(chunk_texts, chunk_embeddings)
]
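A short usage sketch of the new helper (input strings are illustrative):

texts = ["summary of the document", "first chunk", "second chunk"]
vectors = embed_texts(texts)             # one np.ndarray per input, in order
summary_vec, *chunk_vecs = vectors
assert len(chunk_vecs) == len(texts) - 1
assert embed_texts([]) == []             # empty input short-circuits to an empty list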

View file

@ -129,10 +129,12 @@ def patched_summarize_raises(monkeypatch) -> AsyncMock:
@pytest.fixture
def patched_embed_text(monkeypatch) -> MagicMock:
mock = MagicMock(return_value=[0.1] * _EMBEDDING_DIM)
def patched_embed_texts(monkeypatch) -> MagicMock:
mock = MagicMock(
side_effect=lambda texts: [[0.1] * _EMBEDDING_DIM for _ in texts]
)
monkeypatch.setattr(
"app.indexing_pipeline.indexing_pipeline_service.embed_text",
"app.indexing_pipeline.indexing_pipeline_service.embed_texts",
mock,
)
return mock

View file

@ -265,8 +265,8 @@ def _mock_external_apis(monkeypatch):
AsyncMock(return_value="Mocked summary."),
)
monkeypatch.setattr(
"app.indexing_pipeline.indexing_pipeline_service.embed_text",
MagicMock(return_value=[0.1] * _EMBEDDING_DIM),
"app.indexing_pipeline.indexing_pipeline_service.embed_texts",
MagicMock(side_effect=lambda texts: [[0.1] * _EMBEDDING_DIM for _ in texts]),
)
monkeypatch.setattr(
"app.indexing_pipeline.indexing_pipeline_service.chunk_text",

View file

@ -8,7 +8,7 @@ pytestmark = pytest.mark.integration
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_sets_status_ready(db_session, db_search_space, db_user, mocker):
"""Document status is READY after successful indexing."""
@ -31,7 +31,7 @@ async def test_sets_status_ready(db_session, db_search_space, db_user, mocker):
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_content_is_summary(db_session, db_search_space, db_user, mocker):
"""Document content is set to the LLM-generated summary."""
@ -55,7 +55,7 @@ async def test_content_is_summary(db_session, db_search_space, db_user, mocker):
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_chunks_written_to_db(db_session, db_search_space, db_user, mocker):
"""Chunks derived from the source markdown are persisted in the DB."""
@ -84,7 +84,7 @@ async def test_chunks_written_to_db(db_session, db_search_space, db_user, mocker
@pytest.mark.usefixtures(
"patched_summarize_raises", "patched_embed_text", "patched_chunk_text"
"patched_summarize_raises", "patched_embed_texts", "patched_chunk_text"
)
async def test_raises_on_indexing_failure(db_session, db_search_space, db_user, mocker):
"""RuntimeError is raised when the indexing step fails so the caller can fire a failure notification."""
@ -107,7 +107,7 @@ async def test_raises_on_indexing_failure(db_session, db_search_space, db_user,
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_reindex_updates_content(db_session, db_search_space, db_user, mocker):
"""Document content is updated to the new summary after reindexing."""
@ -136,7 +136,7 @@ async def test_reindex_updates_content(db_session, db_search_space, db_user, moc
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_reindex_updates_content_hash(
db_session, db_search_space, db_user, mocker
@ -168,7 +168,7 @@ async def test_reindex_updates_content_hash(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_reindex_sets_status_ready(db_session, db_search_space, db_user, mocker):
"""Document status is READY after successful reindexing."""
@ -196,7 +196,7 @@ async def test_reindex_sets_status_ready(db_session, db_search_space, db_user, m
assert DocumentStatus.is_state(document.status, DocumentStatus.READY)
@pytest.mark.usefixtures("patched_summarize", "patched_embed_text")
@pytest.mark.usefixtures("patched_summarize", "patched_embed_texts")
async def test_reindex_replaces_chunks(db_session, db_search_space, db_user, mocker):
"""Reindexing replaces old chunks with new content rather than appending."""
mocker.patch(
@ -235,7 +235,7 @@ async def test_reindex_replaces_chunks(db_session, db_search_space, db_user, moc
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_reindex_clears_reindexing_flag(
db_session, db_search_space, db_user, mocker
@ -266,7 +266,7 @@ async def test_reindex_clears_reindexing_flag(
assert document.content_needs_reindexing is False
@pytest.mark.usefixtures("patched_embed_text", "patched_chunk_text")
@pytest.mark.usefixtures("patched_embed_texts", "patched_chunk_text")
async def test_reindex_raises_on_failure(db_session, db_search_space, db_user, mocker):
"""RuntimeError is raised when reindexing fails so the caller can handle it."""
mocker.patch(

View file

@ -11,7 +11,7 @@ pytestmark = pytest.mark.integration
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_sets_status_ready(
db_session,
@ -38,7 +38,7 @@ async def test_sets_status_ready(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_content_is_summary_when_should_summarize_true(
db_session,
@ -65,7 +65,7 @@ async def test_content_is_summary_when_should_summarize_true(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_content_is_source_markdown_when_should_summarize_false(
db_session,
@ -95,7 +95,7 @@ async def test_content_is_source_markdown_when_should_summarize_false(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_chunks_written_to_db(
db_session,
@ -123,7 +123,7 @@ async def test_chunks_written_to_db(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_embedding_written_to_db(
db_session,
@ -151,7 +151,7 @@ async def test_embedding_written_to_db(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_updated_at_advances_after_indexing(
db_session,
@ -183,7 +183,7 @@ async def test_updated_at_advances_after_indexing(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_no_llm_falls_back_to_source_markdown(
db_session,
@ -214,7 +214,7 @@ async def test_no_llm_falls_back_to_source_markdown(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_fallback_summary_used_when_llm_unavailable(
db_session,
@ -245,7 +245,7 @@ async def test_fallback_summary_used_when_llm_unavailable(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_reindex_replaces_old_chunks(
db_session,
@ -282,7 +282,7 @@ async def test_reindex_replaces_old_chunks(
@pytest.mark.usefixtures(
"patched_summarize_raises", "patched_embed_text", "patched_chunk_text"
"patched_summarize_raises", "patched_embed_texts", "patched_chunk_text"
)
async def test_llm_error_sets_status_failed(
db_session,
@ -309,7 +309,7 @@ async def test_llm_error_sets_status_failed(
@pytest.mark.usefixtures(
"patched_summarize_raises", "patched_embed_text", "patched_chunk_text"
"patched_summarize_raises", "patched_embed_texts", "patched_chunk_text"
)
async def test_llm_error_leaves_no_partial_data(
db_session,

View file

@ -33,7 +33,7 @@ async def test_new_document_is_persisted_with_pending_status(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_unchanged_ready_document_is_skipped(
db_session,
@ -56,7 +56,7 @@ async def test_unchanged_ready_document_is_skipped(
@pytest.mark.usefixtures(
"patched_summarize", "patched_embed_text", "patched_chunk_text"
"patched_summarize", "patched_embed_texts", "patched_chunk_text"
)
async def test_title_only_change_updates_title_in_db(
db_session,
@ -339,7 +339,7 @@ async def test_same_content_from_different_source_is_skipped(
@pytest.mark.usefixtures(
"patched_summarize_raises", "patched_embed_text", "patched_chunk_text"
"patched_summarize_raises", "patched_embed_texts", "patched_chunk_text"
)
async def test_failed_document_with_unchanged_content_is_requeued(
db_session,

View file

@ -1,6 +1,8 @@
"use client";
import {
ChevronDown,
ChevronRight,
File,
FileSpreadsheet,
FileText,
@ -12,7 +14,6 @@ import {
import type { FC } from "react";
import { useEffect, useState } from "react";
import { ComposioDriveFolderTree } from "@/components/connectors/composio-drive-folder-tree";
import { Button } from "@/components/ui/button";
import { Label } from "@/components/ui/label";
import {
Select,
@ -108,9 +109,11 @@ export const ComposioDriveConfig: FC<ComposioDriveConfigProps> = ({
const [selectedFolders, setSelectedFolders] = useState<SelectedFolder[]>(existingFolders);
const [selectedFiles, setSelectedFiles] = useState<SelectedFolder[]>(existingFiles);
const [showFolderSelector, setShowFolderSelector] = useState(false);
const [indexingOptions, setIndexingOptions] = useState<IndexingOptions>(existingIndexingOptions);
const [isEditMode] = useState(() => existingFolders.length > 0 || existingFiles.length > 0);
const [isFolderTreeOpen, setIsFolderTreeOpen] = useState(!isEditMode);
// Update selected folders and files when connector config changes
useEffect(() => {
const folders = (connector.config?.selected_folders as SelectedFolder[] | undefined) || [];
@ -241,8 +244,21 @@ export const ComposioDriveConfig: FC<ComposioDriveConfigProps> = ({
</div>
)}
{showFolderSelector ? (
<div className="space-y-2 sm:space-y-3">
{isEditMode ? (
<div className="space-y-2">
<button
type="button"
onClick={() => setIsFolderTreeOpen(!isFolderTreeOpen)}
className="flex items-center gap-2 text-xs sm:text-sm text-muted-foreground hover:text-foreground transition-colors w-fit"
>
{isFolderTreeOpen ? (
<ChevronDown className="size-4" />
) : (
<ChevronRight className="size-4" />
)}
Change Selection
</button>
{isFolderTreeOpen && (
<ComposioDriveFolderTree
connectorId={connector.id}
selectedFolders={selectedFolders}
@ -250,26 +266,17 @@ export const ComposioDriveConfig: FC<ComposioDriveConfigProps> = ({
selectedFiles={selectedFiles}
onSelectFiles={handleSelectFiles}
/>
<Button
type="button"
variant="outline"
size="sm"
onClick={() => setShowFolderSelector(false)}
className="bg-slate-400/5 dark:bg-white/5 border-slate-400/20 hover:bg-slate-400/10 dark:hover:bg-white/10 text-xs sm:text-sm h-8 sm:h-9"
>
Done Selecting
</Button>
</div>
) : (
<Button
type="button"
variant="outline"
onClick={() => setShowFolderSelector(true)}
className="bg-slate-400/5 dark:bg-white/5 border-slate-400/20 hover:bg-slate-400/10 dark:hover:bg-white/10 text-xs sm:text-sm h-8 sm:h-9"
>
{totalSelected > 0 ? "Change Selection" : "Select Folders & Files"}
</Button>
)}
)}
</div>
) : (
<ComposioDriveFolderTree
connectorId={connector.id}
selectedFolders={selectedFolders}
onSelectFolders={handleSelectFolders}
selectedFiles={selectedFiles}
onSelectFiles={handleSelectFiles}
/>
)}
</div>
{/* Indexing Options */}

View file

@ -1,6 +1,8 @@
"use client";
import {
ChevronDown,
ChevronRight,
File,
FileSpreadsheet,
FileText,
@ -12,7 +14,6 @@ import {
import type { FC } from "react";
import { useEffect, useState } from "react";
import { GoogleDriveFolderTree } from "@/components/connectors/google-drive-folder-tree";
import { Button } from "@/components/ui/button";
import { Label } from "@/components/ui/label";
import {
Select,
@ -97,9 +98,11 @@ export const GoogleDriveConfig: FC<ConnectorConfigProps> = ({ connector, onConfi
const [selectedFolders, setSelectedFolders] = useState<SelectedFolder[]>(existingFolders);
const [selectedFiles, setSelectedFiles] = useState<SelectedFolder[]>(existingFiles);
const [showFolderSelector, setShowFolderSelector] = useState(false);
const [indexingOptions, setIndexingOptions] = useState<IndexingOptions>(existingIndexingOptions);
const [isEditMode] = useState(() => existingFolders.length > 0 || existingFiles.length > 0);
const [isFolderTreeOpen, setIsFolderTreeOpen] = useState(!isEditMode);
// Update selected folders and files when connector config changes
useEffect(() => {
const folders = (connector.config?.selected_folders as SelectedFolder[] | undefined) || [];
@ -225,8 +228,21 @@ export const GoogleDriveConfig: FC<ConnectorConfigProps> = ({ connector, onConfi
</div>
)}
{showFolderSelector ? (
<div className="space-y-2 sm:space-y-3">
{isEditMode ? (
<div className="space-y-2">
<button
type="button"
onClick={() => setIsFolderTreeOpen(!isFolderTreeOpen)}
className="flex items-center gap-2 text-xs sm:text-sm text-muted-foreground hover:text-foreground transition-colors w-fit"
>
{isFolderTreeOpen ? (
<ChevronDown className="size-4" />
) : (
<ChevronRight className="size-4" />
)}
Change Selection
</button>
{isFolderTreeOpen && (
<GoogleDriveFolderTree
connectorId={connector.id}
selectedFolders={selectedFolders}
@ -234,26 +250,17 @@ export const GoogleDriveConfig: FC<ConnectorConfigProps> = ({ connector, onConfi
selectedFiles={selectedFiles}
onSelectFiles={handleSelectFiles}
/>
<Button
type="button"
variant="outline"
size="sm"
onClick={() => setShowFolderSelector(false)}
className="bg-slate-400/5 dark:bg-white/5 border-slate-400/20 hover:bg-slate-400/10 dark:hover:bg-white/10 text-xs sm:text-sm h-8 sm:h-9"
>
Done Selecting
</Button>
</div>
) : (
<Button
type="button"
variant="outline"
onClick={() => setShowFolderSelector(true)}
className="bg-slate-400/5 dark:bg-white/5 border-slate-400/20 hover:bg-slate-400/10 dark:hover:bg-white/10 text-xs sm:text-sm h-8 sm:h-9"
>
{totalSelected > 0 ? "Change Selection" : "Select Folders & Files"}
</Button>
)}
)}
</div>
) : (
<GoogleDriveFolderTree
connectorId={connector.id}
selectedFolders={selectedFolders}
onSelectFolders={handleSelectFolders}
selectedFiles={selectedFiles}
onSelectFiles={handleSelectFiles}
/>
)}
</div>
{/* Indexing Options */}