feat: init video presentation agent

This commit is contained in:
DESKTOP-RTLN3BA\$punk 2026-03-21 22:13:41 -07:00
parent 40d949b7d5
commit b28f135a96
37 changed files with 3567 additions and 24 deletions

3
.gitignore vendored
View file

@ -5,5 +5,4 @@ node_modules/
.ruff_cache/ .ruff_cache/
.venv .venv
.pnpm-store .pnpm-store
.DS_Store .DS_Store
RemotionTets/

35
.vscode/launch.json vendored
View file

@ -22,7 +22,11 @@
"console": "integratedTerminal", "console": "integratedTerminal",
"justMyCode": false, "justMyCode": false,
"cwd": "${workspaceFolder}/surfsense_backend", "cwd": "${workspaceFolder}/surfsense_backend",
"python": "${command:python.interpreterPath}" "python": "uv",
"pythonArgs": [
"run",
"python"
]
}, },
{ {
"name": "Backend: FastAPI (No Reload)", "name": "Backend: FastAPI (No Reload)",
@ -32,7 +36,11 @@
"console": "integratedTerminal", "console": "integratedTerminal",
"justMyCode": false, "justMyCode": false,
"cwd": "${workspaceFolder}/surfsense_backend", "cwd": "${workspaceFolder}/surfsense_backend",
"python": "${command:python.interpreterPath}" "python": "uv",
"pythonArgs": [
"run",
"python"
]
}, },
{ {
"name": "Backend: FastAPI (main.py)", "name": "Backend: FastAPI (main.py)",
@ -41,14 +49,19 @@
"program": "${workspaceFolder}/surfsense_backend/main.py", "program": "${workspaceFolder}/surfsense_backend/main.py",
"console": "integratedTerminal", "console": "integratedTerminal",
"justMyCode": false, "justMyCode": false,
"cwd": "${workspaceFolder}/surfsense_backend" "cwd": "${workspaceFolder}/surfsense_backend",
"python": "uv",
"pythonArgs": [
"run",
"python"
]
}, },
{ {
"name": "Frontend: Next.js", "name": "Frontend: Next.js",
"type": "node", "type": "node",
"request": "launch", "request": "launch",
"cwd": "${workspaceFolder}/surfsense_web", "cwd": "${workspaceFolder}/surfsense_web",
"runtimeExecutable": "npm", "runtimeExecutable": "pnpm",
"runtimeArgs": ["run", "dev"], "runtimeArgs": ["run", "dev"],
"console": "integratedTerminal", "console": "integratedTerminal",
"serverReadyAction": { "serverReadyAction": {
@ -62,7 +75,7 @@
"type": "node", "type": "node",
"request": "launch", "request": "launch",
"cwd": "${workspaceFolder}/surfsense_web", "cwd": "${workspaceFolder}/surfsense_web",
"runtimeExecutable": "npm", "runtimeExecutable": "pnpm",
"runtimeArgs": ["run", "debug:server"], "runtimeArgs": ["run", "debug:server"],
"console": "integratedTerminal", "console": "integratedTerminal",
"serverReadyAction": { "serverReadyAction": {
@ -87,7 +100,11 @@
"console": "integratedTerminal", "console": "integratedTerminal",
"justMyCode": false, "justMyCode": false,
"cwd": "${workspaceFolder}/surfsense_backend", "cwd": "${workspaceFolder}/surfsense_backend",
"python": "${command:python.interpreterPath}" "python": "uv",
"pythonArgs": [
"run",
"python"
]
}, },
{ {
"name": "Celery: Beat Scheduler", "name": "Celery: Beat Scheduler",
@ -103,7 +120,11 @@
"console": "integratedTerminal", "console": "integratedTerminal",
"justMyCode": false, "justMyCode": false,
"cwd": "${workspaceFolder}/surfsense_backend", "cwd": "${workspaceFolder}/surfsense_backend",
"python": "${command:python.interpreterPath}" "python": "uv",
"pythonArgs": [
"run",
"python"
]
} }
], ],
"compounds": [ "compounds": [

View file

@ -6,6 +6,7 @@ __pycache__/
.flashrank_cache .flashrank_cache
surf_new_backend.egg-info/ surf_new_backend.egg-info/
podcasts/ podcasts/
video_presentation_audio/
sandbox_files/ sandbox_files/
temp_audio/ temp_audio/
celerybeat-schedule* celerybeat-schedule*

View file

@ -0,0 +1,85 @@
"""Add video_presentations table and video_presentation_status enum
Revision ID: 107
Revises: 106
"""
from collections.abc import Sequence
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
from alembic import op
revision: str = "107"
down_revision: str | None = "106"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
video_presentation_status_enum = sa.Enum(
"pending",
"generating",
"ready",
"failed",
name="video_presentation_status",
)
def upgrade() -> None:
    """Create the video_presentations table, its status enum, and indexes."""
    # checkfirst avoids a failure if a prior partial run already created the enum.
    video_presentation_status_enum.create(op.get_bind(), checkfirst=True)
    op.create_table(
        "video_presentations",
        sa.Column("id", sa.Integer(), autoincrement=True, nullable=False),
        sa.Column("title", sa.String(length=500), nullable=False),
        # Structured slide content and generated Remotion scene code, filled
        # in by the background task as generation progresses.
        sa.Column("slides", JSONB(), nullable=True),
        sa.Column("scene_codes", JSONB(), nullable=True),
        sa.Column(
            "status",
            video_presentation_status_enum,
            # Rows start their lifecycle as "pending" (pending -> generating ->
            # ready/failed). A "ready" default would mark unprocessed rows as
            # done whenever an insert omits the status.
            server_default="pending",
            nullable=False,
        ),
        sa.Column("search_space_id", sa.Integer(), nullable=False),
        sa.Column("thread_id", sa.Integer(), nullable=True),
        sa.Column(
            "created_at",
            sa.TIMESTAMP(timezone=True),
            server_default=sa.text("now()"),
            nullable=False,
        ),
        # Deleting a search space removes its presentations outright.
        sa.ForeignKeyConstraint(
            ["search_space_id"],
            ["searchspaces.id"],
            ondelete="CASCADE",
        ),
        # Deleting a chat thread keeps the presentation, just unlinked.
        sa.ForeignKeyConstraint(
            ["thread_id"],
            ["new_chat_threads.id"],
            ondelete="SET NULL",
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # Indexes support the frontend's polling queries (by status/thread)
    # and recency-ordered listings.
    op.create_index(
        "ix_video_presentations_status",
        "video_presentations",
        ["status"],
    )
    op.create_index(
        "ix_video_presentations_thread_id",
        "video_presentations",
        ["thread_id"],
    )
    op.create_index(
        "ix_video_presentations_created_at",
        "video_presentations",
        ["created_at"],
    )
def downgrade() -> None:
    """Undo upgrade(): drop indexes, then the table, then the enum."""
    for index_name in (
        "ix_video_presentations_created_at",
        "ix_video_presentations_thread_id",
        "ix_video_presentations_status",
    ):
        op.drop_index(index_name, table_name="video_presentations")
    op.drop_table("video_presentations")
    # The enum is dropped last because the table's status column depends on it.
    video_presentation_status_enum.drop(op.get_bind(), checkfirst=True)

View file

@ -132,6 +132,17 @@ _TOOL_INSTRUCTIONS["generate_podcast"] = """
- After calling this tool, inform the user that podcast generation has started and they will see the player when it's ready (takes 3-5 minutes). - After calling this tool, inform the user that podcast generation has started and they will see the player when it's ready (takes 3-5 minutes).
""" """
_TOOL_INSTRUCTIONS["generate_video_presentation"] = """
- generate_video_presentation: Generate a video presentation from provided content.
- Use this when the user asks to create a video, presentation, slides, or slide deck.
- Trigger phrases: "give me a presentation", "create slides", "generate a video", "make a slide deck", "turn this into a presentation"
- Args:
- source_content: The text content to turn into a presentation. The more detailed, the better.
- video_title: Optional title (default: "SurfSense Presentation")
- user_prompt: Optional style instructions (e.g., "Make it technical and detailed")
- After calling this tool, inform the user that generation has started and they will see the presentation when it's ready.
"""
_TOOL_INSTRUCTIONS["generate_report"] = """ _TOOL_INSTRUCTIONS["generate_report"] = """
- generate_report: Generate or revise a structured Markdown report artifact. - generate_report: Generate or revise a structured Markdown report artifact.
- WHEN TO CALL THIS TOOL the message must contain a creation or modification VERB directed at producing a deliverable: - WHEN TO CALL THIS TOOL the message must contain a creation or modification VERB directed at producing a deliverable:
@ -438,6 +449,16 @@ _TOOL_EXAMPLES["generate_podcast"] = """
- Then: `generate_podcast(source_content="Key insights about quantum computing from the knowledge base:\\n\\n[Comprehensive summary of all relevant search results with key facts, concepts, and findings]", podcast_title="Quantum Computing Explained")` - Then: `generate_podcast(source_content="Key insights about quantum computing from the knowledge base:\\n\\n[Comprehensive summary of all relevant search results with key facts, concepts, and findings]", podcast_title="Quantum Computing Explained")`
""" """
_TOOL_EXAMPLES["generate_video_presentation"] = """
- User: "Give me a presentation about AI trends based on what we discussed"
- First search for relevant content, then call: `generate_video_presentation(source_content="Based on our conversation and search results: [detailed summary of chat + search findings]", video_title="AI Trends Presentation")`
- User: "Create slides summarizing this conversation"
- Call: `generate_video_presentation(source_content="Complete conversation summary:\\n\\nUser asked about [topic 1]:\\n[Your detailed response]\\n\\nUser then asked about [topic 2]:\\n[Your detailed response]\\n\\n[Continue for all exchanges in the conversation]", video_title="Conversation Summary")`
- User: "Make a video presentation about quantum computing"
- First search: `search_knowledge_base(query="quantum computing")`
- Then: `generate_video_presentation(source_content="Key insights about quantum computing from the knowledge base:\\n\\n[Comprehensive summary of all relevant search results with key facts, concepts, and findings]", video_title="Quantum Computing Explained")`
"""
_TOOL_EXAMPLES["generate_report"] = """ _TOOL_EXAMPLES["generate_report"] = """
- User: "Generate a report about AI trends" - User: "Generate a report about AI trends"
- Call: `generate_report(topic="AI Trends Report", source_strategy="kb_search", search_queries=["AI trends recent developments", "artificial intelligence industry trends", "AI market growth and predictions"], report_style="detailed")` - Call: `generate_report(topic="AI Trends Report", source_strategy="kb_search", search_queries=["AI trends recent developments", "artificial intelligence industry trends", "AI market growth and predictions"], report_style="detailed")`
@ -499,6 +520,7 @@ _ALL_TOOL_NAMES_ORDERED = [
"search_knowledge_base", "search_knowledge_base",
"web_search", "web_search",
"generate_podcast", "generate_podcast",
"generate_video_presentation",
"generate_report", "generate_report",
"link_preview", "link_preview",
"display_image", "display_image",

View file

@ -8,6 +8,7 @@ Available tools:
- search_knowledge_base: Search the user's personal knowledge base - search_knowledge_base: Search the user's personal knowledge base
- search_surfsense_docs: Search Surfsense documentation for usage help - search_surfsense_docs: Search Surfsense documentation for usage help
- generate_podcast: Generate audio podcasts from content - generate_podcast: Generate audio podcasts from content
- generate_video_presentation: Generate video presentations with slides and narration
- generate_image: Generate images from text descriptions using AI models - generate_image: Generate images from text descriptions using AI models
- link_preview: Fetch rich previews for URLs - link_preview: Fetch rich previews for URLs
- display_image: Display images in chat - display_image: Display images in chat
@ -39,6 +40,7 @@ from .registry import (
from .scrape_webpage import create_scrape_webpage_tool from .scrape_webpage import create_scrape_webpage_tool
from .search_surfsense_docs import create_search_surfsense_docs_tool from .search_surfsense_docs import create_search_surfsense_docs_tool
from .user_memory import create_recall_memory_tool, create_save_memory_tool from .user_memory import create_recall_memory_tool, create_save_memory_tool
from .video_presentation import create_generate_video_presentation_tool
__all__ = [ __all__ = [
# Registry # Registry
@ -51,6 +53,7 @@ __all__ = [
"create_display_image_tool", "create_display_image_tool",
"create_generate_image_tool", "create_generate_image_tool",
"create_generate_podcast_tool", "create_generate_podcast_tool",
"create_generate_video_presentation_tool",
"create_link_preview_tool", "create_link_preview_tool",
"create_recall_memory_tool", "create_recall_memory_tool",
"create_save_memory_tool", "create_save_memory_tool",

View file

@ -73,6 +73,7 @@ from .shared_memory import (
create_save_shared_memory_tool, create_save_shared_memory_tool,
) )
from .user_memory import create_recall_memory_tool, create_save_memory_tool from .user_memory import create_recall_memory_tool, create_save_memory_tool
from .video_presentation import create_generate_video_presentation_tool
from .web_search import create_web_search_tool from .web_search import create_web_search_tool
# ============================================================================= # =============================================================================
@ -136,6 +137,17 @@ BUILTIN_TOOLS: list[ToolDefinition] = [
), ),
requires=["search_space_id", "db_session", "thread_id"], requires=["search_space_id", "db_session", "thread_id"],
), ),
# Video presentation generation tool
ToolDefinition(
name="generate_video_presentation",
description="Generate a video presentation with slides and narration from provided content",
factory=lambda deps: create_generate_video_presentation_tool(
search_space_id=deps["search_space_id"],
db_session=deps["db_session"],
thread_id=deps["thread_id"],
),
requires=["search_space_id", "db_session", "thread_id"],
),
# Report generation tool (inline, short-lived sessions for DB ops) # Report generation tool (inline, short-lived sessions for DB ops)
# Supports internal KB search via source_strategy so the agent doesn't # Supports internal KB search via source_strategy so the agent doesn't
# need to call search_knowledge_base separately before generating. # need to call search_knowledge_base separately before generating.

View file

@ -0,0 +1,171 @@
"""
Video presentation generation tool for the SurfSense agent.
This module provides a factory function for creating the generate_video_presentation
tool that submits a Celery task for background video presentation generation.
The frontend polls for completion and auto-updates when the presentation is ready.
Duplicate request prevention:
- Only one video presentation can be generated at a time per search space
- Uses Redis to track active video presentation tasks
- Validates the Redis marker against actual DB status to avoid stale locks
"""
from typing import Any
import redis
from langchain_core.tools import tool
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from app.config import config
from app.db import VideoPresentation, VideoPresentationStatus
REDIS_URL = config.REDIS_APP_URL
_redis_client: redis.Redis | None = None
def get_redis_client() -> redis.Redis:
    """Return the shared Redis client, creating it lazily on first use."""
    global _redis_client
    if _redis_client is not None:
        return _redis_client
    _redis_client = redis.from_url(REDIS_URL, decode_responses=True)
    return _redis_client
def _redis_key(search_space_id: int) -> str:
    """Build the Redis key tracking the active generation for a search space."""
    return "video_presentation:generating:{}".format(search_space_id)
def get_generating_video_presentation_id(search_space_id: int) -> int | None:
    """Return the ID of the presentation currently generating, or None.

    Best-effort: any Redis failure (or a missing/empty marker) is treated
    as "nothing is generating" rather than raised.
    """
    try:
        raw = get_redis_client().get(_redis_key(search_space_id))
        return int(raw) if raw else None
    except Exception:
        return None
def clear_generating_video_presentation(search_space_id: int) -> None:
    """Remove the generating marker (used when a stale lock is detected)."""
    try:
        get_redis_client().delete(_redis_key(search_space_id))
    except Exception:
        # Best-effort: an unreachable Redis just means there is no lock to clear.
        pass
def set_generating_video_presentation(
    search_space_id: int, video_presentation_id: int
) -> None:
    """Record which presentation is currently generating for this search space.

    The marker expires after 30 minutes so a crashed worker cannot hold the
    lock forever. Failures are logged but never raised.
    """
    key = _redis_key(search_space_id)
    try:
        get_redis_client().setex(key, 1800, str(video_presentation_id))
    except Exception as exc:
        print(
            f"[generate_video_presentation] Warning: Could not set generating video presentation in Redis: {exc}"
        )
def create_generate_video_presentation_tool(
    search_space_id: int,
    db_session: AsyncSession,
    thread_id: int | None = None,
):
    """
    Factory function to create the generate_video_presentation tool with injected dependencies.
    Pre-creates video presentation record with pending status so the ID is available
    immediately for frontend polling.

    Args:
        search_space_id: Search space the presentation belongs to.
        db_session: Async SQLAlchemy session used for the duplicate check and
            to create the presentation record.
        thread_id: Optional chat thread to associate with the presentation.

    Returns:
        The decorated async ``generate_video_presentation`` tool.
    """
    # NOTE: the inner docstring is the tool description surfaced to the LLM.
    @tool
    async def generate_video_presentation(
        source_content: str,
        video_title: str = "SurfSense Presentation",
        user_prompt: str | None = None,
    ) -> dict[str, Any]:
        """Generate a video presentation from the provided content.
        Use this tool when the user asks to create a video, presentation, slides, or slide deck.
        Args:
            source_content: The text content to turn into a presentation.
            video_title: Title for the presentation (default: "SurfSense Presentation")
            user_prompt: Optional style/tone instructions.
        """
        try:
            # Duplicate-request guard: Redis holds the ID of the presentation
            # currently generating for this search space (if any).
            generating_id = get_generating_video_presentation_id(search_space_id)
            if generating_id:
                result = await db_session.execute(
                    select(VideoPresentation).filter(
                        VideoPresentation.id == generating_id
                    )
                )
                existing = result.scalars().first()
                # Only block when the DB confirms the marker; otherwise the
                # Redis lock is stale (e.g. worker crashed) and gets cleared.
                if existing and existing.status == VideoPresentationStatus.GENERATING:
                    print(
                        f"[generate_video_presentation] Blocked duplicate — "
                        f"presentation {generating_id} is actively generating"
                    )
                    return {
                        "status": VideoPresentationStatus.GENERATING.value,
                        "video_presentation_id": generating_id,
                        "title": video_title,
                        "message": "A video presentation is already being generated. Please wait for it to complete.",
                    }
                print(
                    f"[generate_video_presentation] Stale Redis lock for presentation {generating_id} "
                    f"(status={existing.status if existing else 'not found'}). Clearing and proceeding."
                )
                clear_generating_video_presentation(search_space_id)
            # Pre-create the DB record so the frontend can poll by ID right away.
            video_pres = VideoPresentation(
                title=video_title,
                status=VideoPresentationStatus.PENDING,
                search_space_id=search_space_id,
                thread_id=thread_id,
            )
            db_session.add(video_pres)
            await db_session.commit()
            await db_session.refresh(video_pres)
            # Function-scope import — presumably to avoid a circular import with
            # the Celery task module; TODO confirm.
            from app.tasks.celery_tasks.video_presentation_tasks import (
                generate_video_presentation_task,
            )
            task = generate_video_presentation_task.delay(
                video_presentation_id=video_pres.id,
                source_content=source_content,
                search_space_id=search_space_id,
                user_prompt=user_prompt,
            )
            # Set the generating marker only after the task was submitted.
            set_generating_video_presentation(search_space_id, video_pres.id)
            print(
                f"[generate_video_presentation] Created video presentation {video_pres.id}, task: {task.id}"
            )
            return {
                "status": VideoPresentationStatus.PENDING.value,
                "video_presentation_id": video_pres.id,
                "title": video_title,
                "message": "Video presentation generation started. This may take a few minutes.",
            }
        except Exception as e:
            # Return a structured failure instead of raising so the agent's
            # chat flow can continue and report the error to the user.
            error_message = str(e)
            print(f"[generate_video_presentation] Error: {error_message}")
            return {
                "status": VideoPresentationStatus.FAILED.value,
                "error": error_message,
                "title": video_title,
                "video_presentation_id": None,
            }
    return generate_video_presentation

View file

@ -0,0 +1,10 @@
"""Video Presentation LangGraph Agent.
This module defines a graph for generating video presentations
from source content, similar to the podcaster agent but producing
slide-based video presentations with TTS narration.
"""
from .graph import graph
__all__ = ["graph"]

View file

@ -0,0 +1,25 @@
"""Define the configurable parameters for the video presentation agent."""
from __future__ import annotations
from dataclasses import dataclass, fields
from langchain_core.runnables import RunnableConfig
@dataclass(kw_only=True)
class Configuration:
"""The configuration for the video presentation agent."""
video_title: str
search_space_id: int
user_prompt: str | None = None
@classmethod
def from_runnable_config(
cls, config: RunnableConfig | None = None
) -> Configuration:
"""Create a Configuration instance from a RunnableConfig object."""
configurable = (config.get("configurable") or {}) if config else {}
_fields = {f.name for f in fields(cls) if f.init}
return cls(**{k: v for k, v in configurable.items() if k in _fields})

View file

@ -0,0 +1,30 @@
from langgraph.graph import StateGraph
from .configuration import Configuration
from .nodes import (
create_presentation_slides,
create_slide_audio,
generate_slide_scene_codes,
)
from .state import State
def build_graph():
    """Assemble and compile the linear video-presentation workflow.

    Pipeline: structured slides -> per-slide TTS audio -> Remotion scene code.
    """
    workflow = StateGraph(State, config_schema=Configuration)

    node_map = {
        "create_presentation_slides": create_presentation_slides,
        "create_slide_audio": create_slide_audio,
        "generate_slide_scene_codes": generate_slide_scene_codes,
    }
    for node_name, node_fn in node_map.items():
        workflow.add_node(node_name, node_fn)

    # Strictly sequential: each stage consumes the previous stage's output.
    pipeline = [
        "__start__",
        "create_presentation_slides",
        "create_slide_audio",
        "generate_slide_scene_codes",
        "__end__",
    ]
    for src, dst in zip(pipeline, pipeline[1:]):
        workflow.add_edge(src, dst)

    compiled = workflow.compile()
    compiled.name = "Surfsense Video Presentation"
    return compiled


graph = build_graph()

View file

@ -0,0 +1,552 @@
import asyncio
import contextlib
import json
import math
import os
import shutil
import uuid
from pathlib import Path
from typing import Any
from ffmpeg.asyncio import FFmpeg
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.runnables import RunnableConfig
from litellm import aspeech
from app.config import config as app_config
from app.services.kokoro_tts_service import get_kokoro_tts_service
from app.services.llm_service import get_agent_llm
from .configuration import Configuration
from .prompts import (
DEFAULT_DURATION_IN_FRAMES,
FPS,
REFINE_SCENE_SYSTEM_PROMPT,
REMOTION_SCENE_SYSTEM_PROMPT,
THEME_PRESETS,
build_scene_generation_user_prompt,
build_theme_assignment_user_prompt,
get_slide_generation_prompt,
get_theme_assignment_system_prompt,
pick_theme_and_mode_fallback,
)
from .state import (
PresentationSlides,
SlideAudioResult,
SlideContent,
SlideSceneCode,
State,
)
from .utils import get_voice_for_provider
MAX_REFINE_ATTEMPTS = 3
async def create_presentation_slides(
    state: State, config: RunnableConfig
) -> dict[str, Any]:
    """Parse source content into structured presentation slides using LLM.

    Returns:
        {"slides": list of slide objects} merged into the graph state.

    Raises:
        RuntimeError: if no LLM is configured for the search space.
        ValueError / json.JSONDecodeError: if the LLM output cannot be parsed
            even via the embedded-JSON fallback.
    """
    configuration = Configuration.from_runnable_config(config)
    search_space_id = configuration.search_space_id
    user_prompt = configuration.user_prompt
    llm = await get_agent_llm(state.db_session, search_space_id)
    if not llm:
        error_message = f"No LLM configured for search space {search_space_id}"
        print(error_message)
        raise RuntimeError(error_message)
    prompt = get_slide_generation_prompt(user_prompt)
    messages = [
        SystemMessage(content=prompt),
        HumanMessage(
            content=f"<source_content>{state.source_content}</source_content>"
        ),
    ]
    llm_response = await llm.ainvoke(messages)
    # First attempt: the whole response is the JSON document.
    try:
        presentation = PresentationSlides.model_validate(
            json.loads(llm_response.content)
        )
    except (json.JSONDecodeError, ValueError) as e:
        print(f"Direct JSON parsing failed, trying fallback approach: {e!s}")
        # Fallback: the JSON may be embedded in prose/markdown fences —
        # extract the outermost {...} span and parse that instead.
        try:
            content = llm_response.content
            json_start = content.find("{")
            json_end = content.rfind("}") + 1
            if json_start >= 0 and json_end > json_start:
                json_str = content[json_start:json_end]
                parsed_data = json.loads(json_str)
                presentation = PresentationSlides.model_validate(parsed_data)
                print("Successfully parsed presentation slides using fallback approach")
            else:
                error_message = f"Could not find valid JSON in LLM response. Raw response: {content}"
                print(error_message)
                raise ValueError(error_message)
        except (json.JSONDecodeError, ValueError) as e2:
            # Log the raw response for debugging, then propagate the failure.
            error_message = f"Error parsing LLM response (fallback also failed): {e2!s}"
            print(f"Error parsing LLM response: {e2!s}")
            print(f"Raw response: {llm_response.content}")
            raise
    return {"slides": presentation.slides}
async def create_slide_audio(state: State, config: RunnableConfig) -> dict[str, Any]:
    """Generate TTS audio for each slide.
    Each slide's speaker_transcripts are generated as individual TTS chunks,
    then concatenated with ffmpeg (matching the POC in RemotionTets/api/tts).

    Returns:
        {"slide_audio_results": list of SlideAudioResult, sorted by slide_number}.
    """
    # Unique prefix so concurrent runs do not collide on temp/output filenames.
    session_id = str(uuid.uuid4())
    temp_dir = Path("temp_audio")
    temp_dir.mkdir(exist_ok=True)
    output_dir = Path("video_presentation_audio")
    output_dir.mkdir(exist_ok=True)
    slides = state.slides or []
    voice = get_voice_for_provider(app_config.TTS_SERVICE, speaker_id=0)
    # Kokoro (local) produces wav; other providers return mp3.
    ext = "wav" if app_config.TTS_SERVICE == "local/kokoro" else "mp3"
    async def _generate_tts_chunk(text: str, chunk_path: str) -> str:
        """Generate a single TTS chunk and write it to *chunk_path*."""
        if app_config.TTS_SERVICE == "local/kokoro":
            kokoro_service = await get_kokoro_tts_service(lang_code="a")
            await kokoro_service.generate_speech(
                text=text,
                voice=voice,
                speed=1.0,
                output_path=chunk_path,
            )
        else:
            # Hosted TTS via litellm's aspeech; optional custom api_base.
            kwargs: dict[str, Any] = {
                "model": app_config.TTS_SERVICE,
                "api_key": app_config.TTS_SERVICE_API_KEY,
                "voice": voice,
                "input": text,
                "max_retries": 2,
                "timeout": 600,
            }
            if app_config.TTS_SERVICE_API_BASE:
                kwargs["api_base"] = app_config.TTS_SERVICE_API_BASE
            response = await aspeech(**kwargs)
            with open(chunk_path, "wb") as f:
                f.write(response.content)
        return chunk_path
    async def _concat_with_ffmpeg(chunk_paths: list[str], output_file: str) -> None:
        """Concatenate multiple audio chunks into one file using async ffmpeg."""
        ffmpeg = FFmpeg().option("y")
        for chunk in chunk_paths:
            ffmpeg = ffmpeg.input(chunk)
        # Build an "[0:0][1:0]...concat=n=K:v=0:a=1[outa]" audio-only filter.
        filter_parts = [f"[{i}:0]" for i in range(len(chunk_paths))]
        filter_str = (
            "".join(filter_parts) + f"concat=n={len(chunk_paths)}:v=0:a=1[outa]"
        )
        ffmpeg = ffmpeg.option("filter_complex", filter_str)
        ffmpeg = ffmpeg.output(output_file, map="[outa]")
        await ffmpeg.execute()
    async def generate_audio_for_slide(slide: SlideContent) -> SlideAudioResult:
        """Generate, concatenate, and measure audio for one slide."""
        has_transcripts = (
            slide.speaker_transcripts and len(slide.speaker_transcripts) > 0
        )
        # No narration: emit a silent placeholder with the default duration.
        if not has_transcripts:
            print(
                f"Slide {slide.slide_number}: no speaker_transcripts, "
                f"using default duration ({DEFAULT_DURATION_IN_FRAMES} frames)"
            )
            return SlideAudioResult(
                slide_number=slide.slide_number,
                audio_file="",
                duration_seconds=DEFAULT_DURATION_IN_FRAMES / FPS,
                duration_in_frames=DEFAULT_DURATION_IN_FRAMES,
            )
        output_file = str(output_dir / f"{session_id}_slide_{slide.slide_number}.{ext}")
        chunk_paths: list[str] = []
        try:
            for i, text in enumerate(slide.speaker_transcripts):
                chunk_path = str(
                    temp_dir
                    / f"{session_id}_slide_{slide.slide_number}_chunk_{i}.{ext}"
                )
                print(
                    f"  Slide {slide.slide_number} chunk {i + 1}/"
                    f"{len(slide.speaker_transcripts)}: "
                    f'"{text[:60]}..."'
                )
                await _generate_tts_chunk(text, chunk_path)
                chunk_paths.append(chunk_path)
            # A single chunk needs no concatenation — just move it into place.
            if len(chunk_paths) == 1:
                shutil.move(chunk_paths[0], output_file)
            else:
                print(
                    f"  Concatenating {len(chunk_paths)} chunks for slide "
                    f"{slide.slide_number} with ffmpeg"
                )
                await _concat_with_ffmpeg(chunk_paths, output_file)
            duration_seconds = await _get_audio_duration(output_file)
            duration_in_frames = math.ceil(duration_seconds * FPS)
            return SlideAudioResult(
                slide_number=slide.slide_number,
                audio_file=output_file,
                duration_seconds=duration_seconds,
                # Never shorter than the default so the slide stays readable.
                duration_in_frames=max(duration_in_frames, DEFAULT_DURATION_IN_FRAMES),
            )
        except Exception as e:
            print(f"Error generating audio for slide {slide.slide_number}: {e!s}")
            raise
        finally:
            # Temp chunks are always removed, even on failure.
            for p in chunk_paths:
                with contextlib.suppress(OSError):
                    os.remove(p)
    # All slides are processed concurrently; results re-sorted afterwards
    # because gather preserves task order, not completion order.
    tasks = [generate_audio_for_slide(slide) for slide in slides]
    audio_results = await asyncio.gather(*tasks)
    audio_results_sorted = sorted(audio_results, key=lambda r: r.slide_number)
    print(
        f"Generated audio for {len(audio_results_sorted)} slides "
        f"(total duration: {sum(r.duration_seconds for r in audio_results_sorted):.1f}s)"
    )
    return {"slide_audio_results": audio_results_sorted}
async def _get_audio_duration(file_path: str) -> float:
    """Get audio duration in seconds using ffprobe.

    Falls back to a rough file-size estimate when ffprobe is unavailable or
    fails, and finally to the default slide duration if even the file size
    cannot be read.
    """
    try:
        # asyncio.subprocess exposes PIPE directly — no need to import the
        # blocking subprocess module just for the constant.
        proc = await asyncio.create_subprocess_exec(
            "ffprobe",
            "-v",
            "error",
            "-show_entries",
            "format=duration",
            "-of",
            "default=noprint_wrappers=1:nokey=1",
            file_path,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        stdout, _ = await asyncio.wait_for(proc.communicate(), timeout=10)
        if proc.returncode == 0 and stdout.strip():
            return float(stdout.strip())
    except Exception as e:
        print(f"ffprobe failed for {file_path}: {e!s}, using file-size estimation")
    try:
        file_size = os.path.getsize(file_path)
        if file_path.endswith(".wav"):
            # Assumes ~16 kHz 16-bit mono PCM (32 kB/s) — TODO confirm against
            # the TTS output format.
            return file_size / (16000 * 2)
        else:
            # Assumes ~128 kbit/s mp3 (16 kB/s).
            return file_size / 16000
    except Exception:
        return DEFAULT_DURATION_IN_FRAMES / FPS
async def _assign_themes_with_llm(
    llm, slides: list[SlideContent]
) -> dict[int, tuple[str, str]]:
    """Ask the LLM to assign a theme+mode to each slide in one call.
    Returns a dict mapping slide_number to (theme, mode).
    Falls back to round-robin if the LLM response can't be parsed.
    """
    total = len(slides)
    # Compact per-slide summaries keep the single assignment prompt small.
    slide_summaries = [
        {
            "slide_number": s.slide_number,
            "title": s.title,
            "subtitle": s.subtitle or "",
            "background_explanation": s.background_explanation or "",
        }
        for s in slides
    ]
    system = get_theme_assignment_system_prompt()
    user = build_theme_assignment_user_prompt(slide_summaries)
    try:
        response = await llm.ainvoke(
            [
                SystemMessage(content=system),
                HumanMessage(content=user),
            ]
        )
        text = response.content.strip()
        # Strip markdown code fences if the model wrapped the JSON in them.
        if text.startswith("```"):
            lines = text.split("\n")
            text = "\n".join(
                line for line in lines if not line.strip().startswith("```")
            ).strip()
        assignments = json.loads(text)
        valid_themes = set(THEME_PRESETS)
        result: dict[int, tuple[str, str]] = {}
        # Keep only well-formed entries: known theme, valid mode, real slide number.
        for entry in assignments:
            sn = entry.get("slide_number")
            theme = entry.get("theme", "").upper()
            mode = entry.get("mode", "dark").lower()
            if sn and theme in valid_themes and mode in ("dark", "light"):
                result[sn] = (theme, mode)
        if len(result) == total:
            print(
                "LLM theme assignment: "
                + ", ".join(f"S{sn}={t}/{m}" for sn, (t, m) in sorted(result.items()))
            )
            return result
        # Partial response: keep the valid assignments and fill the rest
        # deterministically with the round-robin fallback.
        print(
            f"LLM returned {len(result)}/{total} valid assignments, "
            "filling gaps with fallback"
        )
        for s in slides:
            if s.slide_number not in result:
                result[s.slide_number] = pick_theme_and_mode_fallback(
                    s.slide_number - 1, total
                )
        return result
    except Exception as e:
        # Any failure (LLM error, bad JSON, ...) degrades to the fallback for
        # every slide rather than aborting the pipeline.
        print(f"LLM theme assignment failed ({e!s}), using fallback")
        return {
            s.slide_number: pick_theme_and_mode_fallback(s.slide_number - 1, total)
            for s in slides
        }
async def generate_slide_scene_codes(
    state: State, config: RunnableConfig
) -> dict[str, Any]:
    """Generate Remotion component code for each slide using LLM.
    First assigns a theme+mode to every slide via a single LLM call,
    then generates scene code per slide with the assigned theme.

    Returns:
        {"slide_scene_codes": list of SlideSceneCode, in slide order}.

    Raises:
        RuntimeError: if no LLM is configured for the search space.
    """
    configuration = Configuration.from_runnable_config(config)
    search_space_id = configuration.search_space_id
    llm = await get_agent_llm(state.db_session, search_space_id)
    if not llm:
        raise RuntimeError(f"No LLM configured for search space {search_space_id}")
    slides = state.slides or []
    audio_results = state.slide_audio_results or []
    # Index audio by slide number so each slide can look up its duration.
    audio_map: dict[int, SlideAudioResult] = {r.slide_number: r for r in audio_results}
    total_slides = len(slides)
    theme_assignments = await _assign_themes_with_llm(llm, slides)
    scene_codes: list[SlideSceneCode] = []
    for slide in slides:
        audio = audio_map.get(slide.slide_number)
        # Slides without audio get the default fixed duration.
        duration = audio.duration_in_frames if audio else DEFAULT_DURATION_IN_FRAMES
        theme, mode = theme_assignments.get(
            slide.slide_number,
            pick_theme_and_mode_fallback(slide.slide_number - 1, total_slides),
        )
        user_prompt = build_scene_generation_user_prompt(
            slide_number=slide.slide_number,
            total_slides=total_slides,
            title=slide.title,
            subtitle=slide.subtitle,
            content_in_markdown=slide.content_in_markdown,
            background_explanation=slide.background_explanation,
            duration_in_frames=duration,
            theme=theme,
            mode=mode,
        )
        messages = [
            SystemMessage(content=REMOTION_SCENE_SYSTEM_PROMPT),
            HumanMessage(content=user_prompt),
        ]
        print(
            f"Generating scene code for slide {slide.slide_number}/{total_slides}: "
            f'"{slide.title}" ({duration} frames)'
        )
        llm_response = await llm.ainvoke(messages)
        code, scene_title = _extract_code_and_title(llm_response.content)
        # Auto-repair loop: validates and (if needed) asks the LLM to fix the code.
        code = await _refine_if_needed(llm, code, slide.slide_number)
        scene_codes.append(
            SlideSceneCode(
                slide_number=slide.slide_number,
                code=code,
                # Fall back to the slide's own title if the LLM returned none.
                title=scene_title or slide.title,
            )
        )
        print(f"Scene code ready for slide {slide.slide_number} ({len(code)} chars)")
    return {"slide_scene_codes": scene_codes}
def _extract_code_and_title(content: str) -> tuple[str, str | None]:
"""Extract code and optional title from LLM response.
The LLM may return a JSON object like the POC's structured output:
{ "code": "...", "title": "..." }
Or it may return raw code (with optional markdown fences).
Returns (code, title) where title may be None.
"""
text = content.strip()
if text.startswith("{"):
try:
parsed = json.loads(text)
if isinstance(parsed, dict) and "code" in parsed:
return parsed["code"], parsed.get("title")
except (json.JSONDecodeError, ValueError):
pass
json_start = text.find("{")
json_end = text.rfind("}") + 1
if json_start >= 0 and json_end > json_start:
try:
parsed = json.loads(text[json_start:json_end])
if isinstance(parsed, dict) and "code" in parsed:
return parsed["code"], parsed.get("title")
except (json.JSONDecodeError, ValueError):
pass
code = text
if code.startswith("```"):
lines = code.split("\n")
start = 1
end = len(lines)
for i in range(len(lines) - 1, 0, -1):
if lines[i].strip().startswith("```"):
end = i
break
code = "\n".join(lines[start:end]).strip()
return code, None
async def _refine_if_needed(llm, code: str, slide_number: int) -> str:
    """Validate *code* and, when it fails the basic syntax check, ask the
    LLM to repair it.

    Up to MAX_REFINE_ATTEMPTS repair rounds are attempted.  Raises
    RuntimeError when the code is still broken afterwards, matching the
    POC's behavior where a failed slide aborts the pipeline.
    """
    error = _basic_syntax_check(code)
    if error is None:
        return code

    attempt = 0
    while attempt < MAX_REFINE_ATTEMPTS:
        attempt += 1
        print(
            f"Slide {slide_number}: syntax issue (attempt {attempt}/{MAX_REFINE_ATTEMPTS}): {error}"
        )
        repair_request = (
            f"Here is the broken Remotion component code:\n\n{code}\n\n"
            f"Compilation error:\n{error}\n\nFix the code."
        )
        response = await llm.ainvoke(
            [
                SystemMessage(content=REFINE_SCENE_SYSTEM_PROMPT),
                HumanMessage(content=repair_request),
            ]
        )
        code, _ = _extract_code_and_title(response.content)
        error = _basic_syntax_check(code)
        if error is None:
            print(f"Slide {slide_number}: fixed on attempt {attempt}")
            return code

    raise RuntimeError(
        f"Slide {slide_number} failed to compile after {MAX_REFINE_ATTEMPTS} "
        f"refine attempts. Last error: {error}"
    )
def _basic_syntax_check(code: str) -> str | None:
"""Run a lightweight syntax check on the generated code.
Full Babel-based compilation happens on the frontend. This backend check
catches the most common LLM code-generation mistakes so the refine loop
can fix them before persisting.
Returns an error description or None if the code looks valid.
"""
if not code or not code.strip():
return "Empty code"
if "export" not in code and "MyComposition" not in code:
return "Missing exported component (expected 'export const MyComposition')"
brace_count = 0
paren_count = 0
bracket_count = 0
for ch in code:
if ch == "{":
brace_count += 1
elif ch == "}":
brace_count -= 1
elif ch == "(":
paren_count += 1
elif ch == ")":
paren_count -= 1
elif ch == "[":
bracket_count += 1
elif ch == "]":
bracket_count -= 1
if brace_count < 0:
return "Unmatched closing brace '}'"
if paren_count < 0:
return "Unmatched closing parenthesis ')'"
if bracket_count < 0:
return "Unmatched closing bracket ']'"
if brace_count != 0:
return f"Unbalanced braces: {brace_count} unclosed"
if paren_count != 0:
return f"Unbalanced parentheses: {paren_count} unclosed"
if bracket_count != 0:
return f"Unbalanced brackets: {bracket_count} unclosed"
if "useCurrentFrame" not in code:
return "Missing useCurrentFrame() — required for Remotion animations"
if "AbsoluteFill" not in code:
return "Missing AbsoluteFill — required as the root layout component"
return None

View file

@ -0,0 +1,509 @@
import datetime
# TODO: move these to config file
# Hard ceiling on the number of slides per presentation (the LLM decides
# the actual count; see get_slide_generation_prompt).
MAX_SLIDES: int = 5
# Remotion composition frame rate.
FPS: int = 30
# Fallback slide length in frames when no per-slide audio duration exists.
DEFAULT_DURATION_IN_FRAMES: int = 300
# Theme names the LLM may assign to slides; also drives the round-robin
# fallback in pick_theme_and_mode_fallback.
THEME_PRESETS: list[str] = [
    "TERRA",
    "OCEAN",
    "SUNSET",
    "EMERALD",
    "ECLIPSE",
    "ROSE",
    "FROST",
    "NEBULA",
    "AURORA",
    "CORAL",
    "MIDNIGHT",
    "AMBER",
    "LAVENDER",
    "STEEL",
    "CITRUS",
    "CHERRY",
]
# One-line design brief per theme, injected into the theme-assignment
# system prompt by get_theme_assignment_system_prompt.
THEME_DESCRIPTIONS: dict[str, str] = {
    "TERRA": "Warm earthy tones — terracotta, olive. Heritage, tradition, organic warmth.",
    "OCEAN": "Cool oceanic depth — teal, coral accents. Calm, marine, fluid elegance.",
    "SUNSET": "Vibrant warm energy — orange, purple. Passion, creativity, bold expression.",
    "EMERALD": "Fresh natural life — green, mint. Growth, health, sustainability.",
    "ECLIPSE": "Dramatic luxury — black, gold. Premium, power, prestige.",
    "ROSE": "Soft elegance — dusty pink, mauve. Beauty, care, refined femininity.",
    "FROST": "Crisp clarity — ice blue, silver. Tech, data, precision analytics.",
    "NEBULA": "Cosmic mystery — magenta, deep purple. AI, innovation, cutting-edge future.",
    "AURORA": "Ethereal northern lights — green-teal, violet. Mystical, transformative, wonder.",
    "CORAL": "Tropical warmth — coral, turquoise. Inviting, lively, community.",
    "MIDNIGHT": "Deep sophistication — navy, silver. Contemplative, trust, authority.",
    "AMBER": "Rich honey warmth — amber, brown. Comfort, wisdom, organic richness.",
    "LAVENDER": "Gentle dreaminess — purple, lilac. Calm, imaginative, serene.",
    "STEEL": "Industrial strength — gray, steel blue. Modern professional, reliability.",
    "CITRUS": "Bright optimism — yellow, lime. Energy, joy, fresh starts.",
    "CHERRY": "Bold impact — deep red, dark. Power, urgency, passionate conviction.",
}
# ---------------------------------------------------------------------------
# LLM-based theme assignment (replaces keyword-based pick_theme_and_mode)
# ---------------------------------------------------------------------------
# System prompt template; "{theme_list}" is filled in by
# get_theme_assignment_system_prompt(). The literal JSON braces in the
# example output are doubled so str.format() leaves them intact.
THEME_ASSIGNMENT_SYSTEM_PROMPT: str = """You are a visual design director assigning color themes to presentation slides.
Given a list of slides, assign each slide a theme preset and color mode (dark or light).
Available themes (name description):
{theme_list}
Rules:
1. Pick the theme that best matches each slide's mood, content, and visual direction.
2. Maximize visual variety avoid repeating the same theme on consecutive slides.
3. Mix dark and light modes across the presentation for contrast and rhythm.
4. Opening slides often benefit from a bold dark theme; closing/summary slides can go either way.
5. The "background_explanation" field is the primary signal it describes the intended mood and color direction.
Return ONLY a JSON array (no markdown fences, no explanation):
[
{{"slide_number": 1, "theme": "THEME_NAME", "mode": "dark"}},
{{"slide_number": 2, "theme": "THEME_NAME", "mode": "light"}}
]
""".strip()
def build_theme_assignment_user_prompt(
    slides: list[dict[str, str]],
) -> str:
    """Render the slide list that accompanies the theme-assignment prompt.

    Each entry in *slides* is a dict with keys: slide_number, title, and
    optionally subtitle and background_explanation (the mood signal).
    """

    def describe(slide: dict[str, str]) -> str:
        # Missing subtitle renders as empty; missing mood falls back to
        # "neutral", matching what downstream assignment expects.
        subtitle = slide.get("subtitle", "")
        mood = slide.get("background_explanation", "neutral")
        return (
            f'Slide {slide["slide_number"]}: "{slide["title"]}" '
            f'(subtitle: "{subtitle}") — '
            f'Mood: "{mood}"'
        )

    header = ["Assign a theme and mode to each of these slides:", ""]
    return "\n".join(header + [describe(s) for s in slides])
def get_theme_assignment_system_prompt() -> str:
    """Build the theme-assignment system prompt with every theme listed.

    Expands THEME_DESCRIPTIONS into one "- NAME: description" bullet per
    theme and injects the list into THEME_ASSIGNMENT_SYSTEM_PROMPT.
    """
    bullet_points = [
        f"- {name}: {description}"
        for name, description in THEME_DESCRIPTIONS.items()
    ]
    return THEME_ASSIGNMENT_SYSTEM_PROMPT.format(theme_list="\n".join(bullet_points))
def pick_theme_and_mode_fallback(
    slide_index: int, total_slides: int
) -> tuple[str, str]:
    """Deterministic round-robin fallback when LLM theme assignment fails.

    Themes cycle through THEME_PRESETS by slide index; mode alternates
    dark/light by parity (even slides dark). A single-slide presentation
    is always dark.
    """
    theme = THEME_PRESETS[slide_index % len(THEME_PRESETS)]
    if total_slides == 1:
        return theme, "dark"
    return theme, "light" if slide_index % 2 else "dark"
def get_slide_generation_prompt(user_prompt: str | None = None) -> str:
    """Build the system prompt that turns raw source content into slides.

    When *user_prompt* is given, it is embedded as a mandatory
    <user_instruction> block the model must follow. The returned prompt
    instructs the model to emit a JSON object with 1..MAX_SLIDES slides,
    each carrying a title, subtitle, markdown content, narration
    sentences, and a visual-mood description.
    """
    return f"""
Today's date: {datetime.datetime.now().strftime("%Y-%m-%d")}
<video_presentation_system>
You are a content-to-slides converter. You receive raw source content (articles, notes, transcripts,
product descriptions, chat conversations, etc.) and break it into a sequence of presentation slides
for a video presentation with voiceover narration.
{
        f'''
You **MUST** strictly adhere to the following user instruction while generating the slides:
<user_instruction>
{user_prompt}
</user_instruction>
'''
        if user_prompt
        else ""
    }
<input>
- '<source_content>': A block of text containing the information to be presented. This could be
research findings, an article summary, a detailed outline, user chat history, or any relevant
raw information. The content serves as the factual basis for the video presentation.
</input>
<output_format>
A JSON object containing the presentation slides:
{{
"slides": [
{{
"slide_number": 1,
"title": "Concise slide title",
"subtitle": "One-line subtitle or tagline",
"content_in_markdown": "## Heading\\n- Bullet point 1\\n- **Bold text**\\n- Bullet point 3",
"speaker_transcripts": [
"First narration sentence for this slide.",
"Second narration sentence expanding on the point.",
"Third sentence wrapping up this slide."
],
"background_explanation": "Emotional mood and color direction for this slide"
}}
]
}}
</output_format>
<guidelines>
=== SLIDE COUNT ===
Dynamically decide the number of slides between 1 and {MAX_SLIDES} (inclusive).
Base your decision entirely on the content's depth, richness, and how many distinct ideas it contains.
Thin or simple content should produce fewer slides; dense or multi-faceted content may use more.
Do NOT inflate or pad slides to reach {
        MAX_SLIDES
    } only use what the content genuinely warrants.
Do NOT treat {MAX_SLIDES} as a target; it is a hard ceiling, not a goal.
=== SLIDE STRUCTURE ===
- Each slide should cover ONE distinct key idea or section.
- Keep slides focused: 2-5 bullet points of content per slide max.
- The first slide should be a title/intro slide.
- The last slide should be a summary or closing slide ONLY if there are 3+ slides.
For 1-2 slides, skip the closing slide just cover the content.
- Do NOT create a separate closing slide if its content would just repeat earlier slides.
=== CONTENT FIELDS ===
- Write speaker_transcripts as if a human presenter is narrating natural, conversational, 2-4 sentences per slide.
These will be converted to TTS audio, so write in a way that sounds great when spoken aloud.
- background_explanation should describe a visual style matching the slide's mood:
- Describe the emotional feel: "warm and organic", "dramatic and urgent", "clean and optimistic",
"technical and precise", "celebratory", "earthy and grounded", "cosmic and futuristic"
- Mention color direction: warm tones, cool tones, earth tones, neon accents, gold/black, etc.
- Vary the mood across slides do NOT always say "dark blue gradient".
- content_in_markdown should use proper markdown: ## headings, **bold**, - bullets, etc.
=== NARRATION QUALITY ===
- Speaker transcripts should explain the slide content in an engaging, presenter-like voice.
- Keep narration concise: 2-4 sentences per slide (targeting ~10-15 seconds of audio per slide).
- The narration should add context beyond what's on the slide — don't just read the bullets.
- Use natural language: contractions, conversational tone, occasional enthusiasm.
</guidelines>
<examples>
Input: "Quantum computing uses quantum bits or qubits which can exist in multiple states simultaneously due to superposition."
Output:
{{
"slides": [
{{
"slide_number": 1,
"title": "Quantum Computing",
"subtitle": "Beyond Classical Bits",
"content_in_markdown": "## The Quantum Leap\\n- Classical computers use **bits** (0 or 1)\\n- Quantum computers use **qubits**\\n- Qubits leverage **superposition**",
"speaker_transcripts": [
"Let's explore quantum computing, a technology that's fundamentally different from the computers we use every day.",
"While traditional computers work with bits that are either zero or one, quantum computers use something called qubits.",
"The magic of qubits is superposition — they can exist in multiple states at the same time."
],
"background_explanation": "Cosmic and futuristic with deep purple and magenta tones, evoking the mystery of quantum mechanics"
}}
]
}}
</examples>
Transform the source material into well-structured presentation slides with engaging narration.
Ensure each slide has a clear visual mood and natural-sounding speaker transcripts.
</video_presentation_system>
"""
# ---------------------------------------------------------------------------
# Remotion scene code generation prompt
# Ported from RemotionTets POC /api/generate system prompt
# ---------------------------------------------------------------------------
REMOTION_SCENE_SYSTEM_PROMPT = """
You are a Remotion component generator that creates cinematic, modern motion graphics.
Generate a single self-contained React component that uses Remotion.
=== THEME PRESETS (pick ONE per slide see user prompt for which to use) ===
Each slide MUST use a DIFFERENT preset. The user prompt will tell you which preset to use.
Use ALL colors from that preset background, surface, text, accent, glow. Do NOT mix presets.
TERRA (warm earth terracotta + olive):
dark: bg #1C1510 surface #261E16 border #3D3024 text #E8DDD0 muted #9A8A78 accent #C2623D secondary #7D8C52 glow rgba(194,98,61,0.12)
light: bg #F7F0E8 surface #FFF8F0 border #DDD0BF text #2C1D0E muted #8A7A68 accent #B85430 secondary #6B7A42 glow rgba(184,84,48,0.08)
gradient-dark: radial-gradient(ellipse at 30% 80%, rgba(194,98,61,0.18), transparent 60%), linear-gradient(180deg, #1C1510, #261E16)
gradient-light: radial-gradient(ellipse at 70% 20%, rgba(107,122,66,0.12), transparent 55%), linear-gradient(180deg, #F7F0E8, #FFF8F0)
OCEAN (cool depth teal + coral):
dark: bg #0B1A1E surface #122428 border #1E3740 text #D5EAF0 muted #6A9AA8 accent #1DB6A8 secondary #E87461 glow rgba(29,182,168,0.12)
light: bg #F0F8FA surface #FFFFFF border #C8E0E8 text #0E2830 muted #5A8A98 accent #0EA69A secondary #D05F4E glow rgba(14,166,154,0.08)
gradient-dark: radial-gradient(ellipse at 80% 30%, rgba(29,182,168,0.20), transparent 55%), radial-gradient(circle at 20% 80%, rgba(232,116,97,0.10), transparent 50%), #0B1A1E
gradient-light: radial-gradient(ellipse at 20% 40%, rgba(14,166,154,0.10), transparent 55%), linear-gradient(180deg, #F0F8FA, #FFFFFF)
SUNSET (warm energy orange + purple):
dark: bg #1E130F surface #2A1B14 border #42291C text #F0DDD0 muted #A08878 accent #E86A20 secondary #A855C0 glow rgba(232,106,32,0.12)
light: bg #FFF5ED surface #FFFFFF border #EADAC8 text #2E1508 muted #907860 accent #D05A18 secondary #9045A8 glow rgba(208,90,24,0.08)
gradient-dark: linear-gradient(135deg, rgba(232,106,32,0.15) 0%, transparent 40%), radial-gradient(circle at 80% 70%, rgba(168,85,192,0.15), transparent 50%), #1E130F
gradient-light: linear-gradient(135deg, rgba(208,90,24,0.08) 0%, rgba(144,69,168,0.06) 100%), #FFF5ED
EMERALD (fresh life green + mint):
dark: bg #0B1E14 surface #12281A border #1E3C28 text #D0F0E0 muted #5EA880 accent #10B981 secondary #84CC16 glow rgba(16,185,129,0.12)
light: bg #F0FAF5 surface #FFFFFF border #C0E8D0 text #0E2C18 muted #489068 accent #059669 secondary #65A30D glow rgba(5,150,105,0.08)
gradient-dark: radial-gradient(ellipse at 50% 50%, rgba(16,185,129,0.18), transparent 60%), linear-gradient(180deg, #0B1E14, #12281A)
gradient-light: radial-gradient(ellipse at 60% 30%, rgba(101,163,13,0.10), transparent 55%), linear-gradient(180deg, #F0FAF5, #FFFFFF)
ECLIPSE (dramatic black + gold):
dark: bg #100C05 surface #1A1508 border #2E2510 text #D4B96A muted #8A7840 accent #E8B830 secondary #C09020 glow rgba(232,184,48,0.14)
light: bg #FAF6ED surface #FFFFFF border #E0D8C0 text #1A1408 muted #7A6818 accent #C09820 secondary #A08018 glow rgba(192,152,32,0.08)
gradient-dark: radial-gradient(circle at 50% 40%, rgba(232,184,48,0.20), transparent 50%), radial-gradient(ellipse at 50% 90%, rgba(192,144,32,0.08), transparent 50%), #100C05
gradient-light: radial-gradient(circle at 50% 40%, rgba(192,152,32,0.10), transparent 55%), linear-gradient(180deg, #FAF6ED, #FFFFFF)
ROSE (soft elegance dusty pink + mauve):
dark: bg #1E1018 surface #281820 border #3D2830 text #F0D8E0 muted #A08090 accent #E4508C secondary #B06498 glow rgba(228,80,140,0.12)
light: bg #FDF2F5 surface #FFFFFF border #F0D0D8 text #2C1018 muted #906878 accent #D43D78 secondary #9A5080 glow rgba(212,61,120,0.08)
gradient-dark: radial-gradient(ellipse at 70% 30%, rgba(228,80,140,0.18), transparent 55%), radial-gradient(circle at 20% 80%, rgba(176,100,152,0.10), transparent 50%), #1E1018
gradient-light: radial-gradient(ellipse at 30% 60%, rgba(212,61,120,0.08), transparent 55%), linear-gradient(180deg, #FDF2F5, #FFFFFF)
FROST (crisp clarity ice blue + silver):
dark: bg #0A1520 surface #101D2A border #1A3040 text #D0E5F5 muted #6090B0 accent #5AB4E8 secondary #8BA8C0 glow rgba(90,180,232,0.12)
light: bg #F0F6FC surface #FFFFFF border #C8D8E8 text #0C1820 muted #5080A0 accent #3A96D0 secondary #7090A8 glow rgba(58,150,208,0.08)
gradient-dark: radial-gradient(ellipse at 40% 20%, rgba(90,180,232,0.16), transparent 55%), linear-gradient(180deg, #0A1520, #101D2A)
gradient-light: radial-gradient(ellipse at 50% 50%, rgba(58,150,208,0.08), transparent 55%), linear-gradient(180deg, #F0F6FC, #FFFFFF)
NEBULA (cosmic magenta + deep purple):
dark: bg #150A1E surface #1E1028 border #351A48 text #E0D0F0 muted #8060A0 accent #C850E0 secondary #8030C0 glow rgba(200,80,224,0.14)
light: bg #F8F0FF surface #FFFFFF border #E0C8F0 text #1A0A24 muted #7050A0 accent #A840C0 secondary #6820A0 glow rgba(168,64,192,0.08)
gradient-dark: radial-gradient(circle at 60% 40%, rgba(200,80,224,0.18), transparent 50%), radial-gradient(ellipse at 30% 80%, rgba(128,48,192,0.12), transparent 50%), #150A1E
gradient-light: radial-gradient(circle at 40% 30%, rgba(168,64,192,0.10), transparent 55%), linear-gradient(180deg, #F8F0FF, #FFFFFF)
AURORA (ethereal lights green-teal + violet):
dark: bg #0A1A1A surface #102020 border #1A3838 text #D0F0F0 muted #60A0A0 accent #30D0B0 secondary #8040D0 glow rgba(48,208,176,0.12)
light: bg #F0FAF8 surface #FFFFFF border #C0E8E0 text #0A2020 muted #508080 accent #20B090 secondary #6830B0 glow rgba(32,176,144,0.08)
gradient-dark: radial-gradient(ellipse at 30% 70%, rgba(48,208,176,0.18), transparent 55%), radial-gradient(circle at 70% 30%, rgba(128,64,208,0.12), transparent 50%), #0A1A1A
gradient-light: radial-gradient(ellipse at 50% 40%, rgba(32,176,144,0.10), transparent 55%), linear-gradient(180deg, #F0FAF8, #FFFFFF)
CORAL (tropical warmth coral + turquoise):
dark: bg #1E0F0F surface #281818 border #402828 text #F0D8D8 muted #A07070 accent #F06050 secondary #30B8B0 glow rgba(240,96,80,0.12)
light: bg #FFF5F3 surface #FFFFFF border #F0D0C8 text #2E1010 muted #906060 accent #E04838 secondary #20A098 glow rgba(224,72,56,0.08)
gradient-dark: radial-gradient(ellipse at 60% 60%, rgba(240,96,80,0.18), transparent 55%), radial-gradient(circle at 30% 30%, rgba(48,184,176,0.10), transparent 50%), #1E0F0F
gradient-light: radial-gradient(ellipse at 40% 50%, rgba(224,72,56,0.08), transparent 55%), linear-gradient(180deg, #FFF5F3, #FFFFFF)
MIDNIGHT (deep sophistication navy + silver):
dark: bg #080C18 surface #0E1420 border #1A2438 text #C8D8F0 muted #5070A0 accent #4080E0 secondary #A0B0D0 glow rgba(64,128,224,0.12)
light: bg #F0F2F8 surface #FFFFFF border #C8D0E0 text #101828 muted #506080 accent #3060C0 secondary #8090B0 glow rgba(48,96,192,0.08)
gradient-dark: radial-gradient(ellipse at 50% 30%, rgba(64,128,224,0.16), transparent 55%), linear-gradient(180deg, #080C18, #0E1420)
gradient-light: radial-gradient(ellipse at 50% 50%, rgba(48,96,192,0.08), transparent 55%), linear-gradient(180deg, #F0F2F8, #FFFFFF)
AMBER (rich honey warmth amber + brown):
dark: bg #1A1208 surface #221A0E border #3A2C18 text #F0E0C0 muted #A09060 accent #E0A020 secondary #C08030 glow rgba(224,160,32,0.12)
light: bg #FFF8E8 surface #FFFFFF border #E8D8B8 text #2A1C08 muted #907840 accent #C88810 secondary #A86820 glow rgba(200,136,16,0.08)
gradient-dark: radial-gradient(ellipse at 40% 60%, rgba(224,160,32,0.18), transparent 55%), linear-gradient(180deg, #1A1208, #221A0E)
gradient-light: radial-gradient(ellipse at 60% 40%, rgba(200,136,16,0.10), transparent 55%), linear-gradient(180deg, #FFF8E8, #FFFFFF)
LAVENDER (gentle dreaminess purple + lilac):
dark: bg #14101E surface #1C1628 border #302840 text #E0D8F0 muted #8070A0 accent #A060E0 secondary #C090D0 glow rgba(160,96,224,0.12)
light: bg #F8F0FF surface #FFFFFF border #E0D0F0 text #1C1028 muted #706090 accent #8848C0 secondary #A878B8 glow rgba(136,72,192,0.08)
gradient-dark: radial-gradient(ellipse at 60% 40%, rgba(160,96,224,0.18), transparent 55%), radial-gradient(circle at 30% 70%, rgba(192,144,208,0.10), transparent 50%), #14101E
gradient-light: radial-gradient(ellipse at 40% 30%, rgba(136,72,192,0.10), transparent 55%), linear-gradient(180deg, #F8F0FF, #FFFFFF)
STEEL (industrial strength gray + steel blue):
dark: bg #101214 surface #181C20 border #282E38 text #D0D8E0 muted #708090 accent #5088B0 secondary #90A0B0 glow rgba(80,136,176,0.12)
light: bg #F2F4F6 surface #FFFFFF border #D0D8E0 text #181C24 muted #607080 accent #3870A0 secondary #708898 glow rgba(56,112,160,0.08)
gradient-dark: radial-gradient(ellipse at 50% 50%, rgba(80,136,176,0.14), transparent 55%), linear-gradient(180deg, #101214, #181C20)
gradient-light: radial-gradient(ellipse at 50% 40%, rgba(56,112,160,0.08), transparent 55%), linear-gradient(180deg, #F2F4F6, #FFFFFF)
CITRUS (bright optimism yellow + lime):
dark: bg #181808 surface #202010 border #383818 text #F0F0C0 muted #A0A060 accent #E8D020 secondary #90D030 glow rgba(232,208,32,0.12)
light: bg #FFFFF0 surface #FFFFFF border #E8E8C0 text #282808 muted #808040 accent #C8B010 secondary #70B020 glow rgba(200,176,16,0.08)
gradient-dark: radial-gradient(ellipse at 40% 40%, rgba(232,208,32,0.18), transparent 55%), radial-gradient(circle at 70% 70%, rgba(144,208,48,0.10), transparent 50%), #181808
gradient-light: radial-gradient(ellipse at 50% 30%, rgba(200,176,16,0.10), transparent 55%), linear-gradient(180deg, #FFFFF0, #FFFFFF)
CHERRY (bold impact deep red + dark):
dark: bg #1A0808 surface #241010 border #401818 text #F0D0D0 muted #A06060 accent #D02030 secondary #E05060 glow rgba(208,32,48,0.14)
light: bg #FFF0F0 surface #FFFFFF border #F0C8C8 text #280808 muted #904848 accent #B01828 secondary #C83848 glow rgba(176,24,40,0.08)
gradient-dark: radial-gradient(ellipse at 50% 40%, rgba(208,32,48,0.20), transparent 50%), linear-gradient(180deg, #1A0808, #241010)
gradient-light: radial-gradient(ellipse at 50% 50%, rgba(176,24,40,0.10), transparent 55%), linear-gradient(180deg, #FFF0F0, #FFFFFF)
=== SHARED TOKENS (use with any theme above) ===
SPACING: xs 8px, sm 16px, md 24px, lg 32px, xl 48px, 2xl 64px, 3xl 96px, 4xl 128px
TYPOGRAPHY: fontFamily "Inter, system-ui, -apple-system, sans-serif"
caption 14px/1.4, body 18px/1.6, subhead 24px/1.4, title 40px/1.2 w600, headline 64px/1.1 w700, display 96px/1.0 w800
letterSpacing: tight "-0.02em", normal "0", wide "0.05em"
BORDER RADIUS: 12px (cards), 8px (buttons), 9999px (pills)
=== VISUAL VARIETY (CRITICAL) ===
The user prompt assigns each slide a specific theme preset AND mode (dark/light).
You MUST use EXACTLY the assigned preset and mode. Additionally:
1. Use the preset's gradient as the AbsoluteFill background.
2. Use the preset's accent/secondary colors for highlights, pill badges, and card accents.
3. Use the preset's glow value for all boxShadow effects.
4. LAYOUT VARIATION: Vary layout between slides:
- One slide: bold centered headline + subtle stat
- Another: two-column card layout
- Another: single large number or quote as hero
Do NOT use the same layout pattern for every slide.
=== LAYOUT RULES (CRITICAL elements must NEVER overlap) ===
The canvas is 1920x1080. You MUST use a SINGLE-LAYER layout. NO stacking, NO multiple AbsoluteFill layers.
STRUCTURE every component must follow this exact pattern:
<AbsoluteFill style={{ backgroundColor: "...", display: "flex", flexDirection: "column", justifyContent: "center", alignItems: "center", padding: 80 }}>
{/* ALL content goes here as direct children in normal flow */}
</AbsoluteFill>
ABSOLUTE RULES:
- Use exactly ONE AbsoluteFill as the root. Set its background color/gradient via its style prop.
- NEVER nest AbsoluteFill inside AbsoluteFill.
- NEVER use position "absolute" or position "fixed" on ANY element.
- NEVER use multiple layers or z-index.
- ALL elements must be in normal document flow inside the single root AbsoluteFill.
SPACING:
- Root padding: 80px on all sides (safe area).
- Use flexDirection "column" with gap for vertical stacking, flexDirection "row" with gap for horizontal.
- Minimum gap between elements: 24px vertical, 32px horizontal.
- Text hierarchy gaps: headlinesubheading 16px, subheadingbody 12px, bodybutton 32px.
- Cards/panels: padding 32px-48px, borderRadius 12px.
- NEVER use margin to space siblings always use the parent's gap property.
=== DESIGN STYLE ===
- Premium aesthetic use the exact colors from the assigned theme preset (do NOT invent your own)
- Background: use the preset's gradient-dark or gradient-light value directly as the AbsoluteFill's background
- Card/surface backgrounds: use the preset's surface color
- Text colors: use the preset's text, muted values
- Borders: use the preset's border color
- Glows: use the preset's glow value for all boxShadow — do NOT substitute other colors
- Generous whitespace less is more, let elements breathe
- NO decorative background shapes, blurs, or overlapping ornaments
=== REMOTION RULES ===
- Export the component as: export const MyComposition = () => { ... }
- Use useCurrentFrame() and useVideoConfig() from "remotion"
- Do NOT use Sequence
- Do NOT manually calculate animation timings or frame offsets
=== ANIMATION (use the stagger() helper for ALL element animations) ===
A pre-built helper function called stagger() is available globally.
It handles enter, hold, and exit phases automatically you MUST use it.
Signature:
stagger(frame, fps, index, total) { opacity: number, transform: string }
Parameters:
frame from useCurrentFrame()
fps from useVideoConfig()
index 0-based index of this element in the entrance order
total total number of animated elements in the scene
It returns a style object with opacity and transform that you spread onto the element.
Timing is handled for you: staggered spring entrances, ambient hold motion, and a graceful exit.
Usage pattern:
const frame = useCurrentFrame();
const { fps } = useVideoConfig();
<div style={stagger(frame, fps, 0, 4)}>Headline</div>
<div style={stagger(frame, fps, 1, 4)}>Subtitle</div>
<div style={stagger(frame, fps, 2, 4)}>Card</div>
<div style={stagger(frame, fps, 3, 4)}>Footer</div>
Rules:
- Count ALL animated elements in your scene and pass that count as the "total" parameter.
- Assign each element a sequential index starting from 0.
- You can merge stagger's return with additional styles:
<div style={{ ...stagger(frame, fps, 0, 3), fontSize: 64, color: "#fafafa" }}>
- For non-animated static elements (backgrounds, borders), just use normal styles without stagger.
- You may still use spring() and interpolate() for EXTRA custom effects (e.g., a number counter,
color shift, or typewriter effect), but stagger() must drive all entrance/exit animations.
=== AVAILABLE GLOBALS (injected at runtime, do NOT import anything else) ===
- React (available globally)
- AbsoluteFill, useCurrentFrame, useVideoConfig, spring, interpolate, Easing from "remotion"
- stagger(frame, fps, index, total) animation helper described above
=== CODE RULES ===
- Output ONLY the raw code, no markdown fences, no explanations
- Keep it fully self-contained, no external dependencies or images
- Use inline styles only (no CSS imports, no className)
- Target 1920x1080 resolution
- Every container must use display "flex" with explicit gap values
- NEVER use marginTop/marginBottom to space siblings use the parent's gap instead
""".strip()
def build_scene_generation_user_prompt(
    slide_number: int,
    total_slides: int,
    title: str,
    subtitle: str,
    content_in_markdown: str,
    background_explanation: str,
    duration_in_frames: int,
    theme: str,
    mode: str,
) -> str:
    """Build the user prompt for generating a single slide's Remotion scene code.

    *theme* and *mode* are pre-assigned (by LLM or fallback) before this
    is called; the prompt instructs the model to apply exactly that
    preset/mode from REMOTION_SCENE_SYSTEM_PROMPT.
    """
    total_seconds = duration_in_frames / FPS
    return (
        "Create a cinematic, visually striking Remotion scene.\n"
        f"The video is {duration_in_frames} frames at {FPS}fps ({total_seconds:.1f}s total).\n"
        "\n"
        f"This is slide {slide_number} of {total_slides} in the video.\n"
        "\n"
        f"=== ASSIGNED THEME: {theme} / {mode.upper()} mode ===\n"
        f"You MUST use the {theme} preset in {mode} mode from the theme presets above.\n"
        f"Use its exact background gradient (gradient-{mode}), surface, text, accent, secondary, border, and glow colors.\n"
        "Do NOT substitute, invent, or default to blue/violet colors.\n"
        "\n"
        f'The scene should communicate this message: "{title}{subtitle}"\n'
        "\n"
        "Key ideas to convey (use as creative inspiration, NOT literal text to dump on screen):\n"
        f"{content_in_markdown}\n"
        "\n"
        "Pick only the 1-2 most impactful phrases or numbers to display as text.\n"
        "\n"
        f"Mood & tone: {background_explanation}"
    )
# System prompt for the refine loop (_refine_if_needed): repairs generated
# Remotion components that failed the syntax check / Babel compile.
REFINE_SCENE_SYSTEM_PROMPT: str = """
You are a code repair assistant. You will receive a Remotion React component that failed to compile,
along with the exact error message from the Babel transpiler.
Your job is to fix the code so it compiles and runs correctly.
RULES:
- Output ONLY the fixed raw code as a string no markdown fences, no explanations.
- Preserve the original intent, design, and animations as closely as possible.
- The component must be exported as: export const MyComposition = () => { ... }
- Only these globals are available at runtime (they are injected, not actually imported):
React, AbsoluteFill, useCurrentFrame, useVideoConfig, spring, interpolate, Easing,
stagger (a helper: stagger(frame, fps, index, total) { opacity, transform })
- Keep import statements at the top (they get stripped by the compiler) but do NOT import anything
other than "react" and "remotion".
- Use inline styles only (no CSS, no className).
- Common fixes:
- Mismatched braces/brackets in JSX style objects (e.g. }}, instead of }}>)
- Missing closing tags
- Trailing commas before > in JSX
- Undefined variables or typos
- Invalid JSX expressions
- After fixing, mentally walk through every brace pair { } and JSX tag to verify they match.
""".strip()

View file

@ -0,0 +1,72 @@
"""Define the state structures for the video presentation agent."""
from __future__ import annotations
from dataclasses import dataclass
from pydantic import BaseModel, Field
from sqlalchemy.ext.asyncio import AsyncSession
class SlideContent(BaseModel):
    """Represents a single parsed slide from content analysis.

    Produced by the slide-parsing LLM step; downstream nodes use it to
    generate per-slide TTS audio and Remotion scene code.
    """

    slide_number: int = Field(..., description="1-based slide number")
    title: str = Field(..., description="Concise slide title")
    subtitle: str = Field(..., description="One-line subtitle or tagline")
    content_in_markdown: str = Field(
        ..., description="Slide body content formatted as markdown"
    )
    speaker_transcripts: list[str] = Field(
        ...,
        description="2-4 short sentences a presenter would say while this slide is shown",
    )
    background_explanation: str = Field(
        ...,
        description="Emotional mood and color direction for this slide",
    )
class PresentationSlides(BaseModel):
    """Represents the full set of parsed slides from the LLM.

    Wrapper model matching the JSON object shape ({"slides": [...]})
    the slide-generation prompt asks the LLM to return.
    """

    slides: list[SlideContent] = Field(
        ..., description="Ordered array of presentation slides"
    )
class SlideAudioResult(BaseModel):
    """Audio generation result for a single slide.

    The frame duration is what the scene-code step uses to size each
    Remotion composition so video and narration stay in sync.
    """

    slide_number: int  # matches SlideContent.slide_number (1-based)
    audio_file: str = Field(..., description="Path to the per-slide audio file")
    duration_seconds: float = Field(..., description="Audio duration in seconds")
    duration_in_frames: int = Field(
        ..., description="Audio duration in frames (at 30fps)"
    )
class SlideSceneCode(BaseModel):
    """Generated Remotion component code for a single slide.

    The code is raw source text; the frontend compiles and renders it.
    """

    slide_number: int  # matches SlideContent.slide_number (1-based)
    code: str = Field(
        ..., description="Raw Remotion React component source code for this slide"
    )
    title: str = Field(..., description="Short title for the composition")
@dataclass
class State:
    """Mutable state threaded through the video presentation agent graph.

    Pipeline: parse slides -> generate per-slide TTS audio -> generate
    per-slide Remotion code. The frontend receives the slides + code +
    audio and handles compilation/rendering.
    """

    # Async DB session supplied by the caller; available to graph nodes.
    db_session: AsyncSession
    # Raw text the presentation is generated from.
    source_content: str
    # Each field below is filled in by its pipeline stage; None until then.
    slides: list[SlideContent] | None = None
    slide_audio_results: list[SlideAudioResult] | None = None
    slide_scene_codes: list[SlideSceneCode] | None = None

View file

@ -0,0 +1,30 @@
def get_voice_for_provider(provider: str, speaker_id: int = 0) -> dict | str:
"""
Get the appropriate voice configuration based on the TTS provider.
Currently single-speaker only (speaker_id=0). Multi-speaker support
will be added in a future iteration.
Args:
provider: The TTS provider (e.g., "openai/tts-1", "vertex_ai/test")
speaker_id: The ID of the speaker (default 0, single speaker for now)
Returns:
Voice configuration - string for OpenAI, dict for Vertex AI
"""
if provider == "local/kokoro":
return "af_heart"
provider_type = (
provider.split("/")[0].lower() if "/" in provider else provider.lower()
)
voices = {
"openai": "alloy",
"vertex_ai": {
"languageCode": "en-US",
"name": "en-US-Studio-O",
},
"azure": "alloy",
}
return voices.get(provider_type, {})

View file

@ -341,7 +341,7 @@ if config.NEXT_FRONTEND_URL:
allowed_origins.append(www_url) allowed_origins.append(www_url)
allowed_origins.extend( allowed_origins.extend(
[ # For local development and desktop app [ # For local development and desktop app
"http://localhost:3000", "http://localhost:3000",
"http://127.0.0.1:3000", "http://127.0.0.1:3000",
] ]

View file

@ -77,6 +77,7 @@ celery_app = Celery(
include=[ include=[
"app.tasks.celery_tasks.document_tasks", "app.tasks.celery_tasks.document_tasks",
"app.tasks.celery_tasks.podcast_tasks", "app.tasks.celery_tasks.podcast_tasks",
"app.tasks.celery_tasks.video_presentation_tasks",
"app.tasks.celery_tasks.connector_tasks", "app.tasks.celery_tasks.connector_tasks",
"app.tasks.celery_tasks.schedule_checker_task", "app.tasks.celery_tasks.schedule_checker_task",
"app.tasks.celery_tasks.document_reindex_tasks", "app.tasks.celery_tasks.document_reindex_tasks",

View file

@ -103,6 +103,13 @@ class PodcastStatus(StrEnum):
FAILED = "failed" FAILED = "failed"
class VideoPresentationStatus(StrEnum):
    """Lifecycle status of a video presentation (stored as lowercase strings)."""

    PENDING = "pending"
    GENERATING = "generating"
    READY = "ready"
    FAILED = "failed"
class DocumentStatus: class DocumentStatus:
""" """
Helper class for document processing status (stored as JSONB). Helper class for document processing status (stored as JSONB).
@ -337,6 +344,12 @@ class Permission(StrEnum):
PODCASTS_UPDATE = "podcasts:update" PODCASTS_UPDATE = "podcasts:update"
PODCASTS_DELETE = "podcasts:delete" PODCASTS_DELETE = "podcasts:delete"
# Video Presentations
VIDEO_PRESENTATIONS_CREATE = "video_presentations:create"
VIDEO_PRESENTATIONS_READ = "video_presentations:read"
VIDEO_PRESENTATIONS_UPDATE = "video_presentations:update"
VIDEO_PRESENTATIONS_DELETE = "video_presentations:delete"
# Image Generations # Image Generations
IMAGE_GENERATIONS_CREATE = "image_generations:create" IMAGE_GENERATIONS_CREATE = "image_generations:create"
IMAGE_GENERATIONS_READ = "image_generations:read" IMAGE_GENERATIONS_READ = "image_generations:read"
@ -403,6 +416,10 @@ DEFAULT_ROLE_PERMISSIONS = {
Permission.PODCASTS_CREATE.value, Permission.PODCASTS_CREATE.value,
Permission.PODCASTS_READ.value, Permission.PODCASTS_READ.value,
Permission.PODCASTS_UPDATE.value, Permission.PODCASTS_UPDATE.value,
# Video Presentations (no delete)
Permission.VIDEO_PRESENTATIONS_CREATE.value,
Permission.VIDEO_PRESENTATIONS_READ.value,
Permission.VIDEO_PRESENTATIONS_UPDATE.value,
# Image Generations (create and read, no delete) # Image Generations (create and read, no delete)
Permission.IMAGE_GENERATIONS_CREATE.value, Permission.IMAGE_GENERATIONS_CREATE.value,
Permission.IMAGE_GENERATIONS_READ.value, Permission.IMAGE_GENERATIONS_READ.value,
@ -435,6 +452,8 @@ DEFAULT_ROLE_PERMISSIONS = {
Permission.LLM_CONFIGS_READ.value, Permission.LLM_CONFIGS_READ.value,
# Podcasts (read only) # Podcasts (read only)
Permission.PODCASTS_READ.value, Permission.PODCASTS_READ.value,
# Video Presentations (read only)
Permission.VIDEO_PRESENTATIONS_READ.value,
# Image Generations (read only) # Image Generations (read only)
Permission.IMAGE_GENERATIONS_READ.value, Permission.IMAGE_GENERATIONS_READ.value,
# Connectors (read only) # Connectors (read only)
@ -1044,6 +1063,46 @@ class Podcast(BaseModel, TimestampMixin):
thread = relationship("NewChatThread") thread = relationship("NewChatThread")
class VideoPresentation(BaseModel, TimestampMixin):
    """Video presentation model for storing AI-generated video presentations.

    The slides JSONB stores per-slide data including Remotion component code,
    audio file paths, and durations. The frontend compiles the code and renders
    the video using Remotion Player.
    """

    __tablename__ = "video_presentations"

    title = Column(String(500), nullable=False)
    # Per-slide payload (parsed content, audio file paths, durations).
    slides = Column(JSONB, nullable=True)
    # Remotion component source code, one entry per slide.
    scene_codes = Column(JSONB, nullable=True)
    status = Column(
        SQLAlchemyEnum(
            VideoPresentationStatus,
            name="video_presentation_status",
            # Reuse the existing DB enum type instead of emitting CREATE TYPE.
            create_type=False,
            # Persist the lowercase enum *values*, not the member names.
            values_callable=lambda x: [e.value for e in x],
        ),
        nullable=False,
        default=VideoPresentationStatus.READY,
        server_default="ready",
        index=True,
    )
    # Owning search space; rows are removed when the search space is deleted.
    search_space_id = Column(
        Integer, ForeignKey("searchspaces.id", ondelete="CASCADE"), nullable=False
    )
    search_space = relationship("SearchSpace", back_populates="video_presentations")
    # Optional originating chat thread; nulled out if the thread is deleted.
    thread_id = Column(
        Integer,
        ForeignKey("new_chat_threads.id", ondelete="SET NULL"),
        nullable=True,
        index=True,
    )
    thread = relationship("NewChatThread")
class Report(BaseModel, TimestampMixin): class Report(BaseModel, TimestampMixin):
"""Report model for storing generated Markdown reports.""" """Report model for storing generated Markdown reports."""
@ -1228,6 +1287,12 @@ class SearchSpace(BaseModel, TimestampMixin):
order_by="Podcast.id.desc()", order_by="Podcast.id.desc()",
cascade="all, delete-orphan", cascade="all, delete-orphan",
) )
video_presentations = relationship(
"VideoPresentation",
back_populates="search_space",
order_by="VideoPresentation.id.desc()",
cascade="all, delete-orphan",
)
reports = relationship( reports = relationship(
"Report", "Report",
back_populates="search_space", back_populates="search_space",

View file

@ -42,6 +42,7 @@ from .search_spaces_routes import router as search_spaces_router
from .slack_add_connector_route import router as slack_add_connector_router from .slack_add_connector_route import router as slack_add_connector_router
from .surfsense_docs_routes import router as surfsense_docs_router from .surfsense_docs_routes import router as surfsense_docs_router
from .teams_add_connector_route import router as teams_add_connector_router from .teams_add_connector_route import router as teams_add_connector_router
from .video_presentations_routes import router as video_presentations_router
from .youtube_routes import router as youtube_router from .youtube_routes import router as youtube_router
router = APIRouter() router = APIRouter()
@ -55,6 +56,9 @@ router.include_router(new_chat_router) # Chat with assistant-ui persistence
router.include_router(sandbox_router) # Sandbox file downloads (Daytona) router.include_router(sandbox_router) # Sandbox file downloads (Daytona)
router.include_router(chat_comments_router) router.include_router(chat_comments_router)
router.include_router(podcasts_router) # Podcast task status and audio router.include_router(podcasts_router) # Podcast task status and audio
router.include_router(
video_presentations_router
) # Video presentation status and streaming
router.include_router(reports_router) # Report CRUD and multi-format export router.include_router(reports_router) # Report CRUD and multi-format export
router.include_router(image_generation_router) # Image generation via litellm router.include_router(image_generation_router) # Image generation via litellm
router.include_router(search_source_connectors_router) router.include_router(search_source_connectors_router)

View file

@ -0,0 +1,242 @@
"""
Video presentation routes for CRUD operations and per-slide audio streaming.
These routes support the video presentation generation feature in new-chat.
Frontend polls GET /video-presentations/{id} to check status field.
When ready, the slides JSONB contains per-slide Remotion code and audio file paths.
The frontend compiles the Remotion code via Babel and renders with Remotion Player.
"""
import os
from pathlib import Path
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import StreamingResponse
from sqlalchemy import select
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession
from app.db import (
Permission,
SearchSpace,
SearchSpaceMembership,
User,
VideoPresentation,
get_async_session,
)
from app.schemas import VideoPresentationRead
from app.users import current_active_user
from app.utils.rbac import check_permission
router = APIRouter()
@router.get("/video-presentations", response_model=list[VideoPresentationRead])
async def read_video_presentations(
skip: int = 0,
limit: int = 100,
search_space_id: int | None = None,
session: AsyncSession = Depends(get_async_session),
user: User = Depends(current_active_user),
):
"""
List video presentations the user has access to.
Requires VIDEO_PRESENTATIONS_READ permission for the search space(s).
"""
if skip < 0 or limit < 1:
raise HTTPException(status_code=400, detail="Invalid pagination parameters")
try:
if search_space_id is not None:
await check_permission(
session,
user,
search_space_id,
Permission.VIDEO_PRESENTATIONS_READ.value,
"You don't have permission to read video presentations in this search space",
)
result = await session.execute(
select(VideoPresentation)
.filter(VideoPresentation.search_space_id == search_space_id)
.offset(skip)
.limit(limit)
)
else:
result = await session.execute(
select(VideoPresentation)
.join(SearchSpace)
.join(SearchSpaceMembership)
.filter(SearchSpaceMembership.user_id == user.id)
.offset(skip)
.limit(limit)
)
return [
VideoPresentationRead.from_orm_with_slides(vp)
for vp in result.scalars().all()
]
except HTTPException:
raise
except SQLAlchemyError:
raise HTTPException(
status_code=500,
detail="Database error occurred while fetching video presentations",
) from None
@router.get(
    "/video-presentations/{video_presentation_id}",
    response_model=VideoPresentationRead,
)
async def read_video_presentation(
    video_presentation_id: int,
    session: AsyncSession = Depends(get_async_session),
    user: User = Depends(current_active_user),
):
    """Fetch one video presentation by ID.

    Requires VIDEO_PRESENTATIONS_READ permission on the owning search space.
    When status is "ready" the payload carries the parsed slides (with
    per-slide audio URLs and durations) plus the Remotion scene codes.
    """
    try:
        lookup = await session.execute(
            select(VideoPresentation).filter(
                VideoPresentation.id == video_presentation_id
            )
        )
        row = lookup.scalars().first()
        if row is None:
            raise HTTPException(status_code=404, detail="Video presentation not found")
        await check_permission(
            session,
            user,
            row.search_space_id,
            Permission.VIDEO_PRESENTATIONS_READ.value,
            "You don't have permission to read video presentations in this search space",
        )
        return VideoPresentationRead.from_orm_with_slides(row)
    except HTTPException:
        raise
    except SQLAlchemyError:
        raise HTTPException(
            status_code=500,
            detail="Database error occurred while fetching video presentation",
        ) from None
@router.delete("/video-presentations/{video_presentation_id}", response_model=dict)
async def delete_video_presentation(
video_presentation_id: int,
session: AsyncSession = Depends(get_async_session),
user: User = Depends(current_active_user),
):
"""
Delete a video presentation.
Requires VIDEO_PRESENTATIONS_DELETE permission for the search space.
"""
try:
result = await session.execute(
select(VideoPresentation).filter(
VideoPresentation.id == video_presentation_id
)
)
db_video_pres = result.scalars().first()
if not db_video_pres:
raise HTTPException(status_code=404, detail="Video presentation not found")
await check_permission(
session,
user,
db_video_pres.search_space_id,
Permission.VIDEO_PRESENTATIONS_DELETE.value,
"You don't have permission to delete video presentations in this search space",
)
await session.delete(db_video_pres)
await session.commit()
return {"message": "Video presentation deleted successfully"}
except HTTPException as he:
raise he
except SQLAlchemyError:
await session.rollback()
raise HTTPException(
status_code=500,
detail="Database error occurred while deleting video presentation",
) from None
@router.get("/video-presentations/{video_presentation_id}/slides/{slide_number}/audio")
async def stream_slide_audio(
video_presentation_id: int,
slide_number: int,
session: AsyncSession = Depends(get_async_session),
user: User = Depends(current_active_user),
):
"""
Stream the audio file for a specific slide in a video presentation.
The slide_number is 1-based. Audio path is read from the slides JSONB.
"""
try:
result = await session.execute(
select(VideoPresentation).filter(
VideoPresentation.id == video_presentation_id
)
)
video_pres = result.scalars().first()
if not video_pres:
raise HTTPException(status_code=404, detail="Video presentation not found")
await check_permission(
session,
user,
video_pres.search_space_id,
Permission.VIDEO_PRESENTATIONS_READ.value,
"You don't have permission to access video presentations in this search space",
)
slides = video_pres.slides or []
slide_data = None
for s in slides:
if s.get("slide_number") == slide_number:
slide_data = s
break
if not slide_data:
raise HTTPException(
status_code=404,
detail=f"Slide {slide_number} not found",
)
file_path = slide_data.get("audio_file")
if not file_path or not os.path.isfile(file_path):
raise HTTPException(status_code=404, detail="Slide audio file not found")
ext = Path(file_path).suffix.lower()
media_type = "audio/wav" if ext == ".wav" else "audio/mpeg"
def iterfile():
with open(file_path, mode="rb") as file_like:
yield from file_like
return StreamingResponse(
iterfile(),
media_type=media_type,
headers={
"Accept-Ranges": "bytes",
"Content-Disposition": f"inline; filename={Path(file_path).name}",
},
)
except HTTPException as he:
raise he
except Exception as e:
raise HTTPException(
status_code=500,
detail=f"Error streaming slide audio: {e!s}",
) from e

View file

@ -101,6 +101,12 @@ from .search_space import (
SearchSpaceWithStats, SearchSpaceWithStats,
) )
from .users import UserCreate, UserRead, UserUpdate from .users import UserCreate, UserRead, UserUpdate
from .video_presentations import (
VideoPresentationBase,
VideoPresentationCreate,
VideoPresentationRead,
VideoPresentationUpdate,
)
__all__ = [ __all__ = [
# Chat schemas (assistant-ui integration) # Chat schemas (assistant-ui integration)
@ -220,4 +226,9 @@ __all__ = [
"UserRead", "UserRead",
"UserSearchSpaceAccess", "UserSearchSpaceAccess",
"UserUpdate", "UserUpdate",
# Video Presentation schemas
"VideoPresentationBase",
"VideoPresentationCreate",
"VideoPresentationRead",
"VideoPresentationUpdate",
] ]

View file

@ -0,0 +1,103 @@
"""Video presentation schemas for API responses."""
from datetime import datetime
from enum import StrEnum
from typing import Any
from pydantic import BaseModel
class VideoPresentationStatusEnum(StrEnum):
    """API-facing status values; same lowercase strings the DB stores."""

    PENDING = "pending"
    GENERATING = "generating"
    READY = "ready"
    FAILED = "failed"
class VideoPresentationBase(BaseModel):
    """Base video presentation schema shared by create/read variants."""

    title: str
    # Raw per-slide dicts (content plus audio info) as stored in JSONB.
    slides: list[dict[str, Any]] | None = None
    # Per-slide Remotion component source entries.
    scene_codes: list[dict[str, Any]] | None = None
    search_space_id: int
class VideoPresentationCreate(VideoPresentationBase):
    """Schema for creating a video presentation (same fields as the base)."""

    pass
class VideoPresentationUpdate(BaseModel):
    """Schema for partially updating a video presentation; every field optional."""

    title: str | None = None
    slides: list[dict[str, Any]] | None = None
    scene_codes: list[dict[str, Any]] | None = None
class VideoPresentationRead(VideoPresentationBase):
    """Schema for reading a video presentation."""

    id: int
    status: VideoPresentationStatusEnum = VideoPresentationStatusEnum.READY
    created_at: datetime
    # Derived from len(slides); None when no slides exist yet.
    slide_count: int | None = None

    class Config:
        from_attributes = True

    @classmethod
    def from_orm_with_slides(cls, obj):
        """Build a read schema from an ORM row.

        Computes slide_count and swaps raw server file paths in
        ``audio_file`` for API streaming URLs so the frontend can use
        them directly in Remotion <Audio />.
        """
        raw_slides = obj.slides
        api_slides = (
            _replace_audio_paths_with_urls(obj.id, raw_slides)
            if raw_slides
            else raw_slides
        )
        return cls(
            id=obj.id,
            title=obj.title,
            slides=api_slides,
            scene_codes=obj.scene_codes,
            search_space_id=obj.search_space_id,
            status=obj.status,
            created_at=obj.created_at,
            slide_count=len(raw_slides) if raw_slides else None,
        )
def _replace_audio_paths_with_urls(
video_presentation_id: int,
slides: list[dict[str, Any]],
) -> list[dict[str, Any]]:
"""Replace server-local audio_file paths with streaming API URLs.
Transforms:
"audio_file": "video_presentation_audio/abc_slide_1.mp3"
Into:
"audio_url": "/api/v1/video-presentations/42/slides/1/audio"
The frontend passes this URL to Remotion's <Audio src={slide.audio_url} />.
"""
result = []
for slide in slides:
slide_copy = dict(slide)
slide_number = slide_copy.get("slide_number")
audio_file = slide_copy.pop("audio_file", None)
if audio_file and slide_number is not None:
slide_copy["audio_url"] = (
f"/api/v1/video-presentations/{video_presentation_id}"
f"/slides/{slide_number}/audio"
)
else:
slide_copy["audio_url"] = None
result.append(slide_copy)
return result

View file

@ -0,0 +1,178 @@
"""Celery tasks for video presentation generation."""
import asyncio
import logging
import sys
from sqlalchemy import select
from app.agents.video_presentation.graph import graph as video_presentation_graph
from app.agents.video_presentation.state import State as VideoPresentationState
from app.celery_app import celery_app
from app.config import config
from app.db import VideoPresentation, VideoPresentationStatus
from app.tasks.celery_tasks import get_celery_session_maker
logger = logging.getLogger(__name__)
if sys.platform.startswith("win"):
try:
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
except AttributeError:
logger.warning(
"WindowsProactorEventLoopPolicy is unavailable; async subprocess support may fail."
)
def _clear_generating_video_presentation(search_space_id: int) -> None:
    """Best-effort removal of the Redis "generating" marker for a search space.

    Any Redis failure is logged and swallowed so cleanup can never mask
    the outcome of the task itself.
    """
    import redis

    redis_key = f"video_presentation:generating:{search_space_id}"
    try:
        redis.from_url(config.REDIS_APP_URL, decode_responses=True).delete(redis_key)
        logger.info(
            f"Cleared generating video presentation key for search_space_id={search_space_id}"
        )
    except Exception as e:
        logger.warning(f"Could not clear generating video presentation key: {e}")
@celery_app.task(name="generate_video_presentation", bind=True)
def generate_video_presentation_task(
self,
video_presentation_id: int,
source_content: str,
search_space_id: int,
user_prompt: str | None = None,
) -> dict:
"""
Celery task to generate video presentation from source content.
Updates existing video presentation record created by the tool.
"""
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
result = loop.run_until_complete(
_generate_video_presentation(
video_presentation_id,
source_content,
search_space_id,
user_prompt,
)
)
loop.run_until_complete(loop.shutdown_asyncgens())
return result
except Exception as e:
logger.error(f"Error generating video presentation: {e!s}")
loop.run_until_complete(_mark_video_presentation_failed(video_presentation_id))
return {"status": "failed", "video_presentation_id": video_presentation_id}
finally:
_clear_generating_video_presentation(search_space_id)
asyncio.set_event_loop(None)
loop.close()
async def _mark_video_presentation_failed(video_presentation_id: int) -> None:
    """Set status=FAILED on the given presentation row, swallowing DB errors."""
    session_maker = get_celery_session_maker()
    async with session_maker() as session:
        try:
            lookup = await session.execute(
                select(VideoPresentation).filter(
                    VideoPresentation.id == video_presentation_id
                )
            )
            row = lookup.scalars().first()
            if row is not None:
                row.status = VideoPresentationStatus.FAILED
                await session.commit()
        except Exception as e:
            # Never let cleanup raise out of the failure path.
            logger.error(f"Failed to mark video presentation as failed: {e}")
async def _generate_video_presentation(
    video_presentation_id: int,
    source_content: str,
    search_space_id: int,
    user_prompt: str | None = None,
) -> dict:
    """Generate video presentation and update existing record.

    Flips the row to GENERATING, runs the agent graph, then persists the
    merged slide/audio/scene-code payload and marks the row READY. On any
    error the row is marked FAILED and the exception re-raised so the
    Celery wrapper can report it.

    Raises:
        ValueError: if no row exists for ``video_presentation_id``.
    """
    async with get_celery_session_maker()() as session:
        result = await session.execute(
            select(VideoPresentation).filter(
                VideoPresentation.id == video_presentation_id
            )
        )
        video_pres = result.scalars().first()
        if not video_pres:
            raise ValueError(f"VideoPresentation {video_presentation_id} not found")
        try:
            # Commit GENERATING first so pollers see progress during the
            # (slow) graph run.
            video_pres.status = VideoPresentationStatus.GENERATING
            await session.commit()
            graph_config = {
                "configurable": {
                    "video_title": video_pres.title,
                    "search_space_id": search_space_id,
                    "user_prompt": user_prompt,
                }
            }
            initial_state = VideoPresentationState(
                source_content=source_content,
                db_session=session,
            )
            graph_result = await video_presentation_graph.ainvoke(
                initial_state, config=graph_config
            )
            # Serialize slides (parsed content + audio info merged)
            slides_raw = graph_result.get("slides", [])
            audio_results_raw = graph_result.get("slide_audio_results", [])
            scene_codes_raw = graph_result.get("slide_scene_codes", [])
            # Index audio results by slide number so each slide can pick up
            # its audio path and durations below.
            audio_map = {}
            for ar in audio_results_raw:
                data = ar.model_dump() if hasattr(ar, "model_dump") else ar
                audio_map[data.get("slide_number", 0)] = data
            serializable_slides = []
            for slide in slides_raw:
                # Graph may yield pydantic models or plain dicts; normalize.
                slide_data = (
                    slide.model_dump() if hasattr(slide, "model_dump") else dict(slide)
                )
                audio_data = audio_map.get(slide_data.get("slide_number", 0), {})
                slide_data["audio_file"] = audio_data.get("audio_file")
                slide_data["duration_seconds"] = audio_data.get("duration_seconds")
                slide_data["duration_in_frames"] = audio_data.get("duration_in_frames")
                serializable_slides.append(slide_data)
            serializable_scene_codes = []
            for sc in scene_codes_raw:
                sc_data = sc.model_dump() if hasattr(sc, "model_dump") else dict(sc)
                serializable_scene_codes.append(sc_data)
            video_pres.slides = serializable_slides
            video_pres.scene_codes = serializable_scene_codes
            video_pres.status = VideoPresentationStatus.READY
            await session.commit()
            logger.info(f"Successfully generated video presentation: {video_pres.id}")
            return {
                "status": "ready",
                "video_presentation_id": video_pres.id,
                "title": video_pres.title,
                "slide_count": len(serializable_slides),
            }
        except Exception as e:
            logger.error(f"Error in _generate_video_presentation: {e!s}")
            # Mark FAILED, then re-raise so the Celery wrapper logs/returns.
            video_pres.status = VideoPresentationStatus.FAILED
            await session.commit()
            raise

View file

@ -613,6 +613,41 @@ async def _stream_agent_events(
status="completed", status="completed",
items=completed_items, items=completed_items,
) )
elif tool_name == "generate_video_presentation":
vp_status = (
tool_output.get("status", "unknown")
if isinstance(tool_output, dict)
else "unknown"
)
vp_title = (
tool_output.get("title", "Presentation")
if isinstance(tool_output, dict)
else "Presentation"
)
if vp_status in ("pending", "generating"):
completed_items = [
f"Title: {vp_title}",
"Presentation generation started",
"Processing in background...",
]
elif vp_status == "failed":
error_msg = (
tool_output.get("error", "Unknown error")
if isinstance(tool_output, dict)
else "Unknown error"
)
completed_items = [
f"Title: {vp_title}",
f"Error: {error_msg[:50]}",
]
else:
completed_items = last_active_step_items
yield streaming_service.format_thinking_step(
step_id=original_step_id,
title="Generating video presentation",
status="completed",
items=completed_items,
)
elif tool_name == "generate_report": elif tool_name == "generate_report":
report_status = ( report_status = (
tool_output.get("status", "unknown") tool_output.get("status", "unknown")
@ -756,6 +791,34 @@ async def _stream_agent_events(
f"Podcast generation failed: {error_msg}", f"Podcast generation failed: {error_msg}",
"error", "error",
) )
elif tool_name == "generate_video_presentation":
yield streaming_service.format_tool_output_available(
tool_call_id,
tool_output
if isinstance(tool_output, dict)
else {"result": tool_output},
)
if (
isinstance(tool_output, dict)
and tool_output.get("status") == "pending"
):
yield streaming_service.format_terminal_info(
f"Video presentation queued: {tool_output.get('title', 'Presentation')}",
"success",
)
elif (
isinstance(tool_output, dict)
and tool_output.get("status") == "failed"
):
error_msg = (
tool_output.get("error", "Unknown error")
if isinstance(tool_output, dict)
else "Unknown error"
)
yield streaming_service.format_terminal_info(
f"Presentation generation failed: {error_msg}",
"error",
)
elif tool_name == "link_preview": elif tool_name == "link_preview":
yield streaming_service.format_tool_output_available( yield streaming_service.format_tool_output_available(
tool_call_id, tool_call_id,

View file

@ -472,21 +472,21 @@ export function DocumentsTableShell({
setBulkDeleteConfirmOpen(false); setBulkDeleteConfirmOpen(false);
}, [deletableSelectedIds, bulkDeleteDocuments, deleteDocument]); }, [deletableSelectedIds, bulkDeleteDocuments, deleteDocument]);
const bulkDeleteBar = hasDeletableSelection ? (
<div className="flex items-center justify-center py-1.5 border-b border-border/50 bg-destructive/5 shrink-0 animate-in fade-in slide-in-from-top-1 duration-150">
<button
type="button"
onClick={() => setBulkDeleteConfirmOpen(true)}
className="flex items-center gap-1.5 px-3 py-1 rounded-md bg-destructive text-destructive-foreground shadow-sm text-xs font-medium hover:bg-destructive/90 transition-colors"
>
<Trash2 size={12} />
Delete ({deletableSelectedIds.length} selected)
</button>
</div>
) : null;
return ( return (
<div className="bg-sidebar overflow-hidden select-none border-t border-border/50 flex-1 flex flex-col min-h-0 relative"> <div className="bg-sidebar overflow-hidden select-none border-t border-border/50 flex-1 flex flex-col min-h-0">
{/* Floating bulk delete pill */}
{hasDeletableSelection && (
<div className="absolute left-1/2 -translate-x-1/2 top-2 md:top-9 z-20 animate-in fade-in slide-in-from-top-1 duration-150">
<button
type="button"
onClick={() => setBulkDeleteConfirmOpen(true)}
className="flex items-center gap-1.5 px-3 py-1.5 rounded-md bg-destructive text-destructive-foreground shadow-md text-xs font-medium"
>
<Trash2 size={12} />
Delete ({deletableSelectedIds.length} selected)
</button>
</div>
)}
{/* Desktop Table View */} {/* Desktop Table View */}
<div className="hidden md:flex md:flex-col flex-1 min-h-0"> <div className="hidden md:flex md:flex-col flex-1 min-h-0">
<Table className="table-fixed w-full"> <Table className="table-fixed w-full">
@ -526,6 +526,7 @@ export function DocumentsTableShell({
</TableRow> </TableRow>
</TableHeader> </TableHeader>
</Table> </Table>
{bulkDeleteBar}
{loading ? ( {loading ? (
<div className="flex-1 overflow-auto"> <div className="flex-1 overflow-auto">
<Table className="table-fixed w-full"> <Table className="table-fixed w-full">
@ -777,6 +778,9 @@ export function DocumentsTableShell({
)} )}
</div> </div>
{/* Mobile bulk delete bar */}
<div className="md:hidden">{bulkDeleteBar}</div>
{/* Mobile Card View */} {/* Mobile Card View */}
{loading ? ( {loading ? (
<div className="md:hidden divide-y divide-border/50 flex-1 overflow-auto"> <div className="md:hidden divide-y divide-border/50 flex-1 overflow-auto">

View file

@ -40,6 +40,7 @@ import type { ThinkingStep } from "@/components/tool-ui/deepagent-thinking";
import { DisplayImageToolUI } from "@/components/tool-ui/display-image"; import { DisplayImageToolUI } from "@/components/tool-ui/display-image";
import { GeneratePodcastToolUI } from "@/components/tool-ui/generate-podcast"; import { GeneratePodcastToolUI } from "@/components/tool-ui/generate-podcast";
import { GenerateReportToolUI } from "@/components/tool-ui/generate-report"; import { GenerateReportToolUI } from "@/components/tool-ui/generate-report";
import { GenerateVideoPresentationToolUI } from "@/components/tool-ui/video-presentation";
import { import {
CreateGoogleDriveFileToolUI, CreateGoogleDriveFileToolUI,
DeleteGoogleDriveFileToolUI, DeleteGoogleDriveFileToolUI,
@ -148,6 +149,7 @@ function extractMentionedDocuments(content: unknown): MentionedDocumentInfo[] {
const TOOLS_WITH_UI = new Set([ const TOOLS_WITH_UI = new Set([
"generate_podcast", "generate_podcast",
"generate_report", "generate_report",
"generate_video_presentation",
"link_preview", "link_preview",
"display_image", "display_image",
"delete_notion_page", "delete_notion_page",
@ -1662,6 +1664,7 @@ export default function NewChatPage() {
<AssistantRuntimeProvider runtime={runtime}> <AssistantRuntimeProvider runtime={runtime}>
<GeneratePodcastToolUI /> <GeneratePodcastToolUI />
<GenerateReportToolUI /> <GenerateReportToolUI />
<GenerateVideoPresentationToolUI />
<LinkPreviewToolUI /> <LinkPreviewToolUI />
<DisplayImageToolUI /> <DisplayImageToolUI />
<ScrapeWebpageToolUI /> <ScrapeWebpageToolUI />

View file

@ -6,6 +6,7 @@ import { ReportPanel } from "@/components/report-panel/report-panel";
import { DisplayImageToolUI } from "@/components/tool-ui/display-image"; import { DisplayImageToolUI } from "@/components/tool-ui/display-image";
import { GeneratePodcastToolUI } from "@/components/tool-ui/generate-podcast"; import { GeneratePodcastToolUI } from "@/components/tool-ui/generate-podcast";
import { GenerateReportToolUI } from "@/components/tool-ui/generate-report"; import { GenerateReportToolUI } from "@/components/tool-ui/generate-report";
import { GenerateVideoPresentationToolUI } from "@/components/tool-ui/video-presentation";
import { LinkPreviewToolUI } from "@/components/tool-ui/link-preview"; import { LinkPreviewToolUI } from "@/components/tool-ui/link-preview";
import { ScrapeWebpageToolUI } from "@/components/tool-ui/scrape-webpage"; import { ScrapeWebpageToolUI } from "@/components/tool-ui/scrape-webpage";
import { Spinner } from "@/components/ui/spinner"; import { Spinner } from "@/components/ui/spinner";
@ -45,6 +46,7 @@ export function PublicChatView({ shareToken }: PublicChatViewProps) {
{/* Tool UIs for rendering tool results */} {/* Tool UIs for rendering tool results */}
<GeneratePodcastToolUI /> <GeneratePodcastToolUI />
<GenerateReportToolUI /> <GenerateReportToolUI />
<GenerateVideoPresentationToolUI />
<LinkPreviewToolUI /> <LinkPreviewToolUI />
<DisplayImageToolUI /> <DisplayImageToolUI />
<ScrapeWebpageToolUI /> <ScrapeWebpageToolUI />

View file

@ -32,6 +32,7 @@ export {
} from "./display-image"; } from "./display-image";
export { GeneratePodcastToolUI } from "./generate-podcast"; export { GeneratePodcastToolUI } from "./generate-podcast";
export { GenerateReportToolUI } from "./generate-report"; export { GenerateReportToolUI } from "./generate-report";
export { GenerateVideoPresentationToolUI } from "./video-presentation";
export { CreateGoogleDriveFileToolUI, DeleteGoogleDriveFileToolUI } from "./google-drive"; export { CreateGoogleDriveFileToolUI, DeleteGoogleDriveFileToolUI } from "./google-drive";
export { export {
Image, Image,

View file

@ -0,0 +1,74 @@
"use client";
import React, { useMemo } from "react";
import { Player } from "@remotion/player";
import { Sequence, AbsoluteFill } from "remotion";
import { Audio } from "@remotion/media";
import { FPS } from "@/lib/remotion/constants";
export interface CompiledSlide {
  /** Compiled Remotion component for this slide. */
  component: React.ComponentType;
  /** Short title for the slide's composition. */
  title: string;
  /** Original (uncompiled) component source code. */
  code: string;
  /** Slide length in frames; sequences are laid out back-to-back. */
  durationInFrames: number;
  /** Streaming URL for the slide's narration audio, if any. */
  audioUrl?: string;
}
/**
 * Plays every compiled slide back-to-back: each <Sequence> starts exactly
 * where the previous one ended, with optional per-slide narration audio.
 */
function CombinedComposition({ scenes }: { scenes: CompiledSlide[] }) {
  let startFrame = 0;
  const sequences = scenes.map((scene, index) => {
    const sceneStart = startFrame;
    startFrame += scene.durationInFrames;
    return (
      <Sequence key={index} from={sceneStart} durationInFrames={scene.durationInFrames}>
        <scene.component />
        {scene.audioUrl && <Audio src={scene.audioUrl} />}
      </Sequence>
    );
  });
  return <AbsoluteFill>{sequences}</AbsoluteFill>;
}
export function buildCompositionComponent(slides: CompiledSlide[]): React.FC {
const scenesSnapshot = [...slides];
const Comp: React.FC = () => <CombinedComposition scenes={scenesSnapshot} />;
return Comp;
}
interface CombinedPlayerProps {
	slides: CompiledSlide[];
}

/**
 * Interactive Remotion <Player> that shows all slides as one continuous
 * 1920x1080 video. The composition component is memoized per `slides`
 * array so the Player does not remount on unrelated re-renders; total
 * duration is the sum of all slide durations.
 */
export function CombinedPlayer({ slides }: CombinedPlayerProps) {
	// Reuse buildCompositionComponent rather than duplicating its
	// snapshot-and-wrap logic inline, so the player and the web renderer
	// share a single code path.
	const CompositionWithScenes = useMemo(() => buildCompositionComponent(slides), [slides]);
	const totalFrames = useMemo(
		() => slides.reduce((sum, s) => sum + s.durationInFrames, 0),
		[slides],
	);
	return (
		<div className="overflow-hidden rounded-xl border shadow-2xl shadow-purple-500/5">
			<Player
				component={CompositionWithScenes}
				durationInFrames={totalFrames}
				fps={FPS}
				compositionWidth={1920}
				compositionHeight={1080}
				style={{ width: "100%", aspectRatio: "16/9" }}
				controls
				autoPlay
				loop
				acknowledgeRemotionLicense
			/>
		</div>
	);
}

View file

@ -0,0 +1,682 @@
"use client";
import React, { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { makeAssistantToolUI } from "@assistant-ui/react";
import {
AlertCircleIcon,
Download,
Film,
Loader2,
Presentation,
X,
} from "lucide-react";
import { z } from "zod";
import { Spinner } from "@/components/ui/spinner";
import { baseApiService } from "@/lib/apis/base-api.service";
import { authenticatedFetch } from "@/lib/auth-utils";
import { compileCheck, compileToComponent } from "@/lib/remotion/compile-check";
import { FPS } from "@/lib/remotion/constants";
import {
CombinedPlayer,
buildCompositionComponent,
type CompiledSlide,
} from "./combined-player";
// Arguments the model passes to the `generate_video_presentation` tool call.
const GenerateVideoPresentationArgsSchema = z.object({
	source_content: z.string(),
	video_title: z.string().nullish(),
	user_prompt: z.string().nullish(),
});
// Immediate tool result: either a handle to poll (`video_presentation_id`)
// or a terminal status with an optional error message.
const GenerateVideoPresentationResultSchema = z.object({
	status: z.enum(["pending", "generating", "ready", "failed"]),
	video_presentation_id: z.number().nullish(),
	title: z.string().nullish(),
	message: z.string().nullish(),
	error: z.string().nullish(),
});
// Shape of GET /api/v1/video-presentations/{id}. `slides` carries per-slide
// metadata (narration audio, durations); `scene_codes` carries the TSX
// source compiled client-side by compileToComponent.
const VideoPresentationStatusResponseSchema = z.object({
	status: z.enum(["pending", "generating", "ready", "failed"]),
	id: z.number(),
	title: z.string(),
	slides: z
		.array(
			z.object({
				slide_number: z.number(),
				title: z.string(),
				subtitle: z.string().nullish(),
				content_in_markdown: z.string().nullish(),
				speaker_transcripts: z.array(z.string()).nullish(),
				background_explanation: z.string().nullish(),
				audio_url: z.string().nullish(),
				duration_seconds: z.number().nullish(),
				duration_in_frames: z.number().nullish(),
			}),
		)
		.nullish(),
	scene_codes: z
		.array(
			z.object({
				slide_number: z.number(),
				code: z.string(),
				title: z.string().nullish(),
			}),
		)
		.nullish(),
	slide_count: z.number().nullish(),
});
type GenerateVideoPresentationArgs = z.infer<typeof GenerateVideoPresentationArgsSchema>;
type GenerateVideoPresentationResult = z.infer<typeof GenerateVideoPresentationResultSchema>;
type VideoPresentationStatusResponse = z.infer<typeof VideoPresentationStatusResponseSchema>;
/**
 * Safe-parses an API payload against the status schema. Returns the typed
 * response, or null (after logging the validation issues) on mismatch.
 */
function parseStatusResponse(data: unknown): VideoPresentationStatusResponse | null {
	const parsed = VideoPresentationStatusResponseSchema.safeParse(data);
	if (parsed.success) {
		return parsed.data;
	}
	console.warn("Invalid video presentation status:", parsed.error.issues);
	return null;
}
/**
 * "In progress" card shown while the backend generates the presentation:
 * pulsing film icon, spinner text, and an indeterminate progress bar.
 */
function GeneratingState({ title }: { title: string }) {
	return (
		<div className="my-4 overflow-hidden rounded-xl border border-primary/20 bg-linear-to-br from-primary/5 to-primary/10 p-4 sm:p-6">
			<div className="flex items-center gap-3 sm:gap-4">
				<div className="relative shrink-0">
					<div className="flex size-12 sm:size-16 items-center justify-center rounded-full bg-primary/20">
						<Film className="size-6 sm:size-8 text-primary" />
					</div>
					{/* Pulsing halo behind the icon */}
					<div className="absolute inset-1 animate-ping rounded-full bg-primary/20" />
				</div>
				<div className="flex-1 min-w-0">
					<h3 className="font-semibold text-foreground text-sm sm:text-lg leading-tight">
						{title}
					</h3>
					<div className="mt-1.5 sm:mt-2 flex items-center gap-1.5 sm:gap-2 text-muted-foreground">
						<Spinner size="sm" className="size-3 sm:size-4" />
						<span className="text-xs sm:text-sm">
							Generating video presentation. This may take a few minutes.
						</span>
					</div>
					{/* Indeterminate progress bar (no real progress signal available) */}
					<div className="mt-2 sm:mt-3">
						<div className="h-1 sm:h-1.5 w-full overflow-hidden rounded-full bg-primary/10">
							<div className="h-full w-1/3 animate-pulse rounded-full bg-primary" />
						</div>
					</div>
				</div>
			</div>
		</div>
	);
}
/**
 * Failure card: shows the presentation title, a fixed "failed" headline,
 * and the detailed error message passed in.
 */
function ErrorState({ title, error }: { title: string; error: string }) {
	return (
		<div className="my-4 overflow-hidden rounded-xl border border-destructive/20 bg-destructive/5 p-4 sm:p-6">
			<div className="flex items-center gap-3 sm:gap-4">
				<div className="flex size-12 sm:size-16 shrink-0 items-center justify-center rounded-full bg-destructive/10">
					<AlertCircleIcon className="size-6 sm:size-8 text-destructive" />
				</div>
				<div className="flex-1 min-w-0">
					<h3 className="font-semibold text-foreground text-sm sm:text-base leading-tight">
						{title}
					</h3>
					<p className="mt-1 text-destructive text-xs sm:text-sm">
						Failed to generate video presentation
					</p>
					<p className="mt-1.5 sm:mt-2 text-muted-foreground text-xs sm:text-sm">{error}</p>
				</div>
			</div>
		</div>
	);
}
/**
 * Shown after the backend reports "ready" while the client fetches scene
 * code/audio and compiles the scenes into components.
 */
function CompilationLoadingState({ title }: { title: string }) {
	return (
		<div className="my-4 overflow-hidden rounded-xl border bg-muted/30 p-4 sm:p-6">
			<div className="flex items-center gap-3 sm:gap-4">
				<div className="flex size-12 sm:size-16 shrink-0 items-center justify-center rounded-full bg-primary/10">
					<Film className="size-6 sm:size-8 text-primary/50" />
				</div>
				<div className="flex-1 min-w-0">
					<h3 className="font-semibold text-foreground text-sm sm:text-base leading-tight">
						{title}
					</h3>
					<div className="mt-1.5 sm:mt-2 flex items-center gap-1.5 sm:gap-2 text-muted-foreground">
						<Spinner size="sm" className="size-3 sm:size-4" />
						<span className="text-xs sm:text-sm">Compiling scenes...</span>
					</div>
				</div>
			</div>
		</div>
	);
}
/**
 * Loads a ready presentation, compiles each scene's TSX into a live
 * component, and renders the combined player plus MP4 (in-browser
 * WebCodecs render) and PPTX export actions.
 *
 * Fix vs. previous version: the PPTX export now tears down its offscreen
 * DOM node and React roots in a `finally` block, so a failure mid-export
 * no longer leaks detached render trees.
 */
function VideoPresentationPlayer({
	presentationId,
	title,
}: {
	presentationId: number;
	title: string;
}) {
	const [isLoading, setIsLoading] = useState(true);
	const [error, setError] = useState<string | null>(null);
	const [compiledSlides, setCompiledSlides] = useState<CompiledSlide[]>([]);
	const [isRendering, setIsRendering] = useState(false);
	const [renderProgress, setRenderProgress] = useState<number | null>(null);
	const [renderError, setRenderError] = useState<string | null>(null);
	const [renderFormat, setRenderFormat] = useState<string | null>(null);
	const abortControllerRef = useRef<AbortController | null>(null);
	const [isPptxExporting, setIsPptxExporting] = useState(false);
	const [pptxProgress, setPptxProgress] = useState<string | null>(null);
	const backendUrl = process.env.NEXT_PUBLIC_FASTAPI_BACKEND_URL ?? "";
	// Blob URLs created for slide audio; revoked in the effect cleanup below.
	const audioBlobUrlsRef = useRef<string[]>([]);

	// Fetch slide metadata + scene code, compile each scene, and pre-fetch
	// narration audio. Slides that fail to compile are skipped with a warning.
	const loadPresentation = useCallback(async () => {
		setIsLoading(true);
		setError(null);
		try {
			const raw = await baseApiService.get<unknown>(
				`/api/v1/video-presentations/${presentationId}`,
			);
			const data = parseStatusResponse(raw);
			if (!data) throw new Error("Invalid response");
			if (data.status !== "ready") throw new Error(`Unexpected status: ${data.status}`);
			if (!data.slides?.length || !data.scene_codes?.length) {
				throw new Error("No slides or scene codes in response");
			}
			const sceneMap = new Map(data.scene_codes.map((sc) => [sc.slide_number, sc]));
			const compiled: CompiledSlide[] = [];
			for (const slide of data.slides) {
				const scene = sceneMap.get(slide.slide_number);
				if (!scene) continue;
				// Fall back to the default scene length when the backend gives none.
				const durationInFrames = slide.duration_in_frames ?? 300;
				const check = compileCheck(scene.code);
				if (!check.success) {
					console.warn(
						`Slide ${slide.slide_number} failed to compile: ${check.error}`,
					);
					continue;
				}
				const component = compileToComponent(scene.code, durationInFrames);
				compiled.push({
					component,
					title: scene.title ?? slide.title,
					code: scene.code,
					durationInFrames,
					audioUrl: slide.audio_url
						? `${backendUrl}${slide.audio_url}`
						: undefined,
				});
			}
			if (compiled.length === 0) {
				throw new Error("No slides compiled successfully");
			}
			// Pre-fetch all audio files with auth headers and convert to blob URLs.
			// Remotion's <Audio> uses a plain <audio> element which can't send auth
			// headers, so we fetch the audio ourselves and hand it a blob: URL.
			// A failed audio fetch degrades to a silent slide rather than erroring.
			const withBlobs = await Promise.all(
				compiled.map(async (slide) => {
					if (!slide.audioUrl) return slide;
					try {
						const resp = await authenticatedFetch(slide.audioUrl, {
							method: "GET",
						});
						if (!resp.ok) {
							console.warn(
								`Audio fetch ${resp.status} for slide "${slide.title}"`,
							);
							return { ...slide, audioUrl: undefined };
						}
						const blob = await resp.blob();
						const blobUrl = URL.createObjectURL(blob);
						audioBlobUrlsRef.current.push(blobUrl);
						return { ...slide, audioUrl: blobUrl };
					} catch (err) {
						console.warn(`Failed to fetch audio for "${slide.title}":`, err);
						return { ...slide, audioUrl: undefined };
					}
				}),
			);
			setCompiledSlides(withBlobs);
		} catch (err) {
			console.error("Error loading video presentation:", err);
			setError(err instanceof Error ? err.message : "Failed to load presentation");
		} finally {
			setIsLoading(false);
		}
	}, [presentationId, backendUrl]);

	useEffect(() => {
		loadPresentation();
		// Revoke audio blob URLs when unmounting or when the presentation
		// changes (cleanup runs before the effect re-fires).
		return () => {
			for (const url of audioBlobUrlsRef.current) {
				URL.revokeObjectURL(url);
			}
			audioBlobUrlsRef.current = [];
		};
	}, [loadPresentation]);

	// Total playback length in seconds, derived from frame counts.
	const totalDuration = useMemo(
		() => compiledSlides.reduce((sum, s) => sum + s.durationInFrames / FPS, 0),
		[compiledSlides],
	);

	// Render the whole composition to a video file in the browser via
	// Remotion's WebCodecs renderer, probing container/codec support first.
	const handleDownload = async () => {
		if (isRendering || compiledSlides.length === 0) return;
		setIsRendering(true);
		setRenderProgress(0);
		setRenderError(null);
		setRenderFormat(null);
		const controller = new AbortController();
		abortControllerRef.current = controller;
		try {
			const { canRenderMediaOnWeb, renderMediaOnWeb } = await import(
				"@remotion/web-renderer"
			);
			// Preference order: MP4 first (widest compatibility), WebM fallback.
			const formats = [
				{ container: "mp4" as const, videoCodec: "h264" as const, ext: "mp4" },
				{ container: "mp4" as const, videoCodec: "h265" as const, ext: "mp4" },
				{ container: "webm" as const, videoCodec: "vp8" as const, ext: "webm" },
				{ container: "webm" as const, videoCodec: "vp9" as const, ext: "webm" },
			];
			let chosen: (typeof formats)[number] | null = null;
			for (const fmt of formats) {
				const { canRender } = await canRenderMediaOnWeb({
					width: 1920,
					height: 1080,
					container: fmt.container,
					videoCodec: fmt.videoCodec,
				});
				if (canRender) {
					chosen = fmt;
					break;
				}
			}
			if (!chosen) {
				throw new Error(
					"Your browser does not support video rendering (WebCodecs). Please use Chrome, Edge, or Firefox 130+.",
				);
			}
			setRenderFormat(chosen.ext.toUpperCase());
			const totalFrames = compiledSlides.reduce((sum, s) => sum + s.durationInFrames, 0);
			const CompositionComponent = buildCompositionComponent(compiledSlides);
			const { getBlob } = await renderMediaOnWeb({
				composition: {
					component: CompositionComponent,
					durationInFrames: totalFrames,
					fps: FPS,
					width: 1920,
					height: 1080,
					id: "combined-video",
				},
				container: chosen.container,
				videoCodec: chosen.videoCodec,
				videoBitrate: "high",
				onProgress: ({ progress }) => {
					setRenderProgress(progress);
				},
				signal: controller.signal,
			});
			const blob = await getBlob();
			// Trigger a browser download via a transient anchor element.
			const url = URL.createObjectURL(blob);
			const a = document.createElement("a");
			a.href = url;
			a.download = `video.${chosen.ext}`;
			document.body.appendChild(a);
			a.click();
			document.body.removeChild(a);
			URL.revokeObjectURL(url);
		} catch (err) {
			if ((err as Error).name === "AbortError") {
				// User cancelled
			} else {
				setRenderError(err instanceof Error ? err.message : "Failed to render video");
			}
		} finally {
			setIsRendering(false);
			setRenderProgress(null);
			abortControllerRef.current = null;
		}
	};

	const handleCancelRender = () => {
		abortControllerRef.current?.abort();
	};

	// Rasterize one representative frame per slide into an offscreen DOM
	// tree, then convert those nodes into an editable PPTX.
	const handleDownloadPPTX = async () => {
		if (isPptxExporting || compiledSlides.length === 0) return;
		setIsPptxExporting(true);
		setPptxProgress("Preparing...");
		setRenderError(null);
		// Declared outside try so the finally block can always clean them up
		// (previously cleanup only ran on the success path and a throw leaked
		// the offscreen node and mounted roots).
		const offscreen = document.createElement("div");
		offscreen.style.cssText =
			"position:fixed;left:-99999px;top:0;overflow:hidden;pointer-events:none;";
		const roots: { unmount: () => void }[] = [];
		try {
			const { exportToPptx } = await import("dom-to-pptx");
			const { Thumbnail } = await import("@remotion/player");
			const { createRoot } = await import("react-dom/client");
			const { flushSync } = await import("react-dom");
			document.body.appendChild(offscreen);
			const slideElements: HTMLElement[] = [];
			for (let i = 0; i < compiledSlides.length; i++) {
				const slide = compiledSlides[i];
				setPptxProgress(`Rendering slide ${i + 1}/${compiledSlides.length}...`);
				const wrapper = document.createElement("div");
				wrapper.style.cssText = "width:1920px;height:1080px;overflow:hidden;";
				offscreen.appendChild(wrapper);
				// Capture a frame 30% in: entrance animations have settled but
				// the exit phase has not started.
				const holdFrame = Math.floor(slide.durationInFrames * 0.3);
				const root = createRoot(wrapper);
				roots.push(root);
				flushSync(() => {
					root.render(
						React.createElement(Thumbnail, {
							component: slide.component,
							compositionWidth: 1920,
							compositionHeight: 1080,
							frameToDisplay: holdFrame,
							durationInFrames: slide.durationInFrames,
							fps: FPS,
							style: { width: 1920, height: 1080 },
						}),
					);
				});
				// Give async children (images/fonts) a moment to paint before capture.
				await new Promise((r) => setTimeout(r, 500));
				slideElements.push(wrapper);
			}
			setPptxProgress("Converting to editable PPTX...");
			await exportToPptx(slideElements, {
				fileName: "presentation.pptx",
			});
		} catch (err) {
			setRenderError(err instanceof Error ? err.message : "Failed to export PPTX");
		} finally {
			// Always tear down the offscreen render tree, even on failure.
			roots.forEach((r) => r.unmount());
			offscreen.remove();
			setIsPptxExporting(false);
			setPptxProgress(null);
		}
	};

	if (isLoading) {
		return <CompilationLoadingState title={title} />;
	}
	if (error || compiledSlides.length === 0) {
		return <ErrorState title={title} error={error || "Failed to compile scenes"} />;
	}
	return (
		<div className="my-4 space-y-3">
			{/* Title bar with actions */}
			<div className="flex items-center justify-between flex-wrap gap-2">
				<div className="flex items-center gap-3 min-w-0">
					<div className="flex size-8 shrink-0 items-center justify-center rounded-lg bg-primary/10">
						<Film className="size-4 text-primary" />
					</div>
					<div className="min-w-0">
						<h3 className="text-sm font-semibold text-foreground truncate">{title}</h3>
						<p className="text-xs text-muted-foreground">
							{compiledSlides.length} slides &middot; {totalDuration.toFixed(1)}s &middot;{" "}
							{FPS}fps
						</p>
					</div>
				</div>
				<div className="flex items-center gap-2">
					{isRendering ? (
						<>
							<div className="flex items-center gap-2 rounded-lg border bg-card px-3 py-1.5">
								<Loader2 className="size-3.5 animate-spin text-primary" />
								<span className="text-xs font-medium">
									Rendering {renderFormat ?? ""}{" "}
									{renderProgress !== null
										? `${Math.round(renderProgress * 100)}%`
										: "..."}
								</span>
								<div className="h-1.5 w-20 overflow-hidden rounded-full bg-secondary">
									<div
										className="h-full rounded-full bg-primary transition-all duration-300"
										style={{ width: `${(renderProgress ?? 0) * 100}%` }}
									/>
								</div>
							</div>
							<button
								onClick={handleCancelRender}
								className="rounded-lg border p-1.5 text-muted-foreground transition-colors hover:bg-secondary hover:text-foreground"
								title="Cancel render"
								type="button"
							>
								<X className="size-3.5" />
							</button>
						</>
					) : (
						<>
							<button
								onClick={handleDownload}
								className="inline-flex items-center gap-1.5 rounded-lg border bg-card px-3 py-1.5 text-xs font-medium text-foreground transition-colors hover:bg-secondary"
								type="button"
							>
								<Download className="size-3.5" />
								Download MP4
							</button>
							<button
								onClick={handleDownloadPPTX}
								disabled={isPptxExporting}
								className="inline-flex items-center gap-1.5 rounded-lg border bg-card px-3 py-1.5 text-xs font-medium text-foreground transition-colors hover:bg-secondary disabled:opacity-50 disabled:cursor-not-allowed"
								type="button"
							>
								{isPptxExporting ? (
									<>
										<Loader2 className="size-3.5 animate-spin" />
										{pptxProgress ?? "Exporting..."}
									</>
								) : (
									<>
										<Presentation className="size-3.5" />
										Download PPTX
									</>
								)}
							</button>
						</>
					)}
				</div>
			</div>
			{/* Render error */}
			{renderError && (
				<div className="flex items-start gap-3 rounded-xl border border-destructive/20 bg-destructive/5 p-3">
					<AlertCircleIcon className="mt-0.5 size-4 shrink-0 text-destructive" />
					<div>
						<p className="text-sm font-medium text-destructive">Download Failed</p>
						<p className="mt-1 text-xs text-destructive/70 whitespace-pre-wrap">
							{renderError}
						</p>
					</div>
				</div>
			)}
			{/* Combined Remotion Player */}
			<CombinedPlayer slides={compiledSlides} />
		</div>
	);
}
/**
 * Polls the presentation endpoint every 5 seconds until it reaches a
 * terminal state (ready/failed), then renders either the player or an
 * error card. Shows the generating card while pending.
 */
function StatusPoller({
	presentationId,
	title,
}: {
	presentationId: number;
	title: string;
}) {
	const [status, setStatus] = useState<VideoPresentationStatusResponse | null>(null);
	const pollingRef = useRef<NodeJS.Timeout | null>(null);

	useEffect(() => {
		const stopPolling = () => {
			if (pollingRef.current) {
				clearInterval(pollingRef.current);
				pollingRef.current = null;
			}
		};
		const fetchStatus = async () => {
			try {
				const raw = await baseApiService.get<unknown>(
					`/api/v1/video-presentations/${presentationId}`,
				);
				const parsed = parseStatusResponse(raw);
				if (!parsed) return;
				setStatus(parsed);
				// Terminal states end the polling loop.
				if (parsed.status === "ready" || parsed.status === "failed") {
					stopPolling();
				}
			} catch (err) {
				console.error("Error polling video presentation status:", err);
			}
		};
		// Kick off an immediate check, then poll on an interval.
		fetchStatus();
		pollingRef.current = setInterval(fetchStatus, 5000);
		return stopPolling;
	}, [presentationId]);

	if (!status || status.status === "pending" || status.status === "generating") {
		return <GeneratingState title={title} />;
	}
	switch (status.status) {
		case "failed":
			return <ErrorState title={title} error="Generation failed" />;
		case "ready":
			return (
				<VideoPresentationPlayer
					presentationId={status.id}
					title={status.title || title}
				/>
			);
		default:
			return <ErrorState title={title} error="Unexpected state" />;
	}
}
/**
 * assistant-ui renderer for the `generate_video_presentation` tool call.
 * Maps the tool-call lifecycle (running / cancelled / errored / result
 * statuses) onto the state components above, switching to StatusPoller
 * once a presentation ID is available.
 */
export const GenerateVideoPresentationToolUI = makeAssistantToolUI<
	GenerateVideoPresentationArgs,
	GenerateVideoPresentationResult
>({
	toolName: "generate_video_presentation",
	render: function GenerateVideoPresentationUI({ args, result, status }) {
		const title = args.video_title || "SurfSense Presentation";
		// Tool call still executing (or waiting for user action).
		if (status.type === "running" || status.type === "requires-action") {
			return <GeneratingState title={title} />;
		}
		if (status.type === "incomplete") {
			if (status.reason === "cancelled") {
				return (
					<div className="my-4 rounded-xl border border-muted p-3 sm:p-4 text-muted-foreground">
						<p className="flex items-center gap-1.5 sm:gap-2 text-xs sm:text-sm">
							<Film className="size-3.5 sm:size-4" />
							<span className="line-through">Presentation generation cancelled</span>
						</p>
					</div>
				);
			}
			if (status.reason === "error") {
				return (
					<ErrorState
						title={title}
						error={typeof status.error === "string" ? status.error : "An error occurred"}
					/>
				);
			}
		}
		// Tool completed but returned nothing usable yet.
		if (!result) {
			return <GeneratingState title={title} />;
		}
		if (result.status === "failed") {
			return <ErrorState title={title} error={result.error || "Generation failed"} />;
		}
		// Backend rejected the request because another presentation is
		// already being generated.
		if (result.status === "generating") {
			return (
				<div className="my-4 overflow-hidden rounded-xl border border-amber-500/20 bg-amber-500/5 p-3 sm:p-4">
					<div className="flex items-center gap-2.5 sm:gap-3">
						<div className="flex size-8 sm:size-10 shrink-0 items-center justify-center rounded-full bg-amber-500/20">
							<Film className="size-4 sm:size-5 text-amber-500" />
						</div>
						<div className="min-w-0">
							<p className="text-amber-600 dark:text-amber-400 text-xs sm:text-sm font-medium">
								Presentation already in progress
							</p>
							<p className="text-muted-foreground text-[10px] sm:text-xs mt-0.5">
								Please wait for the current presentation to complete.
							</p>
						</div>
					</div>
				</div>
			);
		}
		// Generation accepted: poll the backend until it is ready.
		if (result.status === "pending" && result.video_presentation_id) {
			return (
				<StatusPoller
					presentationId={result.video_presentation_id}
					title={result.title || title}
				/>
			);
		}
		// Already ready (e.g. re-rendered chat history): go straight to the player.
		if (result.status === "ready" && result.video_presentation_id) {
			return (
				<VideoPresentationPlayer
					presentationId={result.video_presentation_id}
					title={result.title || title}
				/>
			);
		}
		return <ErrorState title={title} error="Missing presentation ID" />;
	},
});

View file

@ -0,0 +1 @@
export { GenerateVideoPresentationToolUI } from "./generate-video-presentation";

View file

@ -3,6 +3,7 @@ import {
Brain, Brain,
Database, Database,
FileText, FileText,
Film,
Globe, Globe,
ImageIcon, ImageIcon,
Link2, Link2,
@ -16,6 +17,7 @@ import {
const TOOL_ICONS: Record<string, LucideIcon> = { const TOOL_ICONS: Record<string, LucideIcon> = {
search_knowledge_base: Database, search_knowledge_base: Database,
generate_podcast: Podcast, generate_podcast: Podcast,
generate_video_presentation: Film,
generate_report: FileText, generate_report: FileText,
link_preview: Link2, link_preview: Link2,
display_image: ImageIcon, display_image: ImageIcon,

View file

@ -0,0 +1,154 @@
import * as Babel from "@babel/standalone";
import React from "react";
import {
AbsoluteFill,
useCurrentFrame,
useVideoConfig,
spring,
interpolate,
Sequence,
Easing,
} from "remotion";
import { DURATION_IN_FRAMES } from "./constants";
/** Outcome of a dry-run scene compile; `error` is null iff `success` is true. */
export interface CompileResult {
	success: boolean;
	error: string | null;
}
/**
 * Builds a per-scene `stagger` helper bound to that scene's total frame
 * count. The returned helper gives list items a springy, index-staggered
 * entrance, a gentle sinusoidal drift once settled, and a fade/slide-up
 * exit over the final 20% of the scene.
 */
function createStagger(totalFrames: number) {
	// Entrance staggering happens in the first 20%; exit in the last 20%.
	const enterWindow = Math.floor(totalFrames * 0.2);
	const exitFrom = Math.floor(totalFrames * 0.8);
	return function stagger(
		frame: number,
		fps: number,
		index: number,
		total: number,
	): { opacity: number; transform: string } {
		const step = Math.max(6, Math.floor(enterWindow / Math.max(total, 1)));
		const startDelay = index * step;
		const entrance = spring({
			frame: Math.max(0, frame - startDelay),
			fps,
			config: { damping: 15, stiffness: 120, mass: 0.8 },
		});
		const exitProgress = interpolate(frame, [exitFrom, totalFrames], [0, 1], {
			extrapolateLeft: "clamp",
			extrapolateRight: "clamp",
		});
		// Subtle ambient motion once the entrance spring has settled.
		const drift = entrance > 0.99 ? Math.sin(frame * 0.05) * 2 : 0;
		const shiftY =
			interpolate(entrance, [0, 1], [40, 0]) +
			interpolate(exitProgress, [0, 1], [0, -30]) +
			drift;
		const scale = interpolate(entrance, [0, 1], [0.97, 1]);
		return {
			opacity: entrance * (1 - exitProgress),
			transform: `translateY(${shiftY}px) scale(${scale})`,
		};
	};
}
// Stagger helper for scenes with no explicit duration; per-scene helpers
// are built in compileToComponent when a duration is known.
const defaultStagger = createStagger(DURATION_IN_FRAMES);
// Parameter names of the dynamically-built component factory. The order
// here MUST match the argument order returned by buildInjectedValues below.
const INJECTED_NAMES = [
	"React",
	"AbsoluteFill",
	"useCurrentFrame",
	"useVideoConfig",
	"spring",
	"interpolate",
	"Sequence",
	"Easing",
	"stagger",
] as const;
const linear = (t: number) => t;
// Easing wrapped in a Proxy: any non-function property the generated code
// references (e.g. a misspelled easing name) falls back to linear easing
// instead of crashing the scene at runtime.
const SafeEasing = new Proxy(Easing, {
	get(target, prop) {
		const val = target[prop as keyof typeof Easing];
		if (typeof val === "function") return val;
		return linear;
	},
});
// Values injected into the compiled component, positionally matched to
// INJECTED_NAMES (note: the "Easing" slot receives SafeEasing).
function buildInjectedValues(staggerFn: ReturnType<typeof createStagger>) {
	return [
		React,
		AbsoluteFill,
		useCurrentFrame,
		useVideoConfig,
		spring,
		interpolate,
		Sequence,
		SafeEasing,
		staggerFn,
	];
}
/**
 * Normalizes generated scene source so it can be evaluated via `new
 * Function`: strips import lines, then rewrites the exported component
 * into a local `DynamicComponent` declaration.
 *
 * NOTE(review): the import-stripping regex is line-based and assumes
 * single-line imports; a multi-line `import { ... } from "x"` would only
 * be partially removed — confirm the generator always emits one-line
 * imports.
 */
export function prepareSource(code: string): string {
	// Drop every line that starts with `import`.
	const codeWithoutImports = code.replace(/^import\s+.*$/gm, "").trim();
	// Preferred shape: `export const Name(: React.FC)? = () => { ... }` —
	// re-wrap its body as DynamicComponent.
	const match = codeWithoutImports.match(
		/export\s+(?:const|function)\s+(\w+)\s*(?::\s*React\.FC\s*)?=?\s*\(\s*\)\s*=>\s*\{([\s\S]*)\};?\s*$/,
	);
	if (match) {
		return `const DynamicComponent = () => {\n${match[2].trim()}\n};`;
	}
	// Fallback: remove the first export keyword(s) and rename the FIRST
	// `const` to DynamicComponent — presumably the component is the first
	// declaration in generated code; verify against the scene generator.
	const cleanedCode = codeWithoutImports
		.replace(/export\s+default\s+/, "")
		.replace(/export\s+/, "");
	return cleanedCode.replace(/const\s+(\w+)/, "const DynamicComponent");
}
/**
 * Normalizes the scene source (imports stripped, component renamed) and
 * transpiles the TSX to plain JS with Babel standalone.
 * Throws when Babel yields no output.
 */
function transpile(code: string): string {
	const prepared = prepareSource(code);
	const result = Babel.transform(prepared, {
		presets: ["react", "typescript"],
		filename: "dynamic.tsx",
	});
	if (!result.code) {
		throw new Error("Transpilation produced no output");
	}
	return result.code;
}
/**
 * Dry-run compilation of a scene: transpile the source and construct —
 * but never invoke — the component factory. Returns success plus an
 * error message on failure.
 */
export function compileCheck(code: string): CompileResult {
	if (!code?.trim()) {
		return { success: false, error: "Empty code" };
	}
	try {
		const jsCode = transpile(code);
		// Constructing the Function is enough to surface syntax errors.
		new Function(...INJECTED_NAMES, `${jsCode}\nreturn DynamicComponent;`);
		return { success: true, error: null };
	} catch (err) {
		const message = err instanceof Error ? err.message : "Unknown compilation error";
		return { success: false, error: message };
	}
}
/**
 * Compiles scene source into a live React component, injecting React,
 * the Remotion primitives, SafeEasing, and a stagger helper bound to
 * `durationInFrames` (or the default scene length when omitted).
 */
export function compileToComponent(
	code: string,
	durationInFrames?: number,
): React.ComponentType {
	const staggerImpl = durationInFrames
		? createStagger(durationInFrames)
		: defaultStagger;
	const jsCode = transpile(code);
	const makeComponent = new Function(
		...INJECTED_NAMES,
		`${jsCode}\nreturn DynamicComponent;`,
	);
	return makeComponent(...buildInjectedValues(staggerImpl)) as React.ComponentType;
}

View file

@ -0,0 +1,2 @@
// Playback frame rate shared by the player, the web renderer, and scenes.
export const FPS = 30;
// Fallback scene length (10s at 30fps) when a slide has no explicit duration.
export const DURATION_IN_FRAMES = 300;

View file

@ -0,0 +1,18 @@
// Ambient typings for the untyped `dom-to-pptx` package (DOM → PowerPoint).
declare module "dom-to-pptx" {
	interface ExportOptions {
		/** Output file name for the generated .pptx. */
		fileName?: string;
		autoEmbedFonts?: boolean;
		fonts?: Array<{ name: string; url: string }>;
		/** When true, return the blob without triggering a browser download. */
		skipDownload?: boolean;
		svgAsVector?: boolean;
		listConfig?: {
			color?: string;
			spacing?: { before?: number; after?: number };
		};
	}
	/** Converts the given element(s) — one per slide — into a PPTX blob. */
	export function exportToPptx(
		elementOrSelector: string | HTMLElement | Array<string | HTMLElement>,
		options?: ExportOptions,
	): Promise<Blob>;
}

View file

@ -26,6 +26,7 @@
"@assistant-ui/react": "^0.11.53", "@assistant-ui/react": "^0.11.53",
"@assistant-ui/react-ai-sdk": "^1.1.20", "@assistant-ui/react-ai-sdk": "^1.1.20",
"@assistant-ui/react-markdown": "^0.11.9", "@assistant-ui/react-markdown": "^0.11.9",
"@babel/standalone": "^7.29.2",
"@electric-sql/client": "^1.4.0", "@electric-sql/client": "^1.4.0",
"@electric-sql/pglite": "^0.3.14", "@electric-sql/pglite": "^0.3.14",
"@electric-sql/pglite-sync": "^0.4.0", "@electric-sql/pglite-sync": "^0.4.0",
@ -72,6 +73,9 @@
"@radix-ui/react-toggle-group": "^1.1.10", "@radix-ui/react-toggle-group": "^1.1.10",
"@radix-ui/react-toolbar": "^1.1.11", "@radix-ui/react-toolbar": "^1.1.11",
"@radix-ui/react-tooltip": "^1.2.7", "@radix-ui/react-tooltip": "^1.2.7",
"@remotion/media": "^4.0.438",
"@remotion/player": "^4.0.438",
"@remotion/web-renderer": "^4.0.438",
"@streamdown/code": "^1.0.2", "@streamdown/code": "^1.0.2",
"@streamdown/math": "^1.0.2", "@streamdown/math": "^1.0.2",
"@tabler/icons-react": "^3.34.1", "@tabler/icons-react": "^3.34.1",
@ -88,6 +92,7 @@
"clsx": "^2.1.1", "clsx": "^2.1.1",
"cmdk": "^1.1.1", "cmdk": "^1.1.1",
"date-fns": "^4.1.0", "date-fns": "^4.1.0",
"dom-to-pptx": "^1.1.5",
"dotenv": "^17.2.3", "dotenv": "^17.2.3",
"drizzle-orm": "^0.44.5", "drizzle-orm": "^0.44.5",
"emblor": "^1.4.8", "emblor": "^1.4.8",
@ -126,6 +131,7 @@
"rehype-sanitize": "^6.0.0", "rehype-sanitize": "^6.0.0",
"remark-gfm": "^4.0.1", "remark-gfm": "^4.0.1",
"remark-math": "^6.0.0", "remark-math": "^6.0.0",
"remotion": "^4.0.438",
"server-only": "^0.0.1", "server-only": "^0.0.1",
"sonner": "^2.0.6", "sonner": "^2.0.6",
"streamdown": "^2.2.0", "streamdown": "^2.2.0",
@ -143,6 +149,7 @@
"@svgr/webpack": "^8.1.0", "@svgr/webpack": "^8.1.0",
"@tailwindcss/postcss": "^4.1.11", "@tailwindcss/postcss": "^4.1.11",
"@tailwindcss/typography": "^0.5.16", "@tailwindcss/typography": "^0.5.16",
"@types/babel__standalone": "^7.1.9",
"@types/canvas-confetti": "^1.9.0", "@types/canvas-confetti": "^1.9.0",
"@types/gapi": "^0.0.47", "@types/gapi": "^0.0.47",
"@types/google.picker": "^0.0.52", "@types/google.picker": "^0.0.52",

View file

@ -23,6 +23,9 @@ importers:
'@assistant-ui/react-markdown': '@assistant-ui/react-markdown':
specifier: ^0.11.9 specifier: ^0.11.9
version: 0.11.10(@assistant-ui/react@0.11.58(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(immer@10.2.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) version: 0.11.10(@assistant-ui/react@0.11.58(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(immer@10.2.0)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)(use-sync-external-store@1.6.0(react@19.2.4)))(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
'@babel/standalone':
specifier: ^7.29.2
version: 7.29.2
'@electric-sql/client': '@electric-sql/client':
specifier: ^1.4.0 specifier: ^1.4.0
version: 1.5.7 version: 1.5.7
@ -161,6 +164,15 @@ importers:
'@radix-ui/react-tooltip': '@radix-ui/react-tooltip':
specifier: ^1.2.7 specifier: ^1.2.7
version: 1.2.8(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) version: 1.2.8(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
'@remotion/media':
specifier: ^4.0.438
version: 4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
'@remotion/player':
specifier: ^4.0.438
version: 4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
'@remotion/web-renderer':
specifier: ^4.0.438
version: 4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
'@streamdown/code': '@streamdown/code':
specifier: ^1.0.2 specifier: ^1.0.2
version: 1.0.3(react@19.2.4) version: 1.0.3(react@19.2.4)
@ -209,6 +221,9 @@ importers:
date-fns: date-fns:
specifier: ^4.1.0 specifier: ^4.1.0
version: 4.1.0 version: 4.1.0
dom-to-pptx:
specifier: ^1.1.5
version: 1.1.5
dotenv: dotenv:
specifier: ^17.2.3 specifier: ^17.2.3
version: 17.3.1 version: 17.3.1
@ -323,6 +338,9 @@ importers:
remark-math: remark-math:
specifier: ^6.0.0 specifier: ^6.0.0
version: 6.0.0 version: 6.0.0
remotion:
specifier: ^4.0.438
version: 4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
server-only: server-only:
specifier: ^0.0.1 specifier: ^0.0.1
version: 0.0.1 version: 0.0.1
@ -369,6 +387,9 @@ importers:
'@tailwindcss/typography': '@tailwindcss/typography':
specifier: ^0.5.16 specifier: ^0.5.16
version: 0.5.19(tailwindcss@4.2.1) version: 0.5.19(tailwindcss@4.2.1)
'@types/babel__standalone':
specifier: ^7.1.9
version: 7.1.9
'@types/canvas-confetti': '@types/canvas-confetti':
specifier: ^1.9.0 specifier: ^1.9.0
version: 1.9.0 version: 1.9.0
@ -1074,6 +1095,10 @@ packages:
resolution: {integrity: sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==} resolution: {integrity: sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA==}
engines: {node: '>=6.9.0'} engines: {node: '>=6.9.0'}
'@babel/standalone@7.29.2':
resolution: {integrity: sha512-VSuvywmVRS8efooKrvJzs6BlVSxRvAdLeGrAKUrWoBx1fFBSeE/oBpUZCQ5BcprLyXy04W8skzz7JT8GqlNRJg==}
engines: {node: '>=6.9.0'}
'@babel/template@7.28.6': '@babel/template@7.28.6':
resolution: {integrity: sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==} resolution: {integrity: sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ==}
engines: {node: '>=6.9.0'} engines: {node: '>=6.9.0'}
@ -1892,6 +1917,21 @@ packages:
'@mdx-js/mdx@3.1.1': '@mdx-js/mdx@3.1.1':
resolution: {integrity: sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==} resolution: {integrity: sha512-f6ZO2ifpwAQIpzGWaBQT2TXxPv6z3RBzQKpVftEWN78Vl/YweF1uwussDx8ECAXVtr3Rs89fKyG9YlzUs9DyGQ==}
'@mediabunny/aac-encoder@1.39.2':
resolution: {integrity: sha512-KD6KADVzAnW7tqhRFGBOX4uaiHbd0Yxvg0lfthj3wJLAEEgEBAvi43w+ZXWeEn54X/jpabrLe4bW/eYFFvlbUA==}
peerDependencies:
mediabunny: ^1.0.0
'@mediabunny/flac-encoder@1.39.2':
resolution: {integrity: sha512-VwBr3AzZTPEEPvt4aladZiXwOf3W293eq213zDupGQi/taS8WWNqDd3eBdf8FfvlbXATfbRiycXDKyQ0HlOZaQ==}
peerDependencies:
mediabunny: ^1.0.0
'@mediabunny/mp3-encoder@1.39.2':
resolution: {integrity: sha512-3rrodrGnUpUP8F2d1aRUl8IvjqK3jegkupbOzvOokooSAO5rXk2Lr5jZe7TnPeiVGiXfmnoJ7s9uyUOHlCd8qw==}
peerDependencies:
mediabunny: ^1.0.0
'@microsoft/fetch-event-source@2.0.1': '@microsoft/fetch-event-source@2.0.1':
resolution: {integrity: sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA==} resolution: {integrity: sha512-W6CLUJ2eBMw3Rec70qrsEW0jOm/3twwJv21mrmj2yORiaVmVYGS4sSS5yUwvQc1ZlDLYGPnClVWmUUMagKNsfA==}
@ -3329,6 +3369,27 @@ packages:
'@react-dnd/shallowequal@4.0.2': '@react-dnd/shallowequal@4.0.2':
resolution: {integrity: sha512-/RVXdLvJxLg4QKvMoM5WlwNR9ViO9z8B/qPcc+C0Sa/teJY7QG7kJ441DwzOjMYEY7GmU4dj5EcGHIkKZiQZCA==} resolution: {integrity: sha512-/RVXdLvJxLg4QKvMoM5WlwNR9ViO9z8B/qPcc+C0Sa/teJY7QG7kJ441DwzOjMYEY7GmU4dj5EcGHIkKZiQZCA==}
'@remotion/licensing@4.0.438':
resolution: {integrity: sha512-JYg8ebbVkDJDnKt0BZV7OEptAysKkIWTdRPVGdwPeNw6CMcCooPPDuR7gGHjPtm0Odn/A3EKUopCX3kTvyWTPQ==}
'@remotion/media@4.0.438':
resolution: {integrity: sha512-l47r+8Gyrv8KqqumLC9HU0Gq2CWNm8cGaRKkwC1VbdnqZ4bPQUf9SnYJV1U4IsWR8eAhk0i1QGa1Hc4VKYqKVQ==}
peerDependencies:
react: '>=16.8.0'
react-dom: '>=16.8.0'
'@remotion/player@4.0.438':
resolution: {integrity: sha512-2OT8r0arsjxUEVEa5dNgain7ChbDG0THZglxh1Bo4U8nTSMnuX+1yyF0MVOlmT56yaESfoiGbZvnnpB6e7Ltew==}
peerDependencies:
react: '>=16.8.0'
react-dom: '>=16.8.0'
'@remotion/web-renderer@4.0.438':
resolution: {integrity: sha512-cOGtgqRiD+q77Vj65gy/fo+GK1wA3G5ft2xYLPJZzddmrhhrhibyFglljY2Vu7UaXYIUWZTkGoXwdKchC09UZA==}
peerDependencies:
react: '>=18.0.0'
react-dom: '>=18.0.0'
'@rollup/rollup-android-arm-eabi@4.59.0': '@rollup/rollup-android-arm-eabi@4.59.0':
resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==} resolution: {integrity: sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==}
cpu: [arm] cpu: [arm]
@ -3798,6 +3859,21 @@ packages:
'@tybys/wasm-util@0.10.1': '@tybys/wasm-util@0.10.1':
resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==}
'@types/babel__core@7.20.5':
resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==}
'@types/babel__generator@7.27.0':
resolution: {integrity: sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==}
'@types/babel__standalone@7.1.9':
resolution: {integrity: sha512-IcCNPLqpevUD7UpV8QB0uwQPOyoOKACFf0YtYWRHcmxcakaje4Q7dbG2+jMqxw/I8Zk0NHvEps66WwS7z/UaaA==}
'@types/babel__template@7.4.4':
resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==}
'@types/babel__traverse@7.28.0':
resolution: {integrity: sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==}
'@types/canvas-confetti@1.9.0': '@types/canvas-confetti@1.9.0':
resolution: {integrity: sha512-aBGj/dULrimR1XDZLtG9JwxX1b4HPRF6CX9Yfwh3NvstZEm1ZL7RBnel4keCPSqs1ANRu1u2Aoz9R+VmtjYuTg==} resolution: {integrity: sha512-aBGj/dULrimR1XDZLtG9JwxX1b4HPRF6CX9Yfwh3NvstZEm1ZL7RBnel4keCPSqs1ANRu1u2Aoz9R+VmtjYuTg==}
@ -3807,6 +3883,12 @@ packages:
'@types/diff-match-patch@1.0.36': '@types/diff-match-patch@1.0.36':
resolution: {integrity: sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg==} resolution: {integrity: sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg==}
'@types/dom-mediacapture-transform@0.1.11':
resolution: {integrity: sha512-Y2p+nGf1bF2XMttBnsVPHUWzRRZzqUoJAKmiP10b5umnO6DDrWI0BrGDJy1pOHoOULVmGSfFNkQrAlC5dcj6nQ==}
'@types/dom-webcodecs@0.1.13':
resolution: {integrity: sha512-O5hkiFIcjjszPIYyUSyvScyvrBoV3NOEEZx/pMlsu44TKzWNkLVBBxnxJz42in5n3QIolYOcBYFCPZZ0h8SkwQ==}
'@types/estree-jsx@1.0.5': '@types/estree-jsx@1.0.5':
resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==}
@ -3843,6 +3925,9 @@ packages:
'@types/ms@2.1.0': '@types/ms@2.1.0':
resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==}
'@types/node@18.19.130':
resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==}
'@types/node@20.19.33': '@types/node@20.19.33':
resolution: {integrity: sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw==} resolution: {integrity: sha512-Rs1bVAIdBs5gbTIKza/tgpMuG1k3U/UMJLWecIMxNdJFDMzcM5LOiLVRYh3PilWEYDIeUDv7bpiHPLPsbydGcw==}
@ -4053,6 +4138,10 @@ packages:
resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==} resolution: {integrity: sha512-Fw28YZpRnA3cAHHDlkt7xQHiJ0fcL+NRcIqsocZQUSmbzeIKRpwttJjik5ZGanXP+vlA4SbTg+AbA3bP363l+w==}
engines: {node: '>= 20'} engines: {node: '>= 20'}
'@xmldom/xmldom@0.8.11':
resolution: {integrity: sha512-cQzWCtO6C8TQiYl1ruKNn2U6Ao4o4WBBcbL61yJl84x+j5sOWWFU9X7DpND8XZG3daDppSsigMdfAIl2upQBRw==}
engines: {node: '>=10.0.0'}
acorn-jsx@5.3.2: acorn-jsx@5.3.2:
resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==}
peerDependencies: peerDependencies:
@ -4190,6 +4279,10 @@ packages:
resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==} resolution: {integrity: sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==}
engines: {node: 18 || 20 || >=22} engines: {node: 18 || 20 || >=22}
base64-arraybuffer@1.0.2:
resolution: {integrity: sha512-I3yl4r9QB5ZRY3XuJVEPfc2XhZO6YweFPI+UovAzn+8/hb3oJ6lnysaFcjVpkCPfVWFUDvoZ8kmVDP7WyRtYtQ==}
engines: {node: '>= 0.6.0'}
baseline-browser-mapping@2.10.0: baseline-browser-mapping@2.10.0:
resolution: {integrity: sha512-lIyg0szRfYbiy67j9KN8IyeD7q7hcmqnJ1ddWmNt19ItGpNN64mnllmxUNFIOdOm6by97jlL6wfpTTJrmnjWAA==} resolution: {integrity: sha512-lIyg0szRfYbiy67j9KN8IyeD7q7hcmqnJ1ddWmNt19ItGpNN64mnllmxUNFIOdOm6by97jlL6wfpTTJrmnjWAA==}
engines: {node: '>=6.0.0'} engines: {node: '>=6.0.0'}
@ -4346,6 +4439,9 @@ packages:
core-js@3.48.0: core-js@3.48.0:
resolution: {integrity: sha512-zpEHTy1fjTMZCKLHUZoVeylt9XrzaIN2rbPXEt0k+q7JE5CkCZdo6bNq55bn24a69CH7ErAVLKijxJja4fw+UQ==} resolution: {integrity: sha512-zpEHTy1fjTMZCKLHUZoVeylt9XrzaIN2rbPXEt0k+q7JE5CkCZdo6bNq55bn24a69CH7ErAVLKijxJja4fw+UQ==}
core-util-is@1.0.3:
resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==}
cosmiconfig@8.3.6: cosmiconfig@8.3.6:
resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==} resolution: {integrity: sha512-kcZ6+W5QzcJ3P1Mt+83OUv/oHFqZHIx8DuxG6eZ5RGMERoLqp4BuGjhHLYGK+Kf5XVkQvqBSmAy/nGWN3qDgEA==}
engines: {node: '>=14'} engines: {node: '>=14'}
@ -4364,6 +4460,9 @@ packages:
resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==}
engines: {node: '>= 8'} engines: {node: '>= 8'}
css-line-break@2.1.0:
resolution: {integrity: sha512-FHcKFCZcAha3LwfVBhCQbW2nCNbkZXn7KVUJcsT5/P8YmfsVja0FMPJr0B903j/E69HUphKiV9iQArX8SDYA4w==}
css-select@5.2.2: css-select@5.2.2:
resolution: {integrity: sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==} resolution: {integrity: sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==}
@ -4481,6 +4580,9 @@ packages:
dom-serializer@2.0.0: dom-serializer@2.0.0:
resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==}
dom-to-pptx@1.1.5:
resolution: {integrity: sha512-3uGRnPJH2oOIicClhoslkTUHxLx8GYqAK1kFvFYy2Z2BQ9RFjgenduZsKuYdiDRs2RRVtXiTKtUeZmPyMgn/Ug==}
domelementtype@2.3.0: domelementtype@2.3.0:
resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==}
@ -4906,6 +5008,9 @@ packages:
flatted@3.3.3: flatted@3.3.3:
resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==} resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==}
fonteditor-core@2.6.3:
resolution: {integrity: sha512-YUryIKjkenjZ41E7JvM3V+02Ak4mTHDDTwBWgs9KBzypzHqLZHuua1UDRevZNTKawmnq1dbBAa70Jddl2+F4FQ==}
for-each@0.3.5: for-each@0.3.5:
resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==} resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
@ -5208,6 +5313,13 @@ packages:
html-void-elements@3.0.0: html-void-elements@3.0.0:
resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==}
html2canvas@1.4.1:
resolution: {integrity: sha512-fPU6BHNpsyIhr8yyMpTLLxAbkaK8ArIBcmZIRiBLiDhjeqvXolaEmDGmELFuX9I4xDcaKKcJl+TKZLqruBbmWA==}
engines: {node: '>=8.0.0'}
https@1.0.0:
resolution: {integrity: sha512-4EC57ddXrkaF0x83Oj8sM6SLQHAWXw90Skqu2M4AEWENZ3F02dFJE/GARA8igO79tcgYqGrD7ae4f5L3um2lgg==}
icu-minify@4.8.3: icu-minify@4.8.3:
resolution: {integrity: sha512-65Av7FLosNk7bPbmQx5z5XG2Y3T2GFppcjiXh4z1idHeVgQxlDpAmkGoYI0eFzAvrOnjpWTL5FmPDhsdfRMPEA==} resolution: {integrity: sha512-65Av7FLosNk7bPbmQx5z5XG2Y3T2GFppcjiXh4z1idHeVgQxlDpAmkGoYI0eFzAvrOnjpWTL5FmPDhsdfRMPEA==}
@ -5219,11 +5331,19 @@ packages:
resolution: {integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==} resolution: {integrity: sha512-Hs59xBNfUIunMFgWAbGX5cq6893IbWg4KnrjbYwX3tx0ztorVgTDA6B2sxf8ejHJ4wz8BqGUMYlnzNBer5NvGg==}
engines: {node: '>= 4'} engines: {node: '>= 4'}
image-size@1.2.1:
resolution: {integrity: sha512-rH+46sQJ2dlwfjfhCyNx5thzrv+dtmBIhPHk0zgRUukHzZ/kRueTJXoYYsclBaKcSMBWuGbOFXtioLpzTb5euw==}
engines: {node: '>=16.x'}
hasBin: true
image-size@2.0.2: image-size@2.0.2:
resolution: {integrity: sha512-IRqXKlaXwgSMAMtpNzZa1ZAe8m+Sa1770Dhk8VkSsP9LS+iHD62Zd8FQKs8fbPiagBE7BzoFX23cxFnwshpV6w==} resolution: {integrity: sha512-IRqXKlaXwgSMAMtpNzZa1ZAe8m+Sa1770Dhk8VkSsP9LS+iHD62Zd8FQKs8fbPiagBE7BzoFX23cxFnwshpV6w==}
engines: {node: '>=16.x'} engines: {node: '>=16.x'}
hasBin: true hasBin: true
immediate@3.0.6:
resolution: {integrity: sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ==}
immer@10.2.0: immer@10.2.0:
resolution: {integrity: sha512-d/+XTN3zfODyjr89gM3mPq1WNX2B8pYsu7eORitdwyA2sBubnTl3laYlBk4sXY5FUa5qTZGBDPJICVbvqzjlbw==} resolution: {integrity: sha512-d/+XTN3zfODyjr89gM3mPq1WNX2B8pYsu7eORitdwyA2sBubnTl3laYlBk4sXY5FUa5qTZGBDPJICVbvqzjlbw==}
@ -5235,6 +5355,9 @@ packages:
resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
engines: {node: '>=0.8.19'} engines: {node: '>=0.8.19'}
inherits@2.0.4:
resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
inline-style-parser@0.2.7: inline-style-parser@0.2.7:
resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==} resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==}
@ -5386,6 +5509,9 @@ packages:
resolution: {integrity: sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==} resolution: {integrity: sha512-mfcwb6IzQyOKTs84CQMrOwW4gQcaTOAWJ0zzJCl2WSPDrWk/OzDaImWFH3djXhb24g4eudZfLRozAvPGw4d9hQ==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
isarray@1.0.0:
resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==}
isarray@2.0.5: isarray@2.0.5:
resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==} resolution: {integrity: sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==}
@ -5506,6 +5632,9 @@ packages:
resolution: {integrity: sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==} resolution: {integrity: sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ==}
engines: {node: '>=4.0'} engines: {node: '>=4.0'}
jszip@3.10.1:
resolution: {integrity: sha512-xXDvecyTpGLrqFrvkrUSoxxfJI5AH7U8zxxtVclpsUtMCq4JQ290LY8AW5c7Ggnr/Y/oK+bQMbqK2qmtk3pN4g==}
katex@0.16.22: katex@0.16.22:
resolution: {integrity: sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==} resolution: {integrity: sha512-XCHRdUw4lf3SKBaJe4EvgqIuWwkPSo9XoeO8GjQW94Bp7TWv9hNhzZjZ+OH9yf1UmLygb7DIT5GSFQiyt16zYg==}
hasBin: true hasBin: true
@ -5542,6 +5671,9 @@ packages:
resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==}
engines: {node: '>= 0.8.0'} engines: {node: '>= 0.8.0'}
lie@3.3.0:
resolution: {integrity: sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ==}
lightningcss-android-arm64@1.31.1: lightningcss-android-arm64@1.31.1:
resolution: {integrity: sha512-HXJF3x8w9nQ4jbXRiNppBCqeZPIAfUo8zE/kOEGbW5NZvGc/K7nMxbhIr+YlFlHW5mpbg/YFPdbnCh1wAXCKFg==} resolution: {integrity: sha512-HXJF3x8w9nQ4jbXRiNppBCqeZPIAfUo8zE/kOEGbW5NZvGc/K7nMxbhIr+YlFlHW5mpbg/YFPdbnCh1wAXCKFg==}
engines: {node: '>= 12.0.0'} engines: {node: '>= 12.0.0'}
@ -5744,6 +5876,9 @@ packages:
mdn-data@2.0.30: mdn-data@2.0.30:
resolution: {integrity: sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==} resolution: {integrity: sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==}
mediabunny@1.39.2:
resolution: {integrity: sha512-VcrisGRt+OI7tTPrziucJoCIPYIS/DEWY37TqzQVLWSUUHiyvsiRizEypQ3FOlhfIZ4ytAG/Mw4zxfetCTyKUg==}
merge2@1.4.1: merge2@1.4.1:
resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==}
engines: {node: '>= 8'} engines: {node: '>= 8'}
@ -6020,6 +6155,11 @@ packages:
oniguruma-to-es@4.3.4: oniguruma-to-es@4.3.4:
resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==} resolution: {integrity: sha512-3VhUGN3w2eYxnTzHn+ikMI+fp/96KoRSVK9/kMTcFqj1NRDh2IhQCKvYxDnWePKRXY/AqH+Fuiyb7VHSzBjHfA==}
opentype.js@1.3.4:
resolution: {integrity: sha512-d2JE9RP/6uagpQAVtJoF0pJJA/fgai89Cc50Yp0EJHk+eLp6QQ7gBoblsnubRULNY132I0J1QKMJ+JTbMqz4sw==}
engines: {node: '>= 8.0.0'}
hasBin: true
optics-ts@2.4.1: optics-ts@2.4.1:
resolution: {integrity: sha512-HaYzMHvC80r7U/LqAd4hQyopDezC60PO2qF5GuIwALut2cl5rK1VWHsqTp0oqoJJWjiv6uXKqsO+Q2OO0C3MmQ==} resolution: {integrity: sha512-HaYzMHvC80r7U/LqAd4hQyopDezC60PO2qF5GuIwALut2cl5rK1VWHsqTp0oqoJJWjiv6uXKqsO+Q2OO0C3MmQ==}
@ -6039,6 +6179,12 @@ packages:
resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==}
engines: {node: '>=10'} engines: {node: '>=10'}
pako@1.0.11:
resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==}
pako@2.1.0:
resolution: {integrity: sha512-w+eufiZ1WuJYgPXbV/PO3NCMEc3xqylkKHzp8bxp1uW4qaSNQUkwmLLEc3kKsfz8lpV1F8Ht3U1Cm+9Srog2ug==}
parent-module@1.0.1: parent-module@1.0.1:
resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
engines: {node: '>=6'} engines: {node: '>=6'}
@ -6178,6 +6324,9 @@ packages:
resolution: {integrity: sha512-mdb8TKt+YCRbGQdYar3AKNUPCyEiqcprScF4unYpGALF6HlBaEuO6wPuIqXXpCWkw4VclJYCKbb6lq6pH6bJeA==} resolution: {integrity: sha512-mdb8TKt+YCRbGQdYar3AKNUPCyEiqcprScF4unYpGALF6HlBaEuO6wPuIqXXpCWkw4VclJYCKbb6lq6pH6bJeA==}
engines: {node: ^20.20.0 || >=22.22.0} engines: {node: ^20.20.0 || >=22.22.0}
pptxgenjs@3.12.0:
resolution: {integrity: sha512-ZozkYKWb1MoPR4ucw3/aFYlHkVIJxo9czikEclcUVnS4Iw/M+r+TEwdlB3fyAWO9JY1USxJDt0Y0/r15IR/RUA==}
preact@10.28.4: preact@10.28.4:
resolution: {integrity: sha512-uKFfOHWuSNpRFVTnljsCluEFq57OKT+0QdOiQo8XWnQ/pSvg7OpX5eNOejELXJMWy+BwM2nobz0FkvzmnpCNsQ==} resolution: {integrity: sha512-uKFfOHWuSNpRFVTnljsCluEFq57OKT+0QdOiQo8XWnQ/pSvg7OpX5eNOejELXJMWy+BwM2nobz0FkvzmnpCNsQ==}
@ -6193,6 +6342,9 @@ packages:
resolution: {integrity: sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==} resolution: {integrity: sha512-DEvV2ZF2r2/63V+tK8hQvrR2ZGn10srHbXviTlcv7Kpzw8jWiNTqbVgjO3IY8RxrrOUF8VPMQQFysYYYv0YZxw==}
engines: {node: '>=6'} engines: {node: '>=6'}
process-nextick-args@2.0.1:
resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==}
prop-types@15.8.1: prop-types@15.8.1:
resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==} resolution: {integrity: sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==}
@ -6219,6 +6371,9 @@ packages:
queue-microtask@1.2.3: queue-microtask@1.2.3:
resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==}
queue@6.0.2:
resolution: {integrity: sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==}
radix-ui@1.4.3: radix-ui@1.4.3:
resolution: {integrity: sha512-aWizCQiyeAenIdUbqEpXgRA1ya65P13NKn/W8rWkcN0OPkRDxdBVLWnIEDsS2RpwCK2nobI7oMUSmexzTDyAmA==} resolution: {integrity: sha512-aWizCQiyeAenIdUbqEpXgRA1ya65P13NKn/W8rWkcN0OPkRDxdBVLWnIEDsS2RpwCK2nobI7oMUSmexzTDyAmA==}
peerDependencies: peerDependencies:
@ -6392,6 +6547,9 @@ packages:
resolution: {integrity: sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==} resolution: {integrity: sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==}
engines: {node: '>=0.10.0'} engines: {node: '>=0.10.0'}
readable-stream@2.3.8:
resolution: {integrity: sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==}
readdirp@5.0.0: readdirp@5.0.0:
resolution: {integrity: sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==} resolution: {integrity: sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==}
engines: {node: '>= 20.19.0'} engines: {node: '>= 20.19.0'}
@ -6490,6 +6648,12 @@ packages:
remend@1.2.1: remend@1.2.1:
resolution: {integrity: sha512-4wC12bgXsfKAjF1ewwkNIQz5sqewz/z1xgIgjEMb3r1pEytQ37F0Cm6i+OhbTWEvguJD7lhOUJhK5fSasw9f0w==} resolution: {integrity: sha512-4wC12bgXsfKAjF1ewwkNIQz5sqewz/z1xgIgjEMb3r1pEytQ37F0Cm6i+OhbTWEvguJD7lhOUJhK5fSasw9f0w==}
remotion@4.0.438:
resolution: {integrity: sha512-keG2GxHh81UFV0zFrwRh+yngXHn6C3z/Nohc+ru49M1TlIcaThG/hD2amJNH9otYzpYJxVjNF1t/i1hqcI8OGw==}
peerDependencies:
react: '>=16.8.0'
react-dom: '>=16.8.0'
resolve-from@4.0.0: resolve-from@4.0.0:
resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==}
engines: {node: '>=4'} engines: {node: '>=4'}
@ -6523,6 +6687,9 @@ packages:
resolution: {integrity: sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==} resolution: {integrity: sha512-AURm5f0jYEOydBj7VQlVvDrjeFgthDdEF5H1dP+6mNpoXOMo1quQqJ4wvJDyRZ9+pO3kGWoOdmV08cSv2aJV6Q==}
engines: {node: '>=0.4'} engines: {node: '>=0.4'}
safe-buffer@5.1.2:
resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==}
safe-push-apply@1.0.0: safe-push-apply@1.0.0:
resolution: {integrity: sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==} resolution: {integrity: sha512-iKE9w/Z7xCzUMIZqdBsp6pEQvwuEebH4vdpjcDWnyzaI6yl6O9FHvVpmGelvEHNsoY6wGblkxR6Zty/h00WiSA==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
@ -6567,6 +6734,9 @@ packages:
resolution: {integrity: sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==} resolution: {integrity: sha512-RJRdvCo6IAnPdsvP/7m6bsQqNnn1FCBX5ZNtFL98MmFF/4xAIJTIg1YbHW5DC2W5SKZanrC6i4HsJqlajw/dZw==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
setimmediate@1.0.5:
resolution: {integrity: sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==}
sharp@0.34.5: sharp@0.34.5:
resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==} resolution: {integrity: sha512-Ou9I5Ft9WNcCbXrU9cMgPBcCK8LiwLqcbywW3t4oDV37n1pzpuNLsYiAV8eODnjbtQlSDwZ2cUEeQz4E54Hltg==}
engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0}
@ -6666,6 +6836,9 @@ packages:
react: ^18.0.0 || ^19.0.0 react: ^18.0.0 || ^19.0.0
react-dom: ^18.0.0 || ^19.0.0 react-dom: ^18.0.0 || ^19.0.0
string.prototype.codepointat@0.2.1:
resolution: {integrity: sha512-2cBVCj6I4IOvEnjgO/hWqXjqBGsY+zwPmHl12Srk9IXSZ56Jwwmy+66XO5Iut/oQVR7t5ihYdLB0GMa4alEUcg==}
string.prototype.includes@2.0.1: string.prototype.includes@2.0.1:
resolution: {integrity: sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==} resolution: {integrity: sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
@ -6689,6 +6862,9 @@ packages:
resolution: {integrity: sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==} resolution: {integrity: sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
string_decoder@1.1.1:
resolution: {integrity: sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==}
stringify-entities@4.0.4: stringify-entities@4.0.4:
resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==} resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==}
@ -6763,10 +6939,16 @@ packages:
resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==} resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==}
engines: {node: '>=6'} engines: {node: '>=6'}
text-segmentation@1.0.3:
resolution: {integrity: sha512-iOiPUo/BGnZ6+54OsWxZidGCsdU8YbE4PSpdPinp7DeMtUJNJBoJ/ouUSTJjHkh1KntHaltHl/gDs2FC4i5+Nw==}
throttleit@2.1.0: throttleit@2.1.0:
resolution: {integrity: sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw==} resolution: {integrity: sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw==}
engines: {node: '>=18'} engines: {node: '>=18'}
tiny-inflate@1.0.3:
resolution: {integrity: sha512-pkY1fj1cKHb2seWDy0B16HeWyczlJA9/WW3u3c4z/NiWDsO3DOU5D7nhTLE9CF0yXv/QZFY7sEJmj24dK+Rrqw==}
tiny-invariant@1.3.1: tiny-invariant@1.3.1:
resolution: {integrity: sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==} resolution: {integrity: sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw==}
@ -6845,6 +7027,9 @@ packages:
resolution: {integrity: sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==} resolution: {integrity: sha512-nWJ91DjeOkej/TA8pXQ3myruKpKEYgqvpw9lz4OPHj/NWFNluYrjbz9j01CJ8yKQd2g4jFoOkINCTW2I5LEEyw==}
engines: {node: '>= 0.4'} engines: {node: '>= 0.4'}
undici-types@5.26.5:
resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==}
undici-types@6.21.0: undici-types@6.21.0:
resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==}
@ -6986,6 +7171,9 @@ packages:
util-deprecate@1.0.2: util-deprecate@1.0.2:
resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==}
utrie@1.0.2:
resolution: {integrity: sha512-1MLa5ouZiOmQzUbjbu9VmjLzn1QLXBhwpUa7kdLUQK+KQ5KA9I1vk5U4YHe/X2Ch7PYnJfWuWT+VbuxbGwljhw==}
vaul@1.1.2: vaul@1.1.2:
resolution: {integrity: sha512-ZFkClGpWyI2WUQjdLJ/BaGuV6AVQiJ3uELGk3OYtP+B6yCO7Cmn9vPFXVJkRaGkOJu3m8bQMgtyzNHixULceQA==} resolution: {integrity: sha512-ZFkClGpWyI2WUQjdLJ/BaGuV6AVQiJ3uELGk3OYtP+B6yCO7Cmn9vPFXVJkRaGkOJu3m8bQMgtyzNHixULceQA==}
peerDependencies: peerDependencies:
@ -7960,6 +8148,8 @@ snapshots:
'@babel/runtime@7.28.6': {} '@babel/runtime@7.28.6': {}
'@babel/standalone@7.29.2': {}
'@babel/template@7.28.6': '@babel/template@7.28.6':
dependencies: dependencies:
'@babel/code-frame': 7.29.0 '@babel/code-frame': 7.29.0
@ -8565,6 +8755,18 @@ snapshots:
transitivePeerDependencies: transitivePeerDependencies:
- supports-color - supports-color
'@mediabunny/aac-encoder@1.39.2(mediabunny@1.39.2)':
dependencies:
mediabunny: 1.39.2
'@mediabunny/flac-encoder@1.39.2(mediabunny@1.39.2)':
dependencies:
mediabunny: 1.39.2
'@mediabunny/mp3-encoder@1.39.2(mediabunny@1.39.2)':
dependencies:
mediabunny: 1.39.2
'@microsoft/fetch-event-source@2.0.1': {} '@microsoft/fetch-event-source@2.0.1': {}
'@napi-rs/wasm-runtime@0.2.12': '@napi-rs/wasm-runtime@0.2.12':
@ -10078,6 +10280,33 @@ snapshots:
'@react-dnd/shallowequal@4.0.2': {} '@react-dnd/shallowequal@4.0.2': {}
'@remotion/licensing@4.0.438': {}
'@remotion/media@4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)':
dependencies:
mediabunny: 1.39.2
react: 19.2.4
react-dom: 19.2.4(react@19.2.4)
remotion: 4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
zod: 4.3.6
'@remotion/player@4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)':
dependencies:
react: 19.2.4
react-dom: 19.2.4(react@19.2.4)
remotion: 4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
'@remotion/web-renderer@4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)':
dependencies:
'@mediabunny/aac-encoder': 1.39.2(mediabunny@1.39.2)
'@mediabunny/flac-encoder': 1.39.2(mediabunny@1.39.2)
'@mediabunny/mp3-encoder': 1.39.2(mediabunny@1.39.2)
'@remotion/licensing': 4.0.438
mediabunny: 1.39.2
react: 19.2.4
react-dom: 19.2.4(react@19.2.4)
remotion: 4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4)
'@rollup/rollup-android-arm-eabi@4.59.0': '@rollup/rollup-android-arm-eabi@4.59.0':
optional: true optional: true
@ -10484,6 +10713,36 @@ snapshots:
tslib: 2.8.1 tslib: 2.8.1
optional: true optional: true
'@types/babel__core@7.20.5':
dependencies:
'@babel/parser': 7.29.0
'@babel/types': 7.29.0
'@types/babel__generator': 7.27.0
'@types/babel__template': 7.4.4
'@types/babel__traverse': 7.28.0
'@types/babel__generator@7.27.0':
dependencies:
'@babel/types': 7.29.0
'@types/babel__standalone@7.1.9':
dependencies:
'@babel/parser': 7.29.0
'@babel/types': 7.29.0
'@types/babel__core': 7.20.5
'@types/babel__generator': 7.27.0
'@types/babel__template': 7.4.4
'@types/babel__traverse': 7.28.0
'@types/babel__template@7.4.4':
dependencies:
'@babel/parser': 7.29.0
'@babel/types': 7.29.0
'@types/babel__traverse@7.28.0':
dependencies:
'@babel/types': 7.29.0
'@types/canvas-confetti@1.9.0': {} '@types/canvas-confetti@1.9.0': {}
'@types/debug@4.1.12': '@types/debug@4.1.12':
@ -10492,6 +10751,12 @@ snapshots:
'@types/diff-match-patch@1.0.36': {} '@types/diff-match-patch@1.0.36': {}
'@types/dom-mediacapture-transform@0.1.11':
dependencies:
'@types/dom-webcodecs': 0.1.13
'@types/dom-webcodecs@0.1.13': {}
'@types/estree-jsx@1.0.5': '@types/estree-jsx@1.0.5':
dependencies: dependencies:
'@types/estree': 1.0.8 '@types/estree': 1.0.8
@ -10524,6 +10789,10 @@ snapshots:
'@types/ms@2.1.0': {} '@types/ms@2.1.0': {}
'@types/node@18.19.130':
dependencies:
undici-types: 5.26.5
'@types/node@20.19.33': '@types/node@20.19.33':
dependencies: dependencies:
undici-types: 6.21.0 undici-types: 6.21.0
@ -10737,6 +11006,8 @@ snapshots:
'@vercel/oidc@3.1.0': {} '@vercel/oidc@3.1.0': {}
'@xmldom/xmldom@0.8.11': {}
acorn-jsx@5.3.2(acorn@8.16.0): acorn-jsx@5.3.2(acorn@8.16.0):
dependencies: dependencies:
acorn: 8.16.0 acorn: 8.16.0
@ -10911,6 +11182,8 @@ snapshots:
balanced-match@4.0.4: {} balanced-match@4.0.4: {}
base64-arraybuffer@1.0.2: {}
baseline-browser-mapping@2.10.0: {} baseline-browser-mapping@2.10.0: {}
boolbase@1.0.0: {} boolbase@1.0.0: {}
@ -11052,6 +11325,8 @@ snapshots:
core-js@3.48.0: {} core-js@3.48.0: {}
core-util-is@1.0.3: {}
cosmiconfig@8.3.6(typescript@5.9.3): cosmiconfig@8.3.6(typescript@5.9.3):
dependencies: dependencies:
import-fresh: 3.3.1 import-fresh: 3.3.1
@ -11071,6 +11346,10 @@ snapshots:
shebang-command: 2.0.0 shebang-command: 2.0.0
which: 2.0.2 which: 2.0.2
css-line-break@2.1.0:
dependencies:
utrie: 1.0.2
css-select@5.2.2: css-select@5.2.2:
dependencies: dependencies:
boolbase: 1.0.0 boolbase: 1.0.0
@ -11183,6 +11462,15 @@ snapshots:
domhandler: 5.0.3 domhandler: 5.0.3
entities: 4.5.0 entities: 4.5.0
dom-to-pptx@1.1.5:
dependencies:
fonteditor-core: 2.6.3
html2canvas: 1.4.1
jszip: 3.10.1
opentype.js: 1.3.4
pako: 2.1.0
pptxgenjs: 3.12.0
domelementtype@2.3.0: {} domelementtype@2.3.0: {}
domhandler@5.0.3: domhandler@5.0.3:
@ -11766,6 +12054,10 @@ snapshots:
flatted@3.3.3: {} flatted@3.3.3: {}
fonteditor-core@2.6.3:
dependencies:
'@xmldom/xmldom': 0.8.11
for-each@0.3.5: for-each@0.3.5:
dependencies: dependencies:
is-callable: 1.2.7 is-callable: 1.2.7
@ -12158,6 +12450,13 @@ snapshots:
html-void-elements@3.0.0: {} html-void-elements@3.0.0: {}
html2canvas@1.4.1:
dependencies:
css-line-break: 2.1.0
text-segmentation: 1.0.3
https@1.0.0: {}
icu-minify@4.8.3: icu-minify@4.8.3:
dependencies: dependencies:
'@formatjs/icu-messageformat-parser': 3.5.1 '@formatjs/icu-messageformat-parser': 3.5.1
@ -12166,8 +12465,14 @@ snapshots:
ignore@7.0.5: {} ignore@7.0.5: {}
image-size@1.2.1:
dependencies:
queue: 6.0.2
image-size@2.0.2: {} image-size@2.0.2: {}
immediate@3.0.6: {}
immer@10.2.0: {} immer@10.2.0: {}
import-fresh@3.3.1: import-fresh@3.3.1:
@ -12177,6 +12482,8 @@ snapshots:
imurmurhash@0.1.4: {} imurmurhash@0.1.4: {}
inherits@2.0.4: {}
inline-style-parser@0.2.7: {} inline-style-parser@0.2.7: {}
internal-slot@1.1.0: internal-slot@1.1.0:
@ -12334,6 +12641,8 @@ snapshots:
call-bound: 1.0.4 call-bound: 1.0.4
get-intrinsic: 1.3.0 get-intrinsic: 1.3.0
isarray@1.0.0: {}
isarray@2.0.5: {} isarray@2.0.5: {}
isexe@2.0.0: {} isexe@2.0.0: {}
@ -12418,6 +12727,13 @@ snapshots:
object.assign: 4.1.7 object.assign: 4.1.7
object.values: 1.2.1 object.values: 1.2.1
jszip@3.10.1:
dependencies:
lie: 3.3.0
pako: 1.0.11
readable-stream: 2.3.8
setimmediate: 1.0.5
katex@0.16.22: katex@0.16.22:
dependencies: dependencies:
commander: 8.3.0 commander: 8.3.0
@ -12445,6 +12761,10 @@ snapshots:
prelude-ls: 1.2.1 prelude-ls: 1.2.1
type-check: 0.4.0 type-check: 0.4.0
lie@3.3.0:
dependencies:
immediate: 3.0.6
lightningcss-android-arm64@1.31.1: lightningcss-android-arm64@1.31.1:
optional: true optional: true
@ -12736,6 +13056,11 @@ snapshots:
mdn-data@2.0.30: {} mdn-data@2.0.30: {}
mediabunny@1.39.2:
dependencies:
'@types/dom-mediacapture-transform': 0.1.11
'@types/dom-webcodecs': 0.1.13
merge2@1.4.1: {} merge2@1.4.1: {}
micromark-core-commonmark@2.0.3: micromark-core-commonmark@2.0.3:
@ -13180,6 +13505,11 @@ snapshots:
regex: 6.1.0 regex: 6.1.0
regex-recursion: 6.0.2 regex-recursion: 6.0.2
opentype.js@1.3.4:
dependencies:
string.prototype.codepointat: 0.2.1
tiny-inflate: 1.0.3
optics-ts@2.4.1: {} optics-ts@2.4.1: {}
optionator@0.9.4: optionator@0.9.4:
@ -13205,6 +13535,10 @@ snapshots:
dependencies: dependencies:
p-limit: 3.1.0 p-limit: 3.1.0
pako@1.0.11: {}
pako@2.1.0: {}
parent-module@1.0.1: parent-module@1.0.1:
dependencies: dependencies:
callsites: 3.1.0 callsites: 3.1.0
@ -13368,6 +13702,13 @@ snapshots:
dependencies: dependencies:
'@posthog/core': 1.23.1 '@posthog/core': 1.23.1
pptxgenjs@3.12.0:
dependencies:
'@types/node': 18.19.130
https: 1.0.0
image-size: 1.2.1
jszip: 3.10.1
preact@10.28.4: {} preact@10.28.4: {}
prelude-ls@1.2.1: {} prelude-ls@1.2.1: {}
@ -13376,6 +13717,8 @@ snapshots:
prismjs@1.30.0: {} prismjs@1.30.0: {}
process-nextick-args@2.0.1: {}
prop-types@15.8.1: prop-types@15.8.1:
dependencies: dependencies:
loose-envify: 1.4.0 loose-envify: 1.4.0
@ -13411,6 +13754,10 @@ snapshots:
queue-microtask@1.2.3: {} queue-microtask@1.2.3: {}
queue@6.0.2:
dependencies:
inherits: 2.0.4
radix-ui@1.4.3(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4): radix-ui@1.4.3(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4):
dependencies: dependencies:
'@radix-ui/primitive': 1.1.3 '@radix-ui/primitive': 1.1.3
@ -13639,6 +13986,16 @@ snapshots:
react@19.2.4: {} react@19.2.4: {}
readable-stream@2.3.8:
dependencies:
core-util-is: 1.0.3
inherits: 2.0.4
isarray: 1.0.0
process-nextick-args: 2.0.1
safe-buffer: 5.1.2
string_decoder: 1.1.1
util-deprecate: 1.0.2
readdirp@5.0.0: {} readdirp@5.0.0: {}
recma-build-jsx@1.0.0: recma-build-jsx@1.0.0:
@ -13825,6 +14182,11 @@ snapshots:
remend@1.2.1: {} remend@1.2.1: {}
remotion@4.0.438(react-dom@19.2.4(react@19.2.4))(react@19.2.4):
dependencies:
react: 19.2.4
react-dom: 19.2.4(react@19.2.4)
resolve-from@4.0.0: {} resolve-from@4.0.0: {}
resolve-pkg-maps@1.0.0: {} resolve-pkg-maps@1.0.0: {}
@ -13889,6 +14251,8 @@ snapshots:
has-symbols: 1.1.0 has-symbols: 1.1.0
isarray: 2.0.5 isarray: 2.0.5
safe-buffer@5.1.2: {}
safe-push-apply@1.0.0: safe-push-apply@1.0.0:
dependencies: dependencies:
es-errors: 1.3.0 es-errors: 1.3.0
@ -13938,6 +14302,8 @@ snapshots:
es-errors: 1.3.0 es-errors: 1.3.0
es-object-atoms: 1.1.1 es-object-atoms: 1.1.1
setimmediate@1.0.5: {}
sharp@0.34.5: sharp@0.34.5:
dependencies: dependencies:
'@img/colour': 1.0.0 '@img/colour': 1.0.0
@ -14102,6 +14468,8 @@ snapshots:
transitivePeerDependencies: transitivePeerDependencies:
- supports-color - supports-color
string.prototype.codepointat@0.2.1: {}
string.prototype.includes@2.0.1: string.prototype.includes@2.0.1:
dependencies: dependencies:
call-bind: 1.0.8 call-bind: 1.0.8
@ -14152,6 +14520,10 @@ snapshots:
define-properties: 1.2.1 define-properties: 1.2.1
es-object-atoms: 1.1.1 es-object-atoms: 1.1.1
string_decoder@1.1.1:
dependencies:
safe-buffer: 5.1.2
stringify-entities@4.0.4: stringify-entities@4.0.4:
dependencies: dependencies:
character-entities-html4: 2.1.0 character-entities-html4: 2.1.0
@ -14216,8 +14588,14 @@ snapshots:
tapable@2.3.0: {} tapable@2.3.0: {}
text-segmentation@1.0.3:
dependencies:
utrie: 1.0.2
throttleit@2.1.0: {} throttleit@2.1.0: {}
tiny-inflate@1.0.3: {}
tiny-invariant@1.3.1: {} tiny-invariant@1.3.1: {}
tinyexec@1.0.2: {} tinyexec@1.0.2: {}
@ -14307,6 +14685,8 @@ snapshots:
has-symbols: 1.1.0 has-symbols: 1.1.0
which-boxed-primitive: 1.1.1 which-boxed-primitive: 1.1.1
undici-types@5.26.5: {}
undici-types@6.21.0: {} undici-types@6.21.0: {}
unicode-canonical-property-names-ecmascript@2.0.1: {} unicode-canonical-property-names-ecmascript@2.0.1: {}
@ -14465,6 +14845,10 @@ snapshots:
util-deprecate@1.0.2: {} util-deprecate@1.0.2: {}
utrie@1.0.2:
dependencies:
base64-arraybuffer: 1.0.2
vaul@1.1.2(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4): vaul@1.1.2(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4):
dependencies: dependencies:
'@radix-ui/react-dialog': 1.1.15(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4) '@radix-ui/react-dialog': 1.1.15(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(react-dom@19.2.4(react@19.2.4))(react@19.2.4)