mirror of
https://github.com/MODSetter/SurfSense.git
synced 2026-05-12 17:22:38 +02:00
Add deliverables and web tool streaming handlers for chat runs.
This commit is contained in:
parent
a322eedaa1
commit
c8fb4aa5e5
27 changed files with 645 additions and 0 deletions
|
|
@ -0,0 +1,28 @@
|
|||
"""generate_image: tool card + terminal summary."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterator
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.emission_context import (
|
||||
ToolCompletionEmissionContext,
|
||||
)
|
||||
|
||||
|
||||
def iter_completion_emission_frames(
    ctx: ToolCompletionEmissionContext,
) -> Iterator[str]:
    """Emit the generate_image tool-output card, then a terminal status line."""
    output = ctx.tool_output
    # Non-dict outputs are wrapped so the card payload is always a mapping.
    if isinstance(output, dict):
        card_payload = output
    else:
        card_payload = {"result": output}
    yield ctx.emit_tool_output_card(card_payload)
    # The terminal summary only applies to structured (dict) outputs.
    if not isinstance(output, dict):
        return
    if output.get("error"):
        message = f"Image generation failed: {output['error'][:60]}"
        level = "error"
    else:
        message = "Image generated successfully"
        level = "success"
    yield ctx.streaming_service.format_terminal_info(message, level)
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
"""generate_image: thinking-step copy."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.deliverables.shared.tool_input import (
|
||||
as_tool_input_dict,
|
||||
)
|
||||
from app.tasks.chat.streaming.handlers.tools.shared.model import (
|
||||
ToolStartThinking,
|
||||
)
|
||||
|
||||
|
||||
def resolve_start_thinking(tool_name: str, tool_input: Any) -> ToolStartThinking:
    """Build the in-progress thinking step shown while an image is generated."""
    del tool_name  # single-tool module; the name carries no information here
    args = as_tool_input_dict(tool_input)
    if isinstance(tool_input, dict):
        prompt = args.get("prompt", "")
    else:
        prompt = str(tool_input)
    ellipsis = '...' if len(prompt) > 80 else ''
    return ToolStartThinking(
        title="Generating image",
        items=[f"Prompt: {prompt[:80]}{ellipsis}"],
    )
|
||||
|
||||
|
||||
def resolve_completed_thinking(
    tool_name: str, tool_output: Any, last_items: list[str],
) -> tuple[str, list[str]]:
    """Append an image-generation success or error line to the prior items."""
    del tool_name
    succeeded = isinstance(tool_output, dict) and not tool_output.get("error")
    if succeeded:
        tail = "Image generated successfully"
    elif isinstance(tool_output, dict):
        tail = f"Error: {tool_output.get('error', 'Generation failed')}"
    else:
        tail = "Error: Generation failed"
    return ("Generating image", [*last_items, tail])
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
"""generate_podcast: tool card + queue / success / failure terminal lines."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterator
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.emission_context import (
|
||||
ToolCompletionEmissionContext,
|
||||
)
|
||||
|
||||
|
||||
def iter_completion_emission_frames(
    ctx: ToolCompletionEmissionContext,
) -> Iterator[str]:
    """Emit the generate_podcast card plus a queue/success/failure terminal line."""
    out = ctx.tool_output
    yield ctx.emit_tool_output_card(out if isinstance(out, dict) else {"result": out})
    if not isinstance(out, dict):
        return
    status = out.get("status")
    title = out.get('title', 'Podcast')
    if status in ("pending", "generating", "processing"):
        # Generation runs in the background; report it as queued.
        yield ctx.streaming_service.format_terminal_info(
            f"Podcast queued: {title}",
            "success",
        )
    elif status in ("ready", "success"):
        yield ctx.streaming_service.format_terminal_info(
            f"Podcast generated successfully: {title}",
            "success",
        )
    elif status in ("failed", "error"):
        yield ctx.streaming_service.format_terminal_info(
            f"Podcast generation failed: {out.get('error', 'Unknown error')}",
            "error",
        )
|
||||
|
|
@ -0,0 +1,80 @@
|
|||
"""generate_podcast: thinking-step copy."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.deliverables.shared.tool_input import (
|
||||
as_tool_input_dict,
|
||||
)
|
||||
from app.tasks.chat.streaming.handlers.tools.shared.model import (
|
||||
ToolStartThinking,
|
||||
)
|
||||
|
||||
|
||||
def resolve_start_thinking(tool_name: str, tool_input: Any) -> ToolStartThinking:
    """Build the in-progress thinking step for podcast generation."""
    del tool_name
    args = as_tool_input_dict(tool_input)
    if isinstance(tool_input, dict):
        podcast_title = args.get("podcast_title", "SurfSense Podcast")
        content_len = len(args.get("source_content", ""))
    else:
        podcast_title = "SurfSense Podcast"
        content_len = 0
    return ToolStartThinking(
        title="Generating podcast",
        items=[
            f"Title: {podcast_title}",
            f"Content: {content_len:,} characters",
            "Preparing audio generation...",
        ],
    )
|
||||
|
||||
|
||||
def resolve_completed_thinking(
    tool_name: str, tool_output: Any, last_items: list[str],
) -> tuple[str, list[str]]:
    """Resolve the completed thinking step for generate_podcast.

    Maps the tool's reported status onto user-facing progress items;
    unknown statuses keep ``last_items`` unchanged.

    Args:
        tool_name: Ignored; this module handles a single tool.
        tool_output: Raw tool output, expected to be a dict but tolerated
            as any type.
        last_items: Items shown while the tool was running.

    Returns:
        Tuple of (step title, completed item list).
    """
    del tool_name
    is_dict = isinstance(tool_output, dict)
    podcast_status = tool_output.get("status", "unknown") if is_dict else "unknown"
    podcast_title = tool_output.get("title", "Podcast") if is_dict else "Podcast"
    if podcast_status in ("pending", "generating", "processing"):
        completed = [
            f"Title: {podcast_title}",
            "Podcast generation started",
            "Processing in background...",
        ]
    elif podcast_status == "already_generating":
        completed = [
            f"Title: {podcast_title}",
            "Podcast already in progress",
            "Please wait for it to complete",
        ]
    elif podcast_status in ("failed", "error"):
        error_msg = (
            tool_output.get("error", "Unknown error")
            if is_dict
            else "Unknown error"
        )
        # str() guards against non-string error payloads, which would make
        # the [:50] slice raise TypeError.
        completed = [
            f"Title: {podcast_title}",
            f"Error: {str(error_msg)[:50]}",
        ]
    elif podcast_status in ("ready", "success"):
        completed = [
            f"Title: {podcast_title}",
            "Podcast ready",
        ]
    else:
        completed = last_items
    return ("Generating podcast", completed)
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
"""generate_report: full payload + terminal line."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterator
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.emission_context import (
|
||||
ToolCompletionEmissionContext,
|
||||
)
|
||||
|
||||
|
||||
def iter_completion_emission_frames(
    ctx: ToolCompletionEmissionContext,
) -> Iterator[str]:
    """Emit the generate_report card and a success/failure terminal line."""
    result = ctx.tool_output
    yield ctx.emit_tool_output_card(
        result if isinstance(result, dict) else {"result": result}
    )
    if isinstance(result, dict) and result.get("status") == "ready":
        words = result.get("word_count", 0)
        line = f"Report generated: {result.get('title', 'Report')} ({words:,} words)"
        yield ctx.streaming_service.format_terminal_info(line, "success")
    else:
        if isinstance(result, dict):
            reason = result.get("error", "Unknown error")
        else:
            reason = "Unknown error"
        yield ctx.streaming_service.format_terminal_info(
            f"Report generation failed: {reason}",
            "error",
        )
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
"""generate_report: thinking-step copy."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.deliverables.shared.tool_input import (
|
||||
as_tool_input_dict,
|
||||
)
|
||||
from app.tasks.chat.streaming.handlers.tools.shared.model import (
|
||||
ToolStartThinking,
|
||||
)
|
||||
|
||||
|
||||
def resolve_start_thinking(tool_name: str, tool_input: Any) -> ToolStartThinking:
    """Build the in-progress thinking step for report generation or revision."""
    del tool_name
    args = as_tool_input_dict(tool_input)
    if isinstance(tool_input, dict):
        report_topic = args.get("topic", "Report")
        # A parent report id marks this call as a revision pass.
        revising = bool(tool_input.get("parent_report_id"))
    else:
        report_topic = "Report"
        revising = False
    return ToolStartThinking(
        title="Revising report" if revising else "Generating report",
        items=[f"Topic: {report_topic}", "Analyzing source content..."],
    )
|
||||
|
||||
|
||||
def resolve_completed_thinking(
    tool_name: str, tool_output: Any, last_items: list[str],
) -> tuple[str, list[str]]:
    """Resolve the completed thinking step for generate_report.

    The step title distinguishes first-pass generation from a revision
    (driven by the output's ``is_revision`` flag); unknown statuses keep
    ``last_items`` unchanged.

    Args:
        tool_name: Ignored; this module handles a single tool.
        tool_output: Raw tool output, expected to be a dict but tolerated
            as any type.
        last_items: Items shown while the tool was running.

    Returns:
        Tuple of (step title, completed item list).
    """
    del tool_name
    is_dict = isinstance(tool_output, dict)
    report_status = tool_output.get("status", "unknown") if is_dict else "unknown"
    report_title = tool_output.get("title", "Report") if is_dict else "Report"
    word_count = tool_output.get("word_count", 0) if is_dict else 0
    is_revision = tool_output.get("is_revision", False) if is_dict else False
    step_title = "Revising report" if is_revision else "Generating report"

    if report_status == "ready":
        completed = [
            f"Topic: {report_title}",
            f"{word_count:,} words",
            "Report ready",
        ]
    elif report_status == "failed":
        # "failed" is only reachable for dict outputs (non-dicts map to
        # "unknown" above), so a plain .get is safe here.
        error_msg = tool_output.get("error", "Unknown error")
        # str() guards against non-string error payloads, which would make
        # the [:50] slice raise TypeError.
        completed = [
            f"Topic: {report_title}",
            f"Error: {str(error_msg)[:50]}",
        ]
    else:
        completed = last_items

    return (step_title, completed)
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
"""generate_resume: full payload + terminal line."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterator
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.emission_context import (
|
||||
ToolCompletionEmissionContext,
|
||||
)
|
||||
|
||||
|
||||
def iter_completion_emission_frames(
    ctx: ToolCompletionEmissionContext,
) -> Iterator[str]:
    """Emit the generate_resume card and a success/failure terminal line."""
    result = ctx.tool_output
    yield ctx.emit_tool_output_card(
        result if isinstance(result, dict) else {"result": result}
    )
    if isinstance(result, dict) and result.get("status") == "ready":
        yield ctx.streaming_service.format_terminal_info(
            f"Resume generated: {result.get('title', 'Resume')}",
            "success",
        )
    else:
        if isinstance(result, dict):
            reason = result.get("error", "Unknown error")
        else:
            reason = "Unknown error"
        yield ctx.streaming_service.format_terminal_info(
            f"Resume generation failed: {reason}",
            "error",
        )
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
"""generate_resume: generic thinking titles and items."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.default import (
|
||||
thinking as default_thinking,
|
||||
)
|
||||
from app.tasks.chat.streaming.handlers.tools.shared.model import (
|
||||
ToolStartThinking,
|
||||
)
|
||||
|
||||
|
||||
def resolve_start_thinking(tool_name: str, tool_input: Any) -> ToolStartThinking:
    """Delegate to the default thinking copy; generate_resume defines none of its own."""
    return default_thinking.resolve_start_thinking(tool_name, tool_input)
|
||||
|
||||
|
||||
def resolve_completed_thinking(
    tool_name: str, tool_output: Any, last_items: list[str],
) -> tuple[str, list[str]]:
    """Delegate to the default completed-thinking resolver (no resume-specific copy)."""
    return default_thinking.resolve_completed_thinking(
        tool_name, tool_output, last_items
    )
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
"""generate_video_presentation: tool card + terminal line."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterator
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.emission_context import (
|
||||
ToolCompletionEmissionContext,
|
||||
)
|
||||
|
||||
|
||||
def iter_completion_emission_frames(
    ctx: ToolCompletionEmissionContext,
) -> Iterator[str]:
    """Emit the generate_video_presentation card plus a queue/failure line."""
    result = ctx.tool_output
    yield ctx.emit_tool_output_card(
        result if isinstance(result, dict) else {"result": result}
    )
    if not isinstance(result, dict):
        return
    status = result.get("status")
    if status == "pending":
        # Generation runs in the background; report the job as queued.
        yield ctx.streaming_service.format_terminal_info(
            f"Video presentation queued: {result.get('title', 'Presentation')}",
            "success",
        )
    elif status == "failed":
        yield ctx.streaming_service.format_terminal_info(
            f"Presentation generation failed: {result.get('error', 'Unknown error')}",
            "error",
        )
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
"""generate_video_presentation: generic in-progress thinking; completion is status-driven."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.default import (
|
||||
thinking as default_thinking,
|
||||
)
|
||||
from app.tasks.chat.streaming.handlers.tools.shared.model import (
|
||||
ToolStartThinking,
|
||||
)
|
||||
|
||||
|
||||
def resolve_start_thinking(tool_name: str, tool_input: Any) -> ToolStartThinking:
    """Delegate to the generic default thinking copy for the start step."""
    return default_thinking.resolve_start_thinking(tool_name, tool_input)
|
||||
|
||||
|
||||
def resolve_completed_thinking(
    tool_name: str, tool_output: Any, last_items: list[str],
) -> tuple[str, list[str]]:
    """Resolve the completed thinking step for generate_video_presentation.

    Status-driven: queued/in-progress and failed outputs get specific copy;
    any other status keeps ``last_items`` unchanged.

    Args:
        tool_name: Ignored; this module handles a single tool.
        tool_output: Raw tool output, expected to be a dict but tolerated
            as any type.
        last_items: Items shown while the tool was running.

    Returns:
        Tuple of (step title, completed item list).
    """
    del tool_name
    is_dict = isinstance(tool_output, dict)
    vp_status = tool_output.get("status", "unknown") if is_dict else "unknown"
    vp_title = tool_output.get("title", "Presentation") if is_dict else "Presentation"
    if vp_status in ("pending", "generating"):
        completed = [
            f"Title: {vp_title}",
            "Presentation generation started",
            "Processing in background...",
        ]
    elif vp_status == "failed":
        # "failed" is only reachable for dict outputs; str() guards against
        # non-string error payloads that would break the [:50] slice.
        error_msg = tool_output.get("error", "Unknown error")
        completed = [
            f"Title: {vp_title}",
            f"Error: {str(error_msg)[:50]}",
        ]
    else:
        completed = last_items
    return ("Generating video presentation", completed)
|
||||
|
|
@ -0,0 +1,16 @@
|
|||
"""save_document: default completion card and terminal line."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterator
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.default import emission as _default
|
||||
from app.tasks.chat.streaming.handlers.tools.emission_context import (
|
||||
ToolCompletionEmissionContext,
|
||||
)
|
||||
|
||||
|
||||
def iter_completion_emission_frames(
    ctx: ToolCompletionEmissionContext,
) -> Iterator[str]:
    """save_document needs no custom card; reuse the default emission frames."""
    yield from _default.iter_completion_emission_frames(ctx)
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
"""save_document: thinking-step copy."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.deliverables.shared.tool_input import (
|
||||
as_tool_input_dict,
|
||||
)
|
||||
from app.tasks.chat.streaming.handlers.tools.shared.model import (
|
||||
ToolStartThinking,
|
||||
)
|
||||
|
||||
|
||||
def resolve_start_thinking(tool_name: str, tool_input: Any) -> ToolStartThinking:
    """Build the in-progress thinking step shown while a document is saved."""
    del tool_name
    args = as_tool_input_dict(tool_input)
    if isinstance(tool_input, dict):
        doc_title = args.get("title", "")
    else:
        doc_title = str(tool_input)
    truncated = len(doc_title) > 60
    display_title = doc_title[:60] + ("…" if truncated else "")
    return ToolStartThinking(title="Saving document", items=[display_title])
|
||||
|
||||
|
||||
def resolve_completed_thinking(
    tool_name: str, tool_output: Any, last_items: list[str],
) -> tuple[str, list[str]]:
    """Resolve the completed thinking step for save_document.

    Appends the (truncated) error text when the result mentions "Error",
    otherwise a fixed success line.

    Args:
        tool_name: Ignored; this module handles a single tool.
        tool_output: Raw tool output, expected to be a dict but tolerated
            as any type.
        last_items: Items shown while the tool was running.

    Returns:
        Tuple of (step title, completed item list).
    """
    del tool_name
    # str() guards against a non-string "result" value in the dict branch,
    # which would make the "Error" containment check raise TypeError.
    result_str = str(
        tool_output.get("result", "")
        if isinstance(tool_output, dict)
        else tool_output
    )
    is_error = "Error" in result_str
    completed = [
        *last_items,
        result_str[:80] if is_error else "Saved to knowledge base",
    ]
    return ("Saving document", completed)
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
"""Tool-call args for deliverable thinking modules."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def as_tool_input_dict(tool_input: Any) -> dict[str, Any]:
    """Return *tool_input* unchanged when it is a dict; otherwise an empty dict."""
    if isinstance(tool_input, dict):
        return tool_input
    return {}
|
||||
|
|
@ -0,0 +1,12 @@
|
|||
from __future__ import annotations
|
||||
|
||||
# Tool names whose streaming output is routed through the deliverables handlers.
DELIVERABLE_TOOLS: frozenset[str] = frozenset(
    (
        "generate_image",
        "generate_podcast",
        "generate_report",
        "generate_resume",
        "generate_video_presentation",
        "save_document",
    )
)
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
"""scrape_webpage: redacted payload + terminal summary."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Iterator
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.emission_context import (
|
||||
ToolCompletionEmissionContext,
|
||||
)
|
||||
|
||||
|
||||
def iter_completion_emission_frames(
    ctx: ToolCompletionEmissionContext,
) -> Iterator[str]:
    """Emit a redacted scrape_webpage card (body shortened) plus a summary line."""
    out = ctx.tool_output
    if not isinstance(out, dict):
        yield ctx.emit_tool_output_card({"result": out})
    else:
        # Redact the (potentially huge) page body down to a short preview.
        card = {key: value for key, value in out.items() if key != "content"}
        if "content" in out:
            content = out.get("content", "")
            if len(content) > 500:
                card["content_preview"] = content[:500] + "..."
            else:
                card["content_preview"] = content
        yield ctx.emit_tool_output_card(card)

    if isinstance(out, dict) and "error" not in out:
        title = out.get("title", "Webpage")
        word_count = out.get("word_count", 0)
        suffix = '...' if len(title) > 40 else ''
        yield ctx.streaming_service.format_terminal_info(
            f"Scraped: {title[:40]}{suffix} ({word_count:,} words)",
            "success",
        )
    else:
        if isinstance(out, dict):
            error_msg = out.get("error", "Failed to scrape")
        else:
            error_msg = "Failed to scrape"
        yield ctx.streaming_service.format_terminal_info(
            f"Scrape failed: {error_msg}",
            "error",
        )
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
"""Tool-call args for scrape_webpage thinking."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def as_tool_input_dict(tool_input: Any) -> dict[str, Any]:
    """Coerce raw tool input to a dict; non-dict inputs become an empty dict."""
    if not isinstance(tool_input, dict):
        return {}
    return tool_input
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
"""scrape_webpage: thinking-step copy."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.scrape_webpage.shared.tool_input import (
|
||||
as_tool_input_dict,
|
||||
)
|
||||
from app.tasks.chat.streaming.handlers.tools.shared.model import (
|
||||
ToolStartThinking,
|
||||
)
|
||||
|
||||
|
||||
def resolve_start_thinking(tool_name: str, tool_input: Any) -> ToolStartThinking:
    """Build the in-progress thinking step shown while a page is scraped."""
    del tool_name
    args = as_tool_input_dict(tool_input)
    if isinstance(tool_input, dict):
        url = args.get("url", "")
    else:
        url = str(tool_input)
    suffix = '...' if len(url) > 80 else ''
    return ToolStartThinking(
        title="Scraping webpage",
        items=[f"URL: {url[:80]}{suffix}"],
    )
|
||||
|
||||
|
||||
def resolve_completed_thinking(
    tool_name: str, tool_output: Any, last_items: list[str],
) -> tuple[str, list[str]]:
    """Append scrape result lines (title + word count, or error) to the items."""
    del tool_name
    if not isinstance(tool_output, dict):
        extra = ["Content extracted"]
    elif "error" in tool_output:
        extra = [f"Error: {tool_output.get('error', 'Failed to scrape')[:50]}"]
    else:
        title = tool_output.get("title", "Webpage")
        word_count = tool_output.get("word_count", 0)
        ellipsis = '...' if len(title) > 50 else ''
        extra = [
            f"Title: {title[:50]}{ellipsis}",
            f"Extracted: {word_count:,} words",
        ]
    return ("Scraping webpage", [*last_items, *extra])
|
||||
|
|
@ -0,0 +1,41 @@
|
|||
"""web_search: citations parsed from provider XML."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from collections.abc import Iterator
|
||||
|
||||
from app.tasks.chat.streaming.handlers.tools.emission_context import (
|
||||
ToolCompletionEmissionContext,
|
||||
)
|
||||
|
||||
|
||||
def iter_completion_emission_frames(
    ctx: ToolCompletionEmissionContext,
) -> Iterator[str]:
    """Parse provider XML into a citations map and emit one completion card."""
    out = ctx.tool_output
    xml = out.get("result", str(out)) if isinstance(out, dict) else str(out)
    citations: dict[str, dict[str, str]] = {}
    # First pass: collect <title>/<url> pairs; first occurrence of a URL wins.
    title_url_re = re.compile(
        r"<title><!\[CDATA\[(.*?)\]\]></title>\s*<url><!\[CDATA\[(.*?)\]\]></url>"
    )
    for match in title_url_re.finditer(xml):
        title = match.group(1).strip()
        url = match.group(2).strip()
        if url.startswith("http") and url not in citations:
            citations[url] = {"title": title}
    # Second pass: attach a short snippet to each already-known URL.
    chunk_re = re.compile(
        r"<chunk\s+id='([^']*)'><!\[CDATA\[([\s\S]*?)\]\]></chunk>"
    )
    for match in chunk_re.finditer(xml):
        chunk_url = match.group(1).strip()
        content = match.group(2).strip()
        if chunk_url.startswith("http") and chunk_url in citations and content:
            snippet = content[:200] + "…" if len(content) > 200 else content
            citations[chunk_url]["snippet"] = snippet
    yield ctx.emit_tool_output_card(
        {"status": "completed", "citations": citations},
    )
|
||||
Loading…
Add table
Add a link
Reference in a new issue