mirror of
https://github.com/MODSetter/SurfSense.git
synced 2026-05-03 12:52:39 +02:00
feat: prompt caching
Some checks are pending
Build and Push Docker Images / tag_release (push) Waiting to run
Build and Push Docker Images / build (./surfsense_backend, ./surfsense_backend/Dockerfile, backend, surfsense-backend, ubuntu-24.04-arm, linux/arm64, arm64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_backend, ./surfsense_backend/Dockerfile, backend, surfsense-backend, ubuntu-latest, linux/amd64, amd64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_web, ./surfsense_web/Dockerfile, web, surfsense-web, ubuntu-24.04-arm, linux/arm64, arm64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_web, ./surfsense_web/Dockerfile, web, surfsense-web, ubuntu-latest, linux/amd64, amd64) (push) Blocked by required conditions
Build and Push Docker Images / create_manifest (backend, surfsense-backend) (push) Blocked by required conditions
Build and Push Docker Images / create_manifest (web, surfsense-web) (push) Blocked by required conditions
Some checks are pending
Build and Push Docker Images / tag_release (push) Waiting to run
Build and Push Docker Images / build (./surfsense_backend, ./surfsense_backend/Dockerfile, backend, surfsense-backend, ubuntu-24.04-arm, linux/arm64, arm64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_backend, ./surfsense_backend/Dockerfile, backend, surfsense-backend, ubuntu-latest, linux/amd64, amd64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_web, ./surfsense_web/Dockerfile, web, surfsense-web, ubuntu-24.04-arm, linux/arm64, arm64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_web, ./surfsense_web/Dockerfile, web, surfsense-web, ubuntu-latest, linux/amd64, amd64) (push) Blocked by required conditions
Build and Push Docker Images / create_manifest (backend, surfsense-backend) (push) Blocked by required conditions
Build and Push Docker Images / create_manifest (web, surfsense-web) (push) Blocked by required conditions
- Updated `litellm` dependency version from `1.83.4` to `1.83.7`. - Adjusted `aiohttp` version from `3.13.5` to `3.13.4` in the lock file. - Implemented `apply_litellm_prompt_caching` in `chat_deepagent.py` to improve prompt caching. - Added model name resolution logic in `chat_deepagent.py` to ensure correct provider-variant dispatch. - Enhanced `llm_config.py` to configure prompt caching for various LLM providers. - Updated tests to verify correct model name forwarding and prompt caching behavior.
This commit is contained in:
parent
360b5f8e3a
commit
e57c3a7d0c
12 changed files with 877 additions and 156 deletions
|
|
@ -10,7 +10,9 @@ We use ``create_agent`` (from langchain) rather than ``create_deep_agent``
|
|||
This lets us swap in ``SurfSenseFilesystemMiddleware`` — a customisable
|
||||
subclass of the default ``FilesystemMiddleware`` — while preserving every
|
||||
other behaviour that ``create_deep_agent`` provides (todo-list, subagents,
|
||||
summarisation, prompt-caching, etc.).
|
||||
summarisation, etc.). Prompt caching is configured at LLM-build time via
|
||||
``apply_litellm_prompt_caching`` (LiteLLM-native, multi-provider) rather
|
||||
than as a middleware.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
|
|
@ -33,7 +35,6 @@ from langchain.agents.middleware import (
|
|||
TodoListMiddleware,
|
||||
ToolCallLimitMiddleware,
|
||||
)
|
||||
from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware
|
||||
from langchain_core.language_models import BaseChatModel
|
||||
from langchain_core.tools import BaseTool
|
||||
from langgraph.types import Checkpointer
|
||||
|
|
@ -74,6 +75,7 @@ from app.agents.new_chat.plugin_loader import (
|
|||
load_allowed_plugin_names_from_env,
|
||||
load_plugin_middlewares,
|
||||
)
|
||||
from app.agents.new_chat.prompt_caching import apply_litellm_prompt_caching
|
||||
from app.agents.new_chat.subagents import build_specialized_subagents
|
||||
from app.agents.new_chat.system_prompt import (
|
||||
build_configurable_system_prompt,
|
||||
|
|
@ -94,6 +96,39 @@ from app.utils.perf import get_perf_logger
|
|||
|
||||
_perf_log = get_perf_logger()
|
||||
|
||||
|
||||
def _resolve_prompt_model_name(
|
||||
agent_config: AgentConfig | None,
|
||||
llm: BaseChatModel,
|
||||
) -> str | None:
|
||||
"""Resolve the model id to feed to provider-variant detection.
|
||||
|
||||
Preference order (matches the established idiom in
|
||||
``llm_router_service.py`` — see ``params.get("base_model") or
|
||||
params.get("model", "")`` usages there):
|
||||
|
||||
1. ``agent_config.litellm_params["base_model"]`` — required for Azure
|
||||
deployments where ``model_name`` is the deployment slug, not the
|
||||
underlying family. Without this, a deployment named e.g.
|
||||
``"prod-chat-001"`` would silently miss every provider regex.
|
||||
2. ``agent_config.model_name`` — the user's configured model id.
|
||||
3. ``getattr(llm, "model", None)`` — fallback for direct callers that
|
||||
don't supply an ``AgentConfig`` (currently a defensive path; all
|
||||
production callers pass ``agent_config``).
|
||||
|
||||
Returns ``None`` when nothing is available; ``compose_system_prompt``
|
||||
treats that as the ``"default"`` variant (no provider block emitted).
|
||||
"""
|
||||
if agent_config is not None:
|
||||
params = agent_config.litellm_params or {}
|
||||
base_model = params.get("base_model")
|
||||
if isinstance(base_model, str) and base_model.strip():
|
||||
return base_model
|
||||
if agent_config.model_name:
|
||||
return agent_config.model_name
|
||||
return getattr(llm, "model", None)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Connector Type Mapping
|
||||
# =============================================================================
|
||||
|
|
@ -279,6 +314,14 @@ async def create_surfsense_deep_agent(
|
|||
)
|
||||
"""
|
||||
_t_agent_total = time.perf_counter()
|
||||
|
||||
# Layer thread-aware prompt caching onto the LLM. Idempotent with the
|
||||
# build-time call in ``llm_config.py``; this run merely adds
|
||||
# ``prompt_cache_key=f"surfsense-thread-{thread_id}"`` for OpenAI-family
|
||||
# configs now that ``thread_id`` is known. No-op when ``thread_id`` is
|
||||
# None or the provider is non-OpenAI-family.
|
||||
apply_litellm_prompt_caching(llm, agent_config=agent_config, thread_id=thread_id)
|
||||
|
||||
filesystem_selection = filesystem_selection or FilesystemSelection()
|
||||
backend_resolver = build_backend_resolver(
|
||||
filesystem_selection,
|
||||
|
|
@ -398,6 +441,7 @@ async def create_surfsense_deep_agent(
|
|||
enabled_tool_names=_enabled_tool_names,
|
||||
disabled_tool_names=_user_disabled_tool_names,
|
||||
mcp_connector_tools=_mcp_connector_tools,
|
||||
model_name=_resolve_prompt_model_name(agent_config, llm),
|
||||
)
|
||||
else:
|
||||
system_prompt = build_surfsense_system_prompt(
|
||||
|
|
@ -405,6 +449,7 @@ async def create_surfsense_deep_agent(
|
|||
enabled_tool_names=_enabled_tool_names,
|
||||
disabled_tool_names=_user_disabled_tool_names,
|
||||
mcp_connector_tools=_mcp_connector_tools,
|
||||
model_name=_resolve_prompt_model_name(agent_config, llm),
|
||||
)
|
||||
_perf_log.info(
|
||||
"[create_agent] System prompt built in %.3fs", time.perf_counter() - _t0
|
||||
|
|
@ -568,7 +613,6 @@ def _build_compiled_agent_blocking(
|
|||
),
|
||||
create_surfsense_compaction_middleware(llm, StateBackend),
|
||||
PatchToolCallsMiddleware(),
|
||||
AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
|
||||
]
|
||||
|
||||
general_purpose_spec: SubAgent = { # type: ignore[typeddict-unknown-key]
|
||||
|
|
@ -1006,12 +1050,12 @@ def _build_compiled_agent_blocking(
|
|||
action_log_mw,
|
||||
PatchToolCallsMiddleware(),
|
||||
DedupHITLToolCallsMiddleware(agent_tools=list(tools)),
|
||||
# Plugin slot — sits just before AnthropicCache so plugin-side
|
||||
# transforms see the final tool result and run before any
|
||||
# caching heuristics. Multiple plugins in declared order; loader
|
||||
# filtered by the admin allowlist already.
|
||||
# Plugin slot — sits at the tail so plugin-side transforms see the
|
||||
# final tool result. Prompt caching is now applied at LLM build time
|
||||
# via ``apply_litellm_prompt_caching`` (see prompt_caching.py), so no
|
||||
# caching middleware is needed here. Multiple plugins run in declared
|
||||
# order; loader filtered by the admin allowlist already.
|
||||
*plugin_middlewares,
|
||||
AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore"),
|
||||
]
|
||||
deepagent_middleware = [m for m in deepagent_middleware if m is not None]
|
||||
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ from litellm import get_model_info
|
|||
from sqlalchemy import select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.agents.new_chat.prompt_caching import apply_litellm_prompt_caching
|
||||
from app.services.llm_router_service import (
|
||||
AUTO_MODE_ID,
|
||||
ChatLiteLLMRouter,
|
||||
|
|
@ -494,6 +495,11 @@ def create_chat_litellm_from_config(llm_config: dict) -> ChatLiteLLM | None:
|
|||
|
||||
llm = SanitizedChatLiteLLM(**litellm_kwargs)
|
||||
_attach_model_profile(llm, model_string)
|
||||
# Configure LiteLLM-native prompt caching (cache_control_injection_points
|
||||
# for Anthropic/Bedrock/Vertex/Gemini/Azure-AI/OpenRouter/Databricks/etc.).
|
||||
# ``agent_config=None`` here — the YAML path doesn't have provider intent
|
||||
# in a structured form, so we set only the universal injection points.
|
||||
apply_litellm_prompt_caching(llm)
|
||||
return llm
|
||||
|
||||
|
||||
|
|
@ -518,7 +524,16 @@ def create_chat_litellm_from_agent_config(
|
|||
print("Error: Auto mode requested but LLM Router not initialized")
|
||||
return None
|
||||
try:
|
||||
return get_auto_mode_llm()
|
||||
router_llm = get_auto_mode_llm()
|
||||
if router_llm is not None:
|
||||
# Universal cache_control_injection_points only — auto-mode
|
||||
# fans out across providers, so OpenAI-only kwargs (e.g.
|
||||
# ``prompt_cache_key``) are left off here. ``drop_params``
|
||||
# would strip them at the provider boundary anyway, but
|
||||
# there's no point setting them when we don't know the
|
||||
# destination.
|
||||
apply_litellm_prompt_caching(router_llm, agent_config=agent_config)
|
||||
return router_llm
|
||||
except Exception as e:
|
||||
print(f"Error creating ChatLiteLLMRouter: {e}")
|
||||
return None
|
||||
|
|
@ -549,4 +564,9 @@ def create_chat_litellm_from_agent_config(
|
|||
|
||||
llm = SanitizedChatLiteLLM(**litellm_kwargs)
|
||||
_attach_model_profile(llm, model_string)
|
||||
# Build-time prompt caching: sets ``cache_control_injection_points`` for
|
||||
# all providers and (for OpenAI/DeepSeek/xAI) ``prompt_cache_retention``.
|
||||
# Per-thread ``prompt_cache_key`` is layered on later in
|
||||
# ``create_surfsense_deep_agent`` once ``thread_id`` is known.
|
||||
apply_litellm_prompt_caching(llm, agent_config=agent_config)
|
||||
return llm
|
||||
|
|
|
|||
166
surfsense_backend/app/agents/new_chat/prompt_caching.py
Normal file
166
surfsense_backend/app/agents/new_chat/prompt_caching.py
Normal file
|
|
@ -0,0 +1,166 @@
|
|||
"""LiteLLM-native prompt caching configuration for SurfSense agents.
|
||||
|
||||
Replaces the legacy ``AnthropicPromptCachingMiddleware`` (which never
|
||||
activated for our LiteLLM-based stack — its ``isinstance(model, ChatAnthropic)``
|
||||
gate always failed) with LiteLLM's universal caching mechanism.
|
||||
|
||||
Coverage:
|
||||
|
||||
- Marker-based providers (need ``cache_control`` injection, which LiteLLM
|
||||
performs automatically when ``cache_control_injection_points`` is set):
|
||||
``anthropic/``, ``bedrock/``, ``vertex_ai/``, ``gemini/``, ``azure_ai/``,
|
||||
``openrouter/`` (Claude/Gemini/MiniMax/GLM/z-ai routes), ``databricks/``
|
||||
(Claude), ``dashscope/`` (Qwen), ``minimax/``, ``zai/`` (GLM).
|
||||
- Auto-cached (LiteLLM strips the marker silently): ``openai/``,
|
||||
``deepseek/``, ``xai/`` — these caches automatically for prompts ≥1024
|
||||
tokens and surface ``prompt_cache_key`` / ``prompt_cache_retention``.
|
||||
|
||||
We inject **two** breakpoints per request:
|
||||
|
||||
- ``role: system`` — pins the SurfSense system prompt (provider variant,
|
||||
citation rules, tool catalog, KB tree, skills metadata) into the cache.
|
||||
- ``index: -1`` — pins the latest message so multi-turn savings compound:
|
||||
Anthropic-family providers use longest-matching-prefix lookup, so turn
|
||||
N+1 still reads turn N's cache up to the shared prefix.
|
||||
|
||||
For OpenAI-family configs we additionally pass:
|
||||
|
||||
- ``prompt_cache_key=f"surfsense-thread-{thread_id}"`` — routing hint that
|
||||
raises hit rate by sending requests with a shared prefix to the same
|
||||
backend.
|
||||
- ``prompt_cache_retention="24h"`` — extends cache TTL beyond the default
|
||||
5-10 min in-memory cache.
|
||||
|
||||
Safety net: ``litellm.drop_params=True`` is set globally in
|
||||
``app.services.llm_service`` at module-load time. Any kwarg the destination
|
||||
provider doesn't recognise is auto-stripped at the provider transformer
|
||||
layer, so an OpenAI→Bedrock auto-mode fallback can't 400 on
|
||||
``prompt_cache_key`` etc.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from langchain_core.language_models import BaseChatModel
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from app.agents.new_chat.llm_config import AgentConfig
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Two-breakpoint policy: system + latest message. See module docstring for
|
||||
# rationale. Anthropic limits requests to 4 ``cache_control`` blocks; we
|
||||
# use 2 here, leaving headroom for Phase-2 tool caching.
|
||||
_DEFAULT_INJECTION_POINTS: tuple[dict[str, Any], ...] = (
|
||||
{"location": "message", "role": "system"},
|
||||
{"location": "message", "index": -1},
|
||||
)
|
||||
|
||||
# Providers (uppercase ``AgentConfig.provider`` values) that natively expose
|
||||
# OpenAI-style automatic prompt caching with ``prompt_cache_key`` and
|
||||
# ``prompt_cache_retention`` kwargs. Strict whitelist — many other providers
|
||||
# in ``PROVIDER_MAP`` route through litellm's ``openai`` prefix without
|
||||
# implementing the OpenAI prompt-cache surface (e.g. MOONSHOT, ZHIPU,
|
||||
# MINIMAX), so we can't infer family from the litellm prefix alone.
|
||||
_OPENAI_FAMILY_PROVIDERS: frozenset[str] = frozenset({"OPENAI", "DEEPSEEK", "XAI"})
|
||||
|
||||
|
||||
def _is_router_llm(llm: BaseChatModel) -> bool:
|
||||
"""Detect ``ChatLiteLLMRouter`` (auto-mode) without an eager import.
|
||||
|
||||
Importing ``app.services.llm_router_service`` at module-load time would
|
||||
create a cycle via ``llm_config -> prompt_caching -> llm_router_service``.
|
||||
Class-name comparison is sufficient since the class is defined in a
|
||||
single place.
|
||||
"""
|
||||
return type(llm).__name__ == "ChatLiteLLMRouter"
|
||||
|
||||
|
||||
def _is_openai_family_config(agent_config: AgentConfig | None) -> bool:
    """Whether the config targets an OpenAI-style prompt-cache surface.

    Deliberately strict: answers ``True`` only when the user explicitly
    selected OPENAI, DEEPSEEK, or XAI in their ``NewLLMConfig`` /
    ``YAMLConfig``. Auto-mode and custom providers answer ``False``
    because the actual destination model cannot be known statically.
    """
    if agent_config is None or not agent_config.provider:
        return False
    # Auto-mode fans out across providers; custom providers are opaque.
    if agent_config.is_auto_mode or agent_config.custom_provider:
        return False
    return agent_config.provider.upper() in _OPENAI_FAMILY_PROVIDERS
|
||||
|
||||
|
||||
def _get_or_init_model_kwargs(llm: BaseChatModel) -> dict[str, Any] | None:
|
||||
"""Return ``llm.model_kwargs`` as a writable dict, or ``None`` to bail.
|
||||
|
||||
Initialises the field to ``{}`` when present-but-None on a Pydantic v2
|
||||
model. Returns ``None`` if the LLM type doesn't expose a writable
|
||||
``model_kwargs`` attribute (caller should treat as no-op).
|
||||
"""
|
||||
model_kwargs = getattr(llm, "model_kwargs", None)
|
||||
if isinstance(model_kwargs, dict):
|
||||
return model_kwargs
|
||||
try:
|
||||
llm.model_kwargs = {} # type: ignore[attr-defined]
|
||||
except Exception:
|
||||
return None
|
||||
refreshed = getattr(llm, "model_kwargs", None)
|
||||
return refreshed if isinstance(refreshed, dict) else None
|
||||
|
||||
|
||||
def apply_litellm_prompt_caching(
    llm: BaseChatModel,
    *,
    agent_config: AgentConfig | None = None,
    thread_id: int | None = None,
) -> None:
    """Configure LiteLLM prompt caching on a ChatLiteLLM/ChatLiteLLMRouter.

    Idempotent — any key already present in ``llm.model_kwargs`` (for
    example user overrides coming from ``agent_config.litellm_params``)
    is left untouched. Mutates ``llm.model_kwargs`` in place; the kwargs
    reach ``litellm.completion`` via ``ChatLiteLLM._default_params`` and
    via the ``self.model_kwargs`` merge in the custom
    ``ChatLiteLLMRouter``.

    Args:
        llm: ChatLiteLLM, SanitizedChatLiteLLM, or ChatLiteLLMRouter
            instance.
        agent_config: Optional ``AgentConfig`` driving provider-specific
            behaviour. When omitted (or auto-mode), only the universal
            ``cache_control_injection_points`` are set.
        thread_id: Optional thread id used to build a per-thread
            ``prompt_cache_key`` for OpenAI-family providers. Caching
            still works without it (server-side automatic), but the key
            improves backend routing affinity and therefore hit rate.
    """
    kwargs = _get_or_init_model_kwargs(llm)
    if kwargs is None:
        # LLM type exposes no writable model_kwargs — nothing we can do.
        logger.debug(
            "apply_litellm_prompt_caching: %s exposes no writable model_kwargs; skipping",
            type(llm).__name__,
        )
        return

    # Universal breakpoints (system prompt + latest message), written for
    # every provider unless the user already supplied their own.
    kwargs.setdefault(
        "cache_control_injection_points",
        [dict(point) for point in _DEFAULT_INJECTION_POINTS],
    )

    # OpenAI-only extras are gated hard: the auto-mode router fans out
    # across mixed providers, and non-OpenAI-family configs don't expose
    # the prompt_cache_key / prompt_cache_retention surface. drop_params
    # would strip them anyway, but setting them would be wasteful.
    if _is_router_llm(llm) or not _is_openai_family_config(agent_config):
        return

    if thread_id is not None:
        kwargs.setdefault("prompt_cache_key", f"surfsense-thread-{thread_id}")
    kwargs.setdefault("prompt_cache_retention", "24h")
|
||||
|
|
@ -28,6 +28,7 @@ from litellm.exceptions import (
|
|||
BadRequestError as LiteLLMBadRequestError,
|
||||
ContextWindowExceededError,
|
||||
)
|
||||
from pydantic import Field
|
||||
|
||||
from app.utils.perf import get_perf_logger
|
||||
|
||||
|
|
@ -573,6 +574,11 @@ class ChatLiteLLMRouter(BaseChatModel):
|
|||
# Public attributes that Pydantic will manage
|
||||
model: str = "auto"
|
||||
streaming: bool = True
|
||||
# Static kwargs that flow through to ``litellm.completion(...)`` on every
|
||||
# invocation (e.g. ``cache_control_injection_points`` set by
|
||||
# ``apply_litellm_prompt_caching``). Per-call ``**kwargs`` from
|
||||
# ``invoke()`` still take precedence — see ``_generate``/``_astream``.
|
||||
model_kwargs: dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
# Bound tools and tool choice for tool calling
|
||||
_bound_tools: list[dict] | None = None
|
||||
|
|
@ -898,13 +904,16 @@ class ChatLiteLLMRouter(BaseChatModel):
|
|||
logger.warning(f"Failed to convert tool {tool}: {e}")
|
||||
continue
|
||||
|
||||
# Create a new instance with tools bound
|
||||
# Create a new instance with tools bound. Carry through ``model_kwargs``
|
||||
# so static settings (e.g. cache_control_injection_points) survive the
|
||||
# bind_tools rebuild.
|
||||
return ChatLiteLLMRouter(
|
||||
router=self._router,
|
||||
bound_tools=formatted_tools if formatted_tools else None,
|
||||
tool_choice=tool_choice,
|
||||
model=self.model,
|
||||
streaming=self.streaming,
|
||||
model_kwargs=dict(self.model_kwargs),
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
|
|
@ -929,8 +938,10 @@ class ChatLiteLLMRouter(BaseChatModel):
|
|||
formatted_messages = self._convert_messages(messages)
|
||||
formatted_messages = self._trim_messages_to_fit_context(formatted_messages)
|
||||
|
||||
# Add tools if bound
|
||||
call_kwargs = {**kwargs}
|
||||
# Merge static model_kwargs (e.g. cache_control_injection_points) under
|
||||
# per-call kwargs so callers can still override per invocation. Then add
|
||||
# bound tools.
|
||||
call_kwargs = {**self.model_kwargs, **kwargs}
|
||||
if self._bound_tools:
|
||||
call_kwargs["tools"] = self._bound_tools
|
||||
if self._tool_choice is not None:
|
||||
|
|
@ -997,8 +1008,10 @@ class ChatLiteLLMRouter(BaseChatModel):
|
|||
formatted_messages = self._convert_messages(messages)
|
||||
formatted_messages = self._trim_messages_to_fit_context(formatted_messages)
|
||||
|
||||
# Add tools if bound
|
||||
call_kwargs = {**kwargs}
|
||||
# Merge static model_kwargs (e.g. cache_control_injection_points) under
|
||||
# per-call kwargs so callers can still override per invocation. Then add
|
||||
# bound tools.
|
||||
call_kwargs = {**self.model_kwargs, **kwargs}
|
||||
if self._bound_tools:
|
||||
call_kwargs["tools"] = self._bound_tools
|
||||
if self._tool_choice is not None:
|
||||
|
|
@ -1060,8 +1073,10 @@ class ChatLiteLLMRouter(BaseChatModel):
|
|||
formatted_messages = self._convert_messages(messages)
|
||||
formatted_messages = self._trim_messages_to_fit_context(formatted_messages)
|
||||
|
||||
# Add tools if bound
|
||||
call_kwargs = {**kwargs}
|
||||
# Merge static model_kwargs (e.g. cache_control_injection_points) under
|
||||
# per-call kwargs so callers can still override per invocation. Then add
|
||||
# bound tools.
|
||||
call_kwargs = {**self.model_kwargs, **kwargs}
|
||||
if self._bound_tools:
|
||||
call_kwargs["tools"] = self._bound_tools
|
||||
if self._tool_choice is not None:
|
||||
|
|
@ -1110,8 +1125,10 @@ class ChatLiteLLMRouter(BaseChatModel):
|
|||
formatted_messages = self._convert_messages(messages)
|
||||
formatted_messages = self._trim_messages_to_fit_context(formatted_messages)
|
||||
|
||||
# Add tools if bound
|
||||
call_kwargs = {**kwargs}
|
||||
# Merge static model_kwargs (e.g. cache_control_injection_points) under
|
||||
# per-call kwargs so callers can still override per invocation. Then add
|
||||
# bound tools.
|
||||
call_kwargs = {**self.model_kwargs, **kwargs}
|
||||
if self._bound_tools:
|
||||
call_kwargs["tools"] = self._bound_tools
|
||||
if self._tool_choice is not None:
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ dependencies = [
|
|||
"deepagents>=0.4.12",
|
||||
"stripe>=15.0.0",
|
||||
"azure-ai-documentintelligence>=1.0.2",
|
||||
"litellm>=1.83.4",
|
||||
"litellm>=1.83.7",
|
||||
"langchain-litellm>=0.6.4",
|
||||
]
|
||||
|
||||
|
|
|
|||
|
|
@ -226,6 +226,31 @@ class TestCompose:
|
|||
# Default block should NOT be present
|
||||
assert "<knowledge_base_only_policy>" not in prompt
|
||||
|
||||
    def test_provider_hints_render_with_custom_system_instructions(
        self, fixed_today: datetime
    ) -> None:
        """Regression guard for the always-append decision: provider hints
        append AFTER a custom system prompt.

        Provider hints are stylistic nudges (parallel tool-call rules,
        formatting guidance, etc.) that help the model regardless of
        what the system instructions say. Suppressing them when a
        custom prompt is set would partially defeat the per-family
        prompt machinery.
        """
        prompt = compose_system_prompt(
            today=fixed_today,
            custom_system_instructions="You are a custom assistant.",
            model_name="anthropic/claude-3-5-sonnet",
        )
        assert "You are a custom assistant." in prompt
        assert "<provider_hints>" in prompt
        # The custom prompt must come BEFORE the provider hints so the
        # user's framing isn't drowned out by the stylistic nudges.
        # (str.index raises ValueError if either marker vanished, which
        # would already have tripped the membership asserts above.)
        assert prompt.index("You are a custom assistant.") < prompt.index(
            "<provider_hints>"
        )
|
||||
|
||||
def test_use_default_false_with_no_custom_yields_no_system_block(
|
||||
self, fixed_today: datetime
|
||||
) -> None:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,350 @@
|
|||
"""Tests for ``apply_litellm_prompt_caching`` in
|
||||
:mod:`app.agents.new_chat.prompt_caching`.
|
||||
|
||||
The helper replaces the legacy ``AnthropicPromptCachingMiddleware`` (which
|
||||
never activated for our LiteLLM stack) with LiteLLM-native multi-provider
|
||||
prompt caching. It mutates ``llm.model_kwargs`` so the kwargs flow to
|
||||
``litellm.completion(...)``. The tests below pin its public contract:
|
||||
|
||||
1. Always sets BOTH ``role: system`` and ``index: -1`` injection points so
|
||||
savings compound across multi-turn conversations on Anthropic-family
|
||||
providers.
|
||||
2. Adds ``prompt_cache_key``/``prompt_cache_retention`` only for
|
||||
single-model OPENAI/DEEPSEEK/XAI configs (where OpenAI's automatic
|
||||
prompt-cache surface is available).
|
||||
3. Treats ``ChatLiteLLMRouter`` (auto-mode) as universal-only — no
|
||||
OpenAI-only kwargs because the router fans out across providers.
|
||||
4. Idempotent: user-supplied values in ``model_kwargs`` are preserved.
|
||||
5. Defensive: LLMs without a writable ``model_kwargs`` are silently
|
||||
skipped rather than raising.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
|
||||
from app.agents.new_chat.llm_config import AgentConfig
|
||||
from app.agents.new_chat.prompt_caching import apply_litellm_prompt_caching
|
||||
|
||||
pytestmark = pytest.mark.unit
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Test doubles
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class _FakeLLM:
|
||||
"""Stand-in for ``ChatLiteLLM``/``SanitizedChatLiteLLM``.
|
||||
|
||||
The helper only inspects ``getattr(llm, "model_kwargs", None)``,
|
||||
``getattr(llm, "model", None)``, and ``type(llm).__name__``. A simple
|
||||
object suffices — we don't need to spin up real LangChain/LiteLLM
|
||||
machinery for unit tests of the helper's logic.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model: str = "openai/gpt-4o",
|
||||
model_kwargs: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
self.model = model
|
||||
self.model_kwargs: dict[str, Any] = dict(model_kwargs) if model_kwargs else {}
|
||||
|
||||
|
||||
class ChatLiteLLMRouter:
    """Name-only impostor of the production auto-mode router.

    The helper's router gate is the stringly-typed check
    ``type(llm).__name__ == "ChatLiteLLMRouter"`` (deliberate, to dodge
    an import cycle with ``app.services.llm_router_service``). Declaring
    a local class with the identical name therefore drives that code
    path without ever instantiating a real ``Router``.
    """

    def __init__(self) -> None:
        # Mirror the two attributes the helper may read.
        self.model = "auto"
        self.model_kwargs: dict[str, Any] = {}
|
||||
|
||||
|
||||
def _make_cfg(**overrides: Any) -> AgentConfig:
    """Construct an ``AgentConfig`` pre-filled with sane test defaults."""
    base: dict[str, Any] = {
        "provider": "OPENAI",
        "model_name": "gpt-4o",
        "api_key": "k",
    }
    # Caller-supplied keys win over the defaults.
    return AgentConfig(**(base | overrides))
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# (a) Universal injection points
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_sets_both_cache_control_injection_points_with_no_config() -> None:
    """Bare call (no agent_config, no thread_id) still sets the two
    universal breakpoints — these cost nothing on providers that don't
    consume them and unlock caching on every supported provider."""
    llm = _FakeLLM()

    apply_litellm_prompt_caching(llm)

    points = llm.model_kwargs["cache_control_injection_points"]
    # Exactly the system-prompt and latest-message breakpoints — no extras.
    assert {"location": "message", "role": "system"} in points
    assert {"location": "message", "index": -1} in points
    assert len(points) == 2
|
||||
|
||||
|
||||
def test_injection_points_set_for_anthropic_config() -> None:
    """Anthropic-family configs need the marker — verify it lands."""
    cfg = _make_cfg(provider="ANTHROPIC", model_name="claude-3-5-sonnet")
    llm = _FakeLLM(model="anthropic/claude-3-5-sonnet")

    apply_litellm_prompt_caching(llm, agent_config=cfg)

    # Universal injection points are written regardless of provider family.
    assert "cache_control_injection_points" in llm.model_kwargs
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# (b) Idempotency / user override wins
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_does_not_overwrite_user_supplied_cache_control_injection_points() -> None:
    """Users who set their own injection points (e.g. with ``ttl: "1h"``
    via ``litellm_params``) keep them — the helper merges, never
    clobbers."""
    user_points = [
        {"location": "message", "role": "system", "ttl": "1h"},
    ]
    llm = _FakeLLM(
        model_kwargs={"cache_control_injection_points": user_points},
    )

    apply_litellm_prompt_caching(llm)

    # Identity (is), not equality — the exact user-supplied list object
    # must survive, proving the helper never rebuilt or copied it.
    assert llm.model_kwargs["cache_control_injection_points"] is user_points
|
||||
|
||||
|
||||
def test_idempotent_when_called_multiple_times() -> None:
    """Build-time + thread-time double-call must be a no-op the second time."""
    cfg = _make_cfg(provider="OPENAI")
    llm = _FakeLLM()

    # Snapshot everything the first call wrote, then re-apply and compare.
    apply_litellm_prompt_caching(llm, agent_config=cfg, thread_id=1)
    snapshot = {
        "cache_control_injection_points": list(
            llm.model_kwargs["cache_control_injection_points"]
        ),
        "prompt_cache_key": llm.model_kwargs["prompt_cache_key"],
        "prompt_cache_retention": llm.model_kwargs["prompt_cache_retention"],
    }
    apply_litellm_prompt_caching(llm, agent_config=cfg, thread_id=1)

    assert (
        llm.model_kwargs["cache_control_injection_points"]
        == snapshot["cache_control_injection_points"]
    )
    assert llm.model_kwargs["prompt_cache_key"] == snapshot["prompt_cache_key"]
    assert (
        llm.model_kwargs["prompt_cache_retention"] == snapshot["prompt_cache_retention"]
    )
|
||||
|
||||
|
||||
def test_does_not_overwrite_user_supplied_prompt_cache_key() -> None:
    """A pre-set ``prompt_cache_key`` (e.g. tenant-aware override via
    ``litellm_params``) wins over our default per-thread key."""
    cfg = _make_cfg(provider="OPENAI")
    llm = _FakeLLM(model_kwargs={"prompt_cache_key": "tenant-abc"})

    apply_litellm_prompt_caching(llm, agent_config=cfg, thread_id=42)

    # The per-thread default ("surfsense-thread-42") must NOT replace it.
    assert llm.model_kwargs["prompt_cache_key"] == "tenant-abc"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# (c) OpenAI-family extras (OPENAI / DEEPSEEK / XAI)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@pytest.mark.parametrize("provider", ["OPENAI", "DEEPSEEK", "XAI"])
def test_sets_openai_family_extras(provider: str) -> None:
    """OpenAI-style providers gain ``prompt_cache_key`` (raises hit rate
    via routing affinity) and ``prompt_cache_retention="24h"`` (extends
    cache TTL beyond the default 5-10 min)."""
    llm = _FakeLLM()

    apply_litellm_prompt_caching(
        llm, agent_config=_make_cfg(provider=provider), thread_id=42
    )

    kwargs = llm.model_kwargs
    assert kwargs["prompt_cache_key"] == "surfsense-thread-42"
    assert kwargs["prompt_cache_retention"] == "24h"
|
||||
|
||||
|
||||
def test_skips_prompt_cache_key_when_no_thread_id() -> None:
    """No thread id means no per-thread key can be built. Retention is
    still useful, so it is set anyway (it's free)."""
    llm = _FakeLLM()

    apply_litellm_prompt_caching(
        llm, agent_config=_make_cfg(provider="OPENAI"), thread_id=None
    )

    assert "prompt_cache_key" not in llm.model_kwargs
    assert llm.model_kwargs["prompt_cache_retention"] == "24h"
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "provider",
    ["ANTHROPIC", "BEDROCK", "VERTEX_AI", "GOOGLE_AI_STUDIO", "GROQ", "MOONSHOT"],
)
def test_no_openai_extras_for_other_providers(provider: str) -> None:
    """Providers outside the OpenAI family don't expose ``prompt_cache_key``,
    so it is skipped. ``cache_control_injection_points`` is universal and
    still applied."""
    llm = _FakeLLM()

    apply_litellm_prompt_caching(
        llm, agent_config=_make_cfg(provider=provider), thread_id=42
    )

    kwargs = llm.model_kwargs
    assert "prompt_cache_key" not in kwargs
    assert "prompt_cache_retention" not in kwargs
    assert "cache_control_injection_points" in kwargs
|
||||
|
||||
|
||||
def test_no_openai_extras_in_auto_mode() -> None:
    """Auto-mode fans out across mixed providers — OpenAI-only kwargs
    cannot be statically targeted, so only the universal injection points
    are set."""
    llm = _FakeLLM()

    apply_litellm_prompt_caching(
        llm, agent_config=AgentConfig.from_auto_mode(), thread_id=42
    )

    kwargs = llm.model_kwargs
    assert "prompt_cache_key" not in kwargs
    assert "prompt_cache_retention" not in kwargs
    assert "cache_control_injection_points" in kwargs
|
||||
|
||||
|
||||
def test_no_openai_extras_for_custom_provider() -> None:
    """Custom providers route through arbitrary user-supplied prefixes —
    OpenAI-family compatibility is not inferred for them."""
    cfg = _make_cfg(provider="OPENAI", custom_provider="my_proxy")
    llm = _FakeLLM()

    apply_litellm_prompt_caching(llm, agent_config=cfg, thread_id=42)

    kwargs = llm.model_kwargs
    assert "prompt_cache_key" not in kwargs
    assert "prompt_cache_retention" not in kwargs
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# (d) ChatLiteLLMRouter — universal injection points only
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_router_llm_gets_only_universal_injection_points() -> None:
    """A ``ChatLiteLLMRouter`` dispatches requests across provider
    deployments, so even an OpenAI-flavoured config must yield only the
    universal injection points — OpenAI-only kwargs would be wasted (or
    stripped by ``drop_params``) on non-OpenAI legs."""
    router = ChatLiteLLMRouter()

    apply_litellm_prompt_caching(
        router, agent_config=_make_cfg(provider="OPENAI"), thread_id=42
    )

    assert "cache_control_injection_points" in router.model_kwargs
    assert "prompt_cache_key" not in router.model_kwargs
    assert "prompt_cache_retention" not in router.model_kwargs
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# (e) Defensive paths
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_handles_llm_with_no_writable_model_kwargs() -> None:
    """LLM implementations without a writable ``model_kwargs`` (fakes,
    minimal subclasses) must be skipped silently — raising here would
    crash the entire LLM build path over a non-critical optimisation."""

    class _FrozenLLM:
        # ``__slots__`` blocks attribute creation, so ``setattr`` raises.
        __slots__ = ("model",)

        def __init__(self) -> None:
            self.model = "openai/gpt-4o"

    # Must not raise.
    apply_litellm_prompt_caching(_FrozenLLM())
|
||||
|
||||
|
||||
def test_initialises_missing_model_kwargs_dict() -> None:
    """A present-but-``None`` ``model_kwargs`` (the Pydantic v2 default
    pattern when no factory is set) must be initialised to an empty dict
    before the helper mutates it."""

    class _NullKwargsLLM:
        def __init__(self) -> None:
            self.model = "openai/gpt-4o"
            self.model_kwargs: dict[str, Any] | None = None

    llm = _NullKwargsLLM()

    apply_litellm_prompt_caching(llm)

    assert isinstance(llm.model_kwargs, dict)
    assert "cache_control_injection_points" in llm.model_kwargs
|
||||
|
||||
|
||||
def test_falls_back_to_llm_model_prefix_when_no_agent_config() -> None:
    """Direct caller path (e.g. ``create_chat_litellm_from_config`` for
    YAML configs without a structured ``AgentConfig``): with no
    ``agent_config`` only the universal injection points are set — no
    OpenAI-family extras even for an ``openai/`` prefix. Conservative:
    better to miss the speedup than silently misroute."""
    llm = _FakeLLM(model="openai/gpt-4o")

    apply_litellm_prompt_caching(llm, agent_config=None, thread_id=99)

    kwargs = llm.model_kwargs
    assert "cache_control_injection_points" in kwargs
    assert "prompt_cache_key" not in kwargs
    assert "prompt_cache_retention" not in kwargs
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# (f) drop_params safety net (regression guard for #19346)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def test_litellm_drop_params_is_globally_enabled() -> None:
    """Pin the global ``litellm.drop_params=True`` set as an import
    side effect of :mod:`app.services.llm_service`: any
    ``prompt_cache_key`` / ``prompt_cache_retention`` placed on an
    OpenAI-family config is then auto-stripped when the request later
    routes to a non-supporting provider (e.g. via auto-mode router
    fallback). Losing this invariant would mean Bedrock/Vertex 400s on
    ``prompt_cache_key`` (regression guard for #19346).
    """
    import litellm

    import app.services.llm_service  # noqa: F401 (side-effect: sets globals)

    assert litellm.drop_params is True
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Regression note: LiteLLM #15696 (multi-content-block last message)
|
||||
# ---------------------------------------------------------------------------
|
||||
#
|
||||
# Before LiteLLM 1.81 a list-form last message ``[block_a, block_b]``
|
||||
# would get ``cache_control`` applied to *every* content block instead
|
||||
# of only the last one — wasting cache breakpoints and triggering 400s
|
||||
# on Anthropic when it exceeded the 4-breakpoint limit. Fixed in
|
||||
# https://github.com/BerriAI/litellm/pull/15699.
|
||||
#
|
||||
# We pin ``litellm>=1.83.7`` in ``pyproject.toml`` (well past the fix).
|
||||
# An end-to-end behavioural test would need to run ``litellm.completion``
|
||||
# through the Anthropic transformer, which is integration territory and
|
||||
# better covered by LiteLLM's own test suite. The unit guard here is the
|
||||
# version pin plus the build-time ``model_kwargs`` shape we verify above.
|
||||
|
|
@ -0,0 +1,117 @@
|
|||
"""Tests for ``_resolve_prompt_model_name`` in :mod:`app.agents.new_chat.chat_deepagent`.
|
||||
|
||||
The helper picks the model id fed to ``detect_provider_variant`` so the
|
||||
right ``<provider_hints>`` block lands in the system prompt. The tests
|
||||
below pin its preference order:
|
||||
|
||||
1. ``agent_config.litellm_params["base_model"]`` (Azure-correct).
|
||||
2. ``agent_config.model_name``.
|
||||
3. ``getattr(llm, "model", None)``.
|
||||
|
||||
Without (1) an Azure deployment named e.g. ``"prod-chat-001"`` would
|
||||
silently miss every provider regex.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
|
||||
from app.agents.new_chat.chat_deepagent import _resolve_prompt_model_name
|
||||
from app.agents.new_chat.llm_config import AgentConfig
|
||||
|
||||
pytestmark = pytest.mark.unit
|
||||
|
||||
|
||||
def _make_cfg(**overrides) -> AgentConfig:
    """Build an ``AgentConfig`` with sensible defaults for the helper test."""
    params: dict = {"provider": "OPENAI", "model_name": "x", "api_key": "k"}
    params.update(overrides)
    return AgentConfig(**params)
|
||||
|
||||
|
||||
class _FakeLLM:
|
||||
"""Stand-in for a ``ChatLiteLLM`` / ``ChatLiteLLMRouter`` instance.
|
||||
|
||||
The resolver only reads the ``.model`` attribute via ``getattr``,
|
||||
matching the established idiom in ``knowledge_search.py`` /
|
||||
``stream_new_chat.py`` / ``document_summarizer.py``.
|
||||
"""
|
||||
|
||||
def __init__(self, model: str | None) -> None:
|
||||
self.model = model
|
||||
|
||||
|
||||
def test_prefers_litellm_params_base_model_over_deployment_name() -> None:
    """An Azure deployment slug must NOT shadow the underlying model family.

    This is exactly the failure mode the helper prevents: a deployment
    named ``"azure/prod-chat-001"`` matches no provider regex on its own,
    but the real family ``"gpt-4o"`` lives in
    ``litellm_params["base_model"]`` and routes to ``openai_classic``.
    """
    cfg = _make_cfg(
        model_name="azure/prod-chat-001",
        litellm_params={"base_model": "gpt-4o"},
    )
    llm = _FakeLLM("azure/prod-chat-001")
    assert _resolve_prompt_model_name(cfg, llm) == "gpt-4o"
|
||||
|
||||
|
||||
def test_falls_back_to_model_name_when_litellm_params_is_none() -> None:
    """With no ``litellm_params`` at all, ``model_name`` wins."""
    cfg = _make_cfg(
        model_name="anthropic/claude-3-5-sonnet",
        litellm_params=None,
    )
    llm = _FakeLLM("anthropic/claude-3-5-sonnet")
    assert _resolve_prompt_model_name(cfg, llm) == "anthropic/claude-3-5-sonnet"
|
||||
|
||||
|
||||
def test_handles_litellm_params_without_base_model_key() -> None:
    """``litellm_params`` without a ``base_model`` key falls through to
    ``model_name``."""
    cfg = _make_cfg(
        model_name="openai/gpt-4o",
        litellm_params={"temperature": 0.5},
    )
    llm = _FakeLLM("openai/gpt-4o")
    assert _resolve_prompt_model_name(cfg, llm) == "openai/gpt-4o"
|
||||
|
||||
|
||||
def test_ignores_blank_base_model() -> None:
    """Whitespace-only ``base_model`` must not shadow ``model_name``."""
    cfg = _make_cfg(
        model_name="openai/gpt-4o",
        litellm_params={"base_model": " "},
    )
    llm = _FakeLLM("openai/gpt-4o")
    assert _resolve_prompt_model_name(cfg, llm) == "openai/gpt-4o"
|
||||
|
||||
|
||||
def test_ignores_non_string_base_model() -> None:
    """Defensive: a non-string ``base_model`` should not crash the resolver."""
    cfg = _make_cfg(
        model_name="openai/gpt-4o",
        litellm_params={"base_model": 42},
    )
    llm = _FakeLLM("openai/gpt-4o")
    assert _resolve_prompt_model_name(cfg, llm) == "openai/gpt-4o"
|
||||
|
||||
|
||||
def test_falls_back_to_llm_model_when_no_agent_config() -> None:
    """No ``agent_config`` -> use ``llm.model`` directly. Defensive path
    for direct callers; production callers always supply a config."""
    got = _resolve_prompt_model_name(None, _FakeLLM("openai/gpt-4o-mini"))
    assert got == "openai/gpt-4o-mini"
|
||||
|
||||
|
||||
def test_returns_none_when_nothing_available() -> None:
    """``compose_system_prompt`` treats ``None`` as the ``"default"``
    variant and emits no provider block."""
    assert _resolve_prompt_model_name(None, _FakeLLM(None)) is None
|
||||
|
||||
|
||||
def test_auto_mode_resolves_to_auto_string() -> None:
    """Auto mode -> ``"auto"``. ``detect_provider_variant("auto")``
    returns ``"default"``, which is correct: the child model isn't known
    until the LiteLLM Router dispatches."""
    cfg = AgentConfig.from_auto_mode()
    assert _resolve_prompt_model_name(cfg, _FakeLLM("auto")) == "auto"
|
||||
|
|
@ -372,3 +372,39 @@ def test_turn_status_sse_contract_exists():
|
|||
assert 'type: "data-turn-status"' in state_source
|
||||
assert 'case "data-turn-status":' in pipeline_source
|
||||
assert "end_turn(str(chat_id))" in stream_source
|
||||
|
||||
|
||||
def test_chat_deepagent_forwards_resolved_model_name_to_both_builders():
    """Regression guard: both system-prompt builders in chat_deepagent.py
    must be called with ``model_name=_resolve_prompt_model_name(...)`` so
    the provider-variant dispatch can render the right ``<provider_hints>``
    block. Without this the prompt silently falls back to the empty
    ``"default"`` variant — the original bug being fixed.

    Mirrors :func:`test_stream_error_emission_keeps_machine_error_codes`
    in style: it inspects the module source text with a regex to enforce
    the call-site shape, not just the wrapper layer (the wrappers already
    forward ``model_name`` correctly, so testing them would not catch the
    actual missed plumbing).
    """
    import app.agents.new_chat.chat_deepagent as chat_deepagent_module

    source = inspect.getsource(chat_deepagent_module)

    # The helper itself must be defined.
    assert "def _resolve_prompt_model_name(" in source

    # Both builder call sites must forward the resolved model name.
    # DOTALL because the kwargs are split over multiple lines.
    call_site = re.compile(
        r"build_(?:surfsense|configurable)_system_prompt\([^)]*"
        r"model_name=_resolve_prompt_model_name\(",
        re.DOTALL,
    )
    matches = call_site.findall(source)
    assert len(matches) == 2, (
        "Expected both system-prompt builder call sites to forward "
        "`model_name=_resolve_prompt_model_name(...)`, found "
        f"{len(matches)}"
    )
|
||||
|
|
|
|||
160
surfsense_backend/uv.lock
generated
160
surfsense_backend/uv.lock
generated
|
|
@ -62,7 +62,7 @@ wheels = [
|
|||
|
||||
[[package]]
|
||||
name = "aiohttp"
|
||||
version = "3.13.5"
|
||||
version = "3.13.4"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "aiohappyeyeballs" },
|
||||
|
|
@ -73,76 +73,76 @@ dependencies = [
|
|||
{ name = "propcache" },
|
||||
{ name = "yarl" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/77/9a/152096d4808df8e4268befa55fba462f440f14beab85e8ad9bf990516918/aiohttp-3.13.5.tar.gz", hash = "sha256:9d98cc980ecc96be6eb4c1994ce35d28d8b1f5e5208a23b421187d1209dbb7d1", size = 7858271 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/45/4a/064321452809dae953c1ed6e017504e72551a26b6f5708a5a80e4bf556ff/aiohttp-3.13.4.tar.gz", hash = "sha256:d97a6d09c66087890c2ab5d49069e1e570583f7ac0314ecf98294c1b6aaebd38", size = 7859748 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/be/6f/353954c29e7dcce7cf00280a02c75f30e133c00793c7a2ed3776d7b2f426/aiohttp-3.13.5-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:023ecba036ddd840b0b19bf195bfae970083fd7024ce1ac22e9bba90464620e9", size = 748876 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f5/1b/428a7c64687b3b2e9cd293186695affc0e1e54a445d0361743b231f11066/aiohttp-3.13.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:15c933ad7920b7d9a20de151efcd05a6e38302cbf0e10c9b2acb9a42210a2416", size = 499557 },
|
||||
{ url = "https://files.pythonhosted.org/packages/29/47/7be41556bfbb6917069d6a6634bb7dd5e163ba445b783a90d40f5ac7e3a7/aiohttp-3.13.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ab2899f9fa2f9f741896ebb6fa07c4c883bfa5c7f2ddd8cf2aafa86fa981b2d2", size = 500258 },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/84/c9ecc5828cb0b3695856c07c0a6817a99d51e2473400f705275a2b3d9239/aiohttp-3.13.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60eaa2d440cd4707696b52e40ed3e2b0f73f65be07fd0ef23b6b539c9c0b0b4", size = 1749199 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/d3/3c6d610e66b495657622edb6ae7c7fd31b2e9086b4ec50b47897ad6042a9/aiohttp-3.13.5-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:55b3bdd3292283295774ab585160c4004f4f2f203946997f49aac032c84649e9", size = 1721013 },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/a0/24409c12217456df0bae7babe3b014e460b0b38a8e60753d6cb339f6556d/aiohttp-3.13.5-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c2b2355dc094e5f7d45a7bb262fe7207aa0460b37a0d87027dcf21b5d890e7d5", size = 1781501 },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/9d/b65ec649adc5bccc008b0957a9a9c691070aeac4e41cea18559fef49958b/aiohttp-3.13.5-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b38765950832f7d728297689ad78f5f2cf79ff82487131c4d26fe6ceecdc5f8e", size = 1878981 },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/d8/8d44036d7eb7b6a8ec4c5494ea0c8c8b94fbc0ed3991c1a7adf230df03bf/aiohttp-3.13.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b18f31b80d5a33661e08c89e202edabf1986e9b49c42b4504371daeaa11b47c1", size = 1767934 },
|
||||
{ url = "https://files.pythonhosted.org/packages/31/04/d3f8211f273356f158e3464e9e45484d3fb8c4ce5eb2f6fe9405c3273983/aiohttp-3.13.5-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:33add2463dde55c4f2d9635c6ab33ce154e5ecf322bd26d09af95c5f81cfa286", size = 1566671 },
|
||||
{ url = "https://files.pythonhosted.org/packages/41/db/073e4ebe00b78e2dfcacff734291651729a62953b48933d765dc513bf798/aiohttp-3.13.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:327cc432fdf1356fb4fbc6fe833ad4e9f6aacb71a8acaa5f1855e4b25910e4a9", size = 1705219 },
|
||||
{ url = "https://files.pythonhosted.org/packages/48/45/7dfba71a2f9fd97b15c95c06819de7eb38113d2cdb6319669195a7d64270/aiohttp-3.13.5-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:7c35b0bf0b48a70b4cb4fc5d7bed9b932532728e124874355de1a0af8ec4bc88", size = 1743049 },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/71/901db0061e0f717d226386a7f471bb59b19566f2cae5f0d93874b017271f/aiohttp-3.13.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:df23d57718f24badef8656c49743e11a89fd6f5358fa8a7b96e728fda2abf7d3", size = 1749557 },
|
||||
{ url = "https://files.pythonhosted.org/packages/08/d5/41eebd16066e59cd43728fe74bce953d7402f2b4ddfdfef2c0e9f17ca274/aiohttp-3.13.5-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:02e048037a6501a5ec1f6fc9736135aec6eb8a004ce48838cb951c515f32c80b", size = 1558931 },
|
||||
{ url = "https://files.pythonhosted.org/packages/30/e6/4a799798bf05740e66c3a1161079bda7a3dd8e22ca392481d7a7f9af82a6/aiohttp-3.13.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:31cebae8b26f8a615d2b546fee45d5ffb76852ae6450e2a03f42c9102260d6fe", size = 1774125 },
|
||||
{ url = "https://files.pythonhosted.org/packages/84/63/7749337c90f92bc2cb18f9560d67aa6258c7060d1397d21529b8004fcf6f/aiohttp-3.13.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:888e78eb5ca55a615d285c3c09a7a91b42e9dd6fc699b166ebd5dee87c9ccf14", size = 1732427 },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/de/cf2f44ff98d307e72fb97d5f5bbae3bfcb442f0ea9790c0bf5c5c2331404/aiohttp-3.13.5-cp312-cp312-win32.whl", hash = "sha256:8bd3ec6376e68a41f9f95f5ed170e2fcf22d4eb27a1f8cb361d0508f6e0557f3", size = 433534 },
|
||||
{ url = "https://files.pythonhosted.org/packages/aa/ca/eadf6f9c8fa5e31d40993e3db153fb5ed0b11008ad5d9de98a95045bed84/aiohttp-3.13.5-cp312-cp312-win_amd64.whl", hash = "sha256:110e448e02c729bcebb18c60b9214a87ba33bac4a9fa5e9a5f139938b56c6cb1", size = 460446 },
|
||||
{ url = "https://files.pythonhosted.org/packages/78/e9/d76bf503005709e390122d34e15256b88f7008e246c4bdbe915cd4f1adce/aiohttp-3.13.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a5029cc80718bbd545123cd8fe5d15025eccaaaace5d0eeec6bd556ad6163d61", size = 742930 },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/00/4b7b70223deaebd9bb85984d01a764b0d7bd6526fcdc73cca83bcbe7243e/aiohttp-3.13.5-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4bb6bf5811620003614076bdc807ef3b5e38244f9d25ca5fe888eaccea2a9832", size = 496927 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9c/f5/0fb20fb49f8efdcdce6cd8127604ad2c503e754a8f139f5e02b01626523f/aiohttp-3.13.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a84792f8631bf5a94e52d9cc881c0b824ab42717165a5579c760b830d9392ac9", size = 497141 },
|
||||
{ url = "https://files.pythonhosted.org/packages/3b/86/b7c870053e36a94e8951b803cb5b909bfbc9b90ca941527f5fcafbf6b0fa/aiohttp-3.13.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:57653eac22c6a4c13eb22ecf4d673d64a12f266e72785ab1c8b8e5940d0e8090", size = 1732476 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b5/e5/4e161f84f98d80c03a238671b4136e6530453d65262867d989bbe78244d0/aiohttp-3.13.5-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5e5f7debc7a57af53fdf5c5009f9391d9f4c12867049d509bf7bb164a6e295b", size = 1706507 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/56/ea11a9f01518bd5a2a2fcee869d248c4b8a0cfa0bb13401574fa31adf4d4/aiohttp-3.13.5-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c719f65bebcdf6716f10e9eff80d27567f7892d8988c06de12bbbd39307c6e3a", size = 1773465 },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/40/333ca27fb74b0383f17c90570c748f7582501507307350a79d9f9f3c6eb1/aiohttp-3.13.5-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d97f93fdae594d886c5a866636397e2bcab146fd7a132fd6bb9ce182224452f8", size = 1873523 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f0/d2/e2f77eef1acb7111405433c707dc735e63f67a56e176e72e9e7a2cd3f493/aiohttp-3.13.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3df334e39d4c2f899a914f1dba283c1aadc311790733f705182998c6f7cae665", size = 1754113 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fb/56/3f653d7f53c89669301ec9e42c95233e2a0c0a6dd051269e6e678db4fdb0/aiohttp-3.13.5-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fe6970addfea9e5e081401bcbadf865d2b6da045472f58af08427e108d618540", size = 1562351 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/a6/9b3e91eb8ae791cce4ee736da02211c85c6f835f1bdfac0594a8a3b7018c/aiohttp-3.13.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7becdf835feff2f4f335d7477f121af787e3504b48b449ff737afb35869ba7bb", size = 1693205 },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/fc/bfb437a99a2fcebd6b6eaec609571954de2ed424f01c352f4b5504371dd3/aiohttp-3.13.5-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:676e5651705ad5d8a70aeb8eb6936c436d8ebbd56e63436cb7dd9bb36d2a9a46", size = 1730618 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/b6/c8534862126191a034f68153194c389addc285a0f1347d85096d349bbc15/aiohttp-3.13.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9b16c653d38eb1a611cc898c41e76859ca27f119d25b53c12875fd0474ae31a8", size = 1745185 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/93/4ca8ee2ef5236e2707e0fd5fecb10ce214aee1ff4ab307af9c558bda3b37/aiohttp-3.13.5-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:999802d5fa0389f58decd24b537c54aa63c01c3219ce17d1214cbda3c2b22d2d", size = 1557311 },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/ae/76177b15f18c5f5d094f19901d284025db28eccc5ae374d1d254181d33f4/aiohttp-3.13.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ec707059ee75732b1ba130ed5f9580fe10ff75180c812bc267ded039db5128c6", size = 1773147 },
|
||||
{ url = "https://files.pythonhosted.org/packages/01/a4/62f05a0a98d88af59d93b7fcac564e5f18f513cb7471696ac286db970d6a/aiohttp-3.13.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2d6d44a5b48132053c2f6cd5c8cb14bc67e99a63594e336b0f2af81e94d5530c", size = 1730356 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/85/fc8601f59dfa8c9523808281f2da571f8b4699685f9809a228adcc90838d/aiohttp-3.13.5-cp313-cp313-win32.whl", hash = "sha256:329f292ed14d38a6c4c435e465f48bebb47479fd676a0411936cc371643225cc", size = 432637 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/1b/ac685a8882896acf0f6b31d689e3792199cfe7aba37969fa91da63a7fa27/aiohttp-3.13.5-cp313-cp313-win_amd64.whl", hash = "sha256:69f571de7500e0557801c0b51f4780482c0ec5fe2ac851af5a92cfce1af1cb83", size = 458896 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5d/ce/46572759afc859e867a5bc8ec3487315869013f59281ce61764f76d879de/aiohttp-3.13.5-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:eb4639f32fd4a9904ab8fb45bf3383ba71137f3d9d4ba25b3b3f3109977c5b8c", size = 745721 },
|
||||
{ url = "https://files.pythonhosted.org/packages/13/fe/8a2efd7626dbe6049b2ef8ace18ffda8a4dfcbe1bcff3ac30c0c7575c20b/aiohttp-3.13.5-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:7e5dc4311bd5ac493886c63cbf76ab579dbe4641268e7c74e48e774c74b6f2be", size = 497663 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9b/91/cc8cc78a111826c54743d88651e1687008133c37e5ee615fee9b57990fac/aiohttp-3.13.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:756c3c304d394977519824449600adaf2be0ccee76d206ee339c5e76b70ded25", size = 499094 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0a/33/a8362cb15cf16a3af7e86ed11962d5cd7d59b449202dc576cdc731310bde/aiohttp-3.13.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecc26751323224cf8186efcf7fbcbc30f4e1d8c7970659daf25ad995e4032a56", size = 1726701 },
|
||||
{ url = "https://files.pythonhosted.org/packages/45/0c/c091ac5c3a17114bd76cbf85d674650969ddf93387876cf67f754204bd77/aiohttp-3.13.5-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10a75acfcf794edf9d8db50e5a7ec5fc818b2a8d3f591ce93bc7b1210df016d2", size = 1683360 },
|
||||
{ url = "https://files.pythonhosted.org/packages/23/73/bcee1c2b79bc275e964d1446c55c54441a461938e70267c86afaae6fba27/aiohttp-3.13.5-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0f7a18f258d124cd678c5fe072fe4432a4d5232b0657fca7c1847f599233c83a", size = 1773023 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c7/ef/720e639df03004fee2d869f771799d8c23046dec47d5b81e396c7cda583a/aiohttp-3.13.5-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:df6104c009713d3a89621096f3e3e88cc323fd269dbd7c20afe18535094320be", size = 1853795 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bd/c9/989f4034fb46841208de7aeeac2c6d8300745ab4f28c42f629ba77c2d916/aiohttp-3.13.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:241a94f7de7c0c3b616627aaad530fe2cb620084a8b144d3be7b6ecfe95bae3b", size = 1730405 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ce/75/ee1fd286ca7dc599d824b5651dad7b3be7ff8d9a7e7b3fe9820d9180f7db/aiohttp-3.13.5-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c974fb66180e58709b6fc402846f13791240d180b74de81d23913abe48e96d94", size = 1558082 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c3/20/1e9e6650dfc436340116b7aa89ff8cb2bbdf0abc11dfaceaad8f74273a10/aiohttp-3.13.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:6e27ea05d184afac78aabbac667450c75e54e35f62238d44463131bd3f96753d", size = 1692346 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/40/8ebc6658d48ea630ac7903912fe0dd4e262f0e16825aa4c833c56c9f1f56/aiohttp-3.13.5-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a79a6d399cef33a11b6f004c67bb07741d91f2be01b8d712d52c75711b1e07c7", size = 1698891 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/78/ea0ae5ec8ba7a5c10bdd6e318f1ba5e76fcde17db8275188772afc7917a4/aiohttp-3.13.5-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:c632ce9c0b534fbe25b52c974515ed674937c5b99f549a92127c85f771a78772", size = 1742113 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8a/66/9d308ed71e3f2491be1acb8769d96c6f0c47d92099f3bc9119cada27b357/aiohttp-3.13.5-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:fceedde51fbd67ee2bcc8c0b33d0126cc8b51ef3bbde2f86662bd6d5a6f10ec5", size = 1553088 },
|
||||
{ url = "https://files.pythonhosted.org/packages/da/a6/6cc25ed8dfc6e00c90f5c6d126a98e2cf28957ad06fa1036bd34b6f24a2c/aiohttp-3.13.5-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f92995dfec9420bb69ae629abf422e516923ba79ba4403bc750d94fb4a6c68c1", size = 1757976 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c1/2b/cce5b0ffe0de99c83e5e36d8f828e4161e415660a9f3e58339d07cce3006/aiohttp-3.13.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:20ae0ff08b1f2c8788d6fb85afcb798654ae6ba0b747575f8562de738078457b", size = 1712444 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/cf/9e1795b4160c58d29421eafd1a69c6ce351e2f7c8d3c6b7e4ca44aea1a5b/aiohttp-3.13.5-cp314-cp314-win32.whl", hash = "sha256:b20df693de16f42b2472a9c485e1c948ee55524786a0a34345511afdd22246f3", size = 438128 },
|
||||
{ url = "https://files.pythonhosted.org/packages/22/4d/eaedff67fc805aeba4ba746aec891b4b24cebb1a7d078084b6300f79d063/aiohttp-3.13.5-cp314-cp314-win_amd64.whl", hash = "sha256:f85c6f327bf0b8c29da7d93b1cabb6363fb5e4e160a32fa241ed2dce21b73162", size = 464029 },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/11/c27d9332ee20d68dd164dc12a6ecdef2e2e35ecc97ed6cf0d2442844624b/aiohttp-3.13.5-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:1efb06900858bb618ff5cee184ae2de5828896c448403d51fb633f09e109be0a", size = 778758 },
|
||||
{ url = "https://files.pythonhosted.org/packages/04/fb/377aead2e0a3ba5f09b7624f702a964bdf4f08b5b6728a9799830c80041e/aiohttp-3.13.5-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:fee86b7c4bd29bdaf0d53d14739b08a106fdda809ca5fe032a15f52fae5fe254", size = 512883 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bb/a6/aa109a33671f7a5d3bd78b46da9d852797c5e665bfda7d6b373f56bff2ec/aiohttp-3.13.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:20058e23909b9e65f9da62b396b77dfa95965cbe840f8def6e572538b1d32e36", size = 516668 },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/b3/ca078f9f2fa9563c36fb8ef89053ea2bb146d6f792c5104574d49d8acb63/aiohttp-3.13.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8cf20a8d6868cb15a73cab329ffc07291ba8c22b1b88176026106ae39aa6df0f", size = 1883461 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b7/e3/a7ad633ca1ca497b852233a3cce6906a56c3225fb6d9217b5e5e60b7419d/aiohttp-3.13.5-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:330f5da04c987f1d5bdb8ae189137c77139f36bd1cb23779ca1a354a4b027800", size = 1747661 },
|
||||
{ url = "https://files.pythonhosted.org/packages/33/b9/cd6fe579bed34a906d3d783fe60f2fa297ef55b27bb4538438ee49d4dc41/aiohttp-3.13.5-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6f1cbf0c7926d315c3c26c2da41fd2b5d2fe01ac0e157b78caefc51a782196cf", size = 1863800 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c0/3f/2c1e2f5144cefa889c8afd5cf431994c32f3b29da9961698ff4e3811b79a/aiohttp-3.13.5-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:53fc049ed6390d05423ba33103ded7281fe897cf97878f369a527070bd95795b", size = 1958382 },
|
||||
{ url = "https://files.pythonhosted.org/packages/66/1d/f31ec3f1013723b3babe3609e7f119c2c2fb6ef33da90061a705ef3e1bc8/aiohttp-3.13.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:898703aa2667e3c5ca4c54ca36cd73f58b7a38ef87a5606414799ebce4d3fd3a", size = 1803724 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/b4/57712dfc6f1542f067daa81eb61da282fab3e6f1966fca25db06c4fc62d5/aiohttp-3.13.5-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0494a01ca9584eea1e5fbd6d748e61ecff218c51b576ee1999c23db7066417d8", size = 1640027 },
|
||||
{ url = "https://files.pythonhosted.org/packages/25/3c/734c878fb43ec083d8e31bf029daae1beafeae582d1b35da234739e82ee7/aiohttp-3.13.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:6cf81fe010b8c17b09495cbd15c1d35afbc8fb405c0c9cf4738e5ae3af1d65be", size = 1806644 },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/a5/f671e5cbec1c21d044ff3078223f949748f3a7f86b14e34a365d74a5d21f/aiohttp-3.13.5-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:c564dd5f09ddc9d8f2c2d0a301cd30a79a2cc1b46dd1a73bef8f0038863d016b", size = 1791630 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/63/fb8d0ad63a0b8a99be97deac8c04dacf0785721c158bdf23d679a87aa99e/aiohttp-3.13.5-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:2994be9f6e51046c4f864598fd9abeb4fba6e88f0b2152422c9666dcd4aea9c6", size = 1809403 },
|
||||
{ url = "https://files.pythonhosted.org/packages/59/0c/bfed7f30662fcf12206481c2aac57dedee43fe1c49275e85b3a1e1742294/aiohttp-3.13.5-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:157826e2fa245d2ef46c83ea8a5faf77ca19355d278d425c29fda0beb3318037", size = 1634924 },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/d6/fd518d668a09fd5a3319ae5e984d4d80b9a4b3df4e21c52f02251ef5a32e/aiohttp-3.13.5-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:a8aca50daa9493e9e13c0f566201a9006f080e7c50e5e90d0b06f53146a54500", size = 1836119 },
|
||||
{ url = "https://files.pythonhosted.org/packages/78/b7/15fb7a9d52e112a25b621c67b69c167805cb1f2ab8f1708a5c490d1b52fe/aiohttp-3.13.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3b13560160d07e047a93f23aaa30718606493036253d5430887514715b67c9d9", size = 1772072 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7e/df/57ba7f0c4a553fc2bd8b6321df236870ec6fd64a2a473a8a13d4f733214e/aiohttp-3.13.5-cp314-cp314t-win32.whl", hash = "sha256:9a0f4474b6ea6818b41f82172d799e4b3d29e22c2c520ce4357856fced9af2f8", size = 471819 },
|
||||
{ url = "https://files.pythonhosted.org/packages/62/29/2f8418269e46454a26171bfdd6a055d74febf32234e474930f2f60a17145/aiohttp-3.13.5-cp314-cp314t-win_amd64.whl", hash = "sha256:18a2f6c1182c51baa1d28d68fea51513cb2a76612f038853c0ad3c145423d3d9", size = 505441 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/bd/ede278648914cabbabfdf95e436679b5d4156e417896a9b9f4587169e376/aiohttp-3.13.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ee62d4471ce86b108b19c3364db4b91180d13fe3510144872d6bad5401957360", size = 752158 },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/de/581c053253c07b480b03785196ca5335e3c606a37dc73e95f6527f1591fe/aiohttp-3.13.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c0fd8f41b54b58636402eb493afd512c23580456f022c1ba2db0f810c959ed0d", size = 501037 },
|
||||
{ url = "https://files.pythonhosted.org/packages/fa/f9/a5ede193c08f13cc42c0a5b50d1e246ecee9115e4cf6e900d8dbd8fd6acb/aiohttp-3.13.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4baa48ce49efd82d6b1a0be12d6a36b35e5594d1dd42f8bfba96ea9f8678b88c", size = 501556 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/10/88ff67cd48a6ec36335b63a640abe86135791544863e0cfe1f065d6cef7a/aiohttp-3.13.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d738ebab9f71ee652d9dbd0211057690022201b11197f9a7324fd4dba128aa97", size = 1757314 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/15/fdb90a5cf5a1f52845c276e76298c75fbbcc0ac2b4a86551906d54529965/aiohttp-3.13.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0ce692c3468fa831af7dceed52edf51ac348cebfc8d3feb935927b63bd3e8576", size = 1731819 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ec/df/28146785a007f7820416be05d4f28cc207493efd1e8c6c1068e9bdc29198/aiohttp-3.13.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8e08abcfe752a454d2cb89ff0c08f2d1ecd057ae3e8cc6d84638de853530ebab", size = 1793279 },
|
||||
{ url = "https://files.pythonhosted.org/packages/10/47/689c743abf62ea7a77774d5722f220e2c912a77d65d368b884d9779ef41b/aiohttp-3.13.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5977f701b3fff36367a11087f30ea73c212e686d41cd363c50c022d48b011d8d", size = 1891082 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b0/b6/f7f4f318c7e58c23b761c9b13b9a3c9b394e0f9d5d76fbc6622fa98509f6/aiohttp-3.13.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:54203e10405c06f8b6020bd1e076ae0fe6c194adcee12a5a78af3ffa3c57025e", size = 1773938 },
|
||||
{ url = "https://files.pythonhosted.org/packages/aa/06/f207cb3121852c989586a6fc16ff854c4fcc8651b86c5d3bd1fc83057650/aiohttp-3.13.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:358a6af0145bc4dda037f13167bef3cce54b132087acc4c295c739d05d16b1c3", size = 1579548 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6c/58/e1289661a32161e24c1fe479711d783067210d266842523752869cc1d9c2/aiohttp-3.13.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:898ea1850656d7d61832ef06aa9846ab3ddb1621b74f46de78fbc5e1a586ba83", size = 1714669 },
|
||||
{ url = "https://files.pythonhosted.org/packages/96/0a/3e86d039438a74a86e6a948a9119b22540bae037d6ba317a042ae3c22711/aiohttp-3.13.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:7bc30cceb710cf6a44e9617e43eebb6e3e43ad855a34da7b4b6a73537d8a6763", size = 1754175 },
|
||||
{ url = "https://files.pythonhosted.org/packages/f4/30/e717fc5df83133ba467a560b6d8ef20197037b4bb5d7075b90037de1018e/aiohttp-3.13.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4a31c0c587a8a038f19a4c7e60654a6c899c9de9174593a13e7cc6e15ff271f9", size = 1762049 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e4/28/8f7a2d4492e336e40005151bdd94baf344880a4707573378579f833a64c1/aiohttp-3.13.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:2062f675f3fe6e06d6113eb74a157fb9df58953ffed0cdb4182554b116545758", size = 1570861 },
|
||||
{ url = "https://files.pythonhosted.org/packages/78/45/12e1a3d0645968b1c38de4b23fdf270b8637735ea057d4f84482ff918ad9/aiohttp-3.13.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3d1ba8afb847ff80626d5e408c1fdc99f942acc877d0702fe137015903a220a9", size = 1790003 },
|
||||
{ url = "https://files.pythonhosted.org/packages/eb/0f/60374e18d590de16dcb39d6ff62f39c096c1b958e6f37727b5870026ea30/aiohttp-3.13.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b08149419994cdd4d5eecf7fd4bc5986b5a9380285bcd01ab4c0d6bfca47b79d", size = 1737289 },
|
||||
{ url = "https://files.pythonhosted.org/packages/02/bf/535e58d886cfbc40a8b0013c974afad24ef7632d645bca0b678b70033a60/aiohttp-3.13.4-cp312-cp312-win32.whl", hash = "sha256:fc432f6a2c4f720180959bc19aa37259651c1a4ed8af8afc84dd41c60f15f791", size = 434185 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/1a/d92e3325134ebfff6f4069f270d3aac770d63320bd1fcd0eca023e74d9a8/aiohttp-3.13.4-cp312-cp312-win_amd64.whl", hash = "sha256:6148c9ae97a3e8bff9a1fc9c757fa164116f86c100468339730e717590a3fb77", size = 461285 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e3/ac/892f4162df9b115b4758d615f32ec63d00f3084c705ff5526630887b9b42/aiohttp-3.13.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:63dd5e5b1e43b8fb1e91b79b7ceba1feba588b317d1edff385084fcc7a0a4538", size = 745744 },
|
||||
{ url = "https://files.pythonhosted.org/packages/97/a9/c5b87e4443a2f0ea88cb3000c93a8fdad1ee63bffc9ded8d8c8e0d66efc6/aiohttp-3.13.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:746ac3cc00b5baea424dacddea3ec2c2702f9590de27d837aa67004db1eebc6e", size = 498178 },
|
||||
{ url = "https://files.pythonhosted.org/packages/94/42/07e1b543a61250783650df13da8ddcdc0d0a5538b2bd15cef6e042aefc61/aiohttp-3.13.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bda8f16ea99d6a6705e5946732e48487a448be874e54a4f73d514660ff7c05d3", size = 498331 },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/d6/492f46bf0328534124772d0cf58570acae5b286ea25006900650f69dae0e/aiohttp-3.13.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4b061e7b5f840391e3f64d0ddf672973e45c4cfff7a0feea425ea24e51530fc2", size = 1744414 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e2/4d/e02627b2683f68051246215d2d62b2d2f249ff7a285e7a858dc47d6b6a14/aiohttp-3.13.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b252e8d5cd66184b570d0d010de742736e8a4fab22c58299772b0c5a466d4b21", size = 1719226 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/6c/5d0a3394dd2b9f9aeba6e1b6065d0439e4b75d41f1fb09a3ec010b43552b/aiohttp-3.13.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:20af8aad61d1803ff11152a26146d8d81c266aa8c5aa9b4504432abb965c36a0", size = 1782110 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0d/2d/c20791e3437700a7441a7edfb59731150322424f5aadf635602d1d326101/aiohttp-3.13.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:13a5cc924b59859ad2adb1478e31f410a7ed46e92a2a619d6d1dd1a63c1a855e", size = 1884809 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/94/d99dbfbd1924a87ef643833932eb2a3d9e5eee87656efea7d78058539eff/aiohttp-3.13.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:534913dfb0a644d537aebb4123e7d466d94e3be5549205e6a31f72368980a81a", size = 1764938 },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/61/3ce326a1538781deb89f6cf5e094e2029cd308ed1e21b2ba2278b08426f6/aiohttp-3.13.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:320e40192a2dcc1cf4b5576936e9652981ab596bf81eb309535db7e2f5b5672f", size = 1570697 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b6/77/4ab5a546857bb3028fbaf34d6eea180267bdab022ee8b1168b1fcde4bfdd/aiohttp-3.13.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9e587fcfce2bcf06526a43cb705bdee21ac089096f2e271d75de9c339db3100c", size = 1702258 },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/63/d8f29021e39bc5af8e5d5e9da1b07976fb9846487a784e11e4f4eeda4666/aiohttp-3.13.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:9eb9c2eea7278206b5c6c1441fdd9dc420c278ead3f3b2cc87f9b693698cc500", size = 1740287 },
|
||||
{ url = "https://files.pythonhosted.org/packages/55/3a/cbc6b3b124859a11bc8055d3682c26999b393531ef926754a3445b99dfef/aiohttp-3.13.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:29be00c51972b04bf9d5c8f2d7f7314f48f96070ca40a873a53056e652e805f7", size = 1753011 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e0/30/836278675205d58c1368b21520eab9572457cf19afd23759216c04483048/aiohttp-3.13.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:90c06228a6c3a7c9f776fe4fc0b7ff647fffd3bed93779a6913c804ae00c1073", size = 1566359 },
|
||||
{ url = "https://files.pythonhosted.org/packages/50/b4/8032cc9b82d17e4277704ba30509eaccb39329dc18d6a35f05e424439e32/aiohttp-3.13.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:a533ec132f05fd9a1d959e7f34184cd7d5e8511584848dab85faefbaac573069", size = 1785537 },
|
||||
{ url = "https://files.pythonhosted.org/packages/17/7d/5873e98230bde59f493bf1f7c3e327486a4b5653fa401144704df5d00211/aiohttp-3.13.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1c946f10f413836f82ea4cfb90200d2a59578c549f00857e03111cf45ad01ca5", size = 1740752 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7b/f2/13e46e0df051494d7d3c68b7f72d071f48c384c12716fc294f75d5b1a064/aiohttp-3.13.4-cp313-cp313-win32.whl", hash = "sha256:48708e2706106da6967eff5908c78ca3943f005ed6bcb75da2a7e4da94ef8c70", size = 433187 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ea/c0/649856ee655a843c8f8664592cfccb73ac80ede6a8c8db33a25d810c12db/aiohttp-3.13.4-cp313-cp313-win_amd64.whl", hash = "sha256:74a2eb058da44fa3a877a49e2095b591d4913308bb424c418b77beb160c55ce3", size = 459778 },
|
||||
{ url = "https://files.pythonhosted.org/packages/6d/29/6657cc37ae04cacc2dbf53fb730a06b6091cc4cbe745028e047c53e6d840/aiohttp-3.13.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:e0a2c961fc92abeff61d6444f2ce6ad35bb982db9fc8ff8a47455beacf454a57", size = 749363 },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/7f/30ccdf67ca3d24b610067dc63d64dcb91e5d88e27667811640644aa4a85d/aiohttp-3.13.4-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:153274535985a0ff2bff1fb6c104ed547cec898a09213d21b0f791a44b14d933", size = 499317 },
|
||||
{ url = "https://files.pythonhosted.org/packages/93/13/e372dd4e68ad04ee25dafb050c7f98b0d91ea643f7352757e87231102555/aiohttp-3.13.4-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:351f3171e2458da3d731ce83f9e6b9619e325c45cbd534c7759750cabf453ad7", size = 500477 },
|
||||
{ url = "https://files.pythonhosted.org/packages/e5/fe/ee6298e8e586096fb6f5eddd31393d8544f33ae0792c71ecbb4c2bef98ac/aiohttp-3.13.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f989ac8bc5595ff761a5ccd32bdb0768a117f36dd1504b1c2c074ed5d3f4df9c", size = 1737227 },
|
||||
{ url = "https://files.pythonhosted.org/packages/b0/b9/a7a0463a09e1a3fe35100f74324f23644bfc3383ac5fd5effe0722a5f0b7/aiohttp-3.13.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d36fc1709110ec1e87a229b201dd3ddc32aa01e98e7868083a794609b081c349", size = 1694036 },
|
||||
{ url = "https://files.pythonhosted.org/packages/57/7c/8972ae3fb7be00a91aee6b644b2a6a909aedb2c425269a3bfd90115e6f8f/aiohttp-3.13.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:42adaeea83cbdf069ab94f5103ce0787c21fb1a0153270da76b59d5578302329", size = 1786814 },
|
||||
{ url = "https://files.pythonhosted.org/packages/93/01/c81e97e85c774decbaf0d577de7d848934e8166a3a14ad9f8aa5be329d28/aiohttp-3.13.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:92deb95469928cc41fd4b42a95d8012fa6df93f6b1c0a83af0ffbc4a5e218cde", size = 1866676 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/5f/5b46fe8694a639ddea2cd035bf5729e4677ea882cb251396637e2ef1590d/aiohttp-3.13.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0c0c7c07c4257ef3a1df355f840bc62d133bcdef5c1c5ba75add3c08553e2eed", size = 1740842 },
|
||||
{ url = "https://files.pythonhosted.org/packages/20/a2/0d4b03d011cca6b6b0acba8433193c1e484efa8d705ea58295590fe24203/aiohttp-3.13.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f062c45de8a1098cb137a1898819796a2491aec4e637a06b03f149315dff4d8f", size = 1566508 },
|
||||
{ url = "https://files.pythonhosted.org/packages/98/17/e689fd500da52488ec5f889effd6404dece6a59de301e380f3c64f167beb/aiohttp-3.13.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:76093107c531517001114f0ebdb4f46858ce818590363e3e99a4a2280334454a", size = 1700569 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/0d/66402894dbcf470ef7db99449e436105ea862c24f7ea4c95c683e635af35/aiohttp-3.13.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:6f6ec32162d293b82f8b63a16edc80769662fbd5ae6fbd4936d3206a2c2cc63b", size = 1707407 },
|
||||
{ url = "https://files.pythonhosted.org/packages/2f/eb/af0ab1a3650092cbd8e14ef29e4ab0209e1460e1c299996c3f8288b3f1ff/aiohttp-3.13.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5903e2db3d202a00ad9f0ec35a122c005e85d90c9836ab4cda628f01edf425e2", size = 1752214 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/bf/72326f8a98e4c666f292f03c385545963cc65e358835d2a7375037a97b57/aiohttp-3.13.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2d5bea57be7aca98dbbac8da046d99b5557c5cf4e28538c4c786313078aca09e", size = 1562162 },
|
||||
{ url = "https://files.pythonhosted.org/packages/67/9f/13b72435f99151dd9a5469c96b3b5f86aa29b7e785ca7f35cf5e538f74c0/aiohttp-3.13.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:bcf0c9902085976edc0232b75006ef38f89686901249ce14226b6877f88464fb", size = 1768904 },
|
||||
{ url = "https://files.pythonhosted.org/packages/18/bc/28d4970e7d5452ac7776cdb5431a1164a0d9cf8bd2fffd67b4fb463aa56d/aiohttp-3.13.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3295f98bfeed2e867cab588f2a146a9db37a85e3ae9062abf46ba062bd29165", size = 1723378 },
|
||||
{ url = "https://files.pythonhosted.org/packages/53/74/b32458ca1a7f34d65bdee7aef2036adbe0438123d3d53e2b083c453c24dd/aiohttp-3.13.4-cp314-cp314-win32.whl", hash = "sha256:a598a5c5767e1369d8f5b08695cab1d8160040f796c4416af76fd773d229b3c9", size = 438711 },
|
||||
{ url = "https://files.pythonhosted.org/packages/40/b2/54b487316c2df3e03a8f3435e9636f8a81a42a69d942164830d193beb56a/aiohttp-3.13.4-cp314-cp314-win_amd64.whl", hash = "sha256:c555db4bc7a264bead5a7d63d92d41a1122fcd39cc62a4db815f45ad46f9c2c8", size = 464977 },
|
||||
{ url = "https://files.pythonhosted.org/packages/47/fb/e41b63c6ce71b07a59243bb8f3b457ee0c3402a619acb9d2c0d21ef0e647/aiohttp-3.13.4-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:45abbbf09a129825d13c18c7d3182fecd46d9da3cfc383756145394013604ac1", size = 781549 },
|
||||
{ url = "https://files.pythonhosted.org/packages/97/53/532b8d28df1e17e44c4d9a9368b78dcb6bf0b51037522136eced13afa9e8/aiohttp-3.13.4-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:74c80b2bc2c2adb7b3d1941b2b60701ee2af8296fc8aad8b8bc48bc25767266c", size = 514383 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1b/1f/62e5d400603e8468cd635812d99cb81cfdc08127a3dc474c647615f31339/aiohttp-3.13.4-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c97989ae40a9746650fa196894f317dafc12227c808c774929dda0ff873a5954", size = 518304 },
|
||||
{ url = "https://files.pythonhosted.org/packages/90/57/2326b37b10896447e3c6e0cbef4fe2486d30913639a5cfd1332b5d870f82/aiohttp-3.13.4-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dae86be9811493f9990ef44fff1685f5c1a3192e9061a71a109d527944eed551", size = 1893433 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d2/b4/a24d82112c304afdb650167ef2fe190957d81cbddac7460bedd245f765aa/aiohttp-3.13.4-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:1db491abe852ca2fa6cc48a3341985b0174b3741838e1341b82ac82c8bd9e871", size = 1755901 },
|
||||
{ url = "https://files.pythonhosted.org/packages/9e/2d/0883ef9d878d7846287f036c162a951968f22aabeef3ac97b0bea6f76d5d/aiohttp-3.13.4-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0e5d701c0aad02a7dce72eef6b93226cf3734330f1a31d69ebbf69f33b86666e", size = 1876093 },
|
||||
{ url = "https://files.pythonhosted.org/packages/ad/52/9204bb59c014869b71971addad6778f005daa72a96eed652c496789d7468/aiohttp-3.13.4-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8ac32a189081ae0a10ba18993f10f338ec94341f0d5df8fff348043962f3c6f8", size = 1970815 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/b5/e4eb20275a866dde0f570f411b36c6b48f7b53edfe4f4071aa1b0728098a/aiohttp-3.13.4-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:98e968cdaba43e45c73c3f306fca418c8009a957733bac85937c9f9cf3f4de27", size = 1816223 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d8/23/e98075c5bb146aa61a1239ee1ac7714c85e814838d6cebbe37d3fe19214a/aiohttp-3.13.4-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca114790c9144c335d538852612d3e43ea0f075288f4849cf4b05d6cd2238ce7", size = 1649145 },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/c1/7bad8be33bb06c2bb224b6468874346026092762cbec388c3bdb65a368ee/aiohttp-3.13.4-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ea2e071661ba9cfe11eabbc81ac5376eaeb3061f6e72ec4cc86d7cdd1ffbdbbb", size = 1816562 },
|
||||
{ url = "https://files.pythonhosted.org/packages/5c/10/c00323348695e9a5e316825969c88463dcc24c7e9d443244b8a2c9cf2eae/aiohttp-3.13.4-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:34e89912b6c20e0fd80e07fa401fd218a410aa1ce9f1c2f1dad6db1bd0ce0927", size = 1800333 },
|
||||
{ url = "https://files.pythonhosted.org/packages/84/43/9b2147a1df3559f49bd723e22905b46a46c068a53adb54abdca32c4de180/aiohttp-3.13.4-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:0e217cf9f6a42908c52b46e42c568bd57adc39c9286ced31aaace614b6087965", size = 1820617 },
|
||||
{ url = "https://files.pythonhosted.org/packages/a9/7f/b3481a81e7a586d02e99387b18c6dafff41285f6efd3daa2124c01f87eae/aiohttp-3.13.4-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:0c296f1221e21ba979f5ac1964c3b78cfde15c5c5f855ffd2caab337e9cd9182", size = 1643417 },
|
||||
{ url = "https://files.pythonhosted.org/packages/8f/72/07181226bc99ce1124e0f89280f5221a82d3ae6a6d9d1973ce429d48e52b/aiohttp-3.13.4-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:d99a9d168ebaffb74f36d011750e490085ac418f4db926cce3989c8fe6cb6b1b", size = 1849286 },
|
||||
{ url = "https://files.pythonhosted.org/packages/1a/e6/1b3566e103eca6da5be4ae6713e112a053725c584e96574caf117568ffef/aiohttp-3.13.4-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cb19177205d93b881f3f89e6081593676043a6828f59c78c17a0fd6c1fbed2ba", size = 1782635 },
|
||||
{ url = "https://files.pythonhosted.org/packages/37/58/1b11c71904b8d079eb0c39fe664180dd1e14bebe5608e235d8bfbadc8929/aiohttp-3.13.4-cp314-cp314t-win32.whl", hash = "sha256:c606aa5656dab6552e52ca368e43869c916338346bfaf6304e15c58fb113ea30", size = 472537 },
|
||||
{ url = "https://files.pythonhosted.org/packages/bc/8f/87c56a1a1977d7dddea5b31e12189665a140fdb48a71e9038ff90bb564ec/aiohttp-3.13.4-cp314-cp314t-win_amd64.whl", hash = "sha256:014dcc10ec8ab8db681f0d68e939d1e9286a5aa2b993cbbdb0db130853e02144", size = 506381 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -3723,7 +3723,7 @@ wheels = [
|
|||
|
||||
[[package]]
|
||||
name = "litellm"
|
||||
version = "1.83.4"
|
||||
version = "1.83.14"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "aiohttp" },
|
||||
|
|
@ -3739,9 +3739,9 @@ dependencies = [
|
|||
{ name = "tiktoken" },
|
||||
{ name = "tokenizers" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/03/c4/30469c06ae7437a4406bc11e3c433cfd380a6771068cca15ea918dcd158f/litellm-1.83.4.tar.gz", hash = "sha256:6458d2030a41229460b321adee00517a91dbd8e63213cc953d355cb41d16f2d4", size = 17733899 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/8d/7c/c095649380adc96c8630273c1768c2ad1e74aa2ee1dd8dd05d218a60569f/litellm-1.83.14.tar.gz", hash = "sha256:24aef9b47cdc424c833e32f3727f411741c690832cd1fe4405e0077144fe09c9", size = 14836599 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/b8/bd/df19d3f8f6654535ee343a341fd921f81c411abf601a53e3eaef58129b02/litellm-1.83.4-py3-none-any.whl", hash = "sha256:17d7b4d48d47aca988ea4f762ddda5e7bd72cda3270192b22813d0330869d7b4", size = 16015555 },
|
||||
{ url = "https://files.pythonhosted.org/packages/7f/5c/1b5691575420135e90578543b2bf219497caa33cfd0af64cb38f30288450/litellm-1.83.14-py3-none-any.whl", hash = "sha256:92b11ba2a32cf80707ddf388d18526696c7999a21b418c5e3b6eda1243d2cfdb", size = 16457054 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -5124,7 +5124,7 @@ wheels = [
|
|||
|
||||
[[package]]
|
||||
name = "openai"
|
||||
version = "2.30.0"
|
||||
version = "2.24.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "anyio" },
|
||||
|
|
@ -5136,9 +5136,9 @@ dependencies = [
|
|||
{ name = "tqdm" },
|
||||
{ name = "typing-extensions" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/88/15/52580c8fbc16d0675d516e8749806eda679b16de1e4434ea06fb6feaa610/openai-2.30.0.tar.gz", hash = "sha256:92f7661c990bda4b22a941806c83eabe4896c3094465030dd882a71abe80c885", size = 676084 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/55/13/17e87641b89b74552ed408a92b231283786523edddc95f3545809fab673c/openai-2.24.0.tar.gz", hash = "sha256:1e5769f540dbd01cb33bc4716a23e67b9d695161a734aff9c5f925e2bf99a673", size = 658717 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/2a/9e/5bfa2270f902d5b92ab7d41ce0475b8630572e71e349b2a4996d14bdda93/openai-2.30.0-py3-none-any.whl", hash = "sha256:9a5ae616888eb2748ec5e0c5b955a51592e0b201a11f4262db920f2a78c5231d", size = 1146656 },
|
||||
{ url = "https://files.pythonhosted.org/packages/c9/30/844dc675ee6902579b8eef01ed23917cc9319a1c9c0c14ec6e39340c96d0/openai-2.24.0-py3-none-any.whl", hash = "sha256:fed30480d7d6c884303287bde864980a4b137b60553ffbcf9ab4a233b7a73d94", size = 1120122 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -6780,11 +6780,11 @@ wheels = [
|
|||
|
||||
[[package]]
|
||||
name = "python-dotenv"
|
||||
version = "1.0.1"
|
||||
version = "1.2.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/bc/57/e84d88dfe0aec03b7a2d4327012c1627ab5f03652216c63d49846d7a6c58/python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca", size = 39115 }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/82/ed/0301aeeac3e5353ef3d94b6ec08bbcabd04a72018415dcb29e588514bba8/python_dotenv-1.2.2.tar.gz", hash = "sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3", size = 50135 }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/6a/3e/b68c118422ec867fa7ab88444e1274aa40681c606d59ac27de5a5588f082/python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a", size = 19863 },
|
||||
{ url = "https://files.pythonhosted.org/packages/0b/d7/1959b9648791274998a9c3526f6d0ec8fd2233e4d4acce81bbae76b44b2a/python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a", size = 22101 },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -8070,7 +8070,7 @@ requires-dist = [
|
|||
{ name = "langgraph", specifier = ">=1.1.3" },
|
||||
{ name = "langgraph-checkpoint-postgres", specifier = ">=3.0.2" },
|
||||
{ name = "linkup-sdk", specifier = ">=0.2.4" },
|
||||
{ name = "litellm", specifier = ">=1.83.4" },
|
||||
{ name = "litellm", specifier = ">=1.83.7" },
|
||||
{ name = "llama-cloud-services", specifier = ">=0.6.25" },
|
||||
{ name = "markdown", specifier = ">=3.7" },
|
||||
{ name = "markdownify", specifier = ">=0.14.1" },
|
||||
|
|
|
|||
|
|
@ -17,7 +17,6 @@ const demoPlans = [
|
|||
"Self Hostable",
|
||||
"500 pages included to start",
|
||||
"3 million premium tokens to start",
|
||||
"Earn up to 3,000+ bonus pages for free",
|
||||
"Includes access to OpenAI text, audio and image models",
|
||||
"Realtime Collaborative Group Chats with teammates",
|
||||
"Community support on Discord",
|
||||
|
|
|
|||
|
|
@ -1,21 +1,14 @@
|
|||
"use client";
|
||||
|
||||
import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query";
|
||||
import { Check, ExternalLink, Mail } from "lucide-react";
|
||||
import { Check, ExternalLink } from "lucide-react";
|
||||
import Link from "next/link";
|
||||
import { useParams } from "next/navigation";
|
||||
import { useEffect, useState } from "react";
|
||||
import { useEffect } from "react";
|
||||
import { toast } from "sonner";
|
||||
import { USER_QUERY_KEY } from "@/atoms/user/user-query.atoms";
|
||||
import { Button } from "@/components/ui/button";
|
||||
import { Card, CardContent } from "@/components/ui/card";
|
||||
import {
|
||||
Dialog,
|
||||
DialogContent,
|
||||
DialogDescription,
|
||||
DialogHeader,
|
||||
DialogTitle,
|
||||
} from "@/components/ui/dialog";
|
||||
import { Separator } from "@/components/ui/separator";
|
||||
import { Skeleton } from "@/components/ui/skeleton";
|
||||
import { Spinner } from "@/components/ui/spinner";
|
||||
|
|
@ -33,7 +26,6 @@ export function MorePagesContent() {
|
|||
const params = useParams();
|
||||
const queryClient = useQueryClient();
|
||||
const searchSpaceId = params?.search_space_id ?? "";
|
||||
const [claimOpen, setClaimOpen] = useState(false);
|
||||
|
||||
useEffect(() => {
|
||||
trackIncentivePageViewed();
|
||||
|
|
@ -79,35 +71,10 @@ export function MorePagesContent() {
|
|||
<div className="text-center">
|
||||
<h2 className="text-xl font-bold tracking-tight">Get Free Pages</h2>
|
||||
<p className="mt-1 text-sm text-muted-foreground">
|
||||
Claim your free page offer and earn bonus pages
|
||||
Earn bonus pages by completing tasks
|
||||
</p>
|
||||
</div>
|
||||
|
||||
{/* 3k free offer */}
|
||||
<Card className="border-emerald-500/30 bg-emerald-500/5">
|
||||
<CardContent className="flex items-center gap-3 p-4">
|
||||
<div className="flex h-10 w-10 shrink-0 items-center justify-center rounded-full bg-emerald-600 text-white text-xs font-bold">
|
||||
3k
|
||||
</div>
|
||||
<div className="min-w-0 flex-1">
|
||||
<p className="text-sm font-semibold">Claim 3,000 Free Pages</p>
|
||||
<p className="text-xs text-muted-foreground">
|
||||
Limited offer. Schedule a meeting or email us to claim.
|
||||
</p>
|
||||
</div>
|
||||
<Button
|
||||
size="sm"
|
||||
className="bg-emerald-600 text-white hover:bg-emerald-700"
|
||||
onClick={() => setClaimOpen(true)}
|
||||
>
|
||||
Claim
|
||||
</Button>
|
||||
</CardContent>
|
||||
</Card>
|
||||
|
||||
<Separator />
|
||||
|
||||
{/* Free tasks */}
|
||||
<div className="space-y-2">
|
||||
<h3 className="text-sm font-semibold">Earn Bonus Pages</h3>
|
||||
{isLoading ? (
|
||||
|
|
@ -182,7 +149,6 @@ export function MorePagesContent() {
|
|||
|
||||
<Separator />
|
||||
|
||||
{/* Link to buy pages */}
|
||||
<div className="text-center">
|
||||
<p className="text-sm text-muted-foreground">Need more?</p>
|
||||
{pageBuyingEnabled ? (
|
||||
|
|
@ -197,25 +163,6 @@ export function MorePagesContent() {
|
|||
</p>
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Claim 3k dialog */}
|
||||
<Dialog open={claimOpen} onOpenChange={setClaimOpen}>
|
||||
<DialogContent className="sm:max-w-md">
|
||||
<DialogHeader>
|
||||
<DialogTitle>Claim 3,000 Free Pages</DialogTitle>
|
||||
<DialogDescription>
|
||||
Send us an email to claim your free 3,000 pages. Include your account email and
|
||||
primary usecase for free pages.
|
||||
</DialogDescription>
|
||||
</DialogHeader>
|
||||
<Button asChild className="w-full gap-2">
|
||||
<a href="mailto:rohan@surfsense.com?subject=Claim%203%2C000%20Free%20Pages&body=Hi%2C%20I'd%20like%20to%20claim%20the%203%2C000%20free%20pages%20offer.%0A%0AMy%20account%20email%3A%20">
|
||||
<Mail className="h-4 w-4" />
|
||||
rohan@surfsense.com
|
||||
</a>
|
||||
</Button>
|
||||
</DialogContent>
|
||||
</Dialog>
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue