diff --git a/surfsense_backend/app/services/vision_autocomplete_service.py b/surfsense_backend/app/services/vision_autocomplete_service.py
index 0804df7fb..e172c6522 100644
--- a/surfsense_backend/app/services/vision_autocomplete_service.py
+++ b/surfsense_backend/app/services/vision_autocomplete_service.py
@@ -186,6 +186,14 @@ async def stream_vision_autocomplete(
         yield streaming.format_done()
     except Exception as e:
-        logger.error(f"Vision autocomplete streaming error: {e}")
-        yield streaming.format_error(str(e))
+        error_str = str(e).lower()
+        if "content must be a string" in error_str or "does not support image" in error_str:
+            logger.warning(f"Vision autocomplete: selected model does not support vision: {e}")
+            yield streaming.format_error(
+                "The selected model does not support vision. "
+                "Please set a vision-capable model (e.g. GPT-4o, Gemini) in your search space settings."
+            )
+        else:
+            logger.error(f"Vision autocomplete streaming error: {e}")
+            yield streaming.format_error(str(e))
         yield streaming.format_done()
 
diff --git a/surfsense_web/app/desktop/suggestion/page.tsx b/surfsense_web/app/desktop/suggestion/page.tsx
index b7d9b97bd..7188b73c6 100644
--- a/surfsense_web/app/desktop/suggestion/page.tsx
+++ b/surfsense_web/app/desktop/suggestion/page.tsx
@@ -24,6 +24,8 @@ function friendlyError(raw: string | number): string {
 		return "Please sign in to use suggestions.";
 	if (lower.includes("no vision llm configured") || lower.includes("no llm configured"))
 		return "No Vision LLM configured. Set one in search space settings.";
+	if (lower.includes("does not support vision"))
+		return "Selected model doesn\u2019t support vision. Set a vision-capable model in settings.";
 	if (lower.includes("fetch") || lower.includes("network") || lower.includes("econnrefused"))
 		return "Can\u2019t reach the server. Check your connection.";
 	return "Something went wrong. Try again.";