From cd25175b8459994b7dc982be1de5eb22b5bb7d32 Mon Sep 17 00:00:00 2001
From: Anish Sarkar <104695310+AnishSarkar22@users.noreply.github.com>
Date: Sat, 2 May 2026 03:36:13 +0530
Subject: [PATCH] chore: ran linting

---
 .../139_add_user_to_zero_publication.py       |  4 ++-
 surfsense_backend/app/config/__init__.py      |  4 +--
 .../app/services/auto_model_pin_service.py    |  4 +--
 .../openrouter_integration_service.py         | 26 +++----------
 .../app/services/quality_score.py             | 10 ++---
 .../app/tasks/chat/stream_new_chat.py         | 24 ++++++++----
 .../services/test_auto_model_pin_service.py   |  2 +-
 .../services/test_llm_router_pool_filter.py   | 37 ++++++++++++-------
 .../test_openrouter_integration_service.py    |  2 -
 .../services/test_openrouter_legacy_config.py |  4 +--
 .../tests/unit/services/test_quality_score.py |  9 +++--
 .../unit/test_stream_new_chat_contract.py     |  8 ++--
 .../components/assistant-ui/markdown-text.tsx | 14 +++-----
 .../lib/apis/documents-api.service.ts         | 12 +++---
 14 files changed, 78 insertions(+), 82 deletions(-)

diff --git a/surfsense_backend/alembic/versions/139_add_user_to_zero_publication.py b/surfsense_backend/alembic/versions/139_add_user_to_zero_publication.py
index 5b8bc29b0..83c96a429 100644
--- a/surfsense_backend/alembic/versions/139_add_user_to_zero_publication.py
+++ b/surfsense_backend/alembic/versions/139_add_user_to_zero_publication.py
@@ -90,7 +90,9 @@ def _has_zero_version(conn, table: str) -> bool:
     )
 
 
-def _build_publication_ddl(documents_has_zero_ver: bool, user_has_zero_ver: bool) -> str:
+def _build_publication_ddl(
+    documents_has_zero_ver: bool, user_has_zero_ver: bool
+) -> str:
     doc_cols = DOCUMENT_COLS + (['"_0_version"'] if documents_has_zero_ver else [])
     user_cols = USER_COLS + (['"_0_version"'] if user_has_zero_ver else [])
     doc_col_list = ", ".join(doc_cols)
diff --git a/surfsense_backend/app/config/__init__.py b/surfsense_backend/app/config/__init__.py
index b3eff571e..675b05d2c 100644
--- a/surfsense_backend/app/config/__init__.py
+++ b/surfsense_backend/app/config/__init__.py
@@ -286,9 +286,7 @@ def initialize_openrouter_integration():
 
         if new_configs:
             config.GLOBAL_LLM_CONFIGS.extend(new_configs)
-            free_count = sum(
-                1 for c in new_configs if c.get("billing_tier") == "free"
-            )
+            free_count = sum(1 for c in new_configs if c.get("billing_tier") == "free")
             premium_count = sum(
                 1 for c in new_configs if c.get("billing_tier") == "premium"
             )
diff --git a/surfsense_backend/app/services/auto_model_pin_service.py b/surfsense_backend/app/services/auto_model_pin_service.py
index b2acd6f56..3a2c681b7 100644
--- a/surfsense_backend/app/services/auto_model_pin_service.py
+++ b/surfsense_backend/app/services/auto_model_pin_service.py
@@ -277,9 +277,7 @@ async def resolve_or_get_pinned_llm_config_id(
         c for c in _global_candidates() if int(c.get("id", 0)) not in excluded_ids
     ]
     if not candidates:
-        raise ValueError(
-            "No usable global LLM configs are available for Auto mode"
-        )
+        raise ValueError("No usable global LLM configs are available for Auto mode")
     candidate_by_id = {int(c["id"]): c for c in candidates}
 
     # Reuse an existing valid pin without re-checking current quota (no silent
diff --git a/surfsense_backend/app/services/openrouter_integration_service.py b/surfsense_backend/app/services/openrouter_integration_service.py
index 67dbb6690..7e856d015 100644
--- a/surfsense_backend/app/services/openrouter_integration_service.py
+++ b/surfsense_backend/app/services/openrouter_integration_service.py
@@ -405,9 +405,7 @@ class OpenRouterIntegrationService:
         # Re-blend health scores against the freshly fetched catalogue. Also
         # re-stamps health for any YAML-curated cfg with provider==OPENROUTER
         # so a hand-picked dead OR model is gated like a dynamic one.
-        await self._enrich_health_safely(
-            static_configs + new_configs, log_summary=True
-        )
+        await self._enrich_health_safely(static_configs + new_configs, log_summary=True)
 
         # Rebuild the LiteLLM router so freshly fetched configs flow through
         # (dynamic OR premium entries now opt into the pool, free ones stay
@@ -415,8 +413,8 @@ class OpenRouterIntegrationService:
         # reset cached context-window profiles).
         try:
             from app.config import config as _app_config
-            from app.services.llm_router_service import LLMRouterService
             from app.services.llm_router_service import (
+                LLMRouterService,
                 _router_instance_cache as _chat_router_cache,
             )
 
@@ -426,9 +424,7 @@ class OpenRouterIntegrationService:
             )
             _chat_router_cache.clear()
         except Exception as exc:
-            logger.warning(
-                "OpenRouter refresh: router rebuild skipped (%s)", exc
-            )
+            logger.warning("OpenRouter refresh: router rebuild skipped (%s)", exc)
 
     @staticmethod
     def _tier_counts(configs: list[dict]) -> dict[str, int]:
@@ -475,19 +471,11 @@ class OpenRouterIntegrationService:
             return
 
         premium_pool = sorted(
-            [
-                c
-                for c in or_cfgs
-                if str(c.get("billing_tier", "")).lower() == "premium"
-            ],
+            [c for c in or_cfgs if str(c.get("billing_tier", "")).lower() == "premium"],
             key=lambda c: -int(c.get("quality_score_static") or 0),
         )[:_HEALTH_ENRICH_TOP_N_PREMIUM]
         free_pool = sorted(
-            [
-                c
-                for c in or_cfgs
-                if str(c.get("billing_tier", "")).lower() == "free"
-            ],
+            [c for c in or_cfgs if str(c.get("billing_tier", "")).lower() == "free"],
             key=lambda c: -int(c.get("quality_score_static") or 0),
         )[:_HEALTH_ENRICH_TOP_N_FREE]
         # De-duplicate while preserving order: a cfg shouldn't fall in both
@@ -507,9 +495,7 @@ class OpenRouterIntegrationService:
         api_key = str(self._settings.get("api_key") or "")
         semaphore = asyncio.Semaphore(_HEALTH_ENRICH_CONCURRENCY)
 
-        async with httpx.AsyncClient(
-            timeout=_HEALTH_FETCH_TIMEOUT_SEC
-        ) as client:
+        async with httpx.AsyncClient(timeout=_HEALTH_FETCH_TIMEOUT_SEC) as client:
             results = await asyncio.gather(
                 *(
                     self._fetch_endpoints(client, semaphore, api_key, cfg)
diff --git a/surfsense_backend/app/services/quality_score.py b/surfsense_backend/app/services/quality_score.py
index 8f6c75d56..2fb37de21 100644
--- a/surfsense_backend/app/services/quality_score.py
+++ b/surfsense_backend/app/services/quality_score.py
@@ -7,12 +7,12 @@ sort and a SHA256 pick.
 
 Score components (0-100 scale, higher is better):
 
-* ``static_score_or`` – derived from the bulk ``/api/v1/models`` payload
+* ``static_score_or`` - derived from the bulk ``/api/v1/models`` payload
   (provider prestige + ``created`` recency + pricing band + context window
   + capabilities + narrow tiny/legacy slug penalty).
-* ``static_score_yaml`` – same shape for hand-curated YAML configs, plus
+* ``static_score_yaml`` - same shape for hand-curated YAML configs, plus
   an operator-trust bonus (the operator deliberately picked this model).
-* ``aggregate_health`` – run on per-model ``/api/v1/models/{id}/endpoints``
+* ``aggregate_health`` - run on per-model ``/api/v1/models/{id}/endpoints``
   responses; returns ``(gated, score_or_none)``.
 
 The blended ``quality_score`` (0.5 * static + 0.5 * health) is computed in
@@ -281,9 +281,7 @@ def static_score_yaml(cfg: dict) -> int:
     model_name = cfg.get("model_name") or ""
     litellm_params = cfg.get("litellm_params") or {}
     lookup_name = (
-        litellm_params.get("base_model")
-        or litellm_params.get("model")
-        or model_name
+        litellm_params.get("base_model") or litellm_params.get("model") or model_name
     )
 
     ctx = 0
diff --git a/surfsense_backend/app/tasks/chat/stream_new_chat.py b/surfsense_backend/app/tasks/chat/stream_new_chat.py
index 53f237f06..dbfe9a67b 100644
--- a/surfsense_backend/app/tasks/chat/stream_new_chat.py
+++ b/surfsense_backend/app/tasks/chat/stream_new_chat.py
@@ -1814,7 +1814,9 @@ async def _stream_agent_events(
         resolved_path = _extract_resolved_file_path(
             tool_name=tool_name,
             tool_output=tool_output,
-            tool_input={"file_path": staged_file_path} if staged_file_path else None,
+            tool_input={"file_path": staged_file_path}
+            if staged_file_path
+            else None,
         )
         result_text = _tool_output_to_text(tool_output)
         if _tool_output_has_error(tool_output):
@@ -2441,8 +2443,7 @@
             await _preflight_llm(llm)
             mark_healthy(llm_config_id)
             _perf_log.info(
-                "[stream_new_chat] auto_pin_preflight ok config_id=%s "
-                "took=%.3fs",
+                "[stream_new_chat] auto_pin_preflight ok config_id=%s took=%.3fs",
                 llm_config_id,
                 time.perf_counter() - _t_preflight,
             )
@@ -2891,7 +2892,11 @@
 
                 # Inject title update mid-stream as soon as the background
                 # task finishes.
-                if title_task is not None and title_task.done() and not title_emitted:
+                if (
+                    title_task is not None
+                    and title_task.done()
+                    and not title_emitted
+                ):
                     generated_title, title_usage = title_task.result()
                     if title_usage:
                         accumulator.add(**title_usage)
@@ -2944,7 +2949,9 @@
                     )
                 ).resolved_llm_config_id
 
-                llm, agent_config, llm_load_error = await _load_llm_bundle(llm_config_id)
+                llm, agent_config, llm_load_error = await _load_llm_bundle(
+                    llm_config_id
+                )
                 if llm_load_error:
                     raise stream_exc
 
@@ -3480,8 +3487,7 @@
             await _preflight_llm(llm)
             mark_healthy(llm_config_id)
             _perf_log.info(
-                "[stream_resume] auto_pin_preflight ok config_id=%s "
-                "took=%.3fs",
+                "[stream_resume] auto_pin_preflight ok config_id=%s took=%.3fs",
                 llm_config_id,
                 time.perf_counter() - _t_preflight,
             )
@@ -3684,7 +3690,9 @@
                     )
                 ).resolved_llm_config_id
 
-                llm, agent_config, llm_load_error = await _load_llm_bundle(llm_config_id)
+                llm, agent_config, llm_load_error = await _load_llm_bundle(
+                    llm_config_id
+                )
                 if llm_load_error:
                     raise stream_exc
 
diff --git a/surfsense_backend/tests/unit/services/test_auto_model_pin_service.py b/surfsense_backend/tests/unit/services/test_auto_model_pin_service.py
index d333f0b7a..49b3621c7 100644
--- a/surfsense_backend/tests/unit/services/test_auto_model_pin_service.py
+++ b/surfsense_backend/tests/unit/services/test_auto_model_pin_service.py
@@ -574,7 +574,7 @@ async def test_top_k_picks_only_high_score_models(monkeypatch):
     monkeypatch.setattr(
         config,
         "GLOBAL_LLM_CONFIGS",
-        high_score_cfgs + [low_score_trap],
+        [*high_score_cfgs, low_score_trap],
     )
 
     async def _allowed(*_args, **_kwargs):
diff --git a/surfsense_backend/tests/unit/services/test_llm_router_pool_filter.py b/surfsense_backend/tests/unit/services/test_llm_router_pool_filter.py
index 0191025ec..c309ff881 100644
--- a/surfsense_backend/tests/unit/services/test_llm_router_pool_filter.py
+++ b/surfsense_backend/tests/unit/services/test_llm_router_pool_filter.py
@@ -96,9 +96,12 @@ def test_router_pool_includes_or_premium_excludes_or_free():
         ),
     ]
 
-    with patch("app.services.llm_router_service.Router") as mock_router, patch(
-        "app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
-    ) as mock_ctx_fb:
+    with (
+        patch("app.services.llm_router_service.Router") as mock_router,
+        patch(
+            "app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
+        ) as mock_ctx_fb,
+    ):
         mock_ctx_fb.side_effect = lambda ml: (ml, None)
         mock_router.return_value = object()
         LLMRouterService.initialize(configs)
@@ -124,9 +127,10 @@ def test_router_pool_includes_or_premium_excludes_or_free():
     assert "openrouter/openai/gpt-4o" in prem
     assert LLMRouterService.is_premium_model("openrouter/openai/gpt-4o") is True
     # Dynamic OR free never enters the pool, so it's never counted as premium.
-    assert LLMRouterService.is_premium_model(
-        "openrouter/meta-llama/llama-3.3-70b:free"
-    ) is False
+    assert (
+        LLMRouterService.is_premium_model("openrouter/meta-llama/llama-3.3-70b:free")
+        is False
+    )
 
 
 def test_router_pool_filter_mechanics_respect_override():
@@ -147,9 +151,12 @@ def test_router_pool_filter_mechanics_respect_override():
         ),
     ]
 
-    with patch("app.services.llm_router_service.Router") as mock_router, patch(
-        "app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
-    ) as mock_ctx_fb:
+    with (
+        patch("app.services.llm_router_service.Router") as mock_router,
+        patch(
+            "app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
+        ) as mock_ctx_fb,
+    ):
         mock_ctx_fb.side_effect = lambda ml: (ml, None)
         mock_router.return_value = object()
         LLMRouterService.initialize(configs)
@@ -167,13 +174,17 @@ def test_rebuild_refreshes_pool_after_configs_change():
     configs_v1 = [
         _fake_yaml_config(id=-1, model_name="gpt-4o", billing_tier="premium"),
     ]
-    configs_v2 = configs_v1 + [
+    configs_v2 = [
+        *configs_v1,
         _fake_yaml_config(id=-2, model_name="gpt-4o-mini", billing_tier="free"),
     ]
 
-    with patch("app.services.llm_router_service.Router") as mock_router, patch(
-        "app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
-    ) as mock_ctx_fb:
+    with (
+        patch("app.services.llm_router_service.Router") as mock_router,
+        patch(
+            "app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
+        ) as mock_ctx_fb,
+    ):
         mock_ctx_fb.side_effect = lambda ml: (ml, None)
         mock_router.return_value = object()
 
diff --git a/surfsense_backend/tests/unit/services/test_openrouter_integration_service.py b/surfsense_backend/tests/unit/services/test_openrouter_integration_service.py
index d3921729d..085740032 100644
--- a/surfsense_backend/tests/unit/services/test_openrouter_integration_service.py
+++ b/surfsense_backend/tests/unit/services/test_openrouter_integration_service.py
@@ -214,5 +214,3 @@ def test_generate_configs_drops_non_text_and_non_tool_models():
     assert "openai/gpt-4o" in model_names
     assert "openai/dall-e" not in model_names
     assert "openai/completion-only" not in model_names
-
-
diff --git a/surfsense_backend/tests/unit/services/test_openrouter_legacy_config.py b/surfsense_backend/tests/unit/services/test_openrouter_legacy_config.py
index b3dd2bf18..4eb1f2295 100644
--- a/surfsense_backend/tests/unit/services/test_openrouter_legacy_config.py
+++ b/surfsense_backend/tests/unit/services/test_openrouter_legacy_config.py
@@ -68,9 +68,7 @@ openrouter_integration:
     assert "deprecated" in captured
 
 
-def test_new_keys_take_priority_over_legacy_back_compat(
-    monkeypatch, tmp_path, capsys
-):
+def test_new_keys_take_priority_over_legacy_back_compat(monkeypatch, tmp_path, capsys):
     """If both legacy and new keys are present, new keys win (setdefault)."""
     _write_yaml(
         tmp_path,
diff --git a/surfsense_backend/tests/unit/services/test_quality_score.py b/surfsense_backend/tests/unit/services/test_quality_score.py
index fbc91521d..6fbc8fd62 100644
--- a/surfsense_backend/tests/unit/services/test_quality_score.py
+++ b/surfsense_backend/tests/unit/services/test_quality_score.py
@@ -106,9 +106,12 @@ def test_context_signal_bands(ctx, expected):
 
 
 def test_capabilities_signal_caps_at_five():
-    assert capabilities_signal(
-        ["tools", "structured_outputs", "reasoning", "include_reasoning"]
-    ) <= 5
+    assert (
+        capabilities_signal(
+            ["tools", "structured_outputs", "reasoning", "include_reasoning"]
+        )
+        <= 5
+    )
 
 
 def test_capabilities_signal_tools_only():
diff --git a/surfsense_backend/tests/unit/test_stream_new_chat_contract.py b/surfsense_backend/tests/unit/test_stream_new_chat_contract.py
index 3676601f4..910009667 100644
--- a/surfsense_backend/tests/unit/test_stream_new_chat_contract.py
+++ b/surfsense_backend/tests/unit/test_stream_new_chat_contract.py
@@ -13,8 +13,8 @@ from app.tasks.chat.stream_new_chat import (
     StreamResult,
     _classify_stream_exception,
     _contract_enforcement_active,
-    _extract_resolved_file_path,
     _evaluate_file_contract_outcome,
+    _extract_resolved_file_path,
     _log_chat_stream_error,
     _tool_output_has_error,
 )
@@ -222,7 +222,7 @@ async def test_preflight_swallows_non_rate_limit_errors_and_re_raises_429(monkey
 
     from app.tasks.chat.stream_new_chat import _preflight_llm
 
-    class _RateLimitedExc(Exception):
+    class _RateLimitedError(Exception):
         """Class-name carries 'RateLimit' so _is_provider_rate_limited triggers."""
 
     rate_calls: list[dict] = []
@@ -230,7 +230,7 @@ async def test_preflight_swallows_non_rate_limit_errors_and_re_raises_429(monkey
 
     async def _fake_acompletion_429(**kwargs):
         rate_calls.append(kwargs)
-        raise _RateLimitedExc("simulated 429")
+        raise _RateLimitedError("simulated 429")
 
     async def _fake_acompletion_other(**kwargs):
         other_calls.append(kwargs)
@@ -245,7 +245,7 @@ async def test_preflight_swallows_non_rate_limit_errors_and_re_raises_429(monkey
     import litellm  # type: ignore[import-not-found]
 
     monkeypatch.setattr(litellm, "acompletion", _fake_acompletion_429)
-    with pytest.raises(_RateLimitedExc):
+    with pytest.raises(_RateLimitedError):
         await _preflight_llm(fake_llm)
     assert len(rate_calls) == 1
     assert rate_calls[0]["max_tokens"] == 1
diff --git a/surfsense_web/components/assistant-ui/markdown-text.tsx b/surfsense_web/components/assistant-ui/markdown-text.tsx
index bfbc3a423..9fddec360 100644
--- a/surfsense_web/components/assistant-ui/markdown-text.tsx
+++ b/surfsense_web/components/assistant-ui/markdown-text.tsx
@@ -19,6 +19,7 @@ import remarkMath from "remark-math";
 import { openEditorPanelAtom } from "@/atoms/editor/editor-panel.atom";
 import { ImagePreview, ImageRoot, ImageZoom } from "@/components/assistant-ui/image";
 import "katex/dist/katex.min.css";
+import { toast } from "sonner";
 import { processChildrenWithCitations } from "@/components/citations/citation-renderer";
 import { Skeleton } from "@/components/ui/skeleton";
 import {
@@ -33,7 +34,6 @@ import { useElectronAPI } from "@/hooks/use-platform";
 import { documentsApiService } from "@/lib/apis/documents-api.service";
 import { type CitationUrlMap, preprocessCitationMarkdown } from "@/lib/citations/citation-parser";
 import { cn } from "@/lib/utils";
-import { toast } from "sonner";
 
 function MarkdownCodeBlockSkeleton() {
 	return (
@@ -207,13 +207,7 @@ function isStandaloneDocumentsPathText(node: ReactNode): string | null {
 	return value;
 }
 
-function FilePathLink({
-	path,
-	className,
-}: {
-	path: string;
-	className?: string;
-}) {
+function FilePathLink({ path, className }: { path: string; className?: string }) {
 	const openEditorPanel = useSetAtom(openEditorPanelAtom);
 	const params = useParams();
 	const electronAPI = useElectronAPI();
@@ -221,7 +215,9 @@
 	const parsedSearchSpaceId = Array.isArray(searchSpaceIdParam)
 		? Number(searchSpaceIdParam[0])
 		: Number(searchSpaceIdParam);
-	const resolvedSearchSpaceId = Number.isFinite(parsedSearchSpaceId) ? parsedSearchSpaceId : undefined;
+	const resolvedSearchSpaceId = Number.isFinite(parsedSearchSpaceId)
+		? parsedSearchSpaceId
+		: undefined;
 
 	return (