chore: linting
Some checks are pending
Build and Push Docker Images / tag_release (push) Waiting to run
Build and Push Docker Images / build (./surfsense_backend, ./surfsense_backend/Dockerfile, backend, surfsense-backend, ubuntu-24.04-arm, linux/arm64, arm64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_backend, ./surfsense_backend/Dockerfile, backend, surfsense-backend, ubuntu-latest, linux/amd64, amd64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_web, ./surfsense_web/Dockerfile, web, surfsense-web, ubuntu-24.04-arm, linux/arm64, arm64) (push) Blocked by required conditions
Build and Push Docker Images / build (./surfsense_web, ./surfsense_web/Dockerfile, web, surfsense-web, ubuntu-latest, linux/amd64, amd64) (push) Blocked by required conditions
Build and Push Docker Images / create_manifest (backend, surfsense-backend) (push) Blocked by required conditions
Build and Push Docker Images / create_manifest (web, surfsense-web) (push) Blocked by required conditions

This commit is contained in:
DESKTOP-RTLN3BA$punk 2026-04-30 18:42:38 -07:00
parent 7aeb8bb0a8
commit c644f02d05
26 changed files with 346 additions and 380 deletions

View file

@@ -66,7 +66,13 @@ async def test_auto_first_turn_pins_one_model(monkeypatch):
"GLOBAL_LLM_CONFIGS",
[
{"id": -2, "provider": "OPENAI", "model_name": "gpt-free", "api_key": "k1"},
{"id": -1, "provider": "OPENAI", "model_name": "gpt-prem", "api_key": "k2", "billing_tier": "premium"},
{
"id": -1,
"provider": "OPENAI",
"model_name": "gpt-prem",
"api_key": "k2",
"billing_tier": "premium",
},
],
)
@@ -103,12 +109,20 @@ async def test_next_turn_reuses_existing_pin(monkeypatch):
config,
"GLOBAL_LLM_CONFIGS",
[
{"id": -1, "provider": "OPENAI", "model_name": "gpt-prem", "api_key": "k2", "billing_tier": "premium"},
{
"id": -1,
"provider": "OPENAI",
"model_name": "gpt-prem",
"api_key": "k2",
"billing_tier": "premium",
},
],
)
async def _must_not_call(*_args, **_kwargs):
raise AssertionError("premium_get_usage should not be called for valid pin reuse")
raise AssertionError(
"premium_get_usage should not be called for valid pin reuse"
)
monkeypatch.setattr(
"app.services.auto_model_pin_service.TokenQuotaService.premium_get_usage",
@@ -136,7 +150,13 @@ async def test_premium_eligible_auto_can_pin_premium(monkeypatch):
config,
"GLOBAL_LLM_CONFIGS",
[
{"id": -1, "provider": "OPENAI", "model_name": "gpt-prem", "api_key": "k2", "billing_tier": "premium"},
{
"id": -1,
"provider": "OPENAI",
"model_name": "gpt-prem",
"api_key": "k2",
"billing_tier": "premium",
},
],
)
@@ -168,8 +188,20 @@ async def test_premium_ineligible_auto_pins_free_only(monkeypatch):
config,
"GLOBAL_LLM_CONFIGS",
[
{"id": -2, "provider": "OPENAI", "model_name": "gpt-free", "api_key": "k1", "billing_tier": "free"},
{"id": -1, "provider": "OPENAI", "model_name": "gpt-prem", "api_key": "k2", "billing_tier": "premium"},
{
"id": -2,
"provider": "OPENAI",
"model_name": "gpt-free",
"api_key": "k1",
"billing_tier": "free",
},
{
"id": -1,
"provider": "OPENAI",
"model_name": "gpt-prem",
"api_key": "k2",
"billing_tier": "premium",
},
],
)
@@ -203,8 +235,20 @@ async def test_pinned_premium_stays_premium_after_quota_exhaustion(monkeypatch):
config,
"GLOBAL_LLM_CONFIGS",
[
{"id": -2, "provider": "OPENAI", "model_name": "gpt-free", "api_key": "k1", "billing_tier": "free"},
{"id": -1, "provider": "OPENAI", "model_name": "gpt-prem", "api_key": "k2", "billing_tier": "premium"},
{
"id": -2,
"provider": "OPENAI",
"model_name": "gpt-free",
"api_key": "k1",
"billing_tier": "free",
},
{
"id": -1,
"provider": "OPENAI",
"model_name": "gpt-prem",
"api_key": "k2",
"billing_tier": "premium",
},
],
)
@@ -238,8 +282,20 @@ async def test_force_repin_free_switches_auto_premium_pin_to_free(monkeypatch):
config,
"GLOBAL_LLM_CONFIGS",
[
{"id": -2, "provider": "OPENAI", "model_name": "gpt-free", "api_key": "k1", "billing_tier": "free"},
{"id": -1, "provider": "OPENAI", "model_name": "gpt-prem", "api_key": "k2", "billing_tier": "premium"},
{
"id": -2,
"provider": "OPENAI",
"model_name": "gpt-free",
"api_key": "k1",
"billing_tier": "free",
},
{
"id": -1,
"provider": "OPENAI",
"model_name": "gpt-prem",
"api_key": "k2",
"billing_tier": "premium",
},
],
)

View file

@@ -203,7 +203,10 @@ def test_stream_exception_classifies_turn_cancelling_when_cancel_requested():
def test_premium_classification_is_error_code_driven():
classifier_path = Path(__file__).resolve().parents[3] / "surfsense_web/lib/chat/chat-error-classifier.ts"
classifier_path = (
Path(__file__).resolve().parents[3]
/ "surfsense_web/lib/chat/chat-error-classifier.ts"
)
source = classifier_path.read_text(encoding="utf-8")
assert "PREMIUM_KEYWORDS" not in source
@@ -229,7 +232,8 @@ def test_stream_terminal_error_handler_has_pre_accept_soft_rollback_hook():
def test_toast_only_pre_accept_policy_has_no_inline_failed_marker():
user_message_path = (
Path(__file__).resolve().parents[3] / "surfsense_web/components/assistant-ui/user-message.tsx"
Path(__file__).resolve().parents[3]
/ "surfsense_web/components/assistant-ui/user-message.tsx"
)
source = user_message_path.read_text(encoding="utf-8")
@@ -238,10 +242,14 @@ def test_toast_only_pre_accept_policy_has_no_inline_failed_marker():
def test_network_send_failures_use_unified_retry_toast_message():
classifier_path = Path(__file__).resolve().parents[3] / "surfsense_web/lib/chat/chat-error-classifier.ts"
classifier_path = (
Path(__file__).resolve().parents[3]
/ "surfsense_web/lib/chat/chat-error-classifier.ts"
)
classifier_source = classifier_path.read_text(encoding="utf-8")
request_errors_path = (
Path(__file__).resolve().parents[3] / "surfsense_web/lib/chat/chat-request-errors.ts"
Path(__file__).resolve().parents[3]
/ "surfsense_web/lib/chat/chat-request-errors.ts"
)
request_errors_source = request_errors_path.read_text(encoding="utf-8")
@@ -350,15 +358,17 @@ def test_turn_status_sse_contract_exists():
/ "surfsense_backend/app/tasks/chat/stream_new_chat.py"
).read_text(encoding="utf-8")
state_source = (
Path(__file__).resolve().parents[3] / "surfsense_web/lib/chat/streaming-state.ts"
Path(__file__).resolve().parents[3]
/ "surfsense_web/lib/chat/streaming-state.ts"
).read_text(encoding="utf-8")
pipeline_source = (
Path(__file__).resolve().parents[3] / "surfsense_web/lib/chat/stream-pipeline.ts"
Path(__file__).resolve().parents[3]
/ "surfsense_web/lib/chat/stream-pipeline.ts"
).read_text(encoding="utf-8")
assert '"turn-status"' in stream_source
assert '"status": "busy"' in stream_source
assert '"status": "idle"' in stream_source
assert "type: \"data-turn-status\"" in state_source
assert "case \"data-turn-status\":" in pipeline_source
assert 'type: "data-turn-status"' in state_source
assert 'case "data-turn-status":' in pipeline_source
assert "end_turn(str(chat_id))" in stream_source