chore: ran linting

This commit is contained in:
Anish Sarkar 2026-05-02 03:36:13 +05:30
parent b9b4d0b377
commit cd25175b84
14 changed files with 78 additions and 82 deletions

View file

@@ -574,7 +574,7 @@ async def test_top_k_picks_only_high_score_models(monkeypatch):
monkeypatch.setattr(
config,
"GLOBAL_LLM_CONFIGS",
high_score_cfgs + [low_score_trap],
[*high_score_cfgs, low_score_trap],
)
async def _allowed(*_args, **_kwargs):

View file

@@ -96,9 +96,12 @@ def test_router_pool_includes_or_premium_excludes_or_free():
),
]
with patch("app.services.llm_router_service.Router") as mock_router, patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb:
with (
patch("app.services.llm_router_service.Router") as mock_router,
patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb,
):
mock_ctx_fb.side_effect = lambda ml: (ml, None)
mock_router.return_value = object()
LLMRouterService.initialize(configs)
@@ -124,9 +127,10 @@ def test_router_pool_includes_or_premium_excludes_or_free():
assert "openrouter/openai/gpt-4o" in prem
assert LLMRouterService.is_premium_model("openrouter/openai/gpt-4o") is True
# Dynamic OR free never enters the pool, so it's never counted as premium.
assert LLMRouterService.is_premium_model(
"openrouter/meta-llama/llama-3.3-70b:free"
) is False
assert (
LLMRouterService.is_premium_model("openrouter/meta-llama/llama-3.3-70b:free")
is False
)
def test_router_pool_filter_mechanics_respect_override():
@@ -147,9 +151,12 @@ def test_router_pool_filter_mechanics_respect_override():
),
]
with patch("app.services.llm_router_service.Router") as mock_router, patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb:
with (
patch("app.services.llm_router_service.Router") as mock_router,
patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb,
):
mock_ctx_fb.side_effect = lambda ml: (ml, None)
mock_router.return_value = object()
LLMRouterService.initialize(configs)
@@ -167,13 +174,17 @@ def test_rebuild_refreshes_pool_after_configs_change():
configs_v1 = [
_fake_yaml_config(id=-1, model_name="gpt-4o", billing_tier="premium"),
]
configs_v2 = configs_v1 + [
configs_v2 = [
*configs_v1,
_fake_yaml_config(id=-2, model_name="gpt-4o-mini", billing_tier="free"),
]
with patch("app.services.llm_router_service.Router") as mock_router, patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb:
with (
patch("app.services.llm_router_service.Router") as mock_router,
patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb,
):
mock_ctx_fb.side_effect = lambda ml: (ml, None)
mock_router.return_value = object()

View file

@@ -214,5 +214,3 @@ def test_generate_configs_drops_non_text_and_non_tool_models():
assert "openai/gpt-4o" in model_names
assert "openai/dall-e" not in model_names
assert "openai/completion-only" not in model_names

View file

@@ -68,9 +68,7 @@ openrouter_integration:
assert "deprecated" in captured
def test_new_keys_take_priority_over_legacy_back_compat(
monkeypatch, tmp_path, capsys
):
def test_new_keys_take_priority_over_legacy_back_compat(monkeypatch, tmp_path, capsys):
"""If both legacy and new keys are present, new keys win (setdefault)."""
_write_yaml(
tmp_path,

View file

@@ -106,9 +106,12 @@ def test_context_signal_bands(ctx, expected):
def test_capabilities_signal_caps_at_five():
assert capabilities_signal(
["tools", "structured_outputs", "reasoning", "include_reasoning"]
) <= 5
assert (
capabilities_signal(
["tools", "structured_outputs", "reasoning", "include_reasoning"]
)
<= 5
)
def test_capabilities_signal_tools_only():

View file

@@ -13,8 +13,8 @@ from app.tasks.chat.stream_new_chat import (
StreamResult,
_classify_stream_exception,
_contract_enforcement_active,
_extract_resolved_file_path,
_evaluate_file_contract_outcome,
_extract_resolved_file_path,
_log_chat_stream_error,
_tool_output_has_error,
)
@@ -222,7 +222,7 @@ async def test_preflight_swallows_non_rate_limit_errors_and_re_raises_429(monkey
from app.tasks.chat.stream_new_chat import _preflight_llm
class _RateLimitedExc(Exception):
class _RateLimitedError(Exception):
"""Class-name carries 'RateLimit' so _is_provider_rate_limited triggers."""
rate_calls: list[dict] = []
@@ -230,7 +230,7 @@ async def test_preflight_swallows_non_rate_limit_errors_and_re_raises_429(monkey
async def _fake_acompletion_429(**kwargs):
rate_calls.append(kwargs)
raise _RateLimitedExc("simulated 429")
raise _RateLimitedError("simulated 429")
async def _fake_acompletion_other(**kwargs):
other_calls.append(kwargs)
@@ -245,7 +245,7 @@ async def test_preflight_swallows_non_rate_limit_errors_and_re_raises_429(monkey
import litellm # type: ignore[import-not-found]
monkeypatch.setattr(litellm, "acompletion", _fake_acompletion_429)
with pytest.raises(_RateLimitedExc):
with pytest.raises(_RateLimitedError):
await _preflight_llm(fake_llm)
assert len(rate_calls) == 1
assert rate_calls[0]["max_tokens"] == 1