chore: ran linting

This commit is contained in:
Anish Sarkar 2026-05-02 03:36:13 +05:30
parent b9b4d0b377
commit cd25175b84
14 changed files with 78 additions and 82 deletions

View file

@@ -574,7 +574,7 @@ async def test_top_k_picks_only_high_score_models(monkeypatch):
monkeypatch.setattr(
config,
"GLOBAL_LLM_CONFIGS",
high_score_cfgs + [low_score_trap],
[*high_score_cfgs, low_score_trap],
)
async def _allowed(*_args, **_kwargs):

View file

@@ -96,9 +96,12 @@ def test_router_pool_includes_or_premium_excludes_or_free():
),
]
with patch("app.services.llm_router_service.Router") as mock_router, patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb:
with (
patch("app.services.llm_router_service.Router") as mock_router,
patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb,
):
mock_ctx_fb.side_effect = lambda ml: (ml, None)
mock_router.return_value = object()
LLMRouterService.initialize(configs)
@@ -124,9 +127,10 @@ def test_router_pool_includes_or_premium_excludes_or_free():
assert "openrouter/openai/gpt-4o" in prem
assert LLMRouterService.is_premium_model("openrouter/openai/gpt-4o") is True
# Dynamic OR free never enters the pool, so it's never counted as premium.
assert LLMRouterService.is_premium_model(
"openrouter/meta-llama/llama-3.3-70b:free"
) is False
assert (
LLMRouterService.is_premium_model("openrouter/meta-llama/llama-3.3-70b:free")
is False
)
def test_router_pool_filter_mechanics_respect_override():
@@ -147,9 +151,12 @@ def test_router_pool_filter_mechanics_respect_override():
),
]
with patch("app.services.llm_router_service.Router") as mock_router, patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb:
with (
patch("app.services.llm_router_service.Router") as mock_router,
patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb,
):
mock_ctx_fb.side_effect = lambda ml: (ml, None)
mock_router.return_value = object()
LLMRouterService.initialize(configs)
@@ -167,13 +174,17 @@ def test_rebuild_refreshes_pool_after_configs_change():
configs_v1 = [
_fake_yaml_config(id=-1, model_name="gpt-4o", billing_tier="premium"),
]
configs_v2 = configs_v1 + [
configs_v2 = [
*configs_v1,
_fake_yaml_config(id=-2, model_name="gpt-4o-mini", billing_tier="free"),
]
with patch("app.services.llm_router_service.Router") as mock_router, patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb:
with (
patch("app.services.llm_router_service.Router") as mock_router,
patch(
"app.services.llm_router_service.LLMRouterService._build_context_fallback_groups"
) as mock_ctx_fb,
):
mock_ctx_fb.side_effect = lambda ml: (ml, None)
mock_router.return_value = object()

View file

@@ -214,5 +214,3 @@ def test_generate_configs_drops_non_text_and_non_tool_models():
assert "openai/gpt-4o" in model_names
assert "openai/dall-e" not in model_names
assert "openai/completion-only" not in model_names

View file

@@ -68,9 +68,7 @@ openrouter_integration:
assert "deprecated" in captured
def test_new_keys_take_priority_over_legacy_back_compat(
monkeypatch, tmp_path, capsys
):
def test_new_keys_take_priority_over_legacy_back_compat(monkeypatch, tmp_path, capsys):
"""If both legacy and new keys are present, new keys win (setdefault)."""
_write_yaml(
tmp_path,

View file

@@ -106,9 +106,12 @@ def test_context_signal_bands(ctx, expected):
def test_capabilities_signal_caps_at_five():
assert capabilities_signal(
["tools", "structured_outputs", "reasoning", "include_reasoning"]
) <= 5
assert (
capabilities_signal(
["tools", "structured_outputs", "reasoning", "include_reasoning"]
)
<= 5
)
def test_capabilities_signal_tools_only():