fix black formatting for Python test files

commit 74f9b44378 (parent c4634d0034)
Author: Adil Hafeez
Date: 2026-03-15 08:49:22 +00:00

2 changed files with 103 additions and 50 deletions
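
Every hunk below shows the same black transformation: a pytest.mark.parametrize(...) call that no longer fits black's default 88-column line length is exploded so that each argument sits on its own line, with a trailing comma after the last one. A minimal before/after sketch of the pattern, reusing values from the hunks below (illustrative, not itself part of the commit):

# Before: one call crammed onto a single line, past black's 88-column limit
@pytest.mark.parametrize("current,latest", [("0.4.1", "1.0.0"), ("0.4.1", "0.5.0"), ("0.4.1", "0.4.2")], ids=["major", "minor", "patch"])

# After black: one argument per line, magic trailing comma on the last
@pytest.mark.parametrize(
    "current,latest",
    [("0.4.1", "1.0.0"), ("0.4.1", "0.5.0"), ("0.4.1", "0.4.2")],
    ids=["major", "minor", "patch"],
)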

(first changed file)

@@ -11,7 +11,9 @@ def cleanup_env(monkeypatch):
     monkeypatch.undo()


-@pytest.mark.parametrize("plano_config", [
+@pytest.mark.parametrize(
+    "plano_config",
+    [
         # Case 1: LLM provider config
         """
 version: v0.1.0
@@ -86,7 +88,9 @@ model_providers:
   - access_key: ${OPENAI_API_KEY}
     model: openai/gpt-4o
 """,
-], ids=["llm_provider_config", "agent_config"])
+    ],
+    ids=["llm_provider_config", "agent_config"],
+)
 def test_validate_and_render_happy_path(monkeypatch, plano_config):
     monkeypatch.setenv("PLANO_CONFIG_FILE", "fake_plano_config.yaml")
     monkeypatch.setenv("PLANO_CONFIG_SCHEMA_FILE", "fake_plano_config_schema.yaml")
@@ -310,29 +314,59 @@ def test_validate_and_render_schema_tests(monkeypatch, plano_config_test_case):
         validate_and_render_schema()


-@pytest.mark.parametrize("listeners,expected_providers,expected_llm_gateway,expected_prompt_gateway", [
+@pytest.mark.parametrize(
+    "listeners,expected_providers,expected_llm_gateway,expected_prompt_gateway",
+    [
         # Case 1: With prompt gateway (ingress + egress)
         (
             {
-                "ingress_traffic": {"address": "0.0.0.0", "port": 10000, "timeout": "30s"},
-                "egress_traffic": {"address": "0.0.0.0", "port": 12000, "timeout": "30s"},
+                "ingress_traffic": {
+                    "address": "0.0.0.0",
+                    "port": 10000,
+                    "timeout": "30s",
+                },
+                "egress_traffic": {
+                    "address": "0.0.0.0",
+                    "port": 12000,
+                    "timeout": "30s",
+                },
             },
             [
                 {
-                    "name": "egress_traffic", "type": "model_listener", "port": 12000,
-                    "address": "0.0.0.0", "timeout": "30s",
-                    "model_providers": [{"model": "openai/gpt-4o", "access_key": "test_key"}],
+                    "name": "egress_traffic",
+                    "type": "model_listener",
+                    "port": 12000,
+                    "address": "0.0.0.0",
+                    "timeout": "30s",
+                    "model_providers": [
+                        {"model": "openai/gpt-4o", "access_key": "test_key"}
+                    ],
                 },
                 {
-                    "name": "ingress_traffic", "type": "prompt_listener", "port": 10000,
-                    "address": "0.0.0.0", "timeout": "30s",
+                    "name": "ingress_traffic",
+                    "type": "prompt_listener",
+                    "port": 10000,
+                    "address": "0.0.0.0",
+                    "timeout": "30s",
                 },
             ],
             {
-                "address": "0.0.0.0", "model_providers": [{"access_key": "test_key", "model": "openai/gpt-4o"}],
-                "name": "egress_traffic", "type": "model_listener", "port": 12000, "timeout": "30s",
+                "address": "0.0.0.0",
+                "model_providers": [
+                    {"access_key": "test_key", "model": "openai/gpt-4o"}
+                ],
+                "name": "egress_traffic",
+                "type": "model_listener",
+                "port": 12000,
+                "timeout": "30s",
             },
-            {"address": "0.0.0.0", "name": "ingress_traffic", "port": 10000, "timeout": "30s", "type": "prompt_listener"},
+            {
+                "address": "0.0.0.0",
+                "name": "ingress_traffic",
+                "port": 10000,
+                "timeout": "30s",
+                "type": "prompt_listener",
+            },
         ),
         # Case 2: Without prompt gateway (egress only)
         (
@@ -340,19 +374,35 @@ def test_validate_and_render_schema_tests(monkeypatch, plano_config_test_case):
             [
                 {
                     "address": "0.0.0.0",
-                    "model_providers": [{"access_key": "test_key", "model": "openai/gpt-4o"}],
-                    "name": "egress_traffic", "port": 12000, "timeout": "30s", "type": "model_listener",
+                    "model_providers": [
+                        {"access_key": "test_key", "model": "openai/gpt-4o"}
+                    ],
+                    "name": "egress_traffic",
+                    "port": 12000,
+                    "timeout": "30s",
+                    "type": "model_listener",
                 }
             ],
             {
-                "address": "0.0.0.0", "model_providers": [{"access_key": "test_key", "model": "openai/gpt-4o"}],
-                "name": "egress_traffic", "type": "model_listener", "port": 12000, "timeout": "30s",
+                "address": "0.0.0.0",
+                "model_providers": [
+                    {"access_key": "test_key", "model": "openai/gpt-4o"}
+                ],
+                "name": "egress_traffic",
+                "type": "model_listener",
+                "port": 12000,
+                "timeout": "30s",
             },
             None,
         ),
-], ids=["with_prompt_gateway", "without_prompt_gateway"])
-def test_convert_legacy_llm_providers(listeners, expected_providers, expected_llm_gateway, expected_prompt_gateway):
+    ],
+    ids=["with_prompt_gateway", "without_prompt_gateway"],
+)
+def test_convert_legacy_llm_providers(
+    listeners, expected_providers, expected_llm_gateway, expected_prompt_gateway
+):
     from planoai.utils import convert_legacy_listeners
+
     llm_providers = [{"model": "openai/gpt-4o", "access_key": "test_key"}]
     updated_providers, llm_gateway, prompt_gateway = convert_legacy_listeners(
         listeners, llm_providers
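
A reformat like the one above can be reproduced programmatically with black's Python API, which is convenient for spot-checking a single snippet. A minimal sketch, assuming black is installed; the snippet is illustrative and not part of this commit:

import black

# An over-long parametrize call, in the same style as the removed lines above.
src = (
    '@pytest.mark.parametrize("current,latest", '
    '[("0.4.1", "1.0.0"), ("0.4.1", "0.5.0"), ("0.4.1", "0.4.2")], '
    'ids=["major", "minor", "patch"])\n'
    "def test_version_outdated(current, latest):\n"
    "    pass\n"
)

# black.Mode() carries the defaults: 88-column lines, magic trailing comma.
print(black.format_str(src, mode=black.Mode()))

On the command line, black --diff <file> prints the same change without writing it, and black --check <file> fails if anything would be reformatted, which is the usual CI guard.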

(second changed file)

@@ -52,11 +52,15 @@ class TestCheckVersionStatus:
         assert status["is_outdated"] is False
         assert status["message"] is None

-    @pytest.mark.parametrize("current,latest", [
+    @pytest.mark.parametrize(
+        "current,latest",
+        [
             ("0.4.1", "1.0.0"),  # major
             ("0.4.1", "0.5.0"),  # minor
             ("0.4.1", "0.4.2"),  # patch
-    ], ids=["major", "minor", "patch"])
+        ],
+        ids=["major", "minor", "patch"],
+    )
     def test_version_outdated(self, current, latest):
         status = check_version_status(current, latest)
         assert status["is_outdated"] is True
@@ -150,4 +154,3 @@ class TestVersionCheckIntegration:
         status = check_version_status(current_version, latest)
         assert status["is_outdated"] is False
 
-
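
A usage note on the ids= argument that black keeps reflowing in these hunks: the ids become part of each generated test's node ID, so they are what appears in reports and what -k matches against. A self-contained sketch (hypothetical test, not taken from this repo):

import pytest


@pytest.mark.parametrize(
    "current,latest",
    [
        ("0.4.1", "1.0.0"),  # major
        ("0.4.1", "0.5.0"),  # minor
        ("0.4.1", "0.4.2"),  # patch
    ],
    ids=["major", "minor", "patch"],
)
def test_version_parts_differ(current, latest):
    # Collected as test_version_parts_differ[major], [minor], and [patch],
    # so a single case can be run with: pytest -k major
    assert current != latest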