support base_url path for model providers (#608)

* adding support for base_url

* updated docs

* fixed tests for config generator

* making fixes based on PR comments

---------

Co-authored-by: Salman Paracha <salmanparacha@MacBook-Pro-288.local>
This commit is contained in:
Salman Paracha 2025-10-29 17:08:07 -07:00 committed by GitHub
parent 5108013df4
commit cdfcfb9169
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
7 changed files with 459 additions and 46 deletions

View file

@ -8,7 +8,14 @@ from urllib.parse import urlparse
from copy import deepcopy
SUPPORTED_PROVIDERS = [
SUPPORTED_PROVIDERS_WITH_BASE_URL = [
"azure_openai",
"ollama",
"qwen",
"amazon_bedrock",
]
SUPPORTED_PROVIDERS_WITHOUT_BASE_URL = [
"arch",
"deepseek",
"groq",
@ -17,15 +24,15 @@ SUPPORTED_PROVIDERS = [
"gemini",
"anthropic",
"together_ai",
"azure_openai",
"xai",
"ollama",
"moonshotai",
"zhipu",
"qwen",
"amazon_bedrock",
]
SUPPORTED_PROVIDERS = (
SUPPORTED_PROVIDERS_WITHOUT_BASE_URL + SUPPORTED_PROVIDERS_WITH_BASE_URL
)
def get_endpoint_and_port(endpoint, protocol):
endpoint_tokens = endpoint.split(":")
@ -189,12 +196,9 @@ def validate_and_render_schema():
provider = model_name_tokens[0]
# Validate that every provider in SUPPORTED_PROVIDERS_WITH_BASE_URL has base_url set
if (
provider == "azure_openai"
or provider == "ollama"
or provider == "qwen"
or provider == "amazon_bedrock"
) and model_provider.get("base_url") is None:
if (provider in SUPPORTED_PROVIDERS_WITH_BASE_URL) and model_provider.get(
"base_url"
) is None:
raise Exception(
f"Provider '{provider}' requires 'base_url' to be set for model {model_name}"
)
@ -245,11 +249,11 @@ def validate_and_render_schema():
if model_provider.get("base_url", None):
base_url = model_provider["base_url"]
urlparse_result = urlparse(base_url)
url_path = urlparse_result.path
if url_path and url_path != "/":
raise Exception(
f"Please provide base_url without path, got {base_url}. Use base_url like 'http://example.com' instead of 'http://example.com/path'."
)
base_url_path_prefix = urlparse_result.path
if base_url_path_prefix and base_url_path_prefix != "/":
# A path in base_url is now supported: for a base_url like http://example.com/path, extract "/path" and store it as base_url_path_prefix.
model_provider["base_url_path_prefix"] = base_url_path_prefix
if urlparse_result.scheme == "" or urlparse_result.scheme not in [
"http",
"https",

View file

@ -243,14 +243,13 @@ listeners:
timeout: 30s
llm_providers:
- model: custom/gpt-4o
""",
},
{
"id": "base_url_no_prefix",
"expected_error": "Please provide base_url without path",
"id": "base_url_with_path_prefix",
"expected_error": None,
"arch_config": """
version: v0.1.0
@ -264,7 +263,7 @@ listeners:
llm_providers:
- model: custom/gpt-4o
base_url: "http://custom.com/test"
base_url: "http://custom.com/api/v2"
provider_interface: openai
""",
@ -322,8 +321,7 @@ def test_validate_and_render_schema_tests(monkeypatch, arch_config_test_case):
monkeypatch.setenv("TEMPLATE_ROOT", "../")
arch_config = arch_config_test_case["arch_config"]
expected_error = arch_config_test_case["expected_error"]
test_id = arch_config_test_case["id"]
expected_error = arch_config_test_case.get("expected_error")
arch_config_schema = ""
with open("../arch_config_schema.yaml", "r") as file:
@ -346,9 +344,14 @@ def test_validate_and_render_schema_tests(monkeypatch, arch_config_test_case):
]
with mock.patch("builtins.open", m_open):
with mock.patch("config_generator.Environment"):
with pytest.raises(Exception) as excinfo:
if expected_error:
# Test expects an error
with pytest.raises(Exception) as excinfo:
validate_and_render_schema()
assert expected_error in str(excinfo.value)
else:
# Test expects success - no exception should be raised
validate_and_render_schema()
assert expected_error in str(excinfo.value)
def test_convert_legacy_llm_providers():