mirror of
https://github.com/katanemo/plano.git
synced 2026-04-25 08:46:24 +02:00
add support for agents (#564)
This commit is contained in:
parent
f8991a3c4b
commit
96e0732089
41 changed files with 3571 additions and 856 deletions
|
|
@ -1,3 +1,4 @@
|
|||
import json
|
||||
import pytest
|
||||
from unittest import mock
|
||||
import sys
|
||||
|
|
@ -81,10 +82,88 @@ tracing:
|
|||
validate_and_render_schema()
|
||||
|
||||
|
||||
def test_validate_and_render_happy_path_agent_config(monkeypatch):
    """Happy-path check for an agent-style (v0.3.0) config.

    Points the generator at fake config paths via environment variables,
    feeds it an agent-based arch config plus the real schema, and asserts
    that validate_and_render_schema() completes without raising.
    """
    # All file paths are fakes; reads/writes are intercepted by the
    # mock_open patch below. TEMPLATE_ROOT points one level up so the
    # real envoy template can be located.
    monkeypatch.setenv("ARCH_CONFIG_FILE", "fake_arch_config.yaml")
    monkeypatch.setenv("ARCH_CONFIG_SCHEMA_FILE", "fake_arch_config_schema.yaml")
    monkeypatch.setenv("ENVOY_CONFIG_TEMPLATE_FILE", "./envoy.template.yaml")
    monkeypatch.setenv("ARCH_CONFIG_FILE_RENDERED", "fake_arch_config_rendered.yaml")
    monkeypatch.setenv("ENVOY_CONFIG_FILE_RENDERED", "fake_envoy.yaml")
    monkeypatch.setenv("TEMPLATE_ROOT", "../")

    # Agent-based config: five openai-kind agents, an `agent` listener
    # whose sub-agents chain them via filter_chain, and a `model`
    # listener carrying the llm provider.
    arch_config = """
version: v0.3.0

agents:
  - name: query_rewriter
    kind: openai
    endpoint: http://localhost:10500
  - name: context_builder
    kind: openai
    endpoint: http://localhost:10501
  - name: response_generator
    kind: openai
    endpoint: http://localhost:10502
  - name: research_agent
    kind: openai
    endpoint: http://localhost:10500
  - name: input_guard_rails
    kind: openai
    endpoint: http://localhost:10503

listeners:
  - name: tmobile
    type: agent
    router: arch_agent_v2
    agents:
      - name: simple_tmobile_rag_agent
        description: t-mobile virtual assistant for device contracts.
        filter_chain:
          - query_rewriter
          - context_builder
          - response_generator
      - name: research_agent
        description: agent to research and gather information from various sources.
        filter_chain:
          - research_agent
          - response_generator
    port: 8000

  - name: llm_provider
    type: model
    description: llm provider configuration
    port: 12000
    llm_providers:
      - access_key: ${OPENAI_API_KEY}
        model: openai/gpt-4o
"""
    # The real schema is read from disk BEFORE builtins.open is patched,
    # so validation runs against the actual shipped schema.
    arch_config_schema = ""
    with open("../arch_config_schema.yaml", "r") as file:
        arch_config_schema = file.read()

    m_open = mock.mock_open()
    # Provide enough file handles for all open() calls in validate_and_render_schema
    # NOTE: the side_effect list is order-sensitive — each entry is consumed
    # by the corresponding open() call inside validate_and_render_schema.
    m_open.side_effect = [
        mock.mock_open(read_data="").return_value,
        mock.mock_open(read_data=arch_config).return_value,  # ARCH_CONFIG_FILE
        mock.mock_open(
            read_data=arch_config_schema
        ).return_value,  # ARCH_CONFIG_SCHEMA_FILE
        mock.mock_open(read_data=arch_config).return_value,  # ARCH_CONFIG_FILE
        mock.mock_open(
            read_data=arch_config_schema
        ).return_value,  # ARCH_CONFIG_SCHEMA_FILE
        mock.mock_open().return_value,  # ENVOY_CONFIG_FILE_RENDERED (write)
        mock.mock_open().return_value,  # ARCH_CONFIG_FILE_RENDERED (write)
    ]
    # Patch Jinja's Environment as well so no real template rendering
    # (and no real filesystem template lookup) occurs.
    with mock.patch("builtins.open", m_open):
        with mock.patch("config_generator.Environment"):
            validate_and_render_schema()
|
||||
|
||||
|
||||
arch_config_test_cases = [
|
||||
{
|
||||
"id": "duplicate_provider_name",
|
||||
"expected_error": "Duplicate llm_provider name",
|
||||
"expected_error": "Duplicate model_provider name",
|
||||
"arch_config": """
|
||||
version: v0.1.0
|
||||
|
||||
|
|
@ -270,3 +349,126 @@ def test_validate_and_render_schema_tests(monkeypatch, arch_config_test_case):
|
|||
with pytest.raises(Exception) as excinfo:
|
||||
validate_and_render_schema()
|
||||
assert expected_error in str(excinfo.value)
|
||||
|
||||
|
||||
def test_convert_legacy_llm_providers():
    """Migration of a full legacy config (ingress + egress listeners).

    convert_legacy_listeners should turn the legacy listener mapping plus the
    flat llm_providers list into the new listener-list format, and also return
    the derived llm (egress/model) and prompt (ingress) gateway entries.
    """
    from cli.utils import convert_legacy_listeners

    # Legacy shape: listeners keyed by role rather than a list of entries.
    listeners = {
        "ingress_traffic": {
            "address": "0.0.0.0",
            "port": 10000,
            "timeout": "30s",
        },
        "egress_traffic": {
            "address": "0.0.0.0",
            "port": 12000,
            "timeout": "30s",
        },
    }
    llm_providers = [
        {
            "model": "openai/gpt-4o",
            "access_key": "test_key",
        }
    ]

    updated_providers, llm_gateway, prompt_gateway = convert_legacy_listeners(
        listeners, llm_providers
    )
    assert isinstance(updated_providers, list)
    assert llm_gateway is not None
    assert prompt_gateway is not None
    # egress_traffic becomes a model_listener carrying the providers;
    # ingress_traffic becomes a plain prompt_listener.
    assert updated_providers == [
        {
            "name": "egress_traffic",
            "type": "model_listener",
            "port": 12000,
            "address": "0.0.0.0",
            "timeout": "30s",
            "model_providers": [{"model": "openai/gpt-4o", "access_key": "test_key"}],
        },
        {
            "name": "ingress_traffic",
            "type": "prompt_listener",
            "port": 10000,
            "address": "0.0.0.0",
            "timeout": "30s",
        },
    ]

    # The llm gateway mirrors the converted egress listener.
    assert llm_gateway == {
        "address": "0.0.0.0",
        "model_providers": [
            {
                "access_key": "test_key",
                "model": "openai/gpt-4o",
            },
        ],
        "name": "egress_traffic",
        "type": "model_listener",
        "port": 12000,
        "timeout": "30s",
    }

    # The prompt gateway mirrors the converted ingress listener.
    assert prompt_gateway == {
        "address": "0.0.0.0",
        "name": "ingress_traffic",
        "port": 10000,
        "timeout": "30s",
        "type": "prompt_listener",
    }
|
||||
|
||||
|
||||
def test_convert_legacy_llm_providers_no_prompt_gateway():
    """Migration of a legacy config that defines only egress_traffic.

    Only a single model_listener entry should be produced from the
    converted listeners; the llm gateway should mirror it.
    """
    from cli.utils import convert_legacy_listeners

    # Legacy shape with NO ingress_traffic listener.
    listeners = {
        "egress_traffic": {
            "address": "0.0.0.0",
            "port": 12000,
            "timeout": "30s",
        }
    }
    llm_providers = [
        {
            "model": "openai/gpt-4o",
            "access_key": "test_key",
        }
    ]

    updated_providers, llm_gateway, prompt_gateway = convert_legacy_listeners(
        listeners, llm_providers
    )
    assert isinstance(updated_providers, list)
    assert llm_gateway is not None
    # NOTE(review): the test name says "no prompt gateway", yet this asserts
    # prompt_gateway is NOT None. Confirm whether convert_legacy_listeners
    # synthesizes a default prompt gateway when ingress_traffic is absent,
    # or whether this assertion should be `is None`.
    assert prompt_gateway is not None
    # Only the egress listener is converted; no prompt_listener entry.
    assert updated_providers == [
        {
            "address": "0.0.0.0",
            "model_providers": [
                {
                    "access_key": "test_key",
                    "model": "openai/gpt-4o",
                },
            ],
            "name": "egress_traffic",
            "port": 12000,
            "timeout": "30s",
            "type": "model_listener",
        }
    ]
    # The llm gateway mirrors the converted egress listener.
    assert llm_gateway == {
        "address": "0.0.0.0",
        "model_providers": [
            {
                "access_key": "test_key",
                "model": "openai/gpt-4o",
            },
        ],
        "name": "egress_traffic",
        "type": "model_listener",
        "port": 12000,
        "timeout": "30s",
    }
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue