mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-25 08:26:21 +02:00
release/v1.4 -> master (#548)
This commit is contained in:
parent
3ec2cd54f9
commit
2bd68ed7f4
94 changed files with 8571 additions and 1740 deletions
238
tests/unit/test_base/test_flow_parameter_specs.py
Normal file
238
tests/unit/test_base/test_flow_parameter_specs.py
Normal file
|
|
@ -0,0 +1,238 @@
|
|||
"""
|
||||
Unit tests for Flow Parameter Specification functionality
|
||||
Testing parameter specification registration and handling in flow processors
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
from unittest import IsolatedAsyncioTestCase
|
||||
|
||||
from trustgraph.base.flow_processor import FlowProcessor
|
||||
from trustgraph.base import ParameterSpec, ConsumerSpec, ProducerSpec
|
||||
|
||||
|
||||
class MockAsyncProcessor:
    """Minimal stand-in for AsyncProcessor, isolating FlowProcessor tests."""

    def __init__(self, **params):
        # Mirror only the attributes the processors under test touch.
        self.config_handlers = []
        self.specifications = []
        self.id = params.get('id', 'test-service')
||||
class TestFlowParameterSpecs(IsolatedAsyncioTestCase):
    """Test flow processor parameter specification functionality"""

    @staticmethod
    def _make_processor():
        # Shared construction used by every test in this class.
        return FlowProcessor(
            id='test-flow-processor',
            concurrency=1,
            taskgroup=AsyncMock(),
        )

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_parameter_spec_registration(self):
        """Test that parameter specs can be registered with flow processors"""
        proc = self._make_processor()

        # Register two distinct parameter specs.
        for name in ("model", "temperature"):
            proc.register_specification(ParameterSpec(name=name))

        assert len(proc.specifications) >= 2

        params = [s for s in proc.specifications if isinstance(s, ParameterSpec)]
        assert len(params) >= 2

        names = [s.name for s in params]
        assert "model" in names
        assert "temperature" in names

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_mixed_specification_types(self):
        """Test registration of mixed specification types (parameters, consumers, producers)"""
        proc = self._make_processor()

        # One spec of each kind.
        for spec in (
            ParameterSpec(name="model"),
            ConsumerSpec(name="input", schema=MagicMock(), handler=MagicMock()),
            ProducerSpec(name="output", schema=MagicMock()),
        ):
            proc.register_specification(spec)

        assert len(proc.specifications) == 3

        # Exactly one registered spec of each type.
        for kind in (ParameterSpec, ConsumerSpec, ProducerSpec):
            matching = [s for s in proc.specifications if isinstance(s, kind)]
            assert len(matching) == 1

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_parameter_spec_metadata(self):
        """Test parameter specification metadata handling"""
        proc = self._make_processor()

        proc.register_specification(ParameterSpec(name="model"))
        proc.register_specification(ParameterSpec(name="temperature"))

        by_name = {s.name: s for s in proc.specifications
                   if isinstance(s, ParameterSpec)}

        assert by_name.get("model") is not None
        assert by_name.get("temperature") is not None
        assert by_name["model"].name == "model"
        assert by_name["temperature"].name == "temperature"

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_duplicate_parameter_spec_handling(self):
        """Test handling of duplicate parameter spec registration"""
        proc = self._make_processor()

        # Register the same parameter name twice.
        proc.register_specification(ParameterSpec(name="model"))
        proc.register_specification(ParameterSpec(name="model"))

        dupes = [s for s in proc.specifications
                 if isinstance(s, ParameterSpec) and s.name == "model"]

        # The processor may keep duplicates or deduplicate; either way at
        # least one registration must survive.
        assert len(dupes) >= 1

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    @patch('trustgraph.base.flow_processor.Flow')
    async def test_parameter_specs_available_to_flows(self, mock_flow_class):
        """Test that parameter specs are available when flows are created"""
        proc = self._make_processor()
        proc.id = 'test-processor'

        proc.register_specification(ParameterSpec(name="model"))
        proc.register_specification(ParameterSpec(name="temperature"))

        mock_flow_class.return_value = AsyncMock()

        flow_name = 'test-flow'
        flow_defn = {'config': 'test-config'}

        await proc.start_flow(flow_name, flow_defn)

        # Flow is constructed with the processor, giving it access to the
        # processor's registered specifications.
        mock_flow_class.assert_called_once_with(
            'test-processor', flow_name, proc, flow_defn)
        assert len(proc.specifications) >= 2
||||
class TestParameterSpecValidation(IsolatedAsyncioTestCase):
    """Test parameter specification validation functionality"""

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_parameter_spec_name_validation(self):
        """Test parameter spec name validation"""
        # Arrange
        config = {
            'id': 'test-flow-processor',
            'concurrency': 1,
            'taskgroup': AsyncMock()
        }

        processor = FlowProcessor(**config)

        # Act & Assert - registering specs with valid parameter names must
        # not raise any exceptions.
        for name in ("model", "temperature", "max_tokens", "api_key"):
            processor.register_specification(ParameterSpec(name=name))

        assert len([s for s in processor.specifications
                    if isinstance(s, ParameterSpec)]) >= 4

    def test_parameter_spec_creation_validation(self):
        """Test parameter spec creation with various inputs"""
        # Valid parameter spec creation: name must always come back as a str.
        for spec in (
            ParameterSpec(name="model"),
            ParameterSpec(name="temperature"),
            ParameterSpec(name="max_output"),
        ):
            assert spec.name is not None
            assert isinstance(spec.name, str)

        # Edge case: an empty name may or may not be rejected depending on
        # the ParameterSpec implementation; both outcomes are acceptable,
        # but an accepted spec must still behave consistently.
        try:
            spec = ParameterSpec(name="")
            assert isinstance(spec.name, str)  # accepted: name is still a str
        except Exception:
            pass  # rejected: validation exists and caught the invalid name
||||
if __name__ == '__main__':
    # Propagate pytest's exit status so direct invocation reports failures.
    raise SystemExit(pytest.main([__file__]))
||||
264
tests/unit/test_base/test_llm_service_parameters.py
Normal file
264
tests/unit/test_base/test_llm_service_parameters.py
Normal file
|
|
@ -0,0 +1,264 @@
|
|||
"""
|
||||
Unit tests for LLM Service Parameter Specifications
|
||||
Testing the new parameter-aware functionality added to the LLM base service
|
||||
"""
|
||||
|
||||
import pytest
|
||||
from unittest.mock import AsyncMock, MagicMock, patch
|
||||
from unittest import IsolatedAsyncioTestCase
|
||||
|
||||
from trustgraph.base.llm_service import LlmService, LlmResult
|
||||
from trustgraph.base import ParameterSpec, ConsumerSpec, ProducerSpec
|
||||
from trustgraph.schema import TextCompletionRequest, TextCompletionResponse
|
||||
|
||||
|
||||
class MockAsyncProcessor:
    """Lightweight AsyncProcessor replacement for LlmService unit tests."""

    def __init__(self, **params):
        # Default the service id; expose the lists LlmService appends to.
        self.id = params.get('id', 'test-service')
        self.config_handlers = []
        self.specifications = []
||||
class TestLlmServiceParameters(IsolatedAsyncioTestCase):
    """Test LLM service parameter specification functionality"""

    @staticmethod
    def _make_service():
        # Construct an LlmService with the minimal config every test needs.
        return LlmService(
            id='test-llm-service',
            concurrency=1,
            taskgroup=AsyncMock(),  # required by the processor base
        )

    @staticmethod
    def _mock_metrics(service):
        # Stub the model metric so on_request can record model info.
        service.text_completion_model_metric = MagicMock()
        service.text_completion_model_metric.labels.return_value.info = AsyncMock()

    @staticmethod
    def _make_message():
        # A request message carrying a system/user prompt pair and an id.
        msg = MagicMock()
        msg.value.return_value = MagicMock()
        msg.value.return_value.system = "system prompt"
        msg.value.return_value.prompt = "user prompt"
        msg.properties.return_value = {"id": "test-id"}
        return msg

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_parameter_specs_registration(self):
        """Test that LLM service registers model and temperature parameter specs"""
        # Act
        service = self._make_service()

        # Assert
        param_specs = {spec.name: spec for spec in service.specifications
                       if isinstance(spec, ParameterSpec)}

        assert "model" in param_specs
        assert "temperature" in param_specs
        assert len(param_specs) >= 2  # May have other parameter specs

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_model_parameter_spec_properties(self):
        """Test that model parameter spec has correct properties"""
        service = self._make_service()

        model_spec = next(
            (s for s in service.specifications
             if isinstance(s, ParameterSpec) and s.name == "model"),
            None)

        assert model_spec is not None
        assert model_spec.name == "model"

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_temperature_parameter_spec_properties(self):
        """Test that temperature parameter spec has correct properties"""
        service = self._make_service()

        temperature_spec = next(
            (s for s in service.specifications
             if isinstance(s, ParameterSpec) and s.name == "temperature"),
            None)

        assert temperature_spec is not None
        assert temperature_spec.name == "temperature"

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    async def test_on_request_extracts_parameters_from_flow(self):
        """Test that on_request method extracts model and temperature from flow"""
        # Arrange
        service = self._make_service()
        self._mock_metrics(service)

        # Capture the parameters generate_content is invoked with.
        service.generate_content = AsyncMock(return_value=LlmResult(
            text="test response",
            in_token=10,
            out_token=5,
            model="gpt-4"
        ))

        mock_message = self._make_message()

        mock_consumer = MagicMock()
        mock_consumer.name = "request"

        mock_flow = MagicMock()
        mock_flow.name = "test-flow"
        # flow("model") -> "gpt-4", flow("temperature") -> 0.7.
        # (Note: side_effect takes precedence, so no return_value is set.)
        mock_flow.side_effect = lambda param: {
            "model": "gpt-4",
            "temperature": 0.7
        }.get(param, f"mock-{param}")

        mock_producer = AsyncMock()
        mock_flow.producer = {"response": mock_producer}

        # Act
        await service.on_request(mock_message, mock_consumer, mock_flow)

        # Assert - generate_content received the flow-supplied parameters.
        service.generate_content.assert_called_once()
        call_args = service.generate_content.call_args

        assert call_args[0][0] == "system prompt"  # system
        assert call_args[0][1] == "user prompt"    # prompt
        assert call_args[0][2] == "gpt-4"          # model
        assert call_args[0][3] == 0.7              # temperature

        # Verify flow was queried for both parameters.
        mock_flow.assert_any_call("model")
        mock_flow.assert_any_call("temperature")

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    async def test_on_request_handles_missing_parameters_gracefully(self):
        """Test that on_request handles missing parameters gracefully"""
        # Arrange
        service = self._make_service()
        self._mock_metrics(service)

        service.generate_content = AsyncMock(return_value=LlmResult(
            text="test response",
            in_token=10,
            out_token=5,
            model="default-model"
        ))

        mock_message = self._make_message()

        mock_consumer = MagicMock()
        mock_consumer.name = "request"

        mock_flow = MagicMock()
        mock_flow.name = "test-flow"
        mock_flow.return_value = None  # Both parameters return None

        mock_producer = AsyncMock()
        mock_flow.producer = {"response": mock_producer}

        # Act
        await service.on_request(mock_message, mock_consumer, mock_flow)

        # Assert - generate_content is still called; None values fall back
        # to the processor defaults downstream.
        service.generate_content.assert_called_once()
        call_args = service.generate_content.call_args

        assert call_args[0][0] == "system prompt"  # system
        assert call_args[0][1] == "user prompt"    # prompt
        assert call_args[0][2] is None  # model (will use processor default)
        assert call_args[0][3] is None  # temperature (will use processor default)

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    async def test_on_request_error_handling_preserves_behavior(self):
        """Test that parameter extraction doesn't break existing error handling"""
        # Arrange
        service = self._make_service()
        self._mock_metrics(service)

        # Force the generation step to fail.
        service.generate_content = AsyncMock(side_effect=Exception("Test error"))

        mock_message = self._make_message()

        mock_consumer = MagicMock()
        mock_consumer.name = "request"

        mock_flow = MagicMock()
        mock_flow.name = "test-flow"
        mock_flow.side_effect = lambda param: {
            "model": "gpt-4",
            "temperature": 0.7
        }.get(param, f"mock-{param}")

        mock_producer = AsyncMock()
        mock_flow.producer = {"response": mock_producer}

        # Act
        await service.on_request(mock_message, mock_consumer, mock_flow)

        # Assert - an error response must be sent instead of a normal one.
        mock_producer.send.assert_called_once()
        error_response = mock_producer.send.call_args[0][0]

        assert error_response.error is not None
        assert error_response.error.type == "llm-error"
        assert "Test error" in error_response.error.message
        assert error_response.response is None
|
||||
if __name__ == '__main__':
    # Propagate pytest's exit status so direct invocation reports failures.
    raise SystemExit(pytest.main([__file__]))
||||
Loading…
Add table
Add a link
Reference in a new issue