release/v1.4 -> master (#548)

Author: cybermaggedon, 2025-10-06 17:54:26 +01:00 (committed by GitHub)
Parent: 3ec2cd54f9
Commit: 2bd68ed7f4
94 changed files with 8571 additions and 1740 deletions

@@ -0,0 +1,264 @@
"""
Unit tests for LLM Service Parameter Specifications
Testing the new parameter-aware functionality added to the LLM base service
"""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from unittest import IsolatedAsyncioTestCase
from trustgraph.base.llm_service import LlmService, LlmResult
from trustgraph.base import ParameterSpec, ConsumerSpec, ProducerSpec
from trustgraph.schema import TextCompletionRequest, TextCompletionResponse
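
# A sketch of the service surface these tests assume, inferred from the
# assertions below rather than from library documentation:
#
#   - LlmService.__init__ adds ParameterSpec(name="model") and
#     ParameterSpec(name="temperature") to self.specifications
#   - on_request looks up per-flow values by calling flow("model") and
#     flow("temperature"), then invokes
#     generate_content(system, prompt, model, temperature)
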
class MockAsyncProcessor:
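    """Minimal stand-in for AsyncProcessor, so LlmService can be
    constructed in tests without the real async machinery."""
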
    def __init__(self, **params):
        self.config_handlers = []
        self.id = params.get('id', 'test-service')
        self.specifications = []

class TestLlmServiceParameters(IsolatedAsyncioTestCase):
    """Test LLM service parameter specification functionality"""

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_parameter_specs_registration(self):
        """Test that LLM service registers model and temperature parameter specs"""
        # Arrange
        config = {
            'id': 'test-llm-service',
            'concurrency': 1,
            'taskgroup': AsyncMock()  # Add required taskgroup
        }

        # Act
        service = LlmService(**config)

        # Assert
        param_specs = {spec.name: spec for spec in service.specifications
                       if isinstance(spec, ParameterSpec)}
        assert "model" in param_specs
        assert "temperature" in param_specs
        assert len(param_specs) >= 2  # May have other parameter specs

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_model_parameter_spec_properties(self):
        """Test that model parameter spec has correct properties"""
        # Arrange
        config = {
            'id': 'test-llm-service',
            'concurrency': 1,
            'taskgroup': AsyncMock()
        }

        # Act
        service = LlmService(**config)

        # Assert
        model_spec = None
        for spec in service.specifications:
            if isinstance(spec, ParameterSpec) and spec.name == "model":
                model_spec = spec
                break
        assert model_spec is not None
        assert model_spec.name == "model"

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    def test_temperature_parameter_spec_properties(self):
        """Test that temperature parameter spec has correct properties"""
        # Arrange
        config = {
            'id': 'test-llm-service',
            'concurrency': 1,
            'taskgroup': AsyncMock()
        }

        # Act
        service = LlmService(**config)

        # Assert
        temperature_spec = None
        for spec in service.specifications:
            if isinstance(spec, ParameterSpec) and spec.name == "temperature":
                temperature_spec = spec
                break
        assert temperature_spec is not None
        assert temperature_spec.name == "temperature"

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    async def test_on_request_extracts_parameters_from_flow(self):
        """Test that on_request method extracts model and temperature from flow"""
        # Arrange
        config = {
            'id': 'test-llm-service',
            'concurrency': 1,
            'taskgroup': AsyncMock()
        }
        service = LlmService(**config)

        # Mock the metrics
        service.text_completion_model_metric = MagicMock()
        service.text_completion_model_metric.labels.return_value.info = AsyncMock()

        # Mock the generate_content method to capture parameters
        service.generate_content = AsyncMock(return_value=LlmResult(
            text="test response",
            in_token=10,
            out_token=5,
            model="gpt-4"
        ))

        # Mock message and flow
        mock_message = MagicMock()
        mock_message.value.return_value = MagicMock()
        mock_message.value.return_value.system = "system prompt"
        mock_message.value.return_value.prompt = "user prompt"
        mock_message.properties.return_value = {"id": "test-id"}

        mock_consumer = MagicMock()
        mock_consumer.name = "request"

        mock_flow = MagicMock()
        mock_flow.name = "test-flow"
        # side_effect takes precedence over return_value, so calling
        # flow("model") or flow("temperature") returns the values below
        mock_flow.side_effect = lambda param: {
            "model": "gpt-4",
            "temperature": 0.7
        }.get(param, f"mock-{param}")
        mock_producer = AsyncMock()
        mock_flow.producer = {"response": mock_producer}

        # Act
        await service.on_request(mock_message, mock_consumer, mock_flow)

        # Assert
        # Verify that generate_content was called with parameters from flow
        service.generate_content.assert_called_once()
        call_args = service.generate_content.call_args
        assert call_args[0][0] == "system prompt"  # system
        assert call_args[0][1] == "user prompt"    # prompt
        assert call_args[0][2] == "gpt-4"          # model
        assert call_args[0][3] == 0.7              # temperature

        # Verify flow was queried for both parameters
        mock_flow.assert_any_call("model")
        mock_flow.assert_any_call("temperature")

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    async def test_on_request_handles_missing_parameters_gracefully(self):
        """Test that on_request handles missing parameters gracefully"""
        # Arrange
        config = {
            'id': 'test-llm-service',
            'concurrency': 1,
            'taskgroup': AsyncMock()
        }
        service = LlmService(**config)

        # Mock the metrics
        service.text_completion_model_metric = MagicMock()
        service.text_completion_model_metric.labels.return_value.info = AsyncMock()

        # Mock the generate_content method
        service.generate_content = AsyncMock(return_value=LlmResult(
            text="test response",
            in_token=10,
            out_token=5,
            model="default-model"
        ))

        # Mock message and flow where flow returns None for parameters
        mock_message = MagicMock()
        mock_message.value.return_value = MagicMock()
        mock_message.value.return_value.system = "system prompt"
        mock_message.value.return_value.prompt = "user prompt"
        mock_message.properties.return_value = {"id": "test-id"}

        mock_consumer = MagicMock()
        mock_consumer.name = "request"

        mock_flow = MagicMock()
        mock_flow.name = "test-flow"
        mock_flow.return_value = None  # Both parameters return None

        mock_producer = AsyncMock()
        mock_flow.producer = {"response": mock_producer}

        # Act
        await service.on_request(mock_message, mock_consumer, mock_flow)

        # Assert
        # Should still call generate_content, with None values that fall
        # back to processor defaults
        service.generate_content.assert_called_once()
        call_args = service.generate_content.call_args
        assert call_args[0][0] == "system prompt"  # system
        assert call_args[0][1] == "user prompt"    # prompt
        assert call_args[0][2] is None  # model (processor default applies)
        assert call_args[0][3] is None  # temperature (processor default applies)

    @patch('trustgraph.base.async_processor.AsyncProcessor', MockAsyncProcessor)
    async def test_on_request_error_handling_preserves_behavior(self):
        """Test that parameter extraction doesn't break existing error handling"""
        # Arrange
        config = {
            'id': 'test-llm-service',
            'concurrency': 1,
            'taskgroup': AsyncMock()
        }
        service = LlmService(**config)

        # Mock the metrics
        service.text_completion_model_metric = MagicMock()
        service.text_completion_model_metric.labels.return_value.info = AsyncMock()

        # Mock generate_content to raise an exception
        service.generate_content = AsyncMock(side_effect=Exception("Test error"))

        # Mock message and flow
        mock_message = MagicMock()
        mock_message.value.return_value = MagicMock()
        mock_message.value.return_value.system = "system prompt"
        mock_message.value.return_value.prompt = "user prompt"
        mock_message.properties.return_value = {"id": "test-id"}

        mock_consumer = MagicMock()
        mock_consumer.name = "request"

        mock_flow = MagicMock()
        mock_flow.name = "test-flow"
        mock_flow.side_effect = lambda param: {
            "model": "gpt-4",
            "temperature": 0.7
        }.get(param, f"mock-{param}")

        mock_producer = AsyncMock()
        mock_flow.producer = {"response": mock_producer}

        # Act
        await service.on_request(mock_message, mock_consumer, mock_flow)

        # Assert
        # Should have sent an error response rather than raising
        mock_producer.send.assert_called_once()
        error_response = mock_producer.send.call_args[0][0]
        assert error_response.error is not None
        assert error_response.error.type == "llm-error"
        assert "Test error" in error_response.error.message
        assert error_response.response is None

if __name__ == '__main__':
    pytest.main([__file__])
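
# Usage note: the pytest.main hook above lets the suite run standalone
# (python <this file>) as well as through a normal pytest invocation.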