# trustgraph/tests/integration/test_template_service_integration.py

"""
Simplified integration tests for Template Service
These tests verify the basic functionality of the template service
without the full message queue infrastructure.
"""
import asyncio
import json

import pytest

from trustgraph.template.prompt_manager import PromptManager
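
# These tests exercise the PromptManager surface directly: load_config()
# consumes JSON-encoded configuration entries, render() expands a template
# with the supplied terms, and invoke() renders the template, calls the
# given LLM function, and post-processes the reply according to the
# template's response-type ("text" passes it through, "json" parses and
# validates it).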

@pytest.mark.integration
class TestTemplateServiceSimple:
    """Simplified integration tests for Template Service components"""

    @pytest.fixture
    def sample_config(self):
        """Sample configuration for testing"""
        return {
            "system": json.dumps("You are a helpful assistant."),
            "template-index": json.dumps(["greeting", "json_test"]),
            "template.greeting": json.dumps({
                "prompt": "Hello {{ name }}, welcome to {{ system_name }}!",
                "response-type": "text"
            }),
            "template.json_test": json.dumps({
                "prompt": "Generate profile for {{ username }}",
                "response-type": "json",
                "schema": {
                    "type": "object",
                    "properties": {
                        "name": {"type": "string"},
                        "role": {"type": "string"}
                    },
                    "required": ["name", "role"]
                }
            })
        }

    @pytest.fixture
    def prompt_manager(self, sample_config):
        """Create a configured PromptManager"""
        pm = PromptManager()
        pm.load_config(sample_config)
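        # Terms registered here act as global terms, available to every
        # template render (see test_prompt_manager_term_merging below).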
        pm.terms["system_name"] = "TrustGraph"
        return pm

    @pytest.mark.asyncio
    async def test_prompt_manager_text_invocation(self, prompt_manager):
        """Test PromptManager text response invocation"""
        # Mock LLM function
        async def mock_llm(system, prompt):
            assert system == "You are a helpful assistant."
            assert "Hello Alice, welcome to TrustGraph!" in prompt
            return "Welcome message processed!"

        result = await prompt_manager.invoke("greeting", {"name": "Alice"}, mock_llm)
        assert result == "Welcome message processed!"

    @pytest.mark.asyncio
    async def test_prompt_manager_json_invocation(self, prompt_manager):
        """Test PromptManager JSON response invocation"""
        # Mock LLM function
        async def mock_llm(system, prompt):
            assert "Generate profile for johndoe" in prompt
            return '{"name": "John Doe", "role": "user"}'

        result = await prompt_manager.invoke("json_test", {"username": "johndoe"}, mock_llm)
        assert isinstance(result, dict)
        assert result["name"] == "John Doe"
        assert result["role"] == "user"

    @pytest.mark.asyncio
    async def test_prompt_manager_json_validation_error(self, prompt_manager):
        """Test JSON schema validation failure"""
        # Mock LLM function that returns well-formed JSON which fails the schema
        async def mock_llm(system, prompt):
            return '{"name": "John Doe"}'  # Missing required "role"

        with pytest.raises(RuntimeError) as exc_info:
            await prompt_manager.invoke("json_test", {"username": "johndoe"}, mock_llm)
        assert "Schema validation fail" in str(exc_info.value)

    @pytest.mark.asyncio
    async def test_prompt_manager_json_parse_error(self, prompt_manager):
        """Test JSON parsing failure"""
        # Mock LLM function that returns non-JSON
        async def mock_llm(system, prompt):
            return "This is not JSON at all"

        with pytest.raises(RuntimeError) as exc_info:
            await prompt_manager.invoke("json_test", {"username": "johndoe"}, mock_llm)
        assert "JSON parse fail" in str(exc_info.value)

    @pytest.mark.asyncio
    async def test_prompt_manager_unknown_prompt(self, prompt_manager):
        """Test unknown prompt ID handling"""
        async def mock_llm(system, prompt):
            return "Response"

        with pytest.raises(KeyError):
            await prompt_manager.invoke("unknown_prompt", {}, mock_llm)

    @pytest.mark.asyncio
    async def test_prompt_manager_term_merging(self, prompt_manager):
        """Test proper term merging (global + prompt + input)"""
        # Add prompt-specific terms
        prompt_manager.prompts["greeting"].terms = {"greeting_prefix": "Hi"}

        async def mock_llm(system, prompt):
            # Should have global term (system_name), input term (name), and
            # any prompt terms
            assert "TrustGraph" in prompt  # Global term
            assert "Bob" in prompt  # Input term
            return "Merged correctly"

        result = await prompt_manager.invoke("greeting", {"name": "Bob"}, mock_llm)
        assert result == "Merged correctly"

    def test_prompt_manager_template_rendering(self, prompt_manager):
        """Test direct template rendering"""
        result = prompt_manager.render("greeting", {"name": "Charlie"})
        assert result.strip() == "Hello Charlie, welcome to TrustGraph!"

    def test_prompt_manager_configuration_loading(self):
        """Test configuration loading with various formats"""
        pm = PromptManager()

        # Test empty configuration
        pm.load_config({})
        assert pm.config.system_template == "Be helpful."
        assert len(pm.prompts) == 0

        # Test configuration with a single prompt
        config = {
            "system": json.dumps("Test system"),
            "template-index": json.dumps(["test"]),
            "template.test": json.dumps({
                "prompt": "Test {{ value }}",
                "response-type": "text"
            })
        }
        pm.load_config(config)
        assert pm.config.system_template == "Test system"
        assert "test" in pm.prompts
        assert pm.prompts["test"].response_type == "text"

    @pytest.mark.asyncio
    async def test_prompt_manager_json_with_markdown(self, prompt_manager):
        """Test JSON extraction from markdown code blocks"""
        async def mock_llm(system, prompt):
            return '''
Here's the profile:
```json
{"name": "Jane Smith", "role": "admin"}
```
'''

        result = await prompt_manager.invoke("json_test", {"username": "jane"}, mock_llm)
        assert isinstance(result, dict)
        assert result["name"] == "Jane Smith"
        assert result["role"] == "admin"

    def test_prompt_manager_error_handling_in_templates(self, prompt_manager):
        """Test error handling in template rendering"""
        # Test with missing variable - ibis might handle this differently
        # than Jinja2
        try:
            result = prompt_manager.render("greeting", {})  # Missing 'name'
            # If no exception, check that result is still a string
            assert isinstance(result, str)
        except Exception as e:
            # If an exception is raised, that's also acceptable
            assert ("name" in str(e)
                    or "undefined" in str(e).lower()
                    or "variable" in str(e).lower())

    @pytest.mark.asyncio
    async def test_concurrent_prompt_invocations(self, prompt_manager):
        """Test concurrent invocations"""
        async def mock_llm(system, prompt):
            # Extract name from prompt for response
            if "Alice" in prompt:
                return "Alice response"
            elif "Bob" in prompt:
                return "Bob response"
            else:
                return "Default response"

        # Run concurrent invocations
        results = await asyncio.gather(
            prompt_manager.invoke("greeting", {"name": "Alice"}, mock_llm),
            prompt_manager.invoke("greeting", {"name": "Bob"}, mock_llm),
        )
        assert "Alice response" in results
        assert "Bob response" in results