mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-28 18:06:21 +02:00
* Bump setup.py versions for 1.1 * PoC MCP server (#419) * Very initial MCP server PoC for TrustGraph * Put service on port 8000 * Add MCP container and packages to buildout * Update docs for API/CLI changes in 1.0 (#421) * Update some API basics for the 0.23/1.0 API change * Add MCP container push (#425) * Add command args to the MCP server (#426) * Host and port parameters * Added websocket arg * More docs * MCP client support (#427) - MCP client service - Tool request/response schema - API gateway support for mcp-tool - Message translation for tool request & response - Make mcp-tool using configuration service for information about where the MCP services are. * Feature/react call mcp (#428) Key Features - MCP Tool Integration: Added core MCP tool support with ToolClientSpec and ToolClient classes - API Enhancement: New mcp_tool method for flow-specific tool invocation - CLI Tooling: New tg-invoke-mcp-tool command for testing MCP integration - React Agent Enhancement: Fixed and improved multi-tool invocation capabilities - Tool Management: Enhanced CLI for tool configuration and management Changes - Added MCP tool invocation to API with flow-specific integration - Implemented ToolClientSpec and ToolClient for tool call handling - Updated agent-manager-react to invoke MCP tools with configurable types - Enhanced CLI with new commands and improved help text - Added comprehensive documentation for new CLI commands - Improved tool configuration management Testing - Added tg-invoke-mcp-tool CLI command for isolated MCP integration testing - Enhanced agent capability to invoke multiple tools simultaneously * Test suite executed from CI pipeline (#433) * Test strategy & test cases * Unit tests * Integration tests * Extending test coverage (#434) * Contract tests * Testing embeedings * Agent unit tests * Knowledge pipeline tests * Turn on contract tests * Increase storage test coverage (#435) * Fixing storage and adding tests * PR pipeline only runs quick tests * Empty 
configuration is returned as empty list, previously was not in response (#436) * Update config util to take files as well as command-line text (#437) * Updated CLI invocation and config model for tools and mcp (#438) * Updated CLI invocation and config model for tools and mcp * CLI anomalies * Tweaked the MCP tool implementation for new model * Update agent implementation to match the new model * Fix agent tools, now all tested * Fixed integration tests * Fix MCP delete tool params * Update Python deps to 1.2 * Update to enable knowledge extraction using the agent framework (#439) * Implement KG extraction agent (kg-extract-agent) * Using ReAct framework (agent-manager-react) * ReAct manager had an issue when emitting JSON, which conflicts which ReAct manager's own JSON messages, so refactored ReAct manager to use traditional ReAct messages, non-JSON structure. * Minor refactor to take the prompt template client out of prompt-template so it can be more readily used by other modules. kg-extract-agent uses this framework. 
* Migrate from setup.py to pyproject.toml (#440) * Converted setup.py to pyproject.toml * Modern package infrastructure as recommended by py docs * Install missing build deps (#441) * Install missing build deps (#442) * Implement logging strategy (#444) * Logging strategy and convert all prints() to logging invocations * Fix/startup failure (#445) * Fix loggin startup problems * Fix logging startup problems (#446) * Fix logging startup problems (#447) * Fixed Mistral OCR to use current API (#448) * Fixed Mistral OCR to use current API * Added PDF decoder tests * Fix Mistral OCR ident to be standard pdf-decoder (#450) * Fix Mistral OCR ident to be standard pdf-decoder * Correct test * Schema structure refactor (#451) * Write schema refactor spec * Implemented schema refactor spec * Structure data mvp (#452) * Structured data tech spec * Architecture principles * New schemas * Updated schemas and specs * Object extractor * Add .coveragerc * New tests * Cassandra object storage * Trying to object extraction working, issues exist * Validate librarian collection (#453) * Fix token chunker, broken API invocation (#454) * Fix token chunker, broken API invocation (#455) * Knowledge load utility CLI (#456) * Knowledge loader * More tests
170 lines
5 KiB
Python
170 lines
5 KiB
Python
|
|
"""
|
|
Simple LLM service, performs text prompt completion using GoogleAIStudio.
|
|
Input is prompt, output is response.
|
|
"""
|
|
|
|
#
|
|
# Using this SDK:
|
|
# https://googleapis.github.io/python-genai/genai.html#module-genai.client
|
|
#
|
|
# Seems to have simpler dependencies on the 'VertexAI' service, which
|
|
# TrustGraph implements in the trustgraph-vertexai package.
|
|
#
|
|
|
|
from google import genai
|
|
from google.genai import types
|
|
from google.genai.types import HarmCategory, HarmBlockThreshold
|
|
from google.api_core.exceptions import ResourceExhausted
|
|
import os
|
|
import logging
|
|
|
|
# Module logger
|
|
logger = logging.getLogger(__name__)
|
|
|
|
from .... exceptions import TooManyRequests
|
|
from .... base import LlmService, LlmResult
|
|
|
|
# Service identity used when registering with the platform.
default_ident = "text-completion"

# Default Gemini model; overridable via --model / params.
default_model = 'gemini-2.0-flash-001'
# Deterministic output by default.
default_temperature = 0.0
# Maximum tokens the model may emit per completion.
default_max_output = 8192
# API key falls back to the environment; may be None, in which case
# Processor.__init__ raises RuntimeError.
default_api_key = os.getenv("GOOGLE_AI_STUDIO_KEY")
|
|
|
|
class Processor(LlmService):
    """
    LLM text-completion service backed by Google AI Studio via the
    google-genai SDK.  Takes a system instruction and a user prompt,
    returns the model's response plus token accounting as an LlmResult.
    """

    def __init__(self, **params):
        """
        Configure the service and create the SDK client.

        Recognised params (all optional):
          model        -- Gemini model identifier (default: default_model)
          api_key      -- Google AI Studio API key (default: the
                          GOOGLE_AI_STUDIO_KEY environment variable)
          temperature  -- sampling temperature (default: default_temperature)
          max_output   -- max output tokens (default: default_max_output)

        Raises:
          RuntimeError -- if no API key is available from params or env.
        """

        model = params.get("model", default_model)
        api_key = params.get("api_key", default_api_key)
        temperature = params.get("temperature", default_temperature)
        max_output = params.get("max_output", default_max_output)

        if api_key is None:
            raise RuntimeError("Google AI Studio API key not specified")

        # Pass the resolved values upward so the base service records the
        # same configuration this instance will actually use.
        super().__init__(
            **params | {
                "model": model,
                "temperature": temperature,
                "max_output": max_output,
            }
        )

        self.client = genai.Client(api_key=api_key)
        self.model = model
        self.temperature = temperature
        self.max_output = max_output

        # Relax the default safety filters to block only high-probability
        # harms; the same settings are attached to every request.
        block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH

        self.safety_settings = [
            types.SafetySetting(
                category=category,
                threshold=block_level,
            )
            for category in (
                HarmCategory.HARM_CATEGORY_HATE_SPEECH,
                HarmCategory.HARM_CATEGORY_HARASSMENT,
                HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
                HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
                # There is a documentation conflict on whether or not
                # CIVIC_INTEGRITY is a valid category:
                # HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY
            )
        ]

        logger.info("GoogleAIStudio LLM service initialized")

    async def generate_content(self, system, prompt):
        """
        Run a single prompt completion.

        Args:
          system -- system instruction text
          prompt -- user prompt text

        Returns:
          LlmResult carrying the response text, input/output token counts
          and the model name.

        Raises:
          TooManyRequests -- when the API reports quota exhaustion
          (ResourceExhausted); anything else is logged and re-raised.
        """

        generation_config = types.GenerateContentConfig(
            temperature=self.temperature,
            top_p=1,
            top_k=40,
            max_output_tokens=self.max_output,
            response_mime_type="text/plain",
            system_instruction=system,
            safety_settings=self.safety_settings,
        )

        try:

            # NOTE(review): this is the synchronous SDK entry point called
            # from an async method, so it blocks the event loop for the
            # duration of the request.  The SDK also exposes
            # self.client.aio.models.generate_content -- confirm before
            # switching.
            response = self.client.models.generate_content(
                model=self.model,
                config=generation_config,
                contents=prompt,
            )

            text = response.text
            in_tokens = int(response.usage_metadata.prompt_token_count)
            out_tokens = int(response.usage_metadata.candidates_token_count)

            logger.debug("LLM response: %s", text)
            logger.info("Input Tokens: %s", in_tokens)
            logger.info("Output Tokens: %s", out_tokens)

            return LlmResult(
                text = text,
                in_token = in_tokens,
                out_token = out_tokens,
                model = self.model
            )

        except ResourceExhausted as e:

            logger.warning("Rate limit exceeded")

            # Leave rate limit retries to the default handler; chain the
            # original error for diagnostics.
            raise TooManyRequests() from e

        except Exception as e:

            # Apart from rate limits, treat all exceptions as unrecoverable
            logger.error(
                "GoogleAIStudio LLM exception (%s): %s",
                type(e).__name__, e, exc_info=True,
            )
            # Bare raise preserves the original traceback (raise e rewrote it).
            raise

    @staticmethod
    def add_args(parser):
        """Register this service's command-line options on *parser*."""

        LlmService.add_args(parser)

        parser.add_argument(
            '-m', '--model',
            default=default_model,
            help=f'LLM model (default: {default_model})'
        )

        parser.add_argument(
            '-k', '--api-key',
            default=default_api_key,
            help='GoogleAIStudio API key'
        )

        parser.add_argument(
            '-t', '--temperature',
            type=float,
            default=default_temperature,
            help=f'LLM temperature parameter (default: {default_temperature})'
        )

        parser.add_argument(
            '-x', '--max-output',
            type=int,
            default=default_max_output,
            help=f'LLM max output tokens (default: {default_max_output})'
        )
|
|
|
|
def run():
    """Entry point: launch the Google AI Studio text-completion service,
    using the module docstring as the service description."""

    Processor.launch(default_ident, __doc__)
|
|
|