Merge 2.0 to master (#651)

cybermaggedon 2026-02-28 11:03:14 +00:00 committed by GitHub
parent 3666ece2c5
commit b9d7bf9a8b
212 changed files with 13940 additions and 6180 deletions

View file

@@ -10,9 +10,10 @@ description = "TrustGraph provides a means to run a pipeline of flexible AI proc
readme = "README.md"
requires-python = ">=3.8"
dependencies = [
-    "trustgraph-base>=1.8,<1.9",
+    "trustgraph-base>=2.0,<2.1",
    "pulsar-client",
    "google-cloud-aiplatform",
+    "google-genai",
    "google-api-core",
    "prometheus-client",
    "anthropic",
]
@@ -25,6 +26,7 @@ classifiers = [
Homepage = "https://github.com/trustgraph-ai/trustgraph"

[project.scripts]
+text-completion-googleaistudio = "trustgraph.model.text_completion.googleaistudio:run"
text-completion-vertexai = "trustgraph.model.text_completion.vertexai:run"

[tool.setuptools.packages.find]
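
For context on the first hunk: the trustgraph-base pin moves from the 1.8 series to the 2.0 series, and google-genai is added for the new SDK path used below. A quick sketch of what the new specifier admits, using the packaging library (an illustration only; packaging is not a dependency of this package):

from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=2.0,<2.1")
print("2.0.5" in spec)   # True: any 2.0.x release satisfies the pin
print("2.1.0" in spec)   # False: 2.1 and later are excluded
print("1.8.3" in spec)   # False: the old 1.8 series no longer qualifies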

View file

@@ -0,0 +1,3 @@
from . llm import *

View file

@@ -0,0 +1,7 @@
#!/usr/bin/env python3

from . llm import run

if __name__ == '__main__':
    run()
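
This __main__ module and the text-completion-googleaistudio console script registered in pyproject.toml above both resolve to the same run() callable, so the invocations sketched below should be equivalent (the -m flag comes from add_args in llm.py below):

# Hypothetical invocations, assuming the package is installed:
#   text-completion-googleaistudio -m gemini-2.0-flash-001
#   python3 -m trustgraph.model.text_completion.googleaistudio -m gemini-2.0-flash-001
from trustgraph.model.text_completion.googleaistudio import run
run()  # parses argv via Processor.launch, same as the console script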

View file

@@ -0,0 +1,276 @@
"""
Simple LLM service, performs text prompt completion using GoogleAIStudio.
Input is prompt, output is response.
"""
#
# Using this SDK:
# https://googleapis.github.io/python-genai/genai.html#module-genai.client
#
# Seems to have simpler dependencies on the 'VertexAI' service, which
# TrustGraph implements in the trustgraph-vertexai package.
#
import os
import logging

from google import genai
from google.genai import types
from google.genai.types import HarmCategory, HarmBlockThreshold
from google.genai.errors import ClientError
from google.api_core.exceptions import ResourceExhausted

from .... exceptions import TooManyRequests
from .... base import LlmService, LlmResult, LlmChunk

# Module logger
logger = logging.getLogger(__name__)

default_ident = "text-completion"
default_model = 'gemini-2.0-flash-001'
default_temperature = 0.0
default_max_output = 8192
default_api_key = os.getenv("GOOGLE_AI_STUDIO_KEY")

class Processor(LlmService):

    def __init__(self, **params):

        model = params.get("model", default_model)
        api_key = params.get("api_key", default_api_key)
        temperature = params.get("temperature", default_temperature)
        max_output = params.get("max_output", default_max_output)

        if api_key is None:
            raise RuntimeError("Google AI Studio API key not specified")

        super(Processor, self).__init__(
            **params | {
                "model": model,
                "temperature": temperature,
                "max_output": max_output,
            }
        )

        self.client = genai.Client(api_key=api_key, vertexai=False)
        self.default_model = model
        self.temperature = temperature
        self.max_output = max_output

        # Cache for generation configs per model
        self.generation_configs = {}

        block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH

        self.safety_settings = [
            types.SafetySetting(
                category = HarmCategory.HARM_CATEGORY_HATE_SPEECH,
                threshold = block_level,
            ),
            types.SafetySetting(
                category = HarmCategory.HARM_CATEGORY_HARASSMENT,
                threshold = block_level,
            ),
            types.SafetySetting(
                category = HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
                threshold = block_level,
            ),
            types.SafetySetting(
                category = HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
                threshold = block_level,
            ),
            # There is a documentation conflict on whether or not
            # CIVIC_INTEGRITY is a valid category
            # HarmCategory.HARM_CATEGORY_CIVIC_INTEGRITY: block_level,
        ]

        logger.info("GoogleAIStudio LLM service initialized")

    def _get_or_create_config(self, model_name, temperature=None):
        """Get or create generation config with dynamic temperature"""

        # Use provided temperature or fall back to default
        effective_temperature = temperature if temperature is not None else self.temperature

        # Create cache key that includes temperature to avoid conflicts
        cache_key = f"{model_name}:{effective_temperature}"

        if cache_key not in self.generation_configs:
            logger.info(f"Creating generation config for '{model_name}' with temperature {effective_temperature}")
            self.generation_configs[cache_key] = types.GenerateContentConfig(
                temperature = effective_temperature,
                top_p = 1,
                top_k = 40,
                max_output_tokens = self.max_output,
                response_mime_type = "text/plain",
                safety_settings = self.safety_settings,
            )

        return self.generation_configs[cache_key]

    async def generate_content(self, system, prompt, model=None, temperature=None):

        # Use provided model or fall back to default
        model_name = model or self.default_model

        # Use provided temperature or fall back to default
        effective_temperature = temperature if temperature is not None else self.temperature

        logger.debug(f"Using model: {model_name}")
        logger.debug(f"Using temperature: {effective_temperature}")

        generation_config = self._get_or_create_config(model_name, effective_temperature)

        # Set system instruction per request (can't be cached)
        generation_config.system_instruction = system

        try:

            response = self.client.models.generate_content(
                model=model_name,
                config=generation_config,
                contents=prompt,
            )

            resp = response.text
            inputtokens = int(response.usage_metadata.prompt_token_count)
            outputtokens = int(response.usage_metadata.candidates_token_count)

            logger.debug(f"LLM response: {resp}")
            logger.info(f"Input Tokens: {inputtokens}")
            logger.info(f"Output Tokens: {outputtokens}")

            resp = LlmResult(
                text = resp,
                in_token = inputtokens,
                out_token = outputtokens,
                model = model_name
            )

            return resp

        except ResourceExhausted as e:
            logger.warning("Rate limit exceeded")
            # Leave rate limit retries to the default handler
            raise TooManyRequests()

        except ClientError as e:
            # google-genai SDK throws ClientError for 4xx errors
            if e.code == 429:
                logger.warning(f"Rate limit exceeded (ClientError 429): {e}")
                raise TooManyRequests()
            # Other client errors are unrecoverable
            logger.error(f"GoogleAIStudio ClientError: {e}", exc_info=True)
            raise e

        except Exception as e:
            # Apart from rate limits, treat all exceptions as unrecoverable
            logger.error(f"GoogleAIStudio LLM exception ({type(e).__name__}): {e}", exc_info=True)
            raise e

    def supports_streaming(self):
        """Google AI Studio supports streaming"""
        return True

    async def generate_content_stream(self, system, prompt, model=None, temperature=None):
        """Stream content generation from Google AI Studio"""

        model_name = model or self.default_model
        effective_temperature = temperature if temperature is not None else self.temperature

        logger.debug(f"Using model (streaming): {model_name}")
        logger.debug(f"Using temperature: {effective_temperature}")

        generation_config = self._get_or_create_config(model_name, effective_temperature)
        generation_config.system_instruction = system

        try:

            response = self.client.models.generate_content_stream(
                model=model_name,
                config=generation_config,
                contents=prompt,
            )

            total_input_tokens = 0
            total_output_tokens = 0

            for chunk in response:

                if hasattr(chunk, 'text') and chunk.text:
                    yield LlmChunk(
                        text=chunk.text,
                        in_token=None,
                        out_token=None,
                        model=model_name,
                        is_final=False
                    )

                # Accumulate token counts if available
                if hasattr(chunk, 'usage_metadata'):
                    if hasattr(chunk.usage_metadata, 'prompt_token_count'):
                        total_input_tokens = int(chunk.usage_metadata.prompt_token_count)
                    if hasattr(chunk.usage_metadata, 'candidates_token_count'):
                        total_output_tokens = int(chunk.usage_metadata.candidates_token_count)

            # Send final chunk with token counts
            yield LlmChunk(
                text="",
                in_token=total_input_tokens,
                out_token=total_output_tokens,
                model=model_name,
                is_final=True
            )

            logger.debug("Streaming complete")

        except ResourceExhausted:
            logger.warning("Rate limit exceeded during streaming")
            raise TooManyRequests()

        except ClientError as e:
            # google-genai SDK throws ClientError for 4xx errors
            if e.code == 429:
                logger.warning(f"Rate limit exceeded during streaming (ClientError 429): {e}")
                raise TooManyRequests()
            # Other client errors are unrecoverable
            logger.error(f"GoogleAIStudio streaming ClientError: {e}", exc_info=True)
            raise e

        except Exception as e:
            logger.error(f"GoogleAIStudio streaming exception ({type(e).__name__}): {e}", exc_info=True)
            raise e

    @staticmethod
    def add_args(parser):

        LlmService.add_args(parser)

        parser.add_argument(
            '-m', '--model',
            default=default_model,
            help=f'LLM model (default: {default_model})'
        )

        parser.add_argument(
            '-k', '--api-key',
            default=default_api_key,
            help='GoogleAIStudio API key'
        )

        parser.add_argument(
            '-t', '--temperature',
            type=float,
            default=default_temperature,
            help=f'LLM temperature parameter (default: {default_temperature})'
        )

        parser.add_argument(
            '-x', '--max-output',
            type=int,
            default=default_max_output,
            help=f'LLM max output tokens (default: {default_max_output})'
        )

def run():
    Processor.launch(default_ident, __doc__)
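
For orientation, a minimal sketch of the bare google-genai call pattern that Processor wraps, outside of TrustGraph (assumes GOOGLE_AI_STUDIO_KEY is set; model and prompt are placeholders):

import os

from google import genai
from google.genai import types

client = genai.Client(api_key=os.environ["GOOGLE_AI_STUDIO_KEY"], vertexai=False)

config = types.GenerateContentConfig(
    temperature=0.0,
    max_output_tokens=8192,
    system_instruction="You are a terse assistant.",
)

response = client.models.generate_content(
    model="gemini-2.0-flash-001",
    config=config,
    contents="Say hello.",
)

print(response.text)
print(response.usage_metadata.prompt_token_count,
      response.usage_metadata.candidates_token_count)

The same GenerateContentConfig/system_instruction split is what lets the service cache configs per model:temperature while varying the system prompt per request.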

View file

@@ -4,29 +4,20 @@ Google Cloud. Input is prompt, output is response.
Supports both Google's Gemini models and Anthropic's Claude models.
"""
#
-# Somewhat perplexed by the Google Cloud SDK choices. We're going off this
-# one, which uses the google-cloud-aiplatform library:
-# https://cloud.google.com/python/docs/reference/vertexai/1.94.0
-# It seems it is possible to invoke VertexAI from the google-genai
-# SDK too:
-# https://googleapis.github.io/python-genai/genai.html#module-genai.client
-# That would make this code look very much like the GoogleAIStudio
-# code. And maybe not reliant on the google-cloud-aiplatform library?
-#
-# This module's imports bring in a lot of libraries.
+# Uses the google-genai SDK for Gemini models on Vertex AI:
+# https://googleapis.github.io/python-genai/genai.html#module-genai.client
#

from google.oauth2 import service_account
import google.auth
-import google.api_core.exceptions
-import vertexai
import logging

-# Why is preview here?
-from vertexai.generative_models import (
-    Content, FunctionDeclaration, GenerativeModel, GenerationConfig,
-    HarmCategory, HarmBlockThreshold, Part, Tool, SafetySetting,
-)
+from google import genai
+from google.genai import types
+from google.genai.types import HarmCategory, HarmBlockThreshold
+from google.genai.errors import ClientError
+from google.api_core.exceptions import ResourceExhausted

# Added for Anthropic model support
from anthropic import AnthropicVertex, RateLimitError
@@ -67,12 +58,10 @@ class Processor(LlmService):
        self.max_output = max_output
        self.private_key = private_key

-        # Model client caches
-        self.model_clients = {}  # Cache for model instances
-        self.generation_configs = {}  # Cache for generation configs (Gemini only)
-        self.anthropic_client = None  # Single Anthropic client (handles multiple models)
+        # Anthropic client (handles Claude models)
+        self.anthropic_client = None

-        # Shared parameters for both model types
+        # Shared parameters for Anthropic models
        self.api_params = {
            "temperature": temperature,
            "top_p": 1.0,
@@ -84,10 +73,10 @@
        # Unified credential and project ID loading
        if private_key:
-            credentials = (
-                service_account.Credentials.from_service_account_file(
-                    private_key
-                )
-            )
+            scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+            credentials = service_account.Credentials.from_service_account_file(
+                private_key,
+                scopes=scopes
+            )
            project_id = credentials.project_id
        else:
@@ -103,12 +92,13 @@
        self.credentials = credentials
        self.project_id = project_id

-        # Initialize Vertex AI SDK for Gemini models
-        init_kwargs = {'location': region, 'project': project_id}
-        if credentials and private_key:  # Pass credentials only if from a file
-            init_kwargs['credentials'] = credentials
-        vertexai.init(**init_kwargs)
+        # Initialize Google GenAI client for Gemini models
+        self.client = genai.Client(
+            vertexai=True,
+            project=project_id,
+            location=region,
+            credentials=credentials
+        )

        # Pre-initialize Anthropic client if needed (single client handles all Claude models)
        if 'claude' in self.default_model.lower():
@@ -117,24 +107,27 @@
        # Safety settings for Gemini models
        block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH
        self.safety_settings = [
-            SafetySetting(
-                category = HarmCategory.HARM_CATEGORY_HARASSMENT,
-                threshold = block_level,
+            types.SafetySetting(
+                category=HarmCategory.HARM_CATEGORY_HATE_SPEECH,
+                threshold=block_level,
            ),
-            SafetySetting(
-                category = HarmCategory.HARM_CATEGORY_HATE_SPEECH,
-                threshold = block_level,
+            types.SafetySetting(
+                category=HarmCategory.HARM_CATEGORY_HARASSMENT,
+                threshold=block_level,
            ),
-            SafetySetting(
-                category = HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
-                threshold = block_level,
+            types.SafetySetting(
+                category=HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
+                threshold=block_level,
            ),
-            SafetySetting(
-                category = HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
-                threshold = block_level,
+            types.SafetySetting(
+                category=HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
+                threshold=block_level,
            ),
        ]

+        # Cache for generation configs
+        self.generation_configs = {}

        logger.info("VertexAI initialization complete")

    def _get_anthropic_client(self):
@@ -152,25 +145,26 @@
        return self.anthropic_client

-    def _get_gemini_model(self, model_name, temperature=None):
-        """Get or create a Gemini model instance"""
-        if model_name not in self.model_clients:
-            logger.info(f"Creating GenerativeModel instance for '{model_name}'")
-            self.model_clients[model_name] = GenerativeModel(model_name)
+    def _get_or_create_config(self, model_name, temperature=None):
+        """Get or create generation config with dynamic temperature"""

        # Use provided temperature or fall back to default
        effective_temperature = temperature if temperature is not None else self.temperature

-        # Create generation config with the effective temperature
-        generation_config = GenerationConfig(
-            temperature=effective_temperature,
-            top_p=1.0,
-            top_k=10,
-            candidate_count=1,
-            max_output_tokens=self.max_output,
-        )
+        # Create cache key that includes temperature to avoid conflicts
+        cache_key = f"{model_name}:{effective_temperature}"

-        return self.model_clients[model_name], generation_config
+        if cache_key not in self.generation_configs:
+            logger.info(f"Creating generation config for '{model_name}' with temperature {effective_temperature}")
+            self.generation_configs[cache_key] = types.GenerateContentConfig(
+                temperature=effective_temperature,
+                top_p=1.0,
+                top_k=40,
+                max_output_tokens=self.max_output,
+                response_mime_type="text/plain",
+                safety_settings=self.safety_settings,
+            )
+        return self.generation_configs[cache_key]

    async def generate_content(self, system, prompt, model=None, temperature=None):
@@ -205,22 +199,24 @@
                    model=model_name
                )

            else:
-                # Gemini API combines system and user prompts
+                # Gemini API using google-genai SDK
                logger.debug(f"Sending request to Gemini model '{model_name}'...")

-                full_prompt = system + "\n\n" + prompt
-                llm, generation_config = self._get_gemini_model(model_name, effective_temperature)
+                generation_config = self._get_or_create_config(model_name, effective_temperature)
+                # Set system instruction per request (can't be cached)
+                generation_config.system_instruction = system

-                response = llm.generate_content(
-                    full_prompt, generation_config = generation_config,
-                    safety_settings = self.safety_settings,
+                response = self.client.models.generate_content(
+                    model=model_name,
+                    config=generation_config,
+                    contents=prompt,
                )

                resp = LlmResult(
-                    text = response.text,
-                    in_token = response.usage_metadata.prompt_token_count,
-                    out_token = response.usage_metadata.candidates_token_count,
-                    model = model_name
+                    text=response.text,
+                    in_token=int(response.usage_metadata.prompt_token_count),
+                    out_token=int(response.usage_metadata.candidates_token_count),
+                    model=model_name
                )

                logger.info(f"Input Tokens: {resp.in_token}")
@@ -229,11 +225,20 @@
            return resp

-        except (google.api_core.exceptions.ResourceExhausted, RateLimitError) as e:
+        except (ResourceExhausted, RateLimitError) as e:
            logger.warning(f"Hit rate limit: {e}")
            # Leave rate limit retries to the base handler
            raise TooManyRequests()

+        except ClientError as e:
+            # google-genai SDK throws ClientError for 4xx errors
+            if e.code == 429:
+                logger.warning(f"Hit rate limit (ClientError 429): {e}")
+                raise TooManyRequests()
+            # Other client errors are unrecoverable
+            logger.error(f"VertexAI ClientError: {e}", exc_info=True)
+            raise e

        except Exception as e:
            # Apart from rate limits, treat all exceptions as unrecoverable
            logger.error(f"VertexAI LLM exception: {e}", exc_info=True)
@@ -302,17 +307,16 @@
                logger.info(f"Output Tokens: {total_out_tokens}")

            else:
-                # Gemini streaming
+                # Gemini streaming using google-genai SDK
                logger.debug(f"Streaming request to Gemini model '{model_name}'...")

-                full_prompt = system + "\n\n" + prompt
-                llm, generation_config = self._get_gemini_model(model_name, effective_temperature)
+                generation_config = self._get_or_create_config(model_name, effective_temperature)
+                generation_config.system_instruction = system

-                response = llm.generate_content(
-                    full_prompt,
-                    generation_config=generation_config,
-                    safety_settings=self.safety_settings,
-                    stream=True  # Enable streaming
+                response = self.client.models.generate_content_stream(
+                    model=model_name,
+                    config=generation_config,
+                    contents=prompt,
                )

                total_in_tokens = 0
@@ -348,10 +352,19 @@
            logger.info(f"Input Tokens: {total_in_tokens}")
            logger.info(f"Output Tokens: {total_out_tokens}")

-        except (google.api_core.exceptions.ResourceExhausted, RateLimitError) as e:
+        except (ResourceExhausted, RateLimitError) as e:
            logger.warning(f"Hit rate limit during streaming: {e}")
            raise TooManyRequests()

+        except ClientError as e:
+            # google-genai SDK throws ClientError for 4xx errors
+            if e.code == 429:
+                logger.warning(f"Hit rate limit during streaming (ClientError 429): {e}")
+                raise TooManyRequests()
+            # Other client errors are unrecoverable
+            logger.error(f"VertexAI streaming ClientError: {e}", exc_info=True)
+            raise e

        except Exception as e:
            logger.error(f"VertexAI streaming exception: {e}", exc_info=True)
            raise e
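
To summarize the two call paths after this refactor, a minimal sketch of each (project, region, and the Claude model ID are placeholders; assumes ambient Google Cloud credentials):

from google import genai
from google.genai import types
from anthropic import AnthropicVertex

# Gemini via google-genai, pointed at Vertex AI (no API key; project/region instead):
client = genai.Client(vertexai=True, project="my-project", location="us-central1")
config = types.GenerateContentConfig(
    temperature=0.0,
    max_output_tokens=8192,
    system_instruction="Be terse.",
)
for chunk in client.models.generate_content_stream(
        model="gemini-2.0-flash-001", config=config, contents="Say hello."):
    if chunk.text:
        print(chunk.text, end="")

# Claude via AnthropicVertex (the 'claude' branch of generate_content):
anthropic_client = AnthropicVertex(project_id="my-project", region="us-central1")
message = anthropic_client.messages.create(
    model="claude-3-5-sonnet-v2@20241022",  # placeholder Vertex model ID
    max_tokens=8192,
    system="Be terse.",
    messages=[{"role": "user", "content": "Say hello."}],
)
print(message.content[0].text)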