Implement logging strategy (#444)

* Logging strategy and convert all print() calls to logging invocations
This commit is contained in:
cybermaggedon 2025-07-30 23:18:38 +01:00 committed by GitHub
parent 3e0651222b
commit dd70aade11
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
117 changed files with 1216 additions and 667 deletions

View file

@@ -19,6 +19,7 @@ Google Cloud. Input is prompt, output is response.
from google.oauth2 import service_account
import google
import vertexai
import logging
# Why is preview here?
from vertexai.generative_models import (
@@ -29,6 +30,9 @@ from vertexai.generative_models import (
from .... exceptions import TooManyRequests
from .... base import LlmService, LlmResult
# Module logger
logger = logging.getLogger(__name__)
default_ident = "text-completion"
default_model = 'gemini-2.0-flash-001'
@@ -91,7 +95,7 @@ class Processor(LlmService):
),
]
print("Initialise VertexAI...", flush=True)
logger.info("Initializing VertexAI...")
if private_key:
credentials = (
@@ -113,11 +117,11 @@
location=region
)
print(f"Initialise model {model}", flush=True)
logger.info(f"Initializing model {model}")
self.llm = GenerativeModel(model)
self.model = model
print("Initialisation complete", flush=True)
logger.info("VertexAI initialization complete")
async def generate_content(self, system, prompt):
@@ -137,16 +141,16 @@
model = self.model
)
print(f"Input Tokens: {resp.in_token}", flush=True)
print(f"Output Tokens: {resp.out_token}", flush=True)
logger.info(f"Input Tokens: {resp.in_token}")
logger.info(f"Output Tokens: {resp.out_token}")
print("Send response...", flush=True)
logger.debug("Send response...")
return resp
except google.api_core.exceptions.ResourceExhausted as e:
print("Hit rate limit:", e, flush=True)
logger.warning(f"Hit rate limit: {e}")
# Leave rate limit retries to the base handler
raise TooManyRequests()
@@ -154,7 +158,7 @@
except Exception as e:
# Apart from rate limits, treat all exceptions as unrecoverable
print(f"Exception: {e}")
logger.error(f"VertexAI LLM exception: {e}", exc_info=True)
raise e
@staticmethod