Mirror of https://github.com/trustgraph-ai/trustgraph.git, synced 2026-04-28 01:46:22 +02:00
Release/v1.2 (#457)
* Bump setup.py versions for 1.1
* PoC MCP server (#419)
  - Very initial MCP server PoC for TrustGraph
  - Put service on port 8000
  - Add MCP container and packages to buildout
* Update docs for API/CLI changes in 1.0 (#421)
  - Update some API basics for the 0.23/1.0 API change
* Add MCP container push (#425)
* Add command args to the MCP server (#426)
  - Host and port parameters
  - Added websocket arg
  - More docs
* MCP client support (#427)
  - MCP client service
  - Tool request/response schema (a hypothetical sketch of the shape follows this list)
  - API gateway support for mcp-tool
  - Message translation for tool request & response
  - Make mcp-tool use the configuration service for information about where the MCP services are
* Feature/react call mcp (#428)
  Key features:
  - MCP tool integration: added core MCP tool support with ToolClientSpec and ToolClient classes
  - API enhancement: new mcp_tool method for flow-specific tool invocation
  - CLI tooling: new tg-invoke-mcp-tool command for testing MCP integration
  - React agent enhancement: fixed and improved multi-tool invocation capabilities
  - Tool management: enhanced CLI for tool configuration and management
  Changes:
  - Added MCP tool invocation to the API with flow-specific integration
  - Implemented ToolClientSpec and ToolClient for tool call handling
  - Updated agent-manager-react to invoke MCP tools with configurable types
  - Enhanced CLI with new commands and improved help text
  - Added comprehensive documentation for new CLI commands
  - Improved tool configuration management
  Testing:
  - Added tg-invoke-mcp-tool CLI command for isolated MCP integration testing
  - Enhanced agent capability to invoke multiple tools simultaneously
* Test suite executed from CI pipeline (#433)
  - Test strategy & test cases
  - Unit tests
  - Integration tests
* Extending test coverage (#434)
  - Contract tests
  - Testing embeddings
  - Agent unit tests
  - Knowledge pipeline tests
  - Turn on contract tests
* Increase storage test coverage (#435)
  - Fixing storage and adding tests
  - PR pipeline only runs quick tests
* Empty configuration is returned as an empty list; previously it was absent from the response (#436)
* Update config util to take files as well as command-line text (#437)
* Updated CLI invocation and config model for tools and MCP (#438)
  - CLI anomalies
  - Tweaked the MCP tool implementation for the new model
  - Updated the agent implementation to match the new model
  - Fixed agent tools, now all tested
  - Fixed integration tests
  - Fixed MCP delete tool params
  - Updated Python deps to 1.2
* Update to enable knowledge extraction using the agent framework (#439)
  - Implement KG extraction agent (kg-extract-agent) using the ReAct framework (agent-manager-react)
  - The ReAct manager had an issue when emitting JSON, which conflicted with its own JSON messages, so it was refactored to use traditional, non-JSON ReAct messages
  - Minor refactor to take the prompt template client out of prompt-template so it can be more readily used by other modules; kg-extract-agent uses this framework
* Migrate from setup.py to pyproject.toml (#440)
  - Converted setup.py to pyproject.toml
  - Modern package infrastructure as recommended by the Python packaging docs
* Install missing build deps (#441)
* Install missing build deps (#442)
* Implement logging strategy (#444)
  - Logging strategy: convert all print() calls to logging invocations (a short sketch of the pattern follows this list)
* Fix/startup failure (#445)
  - Fix logging startup problems
* Fix logging startup problems (#446)
* Fix logging startup problems (#447)
* Fixed Mistral OCR to use current API (#448)
  - Added PDF decoder tests
* Fix Mistral OCR ident to be standard pdf-decoder (#450)
  - Correct test
* Schema structure refactor (#451)
  - Write schema refactor spec
  - Implemented schema refactor spec
* Structured data MVP (#452)
  - Structured data tech spec
  - Architecture principles
  - New schemas
  - Updated schemas and specs
  - Object extractor
  - Add .coveragerc
  - New tests
  - Cassandra object storage
  - Trying to get object extraction working; issues remain
* Validate librarian collection (#453)
* Fix token chunker, broken API invocation (#454)
* Fix token chunker, broken API invocation (#455)
* Knowledge load utility CLI (#456)
  - Knowledge loader
  - More tests
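To make the tool request/response schema item under #427 concrete, here is a minimal sketch of what such a request/response pair could look like. This is hypothetical: the class and field names below are illustrative assumptions, not the actual TrustGraph schema.

    from dataclasses import dataclass, field
    from typing import Optional

    # Hypothetical shapes for an MCP tool call, sketched from the commit
    # message alone; field names are assumptions, not TrustGraph's schema.

    @dataclass
    class ToolRequest:
        name: str                                # tool identifier from configuration
        parameters: dict = field(default_factory=dict)

    @dataclass
    class ToolResponse:
        text: Optional[str] = None               # textual result, if any
        error: Optional[str] = None              # populated on failure

    # A round trip the gateway might translate to and from bus messages:
    req = ToolRequest(name="weather", parameters={"city": "Bristol"})
    resp = ToolResponse(text="14C, light rain")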
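And for the logging strategy item under #444, a minimal sketch of the print-to-logging conversion that the diff below applies, assuming handler and level configuration happens once at the entry point (the commit does not show that part):

    import logging

    # Each module creates its own logger, exactly as the diff below does.
    logger = logging.getLogger(__name__)

    def init_service():
        # Before: print("Initialise VertexAI...", flush=True)
        logger.info("Initializing VertexAI...")

    if __name__ == "__main__":
        # Entry-point configuration is an assumption, not from the commit.
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s %(name)s %(levelname)s: %(message)s",
        )
        init_service()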
This commit is contained in:
parent c85ba197be
commit 89be656990
509 changed files with 49632 additions and 5159 deletions
@@ -19,6 +19,7 @@ Google Cloud. Input is prompt, output is response.
 from google.oauth2 import service_account
 import google
 import vertexai
+import logging

 # Why is preview here?
 from vertexai.generative_models import (
@@ -29,6 +30,9 @@ from vertexai.generative_models import (
 from .... exceptions import TooManyRequests
 from .... base import LlmService, LlmResult

+# Module logger
+logger = logging.getLogger(__name__)
+
 default_ident = "text-completion"

 default_model = 'gemini-2.0-flash-001'
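The logger = logging.getLogger(__name__) line added here names the logger after the module's import path, so levels can be tuned per package subtree. A small illustration, using a hypothetical package path:

    import logging

    logging.basicConfig(format="%(name)s %(levelname)s: %(message)s")

    # Hypothetical package path, for illustration only.
    logging.getLogger("trustgraph").setLevel(logging.WARNING)

    log = logging.getLogger("trustgraph.vertexai")
    log.warning("emitted: at or above the subtree threshold")
    log.info("suppressed: below WARNING for the trustgraph subtree")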
@@ -91,7 +95,7 @@ class Processor(LlmService):
             ),
         ]

-        print("Initialise VertexAI...", flush=True)
+        logger.info("Initializing VertexAI...")

         if private_key:
             credentials = (
@@ -113,11 +117,11 @@ class Processor(LlmService):
                 location=region
             )

-            print(f"Initialise model {model}", flush=True)
+            logger.info(f"Initializing model {model}")
             self.llm = GenerativeModel(model)
             self.model = model

-            print("Initialisation complete", flush=True)
+            logger.info("VertexAI initialization complete")

     async def generate_content(self, system, prompt):

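A side note on these conversions: the f-strings are evaluated even when the target level is disabled. The stdlib also supports deferred %-style arguments, which skip interpolation for suppressed records; a small example, not taken from the commit:

    import logging

    logger = logging.getLogger(__name__)

    # Eager: the f-string is built whether or not INFO records are emitted.
    logger.info(f"Initializing model {'gemini-2.0-flash-001'}")

    # Lazy: arguments are interpolated only if the record is handled.
    logger.info("Initializing model %s", "gemini-2.0-flash-001")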
@@ -137,16 +141,16 @@ class Processor(LlmService):
                 model = self.model
             )

-            print(f"Input Tokens: {resp.in_token}", flush=True)
-            print(f"Output Tokens: {resp.out_token}", flush=True)
+            logger.info(f"Input Tokens: {resp.in_token}")
+            logger.info(f"Output Tokens: {resp.out_token}")

-            print("Send response...", flush=True)
+            logger.debug("Send response...")

             return resp

         except google.api_core.exceptions.ResourceExhausted as e:

-            print("Hit rate limit:", e, flush=True)
+            logger.warning(f"Hit rate limit: {e}")

             # Leave rate limit retries to the base handler
             raise TooManyRequests()
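This hunk preserves the translate-and-reraise pattern: the provider-specific ResourceExhausted is logged, then converted into the generic TooManyRequests so retry policy stays in the base handler. A self-contained sketch of that pattern, with stand-in exception classes in place of the real imports:

    import logging

    logger = logging.getLogger(__name__)

    class TooManyRequests(Exception):
        """Stand-in for TooManyRequests from the package's exceptions module."""

    class ResourceExhausted(Exception):
        """Stand-in for google.api_core.exceptions.ResourceExhausted."""

    def generate(call):
        try:
            return call()
        except ResourceExhausted as e:
            logger.warning(f"Hit rate limit: {e}")
            # Leave rate limit retries to the base handler
            raise TooManyRequests() from e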
@@ -154,7 +158,7 @@ class Processor(LlmService):
         except Exception as e:

             # Apart from rate limits, treat all exceptions as unrecoverable
-            print(f"Exception: {e}")
+            logger.error(f"VertexAI LLM exception: {e}", exc_info=True)
             raise e

     @staticmethod
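One detail worth noting in this last hunk: exc_info=True attaches the full traceback to the log record. Inside an except block, logger.exception(...) is the stdlib shorthand for the same thing:

    import logging

    logger = logging.getLogger(__name__)

    try:
        raise ValueError("boom")
    except Exception as e:
        # These two calls produce equivalent records: message plus traceback.
        logger.error(f"VertexAI LLM exception: {e}", exc_info=True)
        logger.exception(f"VertexAI LLM exception: {e}")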