mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-25 08:26:21 +02:00
* Bump setup.py versions for 1.1 * PoC MCP server (#419) * Very initial MCP server PoC for TrustGraph * Put service on port 8000 * Add MCP container and packages to buildout * Update docs for API/CLI changes in 1.0 (#421) * Update some API basics for the 0.23/1.0 API change * Add MCP container push (#425) * Add command args to the MCP server (#426) * Host and port parameters * Added websocket arg * More docs * MCP client support (#427) - MCP client service - Tool request/response schema - API gateway support for mcp-tool - Message translation for tool request & response - Make mcp-tool using configuration service for information about where the MCP services are. * Feature/react call mcp (#428) Key Features - MCP Tool Integration: Added core MCP tool support with ToolClientSpec and ToolClient classes - API Enhancement: New mcp_tool method for flow-specific tool invocation - CLI Tooling: New tg-invoke-mcp-tool command for testing MCP integration - React Agent Enhancement: Fixed and improved multi-tool invocation capabilities - Tool Management: Enhanced CLI for tool configuration and management Changes - Added MCP tool invocation to API with flow-specific integration - Implemented ToolClientSpec and ToolClient for tool call handling - Updated agent-manager-react to invoke MCP tools with configurable types - Enhanced CLI with new commands and improved help text - Added comprehensive documentation for new CLI commands - Improved tool configuration management Testing - Added tg-invoke-mcp-tool CLI command for isolated MCP integration testing - Enhanced agent capability to invoke multiple tools simultaneously * Test suite executed from CI pipeline (#433) * Test strategy & test cases * Unit tests * Integration tests * Extending test coverage (#434) * Contract tests * Testing embeedings * Agent unit tests * Knowledge pipeline tests * Turn on contract tests * Increase storage test coverage (#435) * Fixing storage and adding tests * PR pipeline only runs quick tests * Empty 
configuration is returned as empty list, previously was not in response (#436) * Update config util to take files as well as command-line text (#437) * Updated CLI invocation and config model for tools and mcp (#438) * Updated CLI invocation and config model for tools and mcp * CLI anomalies * Tweaked the MCP tool implementation for new model * Update agent implementation to match the new model * Fix agent tools, now all tested * Fixed integration tests * Fix MCP delete tool params * Update Python deps to 1.2 * Update to enable knowledge extraction using the agent framework (#439) * Implement KG extraction agent (kg-extract-agent) * Using ReAct framework (agent-manager-react) * ReAct manager had an issue when emitting JSON, which conflicts which ReAct manager's own JSON messages, so refactored ReAct manager to use traditional ReAct messages, non-JSON structure. * Minor refactor to take the prompt template client out of prompt-template so it can be more readily used by other modules. kg-extract-agent uses this framework. 
* Migrate from setup.py to pyproject.toml (#440) * Converted setup.py to pyproject.toml * Modern package infrastructure as recommended by py docs * Install missing build deps (#441) * Install missing build deps (#442) * Implement logging strategy (#444) * Logging strategy and convert all prints() to logging invocations * Fix/startup failure (#445) * Fix logging startup problems * Fix logging startup problems (#446) * Fix logging startup problems (#447) * Fixed Mistral OCR to use current API (#448) * Fixed Mistral OCR to use current API * Added PDF decoder tests * Fix Mistral OCR ident to be standard pdf-decoder (#450) * Fix Mistral OCR ident to be standard pdf-decoder * Correct test * Schema structure refactor (#451) * Write schema refactor spec * Implemented schema refactor spec * Structure data mvp (#452) * Structured data tech spec * Architecture principles * New schemas * Updated schemas and specs * Object extractor * Add .coveragerc * New tests * Cassandra object storage * Trying to get object extraction working, issues exist * Validate librarian collection (#453) * Fix token chunker, broken API invocation (#454) * Fix token chunker, broken API invocation (#455) * Knowledge load utility CLI (#456) * Knowledge loader * More tests
205 lines
No EOL
4.7 KiB
Python
205 lines
No EOL
4.7 KiB
Python
"""
|
|
Loads a text document into TrustGraph processing by directing to a text
|
|
loader queue.
|
|
Consider using tg-add-library-document to load
|
|
a document, followed by tg-start-library-processing to initiate processing.
|
|
"""
|
|
|
|
import pulsar
|
|
from pulsar.schema import JsonSchema
|
|
import hashlib
|
|
import argparse
|
|
import os
|
|
import time
|
|
import uuid
|
|
|
|
from trustgraph.api import Api
|
|
from trustgraph.knowledge import hash, to_uri
|
|
from trustgraph.knowledge import PREF_PUBEV, PREF_DOC, PREF_ORG
|
|
from trustgraph.knowledge import Organization, PublicationEvent
|
|
from trustgraph.knowledge import DigitalDocument
|
|
|
|
# Service endpoint and identity defaults.  The API URL may be
# overridden through the TRUSTGRAPH_URL environment variable.
default_url = os.environ.get("TRUSTGRAPH_URL", 'http://localhost:8088/')
default_user = 'trustgraph'
default_collection = 'default'
|
|
|
|
class Loader:
    """Loads text documents into TrustGraph via a flow's text-load API."""

    def __init__(
        self,
        url,
        flow_id,
        user,
        collection,
        metadata,
    ):
        """
        url: TrustGraph API URL
        flow_id: flow to submit documents to
        user: user ID recorded against each document
        collection: collection ID recorded against each document
        metadata: DigitalDocument metadata template; its id field is
            overwritten per file with a content-derived document URI
        """

        # Flow-scoped API client bound to the selected flow ID
        self.api = Api(url).flow().id(flow_id)

        self.user = user
        self.collection = collection
        self.metadata = metadata

    def load(self, files):
        """Load each named file in turn; stops at the first failure."""

        for file in files:
            self.load_file(file)

    def load_file(self, file):
        """Read one file and submit its raw bytes as a text document.

        Reports the outcome on stdout and re-raises any failure so the
        caller can abort the batch.
        """

        try:

            path = file

            # Context manager guarantees the handle is closed even if
            # the submission below raises.
            with open(path, "rb") as f:
                data = f.read()

            # Create a SHA256 hash from the data and turn it into a
            # document URI.  Named doc_id to avoid shadowing the id()
            # builtin.
            doc_id = hash(data)
            doc_id = to_uri(PREF_DOC, doc_id)

            # The metadata template is shared across files, so stamp it
            # with this file's content-derived ID before submission.
            self.metadata.id = doc_id

            self.api.load_text(
                text=data, id=doc_id, metadata=self.metadata,
                user=self.user,
                collection=self.collection,
            )

            print(f"{file}: Loaded successfully.")

        except Exception as e:
            print(f"{file}: Failed: {str(e)}", flush=True)
            # Bare raise preserves the original traceback
            raise
|
|
|
|
def main():
    """tg-load-text entry point: parse command-line arguments, assemble
    the document metadata record, and load each named file into
    TrustGraph through the selected flow."""

    parser = argparse.ArgumentParser(
        prog='tg-load-text',
        description=__doc__,
    )

    parser.add_argument(
        '-u', '--url',
        default=default_url,
        help=f'API URL (default: {default_url})',
    )

    parser.add_argument(
        '-f', '--flow-id',
        default="default",
        help='Flow ID (default: default)'
    )

    parser.add_argument(
        '-U', '--user',
        default=default_user,
        help=f'User ID (default: {default_user})'
    )

    parser.add_argument(
        '-C', '--collection',
        default=default_collection,
        help=f'Collection ID (default: {default_collection})'
    )

    parser.add_argument(
        '--name', help='Document name'
    )

    parser.add_argument(
        '--description', help='Document description'
    )

    parser.add_argument(
        '--copyright-notice', help='Copyright notice'
    )

    parser.add_argument(
        '--copyright-holder', help='Copyright holder'
    )

    parser.add_argument(
        '--copyright-year', help='Copyright year'
    )

    parser.add_argument(
        '--license', help='Copyright license'
    )

    parser.add_argument(
        '--publication-organization', help='Publication organization'
    )

    parser.add_argument(
        '--publication-description', help='Publication description'
    )

    parser.add_argument(
        '--publication-date', help='Publication date'
    )

    parser.add_argument(
        '--document-url', help='Document URL'
    )

    parser.add_argument(
        '--keyword', nargs='+', help='Keyword'
    )

    parser.add_argument(
        '--identifier', '--id', help='Document ID'
    )

    parser.add_argument(
        'files', nargs='+',
        help='File to load'
    )

    args = parser.parse_args()

    try:

        # Bug fix: the original passed the id() builtin as the first
        # positional argument and never used --identifier.  Pass the
        # user-supplied identifier instead; it may be None, in which
        # case the loader stamps a content-derived ID per file.
        document = DigitalDocument(
            id=args.identifier,
            name=args.name,
            description=args.description,
            copyright_notice=args.copyright_notice,
            copyright_holder=args.copyright_holder,
            copyright_year=args.copyright_year,
            license=args.license,
            url=args.document_url,
            keywords=args.keyword,
        )

        # Publication metadata is optional; only attach it when an
        # organization was named on the command line.
        if args.publication_organization:

            org = Organization(
                id=to_uri(PREF_ORG, hash(args.publication_organization)),
                name=args.publication_organization,
            )

            document.publication = PublicationEvent(
                id=to_uri(PREF_PUBEV, str(uuid.uuid4())),
                organization=org,
                description=args.publication_description,
                start_date=args.publication_date,
                end_date=args.publication_date,
            )

        p = Loader(
            url=args.url,
            flow_id=args.flow_id,
            user=args.user,
            collection=args.collection,
            metadata=document,
        )

        p.load(args.files)

        print("All done.")

    except Exception as e:

        # Top-level boundary: report the failure rather than dump a
        # traceback at the user.
        print("Exception:", e, flush=True)
|
|
|
|
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()