Script for deploy-bundle generation (#65)
* Added output config generation to ZIP file
* Added a README which is bundled with the ZIP file
* Removed CONFIGDIR reference
parent 1d89e466d5
commit 92a3e9816c

3 changed files with 187 additions and 2 deletions
@@ -145,7 +145,7 @@

     name: dir,

-    volid:: "${CONFIGDIR}/" + dir,
+    volid:: dir,

     with_size:: function(size) self + { size: size },

@@ -161,7 +161,7 @@

     name: dir,

-    volid:: "${CONFIGDIR}/" + dir,
+    volid:: dir,

     with_size:: function(size) self + { size: size },
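Both hunks make the same change: the hidden volid field previously interpolated a ${CONFIGDIR} shell variable into the volume id, which tied the generated configurations to a CONFIGDIR setting in the deployment environment. Using the bare directory name (so a volume id renders as, say, grafana rather than ${CONFIGDIR}/grafana) removes that dependency, which matters now that the configurations ship as a self-contained ZIP bundle.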
templates/generate-all (new executable file, 169 lines)

@@ -0,0 +1,169 @@
#!/usr/bin/env python3

import _jsonnet as j
import json
import yaml
import logging
import os
import sys
import zipfile

logger = logging.getLogger("generate")
logging.basicConfig(level=logging.INFO, format='%(message)s')

private_json = "Put your GCP private.json here"

# Wraps jsonnet evaluation with an import callback, so that config.json is
# served from memory and other imports resolve against the working tree.
class Generator:

    def __init__(self, config, base="./templates/"):

        self.jsonnet_base = base
        self.config = config

    def process(self, config):

        res = j.evaluate_snippet("config", config, import_callback=self.load)
        return json.loads(res)

    def load(self, dir, filename):

        logger.debug("Request jsonnet: %s %s", dir, filename)

        # The top-level config.json import is answered from memory
        if filename == "config.json" and dir == "":
            path = os.path.join(".", dir, filename)
            return str(path), self.config

        if dir:
            candidates = [
                os.path.join(".", dir, filename),
                os.path.join(".", filename)
            ]
        else:
            candidates = [
                os.path.join(".", filename)
            ]

        try:

            # The credentials file is a placeholder, never read from disk
            if filename == "vertexai/private.json":
                return candidates[0], private_json.encode("utf-8")

            for c in candidates:
                logger.debug("Try: %s", c)

                if os.path.isfile(c):
                    with open(c, "rb") as f:
                        logger.debug("Loading: %s", c)
                        return str(c), f.read()

            raise RuntimeError(
                f"Could not load file={filename} dir={dir}"
            )

        except Exception:

            # Fall back to the templates directory
            path = os.path.join(self.jsonnet_base, filename)
            logger.debug("Try: %s", path)
            with open(path, "rb") as f:
                logger.debug("Loaded: %s", path)
                return str(path), f.read()

def config_object(items):

    return [
        { "name": v, "parameters": {} }
        for v in items
    ]

def full_config_object(
        vector_store="qdrant", embeddings="embeddings-hf",
        graph_store="cassandra", llm="vertexai",
):

    return config_object([
        graph_store, "pulsar", vector_store, embeddings,
        "graph-rag", "grafana", "trustgraph", llm
    ])

def generate_config(
        vector_store="qdrant", embeddings="embeddings-hf",
        graph_store="cassandra", llm="vertexai",
        platform="docker-compose",
):

    config = full_config_object(
        vector_store=vector_store,
        embeddings=embeddings,
        graph_store=graph_store,
        llm=llm,
    )

    with open(f"./templates/config-to-{platform}.jsonnet", "r") as f:
        wrapper = f.read()

    gen = Generator(json.dumps(config).encode("utf-8"))

    processed = gen.process(wrapper)

    y = yaml.dump(processed)

    return y

def generate_all(output):

    for platform in [
        "docker-compose", "minikube-k8s", "gcp-k8s"
    ]:
        for model in [
            "azure", "bedrock", "claude", "cohere", "llamafile", "ollama",
            "openai", "vertexai"
        ]:
            for graph in [ "cassandra", "neo4j" ]:

                y = generate_config(
                    llm=model, graph_store=graph, platform=platform
                )

                fname = f"{platform}/tg-{model}-{graph}.yaml"

                output(fname, y)

if len(sys.argv) < 2:
    raise RuntimeError("Usage: generate-all <file>")

outfile = sys.argv[1]
logger.info(f"Outputting to {outfile}...")

with zipfile.ZipFile(outfile, mode='w') as out:

    def output(name, content):
        logger.info(f"Adding {name}...")
        out.writestr(name, content)

    generate_all(output)

    # Placeholder for the private.json file. Won't put actual credentials
    # here.
    output("vertexai/private.json", private_json)

    # Grafana config
    with open("grafana/dashboards/dashboard.json") as f:
        output("grafana/dashboards/dashboard.json", f.read())

    with open("grafana/provisioning/dashboard.yml") as f:
        output("grafana/provisioning/dashboard.yml", f.read())

    with open("grafana/provisioning/datasource.yml") as f:
        output("grafana/provisioning/datasource.yml", f.read())

    # Prometheus config
    with open("prometheus/prometheus.yml") as f:
        output("prometheus/prometheus.yml", f.read())

    # A README
    with open("templates/zip-readme.md") as f:
        output("README.md", f.read())

logger.info("Output file written.")
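For orientation, here is a sketch of the intermediate configuration the script builds. With its defaults, full_config_object() returns the list below (derived directly from config_object() above); it is serialized to JSON and handed to the jsonnet wrapper as the in-memory config.json import:

    # Illustrative only: component list for the default selection
    # (cassandra, qdrant, embeddings-hf, vertexai).
    [
        {"name": "cassandra", "parameters": {}},
        {"name": "pulsar", "parameters": {}},
        {"name": "qdrant", "parameters": {}},
        {"name": "embeddings-hf", "parameters": {}},
        {"name": "graph-rag", "parameters": {}},
        {"name": "grafana", "parameters": {}},
        {"name": "trustgraph", "parameters": {}},
        {"name": "vertexai", "parameters": {}},
    ]

The nested loops in generate_all() therefore emit 48 configurations (3 platforms x 8 models x 2 graph stores) into the ZIP, alongside the credentials placeholder, the Grafana and Prometheus config, and the README. The script takes the output ZIP path as its only argument, e.g. (bundle name illustrative): ./templates/generate-all deploy-bundle.zip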
templates/zip-readme.md (new file, 16 lines)

@@ -0,0 +1,16 @@
These are launch configurations for TrustGraph. See https://trustgraph.ai for
the quickstart using Docker Compose.

Hint for Linux: there are files here which get mounted as volumes inside
Docker Compose containers. This may trigger SELinux rules on your system. To
permit access inside the containers, use a command like this:

    chcon -Rt svirt_sandbox_file_t grafana/ prometheus/

The file vertexai/private.json is a placeholder for real GCP credentials if
you are using the VertexAI LLM. If you're using that in Docker Compose,
replace it with your real credentials, and don't forget to permit access if
you are using Linux:

    chcon -Rt svirt_sandbox_file_t vertexai/
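Once unpacked, a configuration can be launched with Docker Compose in the usual way. The README doesn't spell out the invocation, but given the generator's {platform}/tg-{model}-{graph}.yaml naming scheme, something like the following applies (file name illustrative):

    docker compose -f docker-compose/tg-vertexai-cassandra.yaml up -d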