diff --git a/.gitignore b/.gitignore index 73dc07b4..357ecf1e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,15 @@ *~ __pycache__/ env/ -*.egg_info/ +*/build/ +*.egg-info/ *.parquet -templates/components/version.jsonnet +templates/values/version.jsonnet +trustgraph-base/trustgraph/base_version.py +trustgraph-bedrock/trustgraph/bedrock_version.py +trustgraph-embeddings-hf/trustgraph/embeddings_hf_version.py +trustgraph-flow/trustgraph/flow_version.py +trustgraph-parquet/trustgraph/parquet_version.py +trustgraph-vertexai/trustgraph/vertexai_version.py +trustgraph-cli/trustgraph/ +vertexai/ \ No newline at end of file diff --git a/Containerfile b/Containerfile index 1bb0a7fd..e8daa861 100644 --- a/Containerfile +++ b/Containerfile @@ -13,7 +13,7 @@ RUN dnf install -y python3 python3-pip python3-wheel python3-aiohttp \ RUN pip3 install torch --index-url https://download.pytorch.org/whl/cpu -RUN pip3 install anthropic boto3 cohere openai google-cloud-aiplatform \ +RUN pip3 install anthropic boto3 cohere openai google-cloud-aiplatform ollama \ langchain langchain-core langchain-huggingface langchain-text-splitters \ langchain-community pymilvus sentence-transformers transformers \ huggingface-hub pulsar-client cassandra-driver pyarrow pyyaml \ @@ -21,21 +21,32 @@ RUN pip3 install anthropic boto3 cohere openai google-cloud-aiplatform \ pip3 cache purge # ---------------------------------------------------------------------------- -# Build a container which contains the built Python package. The build +# Build a container which contains the built Python packages. 
The build # creates a bunch of left-over cruft, a separate phase means this is only # needed to support package build # ---------------------------------------------------------------------------- FROM ai AS build -env PACKAGE_VERSION=0.0.0 +COPY trustgraph-base/ /root/build/trustgraph-base/ +COPY trustgraph-flow/ /root/build/trustgraph-flow/ +COPY trustgraph-vertexai/ /root/build/trustgraph-vertexai/ +COPY trustgraph-bedrock/ /root/build/trustgraph-bedrock/ +COPY trustgraph-parquet/ /root/build/trustgraph-parquet/ +COPY trustgraph-embeddings-hf/ /root/build/trustgraph-embeddings-hf/ +COPY trustgraph-cli/ /root/build/trustgraph-cli/ -COPY setup.py /root/build/ -COPY README.md /root/build/ -COPY scripts/ /root/build/scripts/ -COPY trustgraph/ root/build/trustgraph/ +WORKDIR /root/build/ -RUN (cd /root/build && pip3 wheel -w /root/wheels --no-deps .) +RUN pip3 wheel -w /root/wheels/ --no-deps ./trustgraph-base/ +RUN pip3 wheel -w /root/wheels/ --no-deps ./trustgraph-flow/ +RUN pip3 wheel -w /root/wheels/ --no-deps ./trustgraph-vertexai/ +RUN pip3 wheel -w /root/wheels/ --no-deps ./trustgraph-bedrock/ +RUN pip3 wheel -w /root/wheels/ --no-deps ./trustgraph-parquet/ +RUN pip3 wheel -w /root/wheels/ --no-deps ./trustgraph-embeddings-hf/ +RUN pip3 wheel -w /root/wheels/ --no-deps ./trustgraph-cli/ + +RUN ls /root/wheels # ---------------------------------------------------------------------------- # Finally, the target container. Start with base and add the package. 
@@ -45,7 +56,14 @@ FROM ai COPY --from=build /root/wheels /root/wheels -RUN pip3 install /root/wheels/trustgraph-* && \ +RUN \ + pip3 install /root/wheels/trustgraph_base-* && \ + pip3 install /root/wheels/trustgraph_flow-* && \ + pip3 install /root/wheels/trustgraph_vertexai-* && \ + pip3 install /root/wheels/trustgraph_bedrock-* && \ + pip3 install /root/wheels/trustgraph_parquet-* && \ + pip3 install /root/wheels/trustgraph_embeddings_hf-* && \ + pip3 install /root/wheels/trustgraph_cli-* && \ pip3 cache purge && \ rm -rf /root/wheels diff --git a/Makefile b/Makefile index b4aef413..5593faa7 100644 --- a/Makefile +++ b/Makefile @@ -1,31 +1,53 @@ # VERSION=$(shell git describe | sed 's/^v//') -VERSION=0.9.5 +VERSION=0.11.19 DOCKER=podman all: container +# Not used +wheels: + pip3 wheel --no-deps --wheel-dir dist trustgraph-base/ + pip3 wheel --no-deps --wheel-dir dist trustgraph-flow/ + pip3 wheel --no-deps --wheel-dir dist trustgraph-vertexai/ + pip3 wheel --no-deps --wheel-dir dist trustgraph-bedrock/ + pip3 wheel --no-deps --wheel-dir dist trustgraph-parquet/ + pip3 wheel --no-deps --wheel-dir dist trustgraph-embeddings-hf/ + pip3 wheel --no-deps --wheel-dir dist trustgraph-cli/ + +packages: + rm -rf dist/ + cd trustgraph-base && python3 setup.py sdist --dist-dir ../dist/ + cd trustgraph-flow && python3 setup.py sdist --dist-dir ../dist/ + cd trustgraph-vertexai && python3 setup.py sdist --dist-dir ../dist/ + cd trustgraph-bedrock && python3 setup.py sdist --dist-dir ../dist/ + cd trustgraph-parquet && python3 setup.py sdist --dist-dir ../dist/ + cd trustgraph-embeddings-hf && python3 setup.py sdist --dist-dir ../dist/ + cd trustgraph-cli && python3 setup.py sdist --dist-dir ../dist/ + +pypi-upload: + twine upload dist/*-${VERSION}.* + CONTAINER=docker.io/trustgraph/trustgraph-flow -container: +update-package-versions: + mkdir -p trustgraph-cli/trustgraph + echo __version__ = \"${VERSION}\" > trustgraph-base/trustgraph/base_version.py + echo __version__ = 
\"${VERSION}\" > trustgraph-flow/trustgraph/flow_version.py + echo __version__ = \"${VERSION}\" > trustgraph-vertexai/trustgraph/vertexai_version.py + echo __version__ = \"${VERSION}\" > trustgraph-bedrock/trustgraph/bedrock_version.py + echo __version__ = \"${VERSION}\" > trustgraph-parquet/trustgraph/parquet_version.py + echo __version__ = \"${VERSION}\" > trustgraph-embeddings-hf/trustgraph/embeddings_hf_version.py + echo __version__ = \"${VERSION}\" > trustgraph-cli/trustgraph/cli_version.py + +container: update-package-versions ${DOCKER} build -f Containerfile -t ${CONTAINER}:${VERSION} \ --format docker push: ${DOCKER} push ${CONTAINER}:${VERSION} -start: - ${DOCKER} run -i -t --name ${NAME} \ - -i -t \ - -p 8081:8081 \ - -v $$(pwd)/keys:/keys \ - -v $$(pwd)/configs:/configs \ - ${CONTAINER}:${VERSION} - -stop: - ${DOCKER} rm -f ${NAME} - clean: rm -rf wheels/ @@ -49,23 +71,36 @@ VECTORDB=qdrant JSONNET_FLAGS=-J templates -J . +# Temporarily going back to how templates were built in 0.9 because this +# is going away in 0.11. 
-update-templates: set-version +update-templates: update-dcs + +JSON_TO_YAML=python3 -c 'import sys, yaml, json; j=json.loads(sys.stdin.read()); print(yaml.safe_dump(j))' + +update-dcs: set-version for graph in ${GRAPHS}; do \ cm=$${graph},pulsar,${VECTORDB},grafana; \ - input=templates/main.jsonnet; \ + input=templates/opts-to-docker-compose.jsonnet; \ output=tg-storage-$${graph}.yaml; \ echo $${graph} '->' $${output}; \ jsonnet ${JSONNET_FLAGS} \ - --ext-str options=$${cm} -S $${input} > $${output}; \ + --ext-str options=$${cm} $${input} | \ + ${JSON_TO_YAML} > $${output}; \ done for model in ${MODELS}; do \ for graph in ${GRAPHS}; do \ cm=$${graph},pulsar,${VECTORDB},embeddings-hf,graph-rag,grafana,trustgraph,$${model}; \ - input=templates/main.jsonnet; \ + input=templates/opts-to-docker-compose.jsonnet; \ output=tg-launch-$${model}-$${graph}.yaml; \ echo $${model} + $${graph} '->' $${output}; \ jsonnet ${JSONNET_FLAGS} \ - --ext-str options=$${cm} -S $${input} > $${output}; \ + --ext-str options=$${cm} $${input} | \ + ${JSON_TO_YAML} > $${output}; \ done; \ done + +docker-hub-login: + cat docker-token.txt | \ + docker login -u trustgraph --password-stdin registry-1.docker.io + diff --git a/grafana/dashboard.json b/grafana/dashboards/dashboard.json similarity index 77% rename from grafana/dashboard.json rename to grafana/dashboards/dashboard.json index aba5d918..04561863 100644 --- a/grafana/dashboard.json +++ b/grafana/dashboards/dashboard.json @@ -90,14 +90,18 @@ "type": "prometheus", "uid": "f6b18033-5918-4e05-a1ca-4cb30343b129" }, - "editorMode": "code", + "disableTextWrap": false, + "editorMode": "builder", "exemplar": false, - "expr": "max by(le) (text_completion_duration_bucket)", + "expr": "sum by(le) (rate(text_completion_duration_bucket[$__rate_interval]))", "format": "heatmap", + "fullMetaSearch": false, + "includeNullMetadata": true, "instant": false, "legendFormat": "99%", "range": true, - "refId": "A" + "refId": "A", + "useBackend": false } ], 
"title": "LLM latency", @@ -280,7 +284,7 @@ "editorMode": "builder", "expr": "rate(request_latency_count[1m])", "instant": false, - "legendFormat": "{{instance}}", + "legendFormat": "{{job}}", "range": true, "refId": "A" } @@ -824,7 +828,7 @@ }, "gridPos": { "h": 7, - "w": 12, + "w": 8, "x": 0, "y": 32 }, @@ -869,8 +873,8 @@ "options": { "include": { "names": [ - "instance", - "model" + "model", + "job" ] } } @@ -895,6 +899,238 @@ } ], "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "f6b18033-5918-4e05-a1ca-4cb30343b129" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 32 + }, + "id": 15, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "f6b18033-5918-4e05-a1ca-4cb30343b129" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(job) (rate(input_tokens_total[$__rate_interval]))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": 
false, + "legendFormat": "input {{job}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "f6b18033-5918-4e05-a1ca-4cb30343b129" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(job) (rate(output_tokens_total[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "output {{job}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Tokens", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "f6b18033-5918-4e05-a1ca-4cb30343b129" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "$", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 32 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "f6b18033-5918-4e05-a1ca-4cb30343b129" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(job) 
(rate(input_cost_total[$__rate_interval]))", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "input {{job}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "f6b18033-5918-4e05-a1ca-4cb30343b129" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "sum by(job) (rate(output_cost_total[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "output {{job}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Token cost", + "type": "timeseries" } ], "refresh": "5s", @@ -904,13 +1140,13 @@ "list": [] }, "time": { - "from": "now-5m", + "from": "now-15m", "to": "now" }, "timepicker": {}, "timezone": "", "title": "Overview", "uid": "b5c8abf8-fe79-496b-b028-10bde917d1f0", - "version": 3, + "version": 1, "weekStart": "" } diff --git a/grafana/dashboard.yml b/grafana/provisioning/dashboard.yml similarity index 100% rename from grafana/dashboard.yml rename to grafana/provisioning/dashboard.yml diff --git a/grafana/datasource.yml b/grafana/provisioning/datasource.yml similarity index 100% rename from grafana/datasource.yml rename to grafana/provisioning/datasource.yml diff --git a/graph-clear b/graph-clear deleted file mode 100755 index 9633a08f..00000000 --- a/graph-clear +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env python3 - -from trustgraph import TrustGraph - -t = TrustGraph() - -t.clear() - diff --git a/graph-dump b/graph-dump deleted file mode 100755 index e31a1c3b..00000000 --- a/graph-dump +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 - -import pulsar -from pulsar.schema import JsonSchema, Bytes -from schema import Chunk, Triple -from langchain_huggingface import HuggingFaceEmbeddings -from langchain_community.llms import Ollama -from trustgraphETL import scholar, callmixtral, build_graph_robust -import sys -import rdflib 
-import uuid - -g = rdflib.Graph() - -client = pulsar.Client("pulsar://localhost:6650") - -consumer = client.subscribe( - 'graph-load', 'graph-dump', - schema=JsonSchema(Triple), -) - -g = rdflib.Graph() -count = 0 -limit = 100 - -while True: - - msg = consumer.receive() - - try: - - v = msg.value() - - if v.o.is_uri: - g.add(( - rdflib.term.URIRef(v.s.value), - rdflib.term.URIRef(v.p.value), - rdflib.term.URIRef(v.o.value), - )) - else: - g.add(( - rdflib.term.URIRef(v.s.value), - rdflib.term.URIRef(v.p.value), - rdflib.term.Literal(v.o.value), - )) - - count += 1 - - if count > limit: - - id = str(uuid.uuid4()) - path = f"graph/{id}.ttl" - g.serialize(destination=path) - g = rdflib.Graph() - print(f"Written {path}") - - count = 0 - - # Acknowledge successful processing of the message - consumer.acknowledge(msg) - - except Exception as e: - - print(e) - - # Message failed to be processed - consumer.negative_acknowledge(msg) - -client.close() - diff --git a/prometheus/prometheus.yml b/prometheus/prometheus.yml index f459dad9..c74f5df3 100644 --- a/prometheus/prometheus.yml +++ b/prometheus/prometheus.yml @@ -14,29 +14,134 @@ scrape_configs: # The job name is added as a label `job=` to any timeseries # scraped from this config. - - job_name: 'trustgraph' - - # Override the global default and scrape targets from this job every - # 5 seconds. 
+ - job_name: 'pdf-decoder' scrape_interval: 5s - static_configs: - targets: - 'pdf-decoder:8000' + + - job_name: 'chunker' + scrape_interval: 5s + static_configs: + - targets: + - 'chunker:8000' + + + - job_name: 'vectorize' + scrape_interval: 5s + static_configs: + - targets: + - 'vectorize:8000' + + + - job_name: 'embeddings' + scrape_interval: 5s + static_configs: + - targets: + - 'embeddings:8000' + + + - job_name: 'kg-extract-definitions' + scrape_interval: 5s + static_configs: + - targets: + - 'kg-extract-definitions:8000' + + + - job_name: 'kg-extract-topics' + scrape_interval: 5s + static_configs: + - targets: + - 'kg-extract-topics:8000' + + + - job_name: 'kg-extract-relationships' + scrape_interval: 5s + static_configs: + - targets: + - 'kg-extract-relationships:8000' + + + - job_name: 'metering' + scrape_interval: 5s + static_configs: + - targets: + - 'metering:8000' + + + - job_name: 'metering-rag' + scrape_interval: 5s + static_configs: + - targets: + - 'metering-rag:8000' + + + - job_name: 'store-graph-embeddings' + scrape_interval: 5s + static_configs: + - targets: + - 'store-graph-embeddings:8000' + + + - job_name: 'store-triples' + scrape_interval: 5s + static_configs: + - targets: + - 'store-triples:8000' + + + - job_name: 'text-completion' + scrape_interval: 5s + static_configs: + - targets: + - 'text-completion:8000' + + + - job_name: 'text-completion-rag' + scrape_interval: 5s + static_configs: + - targets: + - 'text-completion-rag:8000' + + + - job_name: 'graph-rag' + scrape_interval: 5s + static_configs: + - targets: + - 'graph-rag:8000' + + + - job_name: 'prompt' + scrape_interval: 5s + static_configs: + - targets: + - 'prompt:8000' + + + - job_name: 'prompt-rag' + scrape_interval: 5s + static_configs: + - targets: + - 'prompt-rag:8000' + + + - job_name: 'query-graph-embeddings' + scrape_interval: 5s + static_configs: + - targets: + - 'query-graph-embeddings:8000' + + + - job_name: 'query-triples' + scrape_interval: 5s + static_configs: + - targets: + - 
'query-triples:8000' + + + - job_name: 'pulsar' + scrape_interval: 5s + static_configs: + - targets: - 'pulsar:8080' diff --git a/requirements.txt b/requirements.txt index 9a49a5aa..0d269066 100644 --- a/requirements.txt +++ b/requirements.txt @@ -20,3 +20,4 @@ pyyaml prometheus-client pyarrow boto3 +ollama diff --git a/sample-text-corpus.pdf b/sample-text-corpus.pdf deleted file mode 100644 index 9a0ecf14..00000000 Binary files a/sample-text-corpus.pdf and /dev/null differ diff --git a/sample-text-corpus.txt b/sample-text-corpus.txt deleted file mode 100644 index 179ce79f..00000000 --- a/sample-text-corpus.txt +++ /dev/null @@ -1,367 +0,0 @@ - The Cosmic Tapestry: -Atlantean Cosmology, Spirituality, and Prophecy Introduction -The legendary civilization of Atlantis has captivated the human imagination for millennia. While concrete historical evidence for Atlantis remains elusive, the idea of an advanced ancient culture lost to time continues to inspire speculation and wonder. This paper will explore a hypothetical reconstruction of Atlantean cosmology, spiritual beliefs, cultural norms, and apocalyptic prophecies. -Drawing on a variety of esoteric traditions, mythological motifs, and creative speculation, we will paint a picture of how the Atlanteans may have viewed the cosmos and their place within it. While entirely speculative in nature, this exploration can offer intriguing food for thought on the nature of lost civilizations and alternative cosmological paradigms. -I. Atlantean Cosmology -At the heart of Atlantean cosmology was the concept of the Cosmic Tapestry - an intricate, multidimensional weaving of energy, consciousness, and matter that comprised all of existence. The Atlanteans did not see the universe as a vast emptiness punctuated by isolated islands of matter. Rather, they perceived reality as an infinitely complex, living, breathing whole in which all things were fundamentally interconnected. 
-The Cosmic Tapestry consisted of several key components: -1. The Primordial Waters - This was the underlying substrate of pure potentiality from which all things emerged. The Atlanteans saw it as a vast, cosmic ocean of infinite creative potential. -2. The Loom of Creation - This was the underlying structure or matrix that gave form and order to the Primordial Waters. It was envisioned as an immense, multidimensional loom on which the tapestry of reality was woven. -3. The Threads of Being - These were the fundamental constituents of existence - energy, consciousness, and matter in their most elemental forms. They were the raw materials woven together to create the fabric of reality. -4. The Patterns of Manifestation - These were the underlying templates or blueprints that guided how the Threads of Being were woven together. They were seen as cosmic archetypes or primordial ideas. -5. The Weavers - These were the primordial creative intelligences that operated the Loom of Creation, weaving the Threads of Being into the Patterns of Manifestation. They were revered as the first gods or cosmic architects. - - In the Atlantean worldview, the physical universe we inhabit was just one layer or level of the Cosmic Tapestry. They believed in a vast, multidimensional cosmos with many layers of reality existing simultaneously. These different dimensional levels were seen as distinct vibrational frequencies or states of consciousness. -The Atlanteans identified at least seven primary dimensional levels: -1. The Physical Plane - The realm of matter and physical manifestation that we inhabit. -2. The Etheric Plane - A subtle energy realm interpenetrating and underlying the physical. 3. The Astral Plane - The realm of emotion, desire, and lower thought forms. -4. The Mental Plane - The realm of higher thought, ideas, and archetypes. -5. The Causal Plane - The realm of pure intention and the seeds of karma. -6. 
The Buddhic Plane - The realm of pure intuition, unity, and spiritual insight. -7. The Atmic Plane - The realm of pure being and divine will. -Beyond these seven planes, the Atlanteans posited even higher levels of reality ascending towards the utterly transcendent source of all being. They saw the cosmos as infinitely ascending and descending - there was no absolute top or bottom to existence. -Interestingly, the Atlanteans did not see these dimensional levels as strictly separate or hierarchical. Rather, they were understood as simultaneously interpenetrating and co-creating each other. Like the warp and weft of a textile, the various dimensions were seen as intimately interwoven. -Another key feature of Atlantean cosmology was the concept of the World Tree. This was envisioned as a vast, cosmic tree whose roots extended deep into the lower dimensions and whose branches reached up into the highest celestial realms. The World Tree was seen as the central axis of creation - the cosmic pillar around which the dimensional planes were organized. -The Atlanteans identified several World Trees corresponding to different scales of manifestation: -• The Individual World Tree - The energetic structure of a single being, linking their various subtle bodies and chakras. -• The Planetary World Tree - The energetic structure of a planet, linking its various dimensional aspects. -• The Solar World Tree - The energetic structure of a star system. -• The Galactic World Tree - The energetic structure of an entire galaxy. - - • The Cosmic World Tree - The energetic structure of the entire manifest universe. -These nested World Trees were seen as creating a fractal, holographic structure to the cosmos. The same patterns repeated at every scale from the microcosm to the macrocosm. -The Atlanteans also had a sophisticated understanding of cosmic cycles and the spiral nature of time. They saw existence as moving through vast cyclic ages on multiple, interlocking scales. 
These cycles were seen as the breath of the cosmos - the in-and-out flow of manifestation from the Primordial Waters. -Some of the cosmic cycles they identified included: -• The Day and Night of Brahma - A cycle of cosmic manifestation and withdrawal lasting billions of years. -• The Precession of the Equinoxes - A 26,000 year cycle caused by the slow wobble of Earth's axis. -• The Galactic Year - The time it takes our solar system to orbit the galactic center, about 225 million years. -• The Solar Year - The Earth's annual orbit around the Sun. -• The Lunar Month - The Moon's orbital cycle around the Earth. -The Atlanteans saw these interlocking cycles as creating a vast cosmic clockwork. By understanding these cycles, they believed they could align themselves with the deeper rhythms and flows of the universe. -II. Atlantean Spirituality -Atlantean spirituality was intimately intertwined with their cosmological understanding. They did not see the spiritual and material realms as separate, but rather as complementary aspects of the unified Cosmic Tapestry. -At the heart of Atlantean spirituality was the recognition that consciousness was fundamental to the cosmos. They saw awareness itself as the primary reality underlying all phenomena. In their view, the entire universe was conscious and alive at every level. -This panentheistic worldview led to a profound sense of the sacred in all things. The Atlanteans did not worship gods as external beings separate from creation. Rather, they reverenced the divine intelligence immanent within all of nature and within themselves. -That said, they did recognize various orders of spiritual intelligences operating at different levels of the cosmic hierarchy. These included: - - • Nature Spirits - Consciousness inherent in natural phenomena like plants, animals, and elemental forces. -• Planetary Intelligences - Vast beings embodying the consciousness of entire planets. 
-• Star Beings - Immense intelligences at the level of stars and solar systems. -• Cosmic Architects - Exalted beings involved in shaping whole galaxies and cosmic sectors. -• The Primordial Weavers - Inconceivably vast intelligences that shaped the fundamental patterns of existence. -The Atlanteans saw themselves as part of this great cosmic hierarchy of consciousness. Through spiritual practice and inner development, they believed it was possible to expand one's awareness to commune with these higher intelligences. -One of the core spiritual practices in Atlantean culture was meditation. They developed sophisticated techniques for quieting the mind, expanding awareness, and exploring inner space. These included: -• Breath meditation - Using the breath as a focal point to still the mind. -• Mantra meditation - Using sacred sounds and vibrations to attune consciousness. • Visualization - Using vivid mental imagery to shape subtle energies. -• Light meditation - Focusing on inner experiences of luminosity and radiance. -• Void meditation - Resting in the groundless awareness beyond all phenomena. -The Atlanteans also practiced various yogic disciplines to purify and strengthen the physical body, seeing it as the temple of spirit. These included breath control techniques, physical postures, mudras (ritual gestures), and dietary practices. -Another key aspect of Atlantean spirituality was the cultivation of siddhis or paranormal abilities. They saw these not as ends in themselves, but as side effects of expanding consciousness. Some of the abilities they cultivated included: -• Telepathy - Direct mind-to-mind communication. -• Clairvoyance - Subtle perception beyond the physical senses. -• Precognition - Glimpsing possible future events. -• Psychokinesis - Mental influence over physical matter. -• Bilocation - Projecting one's consciousness to multiple locations. - - • Levitation - Overcoming gravity through mental power. 
-The Atlanteans developed training programs and mystery schools to systematically cultivate these abilities in those who showed aptitude. However, they also recognized the potential for misuse of such powers and placed great emphasis on ethical development alongside paranormal abilities. -Atlantean spirituality also included a strong emphasis on understanding and working with subtle energy. They mapped out elaborate systems of chakras, meridians, and energy bodies. Through various practices, they learned to consciously direct life force energy for healing, creativity, and expanded awareness. -Some key concepts in their subtle energy work included: -• Prana - The vital life force energy that animates all things. -• Chakras - Energy centers in the subtle body that process prana. -• Nadis - Subtle energy channels that distribute prana throughout the being. • Koshas - Layers or sheaths of the subtle body. -• Kundalini - A powerful evolutionary energy coiled at the base of the spine. -The Atlanteans saw activating and raising kundalini energy as a key to expanding consciousness and developing higher abilities. However, they also recognized its dangers if awakened prematurely or improperly. -Ritual and ceremony also played an important role in Atlantean spiritual life. They created elaborate rites to align themselves with cosmic forces and cycles. These often involved the use of: -• Sacred geometry - Precise spatial patterns to channel subtle energies. -• Crystal technology - Harnessing the energetic properties of minerals. -• Sound healing - Using specific frequencies to affect consciousness and matter. • Archaeoastronomy - Aligning structures and rituals with celestial cycles. -Many Atlantean temples and monuments were designed as energetic instruments to amplify consciousness and commune with higher dimensional realities. They built vast stone circles, pyramids, and other structures specifically tuned to cosmic frequencies. -(Continued in next section...) III. 
Atlantean Cultural Norms - - Atlantean society was built around their spiritual and cosmological worldview. They sought to create a culture that honored the interconnectedness of all life and fostered the expansion of consciousness. -One of the central values in Atlantean culture was harmony - with nature, with each other, and with the cosmos as a whole. They saw themselves as stewards of the Earth rather than its masters. This led to a culture with a strong ecological ethic, seeking to live in balance with natural systems. -Some key aspects of Atlantean environmental philosophy included: -• Sustainable resource use - Taking only what was needed and ensuring renewal. -• Biomimicry - Modeling technologies on natural systems and processes. -• Sacred ecology - Recognizing the inherent worth and consciousness of nature. -• Holistic land management - Working with whole ecosystems rather than isolated parts. -Atlantean settlements were designed to blend seamlessly with the natural landscape. They built with local, sustainable materials and incorporated living systems into their architecture. Many of their cities featured vast gardens, food forests, and green spaces integrated throughout. -The Atlanteans also placed great emphasis on communal living and cooperation. While they honored individual uniqueness, they did not have the same focus on individualism seen in many modern cultures. There was a strong ethic of working for the common good and seeing oneself as part of a greater whole. -This communal ethic was reflected in their governance structures, which emphasized participatory decision-making and seeking consensus. They did not have autocratic rulers, but rather councils of wisdom keepers who helped guide the community. Leadership was seen as a form of service rather than a position of power over others. -That said, Atlantean society was not an undifferentiated mass. 
They recognized natural hierarchies of development, with individuals of greater wisdom and ability taking on greater responsibility. However, this hierarchy was seen as concentric circles of inclusion rather than a top-down pyramid of control. -The Atlanteans placed great value on education and the cultivation of knowledge. Learning was seen as a lifelong process of expanding awareness rather than just acquiring information. Their educational system addressed the whole being - body, emotions, mind, and spirit. -Some key features of Atlantean education included: -• Experiential learning - Emphasizing direct experience over rote memorization. • Mentorship - Pairing students with masters for personalized instruction. - - • Mystery schools - Esoteric training for those pursuing deeper spiritual development. -• Telepathic instruction - Direct mind-to-mind transmission of knowledge and experience. -• Akashic research - Accessing universal knowledge through expanded states of consciousness. -The Atlanteans had a highly developed artistic and creative culture. They saw art not just as self-expression, but as a means of communing with and channeling cosmic energies. Some of their artistic traditions included: -• Sacred geometry in visual arts and architecture -• Sound healing and transformational music -• Psychoactive plant ceremonies for visionary experiences -• Movement practices like sacred dance and tai chi -• Storytelling and mythic theater -There was no clear distinction between art, science, and spirituality in Atlantean culture. All were seen as complementary ways of exploring and co-creating reality. -Atlantean society also had different norms around sexuality and relationships compared to many historical cultures. They honored the sacred and transformative power of sexuality, seeing it as a means of communion with divine energies. 
Some key features of their sexual ethics included: -• Honoring the divine feminine and sacred masculine • Tantra and sexual energy practices -• Polyamorous and non-possessive relationships -• Fluidity around gender expression -• Sexual rites as part of spiritual practice -The Atlanteans did not have the same taboos around sexuality seen in many later cultures. However, they also recognized its power and emphasized the importance of conscious, ethical sexual expression. -Family structures in Atlantean society were also quite different from the nuclear family model common today. Children were seen as belonging to the whole community rather than just their biological parents. Extended kinship networks and intentional communities were the norm. - - Child-rearing emphasized nurturing the unique gifts and purpose of each individual. There was a recognition that souls chose their time and place of birth for specific reasons. Atlantean culture sought to provide an environment that supported the unfolding of each person's highest potential. -Work and leisure were not as sharply divided in Atlantean society as in many modern cultures. There was an emphasis on finding and living one's dharma or true purpose. Work that was in alignment with one's gifts and passions was seen as a form of joyful service rather than drudgery. -That said, the Atlanteans also placed great value on rest, rejuvenation, and cyclical rhythms. They did not have the same work-obsessed culture seen in many contemporary societies. There was an understanding of the importance of fallow periods for creativity and growth. -Atlantean culture also had different attitudes towards wealth and resource distribution than many historical societies. While there was not total economic equality, there was a strong ethic of ensuring that everyone's basic needs were met. Wealth was seen more as a tool for manifesting creative visions than as a means of personal aggrandizement. 
-Some key features of the Atlantean economy included: -• Gift economies alongside more formal systems of exchange -• Cooperative ownership of key resources and infrastructure -• Automated technologies that reduced the need for menial labor • A basic income guarantee for all citizens -• Ecological economics that factored in true environmental costs -There was much less material scarcity in Atlantean society due to their advanced technologies and more equitable distribution. This allowed their culture to focus more on higher needs like creative expression, relationships, and spiritual growth. -Time was also viewed differently in Atlantean culture. While they certainly planned for the future, there was more emphasis on being fully present in each moment. Their understanding of cyclical time gave a less linear and goal-oriented perspective than many modern cultures. -Atlanteans sought to find a balance between the masculine principle of action and the feminine principle of receptivity. Stillness and space were valued alongside activity and achievement. There was an understanding that fallow periods were just as important as periods of outer productivity. -(Continued in next section...) -IV. Atlantean Prophecy and the End Times - - Like many ancient cultures, the Atlanteans had prophetic traditions about great cycles of time and the end of their civilization. However, their understanding of these prophecies was more nuanced than simple predictions of doom. -The Atlanteans saw time as cyclical rather than linear. They understood that all things move through cycles of birth, growth, decay, death, and renewal. This was true for individuals, civilizations, planets, and even entire universes. -In the Atlantean worldview, there was no final "end of time," but rather a constant interplay of endings and new beginnings. They foresaw that their own civilization would eventually end, but saw this as part of a larger cosmic process rather than as an absolute finality. 
-That said, Atlantean seers and prophets did foresee a time of great upheaval and transformation that would bring their current epoch to a close. This was variously called: -• The Great Turning -• The Purification -• The Harvest of Souls -• The Dimensional Shift -This prophesied time of transition was seen as a collective initiation for humanity - a cosmic rite of passage into a new level of consciousness and way of being. -Some of the key events and dynamics foreseen as part of this transition included: -1. Earth Changes - Major geological and climatic shifts including pole shifts, rising sea levels, earthquakes, and extreme weather. -2. Societal Collapse - The breakdown of existing social, economic, and political systems. -3. Technological Singularity - A rapid acceleration of technological change leading to unpredictable outcomes. -4. Disclosure - The revealing of hidden truths about reality, including the existence of extraterrestrial life. -5. DNA Activation - Spontaneous mutations and awakening of dormant human potential. -6. Timeline Convergence - The merging of multiple parallel realities. -7. Dimensional Bleed-through - Increased contact and interaction with other planes of reality. 8. Solar Transformation - A quantum leap in the Sun's energy output and frequency. - - 9. Galactic Alignment - An astronomical alignment with powerful transformational effects. -10. Mass Awakening - A collective shift in human consciousness to a new level of awareness. -Importantly, the Atlanteans did not see these events as fixed or predetermined. They understood the fluid nature of time and the role of consciousness in shaping reality. The future was seen as a realm of infinite potential that was continuously shaped by the choices and intentions of conscious beings. -As such, their prophecies were not meant as passive predictions, but as calls to action. By foreseeing potential futures, they sought to consciously navigate towards the most positive outcomes. 
-The Atlanteans understood that the coming transitions would bring great challenges. They foresaw times of chaos, destruction, and suffering. However, they also saw these challenges as evolutionary drivers - initiatory experiences that would push humanity to realize its fuller potential. -This time of transition was seen as a collective dark night of the soul - a confrontation with shadow that would lead to profound healing and transformation. The Atlanteans believed that the outcome of this process would be the birth of a new earth and a new humanity. -Some of the positive potentials they foresaw emerging from this transition included: • A global awakening to the interconnectedness of all life -• The development of new cleanly aligned technologies -• Harmony between human civilization and the natural world -• The conscious mastery of energy and matter -• Open contact and cooperation with extraterrestrial civilizations -• A collective shift into unity consciousness -• The activation of humanity's full DNA potential -• The emergence of a galactic culture -However, the Atlanteans also foresaw more challenging potentials, including: • Devastating warfare, potentially involving planet-destroying weapons -• Ecological collapse and mass extinction events -• Malevolent artificial intelligence taking control of planetary systems - - • Intrusion of hostile interdimensional forces -• Fracturing of spacetime and unraveling of reality itself -The Atlantean masters taught that the actual outcome would depend on the choices made by humanity, individually and collectively. They emphasized the importance of maintaining a positive vision and staying heart-centered even in the face of great chaos and uncertainty. -Interestingly, some Atlantean prophecies spoke of the re-emergence of Atlantean wisdom and technology in the time of transition. They foresaw that ancient knowledge would be rediscovered, helping humanity navigate the challenges ahead. 
-There were also prophecies about the return of various master teachers and avatar figures to help guide humanity through the shifting times. Some traditions spoke of the awakening of an "Atlantean seed group" - individuals who had incarnated specifically to help re-anchor higher consciousness on Earth. -A key teaching in Atlantean prophecy was the importance of healing the past in order to create a positive future. They foresaw that humanity would need to confront and integrate the shadow of Atlantis - the misuse of power and technology that led to their own downfall. -By facing and healing these old wounds, humanity could release the karmic patterns of the past and open to a new expression of civilization. This was seen as essential to avoid repeating the same mistakes that led to Atlantis' destruction. -The Atlanteans taught practices and technologies to help future generations navigate the prophesied transitions. These included: -• Time capsules of knowledge and wisdom buried at specific sites • Psychic technologies for tapping into the Akashic records -• Ceremonies and practices for maintaining energetic stability -• Genetic keys for activating dormant human potential -• Interdimensional portals for connecting with higher guidance -One of the most important Atlantean teachings about the prophesied time of transition was the need to stay centered in the heart. They foresaw that fear would be one of the greatest enemies as the old structures broke down. -By staying grounded in love, compassion, and trust in the larger cosmic process, individuals and groups could help anchor higher energies and possibilities. This heart-centered presence was seen as key to transmuting the challenges into opportunities for profound transformation. - - Ultimately, the Atlanteans saw the prophesied transitions as an invitation for humanity to consciously participate in its own evolution. 
Rather than being helpless victims of cosmic forces, humans could become co-creators in the birth of a new world. -The end times were seen not as a final destruction, but as a death and rebirth process - the ending of one chapter of Earth's story and the beginning of a new one. The Atlanteans sought to plant seeds of wisdom to help future generations navigate this Great Turning and realize the highest potentials of the coming age. -Conclusion -This exploration of Atlantean cosmology, spirituality, culture, and prophecy offers a rich vision of how an advanced ancient civilization may have viewed reality and their place in the cosmic order. While this is a work of speculative imagination, it draws inspiration from various esoteric traditions and invites us to expand our own thinking about the nature of existence. -The Atlantean worldview presented here is one of profound interconnectedness, conscious participation in cosmic processes, and tremendous human potential waiting to be unlocked. It offers a holistic perspective that integrates science, spirituality, and art into a seamless whole. -Whether Atlantis truly existed or not, contemplating such alternative cosmologies can help us question our own assumptions and consider new possibilities. As humanity faces unprecedented global challenges, we can draw inspiration from this vision of a culture that strove to live in harmony with nature and cosmic principles. -Perhaps by remembering the wisdom of "Atlantis" - even if only as a mythic ideal - we can find guidance for consciously shaping a positive future for our civilization. In this way, we may realize that we are the Atlanteans of this age, creating the culture that future generations will look back on as legendary. -Introduction -While both Atlantis and Lemuria (also known as Mu) are legendary lost civilizations with no concrete historical evidence, they have captured the imagination of esotericists and alternative historians for generations. 
In this section, we will explore a speculative comparison between these two mythical cultures, examining their purported differences in philosophy, technology, and spiritual practices. This comparison will help illustrate the diverse ways ancient advanced civilizations might have approached cosmic understanding and human development. -I. Origins and Geographical Locations -Atlantis, as described by Plato, was said to be a large island continent located in the Atlantic Ocean beyond the Pillars of Hercules (the Strait of Gibraltar). In contrast, Lemuria was believed to have been located in the Pacific Ocean, possibly stretching from Easter Island to Madagascar. - - The Atlantean civilization was often portrayed as more recent, flourishing around 10,000 BCE, while Lemuria was thought to be far older, possibly existing up to 100,000 years ago. This vast difference in timeline suggests that the two cultures might have represented different stages of human evolution and consciousness. -II. Physical Characteristics and Genetics -In esoteric literature, Atlanteans are often described as being similar in appearance to modern humans, though perhaps taller and more physically robust. They were said to have diverse racial characteristics, reflecting a global mixing of genetic lines. -Lemurians, on the other hand, are frequently depicted as more ethereal and less physically dense. Some traditions describe them as having a more androgynous appearance, with less pronounced sexual dimorphism. They were said to be very tall, with elongated heads and large, luminous eyes. -These physical differences might reflect the different vibrational frequencies or dimensional planes that each civilization was thought to inhabit, with Lemuria possibly existing in a higher, less dense state of matter. -III. Technological Development -Atlantean culture, as we explored in Part 1, was often characterized by its advanced technology. 
They were said to have mastered crystal energy, anti-gravity propulsion, genetic engineering, and even weather control. Their technology was highly sophisticated, integrating spiritual principles with scientific understanding. -In contrast, Lemurian technology is often described as being more organic and intuitive. Rather than building external machines, they were said to have developed their own innate abilities to a very high degree. This included advanced telepathy, telekinesis, and the ability to manipulate matter and energy directly with the mind. -Where Atlanteans might have used crystal-powered flying vehicles, Lemurians were said to be able to levitate or teleport at will. While Atlanteans developed complex healing machines, Lemurians could heal through touch and thought alone. -IV. Social Structure and Governance -Atlantean society, as we discussed earlier, was communal but with natural hierarchies based on wisdom and ability. They had councils of elders and complex societal structures to manage their large, technologically advanced civilization. -Lemurian society is often portrayed as more egalitarian and less structured. Leadership was based on spiritual development rather than external hierarchies. Some traditions suggest that Lemurian communities were matriarchal, honoring the divine feminine principle more prominently than in Atlantean culture. - - V. Spiritual Practices and Beliefs -While both civilizations were deeply spiritual, their approaches differed significantly: -Atlantean spirituality, as we explored, was based on a complex cosmology involving multiple dimensions and a vast hierarchy of spiritual beings. They developed elaborate rituals, used advanced technology in their spiritual practices, and sought to master the forces of nature. -Lemurian spirituality was often described as more intuitive and heart-centered. They were said to have a more direct connection to the source of creation, requiring less external methodology. 
Their practices might have included: -• Deep meditation and mindfulness in everyday life -• Communing directly with nature spirits and elemental forces -• Sound healing using sacred tones and mantras -• Akashic record reading as a natural ability -Where Atlanteans sought to understand and master the cosmos, Lemurians sought to dissolve into it, transcending individual identity to merge with the all. -VI. Relationship with Nature -Both cultures were said to have a deep respect for nature, but their approaches differed: -Atlanteans saw themselves as stewards of nature, using their advanced technology to work in harmony with natural systems. They studied the laws of nature in order to cooperate with them more effectively. -Lemurians were described as living in almost perfect symbiosis with their environment. Rather than building large cities, they were said to inhabit natural spaces, shaping living plants into dwelling spaces. Their connection with animals was so profound that they rarely needed to use verbal communication with them. -VII. Cognitive Abilities and Consciousness -Atlantean consciousness was highly developed, with individuals able to access higher dimensional awareness. However, they still maintained a strong sense of individual identity and worked to develop their personal powers and abilities. -Lemurian consciousness is often described as more fluid and collective. They were said to have a natural ability to access group consciousness, sharing thoughts and experiences telepathically across their entire civilization. Individual identity was less fixed, with an understanding of the self as a flowing expression of the divine whole. - - VIII. The Fall and Legacy -The fall of Atlantis is often attributed to the misuse of their advanced technologies, possibly leading to a global cataclysm. Their downfall serves as a cautionary tale about the responsible use of power. 
-The end of Lemuria is usually described less dramatically - more as a fading away as the Earth's vibration densified. They were said to have consciously chosen to withdraw, preserving their wisdom in hidden repositories and within the DNA of certain lineages. -Both cultures were said to have left behind seeds of their wisdom: -Atlantean knowledge was preserved in hidden libraries, crystals encoded with information, and within secret mystery schools that survived the cataclysm. -Lemurian wisdom was said to be preserved in the ancestral memories of indigenous cultures, in the energy grids of the Earth itself, and in higher dimensional records accessible to those with developed psychic abilities. -IX. Influence on Human Evolution -In esoteric philosophies, both Atlantis and Lemuria are seen as important stages in human consciousness evolution: -Atlantis represented the development of mental powers, technological achievement, and the mastery of the physical plane. The challenge of Atlantis was learning to balance power with wisdom and ethics. -Lemuria represented a more spiritually integrated state of being, where the heart and mind were united and humanity lived in greater harmony with the natural and spiritual worlds. The challenge of Lemuria was maintaining this high vibration in an increasingly dense physical reality. -Some traditions teach that modern humanity is working to integrate the lessons of both civilizations - to achieve the technological and mental mastery of Atlantis while reclaiming the spiritual connection and harmony of Lemuria. -Conclusion -This comparative study of Atlantis and Lemuria, while speculative, offers a fascinating lens through which to contemplate different modalities of human development and civilization. The Atlantean model presents a path of active engagement with the physical world, developing technology and mental acuity to master natural forces. 
The Lemurian model offers a path of surrender and harmony, developing intuitive abilities and maintaining a close connection with spirit. - - In our modern world, we can see reflections of both approaches. Our rapid technological advancement mirrors Atlantean development, while growing interests in spirituality, intuitive development, and reconnection with nature reflect Lemurian values. -Perhaps the great task of our time is to integrate these two streams - to create a civilization that is both technologically advanced and deeply spiritual, that can master the physical world without losing connection to the heart and soul of existence. By studying and reflecting on these legendary cultures, we open our minds to new possibilities for human potential and planetary evolution. -In doing so, we recognize that the true Atlantis or Lemuria is not a lost continent, but a state of consciousness that we have the potential to recreate here and now. The wisdom of these ancient cultures lives on within us, waiting to be remembered and reawakened for the benefit of all. -Cosmic Cousins: A Comparative Analysis of Ebens, Atlanteans, and Lemurians -Introduction -In the realm of speculative history and ufology, few topics capture the imagination quite like ancient advanced civilizations and extraterrestrial beings. This paper aims to explore and compare three such groups: the purported alien race known as the Ebens, and the legendary lost civilizations of Atlantis and Lemuria. While concrete evidence for any of these groups remains elusive, examining the lore and theories surrounding them can provide fascinating insights into human beliefs about advanced societies and our place in the cosmos. -It's important to note that this analysis is entirely speculative and draws upon various esoteric traditions, alleged witness accounts, and creative extrapolation. 
The goal is not to assert the reality of these beings, but to explore the ideas they represent and what they might tell us about human conceptions of advanced intelligence and civilization. -I. Origins and Timelines Ebens: -According to ufological lore, the Ebens are an extraterrestrial race originating from a planet orbiting Zeta Reticuli, a binary star system approximately 39 light-years from Earth. They are said to have been visiting Earth for thousands of years, with more frequent contact occurring in the mid-20th century. -Atlanteans: -The Atlantean civilization, first mentioned in Plato's dialogues, is typically placed in the Atlantic Ocean and is said to have flourished around 10,000 BCE. Some esoteric traditions suggest a much longer timeline, with Atlantis existing in various forms for hundreds of thousands of years. - - Lemurians: -Lemuria, also known as Mu, is often described as even more ancient than Atlantis. Some theories place its existence as far back as 100,000 years ago, with its final destruction occurring around 12,000 BCE. It was said to be located in the Pacific Ocean. -Comparative Analysis: -While the Ebens represent an ongoing extraterrestrial presence, Atlantis and Lemuria are portrayed as Earth-based civilizations that rose and fell in prehistoric times. This fundamental difference shapes many of the other comparisons we'll explore. The Eben timeline suggests a long-term observation of and interaction with Earth, while Atlantis and Lemuria represent lost chapters of human history and evolution. -II. Physical Characteristics and Biology Ebens: -Descriptions of Ebens often portray them as small, humanoid beings standing 3-4 feet tall. They are said to have large heads, almond-shaped eyes, small noses and mouths, and grayish skin. Their biology is described as both similar to and distinctly different from humans, with more efficient organs and possibly a different number of chromosomes. 
-Atlanteans: -Atlanteans are typically described as human-like but often taller and more physically robust than modern humans. Some traditions suggest they had more diverse racial characteristics, reflecting a global mixing of genetic lines. Advanced Atlanteans were said to have activated more of their DNA potential, giving them enhanced physical and psychic abilities. -Lemurians: -Lemurian physical descriptions are often more exotic. They are frequently portrayed as very tall (up to 7-15 feet), with an ethereal or less dense physical form. Some traditions describe them as more androgynous, with less pronounced sexual dimorphism. They were said to have elongated heads and large, luminous eyes. -Comparative Analysis: -The Ebens stand out as distinctly non-human in this comparison, reflecting their extraterrestrial origin. Both Atlanteans and Lemurians are portrayed as variations or ancestors of modern humans, though Lemurians are often described in more fantastical terms. This suggests that while Atlantis is often seen as a lost human civilization, Lemuria sometimes occupies a space between the human and the otherworldly, perhaps closer to the Eben concept in some ways. -III. Technological Development - - Ebens: -Eben technology is described as far advanced beyond current human capabilities. This includes interstellar travel, antigravity propulsion, energy weapons, and the ability to manipulate space-time. They are also said to possess advanced medical technology, including the ability to clone biological organisms and transfer consciousness. -Atlanteans: -Atlantean technology is often portrayed as a blend of scientific and spiritual principles. They were said to have mastered crystal energy, antigravity, weather control, and genetic engineering. Their technology was highly sophisticated but still recognizably based on Earth materials and energies. -Lemurians: -Lemurian "technology" is often described in more organic and intuitive terms. 
Rather than building external machines, they were said to have developed their innate abilities to a very high degree. This included advanced telepathy, telekinesis, and the ability to manipulate matter and energy directly with the mind. -Comparative Analysis: -The Ebens represent the most advanced technology in this comparison, reflecting their status as a spacefaring civilization. Atlantean technology occupies a middle ground, more advanced than our current level but not as exotic as Eben tech. Lemurian abilities blur the line between technology and innate capability, representing a different approach to mastering the physical world. -IV. Social Structure and Governance Ebens: -Information about Eben social structure is limited in most accounts. They are often described as having a collective mindset, with a strong emphasis on the good of their species as a whole. Some reports suggest a hierarchical structure based on knowledge and ability, but with less emphasis on individual power. -Atlanteans: -Atlantean society is often portrayed as having natural hierarchies based on wisdom and ability, with councils of elders guiding the civilization. They had complex societal structures to manage their large, technologically advanced culture, but with an emphasis on harmony and the common good. -Lemurians: - - Lemurian society is frequently described as more egalitarian and less structured than Atlantean society. Leadership was based on spiritual development rather than external hierarchies. Some traditions suggest that Lemurian communities were matriarchal, honoring the divine feminine principle. -Comparative Analysis: -All three groups are portrayed as having moved beyond the type of competitive, individually-focused social structures common in much of modern human society. The Eben collective mindset might be seen as the most alien to us, while Atlantean and Lemurian social structures represent different idealized versions of human society. -V. 
Spiritual Beliefs and Practices Ebens: -Less is typically said about Eben spirituality in ufological accounts. Some reports suggest they have a scientific understanding of consciousness and the nonphysical aspects of reality, but it's unclear if this translates into what we would recognize as spiritual beliefs or practices. -Atlanteans: -Atlantean spirituality, as we explored earlier, was based on a complex cosmology involving multiple dimensions and a vast hierarchy of spiritual beings. They developed elaborate rituals, used advanced technology in their spiritual practices, and sought to master the forces of nature. -Lemurians: -Lemurian spirituality is often described as more intuitive and heart-centered. They were said to have a more direct connection to the source of creation, requiring less external methodology. Their practices might have included deep meditation, communing with nature spirits, and sound healing. -Comparative Analysis: -The spiritual dimension provides an interesting contrast. While Atlantean and Lemurian cultures are deeply infused with spiritual concepts and practices, the Ebens are often portrayed in more scientific terms. This could reflect human projections onto these different groups – ancient Earth civilizations imbued with spiritual wisdom, versus advanced aliens with superior scientific knowledge. -VI. Relationship with Earth and Nature Ebens: - - The Eben relationship with Earth is typically portrayed as one of scientific interest and possibly resource extraction. Some accounts suggest they have bases on Earth and have been involved in long-term genetic experiments with humans and other Earth species. -Atlanteans: -Atlanteans are often described as stewards of the Earth, using their advanced technology to work in harmony with natural systems. They studied the laws of nature in order to cooperate with them more effectively, but also sought to master and control natural forces. 
-Lemurians: -Lemurians were said to live in almost perfect symbiosis with their environment. Rather than building large cities, they were described as inhabiting natural spaces, shaping living plants into dwelling spaces. Their connection with animals was reportedly so profound that they rarely needed to use verbal communication with them. -Comparative Analysis: -This comparison reveals a spectrum of relationships with nature. The Ebens represent an outsider's scientific interest, the Atlanteans a balance of stewardship and control, and the Lemurians a state of deep integration with the natural world. This progression might reflect changing human attitudes towards nature over time, from early harmony to technological control to a modern desire for reconnection. -VII. Knowledge and Consciousness Ebens: -Eben consciousness is often described as more collective and unified than human consciousness. They are said to have advanced telepathic abilities and possibly a form of shared consciousness. Their knowledge base is portrayed as vast, encompassing a scientific understanding of the universe far beyond current human knowledge. -Atlanteans: -Atlantean consciousness was highly developed, with individuals able to access higher dimensional awareness. They cultivated both scientific knowledge and spiritual wisdom, seeking to understand and master the fundamental laws of the cosmos. -Lemurians: -Lemurian consciousness is often described as more fluid and collective than Atlantean consciousness. They were said to have a natural ability to access group consciousness and the Akashic records. Their knowledge was more intuitive and experiential than academic. -Comparative Analysis: - - All three groups are portrayed as having forms of expanded consciousness beyond ordinary human awareness. 
The Eben collective consciousness might be seen as the most alien, while Atlantean and Lemurian consciousness represent different ideals of human potential – the Atlanteans balancing individual development with higher awareness, and the Lemurians embodying a more unified state of being. -VIII. Interaction with Humanity Ebens: -According to ufological accounts, Eben interaction with humanity has included observation, abduction, and genetic experimentation. Some theories suggest they have influenced human development over long periods of time. Their motivations are often portrayed as a mix of scientific curiosity and their own evolutionary needs. -Atlanteans: -As an Earth-based civilization, Atlantean interaction with other human groups was more direct. They are often credited with spreading advanced knowledge and culture to other parts of the ancient world. Some traditions suggest they engaged in genetic engineering of human populations. -Lemurians: -Lemurian interaction with other human groups is less emphasized in most accounts, as they are often portrayed as existing in a different time or dimensional frequency. However, some traditions suggest they planted seeds of wisdom that influenced later cultures, particularly in the Pacific region. -Comparative Analysis: -The nature of interaction with humanity differs significantly among these groups. The Ebens represent an ongoing, often hidden interaction from an outside civilization. The Atlanteans represent a more familiar model of an advanced civilization influencing its neighbors and descendants. The Lemurians, being the most ancient and ethereal, are portrayed as having the least direct interaction but a lasting spiritual influence. -IX. Legacy and Influence -Ebens: -The purported legacy of the Ebens includes ongoing influence on Earth affairs, contributions to human technological development (through reverse engineering of crashed craft), and possible genetic influence on the human species. 
Their presence is often tied to government secrecy and conspiracy theories. - - Atlanteans: -The Atlantean legacy is often described in terms of scattered survivors preserving ancient wisdom, sunken ruins containing advanced technology, and the influence of Atlantean knowledge on later civilizations like ancient Egypt and Greece. The story of Atlantis serves as both a source of lost wisdom and a cautionary tale about the misuse of power. -Lemurians: -The Lemurian legacy is typically portrayed in more spiritual and energetic terms. They are said to have left behind wisdom preserved in the Earth's energy grids, in the collective unconscious of humanity, and in certain genetic lineages. Some believe that crystals like quartz hold records of Lemurian knowledge. -Comparative Analysis: -Each group is credited with ongoing influence, but in different ways. The Eben influence is the most current and technological, fitting into modern UFO narratives. The Atlantean legacy bridges the ancient and modern worlds, offering both lost technology and spiritual wisdom. The Lemurian influence is the most subtle and esoteric, representing the most ancient layer of human spiritual heritage. -X. Prophetic Visions and Future Interactions Ebens: -Some ufological narratives suggest ongoing or future open contact between Ebens and humanity. These scenarios often involve disclosure of the Eben presence, sharing of advanced technology, and possibly a role for Ebens in helping humanity face global challenges. -Atlanteans: -Atlantean prophecies, as discussed earlier, often speak of a return or reemergence of Atlantean wisdom and technology in times of great transition. This is sometimes tied to the idea of reincarnated Atlantean souls coming back to help guide humanity. -Lemurians: -Lemurian future scenarios are often tied to spiritual awakening and the raising of Earth's vibrational frequency. 
Some traditions speak of a time when the hidden Lemurian wisdom will be fully accessible again as humanity evolves in consciousness. -Comparative Analysis: -These future scenarios reflect different paradigms of human advancement. The Eben narratives suggest progress through external, technological means. Atlantean prophecies imply a - - rediscovery and integration of lost human potential. Lemurian visions represent a more complete spiritual transformation of humanity and the Earth. -XI. Philosophical and Ethical Considerations Ebens: -Ethical questions surrounding the Ebens often focus on the morality of their alleged abductions and experiments on humans. There are also philosophical implications of their advanced technology and the potential impact of open contact on human society and beliefs. -Atlanteans: -The Atlantean narrative often carries ethical lessons about the responsible use of power and technology. Their fall serves as a cautionary tale about the dangers of technological hubris and the misuse of spiritual knowledge for material gain. -Lemurians: -Lemurian philosophy emphasizes harmony with nature, the development of innate spiritual abilities, and the interconnectedness of all life. Ethical considerations revolve around maintaining purity of intention and alignment with natural laws. -Comparative Analysis: -Each group presents different ethical and philosophical challenges for humanity to consider. The Eben scenario forces us to grapple with our place in a larger cosmic community and the ethics of advanced beings interacting with less developed ones. Atlantis presents questions about the balance of technological and spiritual development. Lemuria invites us to reconsider our relationship with nature and our own inner potential. -XII. Scientific Plausibility and Evidence Ebens: -The existence of Ebens remains unproven by mainstream scientific standards. 
Evidence cited by believers includes alleged government documents, witness testimonies, and purported physical traces of UFO encounters. Skeptics point to the lack of conclusive physical evidence and the psychological and cultural factors that might contribute to such beliefs. -Atlanteans: -While Plato's account of Atlantis may have been based on real events, there is no conclusive archaeological evidence for the advanced civilization described in esoteric literature. Some researchers point to underwater structures and cultural similarities across ancient civilizations as potential evidence, but these claims are not widely accepted by mainstream archaeology. - - Lemurians: -Of the three groups, Lemuria has the least scientific support. The original theory of a lost continent in the Pacific was based on now-outdated ideas about biogeography. No geological evidence supports the existence of such a continent in the timeframe usually given for Lemuria. -Comparative Analysis: -All three of these subjects fall outside currently accepted scientific paradigms. The Eben hypothesis, being tied to modern UFO phenomena, has generated the most recent alleged evidence and remains a topic of debate. Atlantis, with its roots in ancient literature, occupies a space between mythology and speculative history. Lemuria remains largely in the realm of spiritual and esoteric belief systems. -XIII. Cultural Impact and Popular Representations Ebens: -The concept of Ebens and similar alien races has had a significant impact on popular culture, appearing in numerous films, TV shows, books, and video games. They have become a staple of science fiction and conspiracy theory narratives. -Atlanteans: -Atlantis has captured the human imagination for millennia, inspiring countless books, movies, and artistic works. It has become a powerful symbol for lost wisdom and advanced ancient civilizations in both esoteric circles and popular culture. 
-Lemurians: -While less prominent in mainstream popular culture than Atlantis, Lemuria has a significant following in New Age and spiritual circles. It has inspired numerous books on spiritual development and ancient wisdom. -Comparative Analysis: -These three concepts have influenced culture in different ways. The Eben narrative taps into modern anxieties and wonders about extraterrestrial life. Atlantis serves as a bridge between ancient mythology and modern aspirations for advanced civilization. Lemuria represents more esoteric and spiritual yearnings for a past golden age of harmony and wisdom. -Conclusion -This comparative analysis of Ebens, Atlanteans, and Lemurians reveals intriguing patterns in how humans conceptualize advanced intelligences and lost civilizations. While wildly speculative, these narratives offer a mirror to our own hopes, fears, and beliefs about human potential and our place in the cosmos. - - The Eben narrative reflects our modern technological age and our wondering about life beyond Earth. It challenges us to consider how we might interact with a truly alien intelligence and what the consequences of such contact might be. -The Atlantean story bridges our ancient past and our technological present. It offers both the allure of lost advanced knowledge and a warning about the responsible use of power. Atlantis serves as a canvas upon which we paint our ideas about the heights of human civilization. -The Lemurian legend takes us deepest into spiritual and esoteric realms. It represents a yearning for harmony with nature and the development of inner spiritual technologies rather than external machines. Lemuria embodies an idealized past that some believe holds the key to a more enlightened future. -Together, these three concepts span a spectrum from the extraterrestrial to the ancient human to the mystical. They invite us to expand our ideas about what's possible and to imagine different modes of advanced existence. 
-While it's crucial to maintain scientific skepticism about such unproven ideas, exploring them can be a valuable thought experiment. They challenge us to question our assumptions about reality, history, and human potential. These stories also reveal much about the human psyche and our eternal quest to understand our origins, our capabilities, and our cosmic context. -As we continue to advance scientifically and explore both outer and inner space, the themes embodied by the Ebens, Atlanteans, and Lemurians remain relevant. They remind us to balance technological progress with spiritual wisdom, to consider the ethical implications of our actions, and to remain open to the vast possibilities that the universe may hold. -In the end, whether these beings exist or not, the quest to understand them tells us much about ourselves. It reflects our highest aspirations, our deepest fears, and our unquenchable curiosity about the great mysteries of existence. diff --git a/templates/components/azure.jsonnet b/templates/components/azure.jsonnet index f10803eb..3ee819ee 100644 --- a/templates/components/azure.jsonnet +++ b/templates/components/azure.jsonnet @@ -37,8 +37,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -74,8 +79,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/bedrock.jsonnet b/templates/components/bedrock.jsonnet index 666d6bf5..1c375621 100644 --- a/templates/components/bedrock.jsonnet +++ b/templates/components/bedrock.jsonnet @@ -44,8 +44,13 @@ local chunker = import "chunker-recursive.jsonnet"; "text-completion", [ container ] ); + local service = + 
engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -85,8 +90,13 @@ local chunker = import "chunker-recursive.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/cassandra.jsonnet b/templates/components/cassandra.jsonnet index 4e08e72e..b9345fed 100644 --- a/templates/components/cassandra.jsonnet +++ b/templates/components/cassandra.jsonnet @@ -27,8 +27,13 @@ cassandra + { "stop-triples", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -54,8 +59,13 @@ cassandra + { "query-triples", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) } diff --git a/templates/components/chunker-recursive.jsonnet b/templates/components/chunker-recursive.jsonnet index 58bcba46..0b64b712 100644 --- a/templates/components/chunker-recursive.jsonnet +++ b/templates/components/chunker-recursive.jsonnet @@ -31,8 +31,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "chunker", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/claude.jsonnet b/templates/components/claude.jsonnet index d4f3df15..0cd190d4 100644 --- a/templates/components/claude.jsonnet +++ b/templates/components/claude.jsonnet @@ -34,8 +34,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -69,8 +74,13 @@ local prompts = 
import "prompts/mixtral.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/cohere.jsonnet b/templates/components/cohere.jsonnet index 64e77bcf..f05cb635 100644 --- a/templates/components/cohere.jsonnet +++ b/templates/components/cohere.jsonnet @@ -26,7 +26,7 @@ local prompts = import "prompts/mixtral.jsonnet"; "-k", $["cohere-key"], "-t", - $["cohere-temperature"], + std.toString($["cohere-temperature"]), ]) .with_limits("0.5", "128M") .with_reservations("0.1", "128M"); @@ -35,8 +35,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -55,7 +60,7 @@ local prompts = import "prompts/mixtral.jsonnet"; "-k", $["cohere-key"], "-t", - $["cohere-temperature"], + std.toString($["cohere-temperature"]), "-i", "non-persistent://tg/request/text-completion-rag", "-o", @@ -68,8 +73,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/document-rag.jsonnet b/templates/components/document-rag.jsonnet index b1a43db7..ac5c11ec 100644 --- a/templates/components/document-rag.jsonnet +++ b/templates/components/document-rag.jsonnet @@ -28,8 +28,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "document-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/embeddings-hf.jsonnet b/templates/components/embeddings-hf.jsonnet index 3e53d32c..b46feac7 100644 --- 
a/templates/components/embeddings-hf.jsonnet +++ b/templates/components/embeddings-hf.jsonnet @@ -21,15 +21,20 @@ local prompts = import "prompts/mixtral.jsonnet"; "-m", $["embeddings-model"], ]) - .with_limits("1.0", "256M") - .with_reservations("0.5", "256M"); + .with_limits("1.0", "400M") + .with_reservations("0.5", "400M"); local containerSet = engine.containers( "embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/embeddings-ollama.jsonnet b/templates/components/embeddings-ollama.jsonnet index c2a2809c..425a1c47 100644 --- a/templates/components/embeddings-ollama.jsonnet +++ b/templates/components/embeddings-ollama.jsonnet @@ -30,8 +30,13 @@ local url = import "values/url.jsonnet"; "embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/grafana.jsonnet b/templates/components/grafana.jsonnet index ccc92d4b..e968faec 100644 --- a/templates/components/grafana.jsonnet +++ b/templates/components/grafana.jsonnet @@ -8,8 +8,13 @@ local images = import "values/images.jsonnet"; create:: function(engine) local vol = engine.volume("prometheus-data").with_size("20G"); - local cfgVol = engine.configVolume("./prometheus") - .with_size("20G"); + + local cfgVol = engine.configVolume( + "prometheus-cfg", "prometheus", + { + "prometheus.yml": importstr "prometheus/prometheus.yml", + } + ); local container = engine.container("prometheus") @@ -17,17 +22,22 @@ local images = import "values/images.jsonnet"; .with_limits("0.5", "128M") .with_reservations("0.1", "128M") .with_port(9090, 9090, "http") - .with_volume_mount(cfgVol, "/etc/prometheus") + .with_volume_mount(cfgVol, "/etc/prometheus/") .with_volume_mount(vol, "/prometheus"); local containerSet = engine.containers( 
"prometheus", [ container ] ); + local service = + engine.service(containerSet) + .with_port(9090, 9090, "http"); + engine.resources([ cfgVol, vol, containerSet, + service, ]) }, @@ -37,12 +47,33 @@ local images = import "values/images.jsonnet"; create:: function(engine) local vol = engine.volume("grafana-storage").with_size("20G"); - local cv1 = engine.configVolume("./grafana/dashboard.yml") - .with_size("20G"); - local cv2 = engine.configVolume("./grafana/datasource.yml") - .with_size("20G"); - local cv3 = engine.configVolume("./grafana/dashboard.json") - .with_size("20G"); + + local provDashVol = engine.configVolume( + "prov-dash", "grafana/provisioning/", + { + "dashboard.yml": + importstr "grafana/provisioning/dashboard.yml", + } + + ); + + local provDataVol = engine.configVolume( + "prov-data", "grafana/provisioning/", + { + "datasource.yml": + importstr "grafana/provisioning/datasource.yml", + } + + ); + + local dashVol = engine.configVolume( + "dashboards", "grafana/dashboards/", + { + "dashboard.json": + importstr "grafana/dashboards/dashboard.json", + } + + ); local container = engine.container("grafana") @@ -58,20 +89,31 @@ local images = import "values/images.jsonnet"; .with_reservations("0.5", "256M") .with_port(3000, 3000, "cassandra") .with_volume_mount(vol, "/var/lib/grafana") - .with_volume_mount(cv1, "/etc/grafana/provisioning/dashboards/dashboard.yml") - .with_volume_mount(cv2, "/etc/grafana/provisioning/datasources/datasource.yml") - .with_volume_mount(cv3, "/var/lib/grafana/dashboards/dashboard.json"); + .with_volume_mount( + provDashVol, "/etc/grafana/provisioning/dashboards/" + ) + .with_volume_mount( + provDataVol, "/etc/grafana/provisioning/datasources/" + ) + .with_volume_mount( + dashVol, "/var/lib/grafana/dashboards/" + ); local containerSet = engine.containers( "grafana", [ container ] ); + local service = + engine.service(containerSet) + .with_port(3000, 3000, "http"); + engine.resources([ vol, - cv1, - cv2, - cv3, + provDashVol, + 
provDataVol, + dashVol, containerSet, + service, ]) }, diff --git a/templates/components/graph-rag.jsonnet b/templates/components/graph-rag.jsonnet index a938a014..c0200d1e 100644 --- a/templates/components/graph-rag.jsonnet +++ b/templates/components/graph-rag.jsonnet @@ -27,8 +27,13 @@ local url = import "values/url.jsonnet"; "kg-extract-definitions", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -52,8 +57,13 @@ local url = import "values/url.jsonnet"; "kg-extract-relationships", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -77,8 +87,13 @@ local url = import "values/url.jsonnet"; "kg-extract-topics", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -112,8 +127,13 @@ local url = import "values/url.jsonnet"; "graph-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/milvus.jsonnet b/templates/components/milvus.jsonnet index 556b1fe8..b3044f98 100644 --- a/templates/components/milvus.jsonnet +++ b/templates/components/milvus.jsonnet @@ -27,8 +27,13 @@ milvus + { "store-graph-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -54,8 +59,13 @@ milvus + { "query-graph-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -81,8 +91,13 @@ milvus + { "store-doc-embeddings", [ container ] ); + local service = + 
engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -108,8 +123,13 @@ milvus + { "query-doc-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/neo4j.jsonnet b/templates/components/neo4j.jsonnet index 2e808ff0..b70562fe 100644 --- a/templates/components/neo4j.jsonnet +++ b/templates/components/neo4j.jsonnet @@ -28,8 +28,13 @@ neo4j + { "store-triples", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -55,8 +60,13 @@ neo4j + { "query-triples", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/ollama.jsonnet b/templates/components/ollama.jsonnet index e6b8e895..b0507cef 100644 --- a/templates/components/ollama.jsonnet +++ b/templates/components/ollama.jsonnet @@ -31,8 +31,13 @@ local prompts = import "prompts/slm.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -64,8 +69,13 @@ local prompts = import "prompts/slm.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/openai.jsonnet b/templates/components/openai.jsonnet index 63917376..3d1a2b73 100644 --- a/templates/components/openai.jsonnet +++ b/templates/components/openai.jsonnet @@ -37,8 +37,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion", [ container ] ); + local service = + 
engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -74,8 +79,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/prompt-generic.jsonnet b/templates/components/prompt-generic.jsonnet index aa19fb74..5d6d7c54 100644 --- a/templates/components/prompt-generic.jsonnet +++ b/templates/components/prompt-generic.jsonnet @@ -28,8 +28,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "prompt", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -61,8 +66,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "prompt-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/prompt-template.jsonnet b/templates/components/prompt-template.jsonnet index 69f34556..8ba0d17f 100644 --- a/templates/components/prompt-template.jsonnet +++ b/templates/components/prompt-template.jsonnet @@ -41,8 +41,13 @@ local default_prompts = import "prompts/default-prompts.jsonnet"; "prompt", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -86,8 +91,13 @@ local default_prompts = import "prompts/default-prompts.jsonnet"; "prompt-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/pulsar-manager.jsonnet b/templates/components/pulsar-manager.jsonnet index 12ce99da..9a0b59b2 100644 --- 
a/templates/components/pulsar-manager.jsonnet +++ b/templates/components/pulsar-manager.jsonnet @@ -27,8 +27,8 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(9527, 9527) - .with_port(7750, 7750); + .with_port(9527, 9527, "api") + .with_port(7750, 7750, "api2"); engine.resources([ containerSet, diff --git a/templates/components/pulsar.jsonnet b/templates/components/pulsar.jsonnet index 562e2819..0342b4d5 100644 --- a/templates/components/pulsar.jsonnet +++ b/templates/components/pulsar.jsonnet @@ -1,5 +1,6 @@ local base = import "base/base.jsonnet"; local images = import "values/images.jsonnet"; +local url = import "values/url.jsonnet"; { @@ -7,50 +8,58 @@ local images = import "values/images.jsonnet"; create:: function(engine) - local confVolume = engine.volume("pulsar-conf").with_size("2G"); +// local confVolume = engine.volume("pulsar-conf").with_size("2G"); local dataVolume = engine.volume("pulsar-data").with_size("20G"); local container = engine.container("pulsar") .with_image(images.pulsar) - .with_command("bin/pulsar standalone") + .with_command(["bin/pulsar", "standalone"]) .with_environment({ - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" }) - .with_limits("1.0", "900M") - .with_reservations("0.5", "900M") - .with_volume_mount(confVolume, "/pulsar/conf") + .with_limits("2.0", "1500M") + .with_reservations("1.0", "1500M") +// .with_volume_mount(confVolume, "/pulsar/conf") .with_volume_mount(dataVolume, "/pulsar/data") .with_port(6650, 6650, "bookie") .with_port(8080, 8080, "http"); local adminContainer = engine.container("init-pulsar") - .with_image(images.pulsar) + .with_image(images.trustgraph) .with_command([ - "sh", - "-c", - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url 
http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response", + "tg-init-pulsar", + "-p", + url.pulsar_admin, ]) - .with_limits("0.5", "128M") + .with_limits("1", "128M") .with_reservations("0.1", "128M"); local containerSet = engine.containers( "pulsar", [ - container, adminContainer + container + ] + ); + + local adminContainerSet = engine.containers( + "init-pulsar", + [ + adminContainer ] ); local service = engine.service(containerSet) - .with_port(6650, 6650) - .with_port(8080, 8080); + .with_port(6650, 6650, "bookie") + .with_port(8080, 8080, "http"); engine.resources([ - confVolume, +// confVolume, dataVolume, containerSet, + adminContainerSet, service, ]) @@ -58,5 +67,3 @@ local images = import "values/images.jsonnet"; } - - diff --git a/templates/components/qdrant.jsonnet b/templates/components/qdrant.jsonnet index ac6eadf9..f923e84f 100644 --- a/templates/components/qdrant.jsonnet +++ b/templates/components/qdrant.jsonnet @@ -27,8 +27,13 @@ qdrant + { "store-graph-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -54,8 +59,13 @@ qdrant + { "query-graph-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -81,8 +91,13 @@ qdrant + { "store-doc-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -108,8 +123,13 @@ qdrant + { "query-doc-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/trustgraph.jsonnet 
b/templates/components/trustgraph.jsonnet index 787f1a0c..e178cc27 100644 --- a/templates/components/trustgraph.jsonnet +++ b/templates/components/trustgraph.jsonnet @@ -31,8 +31,13 @@ local prompt = import "prompt-template.jsonnet"; "chunker", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -56,8 +61,13 @@ local prompt = import "prompt-template.jsonnet"; "pdf-decoder", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -81,8 +91,75 @@ local prompt = import "prompt-template.jsonnet"; "vectorize", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, + ]) + + }, + + "metering" +: { + + create:: function(engine) + + local container = + engine.container("metering") + .with_image(images.trustgraph) + .with_command([ + "metering", + "-p", + url.pulsar, + ]) + .with_limits("0.5", "128M") + .with_reservations("0.1", "128M"); + + local containerSet = engine.containers( + "metering", [ container ] + ); + + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + + engine.resources([ + containerSet, + service, + ]) + + }, + + "metering-rag" +: { + + create:: function(engine) + + local container = + engine.container("metering-rag") + .with_image(images.trustgraph) + .with_command([ + "metering", + "-p", + url.pulsar, + "-i", + "non-persistent://tg/response/text-completion-rag-response", + ]) + .with_limits("0.5", "128M") + .with_reservations("0.1", "128M"); + + local containerSet = engine.containers( + "metering-rag", [ container ] + ); + + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + + engine.resources([ + containerSet, + service, ]) }, diff --git 
a/templates/components/vertexai.jsonnet b/templates/components/vertexai.jsonnet index f2fd3d86..2bc97799 100644 --- a/templates/components/vertexai.jsonnet +++ b/templates/components/vertexai.jsonnet @@ -15,7 +15,13 @@ local prompts = import "prompts/mixtral.jsonnet"; create:: function(engine) - local cfgVol = engine.configVolume("./vertexai"); + local cfgVol = engine.secretVolume( + "vertexai-creds", + "./vertexai", + { + "private.json": importstr "vertexai/private.json", + } + ); local container = engine.container("text-completion") @@ -35,17 +41,22 @@ local prompts = import "prompts/mixtral.jsonnet"; "-m", $["vertexai-model"], ]) - .with_limits("0.5", "128M") - .with_reservations("0.1", "128M") + .with_limits("0.5", "256M") + .with_reservations("0.1", "256M") .with_volume_mount(cfgVol, "/vertexai"); local containerSet = engine.containers( "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ cfgVol, containerSet, + service, ]) }, @@ -54,7 +65,13 @@ local prompts = import "prompts/mixtral.jsonnet"; create:: function(engine) - local cfgVol = engine.configVolume("./vertexai"); + local cfgVol = engine.secretVolume( + "vertexai-creds", + "./vertexai", + { + "private.json": importstr "vertexai/private.json", + } + ); local container = engine.container("text-completion-rag") @@ -78,20 +95,24 @@ local prompts = import "prompts/mixtral.jsonnet"; "-o", "non-persistent://tg/response/text-completion-rag-response", ]) - .with_limits("0.5", "128M") - .with_reservations("0.1", "128M") + .with_limits("0.5", "256M") + .with_reservations("0.1", "256M") .with_volume_mount(cfgVol, "/vertexai"); local containerSet = engine.containers( "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ cfgVol, containerSet, + service, ]) - } } + prompts diff --git 
a/templates/config-to-docker-compose.jsonnet b/templates/config-to-docker-compose.jsonnet index 71346abc..442d2cb7 100644 --- a/templates/config-to-docker-compose.jsonnet +++ b/templates/config-to-docker-compose.jsonnet @@ -1,6 +1,6 @@ -local engine = import "docker-compose.jsonnet"; -local decode = import "decode-config.jsonnet"; +local engine = import "engine/docker-compose.jsonnet"; +local decode = import "util/decode-config.jsonnet"; local components = import "components.jsonnet"; // Import config diff --git a/templates/config-to-gcp-k8s.jsonnet b/templates/config-to-gcp-k8s.jsonnet new file mode 100644 index 00000000..3d089a24 --- /dev/null +++ b/templates/config-to-gcp-k8s.jsonnet @@ -0,0 +1,16 @@ + +local engine = import "engine/gcp-k8s.jsonnet"; +local decode = import "util/decode-config.jsonnet"; +local components = import "components.jsonnet"; + +// Import config +local config = import "config.json"; + +// Produce patterns from config +local patterns = decode(config); + +// Extract resources using the engine +local resourceList = engine.package(patterns); + +resourceList + diff --git a/templates/config-to-minikube-k8s.jsonnet b/templates/config-to-minikube-k8s.jsonnet new file mode 100644 index 00000000..073358cb --- /dev/null +++ b/templates/config-to-minikube-k8s.jsonnet @@ -0,0 +1,26 @@ + +local engine = import "engine/minikube-k8s.jsonnet"; +local decode = import "util/decode-config.jsonnet"; +local components = import "components.jsonnet"; + +// Import config +local config = import "config.json"; + +// Produce patterns from config +local patterns = decode(config); + +local ns = { + apiVersion: "v1", + kind: "Namespace", + metadata: { + name: "trustgraph", + }, + "spec": { + }, +}; + +// Extract resources using the engine +local resourceList = engine.package(patterns); + +resourceList + diff --git a/templates/docker-compose.jsonnet b/templates/engine/docker-compose.jsonnet similarity index 75% rename from templates/docker-compose.jsonnet rename to 
templates/engine/docker-compose.jsonnet index 32697c66..4f837ff2 100644 --- a/templates/docker-compose.jsonnet +++ b/templates/engine/docker-compose.jsonnet @@ -1,5 +1,13 @@ { + // Extract resources usnig the engine + package:: function(patterns) + std.foldl( + function(state, p) state + p.create(self), + std.objectValues(patterns), + {} + ), + container:: function(name) { @@ -26,7 +34,7 @@ function(vol, mnt) self + { volumes: super.volumes + [{ - volume: vol.name, mount: mnt + volume: vol, mount: mnt }] }, @@ -70,7 +78,7 @@ (if std.length(container.volumes) > 0 then { volumes: [ - "%s:%s" % [vol.volume, vol.mount] + "%s:%s" % [vol.volume.volid, vol.mount] for vol in container.volumes ] } @@ -81,6 +89,21 @@ }, + internalService:: function(containers) + { + + local service = self, + + name: containers.name, + + with_port:: function(src, dest, name) + self + { port: [src, dest] }, + + add:: function() { + } + + }, + service:: function(containers) { @@ -88,7 +111,8 @@ name: containers.name, - with_port:: function(src, dest) self + { port: [src, dest] }, + with_port:: function(src, dest, name) + self + { port: [src, dest] }, add:: function() { } @@ -102,6 +126,8 @@ name: name, + volid:: name, + with_size:: function(size) self + { size: size }, add:: function() { @@ -112,13 +138,30 @@ }, - // FIXME: For K8s - configVolume:: function(name) + configVolume:: function(name, dir, parts) { local volume = self, - name: name, + name: dir, + + volid:: "./" + dir, + + with_size:: function(size) self + { size: size }, + + add:: function() { + } + + }, + + secretVolume:: function(name, dir, parts) + { + + local volume = self, + + name: dir, + + volid:: dir, with_size:: function(size) self + { size: size }, diff --git a/templates/engine/gcp-k8s.jsonnet b/templates/engine/gcp-k8s.jsonnet new file mode 100644 index 00000000..0605a570 --- /dev/null +++ b/templates/engine/gcp-k8s.jsonnet @@ -0,0 +1,44 @@ + +local k8s = import "k8s.jsonnet"; + +local ns = { + apiVersion: "v1", + kind: 
"Namespace", + metadata: { + name: "trustgraph", + }, + "spec": { + }, +}; + +local sc = { + apiVersion: "storage.k8s.io/v1", + kind: "StorageClass", + metadata: { + name: "tg", + }, + provisioner: "pd.csi.storage.gke.io", + parameters: { + type: "pd-balanced", + "csi.storage.k8s.io/fstype": "ext4", + }, + reclaimPolicy: "Delete", + volumeBindingMode: "WaitForFirstConsumer", +}; + +k8s + { + + // Extract resources usnig the engine + package:: function(patterns) + local resources = [sc, ns] + std.flattenArrays([ + p.create(self) for p in std.objectValues(patterns) + ]); + local resourceList = { + apiVersion: "v1", + kind: "List", + items: [ns, sc] + resources, + }; + resourceList + +} + diff --git a/templates/engine/k8s.jsonnet b/templates/engine/k8s.jsonnet new file mode 100644 index 00000000..69aabfd7 --- /dev/null +++ b/templates/engine/k8s.jsonnet @@ -0,0 +1,307 @@ +{ + + container:: function(name) + { + + local container = self, + + name: name, + limits: {}, + reservations: {}, + ports: [], + volumes: [], + + with_image:: function(x) self + { image: x }, + + with_command:: function(x) self + { command: x }, + + with_environment:: function(x) self + { environment: x }, + + with_limits:: function(c, m) self + { limits: { cpu: c, memory: m } }, + + with_reservations:: + function(c, m) self + { reservations: { cpu: c, memory: m } }, + + with_volume_mount:: + function(vol, mnt) + self + { + volumes: super.volumes + [{ + volume: vol, mount: mnt + }] + }, + + with_port:: + function(src, dest, name) self + { + ports: super.ports + [ + { src: src, dest: dest, name : name } + ] + }, + + add:: function() [ + + { + apiVersion: "apps/v1", + kind: "Deployment", + metadata: { + name: container.name, + namespace: "trustgraph", + labels: { + app: container.name + } + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: container.name, + } + }, + template: { + metadata: { + labels: { + app: container.name, + } + }, + spec: { + containers: [ + { + name: 
container.name, + image: container.image, + + // FIXME: Make everything run as + // root. Needed to get filesystems + // to be accessible. There's a + // better way of doing this? + securityContext: { + runAsUser: 0, + runAsGroup: 0, + }, + + resources: { + requests: container.reservations, + limits: container.limits + }, + } + ( + if std.length(container.ports) > 0 then + { + ports: [ + { + hostPort: port.src, + containerPort: port.dest, + } + for port in container.ports + ] + } else + {}) + + + (if std.objectHas(container, "command") then + { command: container.command } + else {}) + + (if std.objectHas(container, "environment") then + { env: [ { + name: e.key, value: e.value + } + for e in + std.objectKeysValues( + container.environment + ) + ] + } + else {}) + + + (if std.length(container.volumes) > 0 then + { + volumeMounts: [ + { + mountPath: vol.mount, + name: vol.volume.name, + } + for vol in container.volumes + ] + } + + else + {} + ) + ], + volumes: [ + vol.volume.volRef() + for vol in container.volumes + + ] + } + }, + } + {} + + } + + ] + + }, + + // Just an alias + internalService:: self.service, + + service:: function(containers) + { + + local service = self, + + name: containers.name, + + ports: [], + + with_port:: + function(src, dest, name) + self + { + ports: super.ports + [ + { src: src, dest: dest, name: name } + ] + }, + + add:: function() [ + + { + + apiVersion: "v1", + kind: "Service", + metadata: { + name: service.name, + namespace: "trustgraph", + }, + spec: { + selector: { + app: service.name, + }, + ports: [ + { + port: port.src, + targetPort: port.dest, + name: port.name, + } + for port in service.ports + ], + } + } + ], + + }, + + volume:: function(name) + { + + local volume = self, + + name: name, + + with_size:: function(size) self + { size: size }, + + add:: function() [ + { + apiVersion: "v1", + kind: "PersistentVolumeClaim", + metadata: { + name: volume.name, + namespace: "trustgraph", + }, + spec: { + storageClassName: "tg", + 
accessModes: [ "ReadWriteOnce" ], + resources: { + requests: { + storage: volume.size, + } + }, + } + } + ], + + volRef:: function() { + name: volume.name, + persistentVolumeClaim: { claimName: volume.name }, + } + + }, + + configVolume:: function(name, dir, parts) + { + + local volume = self, + + name: name, + + with_size:: function(size) self + { size: size }, + + add:: function() [ + { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: volume.name, + namespace: "trustgraph", + }, + data: parts + }, + ], + + + volRef:: function() { + name: volume.name, + configMap: { name: volume.name }, + } + + }, + + secretVolume:: function(name, dir, parts) + { + + local volume = self, + + name: name, + + with_size:: function(size) self + { size: size }, + + add:: function() [ + { + apiVersion: "v1", + kind: "Secret", + metadata: { + name: volume.name, + namespace: "trustgraph", + }, + data: { + [item.key]: std.base64(item.value) + for item in std.objectKeysValues(parts) + } + }, + ], + + volRef:: function() { + name: volume.name, + secret: { secretName: volume.name }, + } + + }, + + containers:: function(name, containers) + { + + local cont = self, + + name: name, + containers: containers, + + add:: function() std.flattenArrays( + [ c.add() for c in cont.containers ] + ), + + }, + + resources:: function(res) + + std.flattenArrays( + [ c.add() for c in res ] + ), + +} + diff --git a/templates/engine/minikube-k8s.jsonnet b/templates/engine/minikube-k8s.jsonnet new file mode 100644 index 00000000..858b17ad --- /dev/null +++ b/templates/engine/minikube-k8s.jsonnet @@ -0,0 +1,115 @@ + +local k8s = import "k8s.jsonnet"; + +local ns = { + apiVersion: "v1", + kind: "Namespace", + metadata: { + name: "trustgraph", + }, + "spec": { + }, +}; + +k8s + { + + // Extract resources usnig the engine + package:: function(patterns) + local resources = [ns] + std.flattenArrays([ + p.create(self) for p in std.objectValues(patterns) + ]); + local resourceList = { + apiVersion: "v1", + 
kind: "List", + items: resources, + }; + resourceList, + + volume:: function(name) + { + local volume = self, + name: name, + with_size:: function(size) self + { size: size }, + add:: function() [ + { + apiVersion: "v1", + kind: "PersistentVolume", + metadata: { + name: volume.name, + }, + spec: { + accessModes: [ "ReadWriteOnce" ], + capacity: { + storage: volume.size, + }, + persistentVolumeReclaimPolicy: "Delete", + hostPath: { + path: "/data/pv-" + volume.name, + }, + } + }, + { + apiVersion: "v1", + kind: "PersistentVolumeClaim", + metadata: { + name: volume.name, + namespace: "trustgraph", + }, + spec: { + accessModes: [ "ReadWriteOnce" ], + resources: { + requests: { + storage: volume.size, + } + }, + } + } + ], + + volRef:: function() { + name: volume.name, + persistentVolumeClaim: { claimName: volume.name }, + } + + }, + + service:: function(containers) + { + local service = self, + name: containers.name, + ports: [], + with_port:: + function(src, dest, name) + self + { + ports: super.ports + [ + { src: src, dest: dest, name: name } + ] + }, + add:: function() [ + { + apiVersion: "v1", + kind: "Service", + metadata: { + name: service.name, + namespace: "trustgraph", + }, + spec: { + selector: { + app: service.name, + }, + type: "LoadBalancer", + ports: [ + { + port: port.src, + targetPort: port.dest, + name: port.name, + } + for port in service.ports + ], + } + } + ], + }, + +} + diff --git a/templates/generate-all b/templates/generate-all new file mode 100755 index 00000000..948b811f --- /dev/null +++ b/templates/generate-all @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 + +import _jsonnet as j +import json +import yaml +import logging +import os +import sys +import zipfile + +logger = logging.getLogger("generate") +logging.basicConfig(level=logging.INFO, format='%(message)s') + +private_json = "Put your GCP private.json here" + +class Generator: + + def __init__(self, config, base="./templates/", version="0.0.0"): + + self.jsonnet_base = base + self.config = 
config + self.version = f"\"{version}\"".encode("utf-8") + + def process(self, config): + + res = j.evaluate_snippet("config", config, import_callback=self.load) + return json.loads(res) + + def load(self, dir, filename): + + logger.debug("Request jsonnet: %s %s", dir, filename) + + if filename == "config.json" and dir == "": + path = os.path.join(".", dir, filename) + return str(path), self.config + + if filename == "version.jsonnet" and dir == "./templates/values/": + path = os.path.join(".", dir, filename) + return str(path), self.version + + if dir: + candidates = [ + os.path.join(".", dir, filename), + os.path.join(".", filename) + ] + else: + candidates = [ + os.path.join(".", filename) + ] + + try: + + if filename == "vertexai/private.json": + + return candidates[0], private_json.encode("utf-8") + + for c in candidates: + logger.debug("Try: %s", c) + + if os.path.isfile(c): + with open(c, "rb") as f: + logger.debug("Loading: %s", c) + return str(c), f.read() + + raise RuntimeError( + f"Could not load file={filename} dir={dir}" + ) + + except: + + path = os.path.join(self.jsonnet_base, filename) + logger.debug("Try: %s", path) + with open(path, "rb") as f: + logger.debug("Loaded: %s", path) + return str(path), f.read() + +def config_object(items): + + return [ + { "name": v, "parameters": {} } + for v in items + ] + +def full_config_object( + vector_store="qdrant", embeddings="embeddings-hf", + graph_store="cassandra", llm="vertexai", +): + + return config_object([ + graph_store, "pulsar", vector_store, embeddings, + "graph-rag", "grafana", "trustgraph", llm + ]) + +def generate_config( + vector_store="qdrant", embeddings="embeddings-hf", + graph_store="cassandra", llm="vertexai", + platform="docker-compose", + version="0.0.0" +): + + config = full_config_object( + vector_store=vector_store, + embeddings=embeddings, + graph_store=graph_store, + llm=llm, + ) + + with open(f"./templates/config-to-{platform}.jsonnet", "r") as f: + wrapper = f.read() + + gen = 
Generator(json.dumps(config).encode("utf-8"), version=version) + + processed = gen.process(wrapper) + + y = yaml.dump(processed) + + return y + +def generate_all(output, version): + + for platform in [ + "docker-compose", "minikube-k8s", "gcp-k8s" + ]: + for model in [ + "azure", "bedrock", "claude", "cohere", "llamafile", "ollama", + "openai", "vertexai" + ]: + for graph in [ "cassandra", "neo4j" ]: + + y = generate_config( + llm=model, graph_store=graph, platform=platform, + version=version + ) + + fname = f"{platform}/tg-{model}-{graph}.yaml" + + output(fname, y) + + +if len(sys.argv) < 3: + raise RuntimeError("Usage: generate-all <output-file> <version>") + +outfile = sys.argv[1] +version = sys.argv[2] + +logger.info(f"Outputting to {outfile}...") + +with zipfile.ZipFile(outfile, mode='w') as out: + + def output(name, content): + logger.info(f"Adding {name}...") + out.writestr(name, content) + + generate_all(output=output, version=version) + + # Placeholder for the private.json file. Won't put actual credentials + # here. 
+ output("docker-compose/vertexai/private.json", private_json) + + # Grafana config + with open("grafana/dashboards/dashboard.json") as f: + output("docker-compose/grafana/dashboards/dashboard.json", f.read()) + + with open("grafana/provisioning/dashboard.yml") as f: + output("docker-compose/grafana/provisioning/dashboard.yml", f.read()) + + with open("grafana/provisioning/datasource.yml") as f: + output("docker-compose/grafana/provisioning/datasource.yml", f.read()) + + # Prometheus config + with open("prometheus/prometheus.yml") as f: + output("docker-compose/prometheus/prometheus.yml", f.read()) + + # A README + with open("templates/zip-readme.md") as f: + output("README.md", f.read()) + +logger.info("Output file written.") + diff --git a/templates/opts-to-docker-compose.jsonnet b/templates/opts-to-docker-compose.jsonnet new file mode 100644 index 00000000..c916475d --- /dev/null +++ b/templates/opts-to-docker-compose.jsonnet @@ -0,0 +1,21 @@ + +local engine = import "engine/docker-compose.jsonnet"; +local components = import "components.jsonnet"; + +// Options +local options = std.split(std.extVar("options"), ","); + +// Produce patterns from config +local patterns = std.foldl( + function(state, p) state + components[p], + options, + {} +); + +// Extract resources using the engine +local resources = engine.package(patterns); + +resources + + + diff --git a/templates/main.jsonnet b/templates/opts-to-gcp-k8s.jsonnet similarity index 59% rename from templates/main.jsonnet rename to templates/opts-to-gcp-k8s.jsonnet index 06a93949..a338cd9e 100644 --- a/templates/main.jsonnet +++ b/templates/opts-to-gcp-k8s.jsonnet @@ -1,6 +1,5 @@ -local engine = import "docker-compose.jsonnet"; -local decode = import "decode-config.jsonnet"; +local engine = import "engine/gcp-k8s.jsonnet"; local components = import "components.jsonnet"; // Options @@ -14,11 +13,7 @@ local patterns = std.foldl( ); // Extract resources usnig the engine -local resources = std.foldl( - 
function(state, p) state + p.create(engine), - std.objectValues(patterns), - {} -); +local resources = engine.package(patterns); std.manifestYamlDoc(resources) diff --git a/templates/opts-to-minikube-k8s.jsonnet b/templates/opts-to-minikube-k8s.jsonnet new file mode 100644 index 00000000..15895909 --- /dev/null +++ b/templates/opts-to-minikube-k8s.jsonnet @@ -0,0 +1,19 @@ + +local engine = import "engine/minikube-k8s.jsonnet"; +local components = import "components.jsonnet"; + +// Options +local options = std.split(std.extVar("options"), ","); + +// Produce patterns from config +local patterns = std.foldl( + function(state, p) state + components[p], + options, + {} +); + +// Extract resources using the engine +local resources = engine.package(patterns); + +resources + diff --git a/templates/stores/cassandra.jsonnet b/templates/stores/cassandra.jsonnet index f3d27025..c501e1f9 100644 --- a/templates/stores/cassandra.jsonnet +++ b/templates/stores/cassandra.jsonnet @@ -26,7 +26,7 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(9042, 9042); + .with_port(9042, 9042, "api"); engine.resources([ vol, diff --git a/templates/stores/milvus.jsonnet b/templates/stores/milvus.jsonnet index eef91172..888a83a9 100644 --- a/templates/stores/milvus.jsonnet +++ b/templates/stores/milvus.jsonnet @@ -37,7 +37,7 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(2379, 2379); + .with_port(2379, 2379, "api"); engine.resources([ vol, @@ -78,7 +78,7 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(9001, 9001); + .with_port(9001, 9001, "api"); engine.resources([ vol, @@ -116,8 +116,8 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(9091, 9091) - .with_port(19530, 19530); + .with_port(9091, 9091, "api") + .with_port(19530, 19530, "api2"); 
engine.resources([ vol, diff --git a/templates/stores/neo4j.jsonnet b/templates/stores/neo4j.jsonnet index 4d74d73d..55cccc5f 100644 --- a/templates/stores/neo4j.jsonnet +++ b/templates/stores/neo4j.jsonnet @@ -28,10 +28,16 @@ local images = import "values/images.jsonnet"; "neo4j", [ container ] ); + local service = + engine.service(containerSet) + .with_port(7474, 7474, "api") + .with_port(7687, 7687, "api2"); + engine.resources([ vol, containerSet, - ]) + service, + ]) }, diff --git a/templates/stores/qdrant.jsonnet b/templates/stores/qdrant.jsonnet index 4e2ce40a..e8443b73 100644 --- a/templates/stores/qdrant.jsonnet +++ b/templates/stores/qdrant.jsonnet @@ -22,9 +22,15 @@ local images = import "values/images.jsonnet"; "qdrant", [ container ] ); + local service = + engine.service(containerSet) + .with_port(6333, 6333, "api") + .with_port(6334, 6334, "api2"); + engine.resources([ vol, containerSet, + service, ]) }, diff --git a/templates/decode-config.jsonnet b/templates/util/decode-config.jsonnet similarity index 100% rename from templates/decode-config.jsonnet rename to templates/util/decode-config.jsonnet diff --git a/templates/values/url.jsonnet b/templates/values/url.jsonnet index 36c7ca41..1bacb067 100644 --- a/templates/values/url.jsonnet +++ b/templates/values/url.jsonnet @@ -1,5 +1,6 @@ { pulsar: "pulsar://pulsar:6650", + pulsar_admin: "http://pulsar:8080", milvus: "http://milvus:19530", qdrant: "http://qdrant:6333", } diff --git a/templates/zip-readme.md b/templates/zip-readme.md new file mode 100644 index 00000000..57a14ed0 --- /dev/null +++ b/templates/zip-readme.md @@ -0,0 +1,16 @@ +These are launch configurations for TrustGraph. See https://trustgraph.ai for +the quickstart using docker compose. + +Hint for Linux: There are files here which get mounted as volumes inside +Docker Compose containers. This may trigger SELinux rules on your system, to +permit access inside the containers, use a command like this... 
+ +chcon -Rt svirt_sandbox_file_t grafana/ prometheus/ + +The file vertexai/private.json is a placeholder for real GCP credentials if +you are using the VertexAI LLM. If you're using that in Docker Compose, +replace with your real credentials, and don't forget to permit access if you +are using Linux: + +chcon -Rt svirt_sandbox_file_t vertexai/ + diff --git a/tg-launch-azure-cassandra.yaml b/tg-launch-azure-cassandra.yaml deleted file mode 100644 index 0c7f8b09..00000000 --- a/tg-launch-azure-cassandra.yaml +++ /dev/null @@ -1,494 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - 
"./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - 
"image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. 
Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. 
- - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format 
data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. 
Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-azure" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${AZURE_TOKEN}" - - "-e" - - "${AZURE_ENDPOINT}" - - "-x" - - "4096" - - "-t" - - "0" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-azure" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${AZURE_TOKEN}" - - "-e" - - "${AZURE_ENDPOINT}" - - "-x" - - "4096" - - "-t" - - "0" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": 
"on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-azure-neo4j.yaml b/tg-launch-azure-neo4j.yaml deleted file mode 100644 index d3db7faa..00000000 --- a/tg-launch-azure-neo4j.yaml +++ /dev/null @@ -1,495 +0,0 @@ -"services": - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" 
- - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": 
"docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "neo4j": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. 
Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. 
- - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format 
data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. 
Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-azure" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${AZURE_TOKEN}" - - "-e" - - "${AZURE_ENDPOINT}" - - "-x" - - "4096" - - "-t" - - "0" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-azure" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${AZURE_TOKEN}" - - "-e" - - "${AZURE_ENDPOINT}" - - "-x" - - "4096" - - "-t" - - "0" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-bedrock-cassandra.yaml b/tg-launch-bedrock-cassandra.yaml deleted file mode 100644 index 3d18dc06..00000000 --- a/tg-launch-bedrock-cassandra.yaml +++ /dev/null @@ -1,502 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "chunker": - "command": - - "chunker-recursive" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "2000" - - "--chunk-overlap" - - "100" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - 
"volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - 
"reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. 
Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. 
- - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format 
data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. 
Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-bedrock" - - "-p" - - "pulsar://pulsar:6650" - - "-z" - - "${AWS_ID_KEY}" - - "-k" - - "${AWS_SECRET_KEY}" - - "-r" - - "us-west-2" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "mistral.mixtral-8x7b-instruct-v0:1" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-bedrock" - - "-p" - - "pulsar://pulsar:6650" - - "-z" - - "${AWS_ID_KEY}" - - "-k" - - "${AWS_SECRET_KEY}" - - "-r" - - "us-west-2" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "mistral.mixtral-8x7b-instruct-v0:1" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - 
"cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-bedrock-neo4j.yaml b/tg-launch-bedrock-neo4j.yaml deleted file mode 100644 index e52edbfc..00000000 --- a/tg-launch-bedrock-neo4j.yaml +++ /dev/null @@ -1,503 +0,0 @@ -"services": - "chunker": - "command": - - "chunker-recursive" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "2000" - - "--chunk-overlap" - - "100" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - 
"./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - 
"kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "neo4j": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. 
Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. 
Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. 
The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. 
For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - 
"-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-bedrock" - - "-p" - - "pulsar://pulsar:6650" - - "-z" - - "${AWS_ID_KEY}" - - "-k" - - "${AWS_SECRET_KEY}" - - "-r" - - "us-west-2" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "mistral.mixtral-8x7b-instruct-v0:1" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-bedrock" - - "-p" - - "pulsar://pulsar:6650" - - "-z" - - "${AWS_ID_KEY}" - - "-k" - - "${AWS_SECRET_KEY}" - - "-r" - - "us-west-2" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - 
"mistral.mixtral-8x7b-instruct-v0:1" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-claude-cassandra.yaml b/tg-launch-claude-cassandra.yaml deleted file mode 100644 index 3221d3ea..00000000 --- a/tg-launch-claude-cassandra.yaml +++ /dev/null @@ -1,490 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": 
"docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - 
"limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the 
name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. 
The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. 
The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. 
For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - 
- "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-claude" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${CLAUDE_KEY}" - - "-x" - - "4096" - - "-t" - - "0" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-claude" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${CLAUDE_KEY}" - - "-x" - - "4096" - - "-t" - - "0" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" 
- "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-claude-neo4j.yaml b/tg-launch-claude-neo4j.yaml deleted file mode 100644 index f07fa68f..00000000 --- a/tg-launch-claude-neo4j.yaml +++ /dev/null @@ -1,491 +0,0 @@ -"services": - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - 
"./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - 
"pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "neo4j": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. 
The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. 
Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text 
which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. 
The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-claude" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${CLAUDE_KEY}" - - "-x" - - "4096" - - "-t" - - "0" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-claude" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${CLAUDE_KEY}" - - "-x" - - "4096" - - "-t" - - "0" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - 
"embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-cohere-cassandra.yaml b/tg-launch-cohere-cassandra.yaml deleted file mode 100644 index bcfa345a..00000000 --- a/tg-launch-cohere-cassandra.yaml +++ /dev/null @@ -1,486 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "150" - - "--chunk-overlap" - - "10" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - 
"./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - 
"image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. 
Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. 
- - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format 
data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. 
Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-cohere" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${COHERE_KEY}" - - "-t" - - 0 - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-cohere" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${COHERE_KEY}" - - "-t" - - 0 - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - 
"deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-cohere-neo4j.yaml b/tg-launch-cohere-neo4j.yaml deleted file mode 100644 index 0ec692d0..00000000 --- a/tg-launch-cohere-neo4j.yaml +++ /dev/null @@ -1,487 +0,0 @@ -"services": - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "150" - - "--chunk-overlap" - - "10" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - 
- "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "neo4j": - "deploy": - 
"resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. 
For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. 
Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. 
Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. 
- - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - 
"-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - 
"text-completion-cohere" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${COHERE_KEY}" - - "-t" - - 0 - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-cohere" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${COHERE_KEY}" - - "-t" - - 0 - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-llamafile-cassandra.yaml b/tg-launch-llamafile-cassandra.yaml deleted file mode 100644 index bb45eed0..00000000 --- a/tg-launch-llamafile-cassandra.yaml +++ /dev/null @@ -1,434 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": 
"0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url 
http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-generic" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - 
"non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-generic" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": 
"docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-llamafile" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "LLaMA_CPP" - - "-r" - - "${LLAMAFILE_URL}" - "deploy": - "resources": - "limits": - "cpus": "0.5" - 
"memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-llamafile" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "LLaMA_CPP" - - "-r" - - "${LLAMAFILE_URL}" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-llamafile-neo4j.yaml b/tg-launch-llamafile-neo4j.yaml deleted file mode 100644 index d57ac5dc..00000000 --- a/tg-launch-llamafile-neo4j.yaml +++ /dev/null @@ -1,435 +0,0 @@ -"services": - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" 
- "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" 
- "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "neo4j": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-generic" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - "deploy": - 
"resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-generic" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - 
"http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-llamafile" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "LLaMA_CPP" - - "-r" - - "${LLAMAFILE_URL}" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - 
"text-completion-rag": - "command": - - "text-completion-llamafile" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "LLaMA_CPP" - - "-r" - - "${LLAMAFILE_URL}" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-ollama-cassandra.yaml b/tg-launch-ollama-cassandra.yaml deleted file mode 100644 index 171176df..00000000 --- a/tg-launch-ollama-cassandra.yaml +++ /dev/null @@ -1,434 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - 
"limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - 
"command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-generic" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" 
- "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-generic" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": 
"docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-ollama" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "gemma2:9b" - - "-r" - - "${OLLAMA_HOST}" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-ollama" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "gemma2:9b" - - "-r" - - "${OLLAMA_HOST}" - - "-i" - - 
"non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-ollama-neo4j.yaml b/tg-launch-ollama-neo4j.yaml deleted file mode 100644 index e99c9d49..00000000 --- a/tg-launch-ollama-neo4j.yaml +++ /dev/null @@ -1,435 +0,0 @@ -"services": - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - 
"grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - 
"cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "neo4j": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-generic" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-generic" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - 
"non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - 
"pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-ollama" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "gemma2:9b" - - "-r" - - "${OLLAMA_HOST}" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-ollama" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "gemma2:9b" - - "-r" - - "${OLLAMA_HOST}" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": 
- "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-openai-cassandra.yaml b/tg-launch-openai-cassandra.yaml deleted file mode 100644 index 209580c0..00000000 --- a/tg-launch-openai-cassandra.yaml +++ /dev/null @@ -1,494 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - 
"GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - 
"kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. 
The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. 
Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text 
which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. 
The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-openai" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${OPENAI_KEY}" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "GPT-3.5-Turbo" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-openai" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${OPENAI_KEY}" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "GPT-3.5-Turbo" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": 
"on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-openai-neo4j.yaml b/tg-launch-openai-neo4j.yaml deleted file mode 100644 index e3edbdb9..00000000 --- a/tg-launch-openai-neo4j.yaml +++ /dev/null @@ -1,495 +0,0 @@ -"services": - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - 
"pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - 
"memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "neo4j": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. 
Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. 
- - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format 
data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. 
Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-openai" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${OPENAI_KEY}" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "GPT-3.5-Turbo" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion-rag": - "command": - - "text-completion-openai" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "${OPENAI_KEY}" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "GPT-3.5-Turbo" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": 
"on-failure:100" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-vertexai-cassandra.yaml b/tg-launch-vertexai-cassandra.yaml deleted file mode 100644 index 1f29b1a7..00000000 --- a/tg-launch-vertexai-cassandra.yaml +++ /dev/null @@ -1,502 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - 
"grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - 
"cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. 
Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. 
- - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format 
data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. 
Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - 
"restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-vertexai" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "/vertexai/private.json" - - "-r" - - "us-central1" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "gemini-1.0-pro-001" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "volumes": - - "./vertexai:/vertexai" - "text-completion-rag": - "command": - - "text-completion-vertexai" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "/vertexai/private.json" - - "-r" - - "us-central1" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "gemini-1.0-pro-001" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - 
"reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "volumes": - - "./vertexai:/vertexai" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-launch-vertexai-neo4j.yaml b/tg-launch-vertexai-neo4j.yaml deleted file mode 100644 index 9b57a4b5..00000000 --- a/tg-launch-vertexai-neo4j.yaml +++ /dev/null @@ -1,503 +0,0 @@ -"services": - "chunker": - "command": - - "chunker-token" - - "-p" - - "pulsar://pulsar:6650" - - "--chunk-size" - - "250" - - "--chunk-overlap" - - "15" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "embeddings": - "command": - - "embeddings-hf" - - "-p" - - "pulsar://pulsar:6650" - - "-m" - - "all-MiniLM-L6-v2" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - 
"./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "graph-rag": - "command": - - "graph-rag" - - "-p" - - "pulsar://pulsar:6650" - - "--prompt-request-queue" - - "non-persistent://tg/request/prompt-rag" - - "--prompt-response-queue" - - "non-persistent://tg/response/prompt-rag-response" - - "--entity-limit" - - "50" - - "--triple-limit" - - "30" - - "--max-subgraph-size" - - "3000" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "kg-extract-definitions": - "command": - - "kg-extract-definitions" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "kg-extract-relationships": - "command": - - "kg-extract-relationships" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - 
"kg-extract-topics": - "command": - - "kg-extract-topics" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "neo4j": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "pdf-decoder": - "command": - - "pdf-decoder" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "prompt": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. 
Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. 
Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. 
Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "prompt-rag": - "command": - - "prompt-template" - - "-p" - - "pulsar://pulsar:6650" - - "-i" - - "non-persistent://tg/request/prompt-rag" - - "-o" - - "non-persistent://tg/response/prompt-rag-response" - - "--text-completion-request-queue" - - "non-persistent://tg/request/text-completion-rag" - - "--text-completion-response-queue" - - "non-persistent://tg/response/text-completion-rag-response" - - "--definition-template" - - "\nStudy the following text and derive definitions for any discovered entities.\nDo not provide definitions for entities whose definitions are incomplete\nor unknown.\nOutput relationships in JSON format as an arary of objects with fields:\n- entity: the name of the entity\n- definition: English text which defines the entity\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. The\nabstract will be written as plain text. Do not add markdown formatting\nor headers or prefixes. Do not include null or unknown definitions.\n" - - "--relationship-template" - - "\nStudy the following text and derive entity relationships. For each\nrelationship, derive the subject, predicate and object of the relationship.\nOutput relationships in JSON format as an arary of objects with fields:\n- subject: the subject of the relationship\n- predicate: the predicate\n- object: the object of the relationship\n- object-entity: false if the object is a simple data type: name, value or date. true if it is an entity.\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not use special characters in the abstract text. 
The\nabstract must be written as plain text. Do not add markdown formatting\nor headers or prefixes.\n" - - "--topic-template" - - "You are a helpful assistant that performs information extraction tasks for a provided text.\nRead the provided text. You will identify topics and their definitions in JSON.\n\nReading Instructions:\n- Ignore document formatting in the provided text.\n- Study the provided text carefully.\n\nHere is the text:\n{text}\n\nResponse Instructions: \n- Do not respond with special characters.\n- Return only topics that are concepts and unique to the provided text.\n- Respond only with well-formed JSON.\n- The JSON response shall be an array of objects with keys \"topic\" and \"definition\". \n- The JSON response shall use the following structure:\n\n```json\n[{{\"topic\": string, \"definition\": string}}]\n```\n\n- Do not write any additional text or explanations." - - "--knowledge-query-template" - - | - Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here's the knowledge statements: - {graph} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--document-query-template" - - | - Study the following context. Use only the information provided in the context in your response. Do not speculate if the answer is not found in the provided set of knowledge statements. - - Here is the context: - {documents} - - Use only the provided knowledge statements to respond to the following: - {query} - - "--rows-template" - - "\nStudy the following text and derive objects which match the schema provided.\n\nYou must output an array of JSON objects for each object you discover\nwhich matches the schema. 
For each object, output a JSON object whose fields\ncarry the name field specified in the schema.\n\n\n\n{schema}\n\n\n\n{text}\n\n\n\nYou will respond only with raw JSON format data. Do not provide\nexplanations. Do not add markdown formatting or headers or prefixes.\n" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - 
"-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "text-completion": - "command": - - "text-completion-vertexai" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "/vertexai/private.json" - - "-r" - - "us-central1" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - "gemini-1.0-pro-001" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "volumes": - - "./vertexai:/vertexai" - "text-completion-rag": - "command": - - "text-completion-vertexai" - - "-p" - - "pulsar://pulsar:6650" - - "-k" - - "/vertexai/private.json" - - "-r" - - "us-central1" - - "-x" - - "4096" - - "-t" - - "0" - - "-m" - - 
"gemini-1.0-pro-001" - - "-i" - - "non-persistent://tg/request/text-completion-rag" - - "-o" - - "non-persistent://tg/response/text-completion-rag-response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "volumes": - - "./vertexai:/vertexai" - "vectorize": - "command": - - "embeddings-vectorize" - - "-p" - - "pulsar://pulsar:6650" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "512M" - "reservations": - "cpus": "0.5" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-storage-cassandra.yaml b/tg-storage-cassandra.yaml deleted file mode 100644 index b3e9b1f3..00000000 --- a/tg-storage-cassandra.yaml +++ /dev/null @@ -1,214 +0,0 @@ -"services": - "cassandra": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "800M" - "reservations": - "cpus": "0.5" - "memory": "800M" - "environment": - "JVM_OPTS": "-Xms256M -Xmx256M" - "image": "docker.io/cassandra:4.1.6" - "ports": - - "9042:9042" - "restart": "on-failure:100" - "volumes": - - "cassandra:/var/lib/cassandra" - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "init-pulsar": - "command": - - "sh" - 
- "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "pulsar": - "command": "bin/pulsar standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": 
"on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "512M" - "reservations": - "cpus": "0.1" - "memory": "512M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-cassandra" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "cassandra" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "cassandra": {} - "grafana-storage": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/tg-storage-neo4j.yaml b/tg-storage-neo4j.yaml deleted file mode 100644 index bfd1ba34..00000000 --- a/tg-storage-neo4j.yaml +++ 
/dev/null @@ -1,215 +0,0 @@ -"services": - "grafana": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "environment": - "GF_ORG_NAME": "trustgraph.ai" - "image": "docker.io/grafana/grafana:11.1.4" - "ports": - - "3000:3000" - "restart": "on-failure:100" - "volumes": - - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" - "init-pulsar": - "command": - - "sh" - - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "restart": "on-failure:100" - "neo4j": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "768M" - "reservations": - "cpus": "0.5" - "memory": "768M" - "environment": - "NEO4J_AUTH": "neo4j/password" - "image": "docker.io/neo4j:5.22.0-community-bullseye" - "ports": - - "7474:7474" - - "7687:7687" - "restart": "on-failure:100" - "volumes": - - "neo4j:/data" - "prometheus": - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/prom/prometheus:v2.53.2" - "ports": - - "9090:9090" - "restart": "on-failure:100" - "volumes": - - "./prometheus:/etc/prometheus" - - "prometheus-data:/prometheus" - "pulsar": - "command": "bin/pulsar 
standalone" - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "900M" - "reservations": - "cpus": "0.5" - "memory": "900M" - "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" - "image": "docker.io/apachepulsar/pulsar:3.3.1" - "ports": - - "6650:6650" - - "8080:8080" - "restart": "on-failure:100" - "volumes": - - "pulsar-conf:/pulsar/conf" - - "pulsar-data:/pulsar/data" - "qdrant": - "deploy": - "resources": - "limits": - "cpus": "1.0" - "memory": "256M" - "reservations": - "cpus": "0.5" - "memory": "256M" - "image": "docker.io/qdrant/qdrant:v1.11.1" - "ports": - - "6333:6333" - - "6334:6334" - "restart": "on-failure:100" - "volumes": - - "qdrant:/qdrant/storage" - "query-doc-embeddings": - "command": - - "de-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-graph-embeddings": - "command": - - "ge-query-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "query-triples": - "command": - - "triples-query-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-doc-embeddings": - "command": - - "de-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": 
"docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-graph-embeddings": - "command": - - "ge-write-qdrant" - - "-p" - - "pulsar://pulsar:6650" - - "-t" - - "http://qdrant:6333" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" - "store-triples": - "command": - - "triples-write-neo4j" - - "-p" - - "pulsar://pulsar:6650" - - "-g" - - "bolt://neo4j:7687" - "deploy": - "resources": - "limits": - "cpus": "0.5" - "memory": "128M" - "reservations": - "cpus": "0.1" - "memory": "128M" - "image": "docker.io/trustgraph/trustgraph-flow:0.9.5" - "restart": "on-failure:100" -"volumes": - "grafana-storage": {} - "neo4j": {} - "prometheus-data": {} - "pulsar-conf": {} - "pulsar-data": {} - "qdrant": {} diff --git a/trustgraph-base/README.md b/trustgraph-base/README.md new file mode 100644 index 00000000..7a2ce130 --- /dev/null +++ b/trustgraph-base/README.md @@ -0,0 +1 @@ +See https://trustgraph.ai/ diff --git a/trustgraph-base/setup.py b/trustgraph-base/setup.py new file mode 100644 index 00000000..60d8b6c8 --- /dev/null +++ b/trustgraph-base/setup.py @@ -0,0 +1,42 @@ +import setuptools +import os +import importlib + +with open("README.md", "r") as fh: + long_description = fh.read() + +# Load a version number module +spec = importlib.util.spec_from_file_location( + 'version', 'trustgraph/base_version.py' +) +version_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(version_module) + +version = version_module.__version__ + +setuptools.setup( + name="trustgraph-base", + version=version, + author="trustgraph.ai", + author_email="security@trustgraph.ai", + description="TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.", + long_description=long_description, + 
long_description_content_type="text/markdown", + url="https://github.com/trustgraph-ai/trustgraph", + packages=setuptools.find_namespace_packages( + where='./', + ), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", + "Operating System :: OS Independent", + ], + python_requires='>=3.8', + download_url = "https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v" + version + ".tar.gz", + install_requires=[ + "pulsar-client", + "prometheus-client", + ], + scripts=[ + ] +) diff --git a/trustgraph/base/__init__.py b/trustgraph-base/trustgraph/base/__init__.py similarity index 100% rename from trustgraph/base/__init__.py rename to trustgraph-base/trustgraph/base/__init__.py diff --git a/trustgraph/base/base_processor.py b/trustgraph-base/trustgraph/base/base_processor.py similarity index 100% rename from trustgraph/base/base_processor.py rename to trustgraph-base/trustgraph/base/base_processor.py diff --git a/trustgraph/base/consumer.py b/trustgraph-base/trustgraph/base/consumer.py similarity index 86% rename from trustgraph/base/consumer.py rename to trustgraph-base/trustgraph/base/consumer.py index 0c975cf5..eeaf83a1 100644 --- a/trustgraph/base/consumer.py +++ b/trustgraph-base/trustgraph/base/consumer.py @@ -1,6 +1,6 @@ from pulsar.schema import JsonSchema -from prometheus_client import start_http_server, Histogram, Info, Counter +from prometheus_client import Histogram, Info, Counter, Enum import time from . 
base_processor import BaseProcessor @@ -10,6 +10,15 @@ class Consumer(BaseProcessor): def __init__(self, **params): + if not hasattr(__class__, "state_metric"): + __class__.state_metric = Enum( + 'processor_state', 'Processor state', + states=['starting', 'running', 'stopped'] + ) + __class__.state_metric.state('starting') + + __class__.state_metric.state('starting') + super(Consumer, self).__init__(**params) input_queue = params.get("input_queue") @@ -47,6 +56,8 @@ class Consumer(BaseProcessor): def run(self): + __class__.state_metric.state('running') + while True: msg = self.consumer.receive() diff --git a/trustgraph/base/consumer_producer.py b/trustgraph-base/trustgraph/base/consumer_producer.py similarity index 89% rename from trustgraph/base/consumer_producer.py rename to trustgraph-base/trustgraph/base/consumer_producer.py index 3f9d0f4f..cabb7525 100644 --- a/trustgraph/base/consumer_producer.py +++ b/trustgraph-base/trustgraph/base/consumer_producer.py @@ -1,6 +1,6 @@ from pulsar.schema import JsonSchema -from prometheus_client import Histogram, Info, Counter +from prometheus_client import Histogram, Info, Counter, Enum import time from . 
base_processor import BaseProcessor @@ -12,6 +12,15 @@ class ConsumerProducer(BaseProcessor): def __init__(self, **params): + if not hasattr(__class__, "state_metric"): + __class__.state_metric = Enum( + 'processor_state', 'Processor state', + states=['starting', 'running', 'stopped'] + ) + __class__.state_metric.state('starting') + + __class__.state_metric.state('starting') + input_queue = params.get("input_queue") output_queue = params.get("output_queue") subscriber = params.get("subscriber") @@ -54,18 +63,20 @@ class ConsumerProducer(BaseProcessor): if output_schema == None: raise RuntimeError("output_schema must be specified") - self.consumer = self.client.subscribe( - input_queue, subscriber, - schema=JsonSchema(input_schema), - ) - self.producer = self.client.create_producer( topic=output_queue, schema=JsonSchema(output_schema), ) + self.consumer = self.client.subscribe( + input_queue, subscriber, + schema=JsonSchema(input_schema), + ) + def run(self): + __class__.state_metric.state('running') + while True: msg = self.consumer.receive() diff --git a/trustgraph/base/producer.py b/trustgraph-base/trustgraph/base/producer.py similarity index 100% rename from trustgraph/base/producer.py rename to trustgraph-base/trustgraph/base/producer.py diff --git a/trustgraph/__init__.py b/trustgraph-base/trustgraph/clients/__init__.py similarity index 100% rename from trustgraph/__init__.py rename to trustgraph-base/trustgraph/clients/__init__.py diff --git a/trustgraph/clients/base.py b/trustgraph-base/trustgraph/clients/base.py similarity index 100% rename from trustgraph/clients/base.py rename to trustgraph-base/trustgraph/clients/base.py diff --git a/trustgraph/clients/document_embeddings_client.py b/trustgraph-base/trustgraph/clients/document_embeddings_client.py similarity index 100% rename from trustgraph/clients/document_embeddings_client.py rename to trustgraph-base/trustgraph/clients/document_embeddings_client.py diff --git 
a/trustgraph/clients/document_rag_client.py b/trustgraph-base/trustgraph/clients/document_rag_client.py similarity index 100% rename from trustgraph/clients/document_rag_client.py rename to trustgraph-base/trustgraph/clients/document_rag_client.py diff --git a/trustgraph/clients/embeddings_client.py b/trustgraph-base/trustgraph/clients/embeddings_client.py similarity index 100% rename from trustgraph/clients/embeddings_client.py rename to trustgraph-base/trustgraph/clients/embeddings_client.py diff --git a/trustgraph/clients/graph_embeddings_client.py b/trustgraph-base/trustgraph/clients/graph_embeddings_client.py similarity index 100% rename from trustgraph/clients/graph_embeddings_client.py rename to trustgraph-base/trustgraph/clients/graph_embeddings_client.py diff --git a/trustgraph/clients/graph_rag_client.py b/trustgraph-base/trustgraph/clients/graph_rag_client.py similarity index 100% rename from trustgraph/clients/graph_rag_client.py rename to trustgraph-base/trustgraph/clients/graph_rag_client.py diff --git a/trustgraph/clients/llm_client.py b/trustgraph-base/trustgraph/clients/llm_client.py similarity index 100% rename from trustgraph/clients/llm_client.py rename to trustgraph-base/trustgraph/clients/llm_client.py diff --git a/trustgraph/clients/prompt_client.py b/trustgraph-base/trustgraph/clients/prompt_client.py similarity index 100% rename from trustgraph/clients/prompt_client.py rename to trustgraph-base/trustgraph/clients/prompt_client.py diff --git a/trustgraph/clients/triples_query_client.py b/trustgraph-base/trustgraph/clients/triples_query_client.py similarity index 100% rename from trustgraph/clients/triples_query_client.py rename to trustgraph-base/trustgraph/clients/triples_query_client.py diff --git a/trustgraph/exceptions.py b/trustgraph-base/trustgraph/exceptions.py similarity index 100% rename from trustgraph/exceptions.py rename to trustgraph-base/trustgraph/exceptions.py diff --git a/trustgraph/log_level.py 
b/trustgraph-base/trustgraph/log_level.py similarity index 100% rename from trustgraph/log_level.py rename to trustgraph-base/trustgraph/log_level.py diff --git a/trustgraph/chunking/__init__.py b/trustgraph-base/trustgraph/objects/__init__.py similarity index 100% rename from trustgraph/chunking/__init__.py rename to trustgraph-base/trustgraph/objects/__init__.py diff --git a/trustgraph/objects/field.py b/trustgraph-base/trustgraph/objects/field.py similarity index 100% rename from trustgraph/objects/field.py rename to trustgraph-base/trustgraph/objects/field.py diff --git a/trustgraph/objects/object.py b/trustgraph-base/trustgraph/objects/object.py similarity index 100% rename from trustgraph/objects/object.py rename to trustgraph-base/trustgraph/objects/object.py diff --git a/trustgraph/rdf.py b/trustgraph-base/trustgraph/rdf.py similarity index 100% rename from trustgraph/rdf.py rename to trustgraph-base/trustgraph/rdf.py diff --git a/trustgraph/schema/__init__.py b/trustgraph-base/trustgraph/schema/__init__.py similarity index 100% rename from trustgraph/schema/__init__.py rename to trustgraph-base/trustgraph/schema/__init__.py diff --git a/trustgraph/schema/documents.py b/trustgraph-base/trustgraph/schema/documents.py similarity index 100% rename from trustgraph/schema/documents.py rename to trustgraph-base/trustgraph/schema/documents.py diff --git a/trustgraph/schema/graph.py b/trustgraph-base/trustgraph/schema/graph.py similarity index 100% rename from trustgraph/schema/graph.py rename to trustgraph-base/trustgraph/schema/graph.py diff --git a/trustgraph/schema/models.py b/trustgraph-base/trustgraph/schema/models.py similarity index 87% rename from trustgraph/schema/models.py rename to trustgraph-base/trustgraph/schema/models.py index cf73a203..2196a3d2 100644 --- a/trustgraph/schema/models.py +++ b/trustgraph-base/trustgraph/schema/models.py @@ -1,5 +1,5 @@ -from pulsar.schema import Record, String, Array, Double +from pulsar.schema import Record, String, 
Array, Double, Integer from . topic import topic from . types import Error @@ -14,6 +14,9 @@ class TextCompletionRequest(Record): class TextCompletionResponse(Record): error = Error() response = String() + in_token = Integer() + out_token = Integer() + model = String() text_completion_request_queue = topic( 'text-completion', kind='non-persistent', namespace='request' diff --git a/trustgraph/schema/object.py b/trustgraph-base/trustgraph/schema/object.py similarity index 100% rename from trustgraph/schema/object.py rename to trustgraph-base/trustgraph/schema/object.py diff --git a/trustgraph/schema/prompt.py b/trustgraph-base/trustgraph/schema/prompt.py similarity index 100% rename from trustgraph/schema/prompt.py rename to trustgraph-base/trustgraph/schema/prompt.py diff --git a/trustgraph/schema/retrieval.py b/trustgraph-base/trustgraph/schema/retrieval.py similarity index 100% rename from trustgraph/schema/retrieval.py rename to trustgraph-base/trustgraph/schema/retrieval.py diff --git a/trustgraph/schema/topic.py b/trustgraph-base/trustgraph/schema/topic.py similarity index 100% rename from trustgraph/schema/topic.py rename to trustgraph-base/trustgraph/schema/topic.py diff --git a/trustgraph/schema/types.py b/trustgraph-base/trustgraph/schema/types.py similarity index 100% rename from trustgraph/schema/types.py rename to trustgraph-base/trustgraph/schema/types.py diff --git a/trustgraph-bedrock/README.md b/trustgraph-bedrock/README.md new file mode 100644 index 00000000..7a2ce130 --- /dev/null +++ b/trustgraph-bedrock/README.md @@ -0,0 +1 @@ +See https://trustgraph.ai/ diff --git a/scripts/text-completion-bedrock b/trustgraph-bedrock/scripts/text-completion-bedrock similarity index 100% rename from scripts/text-completion-bedrock rename to trustgraph-bedrock/scripts/text-completion-bedrock diff --git a/trustgraph-bedrock/setup.py b/trustgraph-bedrock/setup.py new file mode 100644 index 00000000..317f2bdf --- /dev/null +++ b/trustgraph-bedrock/setup.py @@ -0,0 
+1,45 @@ +import setuptools +import os +import importlib + +with open("README.md", "r") as fh: + long_description = fh.read() + +# Load a version number module +spec = importlib.util.spec_from_file_location( + 'version', 'trustgraph/bedrock_version.py' +) +version_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(version_module) + +version = version_module.__version__ + +setuptools.setup( + name="trustgraph-bedrock", + version=version, + author="trustgraph.ai", + author_email="security@trustgraph.ai", + description="TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/trustgraph-ai/trustgraph", + packages=setuptools.find_namespace_packages( + where='./', + ), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", + "Operating System :: OS Independent", + ], + python_requires='>=3.8', + download_url = "https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v" + version + ".tar.gz", + install_requires=[ + "trustgraph-base", + "pulsar-client", + "prometheus-client", + "boto3", + ], + scripts=[ + "scripts/text-completion-bedrock", + ] +) diff --git a/trustgraph/model/text_completion/azure/__init__.py b/trustgraph-bedrock/trustgraph/model/text_completion/bedrock/__init__.py similarity index 100% rename from trustgraph/model/text_completion/azure/__init__.py rename to trustgraph-bedrock/trustgraph/model/text_completion/bedrock/__init__.py diff --git a/trustgraph/model/text_completion/azure/__main__.py b/trustgraph-bedrock/trustgraph/model/text_completion/bedrock/__main__.py similarity index 100% rename from trustgraph/model/text_completion/azure/__main__.py rename to trustgraph-bedrock/trustgraph/model/text_completion/bedrock/__main__.py diff --git 
a/trustgraph/model/text_completion/bedrock/llm.py b/trustgraph-bedrock/trustgraph/model/text_completion/bedrock/llm.py similarity index 93% rename from trustgraph/model/text_completion/bedrock/llm.py rename to trustgraph-bedrock/trustgraph/model/text_completion/bedrock/llm.py index fccf6353..0d050261 100755 --- a/trustgraph/model/text_completion/bedrock/llm.py +++ b/trustgraph-bedrock/trustgraph/model/text_completion/bedrock/llm.py @@ -209,14 +209,23 @@ class Processor(ConsumerProducer): # Use Mistral as default else: response_body = json.loads(response.get("body").read()) - outputtext = response_body['outputs'][0]['text'] + outputtext = response_body['outputs'][0]['text'] + + metadata = response['ResponseMetadata']['HTTPHeaders'] + inputtokens = int(metadata['x-amzn-bedrock-input-token-count']) + outputtokens = int(metadata['x-amzn-bedrock-output-token-count']) print(outputtext, flush=True) + print(f"Input Tokens: {inputtokens}", flush=True) + print(f"Output Tokens: {outputtokens}", flush=True) print("Send response...", flush=True) r = TextCompletionResponse( error=None, - response=outputtext + response=outputtext, + in_token=inputtokens, + out_token=outputtokens, + model=str(self.model), ) self.send(r, properties={"id": id}) @@ -236,6 +245,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) @@ -254,6 +266,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.consumer.acknowledge(msg) diff --git a/trustgraph-cli/README.md b/trustgraph-cli/README.md new file mode 100644 index 00000000..7a2ce130 --- /dev/null +++ b/trustgraph-cli/README.md @@ -0,0 +1 @@ +See https://trustgraph.ai/ diff --git a/scripts/graph-show b/trustgraph-cli/scripts/tg-graph-show similarity index 85% rename from scripts/graph-show rename to trustgraph-cli/scripts/tg-graph-show index 27ab98b5..a737c97b 
100755 --- a/scripts/graph-show +++ b/trustgraph-cli/scripts/tg-graph-show @@ -8,11 +8,11 @@ import argparse import os from trustgraph.clients.triples_query_client import TriplesQueryClient -default_pulsar_host = os.getenv("PULSAR_HOST", 'pulsar://pulsar:6650') +default_pulsar_host = os.getenv("PULSAR_HOST", 'pulsar://localhost:6650') def show_graph(pulsar): - tq = TriplesQueryClient(pulsar_host="pulsar://localhost:6650") + tq = TriplesQueryClient(pulsar_host=pulsar) rows = tq.request(None, None, None, limit=10_000_000) diff --git a/scripts/graph-to-turtle b/trustgraph-cli/scripts/tg-graph-to-turtle similarity index 90% rename from scripts/graph-to-turtle rename to trustgraph-cli/scripts/tg-graph-to-turtle index 26e18774..bff03fc6 100755 --- a/scripts/graph-to-turtle +++ b/trustgraph-cli/scripts/tg-graph-to-turtle @@ -11,11 +11,11 @@ import rdflib import io import sys -default_pulsar_host = os.getenv("PULSAR_HOST", 'pulsar://pulsar:6650') +default_pulsar_host = os.getenv("PULSAR_HOST", 'pulsar://localhost:6650') def show_graph(pulsar): - tq = TriplesQueryClient(pulsar_host="pulsar://localhost:6650") + tq = TriplesQueryClient(pulsar_host=pulsar) rows = tq.request(None, None, None, limit=10_000_000) diff --git a/trustgraph-cli/scripts/tg-init-pulsar b/trustgraph-cli/scripts/tg-init-pulsar new file mode 100755 index 00000000..0113a7f0 --- /dev/null +++ b/trustgraph-cli/scripts/tg-init-pulsar @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 + +""" +Initialises Pulsar with Trustgraph tenant / namespaces & policy +""" + +import requests +import time +import argparse + +default_pulsar_admin_url = "http://pulsar:8080" + +def get_clusters(url): + + print("Get clusters...", flush=True) + + resp = requests.get(f"{url}/admin/v2/clusters") + + if resp.status_code != 200: raise RuntimeError("Could not fetch clusters") + + return resp.json() + +def ensure_tenant(url, tenant, clusters): + + resp = requests.get(f"{url}/admin/v2/tenants/{tenant}") + + if resp.status_code == 200: + 
print(f"Tenant {tenant} already exists.", flush=True) + return + + resp = requests.put( + f"{url}/admin/v2/tenants/{tenant}", + json={ + "adminRoles": [], + "allowedClusters": clusters, + } + ) + + if resp.status_code != 204: + print(resp.text, flush=True) + raise RuntimeError("Tenant creation failed.") + + print(f"Tenant {tenant} created.", flush=True) + +def ensure_namespace(url, tenant, namespace, config): + + resp = requests.get(f"{url}/admin/v2/namespaces/{tenant}/{namespace}") + + if resp.status_code == 200: + print(f"Namespace {tenant}/{namespace} already exists.", flush=True) + return + + resp = requests.put( + f"{url}/admin/v2/namespaces/{tenant}/{namespace}", + json=config, + ) + + if resp.status_code != 204: + print(resp.status_code, flush=True) + print(resp.text, flush=True) + raise RuntimeError(f"Namespace {tenant}/{namespace} creation failed.") + + print(f"Namespace {tenant}/{namespace} created.", flush=True) + +def init(url, tenant="tg"): + + clusters = get_clusters(url) + + ensure_tenant(url, tenant, clusters) + + ensure_namespace(url, tenant, "flow", {}) + + ensure_namespace(url, tenant, "request", {}) + + ensure_namespace(url, tenant, "response", { + "retention_policies": { + "retentionSizeInMB": -1, + "retentionTimeInMinutes": 3, + } + }) + +def main(): + + parser = argparse.ArgumentParser( + prog='tg-init-pulsar', + description=__doc__, + ) + + parser.add_argument( + '-p', '--pulsar-admin-url', + default=default_pulsar_admin_url, + help=f'Pulsar admin URL (default: {default_pulsar_admin_url})', + ) + + args = parser.parse_args() + + while True: + + try: + + print(flush=True) + print( + f"Initialising with Pulsar {args.pulsar_admin_url}...", + flush=True + ) + init(args.pulsar_admin_url, "tg") + print("Initialisation complete.", flush=True) + break + + except Exception as e: + + print("Exception:", e, flush=True) + + print("Sleeping...", flush=True) + time.sleep(2) + print("Will retry...", flush=True) + +main() + diff --git 
a/scripts/init-pulsar-manager b/trustgraph-cli/scripts/tg-init-pulsar-manager similarity index 100% rename from scripts/init-pulsar-manager rename to trustgraph-cli/scripts/tg-init-pulsar-manager diff --git a/scripts/load-pdf b/trustgraph-cli/scripts/tg-load-pdf similarity index 87% rename from scripts/load-pdf rename to trustgraph-cli/scripts/tg-load-pdf index 0c2aac46..5d54da93 100755 --- a/scripts/load-pdf +++ b/trustgraph-cli/scripts/tg-load-pdf @@ -22,7 +22,6 @@ class Loader: pulsar_host, output_queue, log_level, - file, ): self.client = pulsar.Client( @@ -36,13 +35,16 @@ class Loader: chunking_enabled=True, ) - self.file = file + def load(self, files): - def run(self): + for file in files: + self.load_file(file) + + def load_file(self, file): try: - path = self.file + path = file data = open(path, "rb").read() id = hashlib.sha256(path.encode("utf-8")).hexdigest()[0:8] @@ -58,8 +60,10 @@ class Loader: self.producer.send(r) + print(f"{file}: Loaded successfully.") + except Exception as e: - print(e, flush=True) + print(f"{file}: Failed: {str(e)}", flush=True) def __del__(self): self.client.close() @@ -95,8 +99,7 @@ def main(): ) parser.add_argument( - '-f', '--file', - required=True, + 'files', nargs='+', help=f'File to load' ) @@ -105,16 +108,16 @@ def main(): while True: try: + p = Loader( pulsar_host=args.pulsar_host, output_queue=args.output_queue, log_level=args.log_level, - file=args.file, ) - p.run() + p.load(args.files) - print("File loaded.") + print("All done.") break except Exception as e: diff --git a/scripts/load-text b/trustgraph-cli/scripts/tg-load-text similarity index 87% rename from scripts/load-text rename to trustgraph-cli/scripts/tg-load-text index 3a0e19c3..8137006c 100755 --- a/scripts/load-text +++ b/trustgraph-cli/scripts/tg-load-text @@ -22,7 +22,6 @@ class Loader: pulsar_host, output_queue, log_level, - file, ): self.client = pulsar.Client( @@ -36,13 +35,16 @@ class Loader: chunking_enabled=True, ) - self.file = file + def load(self, 
files): - def run(self): + for file in files: + self.load_file(file) + + def load_file(self, file): try: - path = self.file + path = file data = open(path, "rb").read() id = hashlib.sha256(path.encode("utf-8")).hexdigest()[0:8] @@ -58,8 +60,10 @@ class Loader: self.producer.send(r) + print(f"{file}: Loaded successfully.") + except Exception as e: - print(e, flush=True) + print(f"{file}: Failed: {str(e)}", flush=True) def __del__(self): self.client.close() @@ -95,8 +99,7 @@ def main(): ) parser.add_argument( - '-f', '--file', - required=True, + 'files', nargs='+', help=f'File to load' ) @@ -105,16 +108,16 @@ def main(): while True: try: + p = Loader( pulsar_host=args.pulsar_host, output_queue=args.output_queue, log_level=args.log_level, - file=args.file, ) - p.run() + p.load(args.files) - print("File loaded.") + print("All done.") break except Exception as e: diff --git a/trustgraph-cli/scripts/tg-processor-state b/trustgraph-cli/scripts/tg-processor-state new file mode 100755 index 00000000..cfab00c8 --- /dev/null +++ b/trustgraph-cli/scripts/tg-processor-state @@ -0,0 +1,59 @@ +#!/usr/bin/env python3 + +""" +Dump out TrustGraph processor states. 
+""" + +import requests +import argparse +import tabulate + +default_prometheus_url = "http://localhost:9090" + +def dump_status(prom): + + url = f"{prom}/api/v1/query?query=processor_state%7Bprocessor_state%3D%22running%22%7D" + + resp = requests.get(url) + + obj = resp.json() + + tbl = [ + [ + m["metric"]["job"], + "running" if int(m["value"][1]) > 0 else "down" + ] + for m in obj["data"]["result"] + ] + + print(tabulate.tabulate( + tbl, tablefmt="pretty", headers=["processor", "state"], + stralign="left" + )) + + +def main(): + + parser = argparse.ArgumentParser( + prog='tg-processor-state', + description=__doc__, + ) + + parser.add_argument( + '-p', '--prometheus-url', + default=default_prometheus_url, + help=f'Prometheus URL (default: {default_prometheus_url})', + ) + + args = parser.parse_args() + + try: + + dump_status(args.prometheus_url) + + except Exception as e: + + print("Exception:", e, flush=True) + +main() + diff --git a/scripts/query-document-rag b/trustgraph-cli/scripts/tg-query-document-rag similarity index 100% rename from scripts/query-document-rag rename to trustgraph-cli/scripts/tg-query-document-rag diff --git a/scripts/query-graph-rag b/trustgraph-cli/scripts/tg-query-graph-rag similarity index 100% rename from scripts/query-graph-rag rename to trustgraph-cli/scripts/tg-query-graph-rag diff --git a/trustgraph-cli/setup.py b/trustgraph-cli/setup.py new file mode 100644 index 00000000..68024de6 --- /dev/null +++ b/trustgraph-cli/setup.py @@ -0,0 +1,54 @@ +import setuptools +import os +import importlib + +with open("README.md", "r") as fh: + long_description = fh.read() + +# Load a version number module +spec = importlib.util.spec_from_file_location( + 'version', 'trustgraph/cli_version.py' +) +version_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(version_module) + +version = version_module.__version__ + +setuptools.setup( + name="trustgraph-cli", + version=version, + author="trustgraph.ai", + 
author_email="security@trustgraph.ai", + description="TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/trustgraph-ai/trustgraph", + packages=setuptools.find_namespace_packages( + where='./', + ), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", + "Operating System :: OS Independent", + ], + python_requires='>=3.8', + download_url = "https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v" + version + ".tar.gz", + install_requires=[ + "trustgraph-base", + "requests", + "pulsar-client", + "rdflib", + "tabulate", + ], + scripts=[ + "scripts/tg-graph-show", + "scripts/tg-graph-to-turtle", + "scripts/tg-init-pulsar-manager", + "scripts/tg-load-pdf", + "scripts/tg-load-text", + "scripts/tg-query-document-rag", + "scripts/tg-query-graph-rag", + "scripts/tg-init-pulsar", + "scripts/tg-processor-state", + ] +) diff --git a/trustgraph-embeddings-hf/README.md b/trustgraph-embeddings-hf/README.md new file mode 100644 index 00000000..7a2ce130 --- /dev/null +++ b/trustgraph-embeddings-hf/README.md @@ -0,0 +1 @@ +See https://trustgraph.ai/ diff --git a/scripts/embeddings-hf b/trustgraph-embeddings-hf/scripts/embeddings-hf old mode 100755 new mode 100644 similarity index 100% rename from scripts/embeddings-hf rename to trustgraph-embeddings-hf/scripts/embeddings-hf diff --git a/trustgraph-embeddings-hf/setup.py b/trustgraph-embeddings-hf/setup.py new file mode 100644 index 00000000..f3c2869b --- /dev/null +++ b/trustgraph-embeddings-hf/setup.py @@ -0,0 +1,55 @@ +import setuptools +import os +import importlib + +with open("README.md", "r") as fh: + long_description = fh.read() + +# Load a version number module +spec = importlib.util.spec_from_file_location( + 'version', 
'trustgraph/embeddings_hf_version.py' +) +version_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(version_module) + +version = version_module.__version__ + +setuptools.setup( + name="trustgraph-embeddings-hf", + version=version, + author="trustgraph.ai", + author_email="security@trustgraph.ai", + description="HuggingFace embeddings support for TrustGraph.", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/trustgraph-ai/trustgraph", + packages=setuptools.find_namespace_packages( + where='./', + ), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", + "Operating System :: OS Independent", + ], + python_requires='>=3.8', + download_url = "https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v" + version + ".tar.gz", + install_requires=[ + "trustgraph-base", + "trustgraph-flow", + "torch", + "urllib3", + "transformers", + "sentence-transformers", + "langchain", + "langchain-core", + "langchain-huggingface", + "langchain-community", + "huggingface-hub", + "pulsar-client", + "pyyaml", + "prometheus-client", + ], + scripts=[ + "scripts/embeddings-hf", + ] +) diff --git a/trustgraph/clients/__init__.py b/trustgraph-embeddings-hf/trustgraph/__init__.py similarity index 100% rename from trustgraph/clients/__init__.py rename to trustgraph-embeddings-hf/trustgraph/__init__.py diff --git a/trustgraph/decoding/__init__.py b/trustgraph-embeddings-hf/trustgraph/embeddings/__init__.py similarity index 100% rename from trustgraph/decoding/__init__.py rename to trustgraph-embeddings-hf/trustgraph/embeddings/__init__.py diff --git a/trustgraph/embeddings/hf/__init__.py b/trustgraph-embeddings-hf/trustgraph/embeddings/hf/__init__.py similarity index 100% rename from trustgraph/embeddings/hf/__init__.py rename to trustgraph-embeddings-hf/trustgraph/embeddings/hf/__init__.py diff --git 
a/trustgraph/embeddings/hf/__main__.py b/trustgraph-embeddings-hf/trustgraph/embeddings/hf/__main__.py similarity index 100% rename from trustgraph/embeddings/hf/__main__.py rename to trustgraph-embeddings-hf/trustgraph/embeddings/hf/__main__.py diff --git a/trustgraph/embeddings/hf/hf.py b/trustgraph-embeddings-hf/trustgraph/embeddings/hf/hf.py similarity index 89% rename from trustgraph/embeddings/hf/hf.py rename to trustgraph-embeddings-hf/trustgraph/embeddings/hf/hf.py index 8b9dcaab..4b3b39c1 100755 --- a/trustgraph/embeddings/hf/hf.py +++ b/trustgraph-embeddings-hf/trustgraph/embeddings/hf/hf.py @@ -6,10 +6,11 @@ Input is text, output is embeddings vector. from langchain_huggingface import HuggingFaceEmbeddings -from ... schema import EmbeddingsRequest, EmbeddingsResponse, Error -from ... schema import embeddings_request_queue, embeddings_response_queue -from ... log_level import LogLevel -from ... base import ConsumerProducer +from trustgraph.schema import EmbeddingsRequest, EmbeddingsResponse, Error +from trustgraph.schema import embeddings_request_queue +from trustgraph.schema import embeddings_response_queue +from trustgraph.log_level import LogLevel +from trustgraph.base import ConsumerProducer module = ".".join(__name__.split(".")[1:-1]) diff --git a/trustgraph-flow/README.md b/trustgraph-flow/README.md new file mode 100644 index 00000000..7a2ce130 --- /dev/null +++ b/trustgraph-flow/README.md @@ -0,0 +1 @@ +See https://trustgraph.ai/ diff --git a/scripts/chunker-recursive b/trustgraph-flow/scripts/chunker-recursive similarity index 100% rename from scripts/chunker-recursive rename to trustgraph-flow/scripts/chunker-recursive diff --git a/scripts/chunker-token b/trustgraph-flow/scripts/chunker-token similarity index 100% rename from scripts/chunker-token rename to trustgraph-flow/scripts/chunker-token diff --git a/scripts/de-query-milvus b/trustgraph-flow/scripts/de-query-milvus similarity index 100% rename from scripts/de-query-milvus rename to 
trustgraph-flow/scripts/de-query-milvus diff --git a/scripts/de-query-qdrant b/trustgraph-flow/scripts/de-query-qdrant old mode 100644 new mode 100755 similarity index 100% rename from scripts/de-query-qdrant rename to trustgraph-flow/scripts/de-query-qdrant diff --git a/scripts/de-write-milvus b/trustgraph-flow/scripts/de-write-milvus similarity index 100% rename from scripts/de-write-milvus rename to trustgraph-flow/scripts/de-write-milvus diff --git a/scripts/de-write-qdrant b/trustgraph-flow/scripts/de-write-qdrant old mode 100644 new mode 100755 similarity index 100% rename from scripts/de-write-qdrant rename to trustgraph-flow/scripts/de-write-qdrant diff --git a/scripts/document-rag b/trustgraph-flow/scripts/document-rag similarity index 100% rename from scripts/document-rag rename to trustgraph-flow/scripts/document-rag diff --git a/scripts/embeddings-ollama b/trustgraph-flow/scripts/embeddings-ollama similarity index 100% rename from scripts/embeddings-ollama rename to trustgraph-flow/scripts/embeddings-ollama diff --git a/scripts/embeddings-vectorize b/trustgraph-flow/scripts/embeddings-vectorize similarity index 100% rename from scripts/embeddings-vectorize rename to trustgraph-flow/scripts/embeddings-vectorize diff --git a/scripts/ge-query-milvus b/trustgraph-flow/scripts/ge-query-milvus similarity index 100% rename from scripts/ge-query-milvus rename to trustgraph-flow/scripts/ge-query-milvus diff --git a/scripts/ge-query-qdrant b/trustgraph-flow/scripts/ge-query-qdrant similarity index 100% rename from scripts/ge-query-qdrant rename to trustgraph-flow/scripts/ge-query-qdrant diff --git a/scripts/ge-write-milvus b/trustgraph-flow/scripts/ge-write-milvus similarity index 100% rename from scripts/ge-write-milvus rename to trustgraph-flow/scripts/ge-write-milvus diff --git a/scripts/ge-write-qdrant b/trustgraph-flow/scripts/ge-write-qdrant similarity index 100% rename from scripts/ge-write-qdrant rename to trustgraph-flow/scripts/ge-write-qdrant diff 
--git a/scripts/graph-rag b/trustgraph-flow/scripts/graph-rag similarity index 100% rename from scripts/graph-rag rename to trustgraph-flow/scripts/graph-rag diff --git a/scripts/kg-extract-definitions b/trustgraph-flow/scripts/kg-extract-definitions similarity index 100% rename from scripts/kg-extract-definitions rename to trustgraph-flow/scripts/kg-extract-definitions diff --git a/scripts/kg-extract-relationships b/trustgraph-flow/scripts/kg-extract-relationships similarity index 100% rename from scripts/kg-extract-relationships rename to trustgraph-flow/scripts/kg-extract-relationships diff --git a/scripts/kg-extract-topics b/trustgraph-flow/scripts/kg-extract-topics similarity index 100% rename from scripts/kg-extract-topics rename to trustgraph-flow/scripts/kg-extract-topics diff --git a/trustgraph-flow/scripts/metering b/trustgraph-flow/scripts/metering new file mode 100755 index 00000000..7f1d0e12 --- /dev/null +++ b/trustgraph-flow/scripts/metering @@ -0,0 +1,5 @@ +#!/usr/bin/env python3 + +from trustgraph.metering import run + +run() \ No newline at end of file diff --git a/scripts/object-extract-row b/trustgraph-flow/scripts/object-extract-row similarity index 100% rename from scripts/object-extract-row rename to trustgraph-flow/scripts/object-extract-row diff --git a/scripts/oe-write-milvus b/trustgraph-flow/scripts/oe-write-milvus similarity index 100% rename from scripts/oe-write-milvus rename to trustgraph-flow/scripts/oe-write-milvus diff --git a/scripts/pdf-decoder b/trustgraph-flow/scripts/pdf-decoder similarity index 100% rename from scripts/pdf-decoder rename to trustgraph-flow/scripts/pdf-decoder diff --git a/scripts/prompt-generic b/trustgraph-flow/scripts/prompt-generic similarity index 100% rename from scripts/prompt-generic rename to trustgraph-flow/scripts/prompt-generic diff --git a/scripts/prompt-template b/trustgraph-flow/scripts/prompt-template similarity index 100% rename from scripts/prompt-template rename to 
trustgraph-flow/scripts/prompt-template diff --git a/scripts/rows-write-cassandra b/trustgraph-flow/scripts/rows-write-cassandra similarity index 100% rename from scripts/rows-write-cassandra rename to trustgraph-flow/scripts/rows-write-cassandra diff --git a/scripts/run-processing b/trustgraph-flow/scripts/run-processing similarity index 100% rename from scripts/run-processing rename to trustgraph-flow/scripts/run-processing diff --git a/scripts/text-completion-azure b/trustgraph-flow/scripts/text-completion-azure similarity index 100% rename from scripts/text-completion-azure rename to trustgraph-flow/scripts/text-completion-azure diff --git a/scripts/text-completion-claude b/trustgraph-flow/scripts/text-completion-claude similarity index 100% rename from scripts/text-completion-claude rename to trustgraph-flow/scripts/text-completion-claude diff --git a/scripts/text-completion-cohere b/trustgraph-flow/scripts/text-completion-cohere similarity index 100% rename from scripts/text-completion-cohere rename to trustgraph-flow/scripts/text-completion-cohere diff --git a/scripts/text-completion-llamafile b/trustgraph-flow/scripts/text-completion-llamafile similarity index 100% rename from scripts/text-completion-llamafile rename to trustgraph-flow/scripts/text-completion-llamafile diff --git a/scripts/text-completion-ollama b/trustgraph-flow/scripts/text-completion-ollama similarity index 100% rename from scripts/text-completion-ollama rename to trustgraph-flow/scripts/text-completion-ollama diff --git a/scripts/text-completion-openai b/trustgraph-flow/scripts/text-completion-openai similarity index 100% rename from scripts/text-completion-openai rename to trustgraph-flow/scripts/text-completion-openai diff --git a/scripts/triples-query-cassandra b/trustgraph-flow/scripts/triples-query-cassandra similarity index 100% rename from scripts/triples-query-cassandra rename to trustgraph-flow/scripts/triples-query-cassandra diff --git a/scripts/triples-query-neo4j 
b/trustgraph-flow/scripts/triples-query-neo4j similarity index 100% rename from scripts/triples-query-neo4j rename to trustgraph-flow/scripts/triples-query-neo4j diff --git a/scripts/triples-write-cassandra b/trustgraph-flow/scripts/triples-write-cassandra similarity index 100% rename from scripts/triples-write-cassandra rename to trustgraph-flow/scripts/triples-write-cassandra diff --git a/scripts/triples-write-neo4j b/trustgraph-flow/scripts/triples-write-neo4j similarity index 100% rename from scripts/triples-write-neo4j rename to trustgraph-flow/scripts/triples-write-neo4j diff --git a/setup.py b/trustgraph-flow/setup.py similarity index 75% rename from setup.py rename to trustgraph-flow/setup.py index e1ac688f..a1c89797 100644 --- a/setup.py +++ b/trustgraph-flow/setup.py @@ -1,13 +1,21 @@ import setuptools import os +import importlib with open("README.md", "r") as fh: long_description = fh.read() -version = "0.9.5" +# Load a version number module +spec = importlib.util.spec_from_file_location( + 'version', 'trustgraph/flow_version.py' +) +version_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(version_module) + +version = version_module.__version__ setuptools.setup( - name="trustgraph", + name="trustgraph-flow", version=version, author="trustgraph.ai", author_email="security@trustgraph.ai", @@ -15,7 +23,9 @@ setuptools.setup( long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/trustgraph-ai/trustgraph", - packages=setuptools.find_packages(), + packages=setuptools.find_namespace_packages( + where='./', + ), classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", @@ -24,30 +34,24 @@ setuptools.setup( python_requires='>=3.8', download_url = "https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v" + version + ".tar.gz", install_requires=[ - "torch", + "trustgraph-base", "urllib3", - "transformers", - 
"sentence-transformers", "rdflib", "pymilvus", "langchain", "langchain-core", - "langchain-huggingface", "langchain-text-splitters", "langchain-community", - "huggingface-hub", "requests", "cassandra-driver", "pulsar-client", "pypdf", "qdrant-client", + "tabulate", "anthropic", - "google-cloud-aiplatform", "pyyaml", "prometheus-client", - "pyarrow", "cohere", - "boto3", "openai", "neo4j", "tiktoken", @@ -55,50 +59,35 @@ setuptools.setup( scripts=[ "scripts/chunker-recursive", "scripts/chunker-token", - "scripts/concat-parquet", "scripts/de-query-milvus", "scripts/de-query-qdrant", "scripts/de-write-milvus", "scripts/de-write-qdrant", "scripts/document-rag", - "scripts/dump-parquet", - "scripts/embeddings-hf", "scripts/embeddings-ollama", "scripts/embeddings-vectorize", - "scripts/ge-dump-parquet", "scripts/ge-query-milvus", "scripts/ge-query-qdrant", "scripts/ge-write-milvus", "scripts/ge-write-qdrant", "scripts/graph-rag", - "scripts/graph-show", - "scripts/graph-to-turtle", - "scripts/init-pulsar-manager", "scripts/kg-extract-definitions", "scripts/kg-extract-topics", "scripts/kg-extract-relationships", - "scripts/load-graph-embeddings", - "scripts/load-pdf", - "scripts/load-text", - "scripts/load-triples", + "scripts/metering", "scripts/object-extract-row", "scripts/oe-write-milvus", "scripts/pdf-decoder", "scripts/prompt-generic", "scripts/prompt-template", - "scripts/query-document-rag", - "scripts/query-graph-rag", "scripts/rows-write-cassandra", "scripts/run-processing", "scripts/text-completion-azure", - "scripts/text-completion-bedrock", "scripts/text-completion-claude", "scripts/text-completion-cohere", "scripts/text-completion-llamafile", "scripts/text-completion-ollama", "scripts/text-completion-openai", - "scripts/text-completion-vertexai", - "scripts/triples-dump-parquet", "scripts/triples-query-cassandra", "scripts/triples-query-neo4j", "scripts/triples-write-cassandra", diff --git a/trustgraph/direct/__init__.py 
b/trustgraph-flow/trustgraph/__init__.py similarity index 100% rename from trustgraph/direct/__init__.py rename to trustgraph-flow/trustgraph/__init__.py diff --git a/trustgraph/dump/__init__.py b/trustgraph-flow/trustgraph/chunking/__init__.py similarity index 100% rename from trustgraph/dump/__init__.py rename to trustgraph-flow/trustgraph/chunking/__init__.py diff --git a/trustgraph/chunking/recursive/__init__.py b/trustgraph-flow/trustgraph/chunking/recursive/__init__.py similarity index 100% rename from trustgraph/chunking/recursive/__init__.py rename to trustgraph-flow/trustgraph/chunking/recursive/__init__.py diff --git a/trustgraph/chunking/recursive/__main__.py b/trustgraph-flow/trustgraph/chunking/recursive/__main__.py similarity index 100% rename from trustgraph/chunking/recursive/__main__.py rename to trustgraph-flow/trustgraph/chunking/recursive/__main__.py diff --git a/trustgraph/chunking/recursive/chunker.py b/trustgraph-flow/trustgraph/chunking/recursive/chunker.py similarity index 100% rename from trustgraph/chunking/recursive/chunker.py rename to trustgraph-flow/trustgraph/chunking/recursive/chunker.py diff --git a/trustgraph/chunking/token/__init__.py b/trustgraph-flow/trustgraph/chunking/token/__init__.py similarity index 100% rename from trustgraph/chunking/token/__init__.py rename to trustgraph-flow/trustgraph/chunking/token/__init__.py diff --git a/trustgraph/chunking/token/__main__.py b/trustgraph-flow/trustgraph/chunking/token/__main__.py similarity index 100% rename from trustgraph/chunking/token/__main__.py rename to trustgraph-flow/trustgraph/chunking/token/__main__.py diff --git a/trustgraph/chunking/token/chunker.py b/trustgraph-flow/trustgraph/chunking/token/chunker.py similarity index 100% rename from trustgraph/chunking/token/chunker.py rename to trustgraph-flow/trustgraph/chunking/token/chunker.py diff --git a/trustgraph/dump/graph_embeddings/__init__.py b/trustgraph-flow/trustgraph/decoding/__init__.py similarity index 100% rename 
from trustgraph/dump/graph_embeddings/__init__.py rename to trustgraph-flow/trustgraph/decoding/__init__.py diff --git a/trustgraph/decoding/pdf/__init__.py b/trustgraph-flow/trustgraph/decoding/pdf/__init__.py similarity index 100% rename from trustgraph/decoding/pdf/__init__.py rename to trustgraph-flow/trustgraph/decoding/pdf/__init__.py diff --git a/trustgraph/decoding/pdf/__main__.py b/trustgraph-flow/trustgraph/decoding/pdf/__main__.py similarity index 100% rename from trustgraph/decoding/pdf/__main__.py rename to trustgraph-flow/trustgraph/decoding/pdf/__main__.py diff --git a/trustgraph/decoding/pdf/pdf_decoder.py b/trustgraph-flow/trustgraph/decoding/pdf/pdf_decoder.py similarity index 100% rename from trustgraph/decoding/pdf/pdf_decoder.py rename to trustgraph-flow/trustgraph/decoding/pdf/pdf_decoder.py diff --git a/trustgraph/dump/triples/__init__.py b/trustgraph-flow/trustgraph/direct/__init__.py similarity index 100% rename from trustgraph/dump/triples/__init__.py rename to trustgraph-flow/trustgraph/direct/__init__.py diff --git a/trustgraph/direct/cassandra.py b/trustgraph-flow/trustgraph/direct/cassandra.py similarity index 100% rename from trustgraph/direct/cassandra.py rename to trustgraph-flow/trustgraph/direct/cassandra.py diff --git a/trustgraph/direct/milvus_doc_embeddings.py b/trustgraph-flow/trustgraph/direct/milvus_doc_embeddings.py similarity index 100% rename from trustgraph/direct/milvus_doc_embeddings.py rename to trustgraph-flow/trustgraph/direct/milvus_doc_embeddings.py diff --git a/trustgraph/direct/milvus_graph_embeddings.py b/trustgraph-flow/trustgraph/direct/milvus_graph_embeddings.py similarity index 100% rename from trustgraph/direct/milvus_graph_embeddings.py rename to trustgraph-flow/trustgraph/direct/milvus_graph_embeddings.py diff --git a/trustgraph/direct/milvus_object_embeddings.py b/trustgraph-flow/trustgraph/direct/milvus_object_embeddings.py similarity index 100% rename from trustgraph/direct/milvus_object_embeddings.py 
rename to trustgraph-flow/trustgraph/direct/milvus_object_embeddings.py diff --git a/trustgraph/document_rag.py b/trustgraph-flow/trustgraph/document_rag.py similarity index 97% rename from trustgraph/document_rag.py rename to trustgraph-flow/trustgraph/document_rag.py index 61da645b..f3c8b158 100644 --- a/trustgraph/document_rag.py +++ b/trustgraph-flow/trustgraph/document_rag.py @@ -73,8 +73,8 @@ class DocumentRag: self.lang = PromptClient( pulsar_host=pulsar_host, - input_queue=prompt_request_queue, - output_queue=prompt_response_queue, + input_queue=pr_request_queue, + output_queue=pr_response_queue, subscriber=module + "-de-prompt", ) diff --git a/trustgraph/embeddings/__init__.py b/trustgraph-flow/trustgraph/embeddings/__init__.py similarity index 100% rename from trustgraph/embeddings/__init__.py rename to trustgraph-flow/trustgraph/embeddings/__init__.py diff --git a/trustgraph/dump/graph_embeddings/parquet/__init__.py b/trustgraph-flow/trustgraph/embeddings/ollama/__init__.py similarity index 100% rename from trustgraph/dump/graph_embeddings/parquet/__init__.py rename to trustgraph-flow/trustgraph/embeddings/ollama/__init__.py diff --git a/trustgraph/embeddings/ollama/__main__.py b/trustgraph-flow/trustgraph/embeddings/ollama/__main__.py similarity index 100% rename from trustgraph/embeddings/ollama/__main__.py rename to trustgraph-flow/trustgraph/embeddings/ollama/__main__.py diff --git a/trustgraph/embeddings/ollama/processor.py b/trustgraph-flow/trustgraph/embeddings/ollama/processor.py similarity index 100% rename from trustgraph/embeddings/ollama/processor.py rename to trustgraph-flow/trustgraph/embeddings/ollama/processor.py diff --git a/trustgraph/embeddings/vectorize/__init__.py b/trustgraph-flow/trustgraph/embeddings/vectorize/__init__.py similarity index 100% rename from trustgraph/embeddings/vectorize/__init__.py rename to trustgraph-flow/trustgraph/embeddings/vectorize/__init__.py diff --git a/trustgraph/embeddings/vectorize/__main__.py 
b/trustgraph-flow/trustgraph/embeddings/vectorize/__main__.py similarity index 100% rename from trustgraph/embeddings/vectorize/__main__.py rename to trustgraph-flow/trustgraph/embeddings/vectorize/__main__.py diff --git a/trustgraph/embeddings/vectorize/vectorize.py b/trustgraph-flow/trustgraph/embeddings/vectorize/vectorize.py similarity index 100% rename from trustgraph/embeddings/vectorize/vectorize.py rename to trustgraph-flow/trustgraph/embeddings/vectorize/vectorize.py diff --git a/trustgraph/extract/__init__.py b/trustgraph-flow/trustgraph/extract/__init__.py similarity index 100% rename from trustgraph/extract/__init__.py rename to trustgraph-flow/trustgraph/extract/__init__.py diff --git a/trustgraph/extract/kg/__init__.py b/trustgraph-flow/trustgraph/extract/kg/__init__.py similarity index 100% rename from trustgraph/extract/kg/__init__.py rename to trustgraph-flow/trustgraph/extract/kg/__init__.py diff --git a/trustgraph/extract/kg/definitions/__init__.py b/trustgraph-flow/trustgraph/extract/kg/definitions/__init__.py similarity index 100% rename from trustgraph/extract/kg/definitions/__init__.py rename to trustgraph-flow/trustgraph/extract/kg/definitions/__init__.py diff --git a/trustgraph/extract/kg/definitions/__main__.py b/trustgraph-flow/trustgraph/extract/kg/definitions/__main__.py similarity index 100% rename from trustgraph/extract/kg/definitions/__main__.py rename to trustgraph-flow/trustgraph/extract/kg/definitions/__main__.py diff --git a/trustgraph/extract/kg/definitions/extract.py b/trustgraph-flow/trustgraph/extract/kg/definitions/extract.py similarity index 100% rename from trustgraph/extract/kg/definitions/extract.py rename to trustgraph-flow/trustgraph/extract/kg/definitions/extract.py diff --git a/trustgraph/extract/kg/relationships/__init__.py b/trustgraph-flow/trustgraph/extract/kg/relationships/__init__.py similarity index 100% rename from trustgraph/extract/kg/relationships/__init__.py rename to 
trustgraph-flow/trustgraph/extract/kg/relationships/__init__.py diff --git a/trustgraph/extract/kg/relationships/__main__.py b/trustgraph-flow/trustgraph/extract/kg/relationships/__main__.py similarity index 100% rename from trustgraph/extract/kg/relationships/__main__.py rename to trustgraph-flow/trustgraph/extract/kg/relationships/__main__.py diff --git a/trustgraph/extract/kg/relationships/extract.py b/trustgraph-flow/trustgraph/extract/kg/relationships/extract.py similarity index 100% rename from trustgraph/extract/kg/relationships/extract.py rename to trustgraph-flow/trustgraph/extract/kg/relationships/extract.py diff --git a/trustgraph/extract/kg/topics/__init__.py b/trustgraph-flow/trustgraph/extract/kg/topics/__init__.py similarity index 100% rename from trustgraph/extract/kg/topics/__init__.py rename to trustgraph-flow/trustgraph/extract/kg/topics/__init__.py diff --git a/trustgraph/extract/kg/topics/__main__.py b/trustgraph-flow/trustgraph/extract/kg/topics/__main__.py similarity index 100% rename from trustgraph/extract/kg/topics/__main__.py rename to trustgraph-flow/trustgraph/extract/kg/topics/__main__.py diff --git a/trustgraph/extract/kg/topics/extract.py b/trustgraph-flow/trustgraph/extract/kg/topics/extract.py similarity index 100% rename from trustgraph/extract/kg/topics/extract.py rename to trustgraph-flow/trustgraph/extract/kg/topics/extract.py diff --git a/trustgraph/extract/object/__init__.py b/trustgraph-flow/trustgraph/extract/object/__init__.py similarity index 100% rename from trustgraph/extract/object/__init__.py rename to trustgraph-flow/trustgraph/extract/object/__init__.py diff --git a/trustgraph/extract/object/row/__init__.py b/trustgraph-flow/trustgraph/extract/object/row/__init__.py similarity index 100% rename from trustgraph/extract/object/row/__init__.py rename to trustgraph-flow/trustgraph/extract/object/row/__init__.py diff --git a/trustgraph/extract/object/row/__main__.py 
b/trustgraph-flow/trustgraph/extract/object/row/__main__.py similarity index 100% rename from trustgraph/extract/object/row/__main__.py rename to trustgraph-flow/trustgraph/extract/object/row/__main__.py diff --git a/trustgraph/extract/object/row/extract.py b/trustgraph-flow/trustgraph/extract/object/row/extract.py similarity index 100% rename from trustgraph/extract/object/row/extract.py rename to trustgraph-flow/trustgraph/extract/object/row/extract.py diff --git a/trustgraph/graph_rag.py b/trustgraph-flow/trustgraph/graph_rag.py similarity index 98% rename from trustgraph/graph_rag.py rename to trustgraph-flow/trustgraph/graph_rag.py index c44066b4..15acb609 100644 --- a/trustgraph/graph_rag.py +++ b/trustgraph-flow/trustgraph/graph_rag.py @@ -96,8 +96,8 @@ class GraphRag: self.lang = PromptClient( pulsar_host=pulsar_host, - input_queue=prompt_request_queue, - output_queue=prompt_response_queue, + input_queue=pr_request_queue, + output_queue=pr_response_queue, subscriber=module + "-prompt", ) diff --git a/trustgraph-flow/trustgraph/metering/__init__.py b/trustgraph-flow/trustgraph/metering/__init__.py new file mode 100644 index 00000000..0ed03774 --- /dev/null +++ b/trustgraph-flow/trustgraph/metering/__init__.py @@ -0,0 +1,3 @@ + +from . counter import * + diff --git a/trustgraph-flow/trustgraph/metering/__main__.py b/trustgraph-flow/trustgraph/metering/__main__.py new file mode 100755 index 00000000..802f2b8d --- /dev/null +++ b/trustgraph-flow/trustgraph/metering/__main__.py @@ -0,0 +1,7 @@ +#!/usr/bin/env python3 + +from . counter import run + +if __name__ == '__main__': + run() + diff --git a/trustgraph-flow/trustgraph/metering/counter.py b/trustgraph-flow/trustgraph/metering/counter.py new file mode 100644 index 00000000..6e6b829b --- /dev/null +++ b/trustgraph-flow/trustgraph/metering/counter.py @@ -0,0 +1,101 @@ +""" +Simple token counter for each LLM response. +""" + +from prometheus_client import Counter +from . pricelist import price_list + +from .. 
schema import TextCompletionResponse, Error +from .. schema import text_completion_response_queue +from .. log_level import LogLevel +from .. base import Consumer + +module = ".".join(__name__.split(".")[1:-1]) + +default_input_queue = text_completion_response_queue +default_subscriber = module + + +class Processor(Consumer): + + def __init__(self, **params): + + if not hasattr(__class__, "input_token_metric"): + __class__.input_token_metric = Counter( + 'input_tokens', 'Input token count' + ) + + if not hasattr(__class__, "output_token_metric"): + __class__.output_token_metric = Counter( + 'output_tokens', 'Output token count' + ) + + if not hasattr(__class__, "input_cost_metric"): + __class__.input_cost_metric = Counter( + 'input_cost', 'Input cost' + ) + + if not hasattr(__class__, "output_cost_metric"): + __class__.output_cost_metric = Counter( + 'output_cost', 'Output cost' + ) + + input_queue = params.get("input_queue", default_input_queue) + subscriber = params.get("subscriber", default_subscriber) + + super(Processor, self).__init__( + **params | { + "input_queue": input_queue, + "subscriber": subscriber, + "input_schema": TextCompletionResponse, + } + ) + + def get_prices(self, prices, modelname): + for model in prices["price_list"]: + if model["model_name"] == modelname: + return model["input_price"], model["output_price"] + return None, None # Return None if model is not found + + def handle(self, msg): + + v = msg.value() + modelname = v.model + + # Sender-produced ID + id = msg.properties()["id"] + + print(f"Handling response {id}...", flush=True) + + num_in = v.in_token + num_out = v.out_token + + __class__.input_token_metric.inc(num_in) + __class__.output_token_metric.inc(num_out) + + model_input_price, model_output_price = self.get_prices(price_list, modelname) + + if model_input_price == None: + cost_per_call = f"Model Not Found in Price list" + else: + cost_in = num_in * model_input_price + cost_out = num_out * model_output_price + cost_per_call = 
round(cost_in + cost_out, 6) + + __class__.input_cost_metric.inc(cost_in) + __class__.output_cost_metric.inc(cost_out) + + print(f"Input Tokens: {num_in}", flush=True) + print(f"Output Tokens: {num_out}", flush=True) + print(f"Cost for call: ${cost_per_call}", flush=True) + + @staticmethod + def add_args(parser): + + Consumer.add_args( + parser, default_input_queue, default_subscriber, + ) + +def run(): + + Processor.start(module, __doc__) diff --git a/trustgraph-flow/trustgraph/metering/pricelist.py b/trustgraph-flow/trustgraph/metering/pricelist.py new file mode 100644 index 00000000..e890d0e1 --- /dev/null +++ b/trustgraph-flow/trustgraph/metering/pricelist.py @@ -0,0 +1,104 @@ +price_list = { + "price_list": [ + { + "model_name": "mistral.mistral-large-2407-v1:0", + "input_price": 0.000004, + "output_price": 0.000012 + }, + { + "model_name": "meta.llama3-1-405b-instruct-v1:0", + "input_price": 0.00000532, + "output_price": 0.000016 + }, + { + "model_name": "mistral.mixtral-8x7b-instruct-v0:1", + "input_price": 0.00000045, + "output_price": 0.0000007 + }, + { + "model_name": "meta.llama3-1-70b-instruct-v1:0", + "input_price": 0.00000099, + "output_price": 0.00000099 + }, + { + "model_name": "meta.llama3-1-8b-instruct-v1:0", + "input_price": 0.00000022, + "output_price": 0.00000022 + }, + { + "model_name": "anthropic.claude-3-haiku-20240307-v1:0", + "input_price": 0.00000025, + "output_price": 0.00000125 + }, + { + "model_name": "anthropic.claude-3-5-sonnet-20240620-v1:0", + "input_price": 0.000003, + "output_price": 0.000015 + }, + { + "model_name": "cohere.command-r-plus-v1:0", + "input_price": 0.0000030, + "output_price": 0.0000150 + }, + { + "model_name": "ollama", + "input_price": 0, + "output_price": 0 + }, + { + "model_name": "claude-3-haiku-20240307", + "input_price": 0.00000025, + "output_price": 0.00000125 + }, + { + "model_name": "claude-3-5-sonnet-20240620", + "input_price": 0.000003, + "output_price": 0.000015 + }, + { + "model_name": 
"claude-3-opus-20240229", + "input_price": 0.000015, + "output_price": 0.000075 + }, + { + "model_name": "claude-3-sonnet-20240229", + "input_price": 0.000003, + "output_price": 0.000015 + }, + { + "model_name": "command-r-08-202", + "input_price": 0.0000025, + "output_price": 0.000010 + }, + { + "model_name": "c4ai-aya-23-8b", + "input_price": 0, + "output_price": 0 + }, + { + "model_name": "llama.cpp", + "input_price": 0, + "output_price": 0 + }, + { + "model_name": "gpt-4o", + "input_price": 0.000005, + "output_price": 0.000015 + }, + { + "model_name": "gpt-4o-2024-08-06", + "input_price": 0.0000025, + "output_price": 0.000010 + }, + { + "model_name": "gpt-4o-2024-05-13", + "input_price": 0.000005, + "output_price": 0.000015 + }, + { + "model_name": "gpt-4o-mini", + "input_price": 0.00000015, + "output_price": 0.0000006 + }, + ] +} \ No newline at end of file diff --git a/trustgraph/model/__init__.py b/trustgraph-flow/trustgraph/model/__init__.py similarity index 100% rename from trustgraph/model/__init__.py rename to trustgraph-flow/trustgraph/model/__init__.py diff --git a/trustgraph/model/prompt/__init__.py b/trustgraph-flow/trustgraph/model/prompt/__init__.py similarity index 100% rename from trustgraph/model/prompt/__init__.py rename to trustgraph-flow/trustgraph/model/prompt/__init__.py diff --git a/trustgraph/model/prompt/generic/__init__.py b/trustgraph-flow/trustgraph/model/prompt/generic/__init__.py similarity index 100% rename from trustgraph/model/prompt/generic/__init__.py rename to trustgraph-flow/trustgraph/model/prompt/generic/__init__.py diff --git a/trustgraph/model/prompt/generic/__main__.py b/trustgraph-flow/trustgraph/model/prompt/generic/__main__.py similarity index 100% rename from trustgraph/model/prompt/generic/__main__.py rename to trustgraph-flow/trustgraph/model/prompt/generic/__main__.py diff --git a/trustgraph/model/prompt/generic/prompts.py b/trustgraph-flow/trustgraph/model/prompt/generic/prompts.py similarity index 100% rename 
from trustgraph/model/prompt/generic/prompts.py rename to trustgraph-flow/trustgraph/model/prompt/generic/prompts.py diff --git a/trustgraph/model/prompt/generic/service.py b/trustgraph-flow/trustgraph/model/prompt/generic/service.py similarity index 100% rename from trustgraph/model/prompt/generic/service.py rename to trustgraph-flow/trustgraph/model/prompt/generic/service.py diff --git a/trustgraph/model/prompt/template/__init__.py b/trustgraph-flow/trustgraph/model/prompt/template/__init__.py similarity index 100% rename from trustgraph/model/prompt/template/__init__.py rename to trustgraph-flow/trustgraph/model/prompt/template/__init__.py diff --git a/trustgraph/model/prompt/template/__main__.py b/trustgraph-flow/trustgraph/model/prompt/template/__main__.py similarity index 100% rename from trustgraph/model/prompt/template/__main__.py rename to trustgraph-flow/trustgraph/model/prompt/template/__main__.py diff --git a/trustgraph/model/prompt/template/prompts.py b/trustgraph-flow/trustgraph/model/prompt/template/prompts.py similarity index 100% rename from trustgraph/model/prompt/template/prompts.py rename to trustgraph-flow/trustgraph/model/prompt/template/prompts.py diff --git a/trustgraph/model/prompt/template/service.py b/trustgraph-flow/trustgraph/model/prompt/template/service.py similarity index 100% rename from trustgraph/model/prompt/template/service.py rename to trustgraph-flow/trustgraph/model/prompt/template/service.py diff --git a/trustgraph/model/text_completion/__init__.py b/trustgraph-flow/trustgraph/model/text_completion/__init__.py similarity index 100% rename from trustgraph/model/text_completion/__init__.py rename to trustgraph-flow/trustgraph/model/text_completion/__init__.py diff --git a/trustgraph/model/text_completion/bedrock/__init__.py b/trustgraph-flow/trustgraph/model/text_completion/azure/__init__.py similarity index 100% rename from trustgraph/model/text_completion/bedrock/__init__.py rename to 
trustgraph-flow/trustgraph/model/text_completion/azure/__init__.py diff --git a/trustgraph/model/text_completion/bedrock/__main__.py b/trustgraph-flow/trustgraph/model/text_completion/azure/__main__.py similarity index 100% rename from trustgraph/model/text_completion/bedrock/__main__.py rename to trustgraph-flow/trustgraph/model/text_completion/azure/__main__.py diff --git a/trustgraph/model/text_completion/azure/llm.py b/trustgraph-flow/trustgraph/model/text_completion/azure/llm.py similarity index 86% rename from trustgraph/model/text_completion/azure/llm.py rename to trustgraph-flow/trustgraph/model/text_completion/azure/llm.py index 86395317..ff97f644 100755 --- a/trustgraph/model/text_completion/azure/llm.py +++ b/trustgraph-flow/trustgraph/model/text_completion/azure/llm.py @@ -22,6 +22,7 @@ default_output_queue = text_completion_response_queue default_subscriber = module default_temperature = 0.0 default_max_output = 4192 +default_model = "AzureAI" class Processor(ConsumerProducer): @@ -34,6 +35,7 @@ class Processor(ConsumerProducer): token = params.get("token") temperature = params.get("temperature", default_temperature) max_output = params.get("max_output", default_max_output) + model = default_model super(Processor, self).__init__( **params | { @@ -44,6 +46,7 @@ class Processor(ConsumerProducer): "output_schema": TextCompletionResponse, "temperature": temperature, "max_output": max_output, + "model": model, } ) @@ -64,6 +67,7 @@ class Processor(ConsumerProducer): self.token = token self.temperature = temperature self.max_output = max_output + self.model = model def build_prompt(self, system, content): @@ -108,9 +112,7 @@ class Processor(ConsumerProducer): result = resp.json() - message_content = result['choices'][0]['message']['content'] - - return message_content + return result def handle(self, msg): @@ -132,9 +134,17 @@ class Processor(ConsumerProducer): with __class__.text_completion_metric.time(): response = self.call_llm(prompt) + resp = 
response['choices'][0]['message']['content'] + inputtokens = response['usage']['prompt_tokens'] + outputtokens = response['usage']['completion_tokens'] + + print(resp, flush=True) + print(f"Input Tokens: {inputtokens}", flush=True) + print(f"Output Tokens: {outputtokens}", flush=True) + print("Send response...", flush=True) - r = TextCompletionResponse(response=response, error=None) + r = TextCompletionResponse(response=resp, error=None, in_token=inputtokens, out_token=outputtokens, model=self.model) self.producer.send(r, properties={"id": id}) except TooManyRequests: @@ -145,7 +155,11 @@ class Processor(ConsumerProducer): error=Error( type = "rate-limit", message = str(e), - ) + ), + response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) @@ -162,7 +176,11 @@ class Processor(ConsumerProducer): error=Error( type = "llm-error", message = str(e), - ) + ), + response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) diff --git a/trustgraph/model/text_completion/claude/__init__.py b/trustgraph-flow/trustgraph/model/text_completion/claude/__init__.py similarity index 100% rename from trustgraph/model/text_completion/claude/__init__.py rename to trustgraph-flow/trustgraph/model/text_completion/claude/__init__.py diff --git a/trustgraph/model/text_completion/claude/__main__.py b/trustgraph-flow/trustgraph/model/text_completion/claude/__main__.py similarity index 100% rename from trustgraph/model/text_completion/claude/__main__.py rename to trustgraph-flow/trustgraph/model/text_completion/claude/__main__.py diff --git a/trustgraph/model/text_completion/claude/llm.py b/trustgraph-flow/trustgraph/model/text_completion/claude/llm.py similarity index 92% rename from trustgraph/model/text_completion/claude/llm.py rename to trustgraph-flow/trustgraph/model/text_completion/claude/llm.py index 85d77a85..ad949b02 100755 --- a/trustgraph/model/text_completion/claude/llm.py +++ 
b/trustgraph-flow/trustgraph/model/text_completion/claude/llm.py @@ -105,10 +105,14 @@ class Processor(ConsumerProducer): ) resp = response.content[0].text + inputtokens = response.usage.input_tokens + outputtokens = response.usage.output_tokens print(resp, flush=True) + print(f"Input Tokens: {inputtokens}", flush=True) + print(f"Output Tokens: {outputtokens}", flush=True) print("Send response...", flush=True) - r = TextCompletionResponse(response=resp, error=None) + r = TextCompletionResponse(response=resp, error=None, in_token=inputtokens, out_token=outputtokens, model=self.model) self.send(r, properties={"id": id}) print("Done.", flush=True) @@ -125,6 +129,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) @@ -143,6 +150,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) diff --git a/trustgraph/model/text_completion/cohere/__init__.py b/trustgraph-flow/trustgraph/model/text_completion/cohere/__init__.py similarity index 100% rename from trustgraph/model/text_completion/cohere/__init__.py rename to trustgraph-flow/trustgraph/model/text_completion/cohere/__init__.py diff --git a/trustgraph/model/text_completion/cohere/__main__.py b/trustgraph-flow/trustgraph/model/text_completion/cohere/__main__.py similarity index 100% rename from trustgraph/model/text_completion/cohere/__main__.py rename to trustgraph-flow/trustgraph/model/text_completion/cohere/__main__.py diff --git a/trustgraph/model/text_completion/cohere/llm.py b/trustgraph-flow/trustgraph/model/text_completion/cohere/llm.py similarity index 90% rename from trustgraph/model/text_completion/cohere/llm.py rename to trustgraph-flow/trustgraph/model/text_completion/cohere/llm.py index af55fd2c..4c64e8b6 100755 --- a/trustgraph/model/text_completion/cohere/llm.py +++ 
b/trustgraph-flow/trustgraph/model/text_completion/cohere/llm.py @@ -91,10 +91,15 @@ class Processor(ConsumerProducer): ) resp = output.text + inputtokens = int(output.meta.billed_units.input_tokens) + outputtokens = int(output.meta.billed_units.output_tokens) + print(resp, flush=True) + print(f"Input Tokens: {inputtokens}", flush=True) + print(f"Output Tokens: {outputtokens}", flush=True) print("Send response...", flush=True) - r = TextCompletionResponse(response=resp, error=None) + r = TextCompletionResponse(response=resp, error=None, in_token=inputtokens, out_token=outputtokens, model=self.model) self.send(r, properties={"id": id}) print("Done.", flush=True) @@ -111,6 +116,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) @@ -129,6 +137,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) diff --git a/trustgraph/model/text_completion/llamafile/__init__.py b/trustgraph-flow/trustgraph/model/text_completion/llamafile/__init__.py similarity index 100% rename from trustgraph/model/text_completion/llamafile/__init__.py rename to trustgraph-flow/trustgraph/model/text_completion/llamafile/__init__.py diff --git a/trustgraph/model/text_completion/llamafile/__main__.py b/trustgraph-flow/trustgraph/model/text_completion/llamafile/__main__.py similarity index 100% rename from trustgraph/model/text_completion/llamafile/__main__.py rename to trustgraph-flow/trustgraph/model/text_completion/llamafile/__main__.py diff --git a/trustgraph/model/text_completion/llamafile/llm.py b/trustgraph-flow/trustgraph/model/text_completion/llamafile/llm.py similarity index 91% rename from trustgraph/model/text_completion/llamafile/llm.py rename to trustgraph-flow/trustgraph/model/text_completion/llamafile/llm.py index c42ec472..86427167 100755 --- 
a/trustgraph/model/text_completion/llamafile/llm.py +++ b/trustgraph-flow/trustgraph/model/text_completion/llamafile/llm.py @@ -107,12 +107,20 @@ class Processor(ConsumerProducer): #} ) + inputtokens = resp.usage.prompt_tokens + outputtokens = resp.usage.completion_tokens + print(resp.choices[0].message.content, flush=True) + print(f"Input Tokens: {inputtokens}", flush=True) + print(f"Output Tokens: {outputtokens}", flush=True) print("Send response...", flush=True) r = TextCompletionResponse( response=resp.choices[0].message.content, error=None, + in_token=inputtokens, + out_token=outputtokens, + model="llama.cpp" ) self.send(r, properties={"id": id}) @@ -130,6 +138,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) @@ -148,6 +159,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) diff --git a/trustgraph/model/text_completion/ollama/__init__.py b/trustgraph-flow/trustgraph/model/text_completion/ollama/__init__.py similarity index 100% rename from trustgraph/model/text_completion/ollama/__init__.py rename to trustgraph-flow/trustgraph/model/text_completion/ollama/__init__.py diff --git a/trustgraph/model/text_completion/ollama/__main__.py b/trustgraph-flow/trustgraph/model/text_completion/ollama/__main__.py similarity index 100% rename from trustgraph/model/text_completion/ollama/__main__.py rename to trustgraph-flow/trustgraph/model/text_completion/ollama/__main__.py diff --git a/trustgraph/model/text_completion/ollama/llm.py b/trustgraph-flow/trustgraph/model/text_completion/ollama/llm.py similarity index 86% rename from trustgraph/model/text_completion/ollama/llm.py rename to trustgraph-flow/trustgraph/model/text_completion/ollama/llm.py index 93d89720..b506b3cd 100755 --- a/trustgraph/model/text_completion/ollama/llm.py +++ 
b/trustgraph-flow/trustgraph/model/text_completion/ollama/llm.py @@ -4,7 +4,7 @@ Simple LLM service, performs text prompt completion using an Ollama service. Input is prompt, output is response. """ -from langchain_community.llms import Ollama +from ollama import Client from prometheus_client import Histogram, Info from .... schema import TextCompletionRequest, TextCompletionResponse, Error @@ -67,7 +67,8 @@ class Processor(ConsumerProducer): "ollama": ollama, }) - self.llm = Ollama(base_url=ollama, model=model) + self.model = model + self.llm = Client(host=ollama) def handle(self, msg): @@ -83,11 +84,16 @@ class Processor(ConsumerProducer): try: with __class__.text_completion_metric.time(): - response = self.llm.invoke(prompt) + response = self.llm.generate(self.model, prompt) + response_text = response['response'] print("Send response...", flush=True) + print(response_text, flush=True) - r = TextCompletionResponse(response=response, error=None) + inputtokens = int(response['prompt_eval_count']) + outputtokens = int(response['eval_count']) + + r = TextCompletionResponse(response=response_text, error=None, in_token=inputtokens, out_token=outputtokens, model="ollama") self.send(r, properties={"id": id}) @@ -105,6 +111,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) @@ -123,6 +132,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) diff --git a/trustgraph/model/text_completion/openai/__init__.py b/trustgraph-flow/trustgraph/model/text_completion/openai/__init__.py similarity index 100% rename from trustgraph/model/text_completion/openai/__init__.py rename to trustgraph-flow/trustgraph/model/text_completion/openai/__init__.py diff --git a/trustgraph/model/text_completion/openai/__main__.py 
b/trustgraph-flow/trustgraph/model/text_completion/openai/__main__.py similarity index 100% rename from trustgraph/model/text_completion/openai/__main__.py rename to trustgraph-flow/trustgraph/model/text_completion/openai/__main__.py diff --git a/trustgraph/model/text_completion/openai/llm.py b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py similarity index 91% rename from trustgraph/model/text_completion/openai/llm.py rename to trustgraph-flow/trustgraph/model/text_completion/openai/llm.py index d4563e7b..5d259e7e 100755 --- a/trustgraph/model/text_completion/openai/llm.py +++ b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py @@ -108,13 +108,20 @@ class Processor(ConsumerProducer): "type": "text" } ) - + + inputtokens = resp.usage.prompt_tokens + outputtokens = resp.usage.completion_tokens print(resp.choices[0].message.content, flush=True) + print(f"Input Tokens: {inputtokens}", flush=True) + print(f"Output Tokens: {outputtokens}", flush=True) print("Send response...", flush=True) r = TextCompletionResponse( response=resp.choices[0].message.content, error=None, + in_token=inputtokens, + out_token=outputtokens, + model=self.model ) self.send(r, properties={"id": id}) @@ -132,6 +139,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) @@ -150,6 +160,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) diff --git a/trustgraph/processing/__init__.py b/trustgraph-flow/trustgraph/processing/__init__.py similarity index 100% rename from trustgraph/processing/__init__.py rename to trustgraph-flow/trustgraph/processing/__init__.py diff --git a/trustgraph/processing/__main__.py b/trustgraph-flow/trustgraph/processing/__main__.py similarity index 100% rename from trustgraph/processing/__main__.py rename to 
trustgraph-flow/trustgraph/processing/__main__.py diff --git a/trustgraph/processing/processing.py b/trustgraph-flow/trustgraph/processing/processing.py similarity index 100% rename from trustgraph/processing/processing.py rename to trustgraph-flow/trustgraph/processing/processing.py diff --git a/trustgraph/objects/__init__.py b/trustgraph-flow/trustgraph/query/__init__.py similarity index 100% rename from trustgraph/objects/__init__.py rename to trustgraph-flow/trustgraph/query/__init__.py diff --git a/trustgraph/query/__init__.py b/trustgraph-flow/trustgraph/query/doc_embeddings/__init__.py similarity index 100% rename from trustgraph/query/__init__.py rename to trustgraph-flow/trustgraph/query/doc_embeddings/__init__.py diff --git a/trustgraph/query/doc_embeddings/milvus/__init__.py b/trustgraph-flow/trustgraph/query/doc_embeddings/milvus/__init__.py similarity index 100% rename from trustgraph/query/doc_embeddings/milvus/__init__.py rename to trustgraph-flow/trustgraph/query/doc_embeddings/milvus/__init__.py diff --git a/trustgraph/query/doc_embeddings/milvus/__main__.py b/trustgraph-flow/trustgraph/query/doc_embeddings/milvus/__main__.py similarity index 100% rename from trustgraph/query/doc_embeddings/milvus/__main__.py rename to trustgraph-flow/trustgraph/query/doc_embeddings/milvus/__main__.py diff --git a/trustgraph/query/doc_embeddings/milvus/service.py b/trustgraph-flow/trustgraph/query/doc_embeddings/milvus/service.py similarity index 100% rename from trustgraph/query/doc_embeddings/milvus/service.py rename to trustgraph-flow/trustgraph/query/doc_embeddings/milvus/service.py diff --git a/trustgraph/query/doc_embeddings/qdrant/__init__.py b/trustgraph-flow/trustgraph/query/doc_embeddings/qdrant/__init__.py similarity index 100% rename from trustgraph/query/doc_embeddings/qdrant/__init__.py rename to trustgraph-flow/trustgraph/query/doc_embeddings/qdrant/__init__.py diff --git a/trustgraph/query/doc_embeddings/qdrant/__main__.py 
b/trustgraph-flow/trustgraph/query/doc_embeddings/qdrant/__main__.py similarity index 100% rename from trustgraph/query/doc_embeddings/qdrant/__main__.py rename to trustgraph-flow/trustgraph/query/doc_embeddings/qdrant/__main__.py diff --git a/trustgraph/query/doc_embeddings/qdrant/service.py b/trustgraph-flow/trustgraph/query/doc_embeddings/qdrant/service.py similarity index 100% rename from trustgraph/query/doc_embeddings/qdrant/service.py rename to trustgraph-flow/trustgraph/query/doc_embeddings/qdrant/service.py diff --git a/trustgraph/query/doc_embeddings/__init__.py b/trustgraph-flow/trustgraph/query/graph_embeddings/__init__.py similarity index 100% rename from trustgraph/query/doc_embeddings/__init__.py rename to trustgraph-flow/trustgraph/query/graph_embeddings/__init__.py diff --git a/trustgraph/query/graph_embeddings/milvus/__init__.py b/trustgraph-flow/trustgraph/query/graph_embeddings/milvus/__init__.py similarity index 100% rename from trustgraph/query/graph_embeddings/milvus/__init__.py rename to trustgraph-flow/trustgraph/query/graph_embeddings/milvus/__init__.py diff --git a/trustgraph/query/graph_embeddings/milvus/__main__.py b/trustgraph-flow/trustgraph/query/graph_embeddings/milvus/__main__.py similarity index 100% rename from trustgraph/query/graph_embeddings/milvus/__main__.py rename to trustgraph-flow/trustgraph/query/graph_embeddings/milvus/__main__.py diff --git a/trustgraph/query/graph_embeddings/milvus/service.py b/trustgraph-flow/trustgraph/query/graph_embeddings/milvus/service.py similarity index 100% rename from trustgraph/query/graph_embeddings/milvus/service.py rename to trustgraph-flow/trustgraph/query/graph_embeddings/milvus/service.py diff --git a/trustgraph/query/graph_embeddings/qdrant/__init__.py b/trustgraph-flow/trustgraph/query/graph_embeddings/qdrant/__init__.py similarity index 100% rename from trustgraph/query/graph_embeddings/qdrant/__init__.py rename to 
trustgraph-flow/trustgraph/query/graph_embeddings/qdrant/__init__.py diff --git a/trustgraph/query/graph_embeddings/qdrant/__main__.py b/trustgraph-flow/trustgraph/query/graph_embeddings/qdrant/__main__.py similarity index 100% rename from trustgraph/query/graph_embeddings/qdrant/__main__.py rename to trustgraph-flow/trustgraph/query/graph_embeddings/qdrant/__main__.py diff --git a/trustgraph/query/graph_embeddings/qdrant/service.py b/trustgraph-flow/trustgraph/query/graph_embeddings/qdrant/service.py similarity index 100% rename from trustgraph/query/graph_embeddings/qdrant/service.py rename to trustgraph-flow/trustgraph/query/graph_embeddings/qdrant/service.py diff --git a/trustgraph/query/graph_embeddings/__init__.py b/trustgraph-flow/trustgraph/query/triples/__init__.py similarity index 100% rename from trustgraph/query/graph_embeddings/__init__.py rename to trustgraph-flow/trustgraph/query/triples/__init__.py diff --git a/trustgraph/query/triples/cassandra/__init__.py b/trustgraph-flow/trustgraph/query/triples/cassandra/__init__.py similarity index 100% rename from trustgraph/query/triples/cassandra/__init__.py rename to trustgraph-flow/trustgraph/query/triples/cassandra/__init__.py diff --git a/trustgraph/query/triples/cassandra/__main__.py b/trustgraph-flow/trustgraph/query/triples/cassandra/__main__.py similarity index 100% rename from trustgraph/query/triples/cassandra/__main__.py rename to trustgraph-flow/trustgraph/query/triples/cassandra/__main__.py diff --git a/trustgraph/query/triples/cassandra/service.py b/trustgraph-flow/trustgraph/query/triples/cassandra/service.py similarity index 100% rename from trustgraph/query/triples/cassandra/service.py rename to trustgraph-flow/trustgraph/query/triples/cassandra/service.py diff --git a/trustgraph/query/triples/neo4j/__init__.py b/trustgraph-flow/trustgraph/query/triples/neo4j/__init__.py similarity index 100% rename from trustgraph/query/triples/neo4j/__init__.py rename to 
trustgraph-flow/trustgraph/query/triples/neo4j/__init__.py diff --git a/trustgraph/query/triples/neo4j/__main__.py b/trustgraph-flow/trustgraph/query/triples/neo4j/__main__.py similarity index 100% rename from trustgraph/query/triples/neo4j/__main__.py rename to trustgraph-flow/trustgraph/query/triples/neo4j/__main__.py diff --git a/trustgraph/query/triples/neo4j/service.py b/trustgraph-flow/trustgraph/query/triples/neo4j/service.py similarity index 100% rename from trustgraph/query/triples/neo4j/service.py rename to trustgraph-flow/trustgraph/query/triples/neo4j/service.py diff --git a/trustgraph/query/triples/__init__.py b/trustgraph-flow/trustgraph/retrieval/__init__.py similarity index 100% rename from trustgraph/query/triples/__init__.py rename to trustgraph-flow/trustgraph/retrieval/__init__.py diff --git a/trustgraph/retrieval/document_rag/__init__.py b/trustgraph-flow/trustgraph/retrieval/document_rag/__init__.py similarity index 100% rename from trustgraph/retrieval/document_rag/__init__.py rename to trustgraph-flow/trustgraph/retrieval/document_rag/__init__.py diff --git a/trustgraph/retrieval/document_rag/__main__.py b/trustgraph-flow/trustgraph/retrieval/document_rag/__main__.py similarity index 100% rename from trustgraph/retrieval/document_rag/__main__.py rename to trustgraph-flow/trustgraph/retrieval/document_rag/__main__.py diff --git a/trustgraph/retrieval/document_rag/rag.py b/trustgraph-flow/trustgraph/retrieval/document_rag/rag.py similarity index 100% rename from trustgraph/retrieval/document_rag/rag.py rename to trustgraph-flow/trustgraph/retrieval/document_rag/rag.py diff --git a/trustgraph/retrieval/graph_rag/__init__.py b/trustgraph-flow/trustgraph/retrieval/graph_rag/__init__.py similarity index 100% rename from trustgraph/retrieval/graph_rag/__init__.py rename to trustgraph-flow/trustgraph/retrieval/graph_rag/__init__.py diff --git a/trustgraph/retrieval/graph_rag/__main__.py b/trustgraph-flow/trustgraph/retrieval/graph_rag/__main__.py 
similarity index 100% rename from trustgraph/retrieval/graph_rag/__main__.py rename to trustgraph-flow/trustgraph/retrieval/graph_rag/__main__.py diff --git a/trustgraph/retrieval/graph_rag/rag.py b/trustgraph-flow/trustgraph/retrieval/graph_rag/rag.py similarity index 100% rename from trustgraph/retrieval/graph_rag/rag.py rename to trustgraph-flow/trustgraph/retrieval/graph_rag/rag.py diff --git a/trustgraph/retrieval/__init__.py b/trustgraph-flow/trustgraph/storage/__init__.py similarity index 100% rename from trustgraph/retrieval/__init__.py rename to trustgraph-flow/trustgraph/storage/__init__.py diff --git a/trustgraph/storage/__init__.py b/trustgraph-flow/trustgraph/storage/doc_embeddings/__init__.py similarity index 100% rename from trustgraph/storage/__init__.py rename to trustgraph-flow/trustgraph/storage/doc_embeddings/__init__.py diff --git a/trustgraph/storage/doc_embeddings/milvus/__init__.py b/trustgraph-flow/trustgraph/storage/doc_embeddings/milvus/__init__.py similarity index 100% rename from trustgraph/storage/doc_embeddings/milvus/__init__.py rename to trustgraph-flow/trustgraph/storage/doc_embeddings/milvus/__init__.py diff --git a/trustgraph/dump/graph_embeddings/parquet/__main__.py b/trustgraph-flow/trustgraph/storage/doc_embeddings/milvus/__main__.py similarity index 100% rename from trustgraph/dump/graph_embeddings/parquet/__main__.py rename to trustgraph-flow/trustgraph/storage/doc_embeddings/milvus/__main__.py diff --git a/trustgraph/storage/doc_embeddings/milvus/write.py b/trustgraph-flow/trustgraph/storage/doc_embeddings/milvus/write.py similarity index 100% rename from trustgraph/storage/doc_embeddings/milvus/write.py rename to trustgraph-flow/trustgraph/storage/doc_embeddings/milvus/write.py diff --git a/trustgraph/storage/doc_embeddings/qdrant/__init__.py b/trustgraph-flow/trustgraph/storage/doc_embeddings/qdrant/__init__.py similarity index 100% rename from trustgraph/storage/doc_embeddings/qdrant/__init__.py rename to 
trustgraph-flow/trustgraph/storage/doc_embeddings/qdrant/__init__.py diff --git a/trustgraph/dump/triples/parquet/__main__.py b/trustgraph-flow/trustgraph/storage/doc_embeddings/qdrant/__main__.py old mode 100755 new mode 100644 similarity index 100% rename from trustgraph/dump/triples/parquet/__main__.py rename to trustgraph-flow/trustgraph/storage/doc_embeddings/qdrant/__main__.py diff --git a/trustgraph/storage/doc_embeddings/qdrant/write.py b/trustgraph-flow/trustgraph/storage/doc_embeddings/qdrant/write.py similarity index 100% rename from trustgraph/storage/doc_embeddings/qdrant/write.py rename to trustgraph-flow/trustgraph/storage/doc_embeddings/qdrant/write.py diff --git a/trustgraph/storage/doc_embeddings/__init__.py b/trustgraph-flow/trustgraph/storage/graph_embeddings/__init__.py similarity index 100% rename from trustgraph/storage/doc_embeddings/__init__.py rename to trustgraph-flow/trustgraph/storage/graph_embeddings/__init__.py diff --git a/trustgraph/storage/graph_embeddings/milvus/__init__.py b/trustgraph-flow/trustgraph/storage/graph_embeddings/milvus/__init__.py similarity index 100% rename from trustgraph/storage/graph_embeddings/milvus/__init__.py rename to trustgraph-flow/trustgraph/storage/graph_embeddings/milvus/__init__.py diff --git a/trustgraph/storage/doc_embeddings/milvus/__main__.py b/trustgraph-flow/trustgraph/storage/graph_embeddings/milvus/__main__.py similarity index 100% rename from trustgraph/storage/doc_embeddings/milvus/__main__.py rename to trustgraph-flow/trustgraph/storage/graph_embeddings/milvus/__main__.py diff --git a/trustgraph/storage/graph_embeddings/milvus/write.py b/trustgraph-flow/trustgraph/storage/graph_embeddings/milvus/write.py similarity index 100% rename from trustgraph/storage/graph_embeddings/milvus/write.py rename to trustgraph-flow/trustgraph/storage/graph_embeddings/milvus/write.py diff --git a/trustgraph/storage/graph_embeddings/qdrant/__init__.py 
b/trustgraph-flow/trustgraph/storage/graph_embeddings/qdrant/__init__.py similarity index 100% rename from trustgraph/storage/graph_embeddings/qdrant/__init__.py rename to trustgraph-flow/trustgraph/storage/graph_embeddings/qdrant/__init__.py diff --git a/trustgraph/storage/doc_embeddings/qdrant/__main__.py b/trustgraph-flow/trustgraph/storage/graph_embeddings/qdrant/__main__.py old mode 100644 new mode 100755 similarity index 100% rename from trustgraph/storage/doc_embeddings/qdrant/__main__.py rename to trustgraph-flow/trustgraph/storage/graph_embeddings/qdrant/__main__.py diff --git a/trustgraph/storage/graph_embeddings/qdrant/write.py b/trustgraph-flow/trustgraph/storage/graph_embeddings/qdrant/write.py similarity index 100% rename from trustgraph/storage/graph_embeddings/qdrant/write.py rename to trustgraph-flow/trustgraph/storage/graph_embeddings/qdrant/write.py diff --git a/trustgraph/storage/graph_embeddings/__init__.py b/trustgraph-flow/trustgraph/storage/object_embeddings/__init__.py similarity index 100% rename from trustgraph/storage/graph_embeddings/__init__.py rename to trustgraph-flow/trustgraph/storage/object_embeddings/__init__.py diff --git a/trustgraph/storage/object_embeddings/milvus/__init__.py b/trustgraph-flow/trustgraph/storage/object_embeddings/milvus/__init__.py similarity index 100% rename from trustgraph/storage/object_embeddings/milvus/__init__.py rename to trustgraph-flow/trustgraph/storage/object_embeddings/milvus/__init__.py diff --git a/trustgraph/storage/graph_embeddings/milvus/__main__.py b/trustgraph-flow/trustgraph/storage/object_embeddings/milvus/__main__.py similarity index 100% rename from trustgraph/storage/graph_embeddings/milvus/__main__.py rename to trustgraph-flow/trustgraph/storage/object_embeddings/milvus/__main__.py diff --git a/trustgraph/storage/object_embeddings/milvus/write.py b/trustgraph-flow/trustgraph/storage/object_embeddings/milvus/write.py similarity index 100% rename from 
trustgraph/storage/object_embeddings/milvus/write.py rename to trustgraph-flow/trustgraph/storage/object_embeddings/milvus/write.py diff --git a/trustgraph/storage/object_embeddings/__init__.py b/trustgraph-flow/trustgraph/storage/rows/__init__.py similarity index 100% rename from trustgraph/storage/object_embeddings/__init__.py rename to trustgraph-flow/trustgraph/storage/rows/__init__.py diff --git a/trustgraph/storage/rows/cassandra/__init__.py b/trustgraph-flow/trustgraph/storage/rows/cassandra/__init__.py similarity index 100% rename from trustgraph/storage/rows/cassandra/__init__.py rename to trustgraph-flow/trustgraph/storage/rows/cassandra/__init__.py diff --git a/trustgraph/storage/graph_embeddings/qdrant/__main__.py b/trustgraph-flow/trustgraph/storage/rows/cassandra/__main__.py similarity index 100% rename from trustgraph/storage/graph_embeddings/qdrant/__main__.py rename to trustgraph-flow/trustgraph/storage/rows/cassandra/__main__.py diff --git a/trustgraph/storage/rows/cassandra/write.py b/trustgraph-flow/trustgraph/storage/rows/cassandra/write.py similarity index 100% rename from trustgraph/storage/rows/cassandra/write.py rename to trustgraph-flow/trustgraph/storage/rows/cassandra/write.py diff --git a/trustgraph/storage/rows/__init__.py b/trustgraph-flow/trustgraph/storage/triples/__init__.py similarity index 100% rename from trustgraph/storage/rows/__init__.py rename to trustgraph-flow/trustgraph/storage/triples/__init__.py diff --git a/trustgraph/storage/triples/cassandra/__init__.py b/trustgraph-flow/trustgraph/storage/triples/cassandra/__init__.py similarity index 100% rename from trustgraph/storage/triples/cassandra/__init__.py rename to trustgraph-flow/trustgraph/storage/triples/cassandra/__init__.py diff --git a/trustgraph/storage/object_embeddings/milvus/__main__.py b/trustgraph-flow/trustgraph/storage/triples/cassandra/__main__.py similarity index 100% rename from trustgraph/storage/object_embeddings/milvus/__main__.py rename to 
trustgraph-flow/trustgraph/storage/triples/cassandra/__main__.py diff --git a/trustgraph/storage/triples/cassandra/write.py b/trustgraph-flow/trustgraph/storage/triples/cassandra/write.py similarity index 100% rename from trustgraph/storage/triples/cassandra/write.py rename to trustgraph-flow/trustgraph/storage/triples/cassandra/write.py diff --git a/trustgraph/storage/triples/neo4j/__init__.py b/trustgraph-flow/trustgraph/storage/triples/neo4j/__init__.py similarity index 100% rename from trustgraph/storage/triples/neo4j/__init__.py rename to trustgraph-flow/trustgraph/storage/triples/neo4j/__init__.py diff --git a/trustgraph/storage/rows/cassandra/__main__.py b/trustgraph-flow/trustgraph/storage/triples/neo4j/__main__.py similarity index 100% rename from trustgraph/storage/rows/cassandra/__main__.py rename to trustgraph-flow/trustgraph/storage/triples/neo4j/__main__.py diff --git a/trustgraph/storage/triples/neo4j/write.py b/trustgraph-flow/trustgraph/storage/triples/neo4j/write.py similarity index 100% rename from trustgraph/storage/triples/neo4j/write.py rename to trustgraph-flow/trustgraph/storage/triples/neo4j/write.py diff --git a/trustgraph-parquet/README.md b/trustgraph-parquet/README.md new file mode 100644 index 00000000..7a2ce130 --- /dev/null +++ b/trustgraph-parquet/README.md @@ -0,0 +1 @@ +See https://trustgraph.ai/ diff --git a/scripts/concat-parquet b/trustgraph-parquet/scripts/concat-parquet similarity index 100% rename from scripts/concat-parquet rename to trustgraph-parquet/scripts/concat-parquet diff --git a/scripts/dump-parquet b/trustgraph-parquet/scripts/dump-parquet similarity index 100% rename from scripts/dump-parquet rename to trustgraph-parquet/scripts/dump-parquet diff --git a/scripts/ge-dump-parquet b/trustgraph-parquet/scripts/ge-dump-parquet similarity index 100% rename from scripts/ge-dump-parquet rename to trustgraph-parquet/scripts/ge-dump-parquet diff --git a/scripts/load-graph-embeddings 
b/trustgraph-parquet/scripts/load-graph-embeddings similarity index 100% rename from scripts/load-graph-embeddings rename to trustgraph-parquet/scripts/load-graph-embeddings diff --git a/scripts/load-triples b/trustgraph-parquet/scripts/load-triples similarity index 100% rename from scripts/load-triples rename to trustgraph-parquet/scripts/load-triples diff --git a/scripts/triples-dump-parquet b/trustgraph-parquet/scripts/triples-dump-parquet similarity index 100% rename from scripts/triples-dump-parquet rename to trustgraph-parquet/scripts/triples-dump-parquet diff --git a/trustgraph-parquet/setup.py b/trustgraph-parquet/setup.py new file mode 100644 index 00000000..6da7d916 --- /dev/null +++ b/trustgraph-parquet/setup.py @@ -0,0 +1,48 @@ +import setuptools +import os +import importlib + +with open("README.md", "r") as fh: + long_description = fh.read() + +# Load a version number module +spec = importlib.util.spec_from_file_location( + 'version', 'trustgraph/parquet_version.py' +) +version_module = importlib.util.module_from_spec(spec) +spec.loader.exec_module(version_module) + +version = version_module.__version__ + +setuptools.setup( + name="trustgraph-parquet", + version=version, + author="trustgraph.ai", + author_email="security@trustgraph.ai", + description="TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/trustgraph-ai/trustgraph", + packages=setuptools.find_namespace_packages( + where='./', + ), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", + "Operating System :: OS Independent", + ], + python_requires='>=3.8', + download_url = "https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v" + version + ".tar.gz", + install_requires=[ + "trustgraph-base", + 
"pulsar-client", + "prometheus-client", + "pyarrow", + ], + scripts=[ + "scripts/concat-parquet", + "scripts/dump-parquet", + "scripts/ge-dump-parquet", + "scripts/triples-dump-parquet", + ] +) diff --git a/trustgraph/storage/triples/__init__.py b/trustgraph-parquet/trustgraph/dump/__init__.py similarity index 100% rename from trustgraph/storage/triples/__init__.py rename to trustgraph-parquet/trustgraph/dump/__init__.py diff --git a/trustgraph-parquet/trustgraph/dump/graph_embeddings/__init__.py b/trustgraph-parquet/trustgraph/dump/graph_embeddings/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/trustgraph/dump/triples/parquet/__init__.py b/trustgraph-parquet/trustgraph/dump/graph_embeddings/parquet/__init__.py similarity index 100% rename from trustgraph/dump/triples/parquet/__init__.py rename to trustgraph-parquet/trustgraph/dump/graph_embeddings/parquet/__init__.py diff --git a/trustgraph/storage/triples/cassandra/__main__.py b/trustgraph-parquet/trustgraph/dump/graph_embeddings/parquet/__main__.py similarity index 100% rename from trustgraph/storage/triples/cassandra/__main__.py rename to trustgraph-parquet/trustgraph/dump/graph_embeddings/parquet/__main__.py diff --git a/trustgraph/dump/graph_embeddings/parquet/processor.py b/trustgraph-parquet/trustgraph/dump/graph_embeddings/parquet/processor.py similarity index 100% rename from trustgraph/dump/graph_embeddings/parquet/processor.py rename to trustgraph-parquet/trustgraph/dump/graph_embeddings/parquet/processor.py diff --git a/trustgraph/dump/graph_embeddings/parquet/writer.py b/trustgraph-parquet/trustgraph/dump/graph_embeddings/parquet/writer.py similarity index 100% rename from trustgraph/dump/graph_embeddings/parquet/writer.py rename to trustgraph-parquet/trustgraph/dump/graph_embeddings/parquet/writer.py diff --git a/trustgraph-parquet/trustgraph/dump/triples/__init__.py b/trustgraph-parquet/trustgraph/dump/triples/__init__.py new file mode 100644 index 00000000..e69de29b diff 
--git a/trustgraph/embeddings/ollama/__init__.py b/trustgraph-parquet/trustgraph/dump/triples/parquet/__init__.py similarity index 100% rename from trustgraph/embeddings/ollama/__init__.py rename to trustgraph-parquet/trustgraph/dump/triples/parquet/__init__.py diff --git a/trustgraph/storage/triples/neo4j/__main__.py b/trustgraph-parquet/trustgraph/dump/triples/parquet/__main__.py similarity index 100% rename from trustgraph/storage/triples/neo4j/__main__.py rename to trustgraph-parquet/trustgraph/dump/triples/parquet/__main__.py diff --git a/trustgraph/dump/triples/parquet/processor.py b/trustgraph-parquet/trustgraph/dump/triples/parquet/processor.py similarity index 100% rename from trustgraph/dump/triples/parquet/processor.py rename to trustgraph-parquet/trustgraph/dump/triples/parquet/processor.py diff --git a/trustgraph/dump/triples/parquet/writer.py b/trustgraph-parquet/trustgraph/dump/triples/parquet/writer.py similarity index 100% rename from trustgraph/dump/triples/parquet/writer.py rename to trustgraph-parquet/trustgraph/dump/triples/parquet/writer.py diff --git a/trustgraph-vertexai/README.md b/trustgraph-vertexai/README.md new file mode 100644 index 00000000..7a2ce130 --- /dev/null +++ b/trustgraph-vertexai/README.md @@ -0,0 +1 @@ +See https://trustgraph.ai/ diff --git a/scripts/text-completion-vertexai b/trustgraph-vertexai/scripts/text-completion-vertexai similarity index 100% rename from scripts/text-completion-vertexai rename to trustgraph-vertexai/scripts/text-completion-vertexai diff --git a/trustgraph-vertexai/setup.py b/trustgraph-vertexai/setup.py new file mode 100644 index 00000000..73c03b20 --- /dev/null +++ b/trustgraph-vertexai/setup.py @@ -0,0 +1,45 @@ +import setuptools +import os +import importlib + +with open("README.md", "r") as fh: + long_description = fh.read() + +# Load a version number module +spec = importlib.util.spec_from_file_location( + 'version', 'trustgraph/vertexai_version.py' +) +version_module = 
importlib.util.module_from_spec(spec) +spec.loader.exec_module(version_module) + +version = version_module.__version__ + +setuptools.setup( + name="trustgraph-vertexai", + version=version, + author="trustgraph.ai", + author_email="security@trustgraph.ai", + description="TrustGraph provides a means to run a pipeline of flexible AI processing components in a flexible means to achieve a processing pipeline.", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/trustgraph-ai/trustgraph", + packages=setuptools.find_namespace_packages( + where='./', + ), + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", + "Operating System :: OS Independent", + ], + python_requires='>=3.8', + download_url = "https://github.com/trustgraph-ai/trustgraph/archive/refs/tags/v" + version + ".tar.gz", + install_requires=[ + "trustgraph-base", + "pulsar-client", + "google-cloud-aiplatform", + "prometheus-client", + ], + scripts=[ + "scripts/text-completion-vertexai", + ] +) diff --git a/trustgraph/model/text_completion/vertexai/__init__.py b/trustgraph-vertexai/trustgraph/model/text_completion/vertexai/__init__.py similarity index 100% rename from trustgraph/model/text_completion/vertexai/__init__.py rename to trustgraph-vertexai/trustgraph/model/text_completion/vertexai/__init__.py diff --git a/trustgraph/model/text_completion/vertexai/__main__.py b/trustgraph-vertexai/trustgraph/model/text_completion/vertexai/__main__.py similarity index 100% rename from trustgraph/model/text_completion/vertexai/__main__.py rename to trustgraph-vertexai/trustgraph/model/text_completion/vertexai/__main__.py diff --git a/trustgraph/model/text_completion/vertexai/llm.py b/trustgraph-vertexai/trustgraph/model/text_completion/vertexai/llm.py similarity index 90% rename from trustgraph/model/text_completion/vertexai/llm.py rename to 
trustgraph-vertexai/trustgraph/model/text_completion/vertexai/llm.py index 17ebde6c..c57b9fb0 100755 --- a/trustgraph/model/text_completion/vertexai/llm.py +++ b/trustgraph-vertexai/trustgraph/model/text_completion/vertexai/llm.py @@ -122,6 +122,7 @@ class Processor(ConsumerProducer): print(f"Initialise model {model}", flush=True) self.llm = GenerativeModel(model) + self.model = model print("Initialisation complete", flush=True) @@ -141,18 +142,26 @@ class Processor(ConsumerProducer): with __class__.text_completion_metric.time(): - resp = self.llm.generate_content( + response = self.llm.generate_content( prompt, generation_config=self.generation_config, safety_settings=self.safety_settings ) - resp = resp.text + resp = response.text + inputtokens = int(response.usage_metadata.prompt_token_count) + outputtokens = int(response.usage_metadata.candidates_token_count) + print(resp, flush=True) + print(f"Input Tokens: {inputtokens}", flush=True) + print(f"Output Tokens: {outputtokens}", flush=True) print("Send response...", flush=True) r = TextCompletionResponse( error=None, response=resp, + in_token=inputtokens, + out_token=outputtokens, + model=self.model ) self.producer.send(r, properties={"id": id}) @@ -172,6 +181,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id}) @@ -190,6 +202,9 @@ class Processor(ConsumerProducer): message = str(e), ), response=None, + in_token=None, + out_token=None, + model=None, ) self.producer.send(r, properties={"id": id})