diff --git a/Containerfile b/Containerfile
index e01bd7a2..1c22ebf5 100644
--- a/Containerfile
+++ b/Containerfile
@@ -13,7 +13,7 @@ RUN dnf install -y python3 python3-pip python3-wheel python3-aiohttp \
RUN pip3 install torch --index-url https://download.pytorch.org/whl/cpu
-RUN pip3 install anthropic google-cloud-aiplatform langchain langchain-core \
+RUN pip3 install anthropic boto3 cohere google-cloud-aiplatform langchain langchain-core \
langchain-huggingface langchain-text-splitters langchain-community \
pymilvus sentence-transformers transformers huggingface-hub \
pulsar-client && \
diff --git a/Makefile b/Makefile
index 0d96919f..b6735943 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
# VERSION=$(shell git describe | sed 's/^v//')
-VERSION=0.5.6
+VERSION=0.5.7
all: container
diff --git a/docker-compose-bedrock.yaml b/docker-compose-bedrock.yaml
new file mode 100644
index 00000000..00150a93
--- /dev/null
+++ b/docker-compose-bedrock.yaml
@@ -0,0 +1,267 @@
+
+volumes:
+ cassandra:
+ pulsar-conf:
+ pulsar-data:
+ etcd:
+ minio-data:
+ milvus:
+ prometheus-data:
+ grafana-storage:
+
+services:
+
+ cassandra:
+ image: docker.io/cassandra:4.1.5
+ ports:
+ - "9042:9042"
+ volumes:
+ - "cassandra:/var/lib/cassandra"
+ restart: on-failure:100
+
+ pulsar:
+ image: docker.io/apachepulsar/pulsar:3.3.0
+ command: bin/pulsar standalone
+ ports:
+ - "6650:6650"
+ - "8080:8080"
+ volumes:
+ - "pulsar-conf:/pulsar/conf"
+ - "pulsar-data:/pulsar/data"
+ restart: on-failure:100
+
+ init-pulsar:
+ image: docker.io/apachepulsar/pulsar:3.3.0
+ command:
+ - "sh"
+ - "-c"
+ - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response"
+ depends_on:
+ pulsar:
+ condition: service_started
+ restart: on-failure:100
+
+ pulsar-manager:
+ image: docker.io/apachepulsar/pulsar-manager:v0.3.0
+ ports:
+ - "9527:9527"
+ - "7750:7750"
+ environment:
+ SPRING_CONFIGURATION_FILE: /pulsar-manager/pulsar-manager/application.properties
+ restart: on-failure:100
+
+ etcd:
+ image: quay.io/coreos/etcd:v3.5.5
+ command:
+ - "etcd"
+ - "-advertise-client-urls=http://127.0.0.1:2379"
+ - "-listen-client-urls"
+ - "http://0.0.0.0:2379"
+ - "--data-dir"
+ - "/etcd"
+ environment:
+ ETCD_AUTO_COMPACTION_MODE: revision
+ ETCD_AUTO_COMPACTION_RETENTION: "1000"
+ ETCD_QUOTA_BACKEND_BYTES: "4294967296"
+ ETCD_SNAPSHOT_COUNT: "50000"
+ ports:
+ - "2379:2379"
+ volumes:
+ - "etcd:/etcd"
+ restart: on-failure:100
+
+ minio:
+ image: docker.io/minio/minio:RELEASE.2024-07-04T14-25-45Z
+ command:
+ - "minio"
+ - "server"
+ - "/minio_data"
+ - "--console-address"
+ - ":9001"
+ environment:
+ MINIO_ROOT_USER: minioadmin
+ MINIO_ROOT_PASSWORD: minioadmin
+ ports:
+ - "9001:9001"
+ volumes:
+ - "minio-data:/minio_data"
+ restart: on-failure:100
+
+ milvus:
+ image: docker.io/milvusdb/milvus:v2.4.5
+ command:
+ - "milvus"
+ - "run"
+ - "standalone"
+ environment:
+ ETCD_ENDPOINTS: etcd:2379
+ MINIO_ADDRESS: minio:9000
+ ports:
+ - "9091:9091"
+ - "19530:19530"
+ volumes:
+ - "milvus:/var/lib/milvus"
+ restart: on-failure:100
+
+ prometheus:
+ image: docker.io/prom/prometheus:v2.53.1
+ ports:
+ - "9090:9090"
+ volumes:
+ - "./prometheus:/etc/prometheus"
+ - "prometheus-data:/prometheus"
+ restart: on-failure:100
+
+ grafana:
+ image: docker.io/grafana/grafana:10.0.0
+ ports:
+ - "3000:3000"
+ volumes:
+ - "grafana-storage:/var/lib/grafana"
+ - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml"
+ - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml"
+ - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json"
+ environment:
+# GF_AUTH_ANONYMOUS_ORG_ROLE: Admin
+# GF_AUTH_ANONYMOUS_ENABLED: true
+# GF_ORG_ROLE: Admin
+ GF_ORG_NAME: trustgraph.ai
+# GF_SERVER_ROOT_URL: https://example.com
+ restart: on-failure:100
+
+ pdf-decoder:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "pdf-decoder"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ chunker:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "chunker-recursive"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "--chunk-size"
+ - "2000"
+ - "--chunk-overlap"
+ - "100"
+ restart: on-failure:100
+
+ vectorize:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "embeddings-vectorize"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ embeddings:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "embeddings-hf"
+ - "-p"
+ - "pulsar://pulsar:6650"
+# - "-m"
+# - "mixedbread-ai/mxbai-embed-large-v1"
+ restart: on-failure:100
+
+ kg-extract-definitions:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "kg-extract-definitions"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ kg-extract-relationships:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "kg-extract-relationships"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ store-graph-embeddings:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "ge-write-milvus"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-t"
+ - "http://milvus:19530"
+ restart: on-failure:100
+
+ store-triples:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "triples-write-cassandra"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-g"
+ - "cassandra"
+ restart: on-failure:100
+
+ text-completion:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "text-completion-bedrock"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-z"
+ - "${AWS_ID_KEY}"
+ - "-k"
+ - "${AWS_SECRET_KEY}"
+ - "-r"
+ - "us-west-2"
+ restart: on-failure:100
+
+ text-completion-rag:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "text-completion-bedrock"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ # - "-m"
+ # - "mistral.mistral-large-2407-v1:0"
+ - "-z"
+ - "${AWS_ID_KEY}"
+ - "-k"
+ - "${AWS_SECRET_KEY}"
+ - "-r"
+ - "us-west-2"
+ - "-i"
+ - "non-persistent://tg/request/text-completion-rag"
+ - "-o"
+ - "non-persistent://tg/response/text-completion-rag-response"
+ restart: on-failure:100
+
+ #text-completion-rag:
+ # image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ # command:
+ # - "text-completion-ollama"
+ # - "-p"
+ # - "pulsar://pulsar:6650"
+ # - "-r"
+ # - "http://${OLLAMA_HOST}:11434/"
+ # - "-i"
+ # - "non-persistent://tg/request/text-completion-rag"
+ # - "-o"
+ # - "non-persistent://tg/response/text-completion-rag-response"
+ # - "-m"
+ # - "gemma2:2b"
+ # restart: on-failure:100
+
+ graph-rag:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "graph-rag"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "--text-completion-request-queue"
+ - "non-persistent://tg/request/text-completion-rag"
+ - "--text-completion-response-queue"
+ - "non-persistent://tg/response/text-completion-rag-response"
+ restart: on-failure:100
+
diff --git a/docker-compose-cohere.yaml b/docker-compose-cohere.yaml
new file mode 100644
index 00000000..47e8c81c
--- /dev/null
+++ b/docker-compose-cohere.yaml
@@ -0,0 +1,223 @@
+
+volumes:
+ cassandra:
+ pulsar-conf:
+ pulsar-data:
+ etcd:
+ minio-data:
+ milvus:
+ prometheus-data:
+ grafana-storage:
+
+services:
+
+ cassandra:
+ image: docker.io/cassandra:4.1.5
+ ports:
+ - "9042:9042"
+ volumes:
+ - "cassandra:/var/lib/cassandra"
+ restart: on-failure:100
+
+ pulsar:
+ image: docker.io/apachepulsar/pulsar:3.3.0
+ command: bin/pulsar standalone
+ ports:
+ - "6650:6650"
+ - "8080:8080"
+ volumes:
+ - "pulsar-conf:/pulsar/conf"
+ - "pulsar-data:/pulsar/data"
+ restart: on-failure:100
+
+ init-pulsar:
+ image: docker.io/apachepulsar/pulsar:3.3.0
+ command:
+ - "sh"
+ - "-c"
+ - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response"
+ depends_on:
+ pulsar:
+ condition: service_started
+ restart: on-failure:100
+
+ pulsar-manager:
+ image: docker.io/apachepulsar/pulsar-manager:v0.3.0
+ ports:
+ - "9527:9527"
+ - "7750:7750"
+ environment:
+ SPRING_CONFIGURATION_FILE: /pulsar-manager/pulsar-manager/application.properties
+ restart: on-failure:100
+
+ etcd:
+ image: quay.io/coreos/etcd:v3.5.5
+ command:
+ - "etcd"
+ - "-advertise-client-urls=http://127.0.0.1:2379"
+ - "-listen-client-urls"
+ - "http://0.0.0.0:2379"
+ - "--data-dir"
+ - "/etcd"
+ environment:
+ ETCD_AUTO_COMPACTION_MODE: revision
+ ETCD_AUTO_COMPACTION_RETENTION: "1000"
+ ETCD_QUOTA_BACKEND_BYTES: "4294967296"
+ ETCD_SNAPSHOT_COUNT: "50000"
+ ports:
+ - "2379:2379"
+ volumes:
+ - "etcd:/etcd"
+ restart: on-failure:100
+
+ minio:
+ image: docker.io/minio/minio:RELEASE.2024-07-04T14-25-45Z
+ command:
+ - "minio"
+ - "server"
+ - "/minio_data"
+ - "--console-address"
+ - ":9001"
+ environment:
+ MINIO_ROOT_USER: minioadmin
+ MINIO_ROOT_PASSWORD: minioadmin
+ ports:
+ - "9001:9001"
+ volumes:
+ - "minio-data:/minio_data"
+ restart: on-failure:100
+
+ milvus:
+ image: docker.io/milvusdb/milvus:v2.4.5
+ command:
+ - "milvus"
+ - "run"
+ - "standalone"
+ environment:
+ ETCD_ENDPOINTS: etcd:2379
+ MINIO_ADDRESS: minio:9000
+ ports:
+ - "9091:9091"
+ - "19530:19530"
+ volumes:
+ - "milvus:/var/lib/milvus"
+ restart: on-failure:100
+
+ prometheus:
+ image: docker.io/prom/prometheus:v2.53.1
+ ports:
+ - "9090:9090"
+ volumes:
+ - "./prometheus:/etc/prometheus"
+ - "prometheus-data:/prometheus"
+ restart: on-failure:100
+
+ grafana:
+ image: docker.io/grafana/grafana:10.0.0
+ ports:
+ - "3000:3000"
+ volumes:
+ - "grafana-storage:/var/lib/grafana"
+ - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml"
+ - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml"
+ - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json"
+ environment:
+# GF_AUTH_ANONYMOUS_ORG_ROLE: Admin
+# GF_AUTH_ANONYMOUS_ENABLED: true
+# GF_ORG_ROLE: Admin
+ GF_ORG_NAME: trustgraph.ai
+# GF_SERVER_ROOT_URL: https://example.com
+ restart: on-failure:100
+
+ pdf-decoder:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "pdf-decoder"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ chunker:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "chunker-recursive"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "--chunk-size"
+ - "1000"
+ - "--chunk-overlap"
+ - "50"
+ restart: on-failure:100
+
+ vectorize:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "embeddings-vectorize"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ embeddings:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "embeddings-hf"
+ - "-p"
+ - "pulsar://pulsar:6650"
+# - "-m"
+# - "mixedbread-ai/mxbai-embed-large-v1"
+ restart: on-failure:100
+
+ kg-extract-definitions:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "kg-extract-definitions"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ kg-extract-relationships:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "kg-extract-relationships"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ store-graph-embeddings:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "ge-write-milvus"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-t"
+ - "http://milvus:19530"
+ restart: on-failure:100
+
+ store-triples:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "triples-write-cassandra"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-g"
+ - "cassandra"
+ restart: on-failure:100
+
+ text-completion:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "text-completion-cohere"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-k"
+      - "${COHERE_KEY}"
+ restart: on-failure:100
+
+ graph-rag:
+    image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "graph-rag"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
diff --git a/docker-compose-mix.yaml b/docker-compose-mix.yaml
new file mode 100644
index 00000000..8b94825a
--- /dev/null
+++ b/docker-compose-mix.yaml
@@ -0,0 +1,261 @@
+
+volumes:
+ cassandra:
+ pulsar-conf:
+ pulsar-data:
+ etcd:
+ minio-data:
+ milvus:
+ prometheus-data:
+ grafana-storage:
+
+services:
+
+ cassandra:
+ image: docker.io/cassandra:4.1.5
+ ports:
+ - "9042:9042"
+ volumes:
+ - "cassandra:/var/lib/cassandra"
+ restart: on-failure:100
+
+ pulsar:
+ image: docker.io/apachepulsar/pulsar:3.3.0
+ command: bin/pulsar standalone
+ ports:
+ - "6650:6650"
+ - "8080:8080"
+ volumes:
+ - "pulsar-conf:/pulsar/conf"
+ - "pulsar-data:/pulsar/data"
+ restart: on-failure:100
+
+ init-pulsar:
+ image: docker.io/apachepulsar/pulsar:3.3.0
+ command:
+ - "sh"
+ - "-c"
+ - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response"
+ depends_on:
+ pulsar:
+ condition: service_started
+ restart: on-failure:100
+
+ pulsar-manager:
+ image: docker.io/apachepulsar/pulsar-manager:v0.3.0
+ ports:
+ - "9527:9527"
+ - "7750:7750"
+ environment:
+ SPRING_CONFIGURATION_FILE: /pulsar-manager/pulsar-manager/application.properties
+ restart: on-failure:100
+
+ etcd:
+ image: quay.io/coreos/etcd:v3.5.5
+ command:
+ - "etcd"
+ - "-advertise-client-urls=http://127.0.0.1:2379"
+ - "-listen-client-urls"
+ - "http://0.0.0.0:2379"
+ - "--data-dir"
+ - "/etcd"
+ environment:
+ ETCD_AUTO_COMPACTION_MODE: revision
+ ETCD_AUTO_COMPACTION_RETENTION: "1000"
+ ETCD_QUOTA_BACKEND_BYTES: "4294967296"
+ ETCD_SNAPSHOT_COUNT: "50000"
+ ports:
+ - "2379:2379"
+ volumes:
+ - "etcd:/etcd"
+ restart: on-failure:100
+
+ minio:
+ image: docker.io/minio/minio:RELEASE.2024-07-04T14-25-45Z
+ command:
+ - "minio"
+ - "server"
+ - "/minio_data"
+ - "--console-address"
+ - ":9001"
+ environment:
+ MINIO_ROOT_USER: minioadmin
+ MINIO_ROOT_PASSWORD: minioadmin
+ ports:
+ - "9001:9001"
+ volumes:
+ - "minio-data:/minio_data"
+ restart: on-failure:100
+
+ milvus:
+ image: docker.io/milvusdb/milvus:v2.4.5
+ command:
+ - "milvus"
+ - "run"
+ - "standalone"
+ environment:
+ ETCD_ENDPOINTS: etcd:2379
+ MINIO_ADDRESS: minio:9000
+ ports:
+ - "9091:9091"
+ - "19530:19530"
+ volumes:
+ - "milvus:/var/lib/milvus"
+ restart: on-failure:100
+
+ prometheus:
+ image: docker.io/prom/prometheus:v2.53.1
+ ports:
+ - "9090:9090"
+ volumes:
+ - "./prometheus:/etc/prometheus"
+ - "prometheus-data:/prometheus"
+ restart: on-failure:100
+
+ grafana:
+ image: docker.io/grafana/grafana:10.0.0
+ ports:
+ - "3000:3000"
+ volumes:
+ - "grafana-storage:/var/lib/grafana"
+ - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml"
+ - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml"
+ - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json"
+ environment:
+# GF_AUTH_ANONYMOUS_ORG_ROLE: Admin
+# GF_AUTH_ANONYMOUS_ENABLED: true
+# GF_ORG_ROLE: Admin
+ GF_ORG_NAME: trustgraph.ai
+# GF_SERVER_ROOT_URL: https://example.com
+ restart: on-failure:100
+
+ pdf-decoder:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "pdf-decoder"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ chunker:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "chunker-recursive"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "--chunk-size"
+ - "4000"
+ - "--chunk-overlap"
+ - "120"
+ restart: on-failure:100
+
+ vectorize:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "embeddings-vectorize"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ embeddings:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "embeddings-hf"
+ - "-p"
+ - "pulsar://pulsar:6650"
+# - "-m"
+# - "mixedbread-ai/mxbai-embed-large-v1"
+ restart: on-failure:100
+
+ kg-extract-definitions:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "kg-extract-definitions"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ kg-extract-relationships:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "kg-extract-relationships"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ restart: on-failure:100
+
+ store-graph-embeddings:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "ge-write-milvus"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-t"
+ - "http://milvus:19530"
+ restart: on-failure:100
+
+ store-triples:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "triples-write-cassandra"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-g"
+ - "cassandra"
+ restart: on-failure:100
+
+ text-completion:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "text-completion-cohere"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-k"
+      - "${COHERE_KEY}"
+ - "-m"
+ - "c4ai-aya-23-35b"
+ restart: on-failure:100
+
+ text-completion-rag:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "text-completion-cohere"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "-k"
+      - "${COHERE_KEY}"
+ - "-m"
+ - "c4ai-aya-23-8b"
+ - "-i"
+ - "non-persistent://tg/request/text-completion-rag"
+ - "-o"
+ - "non-persistent://tg/response/text-completion-rag-response"
+ restart: on-failure:100
+
+ #text-completion-rag:
+ # image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ # command:
+ # - "text-completion-ollama"
+ # - "-p"
+ # - "pulsar://pulsar:6650"
+ # - "-r"
+ # - "http://${OLLAMA_HOST}:11434/"
+ # - "-i"
+ # - "non-persistent://tg/request/text-completion-rag"
+ # - "-o"
+ # - "non-persistent://tg/response/text-completion-rag-response"
+ # - "-m"
+ # - "gemma2:2b"
+ # restart: on-failure:100
+
+ graph-rag:
+ image: docker.io/trustgraph/trustgraph-flow:0.5.6
+ command:
+ - "graph-rag"
+ - "-p"
+ - "pulsar://pulsar:6650"
+ - "--text-completion-request-queue"
+ - "non-persistent://tg/request/text-completion-rag"
+ - "--text-completion-response-queue"
+ - "non-persistent://tg/response/text-completion-rag-response"
+ restart: on-failure:100
+
diff --git a/requirements.txt b/requirements.txt
index a0c184ca..9a49a5aa 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -19,3 +19,4 @@ google-cloud-aiplatform
pyyaml
prometheus-client
pyarrow
+boto3
diff --git a/scripts/text-completion-bedrock b/scripts/text-completion-bedrock
new file mode 100755
index 00000000..55c26314
--- /dev/null
+++ b/scripts/text-completion-bedrock
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+
+from trustgraph.model.text_completion.bedrock import run
+
+run()
+
diff --git a/scripts/text-completion-cohere b/scripts/text-completion-cohere
new file mode 100755
index 00000000..42110db6
--- /dev/null
+++ b/scripts/text-completion-cohere
@@ -0,0 +1,6 @@
+#!/usr/bin/env python3
+
+from trustgraph.model.text_completion.cohere import run
+
+run()
+
diff --git a/setup.py b/setup.py
index 2be871ac..2cf35306 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@ import os
with open("README.md", "r") as fh:
long_description = fh.read()
-version = "0.5.6"
+version = "0.5.7"
setuptools.setup(
name="trustgraph",
@@ -45,6 +45,8 @@ setuptools.setup(
"pyyaml",
"prometheus-client",
"pyarrow",
+ "cohere",
+ "boto3",
],
scripts=[
"scripts/chunker-recursive",
@@ -68,9 +70,11 @@ setuptools.setup(
"scripts/query",
"scripts/run-processing",
"scripts/text-completion-azure",
+ "scripts/text-completion-bedrock",
"scripts/text-completion-claude",
"scripts/text-completion-ollama",
"scripts/text-completion-vertexai",
+ "scripts/text-completion-cohere",
"scripts/triples-dump-parquet",
"scripts/triples-write-cassandra",
]
diff --git a/tests/test-graph-rag2 b/tests/test-graph-rag2
new file mode 100755
index 00000000..4837d3bf
--- /dev/null
+++ b/tests/test-graph-rag2
@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+
+import pulsar
+from trustgraph.graph_rag_client import GraphRagClient
+
+rag = GraphRagClient(pulsar_host="pulsar://localhost:6650")
+
+query="""List 20 key points to describe the research that led to the discovery of Leo VI.
+"""
+
+resp = rag.request(query)
+
+print(resp)
+
diff --git a/trustgraph/graph_rag.py b/trustgraph/graph_rag.py
index 967bd68a..135797da 100644
--- a/trustgraph/graph_rag.py
+++ b/trustgraph/graph_rag.py
@@ -228,16 +228,13 @@ class GraphRag:
kg = self.get_cypher(query)
- prompt=f"""Study the knowledge graph provided, and use
-the information to answer the question. The question should be answered
-in plain English only.
-
-
+ prompt=f"""Study the following set of knowledge statements. The statements are written in Cypher format that has been extracted from a knowledge graph. Use only the provided set of knowledge statements in your response. Do not speculate if the answer is not found in the provided set of knowledge statements.
+
+Here's the knowledge statements:
{kg}
-
-
+
+Use only the provided knowledge statements to respond to the following:
{query}
-
"""
return prompt
diff --git a/trustgraph/model/text_completion/bedrock/__init__.py b/trustgraph/model/text_completion/bedrock/__init__.py
new file mode 100644
index 00000000..f2017af8
--- /dev/null
+++ b/trustgraph/model/text_completion/bedrock/__init__.py
@@ -0,0 +1,3 @@
+
+from . llm import *
+
diff --git a/trustgraph/model/text_completion/bedrock/__main__.py b/trustgraph/model/text_completion/bedrock/__main__.py
new file mode 100755
index 00000000..91342d2d
--- /dev/null
+++ b/trustgraph/model/text_completion/bedrock/__main__.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+
+from . llm import run
+
+if __name__ == '__main__':
+ run()
+
diff --git a/trustgraph/model/text_completion/bedrock/llm.py b/trustgraph/model/text_completion/bedrock/llm.py
new file mode 100755
index 00000000..21ff73d8
--- /dev/null
+++ b/trustgraph/model/text_completion/bedrock/llm.py
@@ -0,0 +1,159 @@
+
+"""
+Simple LLM service, performs text prompt completion using AWS Bedrock.
+Input is prompt, output is response. Mistral is default.
+"""
+
+import boto3
+import json
+import re
+
+from .... schema import TextCompletionRequest, TextCompletionResponse
+from .... schema import text_completion_request_queue
+from .... schema import text_completion_response_queue
+from .... log_level import LogLevel
+from .... base import ConsumerProducer
+
+module = ".".join(__name__.split(".")[1:-1])
+
+default_input_queue = text_completion_request_queue
+default_output_queue = text_completion_response_queue
+default_subscriber = module
+default_model = 'mistral.mistral-large-2407-v1:0'
+default_region = 'us-west-2'
+
+class Processor(ConsumerProducer):
+
+ def __init__(self, **params):
+
+ input_queue = params.get("input_queue", default_input_queue)
+ output_queue = params.get("output_queue", default_output_queue)
+ subscriber = params.get("subscriber", default_subscriber)
+ model = params.get("model", default_model)
+ aws_id = params.get("aws_id_key")
+ aws_secret = params.get("aws_secret")
+ aws_region = params.get("aws_region", default_region)
+
+ super(Processor, self).__init__(
+ **params | {
+ "input_queue": input_queue,
+ "output_queue": output_queue,
+ "subscriber": subscriber,
+ "input_schema": TextCompletionRequest,
+ "output_schema": TextCompletionResponse,
+ "model": model,
+ }
+ )
+
+ self.model = model
+
+ self.session = boto3.Session(
+ aws_access_key_id=aws_id,
+ aws_secret_access_key=aws_secret,
+ region_name=aws_region
+ )
+
+ self.bedrock = self.session.client(service_name='bedrock-runtime')
+
+ print("Initialised", flush=True)
+
+ def handle(self, msg):
+
+ v = msg.value()
+
+ # Sender-produced ID
+
+ id = msg.properties()["id"]
+
+ print(f"Handling prompt {id}...", flush=True)
+
+ prompt = v.prompt
+
+ promptbody = json.dumps({
+ "prompt": prompt,
+ "max_tokens": 8192,
+ "temperature": 0.0,
+ "top_p": 0.99,
+ "top_k": 40
+ })
+
+ accept = 'application/json'
+ contentType = 'application/json'
+
+ response = self.bedrock.invoke_model(body=promptbody, modelId=self.model, accept=accept, contentType=contentType)
+
+ # Mistral Response Structure
+ if self.model.startswith("mistral"):
+ response_body = json.loads(response.get("body").read())
+ outputtext = response_body['outputs'][0]['text']
+
+ # Claude Response Structure
+ elif self.model.startswith("anthropic"):
+ model_response = json.loads(response["body"].read())
+ outputtext = model_response['content'][0]['text']
+
+ # Llama 3.1 Response Structure
+ elif self.model.startswith("meta"):
+ model_response = json.loads(response["body"].read())
+ outputtext = model_response["generation"]
+
+ # Use Mistral as default
+ else:
+ response_body = json.loads(response.get("body").read())
+ outputtext = response_body['outputs'][0]['text']
+
+ print(outputtext, flush=True)
+
+ # Parse output for ```json``` delimiters
+ pattern = r'```json\s*([\s\S]*?)\s*```'
+ match = re.search(pattern, outputtext)
+
+ if match:
+ # If delimiters are found, extract the JSON content
+ json_content = match.group(1)
+ json_resp = json_content.strip()
+
+ else:
+ # If no delimiters are found, return the original text
+ json_resp = outputtext.strip()
+
+ print("Send response...", flush=True)
+ r = TextCompletionResponse(response=json_resp)
+ self.send(r, properties={"id": id})
+
+ print("Done.", flush=True)
+
+ @staticmethod
+ def add_args(parser):
+
+ ConsumerProducer.add_args(
+ parser, default_input_queue, default_subscriber,
+ default_output_queue,
+ )
+
+ parser.add_argument(
+ '-m', '--model',
+ default="mistral.mistral-large-2407-v1:0",
+ help=f'Bedrock model (default: Mistral-Large-2407)'
+ )
+
+ parser.add_argument(
+ '-z', '--aws-id-key',
+ help=f'AWS ID Key'
+ )
+
+ parser.add_argument(
+ '-k', '--aws-secret',
+ help=f'AWS Secret Key'
+ )
+
+ parser.add_argument(
+        '-r', '--aws-region', default=default_region,
+        help='AWS Region (default: us-west-2)'
+ )
+
+def run():
+
+ Processor.start(module, __doc__)
+
+
diff --git a/trustgraph/model/text_completion/cohere/__init__.py b/trustgraph/model/text_completion/cohere/__init__.py
new file mode 100644
index 00000000..f2017af8
--- /dev/null
+++ b/trustgraph/model/text_completion/cohere/__init__.py
@@ -0,0 +1,3 @@
+
+from . llm import *
+
diff --git a/trustgraph/model/text_completion/cohere/__main__.py b/trustgraph/model/text_completion/cohere/__main__.py
new file mode 100755
index 00000000..91342d2d
--- /dev/null
+++ b/trustgraph/model/text_completion/cohere/__main__.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+
+from . llm import run
+
+if __name__ == '__main__':
+ run()
+
diff --git a/trustgraph/model/text_completion/cohere/llm.py b/trustgraph/model/text_completion/cohere/llm.py
new file mode 100755
index 00000000..5d8ba57c
--- /dev/null
+++ b/trustgraph/model/text_completion/cohere/llm.py
@@ -0,0 +1,117 @@
+
+"""
+Simple LLM service, performs text prompt completion using Cohere.
+Input is prompt, output is response.
+"""
+
+import cohere
+import re
+
+from .... schema import TextCompletionRequest, TextCompletionResponse
+from .... schema import text_completion_request_queue
+from .... schema import text_completion_response_queue
+from .... log_level import LogLevel
+from .... base import ConsumerProducer
+
+module = ".".join(__name__.split(".")[1:-1])
+
+default_input_queue = text_completion_request_queue
+default_output_queue = text_completion_response_queue
+default_subscriber = module
+default_model = 'c4ai-aya-23-8b'
+
+class Processor(ConsumerProducer):
+
+ def __init__(self, **params):
+
+ input_queue = params.get("input_queue", default_input_queue)
+ output_queue = params.get("output_queue", default_output_queue)
+ subscriber = params.get("subscriber", default_subscriber)
+ model = params.get("model", default_model)
+ api_key = params.get("api_key")
+
+ super(Processor, self).__init__(
+ **params | {
+ "input_queue": input_queue,
+ "output_queue": output_queue,
+ "subscriber": subscriber,
+ "input_schema": TextCompletionRequest,
+ "output_schema": TextCompletionResponse,
+ "model": model,
+ }
+ )
+
+ self.model = model
+
+ self.cohere = cohere.Client(api_key=api_key)
+
+ print("Initialised", flush=True)
+
+ def handle(self, msg):
+
+ v = msg.value()
+
+ # Sender-produced ID
+
+ id = msg.properties()["id"]
+
+ print(f"Handling prompt {id}...", flush=True)
+
+ prompt = v.prompt
+
+ output = self.cohere.chat(
+ model=self.model,
+ message=prompt,
+            preamble="You are a helpful AI-assistant.",
+ temperature=0.0,
+ chat_history=[],
+ prompt_truncation='auto',
+ connectors=[]
+ )
+
+ resp = output.text
+ print(resp, flush=True)
+
+ # Parse output for ```json``` delimiters
+ pattern = r'```json\s*([\s\S]*?)\s*```'
+ match = re.search(pattern, resp)
+
+ if match:
+ # If delimiters are found, extract the JSON content
+ json_content = match.group(1)
+ json_resp = json_content.strip()
+
+ else:
+ # If no delimiters are found, return the original text
+ json_resp = resp.strip()
+
+ print("Send response...", flush=True)
+ r = TextCompletionResponse(response=json_resp)
+ self.send(r, properties={"id": id})
+
+ print("Done.", flush=True)
+
+ @staticmethod
+ def add_args(parser):
+
+ ConsumerProducer.add_args(
+ parser, default_input_queue, default_subscriber,
+ default_output_queue,
+ )
+
+ parser.add_argument(
+ '-m', '--model',
+ default="c4ai-aya-23-8b",
+ help=f'Cohere model (default: c4ai-aya-23-8b)'
+ )
+
+ parser.add_argument(
+ '-k', '--api-key',
+ help=f'Cohere API key'
+ )
+
+def run():
+
+ Processor.start(module, __doc__)
+
+