From f661791bbf30d14e3b151353b70cfe61c6795586 Mon Sep 17 00:00:00 2001 From: cybermaggedon Date: Sat, 7 Sep 2024 18:59:38 +0100 Subject: [PATCH] K8s (#58) Added templates which produce K8s resources. With the provided GCP wrapper, it works on GCP K8s cluster. This isn't stable enough for other folks to use so will need more piloting before it can be documented and released. --- Makefile | 7 + grafana/{ => dashboards}/dashboard.json | 0 grafana/{ => provisioning}/dashboard.yml | 0 grafana/{ => provisioning}/datasource.yml | 0 templates/components/azure.jsonnet | 10 + templates/components/bedrock.jsonnet | 10 + templates/components/cassandra.jsonnet | 10 + .../components/chunker-recursive.jsonnet | 5 + templates/components/claude.jsonnet | 10 + templates/components/cohere.jsonnet | 10 + templates/components/document-rag.jsonnet | 5 + templates/components/embeddings-hf.jsonnet | 9 +- .../components/embeddings-ollama.jsonnet | 5 + templates/components/grafana.jsonnet | 73 +++- templates/components/graph-rag.jsonnet | 15 + templates/components/milvus.jsonnet | 20 ++ templates/components/neo4j.jsonnet | 10 + templates/components/ollama.jsonnet | 10 + templates/components/openai.jsonnet | 10 + templates/components/prompt-generic.jsonnet | 10 + templates/components/prompt-template.jsonnet | 10 + templates/components/pulsar-manager.jsonnet | 4 +- templates/components/pulsar.jsonnet | 37 +- templates/components/qdrant.jsonnet | 20 ++ templates/components/trustgraph.jsonnet | 15 + templates/components/vertexai.jsonnet | 35 +- templates/config-to-gcp-k8s.jsonnet | 52 +++ templates/docker-compose.jsonnet | 41 ++- templates/k8s.jsonnet | 330 ++++++++++++++++++ templates/stores/cassandra.jsonnet | 2 +- templates/stores/milvus.jsonnet | 8 +- templates/stores/neo4j.jsonnet | 8 +- templates/stores/qdrant.jsonnet | 6 + templates/values/version.jsonnet | 1 + tg-launch-azure-cassandra.yaml | 36 +- tg-launch-azure-neo4j.yaml | 36 +- tg-launch-bedrock-cassandra.yaml | 36 +- 
tg-launch-bedrock-neo4j.yaml | 36 +- tg-launch-claude-cassandra.yaml | 36 +- tg-launch-claude-neo4j.yaml | 36 +- tg-launch-cohere-cassandra.yaml | 36 +- tg-launch-cohere-neo4j.yaml | 36 +- tg-launch-ollama-cassandra.yaml | 36 +- tg-launch-ollama-neo4j.yaml | 36 +- tg-launch-openai-cassandra.yaml | 36 +- tg-launch-openai-neo4j.yaml | 36 +- tg-launch-vertexai-cassandra.yaml | 44 +-- tg-launch-vertexai-neo4j.yaml | 44 +-- tg-storage-cassandra.yaml | 32 +- tg-storage-neo4j.yaml | 32 +- 50 files changed, 1037 insertions(+), 345 deletions(-) rename grafana/{ => dashboards}/dashboard.json (100%) rename grafana/{ => provisioning}/dashboard.yml (100%) rename grafana/{ => provisioning}/datasource.yml (100%) create mode 100644 templates/config-to-gcp-k8s.jsonnet create mode 100644 templates/k8s.jsonnet create mode 100644 templates/values/version.jsonnet diff --git a/Makefile b/Makefile index f361bd80..5fd5cc9b 100644 --- a/Makefile +++ b/Makefile @@ -69,3 +69,10 @@ update-templates: set-version --ext-str options=$${cm} -S $${input} > $${output}; \ done; \ done + +config.yaml: config.json FORCE + jsonnet -J . 
-J templates/ templates/config-to-k8s.jsonnet | \ + python3 -c 'import sys, yaml, json; j=json.loads(sys.stdin.read()); print(yaml.safe_dump(j))' > $@ + +FORCE: + diff --git a/grafana/dashboard.json b/grafana/dashboards/dashboard.json similarity index 100% rename from grafana/dashboard.json rename to grafana/dashboards/dashboard.json diff --git a/grafana/dashboard.yml b/grafana/provisioning/dashboard.yml similarity index 100% rename from grafana/dashboard.yml rename to grafana/provisioning/dashboard.yml diff --git a/grafana/datasource.yml b/grafana/provisioning/datasource.yml similarity index 100% rename from grafana/datasource.yml rename to grafana/provisioning/datasource.yml diff --git a/templates/components/azure.jsonnet b/templates/components/azure.jsonnet index f10803eb..3ee819ee 100644 --- a/templates/components/azure.jsonnet +++ b/templates/components/azure.jsonnet @@ -37,8 +37,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -74,8 +79,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/bedrock.jsonnet b/templates/components/bedrock.jsonnet index 666d6bf5..1c375621 100644 --- a/templates/components/bedrock.jsonnet +++ b/templates/components/bedrock.jsonnet @@ -44,8 +44,13 @@ local chunker = import "chunker-recursive.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -85,8 +90,13 @@ local chunker = import "chunker-recursive.jsonnet"; "text-completion-rag", [ container ] ); + local service = + 
engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/cassandra.jsonnet b/templates/components/cassandra.jsonnet index 4e08e72e..b9345fed 100644 --- a/templates/components/cassandra.jsonnet +++ b/templates/components/cassandra.jsonnet @@ -27,8 +27,13 @@ cassandra + { "stop-triples", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -54,8 +59,13 @@ cassandra + { "query-triples", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) } diff --git a/templates/components/chunker-recursive.jsonnet b/templates/components/chunker-recursive.jsonnet index 58bcba46..0b64b712 100644 --- a/templates/components/chunker-recursive.jsonnet +++ b/templates/components/chunker-recursive.jsonnet @@ -31,8 +31,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "chunker", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/claude.jsonnet b/templates/components/claude.jsonnet index d4f3df15..0cd190d4 100644 --- a/templates/components/claude.jsonnet +++ b/templates/components/claude.jsonnet @@ -34,8 +34,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -69,8 +74,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git 
a/templates/components/cohere.jsonnet b/templates/components/cohere.jsonnet index 64e77bcf..6c99086a 100644 --- a/templates/components/cohere.jsonnet +++ b/templates/components/cohere.jsonnet @@ -35,8 +35,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -68,8 +73,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/document-rag.jsonnet b/templates/components/document-rag.jsonnet index b1a43db7..ac5c11ec 100644 --- a/templates/components/document-rag.jsonnet +++ b/templates/components/document-rag.jsonnet @@ -28,8 +28,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "document-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/embeddings-hf.jsonnet b/templates/components/embeddings-hf.jsonnet index 3e53d32c..b46feac7 100644 --- a/templates/components/embeddings-hf.jsonnet +++ b/templates/components/embeddings-hf.jsonnet @@ -21,15 +21,20 @@ local prompts = import "prompts/mixtral.jsonnet"; "-m", $["embeddings-model"], ]) - .with_limits("1.0", "256M") - .with_reservations("0.5", "256M"); + .with_limits("1.0", "400M") + .with_reservations("0.5", "400M"); local containerSet = engine.containers( "embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/embeddings-ollama.jsonnet b/templates/components/embeddings-ollama.jsonnet index c2a2809c..425a1c47 100644 --- 
a/templates/components/embeddings-ollama.jsonnet +++ b/templates/components/embeddings-ollama.jsonnet @@ -30,8 +30,13 @@ local url = import "values/url.jsonnet"; "embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/grafana.jsonnet b/templates/components/grafana.jsonnet index ccc92d4b..133e49c1 100644 --- a/templates/components/grafana.jsonnet +++ b/templates/components/grafana.jsonnet @@ -8,26 +8,37 @@ local images = import "values/images.jsonnet"; create:: function(engine) local vol = engine.volume("prometheus-data").with_size("20G"); - local cfgVol = engine.configVolume("./prometheus") - .with_size("20G"); + + local cfgVol = engine.configVolume( + "prometheus-cfg", "./prometheus", + { + "prometheus.yml": importstr "prometheus/prometheus.yml", + } + ); local container = engine.container("prometheus") .with_image(images.prometheus) .with_limits("0.5", "128M") .with_reservations("0.1", "128M") +// .with_command(["/bin/sh", "-c", "sleep 9999999"]) .with_port(9090, 9090, "http") - .with_volume_mount(cfgVol, "/etc/prometheus") + .with_volume_mount(cfgVol, "/etc/prometheus/") .with_volume_mount(vol, "/prometheus"); local containerSet = engine.containers( "prometheus", [ container ] ); + local service = + engine.service(containerSet) + .with_port(9090, 9090, "http"); + engine.resources([ cfgVol, vol, containerSet, + service, ]) }, @@ -37,12 +48,33 @@ local images = import "values/images.jsonnet"; create:: function(engine) local vol = engine.volume("grafana-storage").with_size("20G"); - local cv1 = engine.configVolume("./grafana/dashboard.yml") - .with_size("20G"); - local cv2 = engine.configVolume("./grafana/datasource.yml") - .with_size("20G"); - local cv3 = engine.configVolume("./grafana/dashboard.json") - .with_size("20G"); + + local provDashVol = engine.configVolume( + "prov-dash", "./grafana/provisioning/", + { 
+ "dashboard.yml": + importstr "grafana/provisioning/dashboard.yml", + } + + ); + + local provDataVol = engine.configVolume( + "prov-data", "./grafana/provisioning/", + { + "datasource.yml": + importstr "grafana/provisioning/datasource.yml", + } + + ); + + local dashVol = engine.configVolume( + "dashboards", "./grafana/dashboards/", + { + "dashboard.json": + importstr "grafana/dashboards/dashboard.json", + } + + ); local container = engine.container("grafana") @@ -58,20 +90,31 @@ local images = import "values/images.jsonnet"; .with_reservations("0.5", "256M") .with_port(3000, 3000, "cassandra") .with_volume_mount(vol, "/var/lib/grafana") - .with_volume_mount(cv1, "/etc/grafana/provisioning/dashboards/dashboard.yml") - .with_volume_mount(cv2, "/etc/grafana/provisioning/datasources/datasource.yml") - .with_volume_mount(cv3, "/var/lib/grafana/dashboards/dashboard.json"); + .with_volume_mount( + provDashVol, "/etc/grafana/provisioning/dashboards/" + ) + .with_volume_mount( + provDataVol, "/etc/grafana/provisioning/datasources/" + ) + .with_volume_mount( + dashVol, "/var/lib/grafana/dashboards/" + ); local containerSet = engine.containers( "grafana", [ container ] ); + local service = + engine.service(containerSet) + .with_port(3000, 3000, "http"); + engine.resources([ vol, - cv1, - cv2, - cv3, + provDashVol, + provDataVol, + dashVol, containerSet, + service, ]) }, diff --git a/templates/components/graph-rag.jsonnet b/templates/components/graph-rag.jsonnet index ad5ae6f4..39fe3b47 100644 --- a/templates/components/graph-rag.jsonnet +++ b/templates/components/graph-rag.jsonnet @@ -27,8 +27,13 @@ local url = import "values/url.jsonnet"; "kg-extract-definitions", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -52,8 +57,13 @@ local url = import "values/url.jsonnet"; "kg-extract-relationships", [ container ] ); + local service = + 
engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -87,8 +97,13 @@ local url = import "values/url.jsonnet"; "graph-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/milvus.jsonnet b/templates/components/milvus.jsonnet index 556b1fe8..b3044f98 100644 --- a/templates/components/milvus.jsonnet +++ b/templates/components/milvus.jsonnet @@ -27,8 +27,13 @@ milvus + { "store-graph-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -54,8 +59,13 @@ milvus + { "query-graph-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -81,8 +91,13 @@ milvus + { "store-doc-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -108,8 +123,13 @@ milvus + { "query-doc-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/neo4j.jsonnet b/templates/components/neo4j.jsonnet index 2e808ff0..b70562fe 100644 --- a/templates/components/neo4j.jsonnet +++ b/templates/components/neo4j.jsonnet @@ -28,8 +28,13 @@ neo4j + { "store-triples", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -55,8 +60,13 @@ neo4j + { "query-triples", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, 
"metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/ollama.jsonnet b/templates/components/ollama.jsonnet index e6b8e895..b0507cef 100644 --- a/templates/components/ollama.jsonnet +++ b/templates/components/ollama.jsonnet @@ -31,8 +31,13 @@ local prompts = import "prompts/slm.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -64,8 +69,13 @@ local prompts = import "prompts/slm.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/openai.jsonnet b/templates/components/openai.jsonnet index 63917376..3d1a2b73 100644 --- a/templates/components/openai.jsonnet +++ b/templates/components/openai.jsonnet @@ -37,8 +37,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -74,8 +79,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/prompt-generic.jsonnet b/templates/components/prompt-generic.jsonnet index aa19fb74..5d6d7c54 100644 --- a/templates/components/prompt-generic.jsonnet +++ b/templates/components/prompt-generic.jsonnet @@ -28,8 +28,13 @@ local prompts = import "prompts/mixtral.jsonnet"; "prompt", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -61,8 +66,13 @@ local prompts = import 
"prompts/mixtral.jsonnet"; "prompt-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/prompt-template.jsonnet b/templates/components/prompt-template.jsonnet index e64a18e8..7816fa39 100644 --- a/templates/components/prompt-template.jsonnet +++ b/templates/components/prompt-template.jsonnet @@ -39,8 +39,13 @@ local default_prompts = import "prompts/default-prompts.jsonnet"; "prompt", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -82,8 +87,13 @@ local default_prompts = import "prompts/default-prompts.jsonnet"; "prompt-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/pulsar-manager.jsonnet b/templates/components/pulsar-manager.jsonnet index 12ce99da..9a0b59b2 100644 --- a/templates/components/pulsar-manager.jsonnet +++ b/templates/components/pulsar-manager.jsonnet @@ -27,8 +27,8 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(9527, 9527) - .with_port(7750, 7750); + .with_port(9527, 9527, "api") + .with_port(7750, 7750, "api2"); engine.resources([ containerSet, diff --git a/templates/components/pulsar.jsonnet b/templates/components/pulsar.jsonnet index 562e2819..058c35c1 100644 --- a/templates/components/pulsar.jsonnet +++ b/templates/components/pulsar.jsonnet @@ -7,19 +7,20 @@ local images = import "values/images.jsonnet"; create:: function(engine) - local confVolume = engine.volume("pulsar-conf").with_size("2G"); +// local confVolume = engine.volume("pulsar-conf").with_size("2G"); local dataVolume = engine.volume("pulsar-data").with_size("20G"); local container = engine.container("pulsar")
.with_image(images.pulsar) - .with_command("bin/pulsar standalone") + .with_command(["bin/pulsar", "standalone"]) +// .with_command(["/bin/sh", "-c", "sleep 9999999"]) .with_environment({ - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" }) - .with_limits("1.0", "900M") - .with_reservations("0.5", "900M") - .with_volume_mount(confVolume, "/pulsar/conf") + .with_limits("2.0", "1500M") + .with_reservations("1.0", "1500M") +// .with_volume_mount(confVolume, "/pulsar/conf") .with_volume_mount(dataVolume, "/pulsar/data") .with_port(6650, 6650, "bookie") .with_port(8080, 8080, "http"); @@ -30,27 +31,35 @@ local images = import "values/images.jsonnet"; .with_command([ "sh", "-c", - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response", + "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done", ]) - .with_limits("0.5", "128M") - .with_reservations("0.1", "128M"); + .with_limits("1", "400M") + .with_reservations("0.1", "400M"); local containerSet = engine.containers( "pulsar", [ - container, adminContainer + container + ] + ); + + local adminContainerSet = engine.containers( + "init-pulsar", + [ + adminContainer ] ); local service = engine.service(containerSet) - .with_port(6650, 6650) - .with_port(8080, 8080); + .with_port(6650, 6650, "bookie") + 
.with_port(8080, 8080, "http"); engine.resources([ - confVolume, +// confVolume, dataVolume, containerSet, + adminContainerSet, service, ]) @@ -58,5 +67,3 @@ local images = import "values/images.jsonnet"; } - - diff --git a/templates/components/qdrant.jsonnet b/templates/components/qdrant.jsonnet index ac6eadf9..f923e84f 100644 --- a/templates/components/qdrant.jsonnet +++ b/templates/components/qdrant.jsonnet @@ -27,8 +27,13 @@ qdrant + { "store-graph-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -54,8 +59,13 @@ qdrant + { "query-graph-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -81,8 +91,13 @@ qdrant + { "store-doc-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -108,8 +123,13 @@ qdrant + { "query-doc-embeddings", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + engine.resources([ containerSet, + service, ]) diff --git a/templates/components/trustgraph.jsonnet b/templates/components/trustgraph.jsonnet index 787f1a0c..c7615ed4 100644 --- a/templates/components/trustgraph.jsonnet +++ b/templates/components/trustgraph.jsonnet @@ -31,8 +31,13 @@ local prompt = import "prompt-template.jsonnet"; "chunker", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ -56,8 +61,13 @@ local prompt = import "prompt-template.jsonnet"; "pdf-decoder", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, @@ 
-81,8 +91,13 @@ local prompt = import "prompt-template.jsonnet"; "vectorize", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ containerSet, + service, ]) }, diff --git a/templates/components/vertexai.jsonnet b/templates/components/vertexai.jsonnet index f2fd3d86..2bc97799 100644 --- a/templates/components/vertexai.jsonnet +++ b/templates/components/vertexai.jsonnet @@ -15,7 +15,13 @@ local prompts = import "prompts/mixtral.jsonnet"; create:: function(engine) - local cfgVol = engine.configVolume("./vertexai"); + local cfgVol = engine.secretVolume( + "vertexai-creds", + "./vertexai", + { + "private.json": importstr "vertexai/private.json", + } + ); local container = engine.container("text-completion") @@ -35,17 +41,22 @@ local prompts = import "prompts/mixtral.jsonnet"; "-m", $["vertexai-model"], ]) - .with_limits("0.5", "128M") - .with_reservations("0.1", "128M") + .with_limits("0.5", "256M") + .with_reservations("0.1", "256M") .with_volume_mount(cfgVol, "/vertexai"); local containerSet = engine.containers( "text-completion", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ cfgVol, containerSet, + service, ]) }, @@ -54,7 +65,13 @@ local prompts = import "prompts/mixtral.jsonnet"; create:: function(engine) - local cfgVol = engine.configVolume("./vertexai"); + local cfgVol = engine.secretVolume( + "vertexai-creds", + "./vertexai", + { + "private.json": importstr "vertexai/private.json", + } + ); local container = engine.container("text-completion-rag") @@ -78,20 +95,24 @@ local prompts = import "prompts/mixtral.jsonnet"; "-o", "non-persistent://tg/response/text-completion-rag-response", ]) - .with_limits("0.5", "128M") - .with_reservations("0.1", "128M") + .with_limits("0.5", "256M") + .with_reservations("0.1", "256M") .with_volume_mount(cfgVol, "/vertexai"); local containerSet = engine.containers( 
"text-completion-rag", [ container ] ); + local service = + engine.internalService(containerSet) + .with_port(8000, 8000, "metrics"); + engine.resources([ cfgVol, containerSet, + service, ]) - } } + prompts diff --git a/templates/config-to-gcp-k8s.jsonnet b/templates/config-to-gcp-k8s.jsonnet new file mode 100644 index 00000000..4ba0325e --- /dev/null +++ b/templates/config-to-gcp-k8s.jsonnet @@ -0,0 +1,52 @@ + +local engine = import "k8s.jsonnet"; +local decode = import "decode-config.jsonnet"; +local components = import "components.jsonnet"; + +// Import config +local config = import "config.json"; + +// Produce patterns from config +local patterns = decode(config); + +local ns = { + apiVersion: "v1", + kind: "Namespace", + metadata: { + name: "trustgraph", + }, + "spec": { + }, +}; + +local sc = { + apiVersion: "storage.k8s.io/v1", + kind: "StorageClass", + metadata: { + name: "tg", + }, + provisioner: "pd.csi.storage.gke.io", + parameters: { + type: "pd-balanced", + "csi.storage.k8s.io/fstype": "ext4", + }, + reclaimPolicy: "Delete", + volumeBindingMode: "WaitForFirstConsumer", +}; + +//patterns["pulsar"].create(engine) + +// Extract resources using the engine +local resources = std.flattenArrays([ + p.create(engine) for p in std.objectValues(patterns) +]); + +local resourceList = { + apiVersion: "v1", + kind: "List", + items: [ns, sc] + resources, +}; + + +resourceList + diff --git a/templates/docker-compose.jsonnet b/templates/docker-compose.jsonnet index 32697c66..e1733b78 100644 --- a/templates/docker-compose.jsonnet +++ b/templates/docker-compose.jsonnet @@ -26,7 +26,7 @@ function(vol, mnt) self + { volumes: super.volumes + [{ - volume: vol.name, mount: mnt + volume: vol, mount: mnt }] }, @@ -70,7 +70,7 @@ (if std.length(container.volumes) > 0 then { volumes: [ - "%s:%s" % [vol.volume, vol.mount] + "%s:%s" % [vol.volume.name, vol.mount] for vol in container.volumes ] } @@ -81,6 +81,21 @@ }, + internalService:: function(containers) + { + + local service = 
self, + + name: containers.name, + + with_port:: function(src, dest, name) + self + { port: [src, dest] }, + + add:: function() { + } + + }, + service:: function(containers) { @@ -88,7 +103,8 @@ name: containers.name, - with_port:: function(src, dest) self + { port: [src, dest] }, + with_port:: function(src, dest, name) + self + { port: [src, dest] }, add:: function() { } @@ -112,13 +128,26 @@ }, - // FIXME: For K8s - configVolume:: function(name) + configVolume:: function(name, dir, parts) { local volume = self, - name: name, + name: dir, + + with_size:: function(size) self + { size: size }, + + add:: function() { + } + + }, + + secretVolume:: function(name, dir, parts) + { + + local volume = self, + + name: dir, with_size:: function(size) self + { size: size }, diff --git a/templates/k8s.jsonnet b/templates/k8s.jsonnet new file mode 100644 index 00000000..cbde28d0 --- /dev/null +++ b/templates/k8s.jsonnet @@ -0,0 +1,330 @@ +{ + + container:: function(name) + { + + local container = self, + + name: name, + limits: {}, + reservations: {}, + ports: [], + volumes: [], + + with_image:: function(x) self + { image: x }, + + with_command:: function(x) self + { command: x }, + + with_environment:: function(x) self + { environment: x }, + + with_limits:: function(c, m) self + { limits: { cpu: c, memory: m } }, + + with_reservations:: + function(c, m) self + { reservations: { cpu: c, memory: m } }, + + with_volume_mount:: + function(vol, mnt) + self + { + volumes: super.volumes + [{ + volume: vol, mount: mnt + }] + }, + + with_port:: + function(src, dest, name) self + { + ports: super.ports + [ + { src: src, dest: dest, name : name } + ] + }, + + add:: function() [ + + { + apiVersion: "apps/v1", + kind: "Deployment", + metadata: { + name: container.name, + namespace: "trustgraph", + labels: { + app: container.name + } + }, + spec: { + replicas: 1, + selector: { + matchLabels: { + app: container.name, + } + }, + template: { + metadata: { + labels: { + app: container.name, + 
} + }, + spec: { + containers: [ + { + name: container.name, + image: container.image, + securityContext: { + // fsGroup: 1234 + // runAsUser: 65534 + // runAsGroup: 65534 + // runAsNonRoot: true + runAsUser: 0, + runAsGroup: 0, + // runAsNonRoot: true, + // readOnlyRootFilesystem: true, + }, + resources: { + requests: container.reservations, + limits: container.limits + }, + } + ( + if std.length(container.ports) > 0 then + { + ports: [ + { + hostPort: port.src, + containerPort: port.dest, + } + for port in container.ports + ] + } else + {}) + + + (if std.objectHas(container, "command") then + { command: container.command } + else {}) + + (if std.objectHas(container, "environment") then + { env: [ { + name: e.key, value: e.value + } + for e in + std.objectKeysValues( + container.environment + ) + ] + } + else {}) + + + (if std.length(container.volumes) > 0 then + { + volumeMounts: [ + { + mountPath: vol.mount, + name: vol.volume.name, + } + for vol in container.volumes + ] + } + + else + {} + ) + ], + volumes: [ + vol.volume.volRef() + for vol in container.volumes + + ] + } + }, + } + {} + + } + + ] + + }, + + // Just an alias + internalService:: self.service, + + service:: function(containers) + { + + local service = self, + + name: containers.name, + + ports: [], + + with_port:: + function(src, dest, name) + self + { + ports: super.ports + [ + { src: src, dest: dest, name: name } + ] + }, + + add:: function() [ + + { + + apiVersion: "v1", + kind: "Service", + metadata: { + name: service.name, + namespace: "trustgraph", + }, + spec: { + selector: { + app: service.name, + }, + ports: [ + { + port: port.src, + targetPort: port.dest, + name: port.name, + } + for port in service.ports + ], + } + } + ], + + }, + + volume:: function(name) + { + + local volume = self, + + name: name, + + with_size:: function(size) self + { size: size }, + + add:: function() [ +/* + { + + apiVersion: "v1", + kind: "PersistentVolume", + metadata: { + name: volume.name, + labels: { + type: 
"local", + } + }, + spec: { + storageClassName: "tg", + volumeMode: "Filesystem", + capacity: { + storage: volume.size, + }, + accessModes: [ "ReadWriteOnce" ], + persistentVolumeReclaimPolicy: "Delete", + } + }, +*/ + { + apiVersion: "v1", + kind: "PersistentVolumeClaim", + metadata: { + name: volume.name, + namespace: "trustgraph", + }, + spec: { + storageClassName: "tg", + accessModes: [ "ReadWriteOnce" ], + resources: { + requests: { + storage: volume.size, + } + }, +// volumeName: volume.name, + } + } + ], + + volRef:: function() { + name: volume.name, + persistentVolumeClaim: { claimName: volume.name }, + } + + }, + + configVolume:: function(name, dir, parts) + { + + local volume = self, + + name: name, + + with_size:: function(size) self + { size: size }, + + add:: function() [ + { + apiVersion: "v1", + kind: "ConfigMap", + metadata: { + name: volume.name, + namespace: "trustgraph", + }, + data: parts + }, + ], + + + volRef:: function() { + name: volume.name, + configMap: { name: volume.name }, + } + + }, + + secretVolume:: function(name, dir, parts) + { + + local volume = self, + + name: name, + + with_size:: function(size) self + { size: size }, + + add:: function() [ + { + apiVersion: "v1", + kind: "Secret", + metadata: { + name: volume.name, + namespace: "trustgraph", + }, + data: { + [item.key]: std.base64(item.value) + for item in std.objectKeysValues(parts) + } + }, + ], + + volRef:: function() { + name: volume.name, + secret: { secretName: volume.name }, + } + + }, + + containers:: function(name, containers) + { + + local cont = self, + + name: name, + containers: containers, + + add:: function() std.flattenArrays( + [ c.add() for c in cont.containers ] + ), + + }, + + resources:: function(res) + + std.flattenArrays( + [ c.add() for c in res ] + ), + +} + diff --git a/templates/stores/cassandra.jsonnet b/templates/stores/cassandra.jsonnet index f3d27025..c501e1f9 100644 --- a/templates/stores/cassandra.jsonnet +++ b/templates/stores/cassandra.jsonnet 
@@ -26,7 +26,7 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(9042, 9042); + .with_port(9042, 9042, "api"); engine.resources([ vol, diff --git a/templates/stores/milvus.jsonnet b/templates/stores/milvus.jsonnet index eef91172..888a83a9 100644 --- a/templates/stores/milvus.jsonnet +++ b/templates/stores/milvus.jsonnet @@ -37,7 +37,7 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(2379, 2379); + .with_port(2379, 2379, 30379, "api"); engine.resources([ vol, @@ -78,7 +78,7 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(9001, 9001); + .with_port(9001, 9001, "api"); engine.resources([ vol, @@ -116,8 +116,8 @@ local images = import "values/images.jsonnet"; local service = engine.service(containerSet) - .with_port(9091, 9091) - .with_port(19530, 19530); + .with_port(9091, 9091, "api") + .with_port(19530, 19530, "api2"); engine.resources([ vol, diff --git a/templates/stores/neo4j.jsonnet b/templates/stores/neo4j.jsonnet index 4d74d73d..55cccc5f 100644 --- a/templates/stores/neo4j.jsonnet +++ b/templates/stores/neo4j.jsonnet @@ -28,10 +28,10 @@ local images = import "values/images.jsonnet"; "neo4j", [ container ] ); + local service = + engine.service(containerSet) + .with_port(7474, 7474, "api") + .with_port(7687, 7687, "api2"); + engine.resources([ vol, containerSet, - ]) + service, + ]) }, diff --git a/templates/stores/qdrant.jsonnet b/templates/stores/qdrant.jsonnet index 4e2ce40a..e8443b73 100644 --- a/templates/stores/qdrant.jsonnet +++ b/templates/stores/qdrant.jsonnet @@ -22,9 +22,9 @@ local images = import "values/images.jsonnet"; "qdrant", [ container ] ); + local service = + engine.service(containerSet) + .with_port(6333, 6333, "api") + .with_port(6334, 6334, "api2"); + engine.resources([ vol, containerSet, + service, ]) }, diff --git a/templates/values/version.jsonnet
b/templates/values/version.jsonnet new file mode 100644 index 00000000..e127b65d --- /dev/null +++ b/templates/values/version.jsonnet @@ -0,0 +1 @@ +"0.9.3" diff --git a/tg-launch-azure-cassandra.yaml b/tg-launch-azure-cassandra.yaml index 39d8c539..d6eb9207 100644 --- a/tg-launch-azure-cassandra.yaml +++ b/tg-launch-azure-cassandra.yaml @@ -46,10 +46,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -69,9 +69,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -101,15 +101,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 
3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -171,7 +171,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -264,24 +264,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -470,6 +471,5 @@ "cassandra": {} "grafana-storage": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-azure-neo4j.yaml b/tg-launch-azure-neo4j.yaml index a0dee80c..12809976 100644 --- a/tg-launch-azure-neo4j.yaml +++ b/tg-launch-azure-neo4j.yaml @@ -29,10 +29,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -52,9 +52,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - 
"./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -84,15 +84,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -172,7 +172,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -265,24 +265,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": 
"1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -471,6 +472,5 @@ "grafana-storage": {} "neo4j": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-bedrock-cassandra.yaml b/tg-launch-bedrock-cassandra.yaml index 5d218d17..3aa98042 100644 --- a/tg-launch-bedrock-cassandra.yaml +++ b/tg-launch-bedrock-cassandra.yaml @@ -46,10 +46,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -69,9 +69,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -101,15 +101,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin 
--admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -171,7 +171,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -264,24 +264,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -478,6 +479,5 @@ "cassandra": {} "grafana-storage": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-bedrock-neo4j.yaml b/tg-launch-bedrock-neo4j.yaml index 8de30c3b..a5963277 100644 --- a/tg-launch-bedrock-neo4j.yaml +++ b/tg-launch-bedrock-neo4j.yaml @@ -29,10 +29,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" 
"restart": "on-failure:100" "grafana": @@ -52,9 +52,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -84,15 +84,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -172,7 +172,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -265,24 +265,25 @@ "image": 
"docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -479,6 +480,5 @@ "grafana-storage": {} "neo4j": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-claude-cassandra.yaml b/tg-launch-claude-cassandra.yaml index 22772ada..dd2aea90 100644 --- a/tg-launch-claude-cassandra.yaml +++ b/tg-launch-claude-cassandra.yaml @@ -46,10 +46,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -69,9 +69,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -101,15 +101,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url 
http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -171,7 +171,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -264,24 +264,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -466,6 +467,5 @@ "cassandra": {} "grafana-storage": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-claude-neo4j.yaml 
b/tg-launch-claude-neo4j.yaml index cec0f747..abf2e8d2 100644 --- a/tg-launch-claude-neo4j.yaml +++ b/tg-launch-claude-neo4j.yaml @@ -29,10 +29,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -52,9 +52,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -84,15 +84,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": 
"docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -172,7 +172,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -265,24 +265,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -467,6 +468,5 @@ "grafana-storage": {} "neo4j": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-cohere-cassandra.yaml b/tg-launch-cohere-cassandra.yaml index 906a5c64..f92789c0 100644 --- a/tg-launch-cohere-cassandra.yaml +++ b/tg-launch-cohere-cassandra.yaml @@ -46,10 +46,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -69,9 +69,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - 
"./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -101,15 +101,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -171,7 +171,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -264,24 +264,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": 
"docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -462,6 +463,5 @@ "cassandra": {} "grafana-storage": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-cohere-neo4j.yaml b/tg-launch-cohere-neo4j.yaml index 0dc2bd12..77b6151a 100644 --- a/tg-launch-cohere-neo4j.yaml +++ b/tg-launch-cohere-neo4j.yaml @@ -29,10 +29,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -52,9 +52,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -84,15 +84,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; 
pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -172,7 +172,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -265,24 +265,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -463,6 +464,5 @@ "grafana-storage": {} "neo4j": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-ollama-cassandra.yaml b/tg-launch-ollama-cassandra.yaml index 51fe0ca6..3929fb42 100644 --- a/tg-launch-ollama-cassandra.yaml +++ b/tg-launch-ollama-cassandra.yaml @@ -46,10 +46,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -69,9 +69,9 @@ "restart": "on-failure:100" "volumes": - 
"grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -101,15 +101,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -171,7 +171,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -216,24 +216,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + 
"command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -414,6 +415,5 @@ "cassandra": {} "grafana-storage": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-ollama-neo4j.yaml b/tg-launch-ollama-neo4j.yaml index ccbea47e..0c3eb43a 100644 --- a/tg-launch-ollama-neo4j.yaml +++ b/tg-launch-ollama-neo4j.yaml @@ -29,10 +29,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -52,9 +52,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -84,15 +84,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && 
pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -172,7 +172,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -217,24 +217,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -415,6 +416,5 @@ "grafana-storage": {} "neo4j": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-openai-cassandra.yaml b/tg-launch-openai-cassandra.yaml index 4415cff3..46921ae2 100644 --- a/tg-launch-openai-cassandra.yaml +++ b/tg-launch-openai-cassandra.yaml @@ -46,10 
+46,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -69,9 +69,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -101,15 +101,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -171,7 +171,7 @@ - "9090:9090" "restart": 
"on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -264,24 +264,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -470,6 +471,5 @@ "cassandra": {} "grafana-storage": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-openai-neo4j.yaml b/tg-launch-openai-neo4j.yaml index 6a62caba..c0ffdfee 100644 --- a/tg-launch-openai-neo4j.yaml +++ b/tg-launch-openai-neo4j.yaml @@ -29,10 +29,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -52,9 +52,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -84,15 +84,15 @@ "command": - "sh" - "-c" 
- - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -172,7 +172,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -265,24 +265,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -471,6 +472,5 @@ 
"grafana-storage": {} "neo4j": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-vertexai-cassandra.yaml b/tg-launch-vertexai-cassandra.yaml index 59a26c07..05adee9a 100644 --- a/tg-launch-vertexai-cassandra.yaml +++ b/tg-launch-vertexai-cassandra.yaml @@ -46,10 +46,10 @@ "resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -69,9 +69,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -101,15 +101,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" 
"deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -171,7 +171,7 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -264,24 +264,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -420,10 +421,10 @@ "resources": "limits": "cpus": "0.5" - "memory": "128M" + "memory": "256M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "256M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "volumes": @@ -451,10 +452,10 @@ "resources": "limits": "cpus": "0.5" - "memory": "128M" + "memory": "256M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "256M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "volumes": @@ -478,6 +479,5 @@ "cassandra": {} "grafana-storage": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-launch-vertexai-neo4j.yaml b/tg-launch-vertexai-neo4j.yaml index 83c7c681..ed0f74dd 100644 --- a/tg-launch-vertexai-neo4j.yaml +++ b/tg-launch-vertexai-neo4j.yaml @@ -29,10 +29,10 @@ 
"resources": "limits": "cpus": "1.0" - "memory": "256M" + "memory": "400M" "reservations": "cpus": "0.5" - "memory": "256M" + "memory": "400M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "grafana": @@ -52,9 +52,9 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "graph-rag": "command": - "graph-rag" @@ -84,15 +84,15 @@ "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "kg-extract-definitions": @@ -172,7 +172,7 @@ - "9090:9090" "restart": 
"on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "prompt": "command": @@ -265,24 +265,25 @@ "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -421,10 +422,10 @@ "resources": "limits": "cpus": "0.5" - "memory": "128M" + "memory": "256M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "256M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "volumes": @@ -452,10 +453,10 @@ "resources": "limits": "cpus": "0.5" - "memory": "128M" + "memory": "256M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "256M" "image": "docker.io/trustgraph/trustgraph-flow:0.9.3" "restart": "on-failure:100" "volumes": @@ -479,6 +480,5 @@ "grafana-storage": {} "neo4j": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-storage-cassandra.yaml b/tg-storage-cassandra.yaml index 2e358e41..a74fb726 100644 --- a/tg-storage-cassandra.yaml +++ b/tg-storage-cassandra.yaml @@ -33,22 +33,22 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - 
"./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "init-pulsar": "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "prometheus": @@ -65,27 +65,28 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - 
"pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -209,6 +210,5 @@ "cassandra": {} "grafana-storage": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {} diff --git a/tg-storage-neo4j.yaml b/tg-storage-neo4j.yaml index d5bca10c..ea863dba 100644 --- a/tg-storage-neo4j.yaml +++ b/tg-storage-neo4j.yaml @@ -16,22 +16,22 @@ "restart": "on-failure:100" "volumes": - "grafana-storage:/var/lib/grafana" - - "./grafana/dashboard.yml:/etc/grafana/provisioning/dashboards/dashboard.yml" - - "./grafana/datasource.yml:/etc/grafana/provisioning/datasources/datasource.yml" - - "./grafana/dashboard.json:/var/lib/grafana/dashboards/dashboard.json" + - "./grafana/provisioning/:/etc/grafana/provisioning/dashboards/" + - "./grafana/provisioning/:/etc/grafana/provisioning/datasources/" + - "./grafana/dashboards/:/var/lib/grafana/dashboards/" "init-pulsar": "command": - "sh" - "-c" - - "pulsar-admin --admin-url http://pulsar:8080 tenants create tg && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request && pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response && pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response" + - "while true; do pulsar-admin --admin-url http://pulsar:8080 tenants create tg ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/flow ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/request ; pulsar-admin --admin-url http://pulsar:8080 namespaces create tg/response ; pulsar-admin --admin-url http://pulsar:8080 namespaces set-retention --size -1 --time 3m tg/response; sleep 20; done" "deploy": "resources": "limits": - "cpus": "0.5" - "memory": "128M" + "cpus": "1" + "memory": "400M" "reservations": "cpus": "0.1" - "memory": "128M" + "memory": "400M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "restart": "on-failure:100" "neo4j": 
@@ -66,27 +66,28 @@ - "9090:9090" "restart": "on-failure:100" "volumes": - - "./prometheus:/etc/prometheus" + - "./prometheus:/etc/prometheus/" - "prometheus-data:/prometheus" "pulsar": - "command": "bin/pulsar standalone" + "command": + - "bin/pulsar" + - "standalone" "deploy": "resources": "limits": - "cpus": "1.0" - "memory": "900M" + "cpus": "2.0" + "memory": "1500M" "reservations": - "cpus": "0.5" - "memory": "900M" + "cpus": "1.0" + "memory": "1500M" "environment": - "PULSAR_MEM": "-Xms700M -Xmx700M" + "PULSAR_MEM": "-Xms600M -Xmx600M" "image": "docker.io/apachepulsar/pulsar:3.3.1" "ports": - "6650:6650" - "8080:8080" "restart": "on-failure:100" "volumes": - - "pulsar-conf:/pulsar/conf" - "pulsar-data:/pulsar/data" "qdrant": "deploy": @@ -210,6 +211,5 @@ "grafana-storage": {} "neo4j": {} "prometheus-data": {} - "pulsar-conf": {} "pulsar-data": {} "qdrant": {}