From d6768041079ac04e7a6313b9d8738595a7108493 Mon Sep 17 00:00:00 2001 From: JackColquitt Date: Fri, 14 Mar 2025 18:07:51 -0700 Subject: [PATCH] Added Mistral jsonnet templates --- containers/Containerfile.flow | 2 +- templates/all-patterns.jsonnet | 1 + templates/components.jsonnet | 2 + templates/components/mistral-rag.jsonnet | 63 ++++++++++++++++++++++++ templates/components/mistral.jsonnet | 59 ++++++++++++++++++++++ templates/generate-all | 2 +- templates/patterns/llm-mistral.jsonnet | 32 ++++++++++++ 7 files changed, 159 insertions(+), 2 deletions(-) create mode 100644 templates/components/mistral-rag.jsonnet create mode 100644 templates/components/mistral.jsonnet create mode 100644 templates/patterns/llm-mistral.jsonnet diff --git a/containers/Containerfile.flow b/containers/Containerfile.flow index 8d47effe..352e5ac5 100644 --- a/containers/Containerfile.flow +++ b/containers/Containerfile.flow @@ -12,7 +12,7 @@ RUN dnf install -y python3 python3-pip python3-wheel python3-aiohttp \ python3-rdflib RUN pip3 install --no-cache-dir \ - anthropic cohere openai google-generativeai \ + anthropic cohere mistralai openai google-generativeai \ ollama \ langchain==0.3.13 langchain-core==0.3.28 \ langchain-text-splitters==0.3.4 \ diff --git a/templates/all-patterns.jsonnet b/templates/all-patterns.jsonnet index f68f307d..3282be53 100644 --- a/templates/all-patterns.jsonnet +++ b/templates/all-patterns.jsonnet @@ -13,6 +13,7 @@ import "patterns/llm-claude.jsonnet", import "patterns/llm-cohere.jsonnet", import "patterns/llm-llamafile.jsonnet", + import "patterns/llm-mistral.jsonnet", import "patterns/llm-ollama.jsonnet", import "patterns/llm-openai.jsonnet", import "patterns/llm-vertexai.jsonnet", diff --git a/templates/components.jsonnet b/templates/components.jsonnet index 19a52206..ee2ae881 100644 --- a/templates/components.jsonnet +++ b/templates/components.jsonnet @@ -11,6 +11,7 @@ "claude": import "components/claude.jsonnet", "cohere": import 
"components/cohere.jsonnet", "googleaistudio": import "components/googleaistudio.jsonnet", + "mistral": import "components/mistral.jsonnet", "ollama": import "components/ollama.jsonnet", "openai": import "components/openai.jsonnet", "vertexai": import "components/vertexai.jsonnet", @@ -22,6 +23,7 @@ "claude-rag": import "components/claude-rag.jsonnet", "cohere-rag": import "components/cohere-rag.jsonnet", "googleaistudio-rag": import "components/googleaistudio-rag.jsonnet", + "mistral-rag": import "components/mistral-rag.jsonnet", "ollama-rag": import "components/ollama-rag.jsonnet", "openai-rag": import "components/openai-rag.jsonnet", "vertexai-rag": import "components/vertexai-rag.jsonnet", diff --git a/templates/components/mistral-rag.jsonnet b/templates/components/mistral-rag.jsonnet new file mode 100644 index 00000000..12fbe8a5 --- /dev/null +++ b/templates/components/mistral-rag.jsonnet @@ -0,0 +1,63 @@ +local base = import "base/base.jsonnet"; +local images = import "values/images.jsonnet"; +local url = import "values/url.jsonnet"; +local prompts = import "prompts/mixtral.jsonnet"; + +{ + + with:: function(key, value) + self + { + ["mistral-rag-" + key]:: value, + }, + + "mistral-rag-max-output-tokens":: 4096, + "mistral-rag-temperature":: 0.0, + "mistral-rag-model":: "ministral-8b-latest", + + "text-completion-rag" +: { + + create:: function(engine) + + local envSecrets = engine.envSecrets("mistral-credentials") + .with_env_var("MISTRAL_TOKEN", "mistral-token"); + + local containerRag = + engine.container("text-completion-rag") + .with_image(images.trustgraph_flow) + .with_command([ + "text-completion-mistral", + "-p", + url.pulsar, + "-x", + std.toString($["mistral-rag-max-output-tokens"]), + "-t", + "%0.3f" % $["mistral-rag-temperature"], + "-m", + $["mistral-rag-model"], + "-i", + "non-persistent://tg/request/text-completion-rag", + "-o", + "non-persistent://tg/response/text-completion-rag", + ]) + .with_env_var_secrets(envSecrets) + .with_limits("0.5", 
"128M") + .with_reservations("0.1", "128M"); + + local containerSetRag = engine.containers( + "text-completion-rag", [ containerRag ] + ); + + local serviceRag = + engine.internalService(containerSetRag) + .with_port(8080, 8080, "metrics"); + + engine.resources([ + envSecrets, + containerSetRag, + serviceRag, + ]) + + }, + +} + prompts + diff --git a/templates/components/mistral.jsonnet b/templates/components/mistral.jsonnet new file mode 100644 index 00000000..4de332c9 --- /dev/null +++ b/templates/components/mistral.jsonnet @@ -0,0 +1,59 @@ +local base = import "base/base.jsonnet"; +local images = import "values/images.jsonnet"; +local url = import "values/url.jsonnet"; +local prompts = import "prompts/mixtral.jsonnet"; + +{ + + with:: function(key, value) + self + { + ["mistral-" + key]:: value, + }, + + "mistral-max-output-tokens":: 4096, + "mistral-temperature":: 0.0, + "mistral-model":: "ministral-8b-latest", + + "text-completion" +: { + + create:: function(engine) + + local envSecrets = engine.envSecrets("mistral-credentials") + .with_env_var("MISTRAL_TOKEN", "mistral-token"); + + local container = + engine.container("text-completion") + .with_image(images.trustgraph_flow) + .with_command([ + "text-completion-mistral", + "-p", + url.pulsar, + "-x", + std.toString($["mistral-max-output-tokens"]), + "-t", + "%0.3f" % $["mistral-temperature"], + "-m", + $["mistral-model"], + ]) + .with_env_var_secrets(envSecrets) + .with_limits("0.5", "128M") + .with_reservations("0.1", "128M"); + + local containerSet = engine.containers( + "text-completion", [ container ] + ); + + local service = + engine.internalService(containerSet) + .with_port(8080, 8080, "metrics"); + + engine.resources([ + envSecrets, + containerSet, + service, + ]) + + }, + +} + prompts + diff --git a/templates/generate-all b/templates/generate-all index 22c9a5b0..fb1fe917 100755 --- a/templates/generate-all +++ b/templates/generate-all @@ -134,7 +134,7 @@ def generate_all(output, version): ]: for model 
in [ # "azure", "azure-openai", "bedrock", "claude", "cohere", - # "googleaistudio", "llamafile", + # "googleaistudio", "llamafile", "mistral", "ollama", # "openai", "vertexai", ]: diff --git a/templates/patterns/llm-mistral.jsonnet b/templates/patterns/llm-mistral.jsonnet new file mode 100644 index 00000000..11f6de22 --- /dev/null +++ b/templates/patterns/llm-mistral.jsonnet @@ -0,0 +1,32 @@ +{ + pattern: { + name: "mistral", + icon: "🤖💬", + title: "Add Mistral LLM endpoint for text completion", + description: "This pattern integrates a Mistral LLM service for text completion operations. You need a Mistral subscription and an API key to be able to use this service.", + requires: ["pulsar", "trustgraph"], + features: ["llm"], + args: [ + { + name: "mistral-max-output-tokens", + label: "Maximum output tokens", + type: "integer", + description: "Limit on number of tokens to generate", + default: 4096, + required: true, + }, + { + name: "mistral-temperature", + label: "Temperature", + type: "slider", + description: "Controlling predictability / creativity balance", + min: 0, + max: 1, + step: 0.05, + default: 0.5, + }, + ], + category: [ "llm" ], + }, + module: "components/mistral.jsonnet",
+}