Added Mistral jsonnet templates

This commit is contained in:
JackColquitt 2025-03-14 18:07:51 -07:00
parent 5f5cf8fd07
commit d676804107
7 changed files with 159 additions and 2 deletions

View file

@ -12,7 +12,7 @@ RUN dnf install -y python3 python3-pip python3-wheel python3-aiohttp \
python3-rdflib
RUN pip3 install --no-cache-dir \
anthropic cohere openai google-generativeai \
anthropic cohere mistralai openai google-generativeai \
ollama \
langchain==0.3.13 langchain-core==0.3.28 \
langchain-text-splitters==0.3.4 \

View file

@ -13,6 +13,7 @@
import "patterns/llm-claude.jsonnet",
import "patterns/llm-cohere.jsonnet",
import "patterns/llm-llamafile.jsonnet",
import "patterns/llm-mistral.jsonnet",
import "patterns/llm-ollama.jsonnet",
import "patterns/llm-openai.jsonnet",
import "patterns/llm-vertexai.jsonnet",

View file

@ -11,6 +11,7 @@
"claude": import "components/claude.jsonnet",
"cohere": import "components/cohere.jsonnet",
"googleaistudio": import "components/googleaistudio.jsonnet",
"mistral": import "components/mistral.jsonnet",
"ollama": import "components/ollama.jsonnet",
"openai": import "components/openai.jsonnet",
"vertexai": import "components/vertexai.jsonnet",
@ -22,6 +23,7 @@
"claude-rag": import "components/claude-rag.jsonnet",
"cohere-rag": import "components/cohere-rag.jsonnet",
"googleaistudio-rag": import "components/googleaistudio-rag.jsonnet",
"mistral-rag": import "components/mistral-rag.jsonnet",
"ollama-rag": import "components/ollama-rag.jsonnet",
"openai-rag": import "components/openai-rag.jsonnet",
"vertexai-rag": import "components/vertexai-rag.jsonnet",

View file

@ -0,0 +1,63 @@
// Deployment component for the Mistral-backed RAG text-completion service.
local base = import "base/base.jsonnet";
local images = import "values/images.jsonnet";
local url = import "values/url.jsonnet";
// NOTE(review): imports the Mixtral prompt templates for a Mistral component —
// presumably deliberate reuse of compatible prompts; confirm.
local prompts = import "prompts/mixtral.jsonnet";
{
// Returns a copy of this object with the hidden field
// "mistral-rag-<key>" overridden, letting callers tune parameters.
with:: function(key, value)
self + {
["mistral-rag-" + key]:: value,
},
// Default generation parameters (hidden fields; override via `with`).
"mistral-rag-max-output-tokens":: 4096,
"mistral-rag-temperature":: 0.0,
"mistral-rag-model":: "ministral-8b-latest",
// Merge the RAG text-completion service definition into the deployment.
"text-completion-rag" +: {
create:: function(engine)
// API key is injected from the "mistral-credentials" secret as
// the MISTRAL_TOKEN environment variable.
local envSecrets = engine.envSecrets("mistral-credentials")
.with_env_var("MISTRAL_TOKEN", "mistral-token");
local containerRag =
engine.container("text-completion-rag")
.with_image(images.trustgraph_flow)
.with_command([
"text-completion-mistral",
"-p",
url.pulsar,
"-x",
std.toString($["mistral-rag-max-output-tokens"]),
"-t",
// temperature is formatted to 3 decimal places
"%0.3f" % $["mistral-rag-temperature"],
"-m",
$["mistral-rag-model"],
// RAG-specific Pulsar request/response topics
"-i",
"non-persistent://tg/request/text-completion-rag",
"-o",
"non-persistent://tg/response/text-completion-rag",
])
.with_env_var_secrets(envSecrets)
.with_limits("0.5", "128M")
.with_reservations("0.1", "128M");
local containerSetRag = engine.containers(
"text-completion-rag", [ containerRag ]
);
// Internal service exposing the metrics port only.
local serviceRag =
engine.internalService(containerSetRag)
.with_port(8080, 8080, "metrics");
engine.resources([
envSecrets,
containerSetRag,
serviceRag,
])
},
} + prompts

View file

@ -0,0 +1,59 @@
// Deployment component for the Mistral text-completion service.
local base = import "base/base.jsonnet";
local images = import "values/images.jsonnet";
local url = import "values/url.jsonnet";
// NOTE(review): imports the Mixtral prompt templates for a Mistral component —
// presumably deliberate reuse of compatible prompts; confirm.
local prompts = import "prompts/mixtral.jsonnet";
{
// Returns a copy of this object with the hidden field
// "mistral-<key>" overridden, letting callers tune parameters.
with:: function(key, value)
self + {
["mistral-" + key]:: value,
},
// Default generation parameters (hidden fields; override via `with`).
"mistral-max-output-tokens":: 4096,
"mistral-temperature":: 0.0,
"mistral-model":: "ministral-8b-latest",
// Merge the text-completion service definition into the deployment.
"text-completion" +: {
create:: function(engine)
// API key is injected from the "mistral-credentials" secret as
// the MISTRAL_TOKEN environment variable.
local envSecrets = engine.envSecrets("mistral-credentials")
.with_env_var("MISTRAL_TOKEN", "mistral-token");
local container =
engine.container("text-completion")
.with_image(images.trustgraph_flow)
.with_command([
"text-completion-mistral",
"-p",
url.pulsar,
"-x",
std.toString($["mistral-max-output-tokens"]),
"-t",
// temperature is formatted to 3 decimal places
"%0.3f" % $["mistral-temperature"],
"-m",
$["mistral-model"],
])
.with_env_var_secrets(envSecrets)
.with_limits("0.5", "128M")
.with_reservations("0.1", "128M");
local containerSet = engine.containers(
"text-completion", [ container ]
);
// Internal service exposing the metrics port only.
local service =
engine.internalService(containerSet)
.with_port(8080, 8080, "metrics");
engine.resources([
envSecrets,
containerSet,
service,
])
},
} + prompts

View file

@ -134,7 +134,7 @@ def generate_all(output, version):
]:
for model in [
# "azure", "azure-openai", "bedrock", "claude", "cohere",
# "googleaistudio", "llamafile",
# "googleaistudio", "llamafile", "mistral",
"ollama",
# "openai", "vertexai",
]:

View file

@ -0,0 +1,32 @@
// Pattern descriptor for the Mistral LLM text-completion endpoint.
// Surfaced in the template UI; `args` become user-configurable parameters
// that map onto the hidden fields of components/mistral.jsonnet.
{
pattern: {
name: "mistral",
icon: "🤖💬",
title: "Add Mistral LLM endpoint for text completion",
// Fixed grammar: "and have an API key to be able to use" -> "and an API key to use".
description: "This pattern integrates a Mistral LLM service for text completion operations. You need a Mistral subscription and an API key to use this service.",
requires: ["pulsar", "trustgraph"],
features: ["llm"],
args: [
{
name: "mistral-max-output-tokens",
label: "Maximum output tokens",
type: "integer",
// Fixed grammar: "number tokens" -> "number of tokens".
description: "Limit on the number of tokens to generate",
default: 4096,
required: true,
},
{
name: "mistral-temperature",
label: "Temperature",
type: "slider",
// Fixed grammar: "Controlling ... balance" -> "Controls the ... balance".
description: "Controls the predictability / creativity balance",
min: 0,
max: 1,
step: 0.05,
// NOTE(review): this UI default (0.5) differs from the component
// default of 0.0 in components/mistral.jsonnet — confirm intended.
default: 0.5,
},
],
category: [ "llm" ],
},
// Component implementing this pattern.
module: "components/mistral.jsonnet",
}