// trustgraph/templates/components/openai.jsonnet
//
// Service definitions for the OpenAI text-completion components:
// the plain "text-completion" service and its RAG variant.

local base = import "base/base.jsonnet";
local images = import "values/images.jsonnet";
local url = import "values/url.jsonnet";
local prompts = import "prompts/openai.jsonnet";
{
  // OpenAI API key, substituted from the environment at deploy time.
  "openai-key":: "${OPENAI_KEY}",

  // Maximum number of tokens the model may generate per completion.
  "openai-max-output-tokens":: 4096,

  // Sampling temperature; 0.0 requests deterministic output.
  "openai-temperature":: 0.0,

  // NOTE(review): canonical OpenAI model ids are lower-case
  // ("gpt-3.5-turbo") — confirm the consuming service normalises case.
  "openai-model":: "GPT-3.5-Turbo",

  services +: {

    // Primary OpenAI-backed text-completion service.
    "text-completion": base + {
      image: images.trustgraph,
      command: [
        "text-completion-openai",
        "-p", url.pulsar,
        "-k", $["openai-key"],
        "-x", std.toString($["openai-max-output-tokens"]),
        "-t", std.toString($["openai-temperature"]),
        "-m", $["openai-model"],
      ],
      deploy: {
        resources: {
          limits: { cpus: '0.5', memory: '128M' },
          reservations: { cpus: '0.1', memory: '128M' },
        },
      },
    },

    // RAG variant: identical service, but wired to dedicated Pulsar
    // request/response topics so RAG traffic is kept separate from
    // plain completion traffic.
    "text-completion-rag": base + {
      image: images.trustgraph,
      command: [
        "text-completion-openai",
        "-p", url.pulsar,
        "-k", $["openai-key"],
        "-x", std.toString($["openai-max-output-tokens"]),
        "-t", std.toString($["openai-temperature"]),
        "-m", $["openai-model"],
        "-i", "non-persistent://tg/request/text-completion-rag",
        "-o", "non-persistent://tg/response/text-completion-rag-response",
      ],
      deploy: {
        resources: {
          limits: { cpus: '0.5', memory: '128M' },
          reservations: { cpus: '0.1', memory: '128M' },
        },
      },
    },
  },
} + prompts