mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-05-02 20:03:19 +02:00
Enable AKS integration of text-completion-azure-openai (#319)
Reconfigure so that the AZURE_TOKEN, AZURE_MODEL and AZURE_ENDPOINT environment variables can be used to set the token/model/endpoint parameters. This allows the service to be deployed in K8s and to use secrets to set these environment variables.
This commit is contained in:
parent
a22bf0f04e
commit
6565adb1ec
1 changed files with 11 additions and 6 deletions
|
|
@@ -23,9 +23,10 @@ default_output_queue = text_completion_response_queue
|
||||||
default_subscriber = module
|
default_subscriber = module
|
||||||
default_temperature = 0.0
|
default_temperature = 0.0
|
||||||
default_max_output = 4192
|
default_max_output = 4192
|
||||||
default_api = "2024-02-15-preview"
|
default_api = "2024-12-01-preview"
|
||||||
default_endpoint = os.getenv("AZURE_ENDPOINT")
|
default_endpoint = os.getenv("AZURE_ENDPOINT", None)
|
||||||
default_token = os.getenv("AZURE_TOKEN")
|
default_token = os.getenv("AZURE_TOKEN", None)
|
||||||
|
default_model = os.getenv("AZURE_MODEL", None)
|
||||||
|
|
||||||
class Processor(ConsumerProducer):
|
class Processor(ConsumerProducer):
|
||||||
|
|
||||||
|
|
@@ -34,12 +35,13 @@ class Processor(ConsumerProducer):
|
||||||
input_queue = params.get("input_queue", default_input_queue)
|
input_queue = params.get("input_queue", default_input_queue)
|
||||||
output_queue = params.get("output_queue", default_output_queue)
|
output_queue = params.get("output_queue", default_output_queue)
|
||||||
subscriber = params.get("subscriber", default_subscriber)
|
subscriber = params.get("subscriber", default_subscriber)
|
||||||
endpoint = params.get("endpoint", default_endpoint)
|
|
||||||
token = params.get("token", default_token)
|
|
||||||
temperature = params.get("temperature", default_temperature)
|
temperature = params.get("temperature", default_temperature)
|
||||||
max_output = params.get("max_output", default_max_output)
|
max_output = params.get("max_output", default_max_output)
|
||||||
model = params.get("model")
|
|
||||||
api = params.get("api_version", default_api)
|
api = params.get("api_version", default_api)
|
||||||
|
endpoint = params.get("endpoint", default_endpoint)
|
||||||
|
token = params.get("token", default_token)
|
||||||
|
model = params.get("model", default_model)
|
||||||
|
|
||||||
if endpoint is None:
|
if endpoint is None:
|
||||||
raise RuntimeError("Azure endpoint not specified")
|
raise RuntimeError("Azure endpoint not specified")
|
||||||
|
|
@@ -177,6 +179,7 @@ class Processor(ConsumerProducer):
|
||||||
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-e', '--endpoint',
|
'-e', '--endpoint',
|
||||||
|
default=default_endpoint,
|
||||||
help=f'LLM model endpoint'
|
help=f'LLM model endpoint'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@@ -188,11 +191,13 @@ class Processor(ConsumerProducer):
|
||||||
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-k', '--token',
|
'-k', '--token',
|
||||||
|
default=default_token,
|
||||||
help=f'LLM model token'
|
help=f'LLM model token'
|
||||||
)
|
)
|
||||||
|
|
||||||
parser.add_argument(
|
parser.add_argument(
|
||||||
'-m', '--model',
|
'-m', '--model',
|
||||||
|
default=default_model,
|
||||||
help=f'LLM model'
|
help=f'LLM model'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Add table
Add a link
Reference in a new issue