Update LLMs to LlmService API (#353)

cybermaggedon authored 2025-04-25 19:57:42 +01:00; committed by GitHub
parent 099018e103
commit 5af7909122
13 changed files with 297 additions and 969 deletions
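
In outline, the migration moves the per-module plumbing — queue and schema wiring, the Prometheus histogram, response/error publishing and acknowledgement — into the LlmService base class, so a model module now only implements generate_content() and returns an LlmResult. A minimal sketch of the resulting shape, pieced together from the diff below; the LlmService/LlmResult names and signatures are taken from this commit, while the package path and the stubbed model call are illustrative assumptions:

    from trustgraph.base import LlmService, LlmResult   # the diff uses a relative import; absolute path assumed

    default_ident = "text-completion"
    default_model = 'mistral.mistral-large-2407-v1:0'

    class Processor(LlmService):

        def __init__(self, **params):
            model = params.get("model", default_model)
            # No queue/schema/subscriber wiring any more; only model
            # parameters are passed up to the base class.
            super(Processor, self).__init__(**params | { "model": model })
            self.model = model

        async def generate_content(self, system, prompt):
            # The base class consumes requests and publishes the result;
            # the subclass only performs the model call.
            text = f"[{self.model}] {prompt}"    # stand-in for the Bedrock call
            return LlmResult(text=text, in_token=0, out_token=0, model=self.model)

        @staticmethod
        def add_args(parser):
            LlmService.add_args(parser)          # replaces ConsumerProducer.add_args

    def run():
        Processor.launch(default_ident, __doc__)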


@@ -6,22 +6,14 @@ Input is prompt, output is response. Mistral is default.
 import boto3
 import json
-from prometheus_client import Histogram
 import os
 import enum

-from .... schema import TextCompletionRequest, TextCompletionResponse, Error
-from .... schema import text_completion_request_queue
-from .... schema import text_completion_response_queue
-from .... log_level import LogLevel
-from .... base import ConsumerProducer
 from .... exceptions import TooManyRequests
+from .... base import LlmService, LlmResult

-module = "text-completion"
+default_ident = "text-completion"

-default_input_queue = text_completion_request_queue
-default_output_queue = text_completion_response_queue
-default_subscriber = module
 default_model = 'mistral.mistral-large-2407-v1:0'
 default_temperature = 0.0
 default_max_output = 2048
@@ -149,16 +141,12 @@ class Cohere(ModelHandler):
 Default=Mistral

-class Processor(ConsumerProducer):
+class Processor(LlmService):

     def __init__(self, **params):

-        print(params)
-
-        input_queue = params.get("input_queue", default_input_queue)
-        output_queue = params.get("output_queue", default_output_queue)
-        subscriber = params.get("subscriber", default_subscriber)
         model = params.get("model", default_model)
         temperature = params.get("temperature", default_temperature)
         max_output = params.get("max_output", default_max_output)
@@ -185,30 +173,12 @@ class Processor(ConsumerProducer):
         super(Processor, self).__init__(
             **params | {
-                "input_queue": input_queue,
-                "output_queue": output_queue,
-                "subscriber": subscriber,
-                "input_schema": TextCompletionRequest,
-                "output_schema": TextCompletionResponse,
                 "model": model,
                 "temperature": temperature,
                 "max_output": max_output,
             }
         )

-        if not hasattr(__class__, "text_completion_metric"):
-            __class__.text_completion_metric = Histogram(
-                'text_completion_duration',
-                'Text completion duration (seconds)',
-                buckets=[
-                    0.25, 0.5, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0,
-                    8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
-                    17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0,
-                    30.0, 35.0, 40.0, 45.0, 50.0, 60.0, 80.0, 100.0,
-                    120.0
-                ]
-            )

         self.model = model
         self.temperature = temperature
         self.max_output = max_output
@@ -257,30 +227,21 @@ class Processor(ConsumerProducer):
         return Default

-    async def handle(self, msg):
-
-        v = msg.value()
-
-        # Sender-produced ID
-        id = msg.properties()["id"]
-
-        print(f"Handling prompt {id}...", flush=True)
+    async def generate_content(self, system, prompt):

         try:

-            promptbody = self.variant.encode_request(v.system, v.prompt)
+            promptbody = self.variant.encode_request(system, prompt)

             accept = 'application/json'
             contentType = 'application/json'

-            with __class__.text_completion_metric.time():
-                response = self.bedrock.invoke_model(
-                    body=promptbody,
-                    modelId=self.model,
-                    accept=accept,
-                    contentType=contentType
-                )
+            response = self.bedrock.invoke_model(
+                body=promptbody,
+                modelId=self.model,
+                accept=accept,
+                contentType=contentType
+            )

             # Response structure decode
             outputtext = self.variant.decode_response(response)
@@ -293,18 +254,14 @@ class Processor(ConsumerProducer):
             print(f"Input Tokens: {inputtokens}", flush=True)
             print(f"Output Tokens: {outputtokens}", flush=True)

-            print("Send response...", flush=True)
-
-            r = TextCompletionResponse(
-                error=None,
-                response=outputtext,
-                in_token=inputtokens,
-                out_token=outputtokens,
-                model=str(self.model),
+            resp = LlmResult(
+                text = outputtext,
+                in_token = inputtokens,
+                out_token = outputtokens,
+                model = self.model
             )

-            await self.send(r, properties={"id": id})
-
-            print("Done.", flush=True)
+            return resp

         except self.bedrock.exceptions.ThrottlingException as e:
@@ -319,31 +276,12 @@ class Processor(ConsumerProducer):
             print(type(e))
             print(f"Exception: {e}")

-            print("Send error response...", flush=True)
-
-            r = TextCompletionResponse(
-                error=Error(
-                    type = "llm-error",
-                    message = str(e),
-                ),
-                response=None,
-                in_token=None,
-                out_token=None,
-                model=None,
-            )
-
-            await self.send(r, properties={"id": id})
-
-            self.consumer.acknowledge(msg)
-
             raise e

     @staticmethod
     def add_args(parser):

-        ConsumerProducer.add_args(
-            parser, default_input_queue, default_subscriber,
-            default_output_queue,
-        )
+        LlmService.add_args(parser)

         parser.add_argument(
             '-m', '--model',
@@ -391,5 +329,4 @@ class Processor(ConsumerProducer):
 def run():

-    Processor.launch(module, __doc__)
+    Processor.launch(default_ident, __doc__)
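
For contrast, the per-message flow this commit deletes from the subclass looked roughly like this, reassembled from the removed lines above (schematic: the token extraction between decode_response() and the reply, and the error path, are elided; presumably the LlmService base class now performs the equivalent steps once, for every backend):

    async def handle(self, msg):
        v = msg.value()
        id = msg.properties()["id"]                  # sender-produced ID
        promptbody = self.variant.encode_request(v.system, v.prompt)
        with __class__.text_completion_metric.time():
            response = self.bedrock.invoke_model(
                body=promptbody, modelId=self.model,
                accept='application/json', contentType='application/json',
            )
        outputtext = self.variant.decode_response(response)
        r = TextCompletionResponse(
            error=None, response=outputtext,
            in_token=None, out_token=None,           # counts extracted in code not shown here
            model=str(self.model),
        )
        await self.send(r, properties={"id": id})    # each module published its own reply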