LLM dynamic settings, using the llm-model and llm-rag-model parameters to a flow (#531)

* Ported LLMs to dynamic models
cybermaggedon 2025-09-24 16:36:25 +01:00 committed by GitHub
parent 9a34ab1b93
commit 7a3bfad826
15 changed files with 266 additions and 143 deletions
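
Every provider below is ported the same way: the constructor's model setting becomes a default_model, and generate_content gains an optional per-request model argument that falls back to it. A minimal sketch of the shared pattern (illustrative only, not any single provider verbatim):

    class Processor:

        def __init__(self, model="default-model"):
            # The configured model is now only a default, not a fixed choice
            self.default_model = model

        async def generate_content(self, system, prompt, model=None):
            # Use the per-request model if the caller supplied one,
            # otherwise fall back to the configured default
            model_name = model or self.default_model
            return f"[{model_name}] {system}\n\n{prompt}"

So generate_content(system, prompt) keeps the old behaviour, while generate_content(system, prompt, model="other-model") switches model for a single request.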


@@ -32,7 +32,7 @@ class Processor(LlmService):
         token = params.get("token", default_token)
         temperature = params.get("temperature", default_temperature)
         max_output = params.get("max_output", default_max_output)
-        model = default_model
+        model = params.get("model", default_model)

         if endpoint is None:
             raise RuntimeError("Azure endpoint not specified")
@@ -53,7 +53,7 @@ class Processor(LlmService):
         self.token = token
         self.temperature = temperature
         self.max_output = max_output
-        self.model = model
+        self.default_model = model

     def build_prompt(self, system, content):
@@ -100,7 +100,12 @@ class Processor(LlmService):
         return result

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         try:
@@ -125,7 +130,7 @@ class Processor(LlmService):
                 text = resp,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return resp


@@ -54,7 +54,7 @@ class Processor(LlmService):
         self.temperature = temperature
         self.max_output = max_output
-        self.model = model
+        self.default_model = model

         self.openai = AzureOpenAI(
             api_key=token,
@@ -62,14 +62,19 @@ class Processor(LlmService):
             azure_endpoint = endpoint,
         )

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         prompt = system + "\n\n" + prompt

         try:
             resp = self.openai.chat.completions.create(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {
                         "role": "user",
@@ -97,7 +102,7 @@ class Processor(LlmService):
                 text = resp.choices[0].message.content,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return r


@@ -41,19 +41,24 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.claude = anthropic.Anthropic(api_key=api_key)
         self.temperature = temperature
         self.max_output = max_output

         logger.info("Claude LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         try:
             response = message = self.claude.messages.create(
-                model=self.model,
+                model=model_name,
                 max_tokens=self.max_output,
                 temperature=self.temperature,
                 system = system,
@@ -81,7 +86,7 @@ class Processor(LlmService):
                 text = resp,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return resp


@@ -39,18 +39,23 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.cohere = cohere.Client(api_key=api_key)

         logger.info("Cohere LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         try:
-            output = self.cohere.chat(
-                model=self.model,
+            output = self.cohere.chat(
+                model=model_name,
                 message=prompt,
                 preamble = system,
                 temperature=self.temperature,
@@ -71,7 +76,7 @@ class Processor(LlmService):
                 text = resp,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return resp


@@ -53,10 +53,13 @@ class Processor(LlmService):
         )

         self.client = genai.Client(api_key=api_key)
-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.max_output = max_output

+        # Cache for generation configs per model
+        self.generation_configs = {}
+
         block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH
         self.safety_settings = [
@@ -83,22 +86,36 @@ class Processor(LlmService):
         logger.info("GoogleAIStudio LLM service initialized")

-    async def generate_content(self, system, prompt):
-
-        generation_config = types.GenerateContentConfig(
-            temperature = self.temperature,
-            top_p = 1,
-            top_k = 40,
-            max_output_tokens = self.max_output,
-            response_mime_type = "text/plain",
-            system_instruction = system,
-            safety_settings = self.safety_settings,
-        )
+    def _get_or_create_config(self, model_name):
+        """Get cached generation config or create new one"""
+        if model_name not in self.generation_configs:
+            logger.info(f"Creating generation config for '{model_name}'")
+            self.generation_configs[model_name] = types.GenerateContentConfig(
+                temperature = self.temperature,
+                top_p = 1,
+                top_k = 40,
+                max_output_tokens = self.max_output,
+                response_mime_type = "text/plain",
+                safety_settings = self.safety_settings,
+            )
+        return self.generation_configs[model_name]
+
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")
+
+        generation_config = self._get_or_create_config(model_name)
+        # Set system instruction per request (can't be cached)
+        generation_config.system_instruction = system

         try:
             response = self.client.models.generate_content(
-                model=self.model,
+                model=model_name,
                 config=generation_config,
                 contents=prompt,
             )
@@ -114,7 +131,7 @@ class Processor(LlmService):
                 text = resp,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return resp
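
The GoogleAIStudio port goes slightly further: sampling settings live in a types.GenerateContentConfig object, so one config is built and cached per model name, and only the request-specific system_instruction is set on each call. A toy sketch of that memoization, using a plain dict and a stand-in Config class rather than the Google SDK:

    from types import SimpleNamespace as Config   # stand-in for GenerateContentConfig

    configs = {}   # one cached config per model name

    def get_or_create_config(model_name, temperature=0.0):
        # Build the model-independent parts once per model
        if model_name not in configs:
            configs[model_name] = Config(temperature=temperature,
                                         system_instruction=None)
        return configs[model_name]

    cfg = get_or_create_config("example-model")   # hypothetical model name
    cfg.system_instruction = "You are terse."     # varies per request, set last

The trade-off, as the diff's own comment notes, is that the system instruction cannot live in the cache: the cached object is mutated per request, which is fine so long as requests to a given model are handled one at a time.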


@@ -39,7 +39,7 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.llamafile=llamafile
         self.temperature = temperature
         self.max_output = max_output
@@ -50,14 +50,19 @@ class Processor(LlmService):
         logger.info("Llamafile LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         prompt = system + "\n\n" + prompt

         try:
             resp = self.openai.chat.completions.create(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {"role": "user", "content": prompt}
                 ]
@@ -82,7 +87,7 @@ class Processor(LlmService):
                 text = resp.choices[0].message.content,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = "llama.cpp",
+                model = model_name,
             )

         return resp


@@ -39,7 +39,7 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.url = url + "v1/"
         self.temperature = temperature
         self.max_output = max_output
@@ -50,7 +50,12 @@ class Processor(LlmService):
         logger.info("LMStudio LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         prompt = system + "\n\n" + prompt
@@ -59,7 +64,7 @@ class Processor(LlmService):
             logger.debug(f"Prompt: {prompt}")

             resp = self.openai.chat.completions.create(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {"role": "user", "content": prompt}
                 ]
@@ -86,7 +91,7 @@ class Processor(LlmService):
                 text = resp.choices[0].message.content,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return resp


@@ -41,21 +41,26 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.max_output = max_output

         self.mistral = Mistral(api_key=api_key)

         logger.info("Mistral LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         prompt = system + "\n\n" + prompt

         try:
             resp = self.mistral.chat.complete(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {
                         "role": "user",
@@ -87,7 +92,7 @@ class Processor(LlmService):
                 text = resp.choices[0].message.content,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return resp


@@ -33,16 +33,21 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.llm = Client(host=ollama)

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         prompt = system + "\n\n" + prompt

         try:
-            response = self.llm.generate(self.model, prompt)
+            response = self.llm.generate(model_name, prompt)
             response_text = response['response']
             logger.debug("Sending response...")
@@ -55,7 +60,7 @@ class Processor(LlmService):
                 text = response_text,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return resp


@@ -47,7 +47,7 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.max_output = max_output
@@ -58,14 +58,19 @@ class Processor(LlmService):
         logger.info("OpenAI LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         prompt = system + "\n\n" + prompt

         try:
             resp = self.openai.chat.completions.create(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {
                         "role": "user",
@@ -97,7 +102,7 @@ class Processor(LlmService):
                 text = resp.choices[0].message.content,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

         return resp


@@ -30,32 +30,40 @@ class Processor(LlmService):
         base_url = params.get("url", default_base_url)
         temperature = params.get("temperature", default_temperature)
         max_output = params.get("max_output", default_max_output)
+        model = params.get("model", "tgi")

         super(Processor, self).__init__(
             **params | {
                 "temperature": temperature,
                 "max_output": max_output,
                 "url": base_url,
+                "model": model,
             }
         )

         self.base_url = base_url
         self.temperature = temperature
         self.max_output = max_output
+        self.default_model = model

         self.session = aiohttp.ClientSession()

         logger.info(f"Using TGI service at {base_url}")
         logger.info("TGI LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         headers = {
             "Content-Type": "application/json",
         }

         request = {
-            "model": "tgi",
+            "model": model_name,
             "messages": [
                 {
                     "role": "system",
@@ -96,7 +104,7 @@ class Processor(LlmService):
                 text = ans,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = "tgi",
+                model = model_name,
             )

         return resp


@@ -45,21 +45,26 @@ class Processor(LlmService):
         self.base_url = base_url
         self.temperature = temperature
         self.max_output = max_output
-        self.model = model
+        self.default_model = model

         self.session = aiohttp.ClientSession()

         logger.info(f"Using vLLM service at {base_url}")
         logger.info("vLLM LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")

         headers = {
             "Content-Type": "application/json",
         }

         request = {
-            "model": self.model,
+            "model": model_name,
             "prompt": system + "\n\n" + prompt,
             "max_tokens": self.max_output,
             "temperature": self.temperature,
@@ -91,7 +96,7 @@ class Processor(LlmService):
                 text = ans,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model,
+                model = model_name,
             )

         return resp
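
The commit title mentions llm-model and llm-rag-model as flow parameters; the flow-side wiring is not part of this diff. Assuming those parameters are forwarded to the processors as the per-request model argument, a hypothetical flow configuration might look like:

    # Hypothetical only: the parameter names come from the commit title,
    # and the model names are placeholders, not values from this repo.
    flow_params = {
        "llm-model": "example-completion-model",   # plain completion requests
        "llm-rag-model": "example-rag-model",      # RAG answer generation
    }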