LLM dynamic settings, using the llm-model and llm-rag-model parameters to a flow (#531)

* Ported LLMs to dynamic models
Author: cybermaggedon, 2025-09-24 16:36:25 +01:00 (committed by GitHub)
parent 9a34ab1b93
commit 7a3bfad826
15 changed files with 266 additions and 143 deletions
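
For orientation, here is a minimal sketch of how a flow invocation might carry the new per-flow model settings. Only the parameter names llm-model and llm-rag-model come from the commit title; the flow name, request shape, and model identifiers below are hypothetical, not the project's actual flow API.

# Hypothetical flow request carrying the new dynamic LLM settings.
# Only the keys "llm-model" and "llm-rag-model" are taken from the
# commit title; everything else here is illustrative.
flow_request = {
    "flow": "document-rag",                       # assumed flow name
    "question": "What does this service do?",
    "parameters": {
        "llm-model": "mistral-large-latest",      # model for plain generation
        "llm-rag-model": "mistral-small-latest",  # model for RAG generation
    },
}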


@@ -41,21 +41,26 @@ class Processor(LlmService):
             }
         )
 
-        self.model = model
+        self.default_model = model
 
         self.temperature = temperature
         self.max_output = max_output
 
         self.mistral = Mistral(api_key=api_key)
 
         logger.info("Mistral LLM service initialized")
 
-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        logger.debug(f"Using model: {model_name}")
 
         prompt = system + "\n\n" + prompt
 
         try:
 
             resp = self.mistral.chat.complete(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {
                         "role": "user",
@@ -87,7 +92,7 @@ class Processor(LlmService):
                 text = resp.choices[0].message.content,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )
 
             return resp
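
With the change above, callers can override the model per request while existing call sites keep the service-wide default. A runnable sketch of that fallback behaviour, using a stub in place of the real Processor (class and model names here are placeholders, not the project's actual API):

import asyncio

class StubLlm:
    # Stand-in for the Processor above; only the model-fallback logic
    # added by this commit is reproduced here.
    def __init__(self, model):
        self.default_model = model

    async def generate_content(self, system, prompt, model=None):
        # Use provided model or fall back to default, as in the diff
        model_name = model or self.default_model
        return f"[{model_name}] {prompt}"

async def main():
    llm = StubLlm(model="mistral-large-latest")
    # No model argument: falls back to the service default
    print(await llm.generate_content("Be terse.", "Hello"))
    # Explicit model argument: per-call override wins
    print(await llm.generate_content("Be terse.", "Hello",
                                     model="mistral-small-latest"))

asyncio.run(main())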