LLM dynamic settings, using the llm-model and llm-rag-model parameters to a flow (#531)

* Ported LLMs to dynamic models
cybermaggedon 2025-09-24 16:36:25 +01:00 committed by GitHub
parent 9a34ab1b93
commit 7a3bfad826
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
15 changed files with 266 additions and 143 deletions
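
For orientation, below is a minimal sketch of how the new flow parameters might be consumed on the request path. Only the parameter names llm-model and llm-rag-model and the generate_content(system, prompt, model=None) signature come from this commit; the FlowRequest class, its params dict, and the handle() dispatcher are hypothetical names invented for illustration and do not reflect the actual plumbing in the changed files.

# Hypothetical sketch only: FlowRequest, params, and handle() are invented
# names; just "llm-model", "llm-rag-model", and the generate_content()
# signature are taken from this commit.

class FlowRequest:
    def __init__(self, system, prompt, params=None):
        self.system = system
        self.prompt = prompt
        # Per-flow parameter bag, e.g. {"llm-model": "claude-3-5-haiku-latest"}
        self.params = params if params is not None else {}

async def handle(processor, request, rag=False):
    # RAG flows read llm-rag-model, plain completion flows read llm-model.
    key = "llm-rag-model" if rag else "llm-model"
    # A missing parameter yields None, which generate_content() maps to
    # the processor's default_model.
    model = request.params.get(key)
    return await processor.generate_content(
        request.system, request.prompt, model=model,
    )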


@@ -41,19 +41,24 @@ class Processor(LlmService):
             }
         )
-        self.model = model
+        self.default_model = model

         self.claude = anthropic.Anthropic(api_key=api_key)

         self.temperature = temperature
         self.max_output = max_output

         logger.info("Claude LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):

+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+
+        logger.debug(f"Using model: {model_name}")
+
         try:
             response = message = self.claude.messages.create(
-                model=self.model,
+                model=model_name,
                 max_tokens=self.max_output,
                 temperature=self.temperature,
                 system = system,
@@ -81,7 +86,7 @@ class Processor(LlmService):
             text = resp,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
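
A short usage sketch of the changed call, assuming an already-constructed Processor instance p; the model string is a placeholder, not a value taken from this commit.

# Override the model for one request, e.g. from a flow's llm-model parameter
resp = await p.generate_content(system, prompt, model="claude-3-5-haiku-latest")

# Omit the argument (or pass None) to fall back to the service default_model
resp = await p.generate_content(system, prompt)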