LLM dynamic settings, using the llm-model and llm-rag-model parameters to a flow (#531)
* Ported LLMs to dynamic models
commit 7a3bfad826
parent 9a34ab1b93
15 changed files with 266 additions and 143 deletions
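The llm-model and llm-rag-model flow parameters named in the title are not shown in this excerpt; presumably they are resolved upstream and arrive at each LLM service as the new optional model argument. A minimal sketch of that resolution step, assuming the flow parameters are available as a plain dict (the helper name and signature below are hypothetical, not part of this diff):

    def resolve_model(flow_params: dict, rag: bool = False) -> str | None:
        # Hypothetical helper: pick the per-flow override, if any.
        # "llm-rag-model" would govern RAG completions, "llm-model"
        # plain completions. None means "use the service default".
        key = "llm-rag-model" if rag else "llm-model"
        return flow_params.get(key)

The diff below shows the receiving end of that plumbing for the Mistral service.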
@@ -41,21 +41,26 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.max_output = max_output
         self.mistral = Mistral(api_key=api_key)

         logger.info("Mistral LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+
+        logger.debug(f"Using model: {model_name}")

         prompt = system + "\n\n" + prompt

         try:

             resp = self.mistral.chat.complete(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {
                         "role": "user",
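Taken together, this hunk lets callers choose a model per request while keeping the startup model as the fallback. A runnable sketch of just that selection logic, with a stub standing in for the real Mistral client (the class and model names here are placeholders, not TrustGraph code):

    import asyncio

    class StubProcessor:
        # Stand-in for the Processor above; reproduces only the
        # default/override selection introduced by this commit.
        def __init__(self, model="mistral-small-latest"):
            self.default_model = model

        async def generate_content(self, system, prompt, model=None):
            model_name = model or self.default_model
            return f"[{model_name}] {prompt}"

    async def main():
        p = StubProcessor()
        # Uses the default configured at startup
        print(await p.generate_content("sys", "hello"))
        # Per-request override wins over the default
        print(await p.generate_content("sys", "hello", model="mistral-large-latest"))

    asyncio.run(main())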
@@ -87,7 +92,7 @@ class Processor(LlmService):
                 text = resp.choices[0].message.content,
                 in_token = inputtokens,
                 out_token = outputtokens,
-                model = self.model
+                model = model_name
             )

             return resp
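The second hunk records the resolved model name in the returned result rather than the startup default, so downstream token accounting reflects the model actually used. A hedged sketch of such a result container, with field names taken from the keyword arguments above (the class name is an assumption; the excerpt does not show which constructor is being called):

    from dataclasses import dataclass

    @dataclass
    class LlmResult:
        # Field names mirror the keyword arguments in the hunk above;
        # the class name itself is assumed, not shown in this excerpt.
        text: str
        in_token: int
        out_token: int
        model: str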