Mirror of https://github.com/trustgraph-ai/trustgraph.git, synced 2026-04-27 01:16:22 +02:00

release/v1.4 -> master (#548)

parent 3ec2cd54f9
commit 2bd68ed7f4

94 changed files with 8571 additions and 1740 deletions
@@ -32,7 +32,7 @@ class Processor(LlmService):
         token = params.get("token", default_token)
         temperature = params.get("temperature", default_temperature)
         max_output = params.get("max_output", default_max_output)
-        model = default_model
+        model = params.get("model", default_model)

         if endpoint is None:
             raise RuntimeError("Azure endpoint not specified")
@@ -53,9 +53,11 @@ class Processor(LlmService):
         self.token = token
         self.temperature = temperature
         self.max_output = max_output
-        self.model = model
+        self.default_model = model

-    def build_prompt(self, system, content):
+    def build_prompt(self, system, content, temperature=None):
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature

         data = {
             "messages": [
@@ -67,7 +69,7 @@ class Processor(LlmService):
                 }
             ],
             "max_tokens": self.max_output,
-            "temperature": self.temperature,
+            "temperature": effective_temperature,
             "top_p": 1
         }

@@ -100,13 +102,22 @@ class Processor(LlmService):

         return result

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         try:

             prompt = self.build_prompt(
                 system,
-                prompt
+                prompt,
+                effective_temperature
             )

             response = self.call_llm(prompt)
@@ -125,7 +136,7 @@ class Processor(LlmService):
             text = resp,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
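Note: every LLM backend in the hunks below follows the same resolution rule inside generate_content: an optional per-request model falls back to self.default_model, and an optional per-request temperature falls back to self.temperature via an explicit None check, so a caller-supplied 0.0 is honoured rather than being treated as unset. A minimal sketch of just that rule, with an illustrative class and defaults that are not part of the diff:

    # Minimal sketch of the fallback rule shared by the processors below.
    # The class name and default values are illustrative only.
    class ExampleService:

        def __init__(self, default_model, temperature):
            self.default_model = default_model
            self.temperature = temperature

        def resolve(self, model=None, temperature=None):
            # "or" is fine for model names: None or "" falls back to the default.
            model_name = model or self.default_model
            # Temperature needs the explicit None check so 0.0 survives.
            effective_temperature = temperature if temperature is not None else self.temperature
            return model_name, effective_temperature

    svc = ExampleService(default_model="base-model", temperature=0.7)
    print(svc.resolve())                       # ('base-model', 0.7)
    print(svc.resolve(model="other-model"))    # ('other-model', 0.7)
    print(svc.resolve(temperature=0.0))        # ('base-model', 0.0)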
@@ -54,7 +54,7 @@ class Processor(LlmService):

         self.temperature = temperature
         self.max_output = max_output
-        self.model = model
+        self.default_model = model

         self.openai = AzureOpenAI(
             api_key=token,
@@ -62,14 +62,22 @@ class Processor(LlmService):
             azure_endpoint = endpoint,
         )

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         prompt = system + "\n\n" + prompt

         try:

             resp = self.openai.chat.completions.create(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {
                         "role": "user",
@@ -81,7 +89,7 @@ class Processor(LlmService):
                     ]
                 }
             ],
-            temperature=self.temperature,
+            temperature=effective_temperature,
             max_tokens=self.max_output,
             top_p=1,
         )
@@ -97,7 +105,7 @@ class Processor(LlmService):
             text = resp.choices[0].message.content,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return r
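Existing callers that pass only system and prompt are unaffected; the new arguments are optional keywords. A hypothetical call site (the processor object and model name are placeholders, and the calls must run inside an async function):

    # Hypothetical usage sketch; 'processor' stands for any of the LlmService
    # implementations changed in this commit.
    result = await processor.generate_content(
        system="You are a helpful assistant.",
        prompt="Summarise the document.",
    )   # uses the processor's configured default model and temperature

    result = await processor.generate_content(
        system="You are a helpful assistant.",
        prompt="Summarise the document.",
        model="some-other-deployment",   # per-request model override
        temperature=0.2,                 # per-request temperature override
    )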
@@ -41,21 +41,29 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.claude = anthropic.Anthropic(api_key=api_key)
         self.temperature = temperature
         self.max_output = max_output

         logger.info("Claude LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         try:

             response = message = self.claude.messages.create(
-                model=self.model,
+                model=model_name,
                 max_tokens=self.max_output,
-                temperature=self.temperature,
+                temperature=effective_temperature,
                 system = system,
                 messages=[
                     {
@@ -81,7 +89,7 @@ class Processor(LlmService):
             text = resp,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
@@ -39,21 +39,29 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.cohere = cohere.Client(api_key=api_key)

         logger.info("Cohere LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         try:

-            output = self.cohere.chat(
-                model=self.model,
+            output = self.cohere.chat(
+                model=model_name,
                 message=prompt,
                 preamble = system,
-                temperature=self.temperature,
+                temperature=effective_temperature,
                 chat_history=[],
                 prompt_truncation='auto',
                 connectors=[]
@@ -71,7 +79,7 @@ class Processor(LlmService):
             text = resp,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
@@ -53,10 +53,13 @@ class Processor(LlmService):
         )

         self.client = genai.Client(api_key=api_key)
-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.max_output = max_output

+        # Cache for generation configs per model
+        self.generation_configs = {}
+
         block_level = HarmBlockThreshold.BLOCK_ONLY_HIGH

         self.safety_settings = [
@@ -83,22 +86,45 @@ class Processor(LlmService):

         logger.info("GoogleAIStudio LLM service initialized")

-    async def generate_content(self, system, prompt):
+    def _get_or_create_config(self, model_name, temperature=None):
+        """Get or create generation config with dynamic temperature"""
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature

-        generation_config = types.GenerateContentConfig(
-            temperature = self.temperature,
-            top_p = 1,
-            top_k = 40,
-            max_output_tokens = self.max_output,
-            response_mime_type = "text/plain",
-            system_instruction = system,
-            safety_settings = self.safety_settings,
-        )
+        # Create cache key that includes temperature to avoid conflicts
+        cache_key = f"{model_name}:{effective_temperature}"
+
+        if cache_key not in self.generation_configs:
+            logger.info(f"Creating generation config for '{model_name}' with temperature {effective_temperature}")
+            self.generation_configs[cache_key] = types.GenerateContentConfig(
+                temperature = effective_temperature,
+                top_p = 1,
+                top_k = 40,
+                max_output_tokens = self.max_output,
+                response_mime_type = "text/plain",
+                safety_settings = self.safety_settings,
+            )
+
+        return self.generation_configs[cache_key]
+
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")
+
+        generation_config = self._get_or_create_config(model_name, effective_temperature)
+        # Set system instruction per request (can't be cached)
+        generation_config.system_instruction = system

         try:

             response = self.client.models.generate_content(
-                model=self.model,
+                model=model_name,
                 config=generation_config,
                 contents=prompt,
             )
@@ -114,7 +140,7 @@ class Processor(LlmService):
             text = resp,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
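The GoogleAIStudio processor takes a slightly different route: GenerateContentConfig objects are cached per (model, temperature) pair in self.generation_configs, and only the system instruction is set per request. A sketch of the cache-key idea using a plain stand-in class instead of the google.genai types (names and model string are illustrative):

    # Stand-in sketch of the per-(model, temperature) config cache.
    from dataclasses import dataclass

    @dataclass
    class StubConfig:
        temperature: float
        system_instruction: str = ""

    class ConfigCache:

        def __init__(self, default_temperature=0.0):
            self.default_temperature = default_temperature
            self.configs = {}

        def get(self, model_name, temperature=None):
            effective = temperature if temperature is not None else self.default_temperature
            key = f"{model_name}:{effective}"   # temperature is part of the key
            if key not in self.configs:
                self.configs[key] = StubConfig(temperature=effective)
            return self.configs[key]

    cache = ConfigCache(default_temperature=0.5)
    a = cache.get("gemini-model")
    b = cache.get("gemini-model", temperature=0.0)
    assert a is not b                        # different temperature, different config
    assert cache.get("gemini-model") is a    # same key, config is reused

Because the cached config is shared, its system_instruction is overwritten on each call; that is fine for sequential use but worth noting if requests ever run concurrently.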
@@ -39,7 +39,7 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.llamafile=llamafile
         self.temperature = temperature
         self.max_output = max_output
@@ -50,25 +50,33 @@ class Processor(LlmService):

         logger.info("Llamafile LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         prompt = system + "\n\n" + prompt

         try:

             resp = self.openai.chat.completions.create(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {"role": "user", "content": prompt}
-                ]
-                #temperature=self.temperature,
-                #max_tokens=self.max_output,
-                #top_p=1,
-                #frequency_penalty=0,
-                #presence_penalty=0,
-                #response_format={
-                #    "type": "text"
-                #}
+                ],
+                temperature=effective_temperature,
+                max_tokens=self.max_output,
+                top_p=1,
+                frequency_penalty=0,
+                presence_penalty=0,
+                response_format={
+                    "type": "text"
+                }
             )

             inputtokens = resp.usage.prompt_tokens
@@ -82,7 +90,7 @@ class Processor(LlmService):
             text = resp.choices[0].message.content,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = "llama.cpp",
+            model = model_name,
         )

         return resp
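For the llamafile backend (and LM Studio below) the sampling parameters that were previously commented out — temperature, max_tokens, top_p, the penalties and response_format — are now sent on every request, with the temperature taken from the per-request override. Both talk to a local OpenAI-compatible endpoint; a standalone sketch of the equivalent call, where the base URL, API key and model name are placeholders rather than values from the diff:

    # Standalone sketch of the request the llamafile/LM Studio processors now
    # make; base_url, api_key and model are placeholders.
    from openai import OpenAI

    client = OpenAI(base_url="http://localhost:8080/v1/", api_key="not-needed")

    resp = client.chat.completions.create(
        model="local-model",
        messages=[{"role": "user", "content": "System text\n\nUser prompt"}],
        temperature=0.0,                    # effective_temperature
        max_tokens=1024,                    # self.max_output
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0,
        response_format={"type": "text"},
    )
    print(resp.choices[0].message.content)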
@@ -39,7 +39,7 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.url = url + "v1/"
         self.temperature = temperature
         self.max_output = max_output
@@ -50,7 +50,15 @@ class Processor(LlmService):

         logger.info("LMStudio LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         prompt = system + "\n\n" + prompt

@@ -59,18 +67,18 @@ class Processor(LlmService):
             logger.debug(f"Prompt: {prompt}")

             resp = self.openai.chat.completions.create(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {"role": "user", "content": prompt}
-                ]
-                #temperature=self.temperature,
-                #max_tokens=self.max_output,
-                #top_p=1,
-                #frequency_penalty=0,
-                #presence_penalty=0,
-                #response_format={
-                #    "type": "text"
-                #}
+                ],
+                temperature=effective_temperature,
+                max_tokens=self.max_output,
+                top_p=1,
+                frequency_penalty=0,
+                presence_penalty=0,
+                response_format={
+                    "type": "text"
+                }
             )

             logger.debug(f"Full response: {resp}")
@@ -86,7 +94,7 @@ class Processor(LlmService):
             text = resp.choices[0].message.content,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
@@ -41,21 +41,29 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.max_output = max_output
         self.mistral = Mistral(api_key=api_key)

         logger.info("Mistral LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         prompt = system + "\n\n" + prompt

         try:

             resp = self.mistral.chat.complete(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {
                         "role": "user",
@@ -67,7 +75,7 @@ class Processor(LlmService):
                     ]
                 }
             ],
-            temperature=self.temperature,
+            temperature=effective_temperature,
             max_tokens=self.max_output,
             top_p=1,
             frequency_penalty=0,
@@ -87,7 +95,7 @@ class Processor(LlmService):
             text = resp.choices[0].message.content,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
@@ -17,6 +17,7 @@ from .... base import LlmService, LlmResult
 default_ident = "text-completion"

 default_model = 'gemma2:9b'
+default_temperature = 0.0
 default_ollama = os.getenv("OLLAMA_HOST", 'http://localhost:11434')

 class Processor(LlmService):
@@ -24,25 +25,36 @@ class Processor(LlmService):
     def __init__(self, **params):

         model = params.get("model", default_model)
+        temperature = params.get("temperature", default_temperature)
         ollama = params.get("ollama", default_ollama)

         super(Processor, self).__init__(
             **params | {
                 "model": model,
+                "temperature": temperature,
                 "ollama": ollama,
             }
         )

-        self.model = model
+        self.default_model = model
+        self.temperature = temperature
         self.llm = Client(host=ollama)

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         prompt = system + "\n\n" + prompt

         try:

-            response = self.llm.generate(self.model, prompt)
+            response = self.llm.generate(model_name, prompt, options={'temperature': effective_temperature})

             response_text = response['response']
             logger.debug("Sending response...")
@@ -55,7 +67,7 @@ class Processor(LlmService):
             text = response_text,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
@@ -84,6 +96,13 @@ class Processor(LlmService):
             help=f'ollama (default: {default_ollama})'
         )

+        parser.add_argument(
+            '-t', '--temperature',
+            type=float,
+            default=default_temperature,
+            help=f'LLM temperature parameter (default: {default_temperature})'
+        )
+
 def run():

     Processor.launch(default_ident, __doc__)
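The Ollama processor gains a default_temperature of 0.0, a -t/--temperature command-line flag, and now forwards the resolved temperature through the client's options dict instead of dropping it. A sketch of the underlying call, mirroring the diff; it assumes the ollama package and a reachable server, and the host and model values are the defaults from this file:

    # Sketch of the Ollama call with a per-request temperature, as in the diff.
    from ollama import Client

    client = Client(host="http://localhost:11434")
    response = client.generate(
        "gemma2:9b",                            # model_name after the fallback
        "System instructions\n\nUser prompt",   # system and prompt concatenated
        options={"temperature": 0.0},           # effective_temperature
    )
    print(response["response"])

The new flag only moves the processor-wide default; a temperature supplied on an individual request still takes precedence through the None check above.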
@@ -47,7 +47,7 @@ class Processor(LlmService):
             }
         )

-        self.model = model
+        self.default_model = model
         self.temperature = temperature
         self.max_output = max_output

@@ -58,14 +58,22 @@ class Processor(LlmService):

         logger.info("OpenAI LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         prompt = system + "\n\n" + prompt

         try:

             resp = self.openai.chat.completions.create(
-                model=self.model,
+                model=model_name,
                 messages=[
                     {
                         "role": "user",
@@ -77,7 +85,7 @@ class Processor(LlmService):
                     ]
                 }
             ],
-            temperature=self.temperature,
+            temperature=effective_temperature,
             max_tokens=self.max_output,
             top_p=1,
             frequency_penalty=0,
@@ -97,7 +105,7 @@ class Processor(LlmService):
             text = resp.choices[0].message.content,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model
+            model = model_name
         )

         return resp
@@ -30,32 +30,43 @@ class Processor(LlmService):
         base_url = params.get("url", default_base_url)
         temperature = params.get("temperature", default_temperature)
         max_output = params.get("max_output", default_max_output)
+        model = params.get("model", "tgi")

         super(Processor, self).__init__(
             **params | {
                 "temperature": temperature,
                 "max_output": max_output,
                 "url": base_url,
+                "model": model,
             }
         )

         self.base_url = base_url
         self.temperature = temperature
         self.max_output = max_output
+        self.default_model = model

         self.session = aiohttp.ClientSession()

         logger.info(f"Using TGI service at {base_url}")
         logger.info("TGI LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         headers = {
             "Content-Type": "application/json",
         }

         request = {
-            "model": "tgi",
+            "model": model_name,
             "messages": [
                 {
                     "role": "system",
@@ -67,7 +78,7 @@ class Processor(LlmService):
                 }
             ],
             "max_tokens": self.max_output,
-            "temperature": self.temperature,
+            "temperature": effective_temperature,
         }

         try:
@@ -96,7 +107,7 @@ class Processor(LlmService):
             text = ans,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = "tgi",
+            model = model_name,
         )

         return resp
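The TGI processor builds its chat payload by hand and posts it with aiohttp; the request body now carries the resolved model name instead of the literal "tgi" and the per-request temperature instead of the constructor default. A sketch of the body being assembled — the user-message shape follows the usual chat-completions convention and is an assumption here, since the diff only shows the system role:

    # Sketch of the request body; only the fields visible in the diff are certain.
    import json

    def build_request(system, prompt, model_name, effective_temperature, max_output):
        return {
            "model": model_name,                   # previously the literal "tgi"
            "messages": [
                {"role": "system", "content": system},
                {"role": "user", "content": prompt},
            ],
            "max_tokens": max_output,
            "temperature": effective_temperature,  # previously self.temperature
        }

    print(json.dumps(build_request("Be terse.", "Ping?", "tgi", 0.0, 1024), indent=2))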
@@ -45,24 +45,32 @@ class Processor(LlmService):
         self.base_url = base_url
         self.temperature = temperature
         self.max_output = max_output
-        self.model = model
+        self.default_model = model

         self.session = aiohttp.ClientSession()

         logger.info(f"Using vLLM service at {base_url}")
         logger.info("vLLM LLM service initialized")

-    async def generate_content(self, system, prompt):
+    async def generate_content(self, system, prompt, model=None, temperature=None):
+
+        # Use provided model or fall back to default
+        model_name = model or self.default_model
+        # Use provided temperature or fall back to default
+        effective_temperature = temperature if temperature is not None else self.temperature
+
+        logger.debug(f"Using model: {model_name}")
+        logger.debug(f"Using temperature: {effective_temperature}")

         headers = {
             "Content-Type": "application/json",
         }

         request = {
-            "model": self.model,
+            "model": model_name,
             "prompt": system + "\n\n" + prompt,
             "max_tokens": self.max_output,
-            "temperature": self.temperature,
+            "temperature": effective_temperature,
         }

         try:
@@ -91,7 +99,7 @@ class Processor(LlmService):
             text = ans,
             in_token = inputtokens,
             out_token = outputtokens,
-            model = self.model,
+            model = model_name,
         )

         return resp
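Across all of the LLM backends shown above the resolution rule is identical, so its intended behaviour can be captured in a few assertions; this is an illustrative check, not a test added by the commit:

    # Illustrative check of the resolution rule (not part of the commit).
    def resolve(default_model, default_temperature, model=None, temperature=None):
        model_name = model or default_model
        effective_temperature = temperature if temperature is not None else default_temperature
        return model_name, effective_temperature

    assert resolve("base", 0.7) == ("base", 0.7)                    # no overrides
    assert resolve("base", 0.7, model="other") == ("other", 0.7)    # model only
    assert resolve("base", 0.7, temperature=0.0) == ("base", 0.0)   # 0.0 honoured
    assert resolve("base", 0.7, model="other", temperature=0.2) == ("other", 0.2)
    print("resolution rule behaves as expected")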