Mirror of https://github.com/trustgraph-ai/trustgraph.git, synced 2026-05-12 08:42:37 +02:00.
Use max_completion_tokens for OpenAI and Azure OpenAI providers
The OpenAI API deprecated max_tokens in favor of max_completion_tokens for chat completions. Newer models (gpt-4o, o1, o3) reject the old parameter with a 400 error.
This commit is contained in:
parent a634520509
commit 4aa540c622
2 changed files with 4 additions and 4 deletions
@@ -90,7 +90,7 @@ class Processor(LlmService):
                 }
             ],
             temperature=effective_temperature,
-            max_tokens=self.max_output,
+            max_completion_tokens=self.max_output,
             top_p=1,
         )

|
@@ -159,7 +159,7 @@ class Processor(LlmService):
                 }
             ],
             temperature=effective_temperature,
-            max_tokens=self.max_output,
+            max_completion_tokens=self.max_output,
             top_p=1,
             stream=True,
             stream_options={"include_usage": True}
@@ -86,7 +86,7 @@ class Processor(LlmService):
                 }
             ],
             temperature=effective_temperature,
-            max_tokens=self.max_output,
+            max_completion_tokens=self.max_output,
         )

         inputtokens = resp.usage.prompt_tokens
@@ -152,7 +152,7 @@ class Processor(LlmService):
                 }
             ],
             temperature=effective_temperature,
-            max_tokens=self.max_output,
+            max_completion_tokens=self.max_output,
             stream=True,
             stream_options={"include_usage": True}
         )
Loading…
Add table
Add a link
Reference in a new issue