From 4aa540c62295297b0ef614e3565eea1e18c3dc2b Mon Sep 17 00:00:00 2001 From: Cyber MacGeddon Date: Sat, 28 Mar 2026 11:01:55 +0000 Subject: [PATCH] Use max_completion_tokens for OpenAI and Azure OpenAI providers The OpenAI API deprecated max_tokens in favor of max_completion_tokens for chat completions. Newer reasoning models (o1, o3) reject the old parameter with a 400 error; gpt-4o still accepts max_tokens but it is deprecated. --- .../trustgraph/model/text_completion/azure_openai/llm.py | 4 ++-- .../trustgraph/model/text_completion/openai/llm.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/trustgraph-flow/trustgraph/model/text_completion/azure_openai/llm.py b/trustgraph-flow/trustgraph/model/text_completion/azure_openai/llm.py index 4ab0b302..9d803c90 100755 --- a/trustgraph-flow/trustgraph/model/text_completion/azure_openai/llm.py +++ b/trustgraph-flow/trustgraph/model/text_completion/azure_openai/llm.py @@ -90,7 +90,7 @@ class Processor(LlmService): } ], temperature=effective_temperature, - max_tokens=self.max_output, + max_completion_tokens=self.max_output, top_p=1, ) @@ -159,7 +159,7 @@ class Processor(LlmService): } ], temperature=effective_temperature, - max_tokens=self.max_output, + max_completion_tokens=self.max_output, top_p=1, stream=True, stream_options={"include_usage": True} diff --git a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py index d65e27bf..cdc8602a 100755 --- a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py +++ b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py @@ -86,7 +86,7 @@ class Processor(LlmService): } ], temperature=effective_temperature, - max_tokens=self.max_output, + max_completion_tokens=self.max_output, ) inputtokens = resp.usage.prompt_tokens @@ -152,7 +152,7 @@ class Processor(LlmService): } ], temperature=effective_temperature, - max_tokens=self.max_output, + max_completion_tokens=self.max_output, stream=True, stream_options={"include_usage": 
True} )