diff --git a/trustgraph-flow/trustgraph/model/text_completion/azure_openai/llm.py b/trustgraph-flow/trustgraph/model/text_completion/azure_openai/llm.py
index 4ab0b302..9d803c90 100755
--- a/trustgraph-flow/trustgraph/model/text_completion/azure_openai/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/azure_openai/llm.py
@@ -90,7 +90,7 @@ class Processor(LlmService):
                 }
             ],
             temperature=effective_temperature,
-            max_tokens=self.max_output,
+            max_completion_tokens=self.max_output,
             top_p=1,
         )
@@ -159,7 +159,7 @@ class Processor(LlmService):
                 }
             ],
             temperature=effective_temperature,
-            max_tokens=self.max_output,
+            max_completion_tokens=self.max_output,
             top_p=1,
             stream=True,
             stream_options={"include_usage": True}
diff --git a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
index d65e27bf..cdc8602a 100755
--- a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
@@ -86,7 +86,7 @@ class Processor(LlmService):
                 }
             ],
             temperature=effective_temperature,
-            max_tokens=self.max_output,
+            max_completion_tokens=self.max_output,
         )

         inputtokens = resp.usage.prompt_tokens
@@ -152,7 +152,7 @@ class Processor(LlmService):
                 }
             ],
             temperature=effective_temperature,
-            max_tokens=self.max_output,
+            max_completion_tokens=self.max_output,
             stream=True,
             stream_options={"include_usage": True}
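
All four hunks make the same one-line rename: OpenAI's Chat Completions API has deprecated max_tokens in favor of max_completion_tokens, and newer reasoning models reject the old parameter outright, so both the OpenAI and Azure OpenAI processors now send the new name. Below is a minimal sketch of the resulting call shape, assuming the openai>=1.x Python SDK; the model name, prompt, and limit are illustrative placeholders, not values taken from this PR.

    # Minimal sketch of the create() call after this change; the model,
    # prompt, and token limit below are placeholders, not PR values.
    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    resp = client.chat.completions.create(
        model="gpt-4o",                 # placeholder model name
        messages=[
            {"role": "user", "content": "Say hello."}
        ],
        temperature=0.0,
        max_completion_tokens=256,      # replaces the deprecated max_tokens
        top_p=1,
    )

    print(resp.choices[0].message.content)
    print(resp.usage.prompt_tokens, resp.usage.completion_tokens)

Since max_completion_tokens is accepted by both client.chat.completions.create and its AzureOpenAI counterpart, the same rename covers both processors; no other arguments need to change.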