mirror of
https://github.com/trustgraph-ai/trustgraph.git
synced 2026-04-25 00:16:23 +02:00
Fix OpenAI compatibility issues for newer models and Azure config (#727)
Use max_completion_tokens for OpenAI and Azure OpenAI providers: the OpenAI API deprecated max_tokens in favor of max_completion_tokens for chat completions, and newer models (gpt-4o, o1, o3) reject the old parameter with a 400 error. The AZURE_API_VERSION env var now overrides the default API version (falls back to 2024-12-01-preview). Update tests to check for the expected request structures.
This commit is contained in:
parent
a634520509
commit
20204d87c3
6 changed files with 14 additions and 14 deletions
|
|
@ -93,7 +93,7 @@ class TestTextCompletionIntegration:
|
|||
|
||||
assert call_args.kwargs['model'] == "gpt-3.5-turbo"
|
||||
assert call_args.kwargs['temperature'] == 0.7
|
||||
assert call_args.kwargs['max_tokens'] == 1024
|
||||
assert call_args.kwargs['max_completion_tokens'] == 1024
|
||||
assert len(call_args.kwargs['messages']) == 1
|
||||
assert call_args.kwargs['messages'][0]['role'] == "user"
|
||||
assert "You are a helpful assistant." in call_args.kwargs['messages'][0]['content'][0]['text']
|
||||
|
|
@ -134,7 +134,7 @@ class TestTextCompletionIntegration:
|
|||
call_args = mock_openai_client.chat.completions.create.call_args
|
||||
assert call_args.kwargs['model'] == config['model']
|
||||
assert call_args.kwargs['temperature'] == config['temperature']
|
||||
assert call_args.kwargs['max_tokens'] == config['max_output']
|
||||
assert call_args.kwargs['max_completion_tokens'] == config['max_output']
|
||||
|
||||
# Reset mock for next iteration
|
||||
mock_openai_client.reset_mock()
|
||||
|
|
@ -286,7 +286,7 @@ class TestTextCompletionIntegration:
|
|||
# were removed in #561 as unnecessary parameters
|
||||
assert 'model' in call_args.kwargs
|
||||
assert 'temperature' in call_args.kwargs
|
||||
assert 'max_tokens' in call_args.kwargs
|
||||
assert 'max_completion_tokens' in call_args.kwargs
|
||||
|
||||
# Verify result structure
|
||||
assert hasattr(result, 'text')
|
||||
|
|
@ -362,7 +362,7 @@ class TestTextCompletionIntegration:
|
|||
call_args = mock_openai_client.chat.completions.create.call_args
|
||||
assert call_args.kwargs['model'] == "gpt-4"
|
||||
assert call_args.kwargs['temperature'] == 0.8
|
||||
assert call_args.kwargs['max_tokens'] == 2048
|
||||
assert call_args.kwargs['max_completion_tokens'] == 2048
|
||||
# Note: top_p, frequency_penalty, and presence_penalty
|
||||
# were removed in #561 as unnecessary parameters
|
||||
|
||||
|
|
|
|||
|
|
@ -201,7 +201,7 @@ class TestTextCompletionStreaming:
|
|||
call_args = mock_streaming_openai_client.chat.completions.create.call_args
|
||||
assert call_args.kwargs['model'] == "gpt-4"
|
||||
assert call_args.kwargs['temperature'] == 0.5
|
||||
assert call_args.kwargs['max_tokens'] == 2048
|
||||
assert call_args.kwargs['max_completion_tokens'] == 2048
|
||||
assert call_args.kwargs['stream'] is True
|
||||
|
||||
# Verify chunks have correct model
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ class TestAzureOpenAIProcessorSimple(IsolatedAsyncioTestCase):
|
|||
}]
|
||||
}],
|
||||
temperature=0.0,
|
||||
max_tokens=4192,
|
||||
max_completion_tokens=4192,
|
||||
top_p=1
|
||||
)
|
||||
|
||||
|
|
@ -399,7 +399,7 @@ class TestAzureOpenAIProcessorSimple(IsolatedAsyncioTestCase):
|
|||
# Verify other parameters
|
||||
assert call_args[1]['model'] == 'gpt-4'
|
||||
assert call_args[1]['temperature'] == 0.5
|
||||
assert call_args[1]['max_tokens'] == 1024
|
||||
assert call_args[1]['max_completion_tokens'] == 1024
|
||||
assert call_args[1]['top_p'] == 1
|
||||
|
||||
@patch('trustgraph.model.text_completion.azure_openai.llm.AzureOpenAI')
|
||||
|
|
|
|||
|
|
@ -102,7 +102,7 @@ class TestOpenAIProcessorSimple(IsolatedAsyncioTestCase):
|
|||
}]
|
||||
}],
|
||||
temperature=0.0,
|
||||
max_tokens=4096
|
||||
max_completion_tokens=4096
|
||||
)
|
||||
|
||||
@patch('trustgraph.model.text_completion.openai.llm.OpenAI')
|
||||
|
|
@ -380,7 +380,7 @@ class TestOpenAIProcessorSimple(IsolatedAsyncioTestCase):
|
|||
# Verify other parameters
|
||||
assert call_args[1]['model'] == 'gpt-3.5-turbo'
|
||||
assert call_args[1]['temperature'] == 0.5
|
||||
assert call_args[1]['max_tokens'] == 1024
|
||||
assert call_args[1]['max_completion_tokens'] == 1024
|
||||
|
||||
|
||||
@patch('trustgraph.model.text_completion.openai.llm.OpenAI')
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ default_ident = "text-completion"
|
|||
|
||||
default_temperature = 0.0
|
||||
default_max_output = 4192
|
||||
default_api = "2024-12-01-preview"
|
||||
default_api = os.getenv("AZURE_API_VERSION", "2024-12-01-preview")
|
||||
default_endpoint = os.getenv("AZURE_ENDPOINT", None)
|
||||
default_token = os.getenv("AZURE_TOKEN", None)
|
||||
default_model = os.getenv("AZURE_MODEL", None)
|
||||
|
|
@ -90,7 +90,7 @@ class Processor(LlmService):
|
|||
}
|
||||
],
|
||||
temperature=effective_temperature,
|
||||
max_tokens=self.max_output,
|
||||
max_completion_tokens=self.max_output,
|
||||
top_p=1,
|
||||
)
|
||||
|
||||
|
|
@ -159,7 +159,7 @@ class Processor(LlmService):
|
|||
}
|
||||
],
|
||||
temperature=effective_temperature,
|
||||
max_tokens=self.max_output,
|
||||
max_completion_tokens=self.max_output,
|
||||
top_p=1,
|
||||
stream=True,
|
||||
stream_options={"include_usage": True}
|
||||
|
|
|
|||
|
|
@ -86,7 +86,7 @@ class Processor(LlmService):
|
|||
}
|
||||
],
|
||||
temperature=effective_temperature,
|
||||
max_tokens=self.max_output,
|
||||
max_completion_tokens=self.max_output,
|
||||
)
|
||||
|
||||
inputtokens = resp.usage.prompt_tokens
|
||||
|
|
@ -152,7 +152,7 @@ class Processor(LlmService):
|
|||
}
|
||||
],
|
||||
temperature=effective_temperature,
|
||||
max_tokens=self.max_output,
|
||||
max_completion_tokens=self.max_output,
|
||||
stream=True,
|
||||
stream_options={"include_usage": True}
|
||||
)
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue