From 3580e7a7ae0e4d4793cbbde839def9f5a0dfd55e Mon Sep 17 00:00:00 2001
From: cybermaggedon
Date: Thu, 20 Nov 2025 17:56:31 +0000
Subject: [PATCH] Remove some 'unnecessary' parameters from OpenAI invocation
 (#561)

* Remove some 'unnecessary' parameters from OpenAI invocation.

  The OpenAI API is getting complicated with the API and SDK changing on
  OpenAI's end, but this not getting mapped through to other services
  which are 'compatible' with OpenAI.

* Update OpenAI test for this change

* Trying running tests with Python 3.13
---
 .github/workflows/pull-request.yaml                            |  2 +-
 tests/unit/test_text_completion/test_openai_processor.py       | 10 +---------
 trustgraph-flow/trustgraph/model/text_completion/openai/llm.py |  6 ------
 3 files changed, 2 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml
index 9d0a11e4..28b21772 100644
--- a/.github/workflows/pull-request.yaml
+++ b/.github/workflows/pull-request.yaml
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest

     container:
-      image: python:3.12
+      image: python:3.13

     steps:
     - name: Checkout
diff --git a/tests/unit/test_text_completion/test_openai_processor.py b/tests/unit/test_text_completion/test_openai_processor.py
index a9a43b37..352af062 100644
--- a/tests/unit/test_text_completion/test_openai_processor.py
+++ b/tests/unit/test_text_completion/test_openai_processor.py
@@ -102,11 +102,7 @@ class TestOpenAIProcessorSimple(IsolatedAsyncioTestCase):
                 }]
             }],
             temperature=0.0,
-            max_tokens=4096,
-            top_p=1,
-            frequency_penalty=0,
-            presence_penalty=0,
-            response_format={"type": "text"}
+            max_tokens=4096
         )

     @patch('trustgraph.model.text_completion.openai.llm.OpenAI')
@@ -385,10 +381,6 @@ class TestOpenAIProcessorSimple(IsolatedAsyncioTestCase):
         assert call_args[1]['model'] == 'gpt-3.5-turbo'
         assert call_args[1]['temperature'] == 0.5
         assert call_args[1]['max_tokens'] == 1024
-        assert call_args[1]['top_p'] == 1
-        assert call_args[1]['frequency_penalty'] == 0
-        assert call_args[1]['presence_penalty'] == 0
-        assert call_args[1]['response_format'] == {"type": "text"}

     @patch('trustgraph.model.text_completion.openai.llm.OpenAI')
diff --git a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
index 74ed2353..d2698589 100755
--- a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
@@ -87,12 +87,6 @@ class Processor(LlmService):
             ],
             temperature=effective_temperature,
             max_tokens=self.max_output,
-            top_p=1,
-            frequency_penalty=0,
-            presence_penalty=0,
-            response_format={
-                "type": "text"
-            }
         )

         inputtokens = resp.usage.prompt_tokens