Remove some 'unnecessary' parameters from OpenAI invocation (#561)

* Remove some 'unnecessary' parameters from OpenAI invocation.  The OpenAI
API is getting complicated, with the API and SDK changing on OpenAI's end,
but this is not getting mapped through to other services which are 'compatible'
with OpenAI.

* Update OpenAI test for this change

* Try running tests with Python 3.13
This commit is contained in:
cybermaggedon 2025-11-20 17:56:31 +00:00 committed by GitHub
parent 6c85038c75
commit 3580e7a7ae
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
3 changed files with 2 additions and 16 deletions

View file

@@ -15,7 +15,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: python:3.12 image: python:3.13
steps: steps:
- name: Checkout - name: Checkout

View file

@@ -102,11 +102,7 @@ class TestOpenAIProcessorSimple(IsolatedAsyncioTestCase):
}] }]
}], }],
temperature=0.0, temperature=0.0,
max_tokens=4096, max_tokens=4096
top_p=1,
frequency_penalty=0,
presence_penalty=0,
response_format={"type": "text"}
) )
@patch('trustgraph.model.text_completion.openai.llm.OpenAI') @patch('trustgraph.model.text_completion.openai.llm.OpenAI')
@@ -385,10 +381,6 @@ class TestOpenAIProcessorSimple(IsolatedAsyncioTestCase):
assert call_args[1]['model'] == 'gpt-3.5-turbo' assert call_args[1]['model'] == 'gpt-3.5-turbo'
assert call_args[1]['temperature'] == 0.5 assert call_args[1]['temperature'] == 0.5
assert call_args[1]['max_tokens'] == 1024 assert call_args[1]['max_tokens'] == 1024
assert call_args[1]['top_p'] == 1
assert call_args[1]['frequency_penalty'] == 0
assert call_args[1]['presence_penalty'] == 0
assert call_args[1]['response_format'] == {"type": "text"}
@patch('trustgraph.model.text_completion.openai.llm.OpenAI') @patch('trustgraph.model.text_completion.openai.llm.OpenAI')

View file

@@ -87,12 +87,6 @@ class Processor(LlmService):
], ],
temperature=effective_temperature, temperature=effective_temperature,
max_tokens=self.max_output, max_tokens=self.max_output,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
response_format={
"type": "text"
}
) )
inputtokens = resp.usage.prompt_tokens inputtokens = resp.usage.prompt_tokens