diff --git a/.github/workflows/pull-request.yaml b/.github/workflows/pull-request.yaml
index 9d0a11e4..28b21772 100644
--- a/.github/workflows/pull-request.yaml
+++ b/.github/workflows/pull-request.yaml
@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest
 
     container:
-      image: python:3.12
+      image: python:3.13
 
     steps:
     - name: Checkout
diff --git a/tests/unit/test_text_completion/test_openai_processor.py b/tests/unit/test_text_completion/test_openai_processor.py
index a9a43b37..352af062 100644
--- a/tests/unit/test_text_completion/test_openai_processor.py
+++ b/tests/unit/test_text_completion/test_openai_processor.py
@@ -102,11 +102,7 @@ class TestOpenAIProcessorSimple(IsolatedAsyncioTestCase):
                 }]
             }],
             temperature=0.0,
-            max_tokens=4096,
-            top_p=1,
-            frequency_penalty=0,
-            presence_penalty=0,
-            response_format={"type": "text"}
+            max_tokens=4096
         )
 
     @patch('trustgraph.model.text_completion.openai.llm.OpenAI')
@@ -385,10 +385,6 @@ class TestOpenAIProcessorSimple(IsolatedAsyncioTestCase):
 
         assert call_args[1]['model'] == 'gpt-3.5-turbo'
         assert call_args[1]['temperature'] == 0.5
         assert call_args[1]['max_tokens'] == 1024
-        assert call_args[1]['top_p'] == 1
-        assert call_args[1]['frequency_penalty'] == 0
-        assert call_args[1]['presence_penalty'] == 0
-        assert call_args[1]['response_format'] == {"type": "text"}
 
     @patch('trustgraph.model.text_completion.openai.llm.OpenAI')
diff --git a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
index 74ed2353..d2698589 100755
--- a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
@@ -87,12 +87,6 @@ class Processor(LlmService):
             ],
             temperature=effective_temperature,
             max_tokens=self.max_output,
-            top_p=1,
-            frequency_penalty=0,
-            presence_penalty=0,
-            response_format={
-                "type": "text"
-            }
         )
 
         inputtokens = resp.usage.prompt_tokens