diff --git a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
index a52f400e..88872e8d 100755
--- a/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
+++ b/trustgraph-flow/trustgraph/model/text_completion/openai/llm.py
@@ -16,7 +16,10 @@ default_model = 'gpt-3.5-turbo'
 default_temperature = 0.0
 default_max_output = 4096
 default_api_key = os.getenv("OPENAI_TOKEN")
-default_base_url = os.getenv("OPENAI_BASE_URL", None)
+default_base_url = os.getenv("OPENAI_BASE_URL")
+
+if default_base_url is None or default_base_url == "":
+    default_base_url = "https://api.openai.com/v1"
 
 class Processor(LlmService):
 
@@ -24,7 +27,7 @@ class Processor(LlmService):
 
         model = params.get("model", default_model)
         api_key = params.get("api_key", default_api_key)
-        base_url = params.get("base_url", default_base_url)
+        base_url = params.get("url", default_base_url)
 
         temperature = params.get("temperature", default_temperature)
         max_output = params.get("max_output", default_max_output)
@@ -43,7 +46,11 @@ class Processor(LlmService):
         self.model = model
         self.temperature = temperature
         self.max_output = max_output
-        self.openai = OpenAI(base_url=base_url, api_key=api_key)
+
+        if base_url:
+            self.openai = OpenAI(base_url=base_url, api_key=api_key)
+        else:
+            self.openai = OpenAI(api_key=api_key)
 
         print("Initialised", flush=True)
 
@@ -102,7 +109,7 @@ class Processor(LlmService):
 
             # Apart from rate limits, treat all exceptions as unrecoverable
 
-            print(f"Exception: {e}")
+            print(f"Exception: {type(e)} {e}")
             raise e
 
     @staticmethod