From b113aa246f704326b87e1437dc8e2a41ef0d1ec7 Mon Sep 17 00:00:00 2001 From: shenchucheng Date: Mon, 25 Dec 2023 17:22:30 +0800 Subject: [PATCH] update log_llm_stream in google_gemini_api.py/ollama_api.py --- metagpt/provider/google_gemini_api.py | 2 +- metagpt/provider/ollama_api.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/metagpt/provider/google_gemini_api.py b/metagpt/provider/google_gemini_api.py index 63a2ff687..825b0bfe3 100644 --- a/metagpt/provider/google_gemini_api.py +++ b/metagpt/provider/google_gemini_api.py @@ -119,7 +119,7 @@ class GeminiGPTAPI(BaseGPTAPI): collected_content = [] async for chunk in resp: content = chunk.text - log_llm_stream(content, end="") + log_llm_stream(content) collected_content.append(content) full_content = "".join(collected_content) diff --git a/metagpt/provider/ollama_api.py b/metagpt/provider/ollama_api.py index d668d3af1..e913f3d0d 100644 --- a/metagpt/provider/ollama_api.py +++ b/metagpt/provider/ollama_api.py @@ -127,7 +127,7 @@ class OllamaGPTAPI(BaseGPTAPI): if not chunk.get("done", False): content = self.get_choice_text(chunk) collected_content.append(content) - log_llm_stream(content, end="") + log_llm_stream(content) else: # stream finished usage = self.get_usage(chunk)