diff --git a/metagpt/provider/base_llm.py b/metagpt/provider/base_llm.py
index 80e51f8ac..b3aa1b340 100644
--- a/metagpt/provider/base_llm.py
+++ b/metagpt/provider/base_llm.py
@@ -215,7 +215,10 @@ class BaseLLM(ABC):
 
     def get_choice_text(self, rsp: dict) -> str:
         """Required to provide the first text of choice"""
-        return rsp.get("choices")[0]["message"]["content"]
+        message = rsp.get("choices")[0]["message"]
+        if message.get("reasoning_content"):  # reasoning models (e.g. DeepSeek-R1) return the CoT here; skip None/empty
+            self.reasoning_content = message["reasoning_content"]
+        return message["content"]
 
     def get_choice_delta_text(self, rsp: dict) -> str:
         """Required to provide the first text of stream choice"""
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index bd9c02231..5e718f45f 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -91,8 +91,12 @@ class OpenAILLM(BaseLLM):
         )
         usage = None
         collected_messages = []
+        collected_reasoning_messages = []
         has_finished = False
         async for chunk in response:
+            if chunk.choices and getattr(chunk.choices[0].delta, "reasoning_content", None):  # for deepseek: delta carries reasoning_content (None on content chunks)
+                collected_reasoning_messages.append(chunk.choices[0].delta.reasoning_content)
+                continue
             chunk_message = chunk.choices[0].delta.content or "" if chunk.choices else ""  # extract the message
             finish_reason = (
                 chunk.choices[0].finish_reason if chunk.choices and hasattr(chunk.choices[0], "finish_reason") else None
@@ -118,6 +122,8 @@
             log_llm_stream("\n")
 
         full_reply_content = "".join(collected_messages)
+        if collected_reasoning_messages:
+            self.reasoning_content = "".join(collected_reasoning_messages)
         if not usage:
             # Some services do not provide the usage attribute, such as OpenAI or OpenLLM
             usage = self._calc_usage(messages, full_reply_content)