Mirror of https://github.com/FoundationAgents/MetaGPT.git, synced 2026-05-07 14:52:37 +02:00
Revert "feat(core): Add stream data return and reception"
This reverts commit 7706b88f03.
Parent: 29fecffa3f · Commit: 923150b2f3
13 changed files with 3 additions and 211 deletions
@@ -62,9 +62,6 @@ class AnthropicLLM(BaseLLM):
             elif event_type == "content_block_delta":
                 content = event.delta.text
                 log_llm_stream(content)
-                if self.stream_pipe:
-                    self.stream_pipe.set_message(content)
-
                 collected_content.append(content)
             elif event_type == "message_delta":
                 usage.output_tokens = event.usage.output_tokens  # update final output_tokens
@@ -28,7 +28,6 @@ from metagpt.logs import logger
 from metagpt.schema import Message
 from metagpt.utils.common import log_and_reraise
 from metagpt.utils.cost_manager import CostManager, Costs
-from metagpt.utils.stream_pipe import StreamPipe


 class BaseLLM(ABC):
@@ -43,7 +42,6 @@ class BaseLLM(ABC):
     cost_manager: Optional[CostManager] = None
     model: Optional[str] = None  # deprecated
     pricing_plan: Optional[str] = None
-    stream_pipe: Optional[StreamPipe] = None

     @abstractmethod
     def __init__(self, config: LLMConfig):
@@ -221,8 +221,6 @@ class DashScopeLLM(BaseLLM):
             content = chunk.output.choices[0]["message"]["content"]
             usage = dict(chunk.usage)  # each chunk has usage
             log_llm_stream(content)
-            if self.stream_pipe:
-                self.stream_pipe.set_message(content)
             collected_content.append(content)
         log_llm_stream("\n")
         self._update_costs(usage)
@@ -149,8 +149,6 @@ class GeminiLLM(BaseLLM):
                 logger.warning(f"messages: {messages}\nerrors: {e}\n{BlockedPromptException(str(chunk))}")
                 raise BlockedPromptException(str(chunk))
             log_llm_stream(content)
-            if self.stream_pipe:
-                self.stream_pipe.set_message(content)
             collected_content.append(content)
         log_llm_stream("\n")

@@ -83,8 +83,6 @@ class OllamaLLM(BaseLLM):
                 content = self.get_choice_text(chunk)
                 collected_content.append(content)
                 log_llm_stream(content)
-                if self.stream_pipe:
-                    self.stream_pipe.set_message(content)
             else:
                 # stream finished
                 usage = self.get_usage(chunk)
@@ -87,9 +87,6 @@ class OpenAILLM(BaseLLM):
                 chunk.choices[0].finish_reason if chunk.choices and hasattr(chunk.choices[0], "finish_reason") else None
             )
             log_llm_stream(chunk_message)
-            if self.stream_pipe:
-                self.stream_pipe.set_message(chunk_message)
-
             collected_messages.append(chunk_message)
             if finish_reason:
                 if hasattr(chunk, "usage"):
@@ -124,8 +124,6 @@ class QianFanLLM(BaseLLM):
             content = chunk.body.get("result", "")
             usage = chunk.body.get("usage", {})
             log_llm_stream(content)
-            if self.stream_pipe:
-                self.stream_pipe.set_message(content)
             collected_content.append(content)
         log_llm_stream("\n")

@@ -73,8 +73,6 @@ class ZhiPuAILLM(BaseLLM):
             content = self.get_choice_delta_text(chunk)
             collected_content.append(content)
             log_llm_stream(content)
-            if self.stream_pipe:
-                self.stream_pipe.set_message(content)

         log_llm_stream("\n")
