Mirror of https://github.com/FoundationAgents/MetaGPT.git (synced 2026-05-08 07:12:38 +02:00)
feat(core): Add stream data return and reception
1. Add file: utils/stream_pipe.py
2. Add demo: samples/flask_web_api.py
3. Other core code modifications: add and use the StreamPipe class in the provider streaming loops
4. Add the flask library to requirements
parent 643450388a
commit 7706b88f03
14 changed files with 213 additions and 4 deletions
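The new utils/stream_pipe.py itself does not appear in the hunks below; the diff only confirms the class name StreamPipe and its set_message() call. A minimal sketch of what such a helper could look like, assuming it wraps multiprocessing.Pipe so that a separate consumer (such as the Flask demo) can receive chunks; get_message() and the constructor are assumptions, not the file's actual contents:

# Hypothetical sketch of utils/stream_pipe.py; only StreamPipe.set_message()
# is confirmed by the hunks below, everything else is an assumption.
from multiprocessing import Pipe


class StreamPipe:
    def __init__(self):
        # parent_conn stays with the producer (the provider streaming loop);
        # child_conn goes to the consumer (e.g. a web request handler).
        self.parent_conn, self.child_conn = Pipe()

    def set_message(self, msg: str) -> None:
        # Producer side: push one streamed chunk.
        self.parent_conn.send(msg)

    def get_message(self, timeout: float = 3.0) -> str:
        # Consumer side: wait up to `timeout` seconds for the next chunk;
        # return an empty string when nothing arrives in time.
        if self.child_conn.poll(timeout):
            return self.child_conn.recv()
        return ""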
@@ -62,6 +62,9 @@ class AnthropicLLM(BaseLLM):
             elif event_type == "content_block_delta":
                 content = event.delta.text
                 log_llm_stream(content)
+                if self.stream_pipe:
+                    self.stream_pipe.set_message(content)
+
                 collected_content.append(content)
             elif event_type == "message_delta":
                 usage.output_tokens = event.usage.output_tokens  # update final output_tokens
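The Anthropic hunk above and every provider hunk below apply the same guard immediately after the existing log_llm_stream() call. Extracted as plain code, the shared pattern is:

log_llm_stream(content)
# Forward the chunk to the optional pipe so an external consumer sees tokens
# as they arrive; this is a no-op when no pipe is attached.
if self.stream_pipe:
    self.stream_pipe.set_message(content)
collected_content.append(content)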
@@ -28,6 +28,7 @@ from metagpt.logs import logger
 from metagpt.schema import Message
 from metagpt.utils.common import log_and_reraise
 from metagpt.utils.cost_manager import CostManager, Costs
+from metagpt.utils.stream_pipe import StreamPipe


 class BaseLLM(ABC):
@@ -42,6 +43,7 @@ class BaseLLM(ABC):
     cost_manager: Optional[CostManager] = None
     model: Optional[str] = None  # deprecated
     pricing_plan: Optional[str] = None
+    stream_pipe: Optional[StreamPipe] = None

     @abstractmethod
     def __init__(self, config: LLMConfig):
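Because stream_pipe is declared on BaseLLM, every provider inherits it, and a caller can attach a pipe before running a streamed completion. A hedged usage sketch, where StreamPipe is the sketch above, `llm` stands for any provider instance (OpenAILLM, GeminiLLM, ...), and aask() stands in for whichever streamed entry point is used:

import threading

def drain(pipe):
    # Print chunks as the provider pushes them; an empty string means the
    # get_message() timeout elapsed with no data (see the sketch above).
    while True:
        chunk = pipe.get_message(timeout=5.0)
        if not chunk:
            break
        print(chunk, end="", flush=True)

pipe = StreamPipe()
llm.stream_pipe = pipe  # works for any BaseLLM subclass
threading.Thread(target=drain, args=(pipe,), daemon=True).start()
# ...then run the streamed completion, e.g. asyncio.run(llm.aask(prompt))...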
@@ -221,6 +221,8 @@ class DashScopeLLM(BaseLLM):
                 content = chunk.output.choices[0]["message"]["content"]
                 usage = dict(chunk.usage)  # each chunk has usage
                 log_llm_stream(content)
+                if self.stream_pipe:
+                    self.stream_pipe.set_message(content)
                 collected_content.append(content)
         log_llm_stream("\n")
         self._update_costs(usage)
@@ -149,6 +149,8 @@ class GeminiLLM(BaseLLM):
                 logger.warning(f"messages: {messages}\nerrors: {e}\n{BlockedPromptException(str(chunk))}")
                 raise BlockedPromptException(str(chunk))
             log_llm_stream(content)
+            if self.stream_pipe:
+                self.stream_pipe.set_message(content)
             collected_content.append(content)
         log_llm_stream("\n")

@@ -83,6 +83,8 @@ class OllamaLLM(BaseLLM):
                 content = self.get_choice_text(chunk)
                 collected_content.append(content)
                 log_llm_stream(content)
+                if self.stream_pipe:
+                    self.stream_pipe.set_message(content)
             else:
                 # stream finished
                 usage = self.get_usage(chunk)
@@ -87,6 +87,9 @@ class OpenAILLM(BaseLLM):
                 chunk.choices[0].finish_reason if chunk.choices and hasattr(chunk.choices[0], "finish_reason") else None
             )
             log_llm_stream(chunk_message)
+            if self.stream_pipe:
+                self.stream_pipe.set_message(chunk_message)
+
             collected_messages.append(chunk_message)
             if finish_reason:
                 if hasattr(chunk, "usage"):
@@ -124,6 +124,8 @@ class QianFanLLM(BaseLLM):
             content = chunk.body.get("result", "")
             usage = chunk.body.get("usage", {})
             log_llm_stream(content)
+            if self.stream_pipe:
+                self.stream_pipe.set_message(content)
             collected_content.append(content)
         log_llm_stream("\n")

@@ -73,6 +73,8 @@ class ZhiPuAILLM(BaseLLM):
             content = self.get_choice_delta_text(chunk)
             collected_content.append(content)
             log_llm_stream(content)
+            if self.stream_pipe:
+                self.stream_pipe.set_message(content)

         log_llm_stream("\n")

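The commit's samples/flask_web_api.py is also not included in these hunks. Below is a hedged sketch of how the pipe could feed a streamed Flask response, assuming the StreamPipe sketch above; the route, run_llm(), and the "[DONE]" sentinel are illustrative, not the demo's actual API:

from multiprocessing import Process

from flask import Flask, Response

app = Flask(__name__)


def run_llm(pipe, prompt):
    # In the real demo this would attach the pipe to an LLM instance
    # (llm.stream_pipe = pipe) and run a streamed completion; placeholder
    # chunks keep the sketch self-contained.
    for word in f"echo: {prompt}".split():
        pipe.set_message(word + " ")
    pipe.set_message("[DONE]")


@app.route("/stream/<prompt>")
def stream(prompt):
    pipe = StreamPipe()
    Process(target=run_llm, args=(pipe, prompt), daemon=True).start()

    def generate():
        # Relay chunks to the HTTP client until the sentinel or a timeout.
        while True:
            chunk = pipe.get_message(timeout=10.0)
            if not chunk or chunk == "[DONE]":
                break
            yield chunk

    return Response(generate(), mimetype="text/plain")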