MetaGPT/metagpt/logs.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/6/1 12:41
@Author : alexanderwu
@File : logs.py
"""
from __future__ import annotations

import asyncio
import inspect
import sys
from contextvars import ContextVar
from datetime import datetime
from functools import partial
from typing import Any

from loguru import logger as _logger
from pydantic import BaseModel, Field

from metagpt.const import METAGPT_ROOT

LLM_STREAM_QUEUE: ContextVar[asyncio.Queue] = ContextVar("llm-stream")


class ToolLogItem(BaseModel):
    type_: str = Field(alias="type", default="str", description="Data type of `value` field.")
    name: str
    value: Any


TOOL_LOG_END_MARKER = ToolLogItem(
    type="str", name="end_marker", value="\x18\x19\x1B\x18"
)  # A special log item to mark the end of a stream log
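
# Usage sketch (hypothetical values, not part of this module): `ToolLogItem` is a
# pydantic model, so the `type` alias is used in place of `type_` at construction
# time, and a consumer can compare items against TOOL_LOG_END_MARKER:
#
#     item = ToolLogItem(type="str", name="stdout", value="hello")
#     if item == TOOL_LOG_END_MARKER:
#         ...  # the tool's stream log is finished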

_print_level = "INFO"


def define_log_level(print_level="INFO", logfile_level="DEBUG", name: str = None):
    """Adjust the console and logfile levels, and name the daily log file."""
    global _print_level
    _print_level = print_level

    current_date = datetime.now()
    formatted_date = current_date.strftime("%Y%m%d")
    log_name = f"{name}_{formatted_date}" if name else formatted_date  # use `name` as a prefix for the log file

    _logger.remove()
    _logger.add(sys.stderr, level=print_level)
    _logger.add(METAGPT_ROOT / f"logs/{log_name}.txt", level=logfile_level)
    return _logger


logger = define_log_level()
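
# Usage sketch (assumed call site, not executed here): callers can reconfigure
# logging at runtime, e.g. to quiet the console while keeping a verbose logfile:
#
#     logger = define_log_level(print_level="WARNING", logfile_level="DEBUG", name="myrun")
#     logger.info("written to logs/myrun_<date>.txt but not to stderr")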


def log_llm_stream(msg):
    """Logs a message to the LLM stream.

    Args:
        msg: The message to be logged.

    Notes:
        If LLM_STREAM_QUEUE has not been set for the current context (i.e., `create_llm_stream_queue`
        has not been called), the message is not added to the LLM stream queue.
    """
    queue = get_llm_stream_queue()
    if queue:
        queue.put_nowait(msg)
    _llm_stream_log(msg)
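
# Usage sketch (hypothetical streaming loop; `chunks` stands in for an LLM
# provider's streamed deltas): each chunk goes both to the per-context queue,
# if one was created, and to `_llm_stream_log`:
#
#     for chunk in chunks:
#         log_llm_stream(chunk)
#     log_llm_stream("\n")  # the default stream logger prints without newlines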


def log_tool_output(output: ToolLogItem | list[ToolLogItem], tool_name: str = ""):
    """Interface for logging tool output; how and where output is logged can be customized via `set_tool_output_logfunc`."""
    _tool_output_log(output=output, tool_name=tool_name)


async def log_tool_output_async(output: ToolLogItem | list[ToolLogItem], tool_name: str = ""):
    """Async interface for logging tool output, used when the output contains async objects."""
    await _tool_output_log_async(output=output, tool_name=tool_name)
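
# Usage sketch (hypothetical tool): a tool reports results as ToolLogItem objects
# and signals completion with TOOL_LOG_END_MARKER:
#
#     log_tool_output(ToolLogItem(type="str", name="result", value="42"), tool_name="calculator")
#     log_tool_output(TOOL_LOG_END_MARKER, tool_name="calculator")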


async def get_human_input(prompt: str = ""):
    """Interface for getting human input; the input source can be changed via `set_human_input_func`."""
    if inspect.iscoroutinefunction(_get_human_input):
        return await _get_human_input(prompt)
    else:
        return _get_human_input(prompt)


def set_llm_stream_logfunc(func):
    global _llm_stream_log
    _llm_stream_log = func


def set_tool_output_logfunc(func):
    global _tool_output_log
    _tool_output_log = func
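
# Usage sketch (assumed customization; `buffer` is hypothetical): any callable
# taking a single message argument can replace the default stream logger:
#
#     buffer = []
#     set_llm_stream_logfunc(buffer.append)
#     log_llm_stream("chunk")  # appended to `buffer` instead of printed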


async def set_tool_output_logfunc_async(func):
    """Async version of `set_tool_output_logfunc`; note that callers must await it."""
    global _tool_output_log_async
    _tool_output_log_async = func


def set_human_input_func(func):
    global _get_human_input
    _get_human_input = func
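
# Usage sketch (hypothetical async source): the replacement may be sync or async;
# `get_human_input` awaits it only when it is a coroutine function:
#
#     async def websocket_input(prompt: str = ""):
#         ...  # e.g., request input from a connected client
#
#     set_human_input_func(websocket_input)
#     reply = await get_human_input("Proceed? ")  # inside a coroutine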


_llm_stream_log = partial(print, end="")  # default; rebound by the `_llm_stream_log` def at the bottom of this module

_tool_output_log = (
    lambda *args, **kwargs: None
)  # a dummy function to avoid errors if set_tool_output_logfunc is not called


async def _tool_output_log_async(*args, **kwargs):
    # async version of the dummy tool-output logger
    pass


def create_llm_stream_queue():
    """Creates a new LLM stream queue and sets it in the context variable.

    Returns:
        The newly created asyncio.Queue instance.
    """
    queue = asyncio.Queue()
    LLM_STREAM_QUEUE.set(queue)
    return queue


def get_llm_stream_queue():
    """Retrieves the current LLM stream queue from the context variable.

    Returns:
        The asyncio.Queue instance if set, otherwise None.
    """
    return LLM_STREAM_QUEUE.get(None)
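
# Usage sketch (assumed producer/consumer pattern): a producer calls
# `create_llm_stream_queue()` before streaming, `log_llm_stream` feeds the queue,
# and a consumer task drains it; the ContextVar keeps queues isolated per context:
#
#     async def consume():
#         queue = get_llm_stream_queue()
#         while True:
#             chunk = await queue.get()
#             ...  # forward the chunk to a websocket, UI, etc.
#
#     create_llm_stream_queue()
#     consumer = asyncio.create_task(consume())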


_get_human_input = input  # get human input from console by default


def _llm_stream_log(msg):
    """Default LLM stream logger: prints chunks without newlines when the print level is INFO.

    Being defined last, this rebinds the `_llm_stream_log` name assigned earlier in the module.
    """
    if _print_level in ["INFO"]:
        print(msg, end="")