From b2e8be2d785d5b7b8c9146a7d5888c92bb4e3cd9 Mon Sep 17 00:00:00 2001 From: shenchucheng Date: Thu, 22 Aug 2024 12:05:12 +0800 Subject: [PATCH] fix obfuscation error in base_llm.py --- metagpt/provider/base_llm.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/metagpt/provider/base_llm.py b/metagpt/provider/base_llm.py index 813e77d95..75d8bfe00 100644 --- a/metagpt/provider/base_llm.py +++ b/metagpt/provider/base_llm.py @@ -300,7 +300,6 @@ class BaseLLM(ABC): if compress_type == CompressType.NO_COMPRESS: return messages - current_token_count = 0 max_token = TOKEN_MAX.get(self.config.model, max_token) keep_token = int(max_token * threshold) compressed = [] @@ -318,7 +317,7 @@ class BaseLLM(ABC): # system_msgs = [msg for msg in messages if msg["role"] == system_msg_val] # user_assistant_msgs = [msg for msg in messages if msg["role"] != system_msg_val] compressed.extend(system_msgs) - current_token_count += self.count_tokens(system_msgs) + current_token_count = self.count_tokens(system_msgs) if compress_type in [CompressType.POST_CUT_BY_TOKEN, CompressType.POST_CUT_BY_MSG]: # Under keep_token constraint, keep as many latest messages as possible