From 23fa79289529d8449a274a04755b9c4e6e2bef27 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=8E=98=E6=9D=83=20=E9=A9=AC?=
Date: Mon, 29 Jan 2024 23:13:54 +0800
Subject: [PATCH 1/4] fixbug: IndexableDocument.from_path error

---
 metagpt/document.py | 8 ++++++--
 setup.py            | 2 +-
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/metagpt/document.py b/metagpt/document.py
index f4fa0a489..be80f0c71 100644
--- a/metagpt/document.py
+++ b/metagpt/document.py
@@ -20,6 +20,7 @@ from langchain.text_splitter import CharacterTextSplitter
 from pydantic import BaseModel, ConfigDict, Field
 from tqdm import tqdm
 
+from metagpt.logs import logger
 from metagpt.repo_parser import RepoParser
 
 
@@ -130,9 +131,12 @@ class IndexableDocument(Document):
         if isinstance(data, pd.DataFrame):
             validate_cols(content_col, data)
             return cls(data=data, content=str(data), content_col=content_col, meta_col=meta_col)
-        else:
+        try:
             content = data_path.read_text()
-            return cls(data=data, content=content, content_col=content_col, meta_col=meta_col)
+        except Exception as e:
+            logger.debug(f"Load {str(data_path)} error: {e}")
+            content = ""
+        return cls(data=data, content=content, content_col=content_col, meta_col=meta_col)
 
     def _get_docs_and_metadatas_by_df(self) -> (list, list):
         df = self.data
diff --git a/setup.py b/setup.py
index cc8112ba9..2b16e2cd1 100644
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@ extras_require["dev"] = (["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pr
 
 setup(
     name="metagpt",
-    version="0.6.6",
+    version="0.6.7",
     description="The Multi-Agent Framework",
     long_description=long_description,
     long_description_content_type="text/markdown",

From bc5a509136bd372e4978f9f1529afe788c1d0ccf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=8E=98=E6=9D=83=20=E9=A9=AC?=
Date: Wed, 31 Jan 2024 16:34:06 +0800
Subject: [PATCH 2/4] fixbug: llm not answering the question feat: ver+

---
 metagpt/actions/skill_action.py |  9 +++++----
 metagpt/memory/brain_memory.py  | 39 +++++++++++++++++++++++------------------
 setup.py                        |  2 +-
 3 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/metagpt/actions/skill_action.py b/metagpt/actions/skill_action.py
index 301cebaab..5af2b3fc4 100644
--- a/metagpt/actions/skill_action.py
+++ b/metagpt/actions/skill_action.py
@@ -29,9 +29,7 @@ class ArgumentsParingAction(Action):
 
     @property
     def prompt(self):
-        prompt = "You are a function parser. You can convert spoken words into function parameters.\n"
-        prompt += "\n---\n"
-        prompt += f"{self.skill.name} function parameters description:\n"
+        prompt = f"{self.skill.name} function parameters description:\n"
         for k, v in self.skill.arguments.items():
             prompt += f"parameter `{k}`: {v}\n"
         prompt += "\n---\n"
@@ -49,7 +47,10 @@ class ArgumentsParingAction(Action):
 
     async def run(self, with_message=None, **kwargs) -> Message:
         prompt = self.prompt
-        rsp = await self.llm.aask(msg=prompt, system_msgs=[])
+        rsp = await self.llm.aask(
+            msg=prompt,
+            system_msgs=["You are a function parser. You can convert spoken words into function parameters."],
+        )
         logger.debug(f"SKILL:{prompt}\n, RESULT:{rsp}")
         self.args = ArgumentsParingAction.parse_arguments(skill_name=self.skill.name, txt=rsp)
         self.rsp = Message(content=rsp, role="assistant", instruct_content=self.args, cause_by=self)
diff --git a/metagpt/memory/brain_memory.py b/metagpt/memory/brain_memory.py
index ff29eaddb..384c4d3a9 100644
--- a/metagpt/memory/brain_memory.py
+++ b/metagpt/memory/brain_memory.py
@@ -201,11 +201,14 @@ class BrainMemory(BaseModel):
 
     @staticmethod
     async def _openai_is_related(text1, text2, llm, **kwargs):
-        command = (
-            f"{text2}\n\nIs there any sentence above related to the following sentence: {text1}.\nIf is there "
-            "any relevance, return [TRUE] brief and clear. Otherwise, return [FALSE] brief and clear."
+        context = f"## Paragraph 1\n{text2}\n---\n## Paragraph 2\n{text1}\n"
+        rsp = await llm.aask(
+            msg=context,
+            system_msgs=[
+                "You are a tool capable of determining whether two paragraphs are semantically related.",
+                'Return "TRUE" if "Paragraph 1" is semantically relevant to "Paragraph 2", otherwise return "FALSE".',
+            ],
         )
-        rsp = await llm.aask(msg=command, system_msgs=[])
         result = True if "TRUE" in rsp else False
         p2 = text2.replace("\n", "")
         p1 = text1.replace("\n", "")
@@ -223,12 +226,16 @@ class BrainMemory(BaseModel):
 
     @staticmethod
     async def _openai_rewrite(sentence: str, context: str, llm):
-        command = (
-            f"{context}\n\nExtract relevant information from every preceding sentence and use it to succinctly "
-            f"supplement or rewrite the following text in brief and clear:\n{sentence}"
+        prompt = f"## Context\n{context}\n---\n## Sentence\n{sentence}\n"
+        rsp = await llm.aask(
+            msg=prompt,
+            system_msgs=[
+                'You are a tool augmenting the "Sentence" with information from the "Context".',
+                "Do not supplement the context with information that is not present, especially regarding the subject and object.",
+                "Return the augmented sentence.",
+            ],
         )
-        rsp = await llm.aask(msg=command, system_msgs=[])
-        logger.info(f"REWRITE:\nCommand: {command}\nRESULT: {rsp}\n")
+        logger.info(f"REWRITE:\nCommand: {prompt}\nRESULT: {rsp}\n")
         return rsp
 
     @staticmethod
@@ -293,14 +300,14 @@ class BrainMemory(BaseModel):
         """Generate text summary"""
         if len(text) < max_words:
             return text
+        system_msgs = [
+            "You are a tool for summarizing and abstracting text.",
+            "Return the summarized text to less than 200 words.",
+        ]
         if keep_language:
-            command = f".Translate the above content into a summary of less than {max_words} words in language of the content strictly."
-        else:
-            command = f"Translate the above content into a summary of less than {max_words} words."
-        msg = text + "\n\n" + command
-        logger.debug(f"summary ask:{msg}")
-        response = await self.llm.aask(msg=msg, system_msgs=[])
-        logger.debug(f"summary rsp: {response}")
+            system_msgs.append("The generated summary should be in the same language as the original text.")
+        response = await self.llm.aask(msg=text, system_msgs=system_msgs)
+        logger.debug(f"{text}\nsummary rsp: {response}")
         return response
 
     @staticmethod
diff --git a/setup.py b/setup.py
index 2b16e2cd1..235c18f53 100644
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@ extras_require["dev"] = (["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pr
 
 setup(
     name="metagpt",
-    version="0.6.7",
+    version="0.6.8",
     description="The Multi-Agent Framework",
     long_description=long_description,
     long_description_content_type="text/markdown",

From 2689cbccc4c078debca456853cfca25c224b08e5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=8E=98=E6=9D=83=20=E9=A9=AC?=
Date: Wed, 31 Jan 2024 17:48:05 +0800
Subject: [PATCH 3/4] fixbug: max_words

---
 metagpt/memory/brain_memory.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/metagpt/memory/brain_memory.py b/metagpt/memory/brain_memory.py
index 384c4d3a9..56b13c875 100644
--- a/metagpt/memory/brain_memory.py
+++ b/metagpt/memory/brain_memory.py
@@ -302,7 +302,7 @@ class BrainMemory(BaseModel):
             return text
         system_msgs = [
             "You are a tool for summarizing and abstracting text.",
-            "Return the summarized text to less than 200 words.",
+            f"Return the summarized text to less than {max_words} words.",
         ]
         if keep_language:
             system_msgs.append("The generated summary should be in the same language as the original text.")

From 525c62b2358ba4f78819710c4f6553bb4c64038d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=8E=98=E6=9D=83=20=E9=A9=AC?=
Date: Thu, 1 Feb 2024 15:23:24 +0800
Subject: [PATCH 4/4] fixbug: METAGPT model is None fixbug: add METAGPT model calc usage logic

---
 metagpt/provider/metagpt_api.py | 20 +++++++++++++++++++-
 metagpt/utils/cost_manager.py   |  2 ++
 setup.py                        |  2 +-
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/metagpt/provider/metagpt_api.py b/metagpt/provider/metagpt_api.py
index 69aa7f305..f3dd14bbc 100644
--- a/metagpt/provider/metagpt_api.py
+++ b/metagpt/provider/metagpt_api.py
@@ -5,12 +5,30 @@
 @File : metagpt_api.py
 @Desc : MetaGPT LLM provider.
 """
-from metagpt.config import LLMProviderEnum
+from openai.types import CompletionUsage
+
+from metagpt.config import CONFIG, LLMProviderEnum
 from metagpt.provider import OpenAILLM
 from metagpt.provider.llm_provider_registry import register_provider
+from metagpt.utils.exceptions import handle_exception
 
 
 @register_provider(LLMProviderEnum.METAGPT)
 class MetaGPTLLM(OpenAILLM):
     def __init__(self):
         super().__init__()
+        self.model = CONFIG.DEPLOYMENT_NAME
+
+    def _calc_usage(self, messages: list[dict], rsp: str) -> CompletionUsage:
+        usage = CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0)
+        if not CONFIG.calc_usage:
+            return usage
+
+        # The current billing is based on usage frequency. If there is a future billing logic based on the
+        # number of tokens, please refine the logic here accordingly.
+
+        return usage
+
+    @handle_exception
+    def _update_costs(self, usage: CompletionUsage):
+        pass
diff --git a/metagpt/utils/cost_manager.py b/metagpt/utils/cost_manager.py
index ce53f2285..e6d51e641 100644
--- a/metagpt/utils/cost_manager.py
+++ b/metagpt/utils/cost_manager.py
@@ -39,6 +39,8 @@ class CostManager(BaseModel):
             completion_tokens (int): The number of tokens used in the completion.
             model (str): The model used for the API call.
         """
+        if prompt_tokens + completion_tokens == 0:
+            return
         self.total_prompt_tokens += prompt_tokens
         self.total_completion_tokens += completion_tokens
         cost = (
diff --git a/setup.py b/setup.py
index 235c18f53..83bdcce9a 100644
--- a/setup.py
+++ b/setup.py
@@ -57,7 +57,7 @@ extras_require["dev"] = (["pylint~=3.0.3", "black~=23.3.0", "isort~=5.12.0", "pr
 
 setup(
     name="metagpt",
-    version="0.6.8",
+    version="0.6.9",
     description="The Multi-Agent Framework",
     long_description=long_description,
     long_description_content_type="text/markdown",