diff --git a/config/config.yaml b/config/config.yaml
index 590ef2561..303f4824b 100644
--- a/config/config.yaml
+++ b/config/config.yaml
@@ -63,9 +63,9 @@ SD_T2I_API: "/sdapi/v1/txt2img"
 #PUPPETEER_CONFIG: "./config/puppeteer-config.json"
 #MMDC: "./node_modules/.bin/mmdc"
 
-### for update_costs & calc_usage
-UPDATE_COSTS: false
-CALC_USAGE: false
+
+### for calc_usage
+# CALC_USAGE: false
 
 ### for Research
 MODEL_FOR_RESEARCHER_SUMMARY: gpt-3.5-turbo
diff --git a/metagpt/config.py b/metagpt/config.py
index 792233ab2..fb1aa485c 100644
--- a/metagpt/config.py
+++ b/metagpt/config.py
@@ -76,10 +76,10 @@ class Config(metaclass=Singleton):
             logger.warning("LONG_TERM_MEMORY is True")
         self.max_budget = self._get("MAX_BUDGET", 10.0)
         self.total_cost = 0.0
-        self.puppeteer_config = self._get("PUPPETEER_CONFIG", "")
-        self.mmdc = self._get("MMDC", "mmdc")
-        self.update_costs = self._get("UPDATE_COSTS", True)
-        self.calc_usage = self._get("CALC_USAGE", True)
+
+        self.puppeteer_config = self._get("PUPPETEER_CONFIG", "")
+        self.mmdc = self._get("MMDC", "mmdc")
+        self.calc_usage = self._get("CALC_USAGE", True)
 
         self.model_for_researcher_summary = self._get("MODEL_FOR_RESEARCHER_SUMMARY")
         self.model_for_researcher_report = self._get("MODEL_FOR_RESEARCHER_REPORT")
diff --git a/metagpt/provider/openai_api.py b/metagpt/provider/openai_api.py
index b4fa8752b..79121c8de 100644
--- a/metagpt/provider/openai_api.py
+++ b/metagpt/provider/openai_api.py
@@ -226,11 +226,16 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
     def _calc_usage(self, messages: list[dict], rsp: str) -> dict:
         usage = {}
         if CONFIG.calc_usage:
-            prompt_tokens = count_message_tokens(messages, self.model)
-            completion_tokens = count_string_tokens(rsp, self.model)
-            usage['prompt_tokens'] = prompt_tokens
-            usage['completion_tokens'] = completion_tokens
-            return usage
+            try:
+                prompt_tokens = count_message_tokens(messages, self.model)
+                completion_tokens = count_string_tokens(rsp, self.model)
+                usage['prompt_tokens'] = prompt_tokens
+                usage['completion_tokens'] = completion_tokens
+                return usage
+            except Exception as e:
+                logger.error(f"usage calculation failed: {e}")
+        # disabled or failed: always return the (possibly empty) dict, never None
+        return usage
 
     async def acompletion_batch(self, batch: list[list[dict]]) -> list[dict]:
         """返回完整JSON"""
@@ -259,10 +264,13 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
         return results
 
     def _update_costs(self, usage: dict):
-        if CONFIG.update_costs:
-            prompt_tokens = int(usage['prompt_tokens'])
-            completion_tokens = int(usage['completion_tokens'])
-            self._cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
+        if CONFIG.calc_usage:
+            try:
+                prompt_tokens = int(usage['prompt_tokens'])
+                completion_tokens = int(usage['completion_tokens'])
+                self._cost_manager.update_cost(prompt_tokens, completion_tokens, self.model)
+            except Exception as e:
+                logger.error(f"updating costs failed: {e}")
 
     def get_costs(self) -> Costs:
         return self._cost_manager.get_costs()