Mirror of https://github.com/FoundationAgents/MetaGPT.git (synced 2026-05-11 16:52:37 +02:00)
feat: Remove global configuration, enable configuration support for business isolation.
This commit is contained in:
parent d764b8e6fa
commit f45a8e5284

50 changed files with 437 additions and 278 deletions
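This change replaces the process-wide `CONFIG` singleton with a plain options dict handed to each provider instance, so every business tenant ("company") carries its own keys, model settings, and budget. A minimal before/after sketch of the call pattern, assuming the constructors shown in the diffs below (option keys and values are illustrative):

# Before: every provider implicitly read one global CONFIG.
#   api = OpenAIGPTAPI()   # keys and budget came from CONFIG
# After: each company supplies its own options dict.
company_a = {"openai_api_key": "sk-a...", "openai_api_model": "gpt-4", "max_budget": 10.0}
company_b = {"openai_api_key": "sk-b...", "openai_api_model": "gpt-4", "max_budget": 3.0}
llm_a = OpenAIGPTAPI(company_a, CostManager(company_a))  # isolated tenant A
llm_b = OpenAIGPTAPI(company_b, CostManager(company_b))  # isolated tenant B

Because nothing is shared between the two dicts, costs and budgets are tracked per company rather than per process.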
anthropic_api.py
@@ -4,17 +4,22 @@
 @Time : 2023/7/21 11:15
 @Author : Leo Xiao
 @File : anthropic_api.py
+@Modified By: mashenquan, 2023/8/20. Remove global configuration `CONFIG`, enable configuration support for business isolation;
+    Change cost control from global to company level.
 """
 
 import anthropic
 from anthropic import Anthropic
 
-from metagpt.config import CONFIG
+from metagpt.config import Config
 
 
 class Claude2:
+    def __init__(self, options=None):
+        self.options = options or Config().runtime_options
+
     def ask(self, prompt):
-        client = Anthropic(api_key=CONFIG.claude_api_key)
+        client = Anthropic(api_key=self.claude_api_key)
 
         res = client.completions.create(
             model="claude-2",
@@ -24,7 +29,7 @@ class Claude2:
         return res.completion
 
     async def aask(self, prompt):
-        client = Anthropic(api_key=CONFIG.claude_api_key)
+        client = Anthropic(api_key=self.claude_api_key)
 
         res = client.completions.create(
             model="claude-2",
@@ -32,3 +37,7 @@ class Claude2:
             max_tokens_to_sample=1000,
         )
         return res.completion
+
+    @property
+    def claude_api_key(self):
+        return self.options.get("claude_api_key")
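With the per-instance options in place, a caller can pass an explicit dict or fall back to `Config().runtime_options`. A short usage sketch under the constructor above (the key value is a placeholder, and the import path is assumed from the `@File` header):

from metagpt.provider.anthropic_api import Claude2

claude = Claude2(options={"claude_api_key": "sk-ant-..."})  # per-tenant key
print(claude.ask("Say hello"))

shared = Claude2()  # no dict given: falls back to Config().runtime_options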
openai.py
@@ -3,6 +3,8 @@
 @Time : 2023/5/5 23:08
 @Author : alexanderwu
 @File : openai.py
+@Modified By: mashenquan, 2023/8/20. Remove global configuration `CONFIG`, enable configuration support for business isolation;
+    Change cost control from global to company level.
 """
 import asyncio
 import time
@@ -12,10 +14,8 @@ import openai
 from openai.error import APIConnectionError
 from tenacity import retry, stop_after_attempt, after_log, wait_fixed, retry_if_exception_type
 
-from metagpt.config import CONFIG
 from metagpt.logs import logger
 from metagpt.provider.base_gpt_api import BaseGPTAPI
-from metagpt.utils.singleton import Singleton
 from metagpt.utils.token_counter import (
     TOKEN_COSTS,
     count_message_tokens,
@@ -56,13 +56,13 @@ class Costs(NamedTuple):
     total_budget: float
 
 
-class CostManager(metaclass=Singleton):
+class CostManager:
     """Calculates the cost of API usage."""
 
-    def __init__(self):
+    def __init__(self, options):
        self.total_prompt_tokens = 0
        self.total_completion_tokens = 0
-        self.total_cost = 0
+        self.options = options
        self.total_budget = 0
 
     def update_cost(self, prompt_tokens, completion_tokens, model):
@@ -79,10 +79,9 @@ class CostManager(metaclass=Singleton):
         cost = (prompt_tokens * TOKEN_COSTS[model]["prompt"] + completion_tokens * TOKEN_COSTS[model]["completion"]) / 1000
         self.total_cost += cost
         logger.info(
-            f"Total running cost: ${self.total_cost:.3f} | Max budget: ${CONFIG.max_budget:.3f} | "
+            f"Total running cost: ${self.total_cost:.3f} | Max budget: ${self.max_budget:.3f} | "
             f"Current cost: ${cost:.3f}, prompt_tokens: {prompt_tokens}, completion_tokens: {completion_tokens}"
         )
-        CONFIG.total_cost = self.total_cost
 
     def get_total_prompt_tokens(self):
         """
@@ -115,6 +114,18 @@ class CostManager(metaclass=Singleton):
         """Returns all costs."""
         return Costs(self.total_prompt_tokens, self.total_completion_tokens, self.total_cost, self.total_budget)
 
+    @property
+    def total_cost(self):
+        return self.options.get("total_cost", 0)
+
+    @total_cost.setter
+    def total_cost(self, v):
+        self.options["total_cost"] = v
+
+    @property
+    def max_budget(self):
+        return self.options.get("max_budget", 0)
+
 
 def log_and_reraise(retry_state):
     logger.error(f"Retry attempts exhausted. Last exception: {retry_state.outcome.exception()}")
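Since `total_cost` and `max_budget` now live in the options dict rather than on a process-wide Singleton, every component constructed with the same dict reads and writes one company-level ledger; that is the mechanism behind "change cost control from global to company level". A sketch, assuming the `CostManager` shown above and a model key present in `TOKEN_COSTS`:

opts = {"max_budget": 10.0, "total_cost": 0}
cm = CostManager(opts)
cm.update_cost(prompt_tokens=1000, completion_tokens=500, model="gpt-4")
print(opts["total_cost"])  # running total is visible to anyone holding `opts`

Note that `self.total_cost += cost` in `update_cost` works through the property pair: the getter reads `options["total_cost"]` and the setter writes it back.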
@@ -130,22 +141,23 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
     Check https://platform.openai.com/examples for examples
     """
 
-    def __init__(self):
-        self.__init_openai(CONFIG)
+    def __init__(self, options, cost_manager):
+        self._options = options
+        self.__init_openai()
         self.llm = openai
-        self.model = CONFIG.openai_api_model
+        self.model = self.openai_api_model
         self.auto_max_tokens = False
-        self._cost_manager = CostManager()
+        self._cost_manager = cost_manager
         RateLimiter.__init__(self, rpm=self.rpm)
 
-    def __init_openai(self, config):
-        openai.api_key = config.openai_api_key
-        if config.openai_api_base:
-            openai.api_base = config.openai_api_base
-        if config.openai_api_type:
-            openai.api_type = config.openai_api_type
-            openai.api_version = config.openai_api_version
-        self.rpm = int(config.get("RPM", 10))
+    def __init_openai(self):
+        openai.api_key = self.openai_api_key
+        if self.openai_api_base:
+            openai.api_base = self.openai_api_base
+        if self.openai_api_type:
+            openai.api_type = self.openai_api_type
+            openai.api_version = self.openai_api_version
+        self.rpm = int(self._options.get("RPM", 10))
 
     async def _achat_completion_stream(self, messages: list[dict]) -> str:
         response = await openai.ChatCompletion.acreate(**self._cons_kwargs(messages), stream=True)
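`OpenAIGPTAPI` now takes both dependencies explicitly, so the wiring is visible at the call site instead of hidden in module globals. A construction sketch using the option keys that appear in this diff (values are placeholders):

opts = {
    "openai_api_key": "sk-...",
    "openai_api_model": "gpt-4",
    "max_tokens_rsp": 2048,
    "calc_usage": True,
    "RPM": 10,
}
api = OpenAIGPTAPI(opts, CostManager(opts))  # one shared dict per company

Passing the same `opts` to both objects keeps the provider's spending and the cost ledger in the same tenant scope.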
@@ -168,9 +180,9 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
         return full_reply_content
 
     def _cons_kwargs(self, messages: list[dict]) -> dict:
-        if CONFIG.openai_api_type == "azure":
+        if self._options.get("openai_api_type") == "azure":
             kwargs = {
-                "deployment_id": CONFIG.deployment_id,
+                "deployment_id": self._options.get("deployment_id"),
                 "messages": messages,
                 "max_tokens": self.get_max_tokens(messages),
                 "n": 1,
@@ -225,7 +237,7 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
 
     def _calc_usage(self, messages: list[dict], rsp: str) -> dict:
         usage = {}
-        if CONFIG.calc_usage:
+        if self._options.get("calc_usage"):
             try:
                 prompt_tokens = count_message_tokens(messages, self.model)
                 completion_tokens = count_string_tokens(rsp, self.model)
@@ -264,7 +276,7 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
         return results
 
     def _update_costs(self, usage: dict):
-        if CONFIG.calc_usage:
+        if self._options.get("calc_usage"):
             try:
                 prompt_tokens = int(usage['prompt_tokens'])
                 completion_tokens = int(usage['completion_tokens'])
@@ -277,5 +289,25 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
 
     def get_max_tokens(self, messages: list[dict]):
         if not self.auto_max_tokens:
-            return CONFIG.max_tokens_rsp
-        return get_max_completion_tokens(messages, self.model, CONFIG.max_tokens_rsp)
+            return self._options.get("max_tokens_rsp")
+        return get_max_completion_tokens(messages, self.model, self._options.get("max_tokens_rsp"))
+
+    @property
+    def openai_api_model(self):
+        return self._options.get("openai_api_model")
+
+    @property
+    def openai_api_key(self):
+        return self._options.get("openai_api_key")
+
+    @property
+    def openai_api_base(self):
+        return self._options.get("openai_api_base")
+
+    @property
+    def openai_api_type(self):
+        return self._options.get("openai_api_type")
+
+    @property
+    def openai_api_version(self):
+        return self._options.get("openai_api_version")
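Because these properties read the options dict on every access, most settings can be adjusted at runtime by mutating the shared dict; only values captured once in `__init__` (such as `self.model`) are fixed at construction. A hypothetical example:

opts["max_tokens_rsp"] = 512  # later completions are capped at 512 tokens
opts["calc_usage"] = False    # disables usage/cost accounting for this tenant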