Merge branch 'geekan:main' into main

This commit is contained in:
Kobayashi 2023-07-09 18:16:25 +08:00 committed by GitHub
commit 559a5f4ef9
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 51 additions and 133 deletions

View file

@ -1,23 +1,33 @@
# DO NOT MODIFY THIS FILE, create a new key.yaml, define OPENAI_API_KEY.
# The configuration of key.yaml has a higher priority and will not enter git
#### if OpenAI
OPENAI_API_KEY: "YOUR_API_KEY"
#OPENAI_API_BASE: "YOUR_API_BASE"
OPENAI_API_MODEL: "gpt-4"
MAX_TOKENS: 1500
RPM: 10
#### if AZURE, check https://github.com/openai/openai-cookbook/blob/main/examples/azure/chat.ipynb
#OPENAI_API_TYPE: "azure"
#OPENAI_API_BASE: "YOUR_AZURE_ENDPOINT"
#OPENAI_API_KEY: "YOUR_AZURE_API_KEY"
#OPENAI_API_VERSION: "YOUR_AZURE_API_VERSION"
#DEPLOYMENT_ID: "YOUR_DEPLOYMENT_ID"
#### for Search
## Visit https://serpapi.com/ to get key.
#SERPAPI_API_KEY: "YOUR_API_KEY"
#
## Visit https://console.cloud.google.com/apis/credentials to get key.
#GOOGLE_API_KEY: "YOUR_API_KEY"
## Visit https://programmablesearchengine.google.com/controlpanel/create to get id.
#GOOGLE_CSE_ID: "YOUR_CSE_ID"
#
#AZURE_OPENAI_KEY: "YOUR_API_KEY"
#AZURE_OPENAI_ENDPOINT: "YOUR_API_BASE"
#AZURE_DEPLOYMENT_NAME: "gpt-35"
#AZURE_OPENAI_API_VERSION: "2023-03-15-preview"
#
#### for TTS
#AZURE_TTS_SUBSCRIPTION_KEY: "YOUR_API_KEY"
#AZURE_TTS_REGION: "eastus"

View file

@ -52,6 +52,7 @@ class Config(metaclass=Singleton):
self.openai_api_rpm = self._get('RPM', 3)
self.openai_api_model = self._get('OPENAI_API_MODEL', "gpt-4")
self.max_tokens_rsp = self._get('MAX_TOKENS', 2048)
self.deployment_id = self._get('DEPLOYMENT_ID')
self.serpapi_api_key = self._get('SERPAPI_API_KEY')
self.google_api_key = self._get('GOOGLE_API_KEY')

View file

@ -14,7 +14,7 @@ def get_project_root():
while True:
if (current_path / '.git').exists() or \
(current_path / '.project_root').exists() or \
(current_path.name.lower() == 'metagpt'):
(current_path / '.gitignore').exists():
return current_path
parent_path = current_path.parent
if parent_path == current_path:

View file

@ -6,5 +6,4 @@
@File : __init__.py
"""
from metagpt.provider.openai_api import OpenAIGPTAPI
from metagpt.provider.azure_api import AzureGPTAPI
from metagpt.provider.openai_api import OpenAIGPTAPI

View file

@ -1,77 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/5 23:55
@Author : alexanderwu
@File : azure_api.py
"""
import json
import requests
from metagpt.logs import logger
import openai
from metagpt.provider.openai_api import OpenAIGPTAPI
from metagpt.config import Config
class AzureGPTAPI(OpenAIGPTAPI):
    """GPT access through the Azure OpenAI REST interface (requires a separate Azure application).

    # FIXME: Azure addresses a model by engine (deployment_name), whereas the base class uses model
    - Model deployment: https://oai.azure.com/portal/deployment
    - Python code example: https://learn.microsoft.com/zh-cn/azure/cognitive-services/openai/chatgpt-quickstart?pivots=programming-language-python&tabs=command-line
    - endpoint https://deepwisdom-openai.openai.azure.com/
    """

    def __init__(self):
        super().__init__()
        config = self.config
        # NOTE(review): Config elsewhere in this project exposes `_get`, not `get` —
        # confirm the object assigned to self.config by the base class supports .get().
        self.api_key = config.get("AZURE_OPENAI_KEY")
        self.base_url = config.get("AZURE_OPENAI_ENDPOINT")
        self.deployment_name = config.get("AZURE_DEPLOYMENT_NAME")
        self.api_version = config.get("AZURE_OPENAI_API_VERSION")
        self.api_type = "azure"

    def completion(self, messages: list[dict]) -> dict:
        """POST the chat history to the Azure chat-completions endpoint and return the parsed JSON.

        :param messages: conversation history; each dict records what one role said
        :return: the decoded Azure response, for example:
            {
              "id": "ID of your call",
              "object": "text_completion",
              "created": 1675444965,
              "model": "text-davinci-002",
              "choices": [
                {"text": "...", "index": 0, "finish_reason": "length", "logprobs": null}
              ],
              "usage": {"completion_tokens": 16, "prompt_tokens": 3, "total_tokens": 19}
            }
        """
        # Azure routes by deployment name and requires api-version in the query string.
        url = (self.base_url + "/openai/deployments/" + self.deployment_name
               + "/chat/completions?api-version=" + self.api_version)
        payload = {"messages": messages}
        rsp = requests.post(url,
                            headers={"api-key": self.api_key, "Content-Type": "application/json"},
                            json=payload,
                            timeout=60)
        # Removed: an unused `formatted_response` pretty-print local and its
        # commented-out logger call — both dead code.
        return json.loads(rsp.text)

    def get_choice_text(self, rsp: dict) -> str:
        """Return the text content of the first choice's message."""
        return rsp.get("choices")[0]["message"]['content']

View file

@ -13,7 +13,7 @@ import openai
from metagpt.logs import logger
from metagpt.provider.base_gpt_api import BaseGPTAPI
from metagpt.config import Config
from metagpt.config import CONFIG
from metagpt.utils.singleton import Singleton
from metagpt.utils.token_counter import count_message_tokens, TOKEN_COSTS, count_string_tokens
@ -69,7 +69,6 @@ class CostManager(metaclass=Singleton):
self.total_completion_tokens = 0
self.total_cost = 0
self.total_budget = 0
self.config = Config()
def update_cost(self, prompt_tokens, completion_tokens, model):
"""
@ -87,9 +86,9 @@ class CostManager(metaclass=Singleton):
+ completion_tokens * TOKEN_COSTS[model]["completion"]
) / 1000
self.total_cost += cost
logger.info(f"Total running cost: ${self.total_cost:.3f} | Max budget: ${self.config.max_budget:.3f} | "
logger.info(f"Total running cost: ${self.total_cost:.3f} | Max budget: ${CONFIG.max_budget:.3f} | "
f"Current cost: ${cost:.3f}, {prompt_tokens=}, {completion_tokens=}")
self.config.total_cost = self.total_cost
CONFIG.total_cost = self.total_cost
def get_total_prompt_tokens(self):
"""
@ -128,10 +127,9 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
Check https://platform.openai.com/examples for examples
"""
def __init__(self):
self.config = Config()
self.__init_openai(self.config)
self.__init_openai(CONFIG)
self.llm = openai
self.model = self.config.openai_api_model
self.model = CONFIG.openai_api_model
self._cost_manager = CostManager()
RateLimiter.__init__(self, rpm=self.rpm)
@ -146,12 +144,7 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
async def _achat_completion_stream(self, messages: list[dict]) -> str:
response = await openai.ChatCompletion.acreate(
model=self.model,
messages=messages,
max_tokens=self.config.max_tokens_rsp,
n=1,
stop=None,
temperature=0,
**self._cons_kwargs(messages),
stream=True
)
@ -172,27 +165,34 @@ class OpenAIGPTAPI(BaseGPTAPI, RateLimiter):
self._update_costs(usage)
return full_reply_content
def _cons_kwargs(self, messages: list[dict]) -> dict:
    """Build the keyword arguments for a ChatCompletion call.

    The Azure API addresses a model via `deployment_id`, the public OpenAI
    API via `model`; every other sampling parameter is identical, so the
    shared dict is built once instead of duplicating it per branch.

    :param messages: conversation history passed straight through to the API
    :return: kwargs ready to splat into ChatCompletion.create/acreate
    """
    kwargs = {
        "messages": messages,
        "max_tokens": CONFIG.max_tokens_rsp,
        "n": 1,
        "stop": None,
        "temperature": 0.5,
    }
    if CONFIG.openai_api_type == 'azure':
        kwargs["deployment_id"] = CONFIG.deployment_id
    else:
        kwargs["model"] = self.model
    return kwargs
async def _achat_completion(self, messages: list[dict]) -> dict:
    """Asynchronously request one chat completion and record its token cost.

    The diff residue in this span kept both the pre-refactor inline-kwargs
    call and the post-refactor `_cons_kwargs` call; only the latter belongs.

    :param messages: conversation history for the API
    :return: the raw ChatCompletion response object
    """
    rsp = await self.llm.ChatCompletion.acreate(**self._cons_kwargs(messages))
    self._update_costs(rsp.get('usage'))
    return rsp
def _chat_completion(self, messages: list[dict]) -> dict:
    """Synchronously request one chat completion and record its token cost.

    The diff residue in this span kept both the pre-refactor inline-kwargs
    call and the post-refactor `_cons_kwargs` call; only the latter belongs.

    :param messages: conversation history for the API
    :return: the raw ChatCompletion response object
    """
    rsp = self.llm.ChatCompletion.create(**self._cons_kwargs(messages))
    # NOTE(review): the async sibling passes rsp.get('usage') here, this one
    # passes the whole response — confirm which shape _update_costs expects.
    self._update_costs(rsp)
    return rsp

View file

@ -1,15 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2023/5/16 10:12
@Author : alexanderwu
@File : test_azure_gpt_api.py
"""
from metagpt.provider import AzureGPTAPI
def test_azure_gpt_api():
    """Smoke test: the Azure provider answers a prompt with a non-empty reply."""
    api = AzureGPTAPI()
    reply = api.ask('hello')
    assert len(reply) > 0