Mirror of https://github.com/FoundationAgents/MetaGPT.git, synced 2026-05-15 11:02:36 +02:00

Commit aada81bfe6: fix conflicts
3 changed files with 30 additions and 5 deletions
metagpt/actions/run_code.py

@@ -16,6 +16,7 @@
class.
"""
import subprocess
from pathlib import Path
from typing import Tuple

from pydantic import Field
@@ -150,11 +151,23 @@ class RunCode(Action):
        return subprocess.run(cmd, check=check, cwd=cwd, env=env)

    @staticmethod
-   def _install_dependencies(working_directory, env):
+   def _install_requirements(working_directory, env):
        file_path = Path(working_directory) / "requirements.txt"
        if not file_path.exists():
            return
        if file_path.stat().st_size == 0:
            return
        install_command = ["python", "-m", "pip", "install", "-r", "requirements.txt"]
        logger.info(" ".join(install_command))
        RunCode._install_via_subprocess(install_command, check=True, cwd=working_directory, env=env)

    @staticmethod
    def _install_pytest(working_directory, env):
        install_pytest_command = ["python", "-m", "pip", "install", "pytest"]
        logger.info(" ".join(install_pytest_command))
        RunCode._install_via_subprocess(install_pytest_command, check=True, cwd=working_directory, env=env)

    @staticmethod
    def _install_dependencies(working_directory, env):
        RunCode._install_requirements(working_directory, env)
        RunCode._install_pytest(working_directory, env)
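Note: the refactor splits dependency installation into two helpers composed by _install_dependencies. A minimal standalone sketch of the same pattern, assuming plain logging in place of MetaGPT's logger and sys.executable in place of a bare "python" (both substitutions are this sketch's, not the diff's):

    import logging
    import subprocess
    import sys
    from pathlib import Path

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    def install_requirements(working_directory, env=None):
        # Skip when requirements.txt is absent or empty, mirroring the guards above.
        file_path = Path(working_directory) / "requirements.txt"
        if not file_path.exists() or file_path.stat().st_size == 0:
            return
        cmd = [sys.executable, "-m", "pip", "install", "-r", "requirements.txt"]
        logger.info(" ".join(cmd))
        subprocess.run(cmd, check=True, cwd=working_directory, env=env)

    def install_pytest(working_directory, env=None):
        # Ensure pytest is importable before the generated tests run.
        cmd = [sys.executable, "-m", "pip", "install", "pytest"]
        logger.info(" ".join(cmd))
        subprocess.run(cmd, check=True, cwd=working_directory, env=env)

    def install_dependencies(working_directory, env=None):
        # Compose the two installers, as the new _install_dependencies does.
        install_requirements(working_directory, env)
        install_pytest(working_directory, env)

Keeping check=True means a failed pip install raises CalledProcessError rather than letting the test run proceed against missing dependencies.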
metagpt/roles/qa_engineer.py

@@ -57,6 +57,8 @@ class QaEngineer(Role):
            code_doc = await src_file_repo.get(filename)
            if not code_doc:
                continue
            if not code_doc.filename.endswith(".py"):
                continue
            test_doc = await self.project_repo.tests.get("test_" + code_doc.filename)
            if not test_doc:
                test_doc = Document(
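The guards skip source entries that cannot yield a test: missing documents and non-Python files. A small sketch of the same filtering loop, with a hypothetical in-memory repo standing in for src_file_repo (all names here are illustrative only):

    import asyncio

    class FakeRepo:
        # Hypothetical stand-in: maps filename -> document content (or None).
        def __init__(self, docs):
            self._docs = docs

        async def get(self, filename):
            return self._docs.get(filename)

    async def collect_testable(src_repo, filenames):
        testable = []
        for filename in filenames:
            code_doc = await src_repo.get(filename)
            if not code_doc:  # missing document: skip, as the guard above does
                continue
            if not filename.endswith(".py"):  # only Python sources get tests
                continue
            testable.append(filename)
        return testable

    repo = FakeRepo({"main.py": "print('hi')", "notes.md": "todo"})
    print(asyncio.run(collect_testable(repo, ["main.py", "notes.md", "gone.py"])))
    # ['main.py']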
metagpt/utils/token_counter.py

@@ -4,10 +4,11 @@
@Time : 2023/5/18 00:40
@Author : alexanderwu
@File : token_counter.py
-ref1: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
-ref2: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/llm/token_counter.py
-ref3: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py
-ref4: https://ai.google.dev/models/gemini
+ref1: https://openai.com/pricing
+ref2: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+ref3: https://github.com/Significant-Gravitas/Auto-GPT/blob/master/autogpt/llm/token_counter.py
+ref4: https://github.com/hwchase17/langchain/blob/master/langchain/chat_models/openai.py
+ref5: https://ai.google.dev/models/gemini
"""
import tiktoken
@@ -25,7 +26,10 @@ TOKEN_COSTS = {
    "gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
    "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
    "gpt-4-0613": {"prompt": 0.06, "completion": 0.12},
    "gpt-4-turbo-preview": {"prompt": 0.01, "completion": 0.03},
    "gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
    "gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03},
    "gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
    "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
    "glm-3-turbo": {"prompt": 0.0, "completion": 0.0007},  # 128k version, prompt + completion tokens=0.005¥/k-tokens
    "glm-4": {"prompt": 0.0, "completion": 0.014},  # 128k version, prompt + completion tokens=0.1¥/k-tokens
@@ -47,7 +51,10 @@ TOKEN_MAX = {
    "gpt-4-32k": 32768,
    "gpt-4-32k-0314": 32768,
    "gpt-4-0613": 8192,
    "gpt-4-turbo-preview": 128000,
    "gpt-4-0125-preview": 128000,
    "gpt-4-1106-preview": 128000,
    "gpt-4-1106-vision-preview": 128000,
    "text-embedding-ada-002": 8192,
    "chatglm_turbo": 32768,
    "gemini-pro": 32768,
@@ -72,7 +79,10 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0613"):
        "gpt-4-32k-0314",
        "gpt-4-0613",
        "gpt-4-32k-0613",
        "gpt-4-turbo-preview",
        "gpt-4-0125-preview",
        "gpt-4-1106-preview",
        "gpt-4-1106-vision-preview",
    }:
        tokens_per_message = 3  # every reply is primed with <|start|>assistant<|message|>
        tokens_per_name = 1
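These constants follow the OpenAI cookbook recipe: each message carries a fixed 3-token overhead, a name field costs 1 extra token, and the reply is primed with 3 more. A self-contained sketch of that counting scheme (not MetaGPT's count_message_tokens, which also dispatches on the model argument):

    import tiktoken  # pip install tiktoken

    def count_tokens_sketch(messages):
        encoding = tiktoken.get_encoding("cl100k_base")
        tokens_per_message = 3
        tokens_per_name = 1
        num_tokens = 0
        for message in messages:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name
        num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
        return num_tokens

    print(count_tokens_sketch([{"role": "user", "content": "hello"}]))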