Mirror of https://github.com/FoundationAgents/MetaGPT.git
commit e03db313a2 (parent 6d906ce1ed)

    tuning code

9 changed files with 40 additions and 39 deletions
@@ -34,8 +34,8 @@ from metagpt.utils.common import CodeParser, decode_image, log_and_reraise
 from metagpt.utils.cost_manager import CostManager
 from metagpt.utils.exceptions import handle_exception
 from metagpt.utils.token_counter import (
-    count_message_tokens,
-    count_string_tokens,
+    count_input_tokens,
+    count_output_tokens,
     get_max_completion_tokens,
     get_openrouter_tokens,
 )
@@ -252,8 +252,8 @@ class OpenAILLM(BaseLLM):
             return usage

         try:
-            usage.prompt_tokens = count_message_tokens(messages, self.pricing_plan)
-            usage.completion_tokens = count_string_tokens(rsp, self.pricing_plan)
+            usage.prompt_tokens = count_input_tokens(messages, self.pricing_plan)
+            usage.completion_tokens = count_output_tokens(rsp, self.pricing_plan)
         except Exception as e:
             logger.warning(f"usage calculation failed: {e}")

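Review note: the rename makes direction explicit (prompt side in, completion side out). As a sanity check, a minimal standalone sketch of the calculation these two hunks perform, assuming pricing_plan behaves as a plain model-name string (names taken from this diff, not repo code):

from openai.types import CompletionUsage

from metagpt.utils.token_counter import count_input_tokens, count_output_tokens


def calc_usage(messages: list[dict], rsp: str, model: str) -> CompletionUsage:
    """Recompute token usage locally when the API response does not report it."""
    usage = CompletionUsage(completion_tokens=0, prompt_tokens=0, total_tokens=0)
    usage.prompt_tokens = count_input_tokens(messages, model)  # tokens we send
    usage.completion_tokens = count_output_tokens(rsp, model)  # tokens we get back
    usage.total_tokens = usage.prompt_tokens + usage.completion_tokens
    return usage
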
@@ -13,7 +13,7 @@ def llm_output_postprocess(
     """
     default use BasePostProcessPlugin if there is not matched plugin.
     """
-    # TODO choose different model's plugin according to the model_name
+    # TODO choose different model's plugin according to the model
     postprocess_plugin = BasePostProcessPlugin()

     result = postprocess_plugin.run(output=output, schema=schema, req_key=req_key)
@@ -89,9 +89,9 @@ class RAGEmbeddingFactory(GenericFactory):
         return OllamaEmbedding(**params)

     def _try_set_model_and_batch_size(self, params: dict):
-        """Set the model_name and embed_batch_size only when they are specified."""
+        """Set the model and embed_batch_size only when they are specified."""
         if config.embedding.model:
-            params["model_name"] = config.embedding.model
+            params["model"] = config.embedding.model

         if config.embedding.embed_batch_size:
             params["embed_batch_size"] = config.embedding.embed_batch_size
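Review note: the key rename only works if every constructor the factory feeds params into (e.g. OllamaEmbedding above) accepts model rather than model_name; which keyword each llama-index embedding class takes varies by class and version, so this is worth verifying. A self-contained sketch of the new key handling, using a hypothetical stand-in for config.embedding:

from dataclasses import dataclass
from typing import Optional


@dataclass
class EmbeddingConfig:  # hypothetical stand-in for config.embedding
    model: Optional[str] = None
    embed_batch_size: Optional[int] = None


def build_params(embedding: EmbeddingConfig) -> dict:
    """Mirrors _try_set_model_and_batch_size after this commit."""
    params: dict = {}
    if embedding.model:
        params["model"] = embedding.model  # key was "model_name" before
    if embedding.embed_batch_size:
        params["embed_batch_size"] = embedding.embed_batch_size
    return params


assert build_params(EmbeddingConfig("test_model", 100)) == {"model": "test_model", "embed_batch_size": 100}
assert build_params(EmbeddingConfig()) == {}
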
@@ -10,8 +10,8 @@ from metagpt.utils.read_document import read_docx
 from metagpt.utils.singleton import Singleton
 from metagpt.utils.token_counter import (
     TOKEN_COSTS,
-    count_message_tokens,
-    count_string_tokens,
+    count_input_tokens,
+    count_output_tokens,
 )


@@ -19,6 +19,6 @@ __all__ = [
     "read_docx",
     "Singleton",
     "TOKEN_COSTS",
-    "count_message_tokens",
-    "count_string_tokens",
+    "count_input_tokens",
+    "count_output_tokens",
 ]
@@ -1,6 +1,6 @@
 from typing import Generator, Sequence

-from metagpt.utils.token_counter import TOKEN_MAX, count_string_tokens
+from metagpt.utils.token_counter import TOKEN_MAX, count_output_tokens


 def reduce_message_length(
@@ -23,9 +23,9 @@ def reduce_message_length(
     Raises:
         RuntimeError: If it fails to reduce the concatenated message length.
     """
-    max_token = TOKEN_MAX.get(model_name, 2048) - count_string_tokens(system_text, model_name) - reserved
+    max_token = TOKEN_MAX.get(model_name, 2048) - count_output_tokens(system_text, model_name) - reserved
     for msg in msgs:
-        if count_string_tokens(msg, model_name) < max_token or model_name not in TOKEN_MAX:
+        if count_output_tokens(msg, model_name) < max_token or model_name not in TOKEN_MAX:
             return msg

     raise RuntimeError("fail to reduce message length")
@@ -54,13 +54,13 @@ def generate_prompt_chunk(
     current_token = 0
     current_lines = []

-    reserved = reserved + count_string_tokens(prompt_template + system_text, model_name)
+    reserved = reserved + count_output_tokens(prompt_template + system_text, model_name)
     # 100 is a magic number to ensure the maximum context length is not exceeded
     max_token = TOKEN_MAX.get(model_name, 2048) - reserved - 100

     while paragraphs:
         paragraph = paragraphs.pop(0)
-        token = count_string_tokens(paragraph, model_name)
+        token = count_output_tokens(paragraph, model_name)
         if current_token + token <= max_token:
             current_lines.append(paragraph)
             current_token += token
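Review note: only the counter call changes in these three hunks. The renamed helper is still a general string counter, and here it counts prompt-side text, so the new name describes its main call site rather than a restriction. A hedged usage sketch of the two helpers touched, with argument order inferred from the hunks and the tests below:

from metagpt.utils.text import generate_prompt_chunk, reduce_message_length

# Pick the first candidate message that fits the model's context window.
msg = reduce_message_length(
    msgs=(draft for draft in ["long draft ...", "shorter draft", "tiny"]),
    model_name="gpt-3.5-turbo-0613",
    system_text="System",
    reserved=1500,
)

# Split a long text into prompt-sized chunks, paragraph by paragraph.
for prompt in generate_prompt_chunk(
    text="first paragraph\n\nsecond paragraph",
    prompt_template="Prompt: {}",
    model_name="gpt-3.5-turbo-0613",
    system_text="System",
    reserved=1000,
):
    print(prompt)
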
@@ -14,6 +14,7 @@ import tiktoken
 from openai.types import CompletionUsage
 from openai.types.chat import ChatCompletionChunk

+from metagpt.logs import logger
 from metagpt.utils.ahttp_client import apost

 TOKEN_COSTS = {
@@ -258,12 +259,12 @@ BEDROCK_TOKEN_COSTS = {
 }


-def count_message_tokens(messages, model="gpt-3.5-turbo-0125"):
+def count_input_tokens(messages, model="gpt-3.5-turbo-0125"):
     """Return the number of tokens used by a list of messages."""
     try:
         encoding = tiktoken.encoding_for_model(model)
     except KeyError:
-        print("Warning: model not found. Using cl100k_base encoding.")
+        logger.info(f"Warning: model {model} not found in tiktoken. Using cl100k_base encoding.")
         encoding = tiktoken.get_encoding("cl100k_base")
     if model in {
         "gpt-3.5-turbo-0613",
@@ -292,11 +293,11 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0125"):
         tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
         tokens_per_name = -1  # if there's a name, the role is omitted
     elif "gpt-3.5-turbo" == model:
-        print("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0125.")
-        return count_message_tokens(messages, model="gpt-3.5-turbo-0125")
+        logger.info("Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0125.")
+        return count_input_tokens(messages, model="gpt-3.5-turbo-0125")
     elif "gpt-4" == model:
-        print("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
-        return count_message_tokens(messages, model="gpt-4-0613")
+        logger.info("Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613.")
+        return count_input_tokens(messages, model="gpt-4-0613")
     elif "open-llm-model" == model:
         """
         For self-hosted open_llm api, they include lots of different models. The message tokens calculation is
@@ -327,21 +328,21 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0125"):
     return num_tokens


-def count_string_tokens(string: str, model_name: str) -> int:
+def count_output_tokens(string: str, model: str) -> int:
     """
     Returns the number of tokens in a text string.

     Args:
         string (str): The text string.
-        model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")
+        model (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")

     Returns:
         int: The number of tokens in the text string.
     """
     try:
-        encoding = tiktoken.encoding_for_model(model_name)
+        encoding = tiktoken.encoding_for_model(model)
     except KeyError:
-        print("Warning: model not found. Using cl100k_base encoding.")
+        logger.info(f"Warning: model {model} not found in tiktoken. Using cl100k_base encoding.")
         encoding = tiktoken.get_encoding("cl100k_base")
     return len(encoding.encode(string))

@@ -358,7 +359,7 @@ def get_max_completion_tokens(messages: list[dict], model: str, default: int) -> int:
     """
     if model not in TOKEN_MAX:
         return default
-    return TOKEN_MAX[model] - count_message_tokens(messages) - 1
+    return TOKEN_MAX[model] - count_input_tokens(messages) - 1


 async def get_openrouter_tokens(chunk: ChatCompletionChunk) -> CompletionUsage:
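Review note: a short usage sketch of the renamed pair plus get_max_completion_tokens, with the signature taken from the -358 hunk header; illustrative only, not repo code:

from metagpt.utils.token_counter import (
    count_input_tokens,
    count_output_tokens,
    get_max_completion_tokens,
)

messages = [{"role": "user", "content": "Summarize this document."}]

prompt_tokens = count_input_tokens(messages, model="gpt-3.5-turbo-0125")
reply_tokens = count_output_tokens("A short summary.", model="gpt-3.5-turbo-0125")

# Room left for the completion, given the model's context window (TOKEN_MAX).
budget = get_max_completion_tokens(messages, "gpt-3.5-turbo-0125", default=2048)
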
@@ -66,7 +66,7 @@ class TestRAGEmbeddingFactory:

     @pytest.mark.parametrize(
         "model, embed_batch_size, expected_params",
-        [("test_model", 100, {"model_name": "test_model", "embed_batch_size": 100}), (None, None, {})],
+        [("test_model", 100, {"model": "test_model", "embed_batch_size": 100}), (None, None, {})],
     )
     def test_try_set_model_and_batch_size(self, mock_config, model, embed_batch_size, expected_params):
         # Mock
@@ -20,7 +20,7 @@ def _paragraphs(n):


 @pytest.mark.parametrize(
-    "msgs, model_name, system_text, reserved, expected",
+    "msgs, model, system_text, reserved, expected",
     [
         (_msgs(), "gpt-3.5-turbo-0613", "System", 1500, 1),
         (_msgs(), "gpt-3.5-turbo-16k", "System", 3000, 6),
@@ -37,7 +37,7 @@ def test_reduce_message_length(msgs, model_name, system_text, reserved, expected):


 @pytest.mark.parametrize(
-    "text, prompt_template, model_name, system_text, reserved, expected",
+    "text, prompt_template, model, system_text, reserved, expected",
     [
         (" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-0613", "System", 1500, 2),
         (" ".join("Hello World." for _ in range(1000)), "Prompt: {}", "gpt-3.5-turbo-16k", "System", 3000, 1),
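Review note: these hunks rename only the parametrize argname strings, and the -37 hunk header still shows def test_reduce_message_length(msgs, model_name, ...). pytest requires argnames to match the test function's parameter names, so unless the signatures are also renamed in lines outside these hunks, collection will fail. Minimal illustration:

import pytest


@pytest.mark.parametrize("model", ["gpt-3.5-turbo-0613"])
def test_ok(model):  # parameter name matches the argnames string
    assert model


@pytest.mark.parametrize("model", ["gpt-3.5-turbo-0613"])
def test_broken(model_name):  # pytest error: "uses no argument 'model'"
    assert model_name
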
@@ -7,7 +7,7 @@
 """
 import pytest

-from metagpt.utils.token_counter import count_message_tokens, count_string_tokens
+from metagpt.utils.token_counter import count_input_tokens, count_output_tokens


 def test_count_message_tokens():
@@ -15,7 +15,7 @@ def test_count_message_tokens():
         {"role": "user", "content": "Hello"},
         {"role": "assistant", "content": "Hi there!"},
     ]
-    assert count_message_tokens(messages) == 15
+    assert count_input_tokens(messages) == 15


 def test_count_message_tokens_with_name():
@@ -23,12 +23,12 @@ def test_count_message_tokens_with_name():
         {"role": "user", "content": "Hello", "name": "John"},
         {"role": "assistant", "content": "Hi there!"},
     ]
-    assert count_message_tokens(messages) == 17
+    assert count_input_tokens(messages) == 17


 def test_count_message_tokens_empty_input():
     """Empty input should return 3 tokens"""
-    assert count_message_tokens([]) == 3
+    assert count_input_tokens([]) == 3


 def test_count_message_tokens_invalid_model():
@@ -38,7 +38,7 @@ def test_count_message_tokens_invalid_model():
         {"role": "assistant", "content": "Hi there!"},
     ]
     with pytest.raises(NotImplementedError):
-        count_message_tokens(messages, model="invalid_model")
+        count_input_tokens(messages, model="invalid_model")


 def test_count_message_tokens_gpt_4():
@@ -46,27 +46,27 @@ def test_count_message_tokens_gpt_4():
         {"role": "user", "content": "Hello"},
         {"role": "assistant", "content": "Hi there!"},
     ]
-    assert count_message_tokens(messages, model="gpt-4-0314") == 15
+    assert count_input_tokens(messages, model="gpt-4-0314") == 15


 def test_count_string_tokens():
     """Test that the string tokens are counted correctly."""

     string = "Hello, world!"
-    assert count_string_tokens(string, model_name="gpt-3.5-turbo-0301") == 4
+    assert count_output_tokens(string, model="gpt-3.5-turbo-0301") == 4


 def test_count_string_tokens_empty_input():
     """Test that the string tokens are counted correctly."""

-    assert count_string_tokens("", model_name="gpt-3.5-turbo-0301") == 0
+    assert count_output_tokens("", model="gpt-3.5-turbo-0301") == 0


 def test_count_string_tokens_gpt_4():
     """Test that the string tokens are counted correctly."""

     string = "Hello, world!"
-    assert count_string_tokens(string, model_name="gpt-4-0314") == 4
+    assert count_output_tokens(string, model="gpt-4-0314") == 4


 if __name__ == "__main__":
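Review note: the expected values follow the OpenAI cookbook scheme the counter implements: tokens_per_message per message (3 for the 0613/0125-era models), plus the encoded length of every field value, plus tokens_per_name when a name field is present, plus 3 priming tokens for the reply. Worked through for the 15-token case (a sketch, not repo code):

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")

messages = [
    {"role": "user", "content": "Hello"},
    {"role": "assistant", "content": "Hi there!"},
]

num_tokens = 3  # every reply is primed with <|start|>assistant<|message|>
for message in messages:
    num_tokens += 3  # tokens_per_message for gpt-3.5/gpt-4 0613+ models
    for value in message.values():
        num_tokens += len(enc.encode(value))

assert num_tokens == 15  # matches test_count_message_tokens above

The with-name case adds encode("John") (1 token) plus tokens_per_name (1), giving 17, and an empty message list leaves only the 3 priming tokens.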