Merge pull request #1217 from geekan/update_model_version

update openai model version
This commit is contained in:
Alexander Wu 2024-04-22 21:25:24 +08:00 committed by GitHub
commit 7bfe2b51d6
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
12 changed files with 28 additions and 36 deletions

View file

@@ -85,8 +85,8 @@ # Check https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
```yaml
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
model: "gpt-4-turbo" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
api_type: "openai" # or azure / ollama / groq etc. Check LLMType for more options
model: "gpt-4-turbo" # or gpt-3.5-turbo
base_url: "https://api.openai.com/v1" # or forward url / other llm url
api_key: "YOUR_API_KEY"
```

View file

@@ -1,17 +1,13 @@
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
api_type: "openai" # or azure / ollama / groq etc.
base_url: "YOUR_BASE_URL"
api_key: "YOUR_API_KEY"
model: "gpt-4-turbo-preview" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
model: "gpt-4-turbo" # or gpt-3.5-turbo
proxy: "YOUR_PROXY" # for LLM API requests
# timeout: 600 # Optional. If set to 0, default value is 300.
pricing_plan: "" # Optional. If invalid, it will be automatically filled in with the value of the `model`.
# Azure-exclusive pricing plan mappings
# - gpt-3.5-turbo 4k: "gpt-3.5-turbo-1106"
# - gpt-4-turbo: "gpt-4-turbo-preview"
# - gpt-4-turbo-vision: "gpt-4-vision-preview"
# - gpt-4 8k: "gpt-4"
# See for more: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
# Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
# RAG Embedding.
# For backward compatibility, if the embedding is not set and the llm's api_type is either openai or azure, the llm's config will be used.

View file

@@ -2,7 +2,7 @@
# Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
# Config Docs: https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
model: "gpt-4-turbo" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
api_type: "openai" # or azure / ollama / groq etc.
model: "gpt-4-turbo" # or gpt-3.5-turbo
base_url: "https://api.openai.com/v1" # or forward url / other llm url
api_key: "YOUR_API_KEY"

View file

@@ -1,5 +0,0 @@
llm:
api_type: openrouter
base_url: "https://openrouter.ai/api/v1"
api_key: "YOUR_API_KEY"
model: microsoft/wizardlm-2-8x22b

View file

@@ -38,9 +38,9 @@ ### Chief Evangelist (Monthly Rotation)
### FAQ
1. Code truncation/ Parsing failure:
1. Check if it's due to exceeding length. Consider using the gpt-4-turbo-preview or other long token versions.
1. Check if it's due to exceeding length. Consider using the gpt-4-turbo or other long token versions.
2. Success rate:
1. There hasn't been a quantitative analysis yet, but the success rate of code generated by gpt-4-turbo-preview is significantly higher than that of gpt-3.5-turbo.
1. There hasn't been a quantitative analysis yet, but the success rate of code generated by gpt-4-turbo is significantly higher than that of gpt-3.5-turbo.
3. Support for incremental, differential updates (if you wish to continue a half-done task):
1. There is now an experimental version. Specify `--inc --project-path "<path>"` or `--inc --project-name "<name>"` on the command line and enter the corresponding requirements to try it.
4. Can existing code be loaded?

View file

@@ -14,9 +14,9 @@ from metagpt.roles import Role
from metagpt.team import Team
gpt35 = Config.default()
gpt35.llm.model = "gpt-3.5-turbo-1106"
gpt35.llm.model = "gpt-3.5-turbo"
gpt4 = Config.default()
gpt4.llm.model = "gpt-4-1106-preview"
gpt4.llm.model = "gpt-4-turbo"
action1 = Action(config=gpt4, name="AlexSay", instruction="Express your opinion with emotion and don't repeat it")
action2 = Action(config=gpt35, name="BobSay", instruction="Express your opinion with emotion and don't repeat it")
alex = Role(name="Alex", profile="Democratic candidate", goal="Win the election", actions=[action1], watch=[action2])

View file

@@ -123,9 +123,10 @@ def startup(
DEFAULT_CONFIG = """# Full Example: https://github.com/geekan/MetaGPT/blob/main/config/config2.example.yaml
# Reflected Code: https://github.com/geekan/MetaGPT/blob/main/metagpt/config2.py
# Config Docs: https://docs.deepwisdom.ai/main/en/guide/get_started/configuration.html
llm:
api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options
model: "gpt-4-turbo" # or gpt-3.5-turbo-1106 / gpt-4-1106-preview
api_type: "openai" # or azure / ollama / groq etc.
model: "gpt-4-turbo" # or gpt-3.5-turbo
base_url: "https://api.openai.com/v1" # or forward url / other llm url
api_key: "YOUR_API_KEY"
"""

View file

@@ -32,9 +32,9 @@ TOKEN_COSTS = {
"gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
"gpt-4-0613": {"prompt": 0.06, "completion": 0.12},
"gpt-4-turbo-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-turbo": {"prompt": 0.01, "completion": 0.03},
"gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-1106-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-0125-preview": {"prompt": 0.01, "completion": 0.03},
"gpt-4-turbo": {"prompt": 0.01, "completion": 0.03},
"gpt-4-vision-preview": {"prompt": 0.01, "completion": 0.03}, # TODO add extra image price calculator
"gpt-4-1106-vision-preview": {"prompt": 0.01, "completion": 0.03},
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
@@ -157,8 +157,8 @@ FIREWORKS_GRADE_TOKEN_COSTS = {
TOKEN_MAX = {
"gpt-4-0125-preview": 128000,
"gpt-4-turbo-preview": 128000,
"gpt-4-turbo": 128000,
"gpt-4-1106-preview": 128000,
"gpt-4-turbo": 128000,
"gpt-4-vision-preview": 128000,
"gpt-4-1106-vision-preview": 128000,
"gpt-4": 8192,
@@ -221,7 +221,7 @@ def count_message_tokens(messages, model="gpt-3.5-turbo-0125"):
"gpt-4-turbo",
"gpt-4-turbo-preview",
"gpt-4-0125-preview",
"gpt-4-1106-preview",
"gpt-4-turbo",
"gpt-4-vision-preview",
"gpt-4-1106-vision-preview",
}:

View file

@@ -1,7 +1,7 @@
llm:
base_url: "https://api.openai.com/v1"
api_key: "sk-xxx"
model: "gpt-3.5-turbo-1106"
model: "gpt-3.5-turbo"
search:
api_type: "serpapi"

View file

@@ -105,11 +105,11 @@ def test_config_mixin_4_multi_inheritance_override_config():
async def test_config_priority():
"""If action's config is set, then its llm will be set, otherwise, it will use the role's llm"""
home_dir = Path.home() / CONFIG_ROOT
gpt4t = Config.from_home("gpt-4-1106-preview.yaml")
gpt4t = Config.from_home("gpt-4-turbo.yaml")
if not home_dir.exists():
assert gpt4t is None
gpt35 = Config.default()
gpt35.llm.model = "gpt-3.5-turbo-1106"
gpt35.llm.model = "gpt-4-turbo"
gpt4 = Config.default()
gpt4.llm.model = "gpt-4-0613"
@@ -127,8 +127,8 @@ async def test_config_priority():
env = Environment(desc="US election live broadcast")
Team(investment=10.0, env=env, roles=[A, B, C])
assert a1.llm.model == "gpt-4-1106-preview" if Path(home_dir / "gpt-4-1106-preview.yaml").exists() else "gpt-4-0613"
assert a1.llm.model == "gpt-4-turbo" if Path(home_dir / "gpt-4-turbo.yaml").exists() else "gpt-4-0613"
assert a2.llm.model == "gpt-4-0613"
assert a3.llm.model == "gpt-3.5-turbo-1106"
assert a3.llm.model == "gpt-4-turbo"
# history = await team.run(idea="Topic: climate change. Under 80 words per message.", send_to="a1", n_round=3)

View file

@@ -56,7 +56,7 @@ class TestUTWriter:
)
],
created=1706710532,
model="gpt-3.5-turbo-1106",
model="gpt-4-turbo",
object="chat.completion",
system_fingerprint="fp_04f9a1eebf",
usage=CompletionUsage(completion_tokens=35, prompt_tokens=1982, total_tokens=2017),

View file

@@ -12,11 +12,11 @@ from metagpt.utils.cost_manager import CostManager
def test_cost_manager():
cm = CostManager(total_budget=20)
cm.update_cost(prompt_tokens=1000, completion_tokens=100, model="gpt-4-1106-preview")
cm.update_cost(prompt_tokens=1000, completion_tokens=100, model="gpt-4-turbo")
assert cm.get_total_prompt_tokens() == 1000
assert cm.get_total_completion_tokens() == 100
assert cm.get_total_cost() == 0.013
cm.update_cost(prompt_tokens=100, completion_tokens=10, model="gpt-4-1106-preview")
cm.update_cost(prompt_tokens=100, completion_tokens=10, model="gpt-4-turbo")
assert cm.get_total_prompt_tokens() == 1100
assert cm.get_total_completion_tokens() == 110
assert cm.get_total_cost() == 0.0143