Fix multi-LLM bug: `Role._init_action` must check `action.private_llm` (whether the action carries its own dedicated LLM), not `action.private_config`, when deciding whether the role's LLM overrides the action's.

This commit is contained in:
zhaowg3 2024-11-01 10:41:21 +08:00
parent 0f27029b2c
commit 6d1ab98c41
4 changed files with 81 additions and 2 deletions

View file

@ -49,7 +49,6 @@ class Action(SerializationMixin, ContextMixin, BaseModel):
llm = create_llm_instance(config)
llm.cost_manager = data.llm.cost_manager
data.llm = llm
data.config = config # if not set self.config, self.llm will be reset when you call Role.set_actions function
return data
@property

View file

@ -247,7 +247,7 @@ class Role(SerializationMixin, ContextMixin, BaseModel):
return self
def _init_action(self, action: Action):
if not action.private_config:
if not action.private_llm:
action.set_llm(self.llm, override=True)
else:
action.set_llm(self.llm, override=False)

View file

@ -0,0 +1,27 @@
llm:
api_type: "openai" # or azure / ollama / groq etc.
base_url: "YOUR_gpt-3.5-turbo_BASE_URL"
api_key: "YOUR_gpt-3.5-turbo_API_KEY"
model: "gpt-3.5-turbo" # or gpt-4-turbo
# proxy: "YOUR_gpt-3.5-turbo_PROXY" # for LLM API requests
# timeout: 600 # Optional. If set to 0, default value is 300.
# Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
models:
"YOUR_MODEL_NAME_1": # model: "gpt-4-turbo" # or gpt-3.5-turbo
api_type: "openai" # or azure / ollama / groq etc.
base_url: "YOUR_MODEL_1_BASE_URL"
api_key: "YOUR_MODEL_1_API_KEY"
# proxy: "YOUR_MODEL_1_PROXY" # for LLM API requests
# timeout: 600 # Optional. If set to 0, default value is 300.
# Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's
"YOUR_MODEL_NAME_2": # model: "gpt-4-turbo" # or gpt-3.5-turbo
api_type: "openai" # or azure / ollama / groq etc.
base_url: "YOUR_MODEL_2_BASE_URL"
api_key: "YOUR_MODEL_2_API_KEY"
proxy: "YOUR_MODEL_2_PROXY" # for LLM API requests
# timeout: 600 # Optional. If set to 0, default value is 300.
# Details: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/
pricing_plan: "" # Optional. Use for Azure LLM when its model name is not the same as OpenAI's

View file

@ -0,0 +1,53 @@
import pytest
from metagpt.config2 import Config
from metagpt.roles.role import Role, RoleReactMode
from metagpt.actions.action import Action
from metagpt.context import Context
from metagpt.provider.llm_provider_registry import create_llm_instance
from metagpt.const import TEST_DATA_PATH
def test_set_llm():
config1 = Config.default()
config2 = Config.default()
config2.llm.model = "gpt-3.5-turbo"
context = Context(config=config1)
act = Action(context=context)
assert act.config.llm.model == config1.llm.model
llm2 = create_llm_instance(config2.llm)
act.llm = llm2
assert act.llm.model == llm2.model
role = Role(context=context)
role.set_actions([act])
assert act.llm.model == llm2.model
role1 = Role(context=context)
act1 = Action(context=context)
assert act1.config.llm.model == config1.llm.model
act1.config = config2
role1.set_actions([act1])
assert act1.llm.model == llm2.model
# multiple LLM
config3_path = TEST_DATA_PATH / "config/config2_multi_llm.yaml"
dict3 = Config.read_yaml(config3_path)
config3 = Config(**dict3)
context3 = Context(config=config3)
role3 = Role(context=context3)
act3 = Action(context=context3, llm_name_or_type="YOUR_MODEL_NAME_1")
assert act3.config.llm.model == "gpt-3.5-turbo"
assert act3.llm.model == "gpt-4-turbo"
role3.set_actions([act3])
assert act3.config.llm.model == "gpt-3.5-turbo"
assert act3.llm.model == "gpt-4-turbo"