diff --git a/config/config2.example.yaml b/config/config2.example.yaml index c5454ec32..c65a357b7 100644 --- a/config/config2.example.yaml +++ b/config/config2.example.yaml @@ -13,6 +13,38 @@ llm: # - gpt-4 8k: "gpt-4" # See for more: https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/ +# Role's custom configuration +# Priority: Role's configuration > Global configuration +roles: + - role: "ProductManager" # role's className or role's role_id + llm: + api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options + base_url: "YOUR_BASE_URL" + api_key: "YOUR_API_KEY" + proxy: "YOUR_PROXY" # for LLM API requests + model: "gpt-4-turbo-1106" + - role: "Architect" + llm: + api_type: "openai" # or azure / ollama / open_llm etc. Check LLMType for more options + base_url: "YOUR_BASE_URL" + api_key: "YOUR_API_KEY" + proxy: "YOUR_PROXY" # for LLM API requests + model: "gpt-3.5-turbo" + - role: "ProjectManager" + llm: + api_type: "azure" + base_url: "YOUR_BASE_URL" + api_key: "YOUR_API_KEY" + api_version: "YOUR_API_VERSION" + model: "gpt-4-1106" + - role: "Engineer" + llm: + api_type: "azure" + base_url: "YOUR_BASE_URL" + api_key: "YOUR_API_KEY" + api_version: "YOUR_API_VERSION" + model: "gpt-35-turbo-1106" + repair_llm_output: true # when the output is not a valid json, try to repair it proxy: "YOUR_PROXY" # for tools like requests, playwright, selenium, etc. 
diff --git a/metagpt/config2.py b/metagpt/config2.py index cf5ed0da1..021031df7 100644 --- a/metagpt/config2.py +++ b/metagpt/config2.py @@ -15,6 +15,7 @@ from metagpt.configs.browser_config import BrowserConfig from metagpt.configs.llm_config import LLMConfig, LLMType from metagpt.configs.mermaid_config import MermaidConfig from metagpt.configs.redis_config import RedisConfig +from metagpt.configs.role_custom_config import RoleCustomConfig from metagpt.configs.s3_config import S3Config from metagpt.configs.search_config import SearchConfig from metagpt.configs.workspace_config import WorkspaceConfig @@ -76,6 +77,9 @@ class Config(CLIParams, YamlModel): azure_tts_subscription_key: str = "" azure_tts_region: str = "" + # Role's custom configuration, Priority: Role's configuration > Global configuration + roles: Optional[List[RoleCustomConfig]] = None + @classmethod def from_home(cls, path): """Load config from ~/.metagpt/config2.yaml""" diff --git a/metagpt/configs/role_custom_config.py b/metagpt/configs/role_custom_config.py new file mode 100644 index 000000000..414c2a793 --- /dev/null +++ b/metagpt/configs/role_custom_config.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +@Time : 2024/4/22 16:33 +@Author : Justin +@File : role_custom_config.py +""" +from metagpt.configs.llm_config import LLMConfig +from metagpt.utils.yaml_model import YamlModel + + +class RoleCustomConfig(YamlModel): + """custom config for roles + role: role's className or role's role_id + To be expanded + """ + role: str = "" + llm: LLMConfig + diff --git a/metagpt/roles/role.py b/metagpt/roles/role.py index e0f8a7ea6..a5256b201 100644 --- a/metagpt/roles/role.py +++ b/metagpt/roles/role.py @@ -31,6 +31,7 @@ from metagpt.actions import Action, ActionOutput from metagpt.actions.action_node import ActionNode from metagpt.actions.add_requirement import UserRequirement from metagpt.context_mixin import ContextMixin +from metagpt.llm import LLM from metagpt.logs import logger 
from metagpt.memory import Memory from metagpt.provider import HumanProvider @@ -43,7 +44,6 @@ from metagpt.utils.repair_llm_raw_output import extract_state_value_from_output if TYPE_CHECKING: from metagpt.environment import Environment # noqa: F401 - PREFIX_TEMPLATE = """You are a {profile}, named {name}, your goal is {goal}. """ CONSTRAINT_TEMPLATE = "the constraint is {constraints}. " @@ -167,6 +167,12 @@ class Role(SerializationMixin, ContextMixin, BaseModel): if self.is_human: self.llm = HumanProvider(None) + if self.context.config.roles: + _context, _in = self.get_custom_role_config() + if _in: + self.context = _context + self.llm = LLM(llm_config=self.context.config.llm) + self._check_actions() self.llm.system_prompt = self._get_prefix() self.llm.cost_manager = self.context.cost_manager @@ -240,6 +246,29 @@ def _setting(self): return f"{self.name}({self.profile})" + def get_custom_role_config(self): + """ + check and get current role's custom config + 1.Check if the custom configurations exist + 2.Check if the current role exist in custom configurations + 3.Update corresponding configurations item by item based on custom configurations + :return: + _context:replaced the corresponding configuration context with custom config + _in:current role has custom config + """ + _context = self.context + _in = False + if self.context.config.roles: + for role_config in self.context.config.roles: + if (bool(self.role_id) and self.role_id == role_config.role) or type( self).__name__ == role_config.role: + _in = True + for key, _ in role_config.model_fields.items(): + if key not in ["role", "extra_fields"]: + setattr(_context.config, key, getattr(role_config, key)) + break + return _context, _in + def _check_actions(self): """Check actions and set llm and prefix for each action.""" self.set_actions(self.actions)