allow a human to play any role

This commit is contained in:
garylin2099 2023-11-09 01:41:24 +08:00
parent c6350efd7f
commit 0bf2ce707e
3 changed files with 46 additions and 5 deletions

View file

@ -8,6 +8,7 @@
from metagpt.provider.anthropic_api import Claude2 as Claude
from metagpt.provider.openai_api import OpenAIGPTAPI as LLM
from metagpt.provider.human_gpt import HumanGPT
DEFAULT_LLM = LLM()
CLAUDE_LLM = Claude()

View file

@ -0,0 +1,35 @@
'''
Filename: MetaGPT/metagpt/provider/human_speaker.py
Created Date: Wednesday, November 8th 2023, 11:55:46 pm
Author: garylin2099
'''
from typing import Optional
from metagpt.provider.base_gpt_api import BaseGPTAPI
from metagpt.logs import logger
class HumanGPT(BaseGPTAPI):
    """A dummy GPT that takes human input as its response.

    This enables replacing the LLM anywhere in the framework with a human,
    thus introducing human interaction into the agent loop.
    """

    def ask(self, msg: str) -> str:
        """Prompt the human and return their typed response.

        Typing "exit" or "quit" terminates the process (SystemExit).
        """
        logger.info("It's your turn, please type in your response. You may also refer to the context below")
        rsp = input(msg)
        if rsp in ["exit", "quit"]:
            # raise SystemExit directly instead of exit(): the latter is a
            # site-module convenience intended for interactive shells
            raise SystemExit
        return rsp

    async def aask(self, msg: str, system_msgs: Optional[list[str]] = None) -> str:
        """Async wrapper around ask(); system_msgs is accepted for interface
        compatibility with other providers but ignored here."""
        return self.ask(msg)

    def completion(self, messages: list[dict]):
        """Dummy implementation of abstract method in base."""
        return []

    async def acompletion(self, messages: list[dict]):
        """Dummy implementation of abstract method in base."""
        return []

    async def acompletion_text(self, messages: list[dict], stream=False) -> str:
        """Dummy implementation of abstract method in base.

        Returns an empty string rather than [] so the return value matches the
        declared -> str contract (the original returned a list despite the
        annotation).
        """
        return ""

View file

@ -15,7 +15,7 @@ from pydantic import BaseModel, Field
# from metagpt.environment import Environment
from metagpt.config import CONFIG
from metagpt.actions import Action, ActionOutput
from metagpt.llm import LLM
from metagpt.llm import LLM, HumanGPT
from metagpt.logs import logger
from metagpt.memory import Memory, LongTermMemory
from metagpt.schema import Message
@ -65,6 +65,7 @@ class RoleSetting(BaseModel):
goal: str  # objective the role pursues
constraints: str  # constraints the role must observe when acting
desc: str  # free-form description of the role
is_human: bool  # True -> role is driven by human input (HumanGPT) instead of an LLM
def __str__(self):
return f"{self.name}({self.profile})"  # rendered as "name(profile)"
@ -106,9 +107,10 @@ class RoleContext(BaseModel):
class Role:
"""Role/Agent"""
def __init__(self, name="", profile="", goal="", constraints="", desc=""):
self._llm = LLM()
self._setting = RoleSetting(name=name, profile=profile, goal=goal, constraints=constraints, desc=desc)
def __init__(self, name="", profile="", goal="", constraints="", desc="", is_human=False):
self._llm = LLM() if not is_human else HumanGPT()
self._setting = RoleSetting(name=name, profile=profile, goal=goal,
constraints=constraints, desc=desc, is_human=is_human)
self._states = []
self._actions = []
self._role_id = str(self._setting)
@ -122,8 +124,11 @@ class Role:
self._reset()
for idx, action in enumerate(actions):
if not isinstance(action, Action):
i = action("")
i = action("", llm=self._llm)
else:
if self._setting.is_human and not isinstance(action.llm, HumanGPT):
logger.warning(f"is_human attribute does not take effect,"
f"as Role's {str(action)} was initialized using LLM, try passing in Action classes instead of initialized instances")
i = action
i.set_prefix(self._get_prefix(), self.profile)
self._actions.append(i)