Merge branch 'main' into feature/talk_prompt

This commit is contained in:
莘权 马 2023-09-15 17:44:58 +08:00
commit dda1745c79
12 changed files with 355 additions and 260 deletions

View file

@@ -5,6 +5,7 @@
@Author : alexanderwu
@File : action.py
@Modified By: mashenquan, 2023/8/20. Add function return annotations.
@Modified By: mashenquan, 2023/9/8. Replace LLM with LLMFactory
"""
from __future__ import annotations
@@ -14,16 +15,18 @@ from typing import Optional
from tenacity import retry, stop_after_attempt, wait_fixed
from metagpt.actions.action_output import ActionOutput
from metagpt.llm import LLM
from metagpt.logs import logger
from metagpt.provider.base_gpt_api import BaseGPTAPI
from metagpt.utils.common import OutputParser
class Action(ABC):
def __init__(self, name: str = "", context=None, llm: LLM = None):
def __init__(self, name: str = "", context=None, llm: BaseGPTAPI = None):
self.name: str = name
if llm is None:
llm = LLM()
from metagpt.llm import LLMFactory
llm = LLMFactory.new_llm()
self.llm = llm
self.context = context
self.prefix = ""

View file

@@ -6,10 +6,12 @@
@File : talk_action.py
@Desc : Act as if it's a talk
"""
import json
from metagpt.actions import Action, ActionOutput
from metagpt.config import CONFIG
from metagpt.const import DEFAULT_LANGUAGE
from metagpt.llm import LLMType
from metagpt.logs import logger
@@ -62,13 +64,40 @@ class TalkAction(Action):
logger.info(f"PROMPT: {prompt}")
return prompt
async def run(self, *args, **kwargs) -> ActionOutput:
async def run_old(self, *args, **kwargs) -> ActionOutput:
    """Legacy entry point: send the prepared prompt to the LLM with no system messages.

    Caches the wrapped reply on ``self._rsp`` and returns it.
    """
    query = self.prompt
    answer = await self.llm.aask(msg=query, system_msgs=[])
    logger.debug(f"PROMPT:{query}\nRESULT:{answer}\n")
    self._rsp = ActionOutput(content=answer)
    return self._rsp
@property
def aask_args(self):
    """Assemble the ``(msg, format_msgs, system_msgs)`` triple for ``llm.aask``.

    ``format_msgs`` carries knowledge and conversation-history context as
    assistant-role messages; ``system_msgs`` pins the role-play persona and
    the answer language/format.
    """
    lang = CONFIG.language or DEFAULT_LANGUAGE
    sys_prompts = [
        f"You are {CONFIG.agent_description}.",
        (
            "Your responses should align with the role-play agreement, "
            "maintaining the character's persona and habits. When faced with unrelated questions, playfully "
            "decline to answer without revealing your AI nature to preserve the character's image."
        ),
        "If the information is insufficient, you can search in the context or knowledge.",
        f"Answer the following questions strictly in {lang}, and the answers must follow the Markdown format.",
    ]
    history = []
    if self._knowledge:
        history.append({"role": "assistant", "content": self._knowledge})
    if self._history_summary:
        # The MetaGPT-hosted provider stores the summary as a JSON-encoded
        # message list; other providers get it as a single assistant message.
        if CONFIG.LLM_TYPE == LLMType.METAGPT.value:
            history += json.loads(self._history_summary)
        else:
            history.append({"role": "assistant", "content": self._history_summary})
    return self._talk, history, sys_prompts
async def run(self, *args, **kwargs) -> ActionOutput:
    """Query the LLM with the prepared aask arguments and cache the reply."""
    talk, history, sys_prompts = self.aask_args
    reply = await self.llm.aask(msg=talk, format_msgs=history, system_msgs=sys_prompts)
    self._rsp = ActionOutput(content=reply)
    return self._rsp
__FORMATION__ = """Formation: "Capacity and role" defines the role you are currently playing;
"[HISTORY_BEGIN]" and "[HISTORY_END]" tags enclose the historical conversation;
"[KNOWLEDGE_BEGIN]" and "[KNOWLEDGE_END]" tags enclose the knowledge may help for your responses;