update st_game using MG v0.7

This commit is contained in:
better629 2024-02-26 20:02:16 +08:00
parent 643450388a
commit 325550a3dc
112 changed files with 6437 additions and 2 deletions

2
examples/st_game/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
storage/test*
storage/unittest*

View file

@ -0,0 +1,26 @@
## Stanford Town Game
### Pre-Description
The path configured in `examples/st_game/utils/const.py` is the storage path of the current project. To make it easy for the GA (generative_agents) frontend to consume the simulation data (without changing its code), you can change the paths in `const.py` as below
```
STORAGE_PATH = ROOT_PATH.joinpath("storage")
TEMP_STORAGE_PATH = ROOT_PATH.joinpath("temp_storage")
# updated
STORAGE_PATH = Path("{path/to/ga/storage}")
TEMP_STORAGE_PATH = Path("{path/to/ga/temp_storage}")
```
This can be used to achieve docking of simulation data without changing the GA code. Otherwise, the GA code must be modified to adapt to the MG output path.
### Backend service startup
The execution entry is `python3 run_st_game.py "Host a open lunch party at 13:00 pm" "base_the_ville_isabella_maria_klaus" "test_sim" 10`
`idea` is the user's voice to the first Agent, and it is disseminated through this voice to see whether the final multi-agents achieve the goal of hosting or participating in the event.
### Frontend service startup
Enter `generative_agents/environment/frontend_server` and use `python manage.py runserver` to start the front-end service.
Visit `http://localhost:8000/simulator_home` to enter the current simulation interface.
## Appreciation
This reproduction work references `https://github.com/joonspk-research/generative_agents`; we gratefully acknowledge it here.

View file

@ -0,0 +1,25 @@
## Stanford Town Game
### 前置
`examples/st_game/utils/const.py`配置的路径为当前项目的存储路径。为了方便GA(generative_agents)的前端对接数据(避免改动它那块的代码),可将`const.py`下的
```
STORAGE_PATH = ROOT_PATH.joinpath("storage")
TEMP_STORAGE_PATH = ROOT_PATH.joinpath("temp_storage")
# 更新为
STORAGE_PATH = Path("{path/to/ga/storage}")
TEMP_STORAGE_PATH = Path("{path/to/ga/temp_storage}")
```
这样可用实现不改变GA代码情况下实现仿真数据的对接。不然得修改GA的代码来适配MG的输出路径。
### 后端服务启动
执行入口为:`python3 run_st_game.py "Host a open lunch party at 13:00 pm" "base_the_ville_isabella_maria_klaus" "test_sim" 10`
`idea`为用户给第一个Agent的用户心声并通过这个心声进行传播看最后多智能体是否达到举办、参加活动的目标。
### 前端服务启动
进入`generative_agents/environment/frontend_server`,使用`python manage.py runserver`启动前端服务。
访问`http://localhost:8000/simulator_home` 进入当前的仿真界面。
## Appreciation
This reproduction work references `https://github.com/joonspk-research/generative_agents`; we gratefully acknowledge it here.

View file

@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :

View file

@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :

View file

@ -0,0 +1,39 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc   : summarize the relationship between two agents from their chat
from examples.st_game.actions.st_action import STAction
from metagpt.logs import logger
class AgentChatSumRel(STAction):
    """Summarize the relationship between two roles from chat statements."""

    name: str = "AgentChatSumRel"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """A response is valid when it can be cleaned without raising."""
        try:
            llm_resp.split('"')[0].strip()
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> str:
        """Keep only the text before the first double quote."""
        return llm_resp.split('"')[0].strip()

    def _func_fail_default_resp(self) -> str:
        """No fallback is defined for this action."""
        return None

    async def run(self, init_role: "STRole", target_role: "STRole", statements: str) -> str:
        """Ask the LLM to summarize the two roles' relationship from `statements`."""
        prompt_input = [statements, init_role.name, target_role.name]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "summarize_chat_relationship_v2.txt")
        example_output = "Jane Doe is working on a project"
        special_instruction = "The output should be a string that responds to the question."
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {init_role.name} Action: {self.cls_name} output: {output}")
        return output

View file

@ -0,0 +1,97 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc   : decide whether to talk to another role, return yes or no
from examples.st_game.actions.st_action import STAction
from metagpt.logs import logger
class DecideToTalk(STAction):
    """Decide whether `init_role` should start a conversation with `target_role`.

    The prompt combines retrieved memory context, both roles' current
    activities, and their last chat (if any); the LLM answers yes/no.
    """

    name: str = "DecideToTalk"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid only when the response ends in a clean yes/no token."""
        resp = False
        try:
            if llm_resp.split("Answer in yes or no:")[-1].strip().lower() in ["yes", "no"]:
                resp = True
        except ValueError:
            pass
        return resp

    def _func_cleanup(self, llm_resp: str, prompt: str) -> str:
        """Extract the lower-cased yes/no token from the raw response."""
        return llm_resp.split("Answer in yes or no:")[-1].strip().lower()

    def _func_fail_default_resp(self) -> str:
        """Default to "yes" so a failed LLM call still allows conversations."""
        return "yes"

    async def run(self, init_role: "STRole", target_role: "STRole", retrieved: dict, *args, **kwargs) -> bool:
        """Return True if `init_role` decides to talk to `target_role`."""

        def create_prompt_input(init_role: "STRole", target_role: "STRole", retrieved: dict) -> str:
            scratch = init_role.rc.scratch
            target_scratch = target_role.rc.scratch
            last_chat = init_role.rc.memory.get_last_chat(target_role.name)
            last_chatted_time = ""
            last_chat_about = ""
            if last_chat:
                last_chatted_time = last_chat.created.strftime("%B %d, %Y, %H:%M:%S")
                last_chat_about = last_chat.description

            context = ""
            for c_node in retrieved["events"]:
                # Rewrite the event description into past tense ("... was ...").
                curr_desc = c_node.description.split(" ")
                curr_desc[2:3] = ["was"]
                curr_desc = " ".join(curr_desc)
                context += f"{curr_desc}. "
            context += "\n"
            for c_node in retrieved["thoughts"]:
                context += f"{c_node.description}. "

            curr_time = scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S %p")
            init_act_desc = scratch.act_description
            if "(" in init_act_desc:
                init_act_desc = init_act_desc.split("(")[-1][:-1]

            if len(scratch.planned_path) == 0 and "waiting" not in init_act_desc:
                init_p_desc = f"{init_role.name} is already {init_act_desc}"
            elif "waiting" in init_act_desc:
                init_p_desc = f"{init_role.name} is {init_act_desc}"
            else:
                init_p_desc = f"{init_role.name} is on the way to {init_act_desc}"

            # BUGFIX: the original read `scratch.act_description` (the
            # initiator's scratch), so the target's activity was always wrong.
            target_act_desc = target_scratch.act_description
            if "(" in target_act_desc:
                target_act_desc = target_act_desc.split("(")[-1][:-1]

            # NOTE(review): the "waiting" branches intentionally check the
            # initiator's description, mirroring upstream generative_agents.
            if len(target_scratch.planned_path) == 0 and "waiting" not in init_act_desc:
                target_p_desc = f"{target_role.name} is already {target_act_desc}"
            elif "waiting" in init_act_desc:
                target_p_desc = f"{init_role.name} is {init_act_desc}"
            else:
                target_p_desc = f"{target_role.name} is on the way to {target_act_desc}"

            prompt_input = []
            prompt_input += [context]
            prompt_input += [curr_time]
            prompt_input += [init_role.name]
            prompt_input += [target_role.name]
            prompt_input += [last_chatted_time]
            prompt_input += [last_chat_about]
            prompt_input += [init_p_desc]
            prompt_input += [target_p_desc]
            prompt_input += [init_role.name]
            prompt_input += [target_role.name]
            return prompt_input

        prompt_input = create_prompt_input(init_role, target_role, retrieved)
        prompt = self.generate_prompt_with_tmpl_filename(
            prompt_input=prompt_input, tmpl_filename="decide_to_talk_v2.txt"
        )
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=20)  # yes or no
        result = True if output == "yes" else False
        logger.info(f"Role: {init_role.name} Action: {self.cls_name} output: {result}")
        return result

View file

@ -0,0 +1,23 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : dummy action to make every STRole can deal DummyMessage which is caused by DummyAction
from dataclasses import dataclass
from metagpt.actions import Action
from metagpt.schema import Message
class DummyAction(Action):
    """Placeholder action that is never meant to be executed.

    NOTE(review): it appears to exist only so DummyMessage can name it via
    `cause_by` — confirm against the roles' message-routing code.
    """

    async def run(self, *args, **kwargs):
        # Calling a DummyAction directly is a programming error.
        raise NotImplementedError
@dataclass
class DummyMessage(Message):
    """
    Dummy message passed to roles so that every role performs one execution
    each round, even when it has no real message to act on.
    """

    # Placeholder payload; the content is never inspected.
    content: str = "dummy"
    # Name of the action class recorded as this message's cause.
    cause_by: str = "DummyAction"

View file

@ -0,0 +1,403 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : gen_action_details
import random
from metagpt.environment.api.env_api import EnvAPIAbstract
from metagpt.logs import logger
from .st_action import STAction
class GenActionSector(STAction):
    """Pick the sector in which the role's next action should take place."""

    name: str = "GenActionSector"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Keep only the text before the first "}".
        cleaned_response = llm_resp.split("}")[0]
        return cleaned_response

    def _func_validate(self, llm_resp: str, prompt: str):
        """Reject empty responses, responses without "}", or multi-item lists."""
        if len(llm_resp.strip()) < 1:
            return False
        if "}" not in llm_resp:
            return False
        if "," in llm_resp:
            return False
        return True

    def _func_fail_default_resp(self):
        fs = "kitchen"
        return fs

    async def run(self, role: "STRole", access_tile: dict[str, str], act_desp: str):
        """Return the name of an accessible sector for `act_desp`.

        Falls back to the role's living-area sector when the LLM answer is
        not among the accessible sectors.
        """

        def create_prompt_input(role, access_tile: dict[str, str], act_desp):
            # NOTE: the original first built a prompt_input list and then
            # discarded it with `prompt_input = []` ("MAR 11 TEMP" rewrite);
            # that dead code has been removed.
            prompt_input = []
            act_world = access_tile["world"]

            accessible_sector_str = role.s_mem.get_str_accessible_sectors(act_world)
            curr = accessible_sector_str.split(", ")
            fin_accessible_sectors = []
            for i in curr:
                # Private houses are only accessible to their owner.
                if "'s house" in i:
                    if role.scratch.last_name in i:
                        fin_accessible_sectors += [i]
                else:
                    fin_accessible_sectors += [i]
            accessible_sector_str = ", ".join(fin_accessible_sectors)
            prompt_input += [accessible_sector_str]

            # Split "doing X (detail)" into the action and its detail.
            act_desp_1 = act_desp
            act_desp_2 = act_desp
            if "(" in act_desp:
                act_desp_1 = act_desp.split("(")[0].strip()
                act_desp_2 = act_desp.split("(")[-1][:-1]
            prompt_input += [role.scratch.get_str_name()]
            prompt_input += [act_desp_1]
            prompt_input += [act_desp_2]
            prompt_input += [role.scratch.get_str_name()]
            return prompt_input

        prompt_template = "action_location_sector_v1.txt"
        prompt_input = create_prompt_input(role, access_tile, act_desp)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=15)
        y = f"{access_tile['world']}"
        x = [i.strip() for i in role.s_mem.get_str_accessible_sectors(y).split(",")]
        if output not in x:
            # output = random.choice(x)
            output = role.scratch.living_area.split(":")[1]
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenActionArena(STAction):
    """Pick the arena (room) inside a sector where the action takes place."""

    name: str = "GenActionArena"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Keep only the text before the first "}".
        cleaned_response = llm_resp.split("}")[0]
        return cleaned_response

    def _func_validate(self, llm_resp: str, prompt: str):
        """Reject empty responses, responses without "}", or multi-item lists."""
        if len(llm_resp.strip()) < 1:
            return False
        if "}" not in llm_resp:
            return False
        if "," in llm_resp:
            return False
        return True

    def _func_fail_default_resp(self):
        fs = "kitchen"
        return fs

    async def run(self, role: "STRole", act_desp: str, act_world: str, act_sector: str):
        """Return an accessible arena within `act_world:act_sector` for `act_desp`."""

        def create_prompt_input(role, act_desp, act_world, act_sector):
            prompt_input = []
            prompt_input += [role.scratch.get_str_name()]
            x = f"{act_world}:{act_sector}"
            prompt_input += [act_sector]

            # MAR 11 TEMP: private rooms are only accessible to their owner.
            accessible_arena_str = role.s_mem.get_str_accessible_sector_arenas(x)
            curr = accessible_arena_str.split(", ")
            fin_accessible_arenas = []
            for i in curr:
                if "'s room" in i:
                    if role.scratch.last_name in i:
                        fin_accessible_arenas += [i]
                else:
                    fin_accessible_arenas += [i]
            accessible_arena_str = ", ".join(fin_accessible_arenas)
            # END MAR 11 TEMP
            prompt_input += [accessible_arena_str]

            # Split "doing X (detail)" into the action and its detail.
            act_desp_1 = act_desp
            act_desp_2 = act_desp
            if "(" in act_desp:
                act_desp_1 = act_desp.split("(")[0].strip()
                act_desp_2 = act_desp.split("(")[-1][:-1]
            prompt_input += [role.scratch.get_str_name()]
            prompt_input += [act_desp_1]
            prompt_input += [act_desp_2]
            prompt_input += [role.scratch.get_str_name()]
            prompt_input += [act_sector]
            prompt_input += [accessible_arena_str]
            return prompt_input

        prompt_template = "action_location_object_vMar11.txt"
        prompt_input = create_prompt_input(role, act_desp, act_world, act_sector)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        # BUGFIX: removed leftover debug `print('prompt ', prompt)`.
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=15)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenActionObject(STAction):
    """Choose the game object within an arena that the action should use."""

    name: str = "GenActionObject"

    def _func_validate(self, llm_resp: str, prompt: str):
        """Any non-blank response is accepted."""
        if not llm_resp.strip():
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str):
        """Trim surrounding whitespace from the response."""
        return llm_resp.strip()

    def _func_fail_default_resp(self):
        return "bed"

    async def run(self, role: "STRole", act_desp: str, temp_address: str):
        """Return a game object accessible at `temp_address` for `act_desp`.

        A random accessible object is used when the LLM answer is invalid.
        """
        desc = act_desp
        if "(" in desc:
            desc = desc.split("(")[-1][:-1]
        prompt_input = [desc, role.s_mem.get_str_accessible_arena_game_objects(temp_address)]
        prompt_template = "action_object_v2.txt"
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=15)
        candidates = [i.strip() for i in role.s_mem.get_str_accessible_arena_game_objects(temp_address).split(",")]
        if output not in candidates:
            output = random.choice(candidates)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenPronunciatio(STAction):
    """Generate an emoji "pronunciatio" summarizing an action description."""

    name: str = "GenPronunciatio"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Keep at most the first three characters of the trimmed response.
        return llm_resp.strip()[:3]

    def _func_validate(self, llm_resp: str, prompt: str):
        """Valid when cleanup succeeds and the response is non-empty."""
        try:
            self._func_cleanup(llm_resp, prompt="")
            if len(llm_resp) == 0:
                return False
        except Exception:
            return False
        return True

    def _func_fail_default_resp(self):
        return "😋"

    async def run(self, role: "STRole", act_desp: str):
        """Return an emoji string representing `act_desp`."""
        desc = act_desp
        if "(" in desc:
            # Use only the parenthesized detail of the description.
            desc = desc.split("(")[-1].split(")")[0]
        prompt_template = "generate_pronunciatio_v1.txt"
        prompt = self.generate_prompt_with_tmpl_filename([desc], prompt_template)
        example_output = "🛁🧖‍♀️"
        special_instruction = "The value for the output must ONLY contain the emojis."
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenEventTriple(STAction):
    """Generate a (subject, predicate, object) event triple for a role's action."""

    name: str = "GenEventTriple"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Take the text before the first ")" and split on commas.
        parts = llm_resp.strip().split(")")[0].split(",")
        return [part.strip() for part in parts]

    def _func_validate(self, llm_resp: str, prompt: str):
        """Valid when cleanup yields exactly the two non-subject parts."""
        try:
            return len(self._func_cleanup(llm_resp, prompt="")) == 2
        except Exception:
            return False

    def _func_fail_default_resp(self, role):
        return (role.name, "is", "idle")

    async def run(self, role: "STRole", act_desp: str):
        """Return the (role, predicate, object) triple for `act_desp`."""
        desc = act_desp
        if "(" in desc:
            desc = desc.split("(")[-1].split(")")[0]
        prompt_input = [role.name, desc, role.name]
        prompt_template = "generate_event_triple_v1.txt"
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp(role)
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=30)
        output = (role.name, output[0], output[1])
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenActObjDescription(STAction):
    """Describe the state of the game object involved in a role's action."""

    name: str = "GenActObjDescription"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Trim whitespace and drop a trailing period if present.
        cleaned = llm_resp.strip()
        return cleaned[:-1] if cleaned[-1] == "." else cleaned

    def _func_validate(self, llm_resp: str, prompt: str):
        """Valid when the response can be cleaned without raising."""
        try:
            self._func_cleanup(llm_resp, prompt="")
        except Exception:
            return False
        return True

    def _func_fail_default_resp(self, act_game_object):
        return f"{act_game_object} is idle"

    async def run(self, role: "STRole", act_game_object: str, act_desp: str):
        """Return a phrase describing `act_game_object` while `role` does `act_desp`."""
        prompt_input = [act_game_object, role.name, act_desp, act_game_object, act_game_object]
        prompt_template = "generate_obj_event_v1.txt"
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        example_output = "being fixed"
        special_instruction = "The output should ONLY contain the phrase that should go in <fill in>."
        self.fail_default_resp = self._func_fail_default_resp(act_game_object)
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenObjEventTriple(STAction):
    """Generate a (object, predicate, object) event triple for a game object."""

    name: str = "GenObjEventTriple"

    def _func_cleanup(self, llm_resp: str, prompt: str):
        # Take the text before the first ")" and split on commas.
        parts = llm_resp.strip().split(")")[0].split(",")
        return [part.strip() for part in parts]

    def _func_validate(self, llm_resp: str, prompt: str):
        """Valid when cleanup yields exactly the two non-subject parts."""
        try:
            return len(self._func_cleanup(llm_resp, prompt="")) == 2
        except Exception:
            return False

    def _func_fail_default_resp(self, act_game_object: str):
        return (act_game_object, "is", "idle")

    async def run(self, role: "STRole", act_game_object, act_obj_desp):
        """Return the (object, predicate, object) triple for `act_obj_desp`."""
        prompt_input = [act_game_object, act_obj_desp, act_game_object]
        prompt_template = "generate_event_triple_v1.txt"
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp(act_game_object)
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=30)
        output = (act_game_object, output[0], output[1])
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
class GenActionDetails(STAction):
    """Compose the full action-detail dict by chaining the Gen* sub-actions."""

    name: str = "GenActionDetails"

    def _func_cleanup(self, llm_resp: str, prompt: str) -> list:
        # No LLM response to clean for this composite action.
        pass

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        # TODO -- this sometimes generates error
        try:
            # BUGFIX: the original called `_func_cleanup(llm_resp)` without the
            # required `prompt` argument; the TypeError made this always False.
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_fail_default_resp(self) -> dict:
        fs = {}
        return fs

    async def run(self, role: "STRole", act_desp: str, act_dura):
        """Build the action detail dict for `role` doing `act_desp` for `act_dura` minutes.

        Queries the environment for the current tile, then chains the sector,
        arena, object, pronunciatio and event-triple sub-actions.
        """
        access_tile = await role.rc.env.observe(
            EnvAPIAbstract(api_name="access_tile", kwargs={"tile": role.scratch.curr_tile})
        )
        act_world = access_tile["world"]
        act_sector = await GenActionSector().run(role, access_tile, act_desp)
        act_arena = await GenActionArena().run(role, act_desp, act_world, act_sector)
        act_address = f"{act_world}:{act_sector}:{act_arena}"
        if not role.s_mem.get_str_accessible_arena_game_objects(act_address):
            # No objects known at this address: defer the choice to the env.
            act_game_object = "<random>"
        else:
            act_game_object = await GenActionObject().run(role, act_desp, act_address)
        new_address = f"{act_world}:{act_sector}:{act_arena}:{act_game_object}"
        act_pron = await GenPronunciatio().run(role, act_desp)
        act_event = await GenEventTriple().run(role, act_desp)
        # Persona's actions also influence the object states. We set those up here.
        act_obj_desp = await GenActObjDescription().run(role, act_game_object, act_desp)
        act_obj_pron = await GenPronunciatio().run(role, act_obj_desp)
        act_obj_event = await GenObjEventTriple().run(role, act_game_object, act_obj_desp)
        result_dict = {
            "action_address": new_address,
            "action_duration": int(act_dura),
            "action_description": act_desp,
            "action_pronunciatio": act_pron,
            "action_event": act_event,
            "chatting_with": None,
            "chat": None,
            "chatting_with_buffer": None,
            "chatting_end_time": None,
            "act_obj_description": act_obj_desp,
            "act_obj_pronunciatio": act_obj_pron,
            "act_obj_event": act_obj_event,
        }
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {result_dict}")
        return result_dict

View file

@ -0,0 +1,61 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : gen_daily_schedule
from metagpt.logs import logger
from .st_action import STAction
class GenDailySchedule(STAction):
    """Generate a role's high-level daily schedule as a list of activity strings."""

    name: str = "GenDailySchedule"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """A response is valid iff `_func_cleanup` parses it without raising."""
        try:
            self._func_cleanup(llm_resp, prompt="")
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> list:
        """Parse the ")"-separated, numbered LLM output into activity strings."""
        cr = []
        _cr = llm_resp.split(")")
        for i in _cr:
            # Each segment ends with the next item's number: strip that digit,
            # then drop the trailing "." or "," before keeping the activity.
            if i[-1].isdigit():
                i = i[:-1].strip()
                if i[-1] == "." or i[-1] == ",":
                    cr += [i[:-1].strip()]
        return cr

    def _func_fail_default_resp(self) -> list:
        # NOTE(review): annotation corrected from `-> int` — this returns a list.
        fs = [
            "wake up and complete the morning routine at 6:00 am",
            "eat breakfast at 7:00 am",
            "read a book from 8:00 am to 12:00 pm",
            "have lunch at 12:00 pm",
            "take a nap from 1:00 pm to 4:00 pm",
            "relax and watch TV from 7:00 pm to 8:00 pm",
            "go to bed at 11:00 pm",
        ]
        return fs

    async def run(self, role: "STRole", wake_up_hour: str):
        """Ask the LLM for today's broad-stroke schedule for `role`.

        Args:
            role: the role whose schedule is generated.
            wake_up_hour: hour the role wakes up (coerced to int below).

        Returns:
            A list of activity strings, always starting with the wake-up entry.
        """

        def create_prompt_input(role, wake_up_hour):
            prompt_input = []
            prompt_input += [role.scratch.get_str_iss()]
            prompt_input += [role.scratch.get_str_lifestyle()]
            prompt_input += [role.scratch.get_str_curr_date_str()]
            prompt_input += [role.scratch.get_str_firstname()]
            prompt_input += [f"{str(wake_up_hour)}:00 am"]
            return prompt_input

        wake_up_hour = int(wake_up_hour)
        prompt_template = "daily_planning_v6.txt"
        prompt_input = create_prompt_input(role, wake_up_hour)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=500)
        # Prepend the wake-up entry so the schedule always starts the day.
        output = [f"wake up and complete the morning routine at {wake_up_hour}:00 am"] + output
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output

View file

@ -0,0 +1,181 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : gen_hourly_schedule
import random
import string
from metagpt.logs import logger
from .st_action import STAction
def get_random_alphanumeric(i=6, j=6):
    """Return a random alphanumeric string whose length is drawn
    uniformly from the inclusive range [i, j].

    Args:
        i: minimum length.
        j: maximum length.

    Returns:
        A random string of ASCII letters and digits with i <= len <= j.
    """
    length = random.randint(i, j)
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choices(alphabet, k=length))
class GenHourlySchedule(STAction):
    """Generate a role's hour-by-hour schedule, compressed to [activity, minutes] pairs."""

    name: str = "GenHourlySchedule"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """A response is valid iff `_func_cleanup` parses it without raising."""
        try:
            self._func_cleanup(llm_resp, prompt="")
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> str:
        # NOTE(review): annotation corrected from `-> list` — this returns a str.
        cr = llm_resp.strip()
        if cr[-1] == ".":
            cr = cr[:-1]
        # to only use the first line of output
        cr = cr.split("\n")[0]
        return cr

    def _func_fail_default_resp(self) -> str:
        # NOTE(review): annotation corrected from `-> int` — this returns a str.
        fs = "asleep"
        return fs

    async def _generate_schedule_for_given_hour(
        self, role: "STRole", curr_hour_str, p_f_ds_hourly_org, hour_str, intermission2=None
    ):
        """Ask the LLM what `role` is doing during `curr_hour_str`.

        Args:
            role: the role being scheduled.
            curr_hour_str: label of the hour being filled (e.g. "07:00 AM").
            p_f_ds_hourly_org: activities already decided for earlier hours.
            hour_str: all 24 hour labels for the day.
            intermission2: optional extra instruction appended to the prompt.
        """

        def create_prompt_input(persona, curr_hour_str, p_f_ds_hourly_org, hour_str, intermission2=None):
            # Template of the full-day schedule the LLM should fill in.
            schedule_format = ""
            for i in hour_str:
                schedule_format += f"[{persona.scratch.get_str_curr_date_str()} -- {i}]"
                schedule_format += " Activity: [Fill in]\n"
            schedule_format = schedule_format[:-1]

            # Remind the LLM of the role's originally planned daily breakdown.
            intermission_str = "Here the originally intended hourly breakdown of"
            intermission_str += f" {persona.scratch.get_str_firstname()}'s schedule today: "
            for count, i in enumerate(persona.scratch.daily_req):
                intermission_str += f"{str(count + 1)}) {i}, "
            intermission_str = intermission_str[:-2]

            # Render the hours already scheduled so far.
            prior_schedule = ""
            if p_f_ds_hourly_org:
                prior_schedule = "\n"
                for count, i in enumerate(p_f_ds_hourly_org):
                    prior_schedule += f"[(ID:{get_random_alphanumeric()})"
                    prior_schedule += f" {persona.scratch.get_str_curr_date_str()} --"
                    prior_schedule += f" {hour_str[count]}] Activity:"
                    prior_schedule += f" {persona.scratch.get_str_firstname()}"
                    prior_schedule += f" is {i}\n"

            # The line the LLM must complete for the current hour.
            prompt_ending = f"[(ID:{get_random_alphanumeric()})"
            prompt_ending += f" {persona.scratch.get_str_curr_date_str()}"
            prompt_ending += f" -- {curr_hour_str}] Activity:"
            prompt_ending += f" {persona.scratch.get_str_firstname()} is"

            if intermission2:
                intermission2 = f"\n{intermission2}"

            prompt_input = []
            prompt_input += [schedule_format]
            prompt_input += [persona.scratch.get_str_iss()]
            prompt_input += [prior_schedule + "\n"]
            prompt_input += [intermission_str]
            if intermission2:
                prompt_input += [intermission2]
            else:
                prompt_input += [""]
            prompt_input += [prompt_ending]
            return prompt_input

        prompt_template = "generate_hourly_schedule_v2.txt"
        prompt_input = create_prompt_input(role, curr_hour_str, p_f_ds_hourly_org, hour_str, intermission2)
        prompt_input_str = "\n".join(prompt_input)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, prompt_template)
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=50)
        logger.info(
            f"Role: {role.name} _generate_schedule_for_given_hour prompt_input: {prompt_input_str}, "
            f"output: {output}"
        )
        return output

    async def run(self, role: "STRole", wake_up_hour: int):
        """Build the whole day's schedule in minutes.

        Returns:
            A list of [activity, duration_in_minutes] pairs covering 24h.
        """
        hour_str = [
            "00:00 AM",
            "01:00 AM",
            "02:00 AM",
            "03:00 AM",
            "04:00 AM",
            "05:00 AM",
            "06:00 AM",
            "07:00 AM",
            "08:00 AM",
            "09:00 AM",
            "10:00 AM",
            "11:00 AM",
            "12:00 PM",
            "01:00 PM",
            "02:00 PM",
            "03:00 PM",
            "04:00 PM",
            "05:00 PM",
            "06:00 PM",
            "07:00 PM",
            "08:00 PM",
            "09:00 PM",
            "10:00 PM",
            "11:00 PM",
        ]
        n_m1_activity = []
        diversity_repeat_count = 1  # TODO mg 1->3
        # Regenerate up to `diversity_repeat_count` times while the schedule
        # contains fewer than 5 distinct activities (too repetitive).
        for i in range(diversity_repeat_count):
            logger.info(f"diversity_repeat_count idx: {i}")
            n_m1_activity_set = set(n_m1_activity)
            if len(n_m1_activity_set) < 5:
                n_m1_activity = []
                for count, curr_hour_str in enumerate(hour_str):
                    # Hours before wake-up are spent sleeping; note this
                    # mutates `wake_up_hour` as a countdown.
                    if wake_up_hour > 0:
                        n_m1_activity += ["sleeping"]
                        wake_up_hour -= 1
                    else:
                        logger.info(f"_generate_schedule_for_given_hour idx: {count}, n_m1_activity: {n_m1_activity}")
                        n_m1_activity += [
                            await self._generate_schedule_for_given_hour(role, curr_hour_str, n_m1_activity, hour_str)
                        ]
        # Step 1. Compressing the hourly schedule to the following format:
        # The integer indicates the number of hours. They should add up to 24.
        # [['sleeping', 6], ['waking up and starting her morning routine', 1],
        # ['eating breakfast', 1], ['getting ready for the day', 1],
        # ['working on her painting', 2], ['taking a break', 1],
        # ['having lunch', 1], ['working on her painting', 3],
        # ['taking a break', 2], ['working on her painting', 2],
        # ['relaxing and watching TV', 1], ['going to bed', 1], ['sleeping', 2]]
        _n_m1_hourly_compressed = []
        prev = None
        prev_count = 0
        for i in n_m1_activity:
            if i != prev:
                prev_count = 1
                _n_m1_hourly_compressed += [[i, prev_count]]
                prev = i
            elif _n_m1_hourly_compressed:
                _n_m1_hourly_compressed[-1][1] += 1
        # Step 2. Expand to min scale (from hour scale)
        # [['sleeping', 360], ['waking up and starting her morning routine', 60],
        # ['eating breakfast', 60],..
        n_m1_hourly_compressed = []
        for task, duration in _n_m1_hourly_compressed:
            n_m1_hourly_compressed += [[task, duration * 60]]
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {n_m1_hourly_compressed}")
        return n_m1_hourly_compressed

View file

@ -0,0 +1,125 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : generate_iterative_chat_utt
from examples.st_game.actions.st_action import STAction
from examples.st_game.utils.utils import extract_first_json_dict
from metagpt.environment.api.env_api import EnvAPIAbstract
from metagpt.logs import logger
class GenIterChatUTT(STAction):
    """Generate the next utterance in an iterative two-role conversation."""

    name: str = "GenIterChatUTT"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff a JSON dict can be extracted from the response."""
        resp = False
        try:
            _ = extract_first_json_dict(llm_resp)
            resp = True
        except Exception:
            pass
        return resp

    def _func_cleanup(self, llm_resp: str, prompt: str) -> dict:
        """Normalize the LLM JSON into {"utterance": str, "end": bool}."""
        gpt_response = extract_first_json_dict(llm_resp)
        cleaned_dict = dict()
        cleaned = []
        for key, val in gpt_response.items():
            cleaned += [val]
        cleaned_dict["utterance"] = cleaned[0]
        cleaned_dict["end"] = True
        # Any "f"/"F" in the second value (e.g. "false") keeps the chat going.
        if "f" in str(cleaned[1]) or "F" in str(cleaned[1]):
            cleaned_dict["end"] = False
        return cleaned_dict

    def _func_fail_default_resp(self) -> dict:
        cleaned_dict = dict()
        cleaned_dict["utterance"] = "..."
        cleaned_dict["end"] = False
        return cleaned_dict

    async def run(
        self,
        init_role: "STRole",
        target_role: "STRole",
        retrieved: dict,
        curr_context: str,
        curr_chat: list[str],
        *args,
        **kwargs,
    ) -> dict:
        """Return {"utterance", "end"} for `init_role`'s next line to `target_role`."""

        def create_prompt_input(
            access_tile: dict[str, str],
            init_role: "STRole",
            target_role: "STRole",
            retrieved: dict,
            curr_context: str,
            curr_chat: list[str],
        ):
            role = init_role
            scratch = role.rc.scratch
            target_scratch = target_role.rc.scratch

            # Mention the most recent prior conversation with this target, if any.
            prev_convo_insert = "\n"
            if role.rc.memory.chat_list:
                for i in role.rc.memory.chat_list:
                    if i.object == target_role.name:
                        v1 = int((scratch.curr_time - i.created).total_seconds() / 60)
                        prev_convo_insert += (
                            f"{str(v1)} minutes ago, {scratch.name} and "
                            f"{target_scratch.name} were already {i.description} "
                            f"This context takes place after that conversation."
                        )
                        break
            if prev_convo_insert == "\n":
                prev_convo_insert = ""
            # Drop the reference when the last chat is older than 8 hours.
            if role.rc.memory.chat_list:
                if int((scratch.curr_time - role.rc.memory.chat_list[-1].created).total_seconds() / 60) > 480:
                    prev_convo_insert = ""
            # BUGFIX: removed leftover debug `print(prev_convo_insert)`.

            curr_sector = f"{access_tile['sector']}"
            curr_arena = f"{access_tile['arena']}"
            curr_location = f"{curr_arena} in {curr_sector}"

            retrieved_str = ""
            for key, vals in retrieved.items():
                for v in vals:
                    retrieved_str += f"- {v.description}\n"

            convo_str = ""
            for i in curr_chat:
                convo_str += ": ".join(i) + "\n"
            if convo_str == "":
                convo_str = "[The conversation has not started yet -- start it!]"

            # BUGFIX: the prompt text used to read "Here is Here is a brief ...".
            init_iss = f"Here is a brief description of {scratch.name}.\n{scratch.get_str_iss()}"
            prompt_input = [
                init_iss,
                scratch.name,
                retrieved_str,
                prev_convo_insert,
                curr_location,
                curr_context,
                scratch.name,
                target_scratch.name,
                convo_str,
                scratch.name,
                target_scratch.name,
                scratch.name,
                scratch.name,
                scratch.name,
            ]
            return prompt_input

        access_tile = await init_role.rc.env.observe(
            EnvAPIAbstract(api_name="access_tile", kwargs={"tile": init_role.scratch.curr_tile})
        )
        prompt_input = create_prompt_input(access_tile, init_role, target_role, retrieved, curr_context, curr_chat)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "iterative_convo_v1.txt")
        # original using `ChatGPT_safe_generate_response_OLD`
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_wo_extra_prompt(prompt)
        logger.info(f"Role: {init_role.name} Action: {self.cls_name} output: {output}")
        return output

View file

@ -0,0 +1,31 @@
from examples.st_game.actions.st_action import STAction
from metagpt.logs import logger
class AgentWhisperThoughtAction(STAction):
    """Turn a whispered statement into the role's inner thought."""

    name: str = "AgentWhisperThoughtAction"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """A response is valid when it can be cleaned without raising."""
        try:
            self._func_cleanup(llm_resp, prompt)
            return True
        except Exception:
            return False

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> str:
        """Keep only the text before the first double quote."""
        return llm_resp.split('"')[0].strip()

    def _func_fail_default_resp(self) -> str:
        """No fallback is defined for this action."""
        return None

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Ask the LLM for the inner thought triggered by `statements`."""
        prompt_input = [role.scratch.name, statements]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "whisper_inner_thought_v1.txt")
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=50)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output

View file

@ -0,0 +1,154 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : new_decomp_schedule
import datetime
from examples.st_game.actions.st_action import STAction
from metagpt.logs import logger
class NewDecompSchedule(STAction):
    """Re-decompose a role's hourly schedule after an activity is inserted,
    keeping the revised plan's total duration equal to the original window."""

    name: str = "NewDecompSchedule"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Accept only a cleaned schedule of (str task, int minutes) pairs whose
        summed duration equals the originally planned window in the prompt.

        Bug fix: the type checks were inverted (`isinstance` instead of
        `not isinstance`) and the success path fell through to `return False`,
        so validation could never succeed and the fallback was always used.
        """
        try:
            llm_resp = self._func_cleanup(llm_resp, prompt)
            dur_sum = 0
            for act, dur in llm_resp:
                dur_sum += dur
                if not isinstance(act, str):
                    return False
                if not isinstance(dur, int):
                    return False
            # recover the "HH:MM am to HH:MM pm" window from the prompt's first line
            x = prompt.split("\n")[0].split("originally planned schedule from")[-1].strip()[:-1]
            x = [datetime.datetime.strptime(i.strip(), "%H:%M %p") for i in x.split(" to ")]
            delta_min = int((x[1] - x[0]).total_seconds() / 60)
            if int(dur_sum) != int(delta_min):
                return False
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> list:
        """Parse 'HH:MM ~ HH:MM -- task' lines into [task, minutes] pairs."""
        new_schedule = prompt + " " + llm_resp.strip()
        new_schedule = new_schedule.split("The revised schedule:")[-1].strip()
        new_schedule = new_schedule.split("\n")
        ret_temp = []
        for i in new_schedule:
            ret_temp += [i.split(" -- ")]
        ret = []
        for time_str, action in ret_temp:
            start_time = time_str.split(" ~ ")[0].strip()
            end_time = time_str.split(" ~ ")[1].strip()
            delta = datetime.datetime.strptime(end_time, "%H:%M") - datetime.datetime.strptime(start_time, "%H:%M")
            delta_min = int(delta.total_seconds() / 60)
            if delta_min < 0:
                delta_min = 0
            ret += [[action, delta_min]]
        return ret

    def _func_fail_default_resp(self, main_act_dur: list, truncated_act_dur: list) -> list:
        """Fallback: splice the truncated prefix onto the original schedule and
        trim the last task so the total duration is preserved.
        (Annotations fixed: both arguments and the return value are lists.)
        """
        dur_sum = 0
        for act, dur in main_act_dur:
            dur_sum += dur
        ret = truncated_act_dur[:]
        ret += main_act_dur[len(ret) - 1 :]
        # If there are access, we need to trim...
        ret_dur_sum = 0
        count = 0
        over = None
        for act, dur in ret:
            ret_dur_sum += dur
            if ret_dur_sum == dur_sum:
                break
            if ret_dur_sum > dur_sum:
                over = ret_dur_sum - dur_sum
                break
            count += 1
        if over:
            ret = ret[: count + 1]
            ret[-1][1] -= over
        return ret

    async def run(
        self,
        role: "STRole",
        main_act_dur: list,
        truncated_act_dur: list,
        start_time_hour: datetime,
        end_time_hour: datetime,
        inserted_act: str,
        inserted_act_dur: int,
        *args,
        **kwargs,
    ):
        """Render new_decomp_schedule_v1 and return the revised [task, minutes] plan."""
        def create_prompt_input(
            role: "STRole",
            main_act_dur: list,
            truncated_act_dur: list,
            start_time_hour: datetime,
            end_time_hour: datetime,
            inserted_act: str,
            inserted_act_dur: int,
        ):
            persona_name = role.name
            start_hour_str = start_time_hour.strftime("%H:%M %p")
            end_hour_str = end_time_hour.strftime("%H:%M %p")
            # original plan rendered as "HH:MM ~ HH:MM -- task" lines
            original_plan = ""
            for_time = start_time_hour
            for i in main_act_dur:
                original_plan += (
                    f'{for_time.strftime("%H:%M")} ~ '
                    f'{(for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M")} -- ' + i[0]
                )
                original_plan += "\n"
                for_time += datetime.timedelta(minutes=int(i[1]))
            # truncated plan plus a dangling "HH:MM ~" line for the LLM to complete
            new_plan_init = ""
            for_time = start_time_hour
            for count, i in enumerate(truncated_act_dur):
                new_plan_init += (
                    f'{for_time.strftime("%H:%M")} ~ '
                    f'{(for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M")} -- ' + i[0]
                )
                new_plan_init += "\n"
                if count < len(truncated_act_dur) - 1:
                    for_time += datetime.timedelta(minutes=int(i[1]))
            new_plan_init += (for_time + datetime.timedelta(minutes=int(i[1]))).strftime("%H:%M") + " ~"
            prompt_input = [
                persona_name,
                start_hour_str,
                end_hour_str,
                original_plan,
                persona_name,
                inserted_act,
                inserted_act_dur,
                persona_name,
                start_hour_str,
                end_hour_str,
                end_hour_str,
                new_plan_init,
            ]
            return prompt_input
        prompt_input = create_prompt_input(
            role, main_act_dur, truncated_act_dur, start_time_hour, end_time_hour, inserted_act, inserted_act_dur
        )
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "new_decomp_schedule_v1.txt")
        self.fail_default_resp = self._func_fail_default_resp(main_act_dur, truncated_act_dur)
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=1000)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output

View file

@ -0,0 +1,277 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Integration Reflect Action
import re
from examples.st_game.actions.st_action import STAction
from metagpt.logs import logger
# Run GPT Prompt Focal Point method
class AgentFocusPt(STAction):
    """Derive focal-point questions from a set of statements
    (generate_focal_pt_v1 template)."""

    name: str = "AgentFocusPt"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff cleanup runs without raising."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> str:
        try:
            # Cleanup handling has been completed for run_v2
            return llm_resp
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # no fallback for this action
        return None

    async def run(self, role: "STRole", statements: str, n: int, test_input=None) -> str:
        """Ask for `n` focal points about `statements`."""
        # template inputs: [statements, n]
        prompt_input = [statements, str(n)]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "generate_focal_pt_v1.txt")
        example_output = '["What should Jane do for lunch", "Does Jane like strawberry", "Who is Jane"]'
        special_instruction = "Output must be a list of str."
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Insight and Guidance
class AgentInsightAndGuidance(STAction):
    """Extract 'insight (because of N, M)' lines into {insight: [evidence ids]}
    via the insight_and_evidence_v1 prompt template."""
    name: str = "AgentInsightAndGuidance"
    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff cleanup runs without raising."""
        try:
            self._func_cleanup(llm_resp, prompt)
            return True
        except Exception:
            return False
    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> dict:
        """Parse numbered 'thought (because of <ids>)' lines into a dict of
        thought -> list of integer evidence indices."""
        try:
            # re-prepend the "1. " the template consumed so every line splits uniformly
            llm_resp = "1. " + llm_resp.strip()
            ret = dict()
            for i in llm_resp.split("\n"):
                row = " ".join(i.split(". ")[1:])
                if "(because of " not in row:
                    continue
                thought = row.split("(because of ")[0].strip()
                if ")" not in row.split("(because of ")[1]:
                    continue
                evi_raw = row.split("(because of ")[1].split(")")[0].strip()
                evi_raw = re.findall(r"\d+", evi_raw)
                evi_raw = [int(i.strip()) for i in evi_raw]
                ret[thought] = evi_raw
            return ret
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")
    def _func_fail_default_resp(self, n: int) -> list:
        # fallback is a list of n placeholder thoughts (annotation fixed: list, not str)
        return ["I am hungry"] * n
    async def run(self, role: "STRole", statements: str, n: int, test_input=None) -> dict:
        """Ask for `n` insights (with evidence) about `statements`."""
        def create_prompt_input(role, statements, n, test_input=None):
            # template inputs: [statements, n]
            prompt_input = [statements, str(n)]
            return prompt_input
        prompt_input = create_prompt_input(role, statements, n)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "insight_and_evidence_v1.txt")
        self.fail_default_resp = self._func_fail_default_resp(n)
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=150)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Event Triple
class AgentEventTriple(STAction):
    """Extract a (subject, predicate, object) event triple for a statement
    (generate_event_triple_v1 template)."""

    name: str = "AgentEventTriple"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff cleanup yields exactly two parts (predicate, object)."""
        try:
            if len(self._func_cleanup(llm_resp, prompt="")) != 2:
                return False
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> list:
        try:
            # take everything before the first ')' and split on commas
            parts = [piece.strip() for piece in llm_resp.strip().split(")")[0].split(",")]
            # keep the last two fields when the LLM echoed the subject too
            return parts if len(parts) == 2 else parts[-2:]
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # no fallback for this action
        pass

    async def run(self, statements: str, role: "STRole", verbose=False) -> tuple:
        """Return (role name, predicate, object) for `statements`."""
        if "(" in statements:
            # keep only the parenthesized core of the statement
            statements = statements.split("(")[-1].split(")")[0]
        prompt_input = [role.scratch.name, statements, role.scratch.name]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "generate_event_triple_v1.txt")
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=30)
        output = (role.scratch.name, output[0], output[1])
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Event Poignancy
class AgentEventPoignancy(STAction):
    """Score how poignant an event is for a role on a 1-10 scale
    (poignancy_event_v1 template)."""

    name: str = "AgentEventPoignancy"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff the response parses as an integer."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> int:
        try:
            return int(llm_resp.strip())
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # no fallback for this action
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Return the poignancy score for `statements` as seen by `role`."""
        scratch = role.scratch
        # template inputs: [name, identity stable set, name, event description]
        prompt_input = [scratch.name, scratch.get_str_iss(), scratch.name, statements]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "poignancy_event_v1.txt")
        example_output = "5"  # ########
        special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10."
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Chat Poignancy
class AgentChatPoignancy(STAction):
    """Score how poignant a conversation is for a role on a 1-10 scale
    (poignancy_chat_v1 template)."""

    name: str = "AgentChatPoignancy"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff the response parses as an integer."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> int:
        try:
            return int(llm_resp.strip())
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # no fallback for this action
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Return the poignancy score for the chat `statements` as seen by `role`."""
        scratch = role.scratch
        # template inputs: [name, identity stable set, name, chat description]
        prompt_input = [scratch.name, scratch.get_str_iss(), scratch.name, statements]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "poignancy_chat_v1.txt")
        example_output = "5"  # ########
        special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10."
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Planning Thought on Convo
class AgentPlanThoughtOnConvo(STAction):
    """Derive a planning thought from a finished conversation
    (planning_thought_on_convo_v1 template)."""

    name: str = "AgentPlanThoughtOnConvo"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff cleanup runs without raising."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> str:
        try:
            # keep only the text before the first double quote
            head, *_ = llm_resp.split('"')
            return head.strip()
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # no fallback for this action
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Return a planning thought `role` takes away from the conversation."""
        name = role.scratch.name
        # template inputs: [conversation, name, name, name]
        prompt_input = [statements, name, name, name]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "planning_thought_on_convo_v1.txt")
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=50)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output
# Run GPT Prompt Memory on Convo
class AgentMemoryOnConvo(STAction):
    """Summarize what was memorable about a conversation
    (memo_on_convo_v1 template)."""

    name: str = "AgentMemoryOnConvo"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff cleanup runs without raising."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str = "") -> str:
        try:
            # keep only the text before the first double quote
            head, *_ = llm_resp.split('"')
            return head.strip()
        except Exception as exp:
            logger.error(f"{self.cls_name} with error {exp}")

    def _func_fail_default_resp(self) -> str:
        # no fallback for this action
        pass

    async def run(self, role: "STRole", statements: str, test_input=None, verbose=False) -> str:
        """Return a memorable takeaway for `role` from the conversation."""
        name = role.scratch.name
        # template inputs: [conversation, name, name, name]
        prompt_input = [statements, name, name, name]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "memo_on_convo_v1.txt")
        example_output = "Jane Doe was interesting to talk to."
        special_instruction = (
            "The output should ONLY contain a string that summarizes anything interesting "
            "that the agent may have noticed"
        )
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output

View file

@ -0,0 +1,119 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : StanfordTown Action
import json
import time
from abc import abstractmethod
from pathlib import Path
from typing import Any, Optional, Union
from examples.st_game.utils.const import PROMPTS_DIR
from metagpt.actions.action import Action
from metagpt.config2 import config
from metagpt.logs import logger
class STAction(Action):
    """Base class for Stanford Town actions: renders file-based prompt templates
    and wraps retried LLM calls with per-action validate/cleanup hooks."""

    name: str = "STAction"
    prompt_dir: Path = PROMPTS_DIR
    fail_default_resp: Optional[str] = None

    @property
    def cls_name(self):
        # concrete subclass name, used throughout log messages
        return self.__class__.__name__

    @abstractmethod
    def _func_validate(self, llm_resp: str, prompt: str):
        """Return True when `llm_resp` can be parsed by `_func_cleanup`."""
        raise NotImplementedError

    @abstractmethod
    def _func_cleanup(self, llm_resp: str, prompt: str):
        """Parse a validated raw LLM response into this action's result type."""
        raise NotImplementedError

    @abstractmethod
    def _func_fail_default_resp(self):
        """Fallback result used when all retries fail."""
        raise NotImplementedError

    def generate_prompt_with_tmpl_filename(self, prompt_input: Union[str, list], tmpl_filename) -> str:
        """
        same with `generate_prompt`
        Args:
            prompt_input: the input we want to feed in (IF THERE ARE MORE THAN ONE INPUT, THIS CAN BE A LIST.)
            tmpl_filename: prompt template filename
        Returns:
            a str prompt that will be sent to LLM server.
        """
        if isinstance(prompt_input, str):
            prompt_input = [prompt_input]
        prompt_input = [str(i) for i in prompt_input]
        # fix: use a context manager so the template file is closed even if read() raises
        with open(str(self.prompt_dir.joinpath(tmpl_filename)), "r") as f:
            prompt = f.read()
        for count, i in enumerate(prompt_input):
            prompt = prompt.replace(f"!<INPUT {count}>!", i)
        if "<commentblockmarker>###</commentblockmarker>" in prompt:
            prompt = prompt.split("<commentblockmarker>###</commentblockmarker>")[1]
        return prompt.strip()

    async def _aask(self, prompt: str) -> str:
        return await self.llm.aask(prompt)

    async def _run_gpt35_max_tokens(self, prompt: str, max_tokens: int = 50, retry: int = 3):
        """Ask with a temporary global `max_token` override; return the cleaned
        response, or `self.fail_default_resp` after `retry` failures."""
        for idx in range(retry):
            try:
                tmp_max_tokens_rsp = getattr(config.llm, "max_token", 1500)
                setattr(config.llm, "max_token", max_tokens)
                self.llm.use_system_prompt = False  # to make it behave like a non-chat completions
                try:
                    llm_resp = await self._aask(prompt)
                finally:
                    # fix: restore the global override even when the request raises,
                    # otherwise later actions inherit this action's max_tokens
                    setattr(config.llm, "max_token", tmp_max_tokens_rsp)
                logger.info(f"Action: {self.cls_name} llm _run_gpt35_max_tokens raw resp: {llm_resp}")
                if self._func_validate(llm_resp, prompt):
                    return self._func_cleanup(llm_resp, prompt)
            except Exception as exp:
                logger.warning(f"Action: {self.cls_name} _run_gpt35_max_tokens exp: {exp}")
                # NOTE(review): time.sleep blocks the event loop inside a coroutine;
                # consider `await asyncio.sleep(5)` — confirm callers before changing.
                time.sleep(5)
        return self.fail_default_resp

    async def _run_gpt35(
        self, prompt: str, example_output: str, special_instruction: str, retry: int = 3
    ) -> Union[bool, Any]:
        """same with `gpt_structure.ChatGPT_safe_generate_response`"""
        # wrap the prompt so the model answers as {"output": ...} JSON
        prompt = '"""\n' + prompt + '\n"""\n'
        prompt += f"Output the response to the prompt above in json. {special_instruction}\n"
        prompt += "Example output json:\n"
        prompt += '{"output": "' + str(example_output) + '"}'
        for idx in range(retry):
            try:
                llm_resp = await self._aask(prompt)
                logger.info(f"Action: {self.cls_name} llm _run_gpt35 raw resp: {llm_resp}")
                # drop any trailing junk after the final closing brace
                end_idx = llm_resp.strip().rfind("}") + 1
                llm_resp = llm_resp[:end_idx]
                llm_resp = json.loads(llm_resp)["output"]
                if self._func_validate(llm_resp, prompt):
                    return self._func_cleanup(llm_resp, prompt)
            except Exception as exp:
                logger.warning(f"Action: {self.cls_name} _run_gpt35 exp: {exp}")
                time.sleep(5)  # usually avoid `Rate limit`
        return False

    async def _run_gpt35_wo_extra_prompt(self, prompt: str, retry: int = 3) -> str:
        """Ask with the prompt as-is (no JSON wrapping); fall back after retries."""
        for idx in range(retry):
            try:
                llm_resp = await self._aask(prompt)
                llm_resp = llm_resp.strip()
                logger.info(f"Action: {self.cls_name} llm _run_gpt35_wo_extra_prompt raw resp: {llm_resp}")
                if self._func_validate(llm_resp, prompt):
                    return self._func_cleanup(llm_resp, prompt)
            except Exception as exp:
                logger.warning(f"Action: {self.cls_name} _run_gpt35_wo_extra_prompt exp: {exp}")
                time.sleep(5)  # usually avoid `Rate limit`
        return self.fail_default_resp

    async def run(self, *args, **kwargs):
        """Run action"""
        raise NotImplementedError("The run method should be implemented in a subclass.")

View file

@ -0,0 +1,47 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : summarize the content of agents' conversation
from examples.st_game.actions.st_action import STAction
from metagpt.logs import logger
class SummarizeConv(STAction):
    """Summarize a conversation into a short 'conversing about ...' phrase
    (summarize_conversation_v1 template)."""

    name: str = "SummarizeConv"

    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff cleanup runs without raising."""
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> str:
        return "conversing about " + llm_resp.strip()

    def _func_fail_default_resp(self) -> str:
        return "conversing with a housemate about morning greetings"

    async def run(self, conv: list):
        """Summarize `conv`, a list of (speaker, utterance) rows."""
        # render rows as: speaker: "utterance"
        lines = [f'{row[0]}: "{row[1]}"\n' for row in conv]
        prompt_input = ["".join(lines)]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "summarize_conversation_v1.txt")
        example_output = "conversing about what to eat for lunch"
        special_instruction = (
            "The output must continue the sentence above by filling in the <fill in> tag. "
            "Don't start with 'this is a conversation about...' Just finish the sentence "
            "but do not miss any important details (including who are chatting)."
        )
        output = await self._run_gpt35(prompt, example_output, special_instruction)
        logger.info(f"Action: {self.cls_name} output: {output}")
        return output

View file

@ -0,0 +1,175 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : task_decomp
import datetime
from metagpt.logs import logger
from ..actions.st_action import STAction
class TaskDecomp(STAction):
    """Decompose one hourly schedule entry into minute-level subtasks via the
    task_decomp_v3 prompt template, then renormalize durations to fit."""
    name: str = "TaskDecomp"
    def _func_cleanup(self, llm_resp: str, prompt: str) -> list:
        """Parse '<task> (duration in minutes: N, ...)' lines into [task, minutes]
        pairs, then pad/trim so the total equals the expected duration from the prompt."""
        # TODO SOMETHING HERE sometimes fails... See screenshot
        temp = [i.strip() for i in llm_resp.split("\n")]
        _cr = []
        cr = []
        for count, i in enumerate(temp):
            if count != 0:
                # drop the leading "N) <name> is" tokens on continuation lines
                _cr += [" ".join([j.strip() for j in i.split(" ")][3:])]
            else:
                _cr += [i]
        for count, i in enumerate(_cr):
            k = [j.strip() for j in i.split("(duration in minutes:")]
            task = k[0]
            if task[-1] == ".":
                task = task[:-1]
            duration = int(k[1].split(",")[0].strip())
            cr += [[task, duration]]
        total_expected_min = int(prompt.split("(total duration in minutes")[-1].split("):")[0].strip())
        # TODO -- now, you need to make sure that this is the same as the sum of
        # the current action sequence.
        # Expand each task into one slot per minute (durations rounded down to
        # multiples of 5), so the list can be padded/trimmed minute-by-minute.
        curr_min_slot = [
            ["dummy", -1],
        ]  # (task_name, task_index)
        for count, i in enumerate(cr):
            i_task = i[0]
            i_duration = i[1]
            i_duration -= i_duration % 5
            if i_duration > 0:
                for j in range(i_duration):
                    curr_min_slot += [(i_task, count)]
        curr_min_slot = curr_min_slot[1:]
        if len(curr_min_slot) > total_expected_min:
            # NOTE(review): index 60 looks like a hard-coded "past the first hour"
            # slot inherited from GA; verify it is intended for windows != 60 min.
            last_task = curr_min_slot[60]
            for i in range(1, 6):
                curr_min_slot[-1 * i] = last_task
        elif len(curr_min_slot) < total_expected_min:
            last_task = curr_min_slot[-1]
            for i in range(total_expected_min - len(curr_min_slot)):
                curr_min_slot += [last_task]
        # Re-collapse the minute slots into consecutive [task, minutes] runs.
        cr_ret = [
            ["dummy", -1],
        ]
        for task, task_index in curr_min_slot:
            if task != cr_ret[-1][0]:
                cr_ret += [[task, 1]]
            else:
                cr_ret[-1][1] += 1
        cr = cr_ret[1:]
        return cr
    def _func_validate(self, llm_resp: str, prompt: str) -> bool:
        """Valid iff cleanup runs without raising."""
        # TODO -- this sometimes generates error
        try:
            self._func_cleanup(llm_resp, prompt)
        except Exception:
            return False
        return True
    def _func_fail_default_resp(self) -> int:
        # fallback: a single idle entry
        fs = [["asleep", 0]]
        return fs
    async def run(self, role: "STRole", task_desc: str, truncated_act_dur: int, *args, **kwargs):
        """Decompose `task_desc` (annotation fixed: it is a str) into subtasks
        totaling `truncated_act_dur` minutes."""
        def create_prompt_input(role, task, duration):
            """
            Today is Saturday June 25. From 00:00 ~ 06:00am, Maeve is
            planning on sleeping, 06:00 ~ 07:00am, Maeve is
            planning on waking up and doing her morning routine,
            and from 07:00am ~08:00am, Maeve is planning on having breakfast.
            """
            # summarize the current hourly entry plus up to two following ones
            curr_f_org_index = role.scratch.get_f_daily_schedule_hourly_org_index()
            all_indices = []
            # if curr_f_org_index > 0:
            #   all_indices += [curr_f_org_index-1]
            all_indices += [curr_f_org_index]
            if curr_f_org_index + 1 <= len(role.scratch.f_daily_schedule_hourly_org):
                all_indices += [curr_f_org_index + 1]
            if curr_f_org_index + 2 <= len(role.scratch.f_daily_schedule_hourly_org):
                all_indices += [curr_f_org_index + 2]
            curr_time_range = ""
            print("DEBUG")
            print(role.scratch.f_daily_schedule_hourly_org)
            print(all_indices)
            summ_str = f'Today is {role.scratch.curr_time.strftime("%B %d, %Y")}. '
            summ_str += "From "
            for index in all_indices:
                print("index", index)
                if index < len(role.scratch.f_daily_schedule_hourly_org):
                    # minutes elapsed before this entry determine its clock range
                    start_min = 0
                    for i in range(index):
                        start_min += role.scratch.f_daily_schedule_hourly_org[i][1]
                    end_min = start_min + role.scratch.f_daily_schedule_hourly_org[index][1]
                    start_time = datetime.datetime.strptime("00:00:00", "%H:%M:%S") + datetime.timedelta(
                        minutes=start_min
                    )
                    end_time = datetime.datetime.strptime("00:00:00", "%H:%M:%S") + datetime.timedelta(
                        minutes=end_min
                    )
                    start_time_str = start_time.strftime("%H:%M%p")
                    end_time_str = end_time.strftime("%H:%M%p")
                    summ_str += (
                        f"{start_time_str} ~ {end_time_str}, {role.name} is planning "
                        f"on {role.scratch.f_daily_schedule_hourly_org[index][0]}, "
                    )
                    if curr_f_org_index + 1 == index:
                        curr_time_range = f"{start_time_str} ~ {end_time_str}"
            summ_str = summ_str[:-2] + "."
            prompt_input = []
            prompt_input += [role.scratch.get_str_iss()]
            prompt_input += [summ_str]
            # prompt_input += [role.scratch.get_str_curr_date_str()]
            prompt_input += [role.scratch.get_str_firstname()]
            prompt_input += [role.scratch.get_str_firstname()]
            prompt_input += [task]
            prompt_input += [curr_time_range]
            prompt_input += [duration]
            prompt_input += [role.scratch.get_str_firstname()]
            return prompt_input
        prompt_input = create_prompt_input(role, task_desc, truncated_act_dur)
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "task_decomp_v3.txt")
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=1000)
        logger.info(f"Role: {role.name} {self.cls_name} output: {output}")
        # keep only the prefix of subtasks that fits in the truncated duration
        fin_output = []
        time_sum = 0
        for i_task, i_duration in output:
            time_sum += i_duration
            # HM?????????
            # if time_sum < duration:
            if time_sum <= truncated_act_dur:
                fin_output += [[i_task, i_duration]]
            else:
                break
        # stretch the last kept subtask so the durations sum exactly
        ftime_sum = 0
        for fi_task, fi_duration in fin_output:
            ftime_sum += fi_duration
        # print ("for debugging... line 365", fin_output)
        fin_output[-1][1] += truncated_act_dur - ftime_sum
        output = fin_output
        # prefix each subtask with the parent task description
        task_decomp = output
        ret = []
        for decomp_task, duration in task_decomp:
            ret += [[f"{task_desc} ({decomp_task})", duration]]
        output = ret
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output

View file

@ -0,0 +1,43 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : wake_up
from metagpt.logs import logger
from ..actions.st_action import STAction
class WakeUp(STAction):
    """Decide the hour a role wakes up, from its identity and lifestyle
    (wake_up_hour_v1 template)."""

    name: str = "WakeUp"

    def _func_validate(self, llm_resp: str, prompt: str = None) -> bool:
        """Valid iff cleanup parses an integer hour out of the response."""
        try:
            self._func_cleanup(llm_resp, prompt="")
        except Exception:
            return False
        return True

    def _func_cleanup(self, llm_resp: str, prompt: str) -> int:
        # accept answers like "7 am" -> 7
        return int(llm_resp.strip().lower().split("am")[0])

    def _func_fail_default_resp(self) -> int:
        # default wake-up hour
        return 8

    async def run(self, role: "STRole"):
        """Return the wake-up hour for `role`."""
        scratch = role.scratch
        # template inputs: [identity stable set, lifestyle, first name]
        prompt_input = [
            scratch.get_str_iss(),
            scratch.get_str_lifestyle(),
            scratch.get_str_firstname(),
        ]
        prompt = self.generate_prompt_with_tmpl_filename(prompt_input, "wake_up_hour_v1.txt")
        self.fail_default_resp = self._func_fail_default_resp()
        output = await self._run_gpt35_max_tokens(prompt, max_tokens=5)
        logger.info(f"Role: {role.name} Action: {self.cls_name} output: {output}")
        return output

View file

View file

@ -0,0 +1,374 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : BasicMemory,AgentMemory实现
from datetime import datetime
from pathlib import Path
from typing import Optional
from pydantic import Field, field_serializer, model_validator
from metagpt.logs import logger
from metagpt.memory.memory import Memory
from metagpt.schema import Message
from metagpt.utils.common import read_json_file, write_json_file
class BasicMemory(Message):
    """
    BasicMemory extends MetaGPT's Message; `content` doubles as `description`.
    Message supports chat well but not an agent's Perceive/Reflect/Plan, so
    following GA we keep three node types (event / thought / chat); the chat
    type's detailed design is still open.
    """

    memory_id: Optional[str] = Field(default=None)  # memory node id, e.g. "node_3"
    memory_count: int = -1  # position in the whole memory stream (equals Memory size)
    type_count: int = -1  # position within its own memory type
    memory_type: Optional[str] = Field(default=None)  # one of "event", "thought", "chat"
    depth: int = -1  # reflection depth
    created: Optional[datetime] = Field(default=None)  # creation time
    expiration: Optional[datetime] = Field(default=None)  # expiry time (None = never)
    last_accessed: Optional[datetime] = Field(default=None)  # last retrieval; initialized to `created`
    subject: Optional[str] = Field(default=None)  # triple: subject
    predicate: Optional[str] = Field(default=None)  # triple: predicate
    object: Optional[str] = Field(default=None)  # triple: object
    description: Optional[str] = Field(default=None)
    embedding_key: Optional[str] = Field(default=None)  # mirrors self.content
    poignancy: int = -1  # importance score
    keywords: list[str] = Field(default=[])  # keywords
    filling: list = Field(default=[])  # memory_ids of related nodes

    @model_validator(mode="before")
    @classmethod
    def check_values(cls, values):
        # last_accessed starts equal to created; description mirrors content
        if "created" in values:
            values["last_accessed"] = values["created"]
        if "content" in values:
            values["description"] = values["content"]
        return values

    @field_serializer("created", "expiration")
    def transform_time_field(self, time_field: Optional[datetime]) -> str:
        # GA's nodes.json stores datetimes as "%Y-%m-%d %H:%M:%S" strings
        if time_field:
            time_field = time_field.strftime("%Y-%m-%d %H:%M:%S")
        return time_field

    def summary(self):
        """Return the (subject, predicate, object) triple."""
        return self.subject, self.predicate, self.object

    def save_to_dict(self) -> dict:
        """
        Serialize to a GA-compatible nodes.json entry.
        Note: `cause_by` is not GA-compatible and needs a format conversion.

        Bug fix: the dump previously used `include=["node_count", "type", ...]`,
        but the model's fields are `memory_count`/`memory_type`, so those keys
        were silently missing and `AgentMemory.load()` (which reads
        node_details["type"]) could not round-trip the file. Dump the real
        field names, then rename them to the GA keys.
        """
        memory_dict = dict()
        node_id = self.memory_id
        basic_mem_obj = self.model_dump(
            include=[
                "memory_count",
                "type_count",
                "memory_type",
                "depth",
                "created",
                "expiration",
                "subject",
                "predicate",
                "object",
                "description",
                "embedding_key",
                "poignancy",
                "keywords",
                "filling",
                "cause_by",
            ]
        )
        # GA-compatible key names expected by load() and the GA frontend
        basic_mem_obj["node_count"] = basic_mem_obj.pop("memory_count")
        basic_mem_obj["type"] = basic_mem_obj.pop("memory_type")
        memory_dict[node_id] = basic_mem_obj
        return memory_dict
class AgentMemory(Memory):
    """
    GA persists three JSON stores:
    1. embeddings.json (Dict embedding_key:embedding)
    2. nodes.json (Dict node_id:Node)
    3. kw_strength.json
    """

    storage: list[BasicMemory] = []  # overrides Memory.storage: all BasicMemory nodes
    event_list: list[BasicMemory] = []  # event memories, newest first
    thought_list: list[BasicMemory] = []  # thought memories, newest first
    chat_list: list[BasicMemory] = []  # chat-related memory

    event_keywords: dict[str, list[BasicMemory]] = dict()  # lowercased keyword -> event nodes
    thought_keywords: dict[str, list[BasicMemory]] = dict()  # lowercased keyword -> thought nodes
    chat_keywords: dict[str, list[BasicMemory]] = dict()  # lowercased keyword -> chat nodes

    kw_strength_event: dict[str, int] = dict()  # keyword occurrence strength (events)
    kw_strength_thought: dict[str, int] = dict()  # keyword occurrence strength (thoughts)

    memory_saved: Optional[Path] = Field(default=None)
    embeddings: dict[str, list[float]] = dict()

    def set_mem_path(self, memory_saved: Path):
        """Remember the storage directory and load its GA JSON files."""
        self.memory_saved = memory_saved
        self.load(memory_saved)

    def save(self, memory_saved: Path):
        """
        Persist nodes/embeddings/keyword-strength in GA's on-disk format.
        Nodes are written in reverse storage order (oldest node_id first).
        TODO verify test_memory still passes with this reversed-order save.
        """
        memory_json = dict()
        for i in range(len(self.storage)):
            memory_node = self.storage[len(self.storage) - i - 1]
            memory_node = memory_node.save_to_dict()
            memory_json.update(memory_node)
        write_json_file(memory_saved.joinpath("nodes.json"), memory_json)
        write_json_file(memory_saved.joinpath("embeddings.json"), self.embeddings)

        strength_json = dict()
        strength_json["kw_strength_event"] = self.kw_strength_event
        strength_json["kw_strength_thought"] = self.kw_strength_thought
        write_json_file(memory_saved.joinpath("kw_strength.json"), strength_json)

    def load(self, memory_saved: Path):
        """Parse GA's JSON files and populate this AgentMemory."""
        self.embeddings = read_json_file(memory_saved.joinpath("embeddings.json"))
        memory_load = read_json_file(memory_saved.joinpath("nodes.json"))
        for count in range(len(memory_load.keys())):
            node_id = f"node_{str(count + 1)}"
            node_details = memory_load[node_id]
            node_type = node_details["type"]
            created = datetime.strptime(node_details["created"], "%Y-%m-%d %H:%M:%S")
            expiration = None
            if node_details["expiration"]:
                expiration = datetime.strptime(node_details["expiration"], "%Y-%m-%d %H:%M:%S")

            s = node_details["subject"]
            p = node_details["predicate"]
            o = node_details["object"]

            description = node_details["description"]
            embedding_pair = (node_details["embedding_key"], self.embeddings[node_details["embedding_key"]])
            poignancy = node_details["poignancy"]
            keywords = set(node_details["keywords"])
            filling = node_details["filling"]
            if node_type == "thought":
                self.add_thought(
                    created, expiration, s, p, o, description, keywords, poignancy, embedding_pair, filling
                )
            if node_type == "event":
                self.add_event(created, expiration, s, p, o, description, keywords, poignancy, embedding_pair, filling)
            if node_type == "chat":
                self.add_chat(created, expiration, s, p, o, description, keywords, poignancy, embedding_pair, filling)

        strength_keywords_load = read_json_file(memory_saved.joinpath("kw_strength.json"))
        if strength_keywords_load["kw_strength_event"]:
            self.kw_strength_event = strength_keywords_load["kw_strength_event"]
        if strength_keywords_load["kw_strength_thought"]:
            self.kw_strength_thought = strength_keywords_load["kw_strength_thought"]

    def add(self, memory_basic: BasicMemory):
        """
        Add a new message to storage, while updating the index.
        Overrides Memory.add: stores BasicMemory and prepends the node to its
        per-type list (newest first).
        """
        # fix: storage holds BasicMemory objects, so dedupe must compare ids,
        # not test `memory_id in self.storage` (str vs BasicMemory, always False)
        if any(node.memory_id == memory_basic.memory_id for node in self.storage):
            return
        self.storage.append(memory_basic)
        if memory_basic.memory_type == "chat":
            self.chat_list[0:0] = [memory_basic]
            return
        if memory_basic.memory_type == "thought":
            self.thought_list[0:0] = [memory_basic]
            return
        if memory_basic.memory_type == "event":
            self.event_list[0:0] = [memory_basic]
            return

    def add_chat(
        self, created, expiration, s, p, o, content, keywords, poignancy, embedding_pair, filling, cause_by=""
    ):
        """Build and register a chat node (embedding is supplied at creation time)."""
        memory_count = len(self.storage) + 1
        # fix: chat nodes are numbered among chats, not thoughts
        type_count = len(self.chat_list) + 1
        memory_type = "chat"
        memory_id = f"node_{str(memory_count)}"
        depth = 1

        memory_node = BasicMemory(
            memory_id=memory_id,
            memory_count=memory_count,
            type_count=type_count,
            memory_type=memory_type,
            depth=depth,
            created=created,
            expiration=expiration,
            subject=s,
            predicate=p,
            object=o,
            description=content,
            embedding_key=embedding_pair[0],
            poignancy=poignancy,
            keywords=keywords,
            filling=filling,
            cause_by=cause_by
        )

        keywords = [i.lower() for i in keywords]
        for kw in keywords:
            if kw in self.chat_keywords:
                self.chat_keywords[kw][0:0] = [memory_node]
            else:
                self.chat_keywords[kw] = [memory_node]

        self.add(memory_node)

        self.embeddings[embedding_pair[0]] = embedding_pair[1]
        return memory_node

    def add_thought(self, created, expiration, s, p, o, content, keywords, poignancy, embedding_pair, filling):
        """Build and register a thought node; depth = 1 + max depth of filling nodes."""
        memory_count = len(self.storage) + 1
        type_count = len(self.thought_list) + 1
        memory_type = "thought"
        memory_id = f"node_{str(memory_count)}"
        depth = 1

        try:
            if filling:
                depth_list = [memory_node.depth for memory_node in self.storage if memory_node.memory_id in filling]
                depth += max(depth_list)
        except Exception as exp:
            logger.warning(f"filling init occur {exp}")
            pass

        memory_node = BasicMemory(
            memory_id=memory_id,
            memory_count=memory_count,
            type_count=type_count,
            memory_type=memory_type,
            depth=depth,
            created=created,
            expiration=expiration,
            subject=s,
            predicate=p,
            object=o,
            description=content,
            embedding_key=embedding_pair[0],
            poignancy=poignancy,
            keywords=keywords,
            filling=filling
        )

        keywords = [i.lower() for i in keywords]
        for kw in keywords:
            if kw in self.thought_keywords:
                self.thought_keywords[kw][0:0] = [memory_node]
            else:
                self.thought_keywords[kw] = [memory_node]

        self.add(memory_node)

        if f"{p} {o}" != "is idle":
            for kw in keywords:
                if kw in self.kw_strength_thought:
                    self.kw_strength_thought[kw] += 1
                else:
                    self.kw_strength_thought[kw] = 1

        self.embeddings[embedding_pair[0]] = embedding_pair[1]
        return memory_node

    def add_event(self, created, expiration, s, p, o, content, keywords, poignancy, embedding_pair, filling):
        """Build and register an event node (depth 0)."""
        memory_count = len(self.storage) + 1
        type_count = len(self.event_list) + 1
        memory_type = "event"
        memory_id = f"node_{str(memory_count)}"
        depth = 0

        if "(" in content:
            # keep the first three words plus the parenthesized detail
            content = " ".join(content.split()[:3]) + " " + content.split("(")[-1][:-1]

        memory_node = BasicMemory(
            memory_id=memory_id,
            memory_count=memory_count,
            type_count=type_count,
            memory_type=memory_type,
            depth=depth,
            created=created,
            expiration=expiration,
            subject=s,
            predicate=p,
            object=o,
            description=content,
            embedding_key=embedding_pair[0],
            poignancy=poignancy,
            keywords=keywords,
            filling=filling
        )

        keywords = [i.lower() for i in keywords]
        for kw in keywords:
            if kw in self.event_keywords:
                self.event_keywords[kw][0:0] = [memory_node]
            else:
                self.event_keywords[kw] = [memory_node]

        self.add(memory_node)

        if f"{p} {o}" != "is idle":
            for kw in keywords:
                if kw in self.kw_strength_event:
                    self.kw_strength_event[kw] += 1
                else:
                    self.kw_strength_event[kw] = 1

        self.embeddings[embedding_pair[0]] = embedding_pair[1]
        return memory_node

    def get_summarized_latest_events(self, retention):
        """Return the (s, p, o) triples of the `retention` most recent events."""
        ret_set = set()
        for e_node in self.event_list[:retention]:
            ret_set.add(e_node.summary())
        return ret_set

    def get_last_chat(self, target_role_name: str):
        """Return the most recent chat node involving `target_role_name`, or False."""
        if target_role_name.lower() in self.chat_keywords:
            return self.chat_keywords[target_role_name.lower()][0]
        else:
            return False

    def retrieve_relevant_thoughts(self, s_content: str, p_content: str, o_content: str) -> set:
        """Return thought nodes indexed under any of the triple's keywords."""
        contents = [s_content, p_content, o_content]
        ret = []
        for i in contents:
            # fix: keyword index keys are lowercased on insert, so match lowercased
            if i.lower() in self.thought_keywords:
                ret += self.thought_keywords[i.lower()]
        ret = set(ret)
        return ret

    def retrieve_relevant_events(self, s_content: str, p_content: str, o_content: str) -> set:
        """Return event nodes indexed under any of the triple's keywords."""
        contents = [s_content, p_content, o_content]
        ret = []
        for i in contents:
            # fix: keyword index keys are lowercased on insert, so match lowercased
            if i.lower() in self.event_keywords:
                ret += self.event_keywords[i.lower()]
        ret = set(ret)
        return ret

View file

@ -0,0 +1,180 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Retrieve函数实现
import datetime
from numpy import dot
from numpy.linalg import norm
from examples.st_game.memory.agent_memory import BasicMemory
from examples.st_game.utils.utils import get_embedding
def agent_retrieve(
    agent_memory,
    curr_time: datetime.datetime,
    memory_forget: float,
    query: str,
    nodes: list[BasicMemory],
    topk: int = 4,
) -> list[BasicMemory]:
    """Score candidate memory nodes against *query* and return the top-k ids.

    Meant to be driven by a Role, which owns the AgentMemory plus the scratch
    fields (curr_time, memory_forget). Each candidate gets three components,
    each normalized to [0, 1]:

        importance - the node's poignancy
        recency    - memory_forget raised to the node's age in days
        relevance  - cosine similarity of node and query embeddings

    The components are combined with equal weights and the memory_ids of the
    topk highest-scoring nodes are returned (see top_highest_x_values).
    """
    ranked = sorted(nodes, key=lambda node: node.last_accessed, reverse=True)
    scores = extract_importance(ranked, [])
    scores = extract_recency(curr_time, memory_forget, scores)
    scores = extract_relevance(agent_memory.embeddings, query, scores)
    scores = normalize_score_floats(scores, 0, 1)
    # Equal weighting of the three factors: importance, recency, relevance.
    weights = (1, 1, 1)
    combined = {
        entry["memory"].memory_id: (
            entry["importance"] * weights[0]
            + entry["recency"] * weights[1]
            + entry["relevance"] * weights[2]
        )
        for entry in scores
    }
    return top_highest_x_values(combined, topk)
def new_agent_retrieve(role, focus_points: list, n_count=30) -> dict:
    """Retrieve up to *n_count* relevant memories for each focus point of *role*.

    For every focus point, the non-idle event and thought nodes are ranked by
    agent_retrieve; the winning nodes are then looked up in storage, their
    last_accessed stamp is refreshed to the current time, and they are
    collected. Returns {focus_point: [BasicMemory, ...]}.
    """
    retrieved = dict()
    for focal_pt in focus_points:
        # Candidates are rebuilt per focus point because last_accessed (the
        # sort key) is refreshed below as winning nodes are touched.
        candidates = [
            node
            for node in role.memory.event_list + role.memory.thought_list
            if "idle" not in node.embedding_key
        ]
        candidates.sort(key=lambda node: node.last_accessed)
        winning_ids = agent_retrieve(
            role.memory, role.scratch.curr_time, role.scratch.recency_decay, focal_pt, candidates, n_count
        )
        matches = []
        for memory_id in winning_ids:
            for node in role.memory.storage:
                if node.memory_id == memory_id:
                    node.last_accessed = role.scratch.curr_time
                    matches.append(node)
        retrieved[focal_pt] = matches
    return retrieved
def top_highest_x_values(d, x):
    """Return the keys of the *x* largest values in dict *d*, best first."""
    ranked_keys = sorted(d, key=d.get, reverse=True)
    return ranked_keys[:x]
def extract_importance(memories, score_list):
    """Append an {"memory", "importance"} entry per node to *score_list*.

    Importance is the node's poignancy. Mutates and returns *score_list*.
    """
    for node in memories:
        score_list.append({"memory": node, "importance": node.poignancy})
    return score_list
def extract_relevance(agent_memory_embedding, query, score_list):
    """Fill in the "relevance" component of every entry in *score_list*.

    Relevance is the cosine similarity between the query embedding and the
    stored embedding of each memory node. Mutates and returns *score_list*.
    """
    query_embedding = get_embedding(query)
    for entry in score_list:
        node_embedding = agent_memory_embedding[entry["memory"].embedding_key]
        entry["relevance"] = cos_sim(node_embedding, query_embedding)
    return score_list
def extract_recency(curr_time, memory_forget, score_list):
    """Fill in the "recency" component of every entry in *score_list*.

    One decay factor (*memory_forget*) is applied per full real-world day
    elapsed since the memory was created. Mutates and returns *score_list*.
    """
    for entry in score_list:
        age_in_days = (curr_time - entry["memory"].created).days
        entry["recency"] = memory_forget**age_in_days
    return score_list
def cos_sim(a, b):
    """Return the cosine similarity between vectors *a* and *b*."""
    denominator = norm(a) * norm(b)
    return dot(a, b) / denominator
def normalize_list_floats(single_list, target_min, target_max):
    """Linearly rescale *single_list* in place to [target_min, target_max].

    Args:
        single_list: list of numbers; mutated in place and also returned.
        target_min: lower bound of the target interval.
        target_max: upper bound of the target interval.

    Returns:
        The same list, rescaled. An empty list yields []. When all values are
        identical there is no spread to map, so every element is set to the
        midpoint of the target interval. Bug fix: the degenerate case used
        (target_max - target_min) / 2, which equals the midpoint only when
        target_min == 0 — e.g. for [1, 3] it produced 1.0 instead of 2.0.
        (Identical behavior for the 0..1 range used elsewhere in this file.)
    """
    if len(single_list) == 0:
        return []
    min_val = min(single_list)
    max_val = max(single_list)
    range_val = max_val - min_val
    if range_val == 0:
        # Degenerate case: all values equal, place them mid-interval.
        midpoint = (target_min + target_max) / 2
        for i in range(len(single_list)):
            single_list[i] = midpoint
    else:
        for i in range(len(single_list)):
            single_list[i] = (single_list[i] - min_val) * (target_max - target_min) / range_val + target_min
    return single_list
def normalize_score_floats(score_list, target_min, target_max):
    """Normalize each score component across all entries to the target range.

    The importance, relevance and recency values are each rescaled
    independently via normalize_list_floats. Mutates and returns *score_list*.
    """
    for component in ("importance", "relevance", "recency"):
        values = [entry[component] for entry in score_list]
        values = normalize_list_floats(values, target_min, target_max)
        for entry, value in zip(score_list, values):
            entry[component] = value
    return score_list

View file

@ -0,0 +1,397 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Scratch类实现角色信息类
from datetime import datetime, timedelta
from pathlib import Path
from typing import Optional
from pydantic import (
BaseModel,
Field,
field_serializer,
field_validator,
model_validator,
)
from metagpt.utils.common import read_json_file, write_json_file
class Scratch(BaseModel):
    """Persona "scratch" memory: hyper-parameters, world state, identity,
    reflection counters, daily plans and the currently executing action.

    Serialized to/from JSON via save()/set_scratch_path(); datetime fields
    are (de)serialized in the "%B %d, %Y, %H:%M:%S" format.
    """

    # Category 1: persona hyper-parameters
    vision_r: int = 4
    att_bandwidth: int = 3
    retention: int = 5

    # Category 2: world information
    curr_time: Optional[datetime] = Field(default=None)
    curr_tile: Optional[str] = Field(default=None)
    daily_plan_req: Optional[str] = Field(default=None)

    # Category 3: the persona's core identity
    name: Optional[str] = Field(default=None)
    first_name: Optional[str] = Field(default=None)
    last_name: Optional[str] = Field(default=None)
    age: Optional[int] = Field(default=None)
    innate: Optional[str] = Field(default=None)  # L0 permanent core traits.
    learned: Optional[str] = Field(default=None)  # L1 stable traits.
    currently: Optional[str] = Field(default=None)  # L2 external implementation.
    lifestyle: Optional[str] = Field(default=None)
    living_area: Optional[str] = Field(default=None)

    # Category 4: legacy reflection variables
    concept_forget: int = 100
    daily_reflection_time: int = 60 * 3
    daily_reflection_size: int = 5
    overlap_reflect_th: int = 2
    kw_strg_event_reflect_th: int = 4
    kw_strg_thought_reflect_th: int = 4

    # Category 5: new reflection variables
    recency_w: int = 1
    relevance_w: int = 1
    importance_w: int = 1
    recency_decay: float = 0.99
    importance_trigger_max: int = 150
    importance_trigger_curr: int = 150
    importance_ele_n: int = 0
    thought_count: int = 5

    # Category 6: personal plans ([task, duration] pairs)
    daily_req: list[str] = Field(default=[])
    f_daily_schedule: list[list[str]] = Field(default=[])
    f_daily_schedule_hourly_org: list[list[str]] = Field(default=[])

    # Category 7: current action
    act_address: Optional[str] = Field(default=None)
    act_start_time: Optional[datetime] = Field(default=None)
    act_duration: Optional[int] = Field(default=None)
    act_description: Optional[str] = Field(default=None)
    act_pronunciatio: Optional[str] = Field(default=None)
    # NOTE(review): the first element is annotated str but defaults to None —
    # presumably should be Optional[str] like act_obj_event; confirm before changing
    # (the annotation feeds pydantic validation, so it is kept as-is here).
    act_event: tuple[str, Optional[str], Optional[str]] = (None, None, None)
    act_obj_description: Optional[str] = Field(default=None)
    act_obj_pronunciatio: Optional[str] = Field(default=None)
    act_obj_event: tuple[Optional[str], Optional[str], Optional[str]] = (None, None, None)
    chatting_with: Optional[str] = Field(default=None)
    chat: Optional[str] = Field(default=None)
    chatting_with_buffer: dict = dict()
    chatting_end_time: Optional[datetime] = Field(default=None)
    act_path_set: bool = False
    planned_path: list[str] = Field(default=[])

    @model_validator(mode="after")
    @classmethod
    def check_values(cls, values):
        # Seed both event tuples with the persona's name once it is known.
        # NOTE(review): with pydantic v2, mode="after" normally passes the
        # validated model instance rather than a dict — the `in`/subscript
        # access below assumes dict-like input; confirm this runs as intended.
        if "name" in values:
            values["act_event"] = (values["name"], None, None)
            values["act_obj_event"] = (values["name"], None, None)
        return values

    @field_validator("curr_time", "act_start_time", "chatting_end_time")
    @classmethod
    def check_time_filed(cls, time_filed):
        # Parse serialized "September 03, 2023, 07:00:00"-style strings back
        # into datetime; falsy input (None/empty) stays None.
        val = datetime.strptime(time_filed, "%B %d, %Y, %H:%M:%S") if time_filed else None
        return val

    @field_serializer("curr_time", "act_start_time", "chatting_end_time")
    def transform_time_field(self, time_filed: Optional[datetime]) -> str:
        # Inverse of check_time_filed: render datetimes in the save format.
        # NOTE(review): returns None (not str) for unset fields despite the
        # declared return type.
        if time_filed:
            time_filed = time_filed.strftime("%B %d, %Y, %H:%M:%S")
        return time_filed

    @classmethod
    def set_scratch_path(cls, f_saved: Path):
        """Load and return a Scratch instance from the JSON file *f_saved*."""
        scratch_load = read_json_file(f_saved)
        scratch = Scratch(**scratch_load)
        return scratch

    def save(self, out_json: Path):
        """
        Save persona's scratch.

        INPUT:
            out_json: The file where we will be saving our persona's state.
        OUTPUT:
            None
        """
        scratch = self.model_dump()
        write_json_file(out_json, scratch, encoding="utf-8")

    def get_f_daily_schedule_index(self, advance=0):
        """
        We get the current index of self.f_daily_schedule.

        Recall that self.f_daily_schedule stores the decomposed action sequences
        up until now, and the hourly sequences of the future action for the rest
        of today. Given that self.f_daily_schedule is a list of list where the
        inner list is composed of [task, duration], we continue to add up the
        duration until we reach "if elapsed > today_min_elapsed" condition. The
        index where we stop is the index we will return.

        INPUT
            advance: Integer value of the number minutes we want to look into the
                     future. This allows us to get the index of a future timeframe.
        OUTPUT
            an integer value for the current index of f_daily_schedule.
        """
        # We first calculate the number of minutes elapsed today.
        today_min_elapsed = 0
        today_min_elapsed += self.curr_time.hour * 60
        today_min_elapsed += self.curr_time.minute
        today_min_elapsed += advance
        # NOTE(review): the two loops below total durations into x but x is
        # never used — dead code kept byte-for-byte; consider removing.
        x = 0
        for task, duration in self.f_daily_schedule:
            x += duration
        x = 0
        for task, duration in self.f_daily_schedule_hourly_org:
            x += duration
        # We then calculate the current index based on that.
        curr_index = 0
        elapsed = 0
        for task, duration in self.f_daily_schedule:
            elapsed += duration
            if elapsed > today_min_elapsed:
                return curr_index
            curr_index += 1
        return curr_index

    def get_f_daily_schedule_hourly_org_index(self, advance=0):
        """
        We get the current index of self.f_daily_schedule_hourly_org.
        It is otherwise the same as get_f_daily_schedule_index.

        INPUT
            advance: Integer value of the number minutes we want to look into the
                     future. This allows us to get the index of a future timeframe.
        OUTPUT
            an integer value for the current index of f_daily_schedule.
        """
        # We first calculate the number of minutes elapsed today.
        today_min_elapsed = 0
        today_min_elapsed += self.curr_time.hour * 60
        today_min_elapsed += self.curr_time.minute
        today_min_elapsed += advance
        # We then calculate the current index based on that.
        curr_index = 0
        elapsed = 0
        for task, duration in self.f_daily_schedule_hourly_org:
            elapsed += duration
            if elapsed > today_min_elapsed:
                return curr_index
            curr_index += 1
        return curr_index

    def get_str_iss(self):
        """
        ISS stands for "identity stable set." This describes the commonset summary
        of this persona -- basically, the bare minimum description of the persona
        that gets used in almost all prompts that need to call on the persona.

        INPUT
            None
        OUTPUT
            the identity stable set summary of the persona in a string form.
        EXAMPLE STR OUTPUT
            "Name: Dolores Heitmiller
             Age: 28
             Innate traits: hard-edged, independent, loyal
             Learned traits: Dolores is a painter who wants live quietly and paint
               while enjoying her everyday life.
             Currently: Dolores is preparing for her first solo show. She mostly
               works from home.
             Lifestyle: Dolores goes to bed around 11pm, sleeps for 7 hours, eats
               dinner around 6pm.
             Daily plan requirement: Dolores is planning to stay at home all day and
               never go out."
        """
        commonset = ""
        commonset += f"Name: {self.name}\n"
        commonset += f"Age: {self.age}\n"
        commonset += f"Innate traits: {self.innate}\n"
        commonset += f"Learned traits: {self.learned}\n"
        commonset += f"Currently: {self.currently}\n"
        commonset += f"Lifestyle: {self.lifestyle}\n"
        commonset += f"Daily plan requirement: {self.daily_plan_req}\n"
        commonset += f"Current Date: {self.curr_time.strftime('%A %B %d') if self.curr_time else ''}\n"
        return commonset

    # --- simple accessors, used when composing prompts ---

    def get_str_name(self):
        return self.name

    def get_str_firstname(self):
        return self.first_name

    def get_str_lastname(self):
        return self.last_name

    def get_str_age(self):
        return str(self.age)

    def get_str_innate(self):
        return self.innate

    def get_str_learned(self):
        return self.learned

    def get_str_currently(self):
        return self.currently

    def get_str_lifestyle(self):
        return self.lifestyle

    def get_str_daily_plan_req(self):
        return self.daily_plan_req

    def get_str_curr_date_str(self):
        return self.curr_time.strftime("%A %B %d")

    def get_curr_event(self):
        """Return the current (subject, predicate, object) event triple."""
        if not self.act_address:
            # No ongoing action: an "empty" event carrying only the persona name.
            return (self.name, None, None)
        else:
            return self.act_event

    def get_curr_event_and_desc(self):
        """Return the current event triple plus the action description."""
        if not self.act_address:
            return (self.name, None, None, None)
        else:
            return (self.act_event[0], self.act_event[1], self.act_event[2], self.act_description)

    def get_curr_obj_event_and_desc(self):
        """Return the current object-event triple plus the object description."""
        if not self.act_address:
            return ("", None, None, None)
        else:
            return (self.act_address, self.act_obj_event[1], self.act_obj_event[2], self.act_obj_description)

    def add_new_action(
        self,
        action_address,
        action_duration,
        action_description,
        action_pronunciatio,
        action_event,
        chatting_with,
        chat,
        chatting_with_buffer,
        chatting_end_time,
        act_obj_description,
        act_obj_pronunciatio,
        act_obj_event,
        act_start_time=None,
    ):
        """Install a new current action (and its chat/object sub-state) on the persona."""
        self.act_address = action_address
        self.act_duration = action_duration
        self.act_description = action_description
        self.act_pronunciatio = action_pronunciatio
        self.act_event = action_event

        self.chatting_with = chatting_with
        self.chat = chat
        if chatting_with_buffer:
            self.chatting_with_buffer.update(chatting_with_buffer)
        self.chatting_end_time = chatting_end_time

        self.act_obj_description = act_obj_description
        self.act_obj_pronunciatio = act_obj_pronunciatio
        self.act_obj_event = act_obj_event

        # NOTE(review): the act_start_time parameter is accepted but ignored;
        # the start time is always stamped with curr_time — confirm intended.
        self.act_start_time = self.curr_time

        self.act_path_set = False

    def act_time_str(self):
        """
        Returns a string output of the current time.

        INPUT
            None
        OUTPUT
            A string output of the current time.
        EXAMPLE STR OUTPUT
            "14:05 P.M."
        """
        return self.act_start_time.strftime("%H:%M %p")

    def act_check_finished(self):
        """
        Checks whether the self.Action instance has finished.

        INPUT
            curr_datetime: Current time. If current time is later than the action's
                           start time + its duration, then the action has finished.
        OUTPUT
            Boolean [True]: Action has finished.
            Boolean [False]: Action has not finished and is still ongoing.
        """
        if not self.act_address:
            return True

        if self.chatting_with:
            end_time = self.chatting_end_time
        else:
            x = self.act_start_time
            if x.second != 0:
                # Round the start up to the next whole minute before adding the duration.
                x = x.replace(second=0)
                x = x + timedelta(minutes=1)
            end_time = x + timedelta(minutes=self.act_duration)

        # Equality (not >=) on the formatted clock time: the check only fires
        # during the exact minute the action ends.
        if end_time.strftime("%H:%M:%S") == self.curr_time.strftime("%H:%M:%S"):
            return True
        return False

    def act_summarize(self):
        """
        Summarize the current action as a dictionary.

        INPUT
            None
        OUTPUT
            exp: A dictionary summary of the current action.
        """
        exp = dict()
        exp["persona"] = self.name
        exp["address"] = self.act_address
        exp["start_datetime"] = self.act_start_time
        exp["duration"] = self.act_duration
        exp["description"] = self.act_description
        exp["pronunciatio"] = self.act_pronunciatio
        return exp

    def act_summary_str(self):
        """
        Returns a string summary of the current action. Meant to be
        human-readable.

        INPUT
            None
        OUTPUT
            ret: A human readable summary of the action.
        """
        start_datetime_str = self.act_start_time.strftime("%A %B %d -- %H:%M %p")
        ret = f"[{start_datetime_str}]\n"
        ret += f"Activity: {self.name} is {self.act_description}\n"
        ret += f"Address: {self.act_address}\n"
        ret += f"Duration in minutes (e.g., x min): {str(self.act_duration)} min\n"
        return ret

    def get_daily_schedule(self, daily_schedule: list[list[str]]):
        """Render a [task, duration] schedule as "HH:MM || task" lines (cumulative end times)."""
        ret = ""
        curr_min_sum = 0
        for row in daily_schedule:
            curr_min_sum += row[1]
            hour = int(curr_min_sum / 60)
            minute = curr_min_sum % 60
            ret += f"{hour:02}:{minute:02} || {row[0]}\n"
        return ret

    def get_str_daily_schedule_summary(self):
        return self.get_daily_schedule(self.f_daily_schedule)

    def get_str_daily_schedule_hourly_org_summary(self):
        return self.get_daily_schedule(self.f_daily_schedule_hourly_org)

View file

@ -0,0 +1,115 @@
"""
Author: Joon Sung Park (joonspk@stanford.edu)
File: spatial_memory.py
Description: Defines the MemoryTree class that serves as the agents' spatial
memory that aids in grounding their behavior in the game world.
"""
from pathlib import Path
from pydantic import BaseModel, Field
from metagpt.utils.common import read_json_file, write_json_file
class MemoryTree(BaseModel):
    """The agent's spatial memory: a nested mapping of
    world -> sector -> arena -> [game objects], used to ground the persona's
    behavior in the game world.
    """

    # Bug fix: the default must be a factory that produces a fresh dict per
    # instance; `Field(default=dict)` made the default the `dict` class object
    # itself rather than an empty mapping.
    tree: dict = Field(default_factory=dict)

    def set_mem_path(self, f_saved: Path):
        """Load the spatial tree from the saved JSON file *f_saved*."""
        self.tree = read_json_file(f_saved)

    def print_tree(self) -> None:
        """Pretty-print the nested tree, one ' >' per depth level."""

        def _print_tree(tree, depth):
            dash = " >" * depth
            if isinstance(tree, list):
                if tree:
                    print(dash, tree)
                return

            for key, val in tree.items():
                if key:
                    print(dash, key)
                _print_tree(val, depth + 1)

        _print_tree(self.tree, 0)

    def save(self, out_json: Path) -> None:
        """Persist the spatial tree to *out_json*."""
        write_json_file(out_json, self.tree)

    def get_str_accessible_sectors(self, curr_world: str) -> str:
        """
        Returns a summary string of all the sectors that the persona can access
        within the current world.

        Note that there are places a given persona cannot enter. This information
        is provided in the persona sheet. We account for this in this function.

        INPUT
            None
        OUTPUT
            A summary string of all the sectors that the persona can access.
        EXAMPLE STR OUTPUT
            "bedroom, kitchen, dining room, office, bathroom"
        """
        x = ", ".join(list(self.tree[curr_world].keys()))
        return x

    def get_str_accessible_sector_arenas(self, sector: str) -> str:
        """
        Returns a summary string of all the arenas that the persona can access
        within the current sector.

        Note that there are places a given persona cannot enter. This information
        is provided in the persona sheet. We account for this in this function.

        INPUT
            sector: "<world>:<sector>" address string.
        OUTPUT
            A summary string of all the arenas that the persona can access.
        EXAMPLE STR OUTPUT
            "bedroom, kitchen, dining room, office, bathroom"
        """
        curr_world, curr_sector = sector.split(":")
        if not curr_sector:
            return ""
        x = ", ".join(list(self.tree[curr_world][curr_sector].keys()))
        return x

    def get_str_accessible_arena_game_objects(self, arena: str) -> str:
        """
        Get a str list of all accessible game objects that are in the arena.

        INPUT
            arena: "<world>:<sector>:<arena>" address string.
        OUTPUT
            str list of all accessible game objects in the game arena.
        EXAMPLE STR OUTPUT
            "phone, charger, bed, nightstand"
        """
        curr_world, curr_sector, curr_arena = arena.split(":")

        if not curr_arena:
            return ""

        try:
            x = ", ".join(list(self.tree[curr_world][curr_sector][curr_arena]))
        except Exception:
            # Arena names are sometimes stored lowercased; retry before giving up.
            x = ", ".join(list(self.tree[curr_world][curr_sector][curr_arena.lower()]))
        return x

    def add_tile_info(self, tile_info: dict) -> None:
        """Insert a tile's world/sector/arena/game_object path into the tree,
        creating intermediate levels as needed."""
        if tile_info["world"]:
            if tile_info["world"] not in self.tree:
                self.tree[tile_info["world"]] = {}
        if tile_info["sector"]:
            if tile_info["sector"] not in self.tree[tile_info["world"]]:
                self.tree[tile_info["world"]][tile_info["sector"]] = {}
        if tile_info["arena"]:
            if tile_info["arena"] not in self.tree[tile_info["world"]][tile_info["sector"]]:
                self.tree[tile_info["world"]][tile_info["sector"]][tile_info["arena"]] = []
        if tile_info["game_object"]:
            if tile_info["game_object"] not in self.tree[tile_info["world"]][tile_info["sector"]][tile_info["arena"]]:
                self.tree[tile_info["world"]][tile_info["sector"]][tile_info["arena"]] += [tile_info["game_object"]]

View file

@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :

View file

@ -0,0 +1,93 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : conversation between two agents
from typing import Tuple
from examples.st_game.actions.agent_chat_sum_rel import AgentChatSumRel
from examples.st_game.actions.gen_iter_chat_utt import GenIterChatUTT
from examples.st_game.memory.retrieve import new_agent_retrieve
from metagpt.logs import logger
async def agent_conversation(init_role: "STRole", target_role: "STRole", conv_rounds: int = 8) -> list[list[str]]:
    """Run a turn-based conversation of at most *conv_rounds* rounds.

    Each round lets init_role speak and then target_role reply; either speaker
    may end the conversation. Returns the transcript as a list of
    [speaker_name, utterance] pairs.
    """
    curr_chat = []

    async def _one_turn(speaker: "STRole", listener: "STRole") -> bool:
        """Generate one utterance from *speaker* to *listener*; True ends the chat."""
        speaker_scratch = speaker.rc.scratch
        listener_scratch = listener.rc.scratch
        # First retrieval: memories about the conversation partner.
        retrieved = new_agent_retrieve(speaker, [f"{listener_scratch.name}"], 50)
        relationship = await generate_summarize_agent_relationship(speaker, listener, retrieved)
        logger.info(f"The relationship between {speaker.name} and {listener.name}: {relationship}")
        recent_lines = ""
        for pair in curr_chat[-4:]:
            recent_lines += ": ".join(pair) + "\n"
        # Second retrieval: relationship, partner activity, and (if any) recent lines.
        focal_points = [f"{relationship}", f"{listener_scratch.name} is {listener_scratch.act_description}"]
        if recent_lines:
            focal_points.append(recent_lines)
        retrieved = new_agent_retrieve(speaker, focal_points, 15)
        utt, end = await generate_one_utterance(speaker, listener, retrieved, curr_chat)
        curr_chat.append([speaker_scratch.name, utt])
        return end

    logger.info(f"Role: {init_role.name} starts a conversation with Role: {target_role.name}")
    for idx in range(conv_rounds):
        logger.info(f"Conv round: {idx} between {init_role.name} and {target_role.name}")
        if await _one_turn(init_role, target_role):
            break
        if await _one_turn(target_role, init_role):
            break

    logger.warning(f"Conversations between {target_role.name} and {init_role.name}:")
    for row in curr_chat:
        logger.info(row)
    return curr_chat
async def generate_summarize_agent_relationship(init_role: "STRole", target_role: "STRole", retrieved: dict) -> str:
    """Summarize the relationship between the two roles from retrieved memories.

    Flattens the embedding keys of every retrieved node into one
    newline-terminated statement blob and asks AgentChatSumRel to summarize it.
    """
    statements = [node.embedding_key for nodes in retrieved.values() for node in nodes]
    statement_blob = "".join(f"{line}\n" for line in statements)
    return await AgentChatSumRel().run(init_role, target_role, statement_blob)
async def generate_one_utterance(init_role, target_role, retrieved: dict, curr_chat: list) -> Tuple[str, str]:
    """Generate init_role's next line to target_role plus an end-of-chat flag.

    Builds a short context describing what both roles are doing, then asks
    GenIterChatUTT for the utterance and whether the conversation should end.
    """
    # Chat version optimized for speed via batch generation
    me = init_role.rc.scratch
    other = target_role.rc.scratch
    curr_context = (
        f"{me.name} was {me.act_description} when {me.name} "
        f"saw {other.name} in the middle of {other.act_description}.\n"
        f"{me.name} is initiating a conversation with {other.name}."
    )
    reply = await GenIterChatUTT().run(init_role, target_role, retrieved, curr_context, curr_chat)
    return reply["utterance"], reply["end"]

View file

@ -0,0 +1,719 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : st' planning execution
import datetime
import math
import random
from typing import Tuple, Union
from metagpt.llm import LLM
from metagpt.logs import logger
from ..actions.decide_to_talk import DecideToTalk
from ..actions.gen_action_details import GenActionDetails
from ..actions.gen_daily_schedule import GenDailySchedule
from ..actions.gen_hourly_schedule import GenHourlySchedule
from ..actions.new_decomp_schedule import NewDecompSchedule
from ..actions.summarize_conv import SummarizeConv
from ..actions.task_decomp import TaskDecomp
from ..actions.wake_up import WakeUp
from ..memory.retrieve import new_agent_retrieve
from ..plan.converse import agent_conversation
from ..utils.utils import get_embedding
async def plan(role: "STRole", roles: dict["STRole"], new_day: bool, retrieved: dict) -> str:
    """Top-level planning step for *role*; returns the current action address.

    1. On a new day, build the long-term (hourly) schedule.
    2. If the current action has finished, determine the next one.
    3. If a perceived event deserves a reaction, chat or wait accordingly,
       then clean up chat state and decay the anti-loop chatting buffer.
    """
    # PART 1: Generate the hourly schedule.
    if new_day:
        await _long_term_planning(role, new_day)

    # PART 2: If the current action has expired, we want to create a new plan.
    act_check_finished = role.scratch.act_check_finished()
    logger.info(f"Role: {role.name} act_check_finished is {act_check_finished}")
    if act_check_finished:
        await _determine_action(role)

    # PART 3: If you perceived an event that needs to be responded to (saw
    # another role), and retrieved relevant information.
    # Step 1: Retrieved may have multiple events represented in it. Pick the
    #         one event to focus on for the role.
    #         <focused_event> takes the form:
    #         {"curr_event": <ConceptNode>,
    #          "events": [<ConceptNode>, ...],
    #          "thoughts": [<ConceptNode>, ...]}
    focused_event = False
    if retrieved:
        focused_event = _choose_retrieved(role.name, retrieved)

    # Step 2: Decide how to react to the chosen event. _should_react returns
    #         a) "chat with {target_role.name}"  b) "wait: ..."  c) False
    logger.info(f"Role: {role.name} focused_event: {focused_event}")
    if focused_event:
        reaction_mode = await _should_react(role, focused_event, roles)
        logger.info(f"Role: {role.name} reaction_mode: {reaction_mode}")
        if reaction_mode:
            if reaction_mode.startswith("chat with"):
                await _chat_react(role, reaction_mode, roles)
            elif reaction_mode.startswith("wait"):
                await _wait_react(role, reaction_mode)

    # Step 3: Chat-related state clean up. If the persona is not chatting
    # with anyone, drop the chat-related state here.
    scratch = role.rc.scratch
    if scratch.act_event[1] != "chat with":
        scratch.chatting_with = None
        scratch.chat = None
        scratch.chatting_end_time = None
    # chatting_with_buffer keeps personas from re-chatting the same target in
    # an infinite loop; decay it for everyone except the current partner.
    for persona_name in scratch.chatting_with_buffer:
        if persona_name != scratch.chatting_with:
            scratch.chatting_with_buffer[persona_name] -= 1

    return scratch.act_address
def _choose_retrieved(role_name: str, retrieved: dict) -> Union[None, dict]:
    """Pick the single retrieved event context the role will react to.

    Self-originated events are discarded (mutating *retrieved*). Events whose
    subject is another role (no ":" in the subject) win first; otherwise any
    non-idle event is considered. A random candidate is chosen from the
    winning group, or None when nothing qualifies.

    Args:
        role_name: name of the role whose reaction we are determining.
        retrieved: {event_description: {"curr_event": <ConceptNode>,
                    "events": [...], "thoughts": [...]}}
    """
    # We do not want to take self events... for now.
    for event_desc in list(retrieved):
        if retrieved[event_desc]["curr_event"].subject == role_name:
            del retrieved[event_desc]

    # Roles (subjects without ":") always take priority.
    role_events = [
        rel_ctx
        for rel_ctx in retrieved.values()
        if ":" not in rel_ctx["curr_event"].subject and rel_ctx["curr_event"].subject != role_name
    ]
    if role_events:
        return random.choice(role_events)

    # Otherwise, anything that is not an idle event.
    non_idle = [rel_ctx for event_desc, rel_ctx in retrieved.items() if "is idle" not in event_desc]
    if non_idle:
        return random.choice(non_idle)
    return None
async def _should_react(role: "STRole", retrieved: dict, roles: dict):
    """
    Determines what form of reaction the role should exhibit given the
    retrieved values.

    INPUT
        role: Current <"STRole"> instance whose action we are determining.
        retrieved: A dictionary of <ConceptNode> that were retrieved from the
                   the role's associative memory. This dictionary takes the
                   following form:
                   dictionary[event.description] =
                     {["curr_event"] = <ConceptNode>,
                      ["events"] = [<ConceptNode>, ...],
                      ["thoughts"] = [<ConceptNode>, ...] }
        roles: A dictionary that contains all role names as keys, and the
               <"STRole"> instance as values.
    RETURNS
        "chat with <name>", "wait: <timestamp>", or False.
    """

    async def lets_talk(init_role: "STRole", target_role: "STRole", retrieved: dict):
        """Return True when both roles are free, awake and willing to chat."""
        if init_role.name == target_role.name:
            logger.info(f"Role: {role.name} _should_react lets_talk meet same role, return False")
            return False
        scratch = init_role.rc.scratch
        target_scratch = target_role.rc.scratch
        if (
            not target_scratch.act_address
            or not target_scratch.act_description
            or not scratch.act_address
            or not scratch.act_description
        ):
            return False
        if "sleeping" in target_scratch.act_description or "sleeping" in scratch.act_description:
            return False
        if scratch.curr_time.hour == 23:
            return False
        if "<waiting>" in target_scratch.act_address:
            return False
        if target_scratch.chatting_with or scratch.chatting_with:
            return False
        if target_role.name in scratch.chatting_with_buffer:
            # Recently chatted with this target; wait for the buffer to decay.
            if scratch.chatting_with_buffer[target_role.name] > 0:
                return False
        if await DecideToTalk().run(init_role, target_role, retrieved):
            return True
        return False

    async def lets_react(init_role: "STRole", target_role: "STRole", retrieved: dict):
        """Return "wait: <timestamp>" or False for a non-chat reaction."""
        if init_role.name == target_role.name:
            logger.info(f"Role: {role.name} _should_react lets_react meet same role, return False")
            return False
        scratch = init_role.rc.scratch
        target_scratch = target_role.rc.scratch
        if (
            not target_scratch.act_address
            or not target_scratch.act_description
            or not scratch.act_address
            or not scratch.act_description
        ):
            return False
        if "sleeping" in target_scratch.act_description or "sleeping" in scratch.act_description:
            return False
        if scratch.curr_time.hour == 23:
            return False
        if "waiting" in target_scratch.act_description:
            return False
        if scratch.planned_path == []:
            return False
        if scratch.act_address != target_scratch.act_address:
            return False
        react_mode = await DecideToTalk().run(init_role, target_role, retrieved)
        if react_mode == "1":
            # Wait until one minute before the target finishes its action.
            wait_until = (
                target_scratch.act_start_time + datetime.timedelta(minutes=target_scratch.act_duration - 1)
            ).strftime("%B %d, %Y, %H:%M:%S")
            return f"wait: {wait_until}"
        elif react_mode == "2":
            # Bug fix: an unreachable `return "do other things"` used to sit
            # after this return inside the same branch; it has been removed.
            return False
        else:
            return False  # "keep"

    # If the role is chatting right now, default to no reaction
    scratch = role.rc.scratch
    if scratch.chatting_with:
        return False
    if "<waiting>" in scratch.act_address:
        return False

    # Recall that retrieved takes the following form:
    # dictionary {["curr_event"] = <ConceptNode>}
    curr_event = retrieved["curr_event"]
    logger.info(f"Role: {role.name} _should_react curr_event.subject: {curr_event.subject}")
    if ":" not in curr_event.subject:
        # this is a role event.
        if await lets_talk(role, roles[curr_event.subject], retrieved):
            return f"chat with {curr_event.subject}"
        react_mode = await lets_react(role, roles[curr_event.subject], retrieved)
        return react_mode
    return False
async def _chat_react(role: "STRole", reaction_mode: str, roles: dict["STRole"]):
    """Create a conversation between `role` and the target named in `reaction_mode`.

    Args:
        role: the role initiating the conversation.
        reaction_mode: a string of the form ``"chat with <target name>"``; the
            target's name is the substring after the 9-character prefix.
        roles: mapping from role name to STRole instance.

    Side effects: both participants get a new chat action spliced into their
    schedules via `_create_react`.
    """
    # There are two roles -- the role who is initiating the conversation
    # and the role who is the target. We get the role instances here.
    init_role = role
    target_role = roles[reaction_mode[9:].strip()]

    # Actually creating the conversation here.
    convo, duration_min = await generate_convo(init_role, target_role)
    convo_summary = await generate_convo_summary(convo)
    inserted_act = convo_summary
    inserted_act_dur = duration_min
    act_start_time = target_role.rc.scratch.act_start_time

    curr_time = target_role.rc.scratch.curr_time
    if curr_time.second != 0:
        # Round up to the next whole minute so the chat end lands on a minute boundary.
        temp_curr_time = curr_time + datetime.timedelta(seconds=60 - curr_time.second)
        chatting_end_time = temp_curr_time + datetime.timedelta(minutes=inserted_act_dur)
    else:
        chatting_end_time = curr_time + datetime.timedelta(minutes=inserted_act_dur)

    # Fixed: loop variable renamed (was `role`) so it no longer shadows the
    # function parameter of the same name.
    for side, p in [("init", init_role), ("target", target_role)]:
        if side == "init":
            act_address = f"<persona> {target_role.name}"
            act_event = (p.name, "chat with", target_role.name)
            chatting_with = target_role.name
            chatting_with_buffer = {}
            # Buffer value 800 throttles immediate re-chatting with the same partner.
            chatting_with_buffer[target_role.name] = 800
        elif side == "target":
            act_address = f"<persona> {init_role.name}"
            act_event = (p.name, "chat with", init_role.name)
            chatting_with = init_role.name
            chatting_with_buffer = {}
            chatting_with_buffer[init_role.name] = 800

        act_pronunciatio = "💬"
        act_obj_description = None
        act_obj_pronunciatio = None
        act_obj_event = (None, None, None)

        await _create_react(
            p,
            inserted_act,
            inserted_act_dur,
            act_address,
            act_event,
            chatting_with,
            convo,
            chatting_with_buffer,
            chatting_end_time,
            act_pronunciatio,
            act_obj_description,
            act_obj_pronunciatio,
            act_obj_event,
            act_start_time,
        )
async def _create_react(
    role: "STRole",
    inserted_act: str,
    inserted_act_dur: int,
    act_address: str,
    act_event: Tuple,
    chatting_with: str,
    chat: list,
    chatting_with_buffer: dict,
    chatting_end_time: datetime.datetime,
    act_pronunciatio: str,
    act_obj_description: str,
    act_obj_pronunciatio: str,
    act_obj_event: Tuple,
    act_start_time=None,
):
    """Splice a reaction action (`inserted_act`, lasting `inserted_act_dur`
    minutes) into the role's decomposed daily schedule, then register it as the
    role's next action.

    The hour window [start_hour, end_hour) around the current hourly-schedule
    index is re-decomposed via `generate_new_decomp_schedule` so the inserted
    action fits, and the resulting sub-schedule replaces the corresponding
    slice of `scratch.f_daily_schedule`.
    """
    p = role
    scratch = role.rc.scratch

    # Minutes elapsed before the currently-indexed hourly block; its floor in
    # hours is the start of the window we will re-decompose.
    min_sum = 0
    for i in range(scratch.get_f_daily_schedule_hourly_org_index()):
        min_sum += scratch.f_daily_schedule_hourly_org[i][1]
    start_hour = int(min_sum / 60)

    if scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index()][1] >= 120:
        # The current hourly block alone spans 2+ hours; use its full length.
        end_hour = (
            start_hour + scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index()][1] / 60
        )
    elif (
        scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index()][1]
        + scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index() + 1][1]
    ):
        # NOTE(review): this condition is only a truthiness test on the summed
        # durations, so it is true whenever the sum is nonzero and the final
        # `else` is nearly unreachable -- confirm whether a comparison
        # (e.g. `<= 120`) was intended.
        end_hour = start_hour + (
            (
                scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index()][1]
                + scratch.f_daily_schedule_hourly_org[scratch.get_f_daily_schedule_hourly_org_index() + 1][1]
            )
            / 60
        )
    else:
        end_hour = start_hour + 2
    end_hour = int(end_hour)

    # Find the slice [start_index, end_index) of f_daily_schedule that covers
    # the [start_hour, end_hour) window, by accumulating durations.
    dur_sum = 0
    count = 0
    start_index = None
    end_index = None
    for act, dur in scratch.f_daily_schedule:
        if dur_sum >= start_hour * 60 and start_index is None:
            start_index = count
        if dur_sum >= end_hour * 60 and end_index is None:
            end_index = count
        dur_sum += dur
        count += 1

    ret = await generate_new_decomp_schedule(p, inserted_act, inserted_act_dur, start_hour, end_hour)
    scratch.f_daily_schedule[start_index:end_index] = ret
    scratch.add_new_action(
        act_address,
        inserted_act_dur,
        inserted_act,
        act_pronunciatio,
        act_event,
        chatting_with,
        chat,
        chatting_with_buffer,
        chatting_end_time,
        act_obj_description,
        act_obj_pronunciatio,
        act_obj_event,
        act_start_time,
    )
async def _wait_react(role: "STRole", reaction_mode: str):
    """Schedule a "waiting" action for `role` lasting until the timestamp
    encoded in `reaction_mode` (format: ``"wait: %B %d, %Y, %H:%M:%S"``)."""
    scratch = role.rc.scratch

    # The parenthesized tail of the current action description is what the
    # role is waiting to start, e.g. "having lunch" from "eating (having lunch)".
    pending_task = scratch.act_description.split("(")[-1][:-1]
    inserted_act = f"waiting to start {pending_task}"

    # Strip the 6-character "wait: " prefix and parse the end timestamp.
    end_time = datetime.datetime.strptime(reaction_mode[6:].strip(), "%B %d, %Y, %H:%M:%S")

    # Duration in minutes from now until the end time (same-day arithmetic),
    # inclusive of the final minute.
    end_minutes = end_time.minute + end_time.hour * 60
    now_minutes = scratch.curr_time.minute + scratch.curr_time.hour * 60
    inserted_act_dur = end_minutes - now_minutes + 1

    await _create_react(
        role,
        inserted_act,
        inserted_act_dur,
        f"<waiting> {scratch.curr_tile[0]} {scratch.curr_tile[1]}",  # act_address
        (role.name, "waiting to start", pending_task),  # act_event
        None,  # chatting_with
        None,  # chat
        None,  # chatting_with_buffer
        None,  # chatting_end_time
        "",  # act_pronunciatio
        None,  # act_obj_description
        None,  # act_obj_pronunciatio
        (None, None, None),  # act_obj_event
    )
async def generate_convo(init_role: "STRole", target_role: "STRole") -> Tuple[list, int]:
    """Run a conversation between two roles and estimate its duration.

    Fixed: the return annotation was `Union[list, int]`, but the function
    returns a (list, int) tuple.

    Returns:
        (convo, convo_length): the list of [speaker, utterance] pairs and the
        estimated conversation duration in minutes.
    """
    convo = await agent_conversation(init_role, target_role)
    all_utt = "".join(f"{speaker}: {utt}\n" for speaker, utt in convo)
    # Heuristic carried over from GA: derive minutes from total transcript
    # length (characters / 8 / 30, rounded up).
    convo_length = math.ceil(int(len(all_utt) / 8) / 30)
    return convo, convo_length
async def generate_convo_summary(conv: list[list[str]]) -> str:
    """Summarize a conversation (list of [speaker, utterance] pairs) into one sentence."""
    return await SummarizeConv().run(conv)
async def generate_new_decomp_schedule(
    role: "STRole", inserted_act: str, inserted_act_dur: int, start_hour: int, end_hour: int
):
    """Rebuild the decomposed schedule between `start_hour` and `end_hour` so
    that `inserted_act` (lasting `inserted_act_dur` minutes) is spliced in at
    the current time.

    Returns the new [action, duration] list produced by NewDecompSchedule.
    """
    # Step 1: Setting up the core variables for the function.
    # <scratch> belongs to the role whose schedule we are editing right now.
    scratch = role.rc.scratch
    # <today_min_pass> indicates the number of minutes that have passed today.
    today_min_pass = int(scratch.curr_time.hour) * 60 + int(scratch.curr_time.minute) + 1

    # Step 2: We need to create <main_act_dur> (the original plan inside the
    # window) and <truncated_act_dur> (the plan truncated at the current time).
    main_act_dur = []
    truncated_act_dur = []
    dur_sum = 0  # duration sum
    count = 0  # enumerate count
    truncated_fin = False

    # Fixed: replaced bare `print("DEBUG::: ", ...)` statements with logging.
    logger.debug(f"generate_new_decomp_schedule for {scratch.name}")
    for act, dur in scratch.f_daily_schedule:
        if (dur_sum >= start_hour * 60) and (dur_sum < end_hour * 60):
            main_act_dur += [[act, dur]]
            if dur_sum <= today_min_pass:
                truncated_act_dur += [[act, dur]]
            elif dur_sum > today_min_pass and not truncated_fin:
                # We need to insert that last act, duration list like this one:
                # e.g., ['wakes up and completes her morning routine (wakes up...)', 2]
                truncated_act_dur += [[scratch.f_daily_schedule[count][0], dur_sum - today_min_pass]]
                # NOTE(review): the next line zeroes out the duration that was
                # just appended (carried over verbatim from the original GA
                # code, including its "DEC 7 DEBUG" uncertainty) -- confirm
                # whether `dur - (dur_sum - today_min_pass)` was intended.
                truncated_act_dur[-1][-1] -= (
                    dur_sum - today_min_pass
                )  # DEC 7 DEBUG;.. is the +1 the right thing to do???
                logger.debug(f"truncated_act_dur: {truncated_act_dur}")
                truncated_fin = True
        dur_sum += dur
        count += 1

    # Rename the partially-completed last action to "... (on the way to ...)".
    # Assumes the description contains a "(...)" segment; "[:-1]" strips the
    # closing parenthesis.
    x = (
        truncated_act_dur[-1][0].split("(")[0].strip()
        + " (on the way to "
        + truncated_act_dur[-1][0].split("(")[-1][:-1]
        + ")"
    )
    truncated_act_dur[-1][0] = x

    if "(" in truncated_act_dur[-1][0]:
        inserted_act = truncated_act_dur[-1][0].split("(")[0].strip() + " (" + inserted_act + ")"

    # To do inserted_act_dur+1 below is an important decision but I'm not sure
    # if I understand the full extent of its implications. Might want to
    # revisit.
    truncated_act_dur += [[inserted_act, inserted_act_dur]]
    # Arbitrary anchor date; only the hour offsets matter downstream.
    start_time_hour = datetime.datetime(2022, 10, 31, 0, 0) + datetime.timedelta(hours=start_hour)
    end_time_hour = datetime.datetime(2022, 10, 31, 0, 0) + datetime.timedelta(hours=end_hour)

    return await NewDecompSchedule().run(
        role, main_act_dur, truncated_act_dur, start_time_hour, end_time_hour, inserted_act, inserted_act_dur
    )
async def _long_term_planning(role: "STRole", new_day: Union[bool, str]):
    """
    Formulates the role's daily long-term plan if it is the start of a new
    day. This basically has two components: first, we create the wake-up hour,
    and second, we create the hourly schedule based on it.
    INPUT
        new_day: Indicates whether the current time signals a "First day",
            "New day", or False (for neither). This is important because we
            create the roles' long term planning on the new day.
    """
    # NOTE(review): this function accesses `role.scratch` / `role.a_mem`,
    # while sibling functions use `role.rc.scratch` -- presumably both resolve
    # to the same objects via STRole attributes; confirm.
    # We start by creating the wake up hour for the role.
    wake_up_hour = await WakeUp().run(role)
    wake_up_hour = int(wake_up_hour)
    logger.info(f"Role: {role.name} long_term_planning, wake_up_hour: {wake_up_hour}")

    # When it is a new day, we start by creating the daily_req of the role.
    # Note that the daily_req is a list of strings that describe the role's
    # day in broad strokes.
    if new_day == "First day":
        # Bootstrapping the daily plan for the start of then generation:
        # if this is the start of generation (so there is no previous day's
        # daily requirement, or if we are on a new day, we want to create a new
        # set of daily requirements.
        role.scratch.daily_req = await GenDailySchedule().run(role, wake_up_hour)
        logger.info(f"Role: {role.name} daily requirements: {role.scratch.daily_req}")
    elif new_day == "New day":
        revise_identity(role)
        # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - TODO
        # We need to create a new daily_req here...
        # NOTE(review): self-assignment below is a no-op placeholder kept from
        # the original code; the previous day's daily_req is reused as-is.
        role.scratch.daily_req = role.scratch.daily_req

    # Based on the daily_req, we create an hourly schedule for the role,
    # which is a list of todo items with a time duration (in minutes) that
    # add up to 24 hours.
    role.scratch.f_daily_schedule = await GenHourlySchedule().run(role, wake_up_hour)
    logger.info(f"Role: {role.name} f_daily_schedule: {role.scratch.f_daily_schedule}")
    # Keep a pristine copy of the hourly schedule before decomposition.
    role.scratch.f_daily_schedule_hourly_org = role.scratch.f_daily_schedule[:]

    # Added March 4 -- adding plan to the memory.
    thought = f"This is {role.scratch.name}'s plan for {role.scratch.curr_time.strftime('%A %B %d')}:"
    for i in role.scratch.daily_req:
        thought += f" {i},"
    thought = thought[:-1] + "."
    created = role.scratch.curr_time
    expiration = role.scratch.curr_time + datetime.timedelta(days=30)
    s, p, o = (role.scratch.name, "plan", role.scratch.curr_time.strftime("%A %B %d"))
    keywords = set(["plan"])
    thought_poignancy = 5
    thought_embedding_pair = (thought, get_embedding(thought))
    role.a_mem.add_thought(
        created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, None
    )
    # print("Sleeping for 20 seconds...")
    # time.sleep(10)
    # print("Done sleeping!")
async def _determine_action(role: "STRole"):
    """
    Creates the next action sequence for the role.
    The main goal of this function is to run "add_new_action" on the role's
    scratch space, which sets up all the action related variables for the next
    action.
    As a part of this, the role may need to decompose its hourly schedule as
    needed.
    INPUT
        role: Current <STRole> instance whose action we are determining.
    """

    def determine_decomp(act_desp, act_dura):
        """
        Given an action description and its duration, we determine whether we need
        to decompose it. If the action is about the agent sleeping, we generally
        do not want to decompose it, so that's what we catch here.
        INPUT:
            act_desp: the description of the action (e.g., "sleeping")
            act_dura: the duration of the action in minutes.
        OUTPUT:
            a boolean. True if we need to decompose, False otherwise.
        """
        if "sleep" not in act_desp and "bed" not in act_desp:
            return True
        elif "sleeping" in act_desp or "asleep" in act_desp or "in bed" in act_desp:
            return False
        elif "sleep" in act_desp or "bed" in act_desp:
            if act_dura > 60:
                return False
            return True

    # The goal of this function is to get us the action associated with
    # <curr_index>. As a part of this, we may need to decompose some large
    # chunk actions.
    # Importantly, we try to decompose at least two hours worth of schedule at
    # any given point.
    curr_index = role.scratch.get_f_daily_schedule_index()
    curr_index_60 = role.scratch.get_f_daily_schedule_index(advance=60)
    logger.info(f"f_daily_schedule: {role.scratch.f_daily_schedule}")

    # * Decompose *
    # During the first hour of the day, we need to decompose two hours
    # sequence. We do that here.
    if curr_index == 0:
        # This portion is invoked if it is the first hour of the day.
        act_desp, act_dura = role.scratch.f_daily_schedule[curr_index]
        if act_dura >= 60:
            # We decompose if the next action is longer than an hour, and fits the
            # criteria described in determine_decomp.
            if determine_decomp(act_desp, act_dura):
                role.scratch.f_daily_schedule[curr_index : curr_index + 1] = await TaskDecomp().run(
                    role, act_desp, act_dura
                )
        if curr_index_60 + 1 < len(role.scratch.f_daily_schedule):
            act_desp, act_dura = role.scratch.f_daily_schedule[curr_index_60 + 1]
            if act_dura >= 60:
                if determine_decomp(act_desp, act_dura):
                    role.scratch.f_daily_schedule[curr_index_60 + 1 : curr_index_60 + 2] = await TaskDecomp().run(
                        role, act_desp, act_dura
                    )

    if curr_index_60 < len(role.scratch.f_daily_schedule):
        # If it is not the first hour of the day, this is always invoked (it is
        # also invoked during the first hour of the day -- to double up so we can
        # decompose two hours in one go). Of course, we need to have something to
        # decompose as well, so we check for that too.
        if role.scratch.curr_time.hour < 23:
            # And we don't want to decompose after 11 pm.
            act_desp, act_dura = role.scratch.f_daily_schedule[curr_index_60]
            if act_dura >= 60:
                if determine_decomp(act_desp, act_dura):
                    role.scratch.f_daily_schedule[curr_index_60 : curr_index_60 + 1] = await TaskDecomp().run(
                        role, act_desp, act_dura
                    )
    # * End of Decompose *

    # Generate an <Action> instance from the action description and duration. By
    # this point, we assume that all the relevant actions are decomposed and
    # ready in f_daily_schedule.
    # Fixed: replaced leftover ad-hoc debug prints with structured logging.
    logger.debug(
        f"f_daily_schedule for {role.scratch.name}: {role.scratch.f_daily_schedule}, "
        f"curr_index: {curr_index}, len: {len(role.scratch.f_daily_schedule)}"
    )

    # The schedule must cover a full day (1440 minutes); pad with sleep if short.
    total_minutes = sum(dur for _, dur in role.scratch.f_daily_schedule)
    if 1440 - total_minutes > 0:
        logger.warning(f"f_daily_schedule covers only {total_minutes} minutes; padding with sleep")
        role.scratch.f_daily_schedule += [["sleeping", 1440 - total_minutes]]

    act_desp, act_dura = role.scratch.f_daily_schedule[curr_index]
    new_action_details = await GenActionDetails().run(role, act_desp, act_dura)
    # Adding the action to role's queue.
    role.scratch.add_new_action(**new_action_details)
def revise_identity(role: "STRole"):
    """Refresh the role's `currently` status and daily plan requirement at the
    start of a new day, based on memories retrieved about the previous day.

    Side effects: overwrites `role.scratch.currently` and
    `role.scratch.daily_plan_req` with freshly generated text.
    """
    p_name = role.scratch.name

    focal_points = [
        f"{p_name}'s plan for {role.scratch.get_str_curr_date_str()}.",
        f"Important recent events for {p_name}'s life.",
    ]
    retrieved = new_agent_retrieve(role, focal_points)

    # Flatten the retrieved memory nodes into a "[Statements]" context block.
    statements = "[Statements]\n"
    for key, val in retrieved.items():
        for i in val:
            statements += f"{i.created.strftime('%A %B %d -- %H:%M %p')}: {i.embedding_key}\n"

    # What should the role remember while planning today?
    plan_prompt = statements + "\n"
    plan_prompt += f"Given the statements above, is there anything that {p_name} should remember as they plan for"
    plan_prompt += f" *{role.scratch.curr_time.strftime('%A %B %d')}*? "
    plan_prompt += "If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement)\n\n"
    plan_prompt += f"Write the response from {p_name}'s perspective."
    plan_note = LLM().ask(plan_prompt)

    # How does the role feel about the days so far?
    thought_prompt = statements + "\n"
    thought_prompt += (
        f"Given the statements above, how might we summarize {p_name}'s feelings about their days up to now?\n\n"
    )
    thought_prompt += f"Write the response from {p_name}'s perspective."
    thought_note = LLM().ask(thought_prompt)

    # Rewrite the "currently" status for the new day.
    currently_prompt = (
        f"{p_name}'s status from {(role.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n"
    )
    currently_prompt += f"{role.scratch.currently}\n\n"
    currently_prompt += f"{p_name}'s thoughts at the end of {(role.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n"
    currently_prompt += (plan_note + thought_note).replace("\n", "") + "\n\n"
    currently_prompt += f"It is now {role.scratch.curr_time.strftime('%A %B %d')}. Given the above, write {p_name}'s status for {role.scratch.curr_time.strftime('%A %B %d')} that reflects {p_name}'s thoughts at the end of {(role.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}. Write this in third-person talking about {p_name}."
    currently_prompt += "If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement).\n\n"
    currently_prompt += "Follow this format below:\nStatus: <new status>"
    new_currently = LLM().ask(currently_prompt)
    role.scratch.currently = new_currently

    # Regenerate the broad-strokes daily plan for today.
    daily_req_prompt = role.scratch.get_str_iss() + "\n"
    daily_req_prompt += f"Today is {role.scratch.curr_time.strftime('%A %B %d')}. Here is {role.scratch.name}'s plan today in broad-strokes (with the time of the day. e.g., have a lunch at 12:00 pm, watch TV from 7 to 8 pm).\n\n"
    daily_req_prompt += "Follow this format (the list should have 4~6 items but no more):\n"
    daily_req_prompt += "1. wake up and complete the morning routine at <time>, 2. ..."
    new_daily_req = LLM().ask(daily_req_prompt)
    new_daily_req = new_daily_req.replace("\n", " ")
    # Fixed: replaced leftover `print("WE ARE HERE!!!", ...)` with logging.
    logger.info(f"Role: {p_name} new daily plan requirement: {new_daily_req}")
    # NOTE(review): the result is stored in `daily_plan_req` (not `daily_req`)
    # -- presumably intentional; confirm downstream consumers.
    role.scratch.daily_plan_req = new_daily_req

View file

@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : prompt templates

View file

@ -0,0 +1,30 @@
Variables:
!<INPUT 0>! -- Persona name
!<INPUT 1>! -- Persona's current arena
!<INPUT 2>! -- Persona's current sector
!<INPUT 3>! -- Persona name
!<INPUT 4>! -- target sector
!<INPUT 5>! -- Persona's sector's all arenas (minus no access)
!<INPUT 6>! -- Curr action seq
!<INPUT 7>! -- Persona name
!<INPUT 8>! -- Persona's current sector
<commentblockmarker>###</commentblockmarker>
Jane Anderson is in kitchen in Jane Anderson's house.
Jane Anderson is going to Jane Anderson's house that has the following areas: {kitchen, bedroom, bathroom}
Stay in the current area if the activity can be done there. Never go into other people's rooms unless necessary.
For cooking, Jane Anderson should go to the following area in Jane Anderson's house:
Answer: {kitchen}
---
Tom Watson is in common room in Tom Watson's apartment.
Tom Watson is going to Hobbs Cafe that has the following areas: {cafe}
Stay in the current area if the activity can be done there. Never go into other people's rooms unless necessary.
For getting coffee, Tom Watson should go to the following area in Hobbs Cafe:
Answer: {cafe}
---
!<INPUT 0>! is going to !<INPUT 1>! that has the following areas: {!<INPUT 2>!}
* Stay in the current area if the activity can be done there.
* NEVER go into other people's rooms unless necessary.
!<INPUT 3>! is !<INPUT 4>!. For !<INPUT 5>!, !<INPUT 6>! should go to the following area in !<INPUT 7>! (MUST pick one of {!<INPUT 8>!}):
Answer: {

View file

@ -0,0 +1,34 @@
Variables:
!<INPUT 0>! -- Persona name
!<INPUT 1>! -- Maze all possible sectors
!<INPUT 2>! -- Persona name
!<INPUT 3>! -- Persona living sector
!<INPUT 4>! -- Persona living sector arenas
!<INPUT 5>! -- Persona name
!<INPUT 6>! -- Persona current sector
!<INPUT 7>! -- Persona current sector arenas
!<INPUT 8>! -- curr action description
!<INPUT 9>! -- Persona name
<commentblockmarker>###</commentblockmarker>
Task -- choose an appropriate area from the area options for a task at hand.
Sam Kim lives in {Sam Kim's house} that has Sam Kim's room, bathroom, kitchen.
Sam Kim is currently in {Sam Kim's house} that has Sam Kim's room, bathroom, kitchen.
Area options: {Sam Kim's house, The Rose and Crown Pub, Hobbs Cafe, Oak Hill College, Johnson Park, Harvey Oak Supply Store, The Willows Market and Pharmacy}.
* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.
For taking a walk, Sam Kim should go to the following area: {Johnson Park}
---
Jane Anderson lives in {Oak Hill College Student Dormatory} that has Jane Anderson's room.
Jane Anderson is currently in {Oak Hill College} that has a classroom, library
Area options: {Oak Hill College Student Dormatory, The Rose and Crown Pub, Hobbs Cafe, Oak Hill College, Johnson Park, Harvey Oak Supply Store, The Willows Market and Pharmacy}.
* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.
For eating dinner, Jane Anderson should go to the following area: {Hobbs Cafe}
---
!<INPUT 0>! lives in {!<INPUT 1>!} that has !<INPUT 2>!.
!<INPUT 3>! is currently in {!<INPUT 4>!} that has !<INPUT 5>!. !<INPUT 6>!
Area options: {!<INPUT 7>!}.
* Stay in the current area if the activity can be done there. Only go out if the activity needs to take place in another place.
* Must be one of the "Area options," verbatim.
!<INPUT 8>! is !<INPUT 9>!. For !<INPUT 10>!, !<INPUT 11>! should go to the following area: {

View file

@ -0,0 +1,32 @@
Variables:
!<INPUT 0>! -- curr action seq
!<INPUT 1>! -- Objects available
<commentblockmarker>###</commentblockmarker>
Current activity: sleep in bed
Objects available: {bed, easel, closet, painting}
Pick ONE most relevant object from the objects available: bed
---
Current activity: painting
Objects available: {easel, closet, sink, microwave}
Pick ONE most relevant object from the objects available: easel
---
Current activity: cooking
Objects available: {stove, sink, fridge, counter}
Pick ONE most relevant object from the objects available: stove
---
Current activity: watch TV
Objects available: {couch, TV, remote, coffee table}
Pick ONE most relevant object from the objects available: TV
---
Current activity: study
Objects available: {desk, computer, chair, bookshelf}
Pick ONE most relevant object from the objects available: desk
---
Current activity: talk on the phone
Objects available: {phone, charger, bed, nightstand}
Pick ONE most relevant object from the objects available: phone
---
Current activity: !<INPUT 0>!
Objects available: {!<INPUT 1>!}
Pick ONE most relevant object from the objects available:

View file

@ -0,0 +1,14 @@
daily_planning_v6.txt
Variables:
!<INPUT 0>! -- Commonset
!<INPUT 1>! -- Lifestyle
!<INPUT 2>! -- Reverie date time now
!<INPUT 3>! -- Persona first names
!<INPUT 4>! -- wake_up_hour
<commentblockmarker>###</commentblockmarker>
!<INPUT 0>!
In general, !<INPUT 1>!
Today is !<INPUT 2>!. Here is !<INPUT 3>!'s plan today in broad-strokes (with the time of the day. e.g., have a lunch at 12:00 pm, watch TV from 7 to 8 pm): 1) wake up and complete the morning routine at !<INPUT 4>!, 2)

View file

@ -0,0 +1,18 @@
decide_to_talk_v1.txt
<commentblockmarker>###</commentblockmarker>
Task -- given context, determine whether the subject will initiate a conversation with another.
Format:
Context: []
Question: []
Reasoning: []
Answer in "yes" or "no": []
---
Context: !<INPUT 0>!
Right now, it is !<INPUT 1>!. !<INPUT 2>! and !<INPUT 3>! last chatted at !<INPUT 4>! about !<INPUT 5>!.
!<INPUT 6>!
!<INPUT 7>!
Question: Would !<INPUT 8>! initiate a conversation with !<INPUT 9>!?
Reasoning: Let's think step by step.

View file

@ -0,0 +1,30 @@
generate_event_triple_v1.txt
Variables:
!<INPUT 0>! -- Persona's full name.
!<INPUT 1>! -- Current action description
!<INPUT 2>! -- Persona's full name.
<commentblockmarker>###</commentblockmarker>
Task: Turn the input into (subject, predicate, object).
Input: Sam Johnson is eating breakfast.
Output: (Sam Johnson, eat, breakfast)
---
Input: Joon Park is brewing coffee.
Output: (Joon Park, brew, coffee)
---
Input: Jane Cook is sleeping.
Output: (Jane Cook, is, sleep)
---
Input: Michael Bernstein is writing email on a computer.
Output: (Michael Bernstein, write, email)
---
Input: Percy Liang is teaching students in a classroom.
Output: (Percy Liang, teach, students)
---
Input: Merrie Morris is running on a treadmill.
Output: (Merrie Morris, run, treadmill)
---
Input: !<INPUT 0>! is !<INPUT 1>!.
Output: (!<INPUT 2>!,

View file

@ -0,0 +1,11 @@
generate_focal_pt_v1.txt
Variables:
!<INPUT 0>! -- Event/thought statements
!<INPUT 1>! -- Count
<commentblockmarker>###</commentblockmarker>
!<INPUT 0>!
Given only the information above, what are !<INPUT 1>! most salient high-level questions we can answer about the subjects grounded in the statements?
1)

View file

@ -0,0 +1,18 @@
generate_hourly_schedule_v2.txt
Variables:
!<INPUT 0>! -- Schedule format
!<INPUT 1>! -- Commonset
!<INPUT 2>! -- prior_schedule
!<INPUT 3>! -- intermission_str
!<INPUT 4>! -- intermission 2
!<INPUT 5>! -- prompt_ending
<commentblockmarker>###</commentblockmarker>
Hourly schedule format:
!<INPUT 0>!
===
!<INPUT 1>!
!<INPUT 2>!
!<INPUT 3>!!<INPUT 4>!
!<INPUT 5>!

View file

@ -0,0 +1,16 @@
generate_obj_event_v1.txt
Variables:
!<INPUT 0>! -- Object name
!<INPUT 1>! -- Persona name
!<INPUT 2>! -- Persona action event description
!<INPUT 3>! -- Object name
!<INPUT 4>! -- Object name
<commentblockmarker>###</commentblockmarker>
Task: We want to understand the state of an object that is being used by someone.
Let's think step by step.
We want to know about !<INPUT 0>!'s state.
Step 1. !<INPUT 1>! is at/using the !<INPUT 2>!.
Step 2. Describe the !<INPUT 3>!'s state: !<INPUT 4>! is

View file

@ -0,0 +1,10 @@
generate_pronunciatio_v1.txt
Variables:
!<INPUT 0>! -- Action description
<commentblockmarker>###</commentblockmarker>
Convert an action description to an emoji (important: use two or less emojis).
Action description: !<INPUT 0>!
Emoji:

View file

@ -0,0 +1,12 @@
insight_and_evidence_v1.txt
Variables:
!<INPUT 0>! -- Numbered list of event/thought statements
!<INPUT 1>! -- target persona name or "the conversation"
<commentblockmarker>###</commentblockmarker>
Input:
!<INPUT 0>!
What !<INPUT 1>! high-level insights can you infer from the above statements? Please ensure each insight includes 'because of' and follows the example format (example format: insight (because of 1, 5, 3)).
1.

View file

@ -0,0 +1,46 @@
iterative_convo_v1.txt
Variables:
!<INPUT 0>! -- persona ISS
!<INPUT 1>! -- persona name
!<INPUT 2>! -- retrieved memory
!<INPUT 3>! -- past context
!<INPUT 4>! -- current location
!<INPUT 5>! -- current context
!<INPUT 6>! -- persona name
!<INPUT 7>! -- target persona name
!<INPUT 8>! -- curr convo
!<INPUT 9>! -- persona name
!<INPUT 10>! -- target persona name
!<INPUT 11>! -- persona name
!<INPUT 12>! -- persona name
!<INPUT 13>! -- persona name
<commentblockmarker>###</commentblockmarker>
Context for the task:
PART 1.
!<INPUT 0>!
Here is the memory that is in !<INPUT 1>!'s head:
!<INPUT 2>!
PART 2.
Past Context:
!<INPUT 3>!
Current Location: !<INPUT 4>!
Current Context:
!<INPUT 5>!
!<INPUT 6>! and !<INPUT 7>! are chatting. Here is their conversation so far:
!<INPUT 8>!
---
Task: Given the above, what should !<INPUT 9>! say to !<INPUT 10>! next in the conversation? And did it end the conversation?
Output format: Output a json of the following format:
{
"!<INPUT 11>!": "<!<INPUT 12>!'s utterance>",
"Did the conversation end with !<INPUT 13>!'s utterance?": "<json Boolean>"
}

View file

@ -0,0 +1,15 @@
memo_on_convo_v1.txt
Variables:
!<INPUT 0>! -- All convo utterances
!<INPUT 1>! -- persona name
!<INPUT 2>! -- persona name
!<INPUT 3>! -- persona name
<commentblockmarker>###</commentblockmarker>
[Conversation]
!<INPUT 0>!
Write down if there is anything from the conversation that !<INPUT 1>! might have found interesting from !<INPUT 2>!'s perspective, in a full sentence.
"!<INPUT 3>!

View file

@ -0,0 +1,24 @@
new_decomp_schedule_v1.txt
Variables:
!<INPUT 0>! -- persona name
!<INPUT 1>! -- start hour
!<INPUT 2>! -- end hour
!<INPUT 3>! -- original plan
!<INPUT 4>! -- persona name
!<INPUT 5>! -- new event
!<INPUT 6>! -- new event duration
!<INPUT 7>! -- persona name
!<INPUT 8>! -- start hour
!<INPUT 9>! -- end hour
!<INPUT 10>! -- end hour
!<INPUT 11>! -- new schedule init
<commentblockmarker>###</commentblockmarker>
Here was !<INPUT 0>!'s originally planned schedule from !<INPUT 1>! to !<INPUT 2>!.
!<INPUT 3>!
But !<INPUT 4>! unexpectedly ended up !<INPUT 5>! for !<INPUT 6>! minutes. Revise !<INPUT 7>!'s schedule from !<INPUT 8>! to !<INPUT 9>! accordingly (it has to end by !<INPUT 10>!).
The revised schedule:
!<INPUT 11>!

View file

@ -0,0 +1,15 @@
planning_thought_on_convo_v1.txt
Variables:
!<INPUT 0>! -- All convo utterances
!<INPUT 1>! -- persona name
!<INPUT 2>! -- persona name
!<INPUT 3>! -- persona name
<commentblockmarker>###</commentblockmarker>
[Conversation]
!<INPUT 0>!
Write down if there is anything from the conversation that !<INPUT 1>! need to remember for her planning, from !<INPUT 2>!'s perspective, in a full sentence.
"!<INPUT 3>!

View file

@ -0,0 +1,15 @@
poignancy_event_v1.txt
!<INPUT 0>!: agent name
!<INPUT 1>!: iss
!<INPUT 2>!: name
!<INPUT 3>!: event description
<commentblockmarker>###</commentblockmarker>
Here is a brief description of !<INPUT 0>!.
!<INPUT 1>!
On the scale of 1 to 10, where 1 is purely mundane (e.g., brushing teeth, making bed) and 10 is extremely poignant (e.g., a break up, college acceptance), rate the likely poignancy of the following event for !<INPUT 2>!.
Event: !<INPUT 3>!
Rate (return a number between 1 to 10):

View file

@ -0,0 +1,17 @@
poignancy_chat_v1.txt
!<INPUT 0>!: agent name
!<INPUT 1>!: iss
!<INPUT 2>!: name
!<INPUT 3>!: event description
<commentblockmarker>###</commentblockmarker>
Here is a brief description of !<INPUT 0>!.
!<INPUT 1>!
On the scale of 1 to 10, where 1 is purely mundane (e.g., routine morning greetings) and 10 is extremely poignant (e.g., a conversation about breaking up, a fight), rate the likely poignancy of the following conversation for !<INPUT 2>!.
Conversation:
!<INPUT 3>!
Rate (return a number between 1 to 10):

View file

@ -0,0 +1,15 @@
poignancy_event_v1.txt
!<INPUT 0>!: agent name
!<INPUT 1>!: iss
!<INPUT 2>!: name
!<INPUT 3>!: event description
<commentblockmarker>###</commentblockmarker>
Here is a brief description of !<INPUT 0>!.
!<INPUT 1>!
On the scale of 1 to 10, where 1 is purely mundane (e.g., brushing teeth, making bed) and 10 is extremely poignant (e.g., a break up, college acceptance), rate the likely poignancy of the following event for !<INPUT 2>!.
Event: !<INPUT 3>!
Rate (return a number between 1 to 10):

View file

@ -0,0 +1,15 @@
poignancy_thought_v1.txt
!<INPUT 0>!: agent name
!<INPUT 1>!: iss
!<INPUT 2>!: name
!<INPUT 3>!: event description
<commentblockmarker>###</commentblockmarker>
Here is a brief description of !<INPUT 0>!.
!<INPUT 1>!
On the scale of 1 to 10, where 1 is purely mundane (e.g., I need to do the dishes, I need to walk the dog) and 10 is extremely significant (e.g., I wish to become a professor, I love Elie), rate the likely significance of the following thought for !<INPUT 2>!.
Thought: !<INPUT 3>!
Rate (return a number between 1 to 10):

View file

@ -0,0 +1,15 @@
summarize_chat_relationship_v2.txt
Variables:
!<INPUT 0>! -- Statements
!<INPUT 1>! -- curr persona name
!<INPUT 2>! -- target_persona.scratch.name
<commentblockmarker>###</commentblockmarker>
[Statements]
!<INPUT 0>!
Based on the statements above, summarize !<INPUT 1>! and !<INPUT 2>!'s relationship. What do they feel or know about each other?
"

View file

@ -0,0 +1,11 @@
summarize_conversation_v1.txt
Variables:
!<INPUT 0>! -- init_persona_name
<commentblockmarker>###</commentblockmarker>
Conversation:
!<INPUT 0>!
Summarize the conversation above in one sentence:
This is a conversation about

View file

@ -0,0 +1,39 @@
task_decomp_v2.txt
Variables:
!<INPUT 0>! -- Commonset
!<INPUT 1>! -- Surrounding schedule description
!<INPUT 2>! -- Persona first name
!<INPUT 3>! -- Persona first name
!<INPUT 4>! -- Current action
!<INPUT 5>! -- curr time range
!<INPUT 6>! -- Current action duration in min
!<INPUT 7>! -- Persona first names
<commentblockmarker>###</commentblockmarker>
Describe subtasks in 5 min increments.
---
Name: Kelly Bronson
Age: 35
Backstory: Kelly always wanted to be a teacher, and now she teaches kindergarten. During the week, she dedicates herself to her students, but on the weekends, she likes to try out new restaurants and hang out with friends. She is very warm and friendly, and loves caring for others.
Personality: sweet, gentle, meticulous
Location: Kelly is in an older condo that has the following areas: {kitchen, bedroom, dining, porch, office, bathroom, living room, hallway}.
Currently: Kelly is a teacher during the school year. She teaches at the school but works on lesson plans at home. She is currently living alone in a single bedroom condo.
Daily plan requirement: Kelly is planning to teach during the morning and work from home in the afternoon.
Today is Saturday May 10. From 08:00am ~09:00am, Kelly is planning on having breakfast, from 09:00am ~ 12:00pm, Kelly is planning on working on the next day's kindergarten lesson plan, and from 12:00 ~ 13pm, Kelly is planning on taking a break.
In 5 min increments, list the subtasks Kelly does when Kelly is working on the next day's kindergarten lesson plan from 09:00am ~ 12:00pm (total duration in minutes: 180):
1) Kelly is reviewing the kindergarten curriculum standards. (duration in minutes: 15, minutes left: 165)
2) Kelly is brainstorming ideas for the lesson. (duration in minutes: 30, minutes left: 135)
3) Kelly is creating the lesson plan. (duration in minutes: 30, minutes left: 105)
4) Kelly is creating materials for the lesson. (duration in minutes: 30, minutes left: 75)
5) Kelly is taking a break. (duration in minutes: 15, minutes left: 60)
6) Kelly is reviewing the lesson plan. (duration in minutes: 30, minutes left: 30)
7) Kelly is making final changes to the lesson plan. (duration in minutes: 15, minutes left: 15)
8) Kelly is printing the lesson plan. (duration in minutes: 10, minutes left: 5)
9) Kelly is putting the lesson plan in her bag. (duration in minutes: 5, minutes left: 0)
---
!<INPUT 0>!
!<INPUT 1>!
In 5 min increments, list the subtasks !<INPUT 2>! does when !<INPUT 3>! is !<INPUT 4>! from !<INPUT 5>! (total duration in minutes !<INPUT 6>!):
1) !<INPUT 7>! is

View file

@ -0,0 +1,12 @@
wake_up_hour_v1.txt
Variables:
!<INPUT 0>! -- Identity Stable Set
!<INPUT 1>! -- Lifestyle
!<INPUT 2>! -- Persona first names
<commentblockmarker>###</commentblockmarker>
!<INPUT 0>!
In general, !<INPUT 1>!
!<INPUT 2>!'s wake up hour:

View file

@ -0,0 +1,11 @@
whisper_inner_thought_v1.txt
Variables:
!<INPUT 0>! -- init persona name
!<INPUT 1>! -- whisper
<commentblockmarker>###</commentblockmarker>
Translate the following thought into a statement about !<INPUT 0>!.
Thought: "!<INPUT 1>!"
Statement: "

View file

@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : reflection module

View file

@ -0,0 +1,245 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Reflect function
import asyncio
import datetime
import time

from examples.st_game.actions.run_reflect_action import (
    AgentChatPoignancy,
    AgentEventPoignancy,
    AgentEventTriple,
    AgentFocusPt,
    AgentInsightAndGuidance,
    AgentMemoryOnConvo,
    AgentPlanThoughtOnConvo,
)
from examples.st_game.memory.retrieve import new_agent_retrieve
from examples.st_game.utils.utils import get_embedding
from metagpt.logs import logger
async def generate_focal_points(role: "STRole", n: int = 3):
    """Digest the role's most recent non-idle memories and ask for `n` focal points.

    Events and thoughts are ordered by last access time; only the newest
    `importance_ele_n` entries feed the prompt.
    """
    candidates = [
        mem
        for mem in role.memory.event_list + role.memory.thought_list
        if "idle" not in mem.embedding_key
    ]
    candidates.sort(key=lambda mem: mem.last_accessed)
    recent = candidates[-1 * role.scratch.importance_ele_n:]
    statements = "".join(mem.embedding_key + "\n" for mem in recent)
    return await AgentFocusPt().run(role, statements, n)
async def generate_insights_and_evidence(role: "STRole", nodes: list, n: int = 5):
    """Ask for up to `n` insights over the numbered node statements.

    Returns a dict mapping each insight to the memory ids of its evidence
    nodes; on any malformed LLM output, falls back to a placeholder dict.
    """
    numbered = "".join(f"{idx}. {node.embedding_key}\n" for idx, node in enumerate(nodes))
    insights = await AgentInsightAndGuidance().run(role, numbered, n)
    logger.info(insights)
    try:
        # Replace the raw evidence indices with the referenced nodes' memory ids.
        for insight, raw_evidence in insights.items():
            insights[insight] = [nodes[idx].memory_id for idx in raw_evidence]
        return insights
    except Exception as exp:
        # Best-effort: LLM output is unpredictable, so degrade gracefully.
        logger.error(f"generate_insights_and_evidence error:{exp}")
    return {"this is blank": "node_1"}
async def generate_action_event_triple(act_desp: str, role: "STRole"):
    """Decompose an action description into a (subject, predicate, object) event triple.

    INPUT:
        act_desp: the description of the action (e.g., "sleeping")
        role: the current STRole instance
    OUTPUT:
        a (subject, predicate, object) triple describing the action.
    EXAMPLE OUTPUT:
        ("Klaus Mueller", "is", "sleeping")
    """
    run_event_triple = AgentEventTriple()
    result = await run_event_triple.run(act_desp, role)
    return result
async def generate_poig_score(role: "STRole", event_type: str, description: str):
    """Score how poignant (1-10) a perceived item is for the role.

    Args:
        role: the current STRole instance.
        event_type: one of "event", "thought" or "chat".
        description: textual description of the item; idle descriptions
            short-circuit to 1. For "chat", the score is computed from the
            role's current chat description instead.

    Returns:
        An int score between 1 and 10.

    Raises:
        ValueError: for an unsupported event_type. (Previously the function
            silently returned None here, which crashed later arithmetic on
            the score.)
    """
    # Idle filler events carry no emotional weight; skip the LLM round-trip.
    if "is idle" in description:
        return 1
    if event_type in ("event", "thought"):
        return await AgentEventPoignancy().run(role, description)
    if event_type == "chat":
        return await AgentChatPoignancy().run(role, role.scratch.act_description)
    raise ValueError(f"unknown event_type for poignancy scoring: {event_type!r}")
async def generate_planning_thought_on_convo(role: "STRole", all_utt: str):
    """Derive a planning-oriented thought from the full conversation transcript."""
    return await AgentPlanThoughtOnConvo().run(role, all_utt)
async def generate_memo_on_convo(role: "STRole", all_utt: str):
    """Derive a memorable takeaway from the full conversation transcript."""
    return await AgentMemoryOnConvo().run(role, all_utt)
# Done
async def run_reflect(role: "STRole"):
    """
    Run the actual reflection. We generate the focal points, retrieve any
    relevant nodes, and generate thoughts and insights.

    INPUT:
        role: Current Persona object
    Output:
        None
    """
    # Reflection requires certain focal points. Generate that first.
    focal_points = await generate_focal_points(role, 3)
    # Retrieve the relevant memory nodes for each of the focal points.
    # <retrieved> has keys of focal points, and values of the associated nodes.
    retrieved = new_agent_retrieve(role, focal_points)

    # For each of the focal points, generate thoughts and save them in the
    # agent's memory.
    for focal_pt, nodes in retrieved.items():
        for embedding_key in (node.embedding_key for node in nodes):
            logger.info(f"Nodes retrieved for `{focal_pt}` are `{embedding_key}`.")

        # <thoughts> is a dict of {thought text: evidence node ids}.
        thoughts = await generate_insights_and_evidence(role, nodes, 5)
        for thought, evidence in thoughts.items():
            created = role.scratch.curr_time
            expiration = created + datetime.timedelta(days=30)
            s, p, o = await generate_action_event_triple("(" + thought + ")", role)
            keywords = {s, p, o}
            thought_poignancy = await generate_poig_score(role, "thought", thought)
            thought_embedding_pair = (thought, get_embedding(thought))
            role.memory.add_thought(
                created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, evidence
            )
            logger.info(f"add thought memory: {thought}, evidence: {evidence}")
            # Back off to avoid the LLM rate limit. Use a non-blocking sleep:
            # time.sleep here would stall every other role on the event loop.
            await asyncio.sleep(2)
def reflection_trigger(role: "STRole"):
    """
    Given the current role, determine whether the role should run a
    reflection.

    Our current implementation checks for whether the sum of the new importance
    measure has reached the set (hyper-parameter) threshold.

    INPUT:
        role: Current Persona object
    Output:
        True if we are running a new reflection.
        False otherwise.
    """
    # (The original statement ended with a stray trailing comma, silently
    # building a throwaway one-element tuple.)
    logger.info(f"{role.scratch.name} role.scratch.importance_trigger_curr:: {role.scratch.importance_trigger_curr}")
    # Reflect once the accumulated poignancy budget is exhausted, provided
    # there is at least one event or thought to reflect over.
    has_memories = bool(role.memory.event_list or role.memory.thought_list)
    return role.scratch.importance_trigger_curr <= 0 and has_memories
# Done
def reset_reflection_counter(role: "STRole"):
    """
    We reset the counters used for the reflection trigger.

    INPUT:
        role: Current Persona object
    Output:
        None
    """
    # Refill the poignancy budget and clear the count of accumulated elements.
    role.scratch.importance_trigger_curr = role.scratch.importance_trigger_max
    role.scratch.importance_ele_n = 0
async def role_reflect(role: "STRole"):
    """
    The main reflection module for the role. We first check if the trigger
    conditions are met, and if so, run the reflection and reset any of the
    relevant counters.

    Additionally, when a chat session is about to end (the next tick reaches
    <chatting_end_time>), distill the conversation into a planning thought
    and a memo thought and store both in associative memory.

    INPUT:
        role: Current Persona object
    Output:
        None
    """
    if reflection_trigger(role):
        await run_reflect(role)
        reset_reflection_counter(role)
    if role.scratch.chatting_end_time:
        # update 10 to it's real sec_per_step value
        # Fire exactly on the tick just before the chat ends.
        if role.scratch.curr_time + datetime.timedelta(0, role.sec_per_step) == role.scratch.chatting_end_time:
            # Flatten the chat transcript into "speaker: utterance" lines.
            all_utt = ""
            if role.scratch.chat:
                for row in role.scratch.chat:
                    # row is (speaker, utterance) — TODO confirm against scratch.chat producer
                    all_utt += f"{row[0]}: {row[1]}\n"
            last_chat = role.memory.get_last_chat(role.scratch.chatting_with)
            if last_chat:
                evidence = [last_chat.memory_id]
            else:
                # No stored chat node to cite as evidence; skip distillation.
                logger.info(f"Role: {role.name} get_last_chat: {last_chat}")
                return
            # 1) A forward-looking planning thought derived from the conversation.
            planning_thought = await generate_planning_thought_on_convo(role, all_utt)
            planning_thought = f"For {role.scratch.name}'s planning: {planning_thought}"
            logger.info(f"Role: {role.name} planning_thought: {planning_thought}")
            created = role.scratch.curr_time
            expiration = created + datetime.timedelta(days=30)
            s, p, o = await generate_action_event_triple(planning_thought, role)
            keywords = set([s, p, o])
            thought_poignancy = await generate_poig_score(role, "thought", planning_thought)
            thought_embedding_pair = (planning_thought, get_embedding(planning_thought))
            role.memory.add_thought(
                created,
                expiration,
                s,
                p,
                o,
                planning_thought,
                keywords,
                thought_poignancy,
                thought_embedding_pair,
                evidence,
            )
            # 2) A retrospective memo about what was said, stored the same way.
            memo_thought = await generate_memo_on_convo(role, all_utt)
            memo_thought = f"{role.scratch.name} {memo_thought}"
            created = role.scratch.curr_time
            expiration = created + datetime.timedelta(days=30)
            s, p, o = await generate_action_event_triple(memo_thought, role)
            keywords = set([s, p, o])
            thought_poignancy = await generate_poig_score(role, "thought", memo_thought)
            thought_embedding_pair = (memo_thought, get_embedding(memo_thought))
            role.memory.add_thought(
                created,
                expiration,
                s,
                p,
                o,
                memo_thought,
                keywords,
                thought_poignancy,
                thought_embedding_pair,
                evidence,
            )

View file

View file

@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :

View file

@ -0,0 +1,627 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : Stanford Town role
"""
Do the steps following:
- perceive, receive environment(Maze) info
- retrieve, retrieve memories
- plan, do plan like long-term plan and interact with Maze
- reflect, do the High-level thinking based on memories and re-add into the memory
- execute, move or else in the Maze
"""
import math
import random
import time
from datetime import datetime, timedelta
from operator import itemgetter
from pathlib import Path
from typing import TYPE_CHECKING, Optional
from pydantic import ConfigDict, Field, field_validator, model_validator
from examples.st_game.actions.dummy_action import DummyAction, DummyMessage
from examples.st_game.actions.inner_voice_action import AgentWhisperThoughtAction
from examples.st_game.actions.run_reflect_action import AgentEventTriple
from examples.st_game.memory.agent_memory import AgentMemory, BasicMemory
from examples.st_game.memory.scratch import Scratch
from examples.st_game.memory.spatial_memory import MemoryTree
from examples.st_game.plan.st_plan import plan
from examples.st_game.reflect.reflect import generate_poig_score, role_reflect
from examples.st_game.utils.const import STORAGE_PATH, collision_block_id
from examples.st_game.utils.mg_ga_transform import (
get_role_environment,
save_environment,
save_movement,
)
from examples.st_game.utils.utils import get_embedding, path_finder
from metagpt.actions.add_requirement import UserRequirement
from metagpt.environment.api.env_api import EnvAPIAbstract
from metagpt.logs import logger
from metagpt.roles.role import Role, RoleContext
from metagpt.schema import Message
if TYPE_CHECKING:
from metagpt.environment.stanford_town_env.stanford_town_env import ( # noqa: F401
StanfordTownEnv,
)
class STRoleContext(RoleContext):
    """Runtime context for an STRole: the maze environment plus GA-style memory stores."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Shared Stanford Town maze environment (excluded from serialization).
    env: "StanfordTownEnv" = Field(default=None, exclude=True)
    # Associative memory holding events, thoughts and chats.
    memory: AgentMemory = Field(default_factory=AgentMemory)
    # Short-term working state (GA's "scratch").
    scratch: Scratch = Field(default_factory=Scratch)
    # Tree-structured spatial knowledge (world/sector/arena/objects).
    spatial_memory: MemoryTree = Field(default_factory=MemoryTree)

    @classmethod
    def model_rebuild(cls, **kwargs):
        # Import here so the "StanfordTownEnv" forward reference can be
        # resolved without creating a circular import at module load time.
        from metagpt.environment.stanford_town_env.stanford_town_env import (  # noqa: F401
            StanfordTownEnv,
        )

        super(RoleContext, cls).model_rebuild(**kwargs)
class STRole(Role):
    """A Stanford Town (generative-agents) persona driven as a MetaGPT Role.

    Each step the role: perceives nearby tiles/events, retrieves related
    memories, plans, reflects, and finally executes a movement in the maze.
    It stores role state like GA's Scratch (age, tile, schedule, ...).
    """

    # add a role's property structure to store role's age and so on like GA's Scratch.
    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")

    name: str = Field(default="Klaus Mueller")
    profile: str = Field(default="STMember")
    rc: STRoleContext = Field(default_factory=STRoleContext)
    sim_code: str = Field(default="new_sim")
    step: int = Field(default=0)
    start_time: Optional[datetime] = Field(default=None)
    curr_time: Optional[datetime] = Field(default=None)
    sec_per_step: int = Field(default=10)

    # Object events to mark idle on the next env update: {event: tile}.
    game_obj_cleanup: dict = Field(default_factory=dict)
    # True only for the step on which the user's whisper was injected.
    inner_voice: bool = Field(default=False)
    # True for the (single) role that receives the user's idea as a whisper.
    has_inner_voice: bool = Field(default=False)
    role_storage_path: Optional[Path] = Field(default=None)

    @field_validator("curr_time", mode="before")
    @classmethod
    def check_curr_time(cls, curr_time: str) -> datetime:
        # e.g. "February 13, 2023, 14:30:00"
        return datetime.strptime(curr_time, "%B %d, %Y, %H:%M:%S")

    @field_validator("start_time", mode="before")
    @classmethod
    def check_start_time(cls, start_time: str) -> datetime:
        # start_time is date-only (e.g. "February 13, 2023"); anchor at midnight.
        return datetime.strptime(f"{start_time}, 00:00:00", "%B %d, %Y, %H:%M:%S")

    @model_validator(mode="after")
    def validate_st_role_after(self):
        self.role_storage_path = STORAGE_PATH.joinpath(f"{self.sim_code}/personas/{self.name}")
        self.load_from()  # load role's memory
        self.set_actions([])
        if self.has_inner_voice:
            # TODO add communication action
            self._watch([UserRequirement, DummyAction])
        else:
            self._watch([DummyAction])
        # pydantic v2 "after" model validators must return the model instance
        # (the original fell through and returned None).
        return self

    async def init_curr_tile(self):
        # Place the role on its persisted tile and publish its current event there.
        role_env: dict = get_role_environment(self.sim_code, self.name, self.step)
        pt_x = role_env["x"]
        pt_y = role_env["y"]
        self.rc.scratch.curr_tile = (pt_x, pt_y)
        await self.rc.env.step(
            EnvAPIAbstract(
                api_name="add_tiles_event",
                kwargs={"pt_y": pt_y, "pt_x": pt_x, "event": self.scratch.get_curr_event_and_desc()},
            )
        )

    @property
    def scratch(self):
        return self.rc.scratch

    @property
    def role_tile(self):
        return self.scratch.curr_tile

    @property
    def a_mem(self):
        # GA-compatible alias for the associative memory.
        return self.rc.memory

    @property
    def s_mem(self):
        # GA-compatible alias for the spatial memory.
        return self.rc.spatial_memory

    @property
    def memory(self):
        return self.rc.memory

    def load_from(self):
        """
        load role data from `storage/{simulation_name}/personas/{role_name}`
        """
        memory_saved = self.role_storage_path.joinpath("bootstrap_memory/associative_memory")
        self.rc.memory.set_mem_path(memory_saved)

        sp_mem_saved = self.role_storage_path.joinpath("bootstrap_memory/spatial_memory.json")
        self.rc.spatial_memory.set_mem_path(f_saved=sp_mem_saved)

        scratch_f_saved = self.role_storage_path.joinpath("bootstrap_memory/scratch.json")
        self.rc.scratch = Scratch.set_scratch_path(f_saved=scratch_f_saved)
        logger.info(f"Role: {self.name} loaded role's memory from {str(self.role_storage_path)}")

    def save_into(self):
        """
        save role data from `storage/{simulation_name}/personas/{role_name}`
        """
        memory_saved = self.role_storage_path.joinpath("bootstrap_memory/associative_memory")
        self.rc.memory.save(memory_saved)

        sp_mem_saved = self.role_storage_path.joinpath("bootstrap_memory/spatial_memory.json")
        self.rc.spatial_memory.save(sp_mem_saved)

        scratch_f_saved = self.role_storage_path.joinpath("bootstrap_memory/scratch.json")
        self.rc.scratch.save(scratch_f_saved)
        logger.info(f"Role: {self.name} saved role's memory into {str(self.role_storage_path)}")

    async def _observe(self) -> int:
        if not self.rc.env:
            return 0
        observed = self.rc.env.memory.get_by_actions(self.rc.watch)
        self.rc.news = self.rc.memory.remember(observed)
        if len(self.rc.news) == 1 and self.rc.news[0].cause_by == UserRequirement:
            # The single UserRequirement message is the user's whisper ("idea").
            logger.warning(f"Role: {self.name} add inner voice: {self.rc.news[0].content}")
            await self.add_inner_voice(self.rc.news[0].content)
        return 1  # always return 1 to execute role's `_react`

    async def add_inner_voice(self, whisper: str):
        """Translate the user's whisper into a thought and store it in memory."""

        async def generate_inner_thought(whisper: str):
            run_whisper_thought = AgentWhisperThoughtAction()
            inner_thought = await run_whisper_thought.run(self, whisper)
            return inner_thought

        thought = await generate_inner_thought(whisper)

        # init scratch curr_time with self.curr_time
        self.inner_voice = True
        self.rc.scratch.curr_time = self.curr_time

        created = self.rc.scratch.curr_time if self.rc.scratch.curr_time else datetime.now()
        expiration = created + timedelta(days=30)
        run_event_triple = AgentEventTriple()
        s, p, o = await run_event_triple.run(thought, self)
        keywords = {s, p, o}
        thought_poignancy = await generate_poig_score(self, "event", whisper)
        thought_embedding_pair = (thought, get_embedding(thought))
        self.rc.memory.add_thought(
            created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, None
        )

    async def observe(self) -> list[BasicMemory]:
        # TODO observe info from maze_env
        """
        Perceive events around the role and saves it to the memory, both events
        and spaces.

        We first perceive the events nearby the role, as determined by its
        <vision_r>. If there are a lot of events happening within that radius, we
        take the <att_bandwidth> of the closest events. Finally, we check whether
        any of them are new, as determined by <retention>. If they are new, then we
        save those and return the <BasicMemory> instances for those events.

        OUTPUT:
            ret_events: a list of <BasicMemory> that are perceived and new.
        """
        # PERCEIVE SPACE
        # We get the nearby tiles given our current tile and the persona's vision
        # radius.
        nearby_tiles = await self.rc.env.observe(
            EnvAPIAbstract(
                api_name="get_nearby_tiles",
                kwargs={"tile": self.rc.scratch.curr_tile, "vision_r": self.rc.scratch.vision_r},
            )
        )

        # We then store the perceived space. Note that the s_mem of the persona is
        # in the form of a tree constructed using dictionaries.
        for tile in nearby_tiles:
            tile_info = await self.rc.env.observe(EnvAPIAbstract(api_name="access_tile", kwargs={"tile": tile}))
            self.rc.spatial_memory.add_tile_info(tile_info)

        # PERCEIVE EVENTS.
        # We will perceive events that take place in the same arena as the
        # persona's current arena.
        curr_arena_path = await self.rc.env.observe(
            EnvAPIAbstract(api_name="get_tile_path", kwargs={"tile": self.rc.scratch.curr_tile, "level": "arena"})
        )
        # We do not perceive the same event twice (this can happen if an object is
        # extended across multiple tiles).
        percept_events_set = set()
        # We will order our percept based on the distance, with the closest ones
        # getting priorities.
        percept_events_list = []
        # First, we put all events that are occuring in the nearby tiles into the
        # percept_events_list
        for tile in nearby_tiles:
            tile_details = await self.rc.env.observe(EnvAPIAbstract(api_name="access_tile", kwargs={"tile": tile}))
            if tile_details["events"]:
                tmp_arena_path = await self.rc.env.observe(
                    EnvAPIAbstract(api_name="get_tile_path", kwargs={"tile": tile, "level": "arena"})
                )
                if tmp_arena_path == curr_arena_path:
                    # This calculates the distance between the persona's current tile,
                    # and the target tile.
                    dist = math.dist([tile[0], tile[1]], [self.rc.scratch.curr_tile[0], self.rc.scratch.curr_tile[1]])
                    # Add any relevant events to our temp set/list with the distant info.
                    for event in tile_details["events"]:
                        if event not in percept_events_set:
                            percept_events_list += [[dist, event]]
                            percept_events_set.add(event)

        # We sort, and perceive only self.rc.scratch.att_bandwidth of the closest
        # events. If the bandwidth is larger, then it means the persona can perceive
        # more elements within a small area.
        percept_events_list = sorted(percept_events_list, key=itemgetter(0))
        perceived_events = []
        for dist, event in percept_events_list[: self.rc.scratch.att_bandwidth]:
            perceived_events += [event]

        # Storing events.
        # <ret_events> is a list of <BasicMemory> instances from the persona's
        # associative memory.
        ret_events = []
        for p_event in perceived_events:
            s, p, o, desc = p_event
            if not p:
                # If the object is not present, then we default the event to "idle".
                p = "is"
                o = "idle"
                desc = "idle"
            desc = f"{s.split(':')[-1]} is {desc}"
            p_event = (s, p, o)

            # We retrieve the latest self.rc.scratch.retention events. If there is
            # something new that is happening (that is, p_event not in latest_events),
            # then we add that event to the a_mem and return it.
            latest_events = self.rc.memory.get_summarized_latest_events(self.rc.scratch.retention)
            if p_event not in latest_events:
                # We start by managing keywords.
                keywords = set()
                sub = p_event[0]
                obj = p_event[2]
                if ":" in p_event[0]:
                    sub = p_event[0].split(":")[-1]
                if ":" in p_event[2]:
                    obj = p_event[2].split(":")[-1]
                keywords.update([sub, obj])

                # Get event embedding
                desc_embedding_in = desc
                if "(" in desc:
                    desc_embedding_in = desc_embedding_in.split("(")[1].split(")")[0].strip()
                if desc_embedding_in in self.rc.memory.embeddings:
                    event_embedding = self.rc.memory.embeddings[desc_embedding_in]
                else:
                    event_embedding = get_embedding(desc_embedding_in)
                event_embedding_pair = (desc_embedding_in, event_embedding)

                # Get event poignancy.
                event_poignancy = await generate_poig_score(self, "event", desc_embedding_in)
                logger.info(f"Role {self.name} event_poignancy: {event_poignancy}")

                # If we observe the persona's self chat, we include that in the memory
                # of the persona here.
                chat_node_ids = []
                if p_event[0] == f"{self.name}" and p_event[1] == "chat with":
                    curr_event = self.rc.scratch.act_event
                    if self.rc.scratch.act_description in self.rc.memory.embeddings:
                        chat_embedding = self.rc.memory.embeddings[self.rc.scratch.act_description]
                    else:
                        chat_embedding = get_embedding(self.rc.scratch.act_description)
                    chat_embedding_pair = (self.rc.scratch.act_description, chat_embedding)
                    chat_poignancy = await generate_poig_score(self, "chat", self.rc.scratch.act_description)
                    chat_node = self.rc.memory.add_chat(
                        self.rc.scratch.curr_time,
                        None,
                        curr_event[0],
                        curr_event[1],
                        curr_event[2],
                        self.rc.scratch.act_description,
                        keywords,
                        chat_poignancy,
                        chat_embedding_pair,
                        self.rc.scratch.chat,
                    )
                    chat_node_ids = [chat_node.memory_id]

                # Finally, we add the current event to the agent's memory.
                ret_events += [
                    self.rc.memory.add_event(
                        self.rc.scratch.curr_time,
                        None,
                        s,
                        p,
                        o,
                        desc,
                        keywords,
                        event_poignancy,
                        event_embedding_pair,
                        chat_node_ids,
                    )
                ]
                self.rc.scratch.importance_trigger_curr -= event_poignancy
                self.rc.scratch.importance_ele_n += 1

        return ret_events

    def retrieve(self, observed: list) -> dict:
        """For each observed event, gather the related events/thoughts from memory."""
        retrieved = dict()
        for event in observed:
            retrieved[event.description] = dict()
            retrieved[event.description]["curr_event"] = event

            relevant_events = self.rc.memory.retrieve_relevant_events(event.subject, event.predicate, event.object)
            retrieved[event.description]["events"] = list(relevant_events)

            relevant_thoughts = self.rc.memory.retrieve_relevant_thoughts(event.subject, event.predicate, event.object)
            retrieved[event.description]["thoughts"] = list(relevant_thoughts)
        return retrieved

    async def reflect(self):
        # Reflection (trigger check, insight generation and re-adding results to
        # memory) is fully encapsulated inside role_reflect.
        await role_reflect(self)

    async def execute(self, plan: str):
        """
        Args:
            plan: This is a string address of the action we need to execute.
                It comes in the form of "{world}:{sector}:{arena}:{game_objects}".
                It is important that you access this without doing negative
                indexing (e.g., [-1]) because the latter address elements may not be
                present in some cases.
                e.g., "dolores double studio:double studio:bedroom 1:bed"

        Returns:
            (next_tile, pronunciatio, description) describing the move.
        """
        roles = self.rc.env.get_roles()
        if "<random>" in plan and self.rc.scratch.planned_path == []:
            self.rc.scratch.act_path_set = False

        # <act_path_set> is set to True if the path is set for the current action.
        # It is False otherwise, and means we need to construct a new path.
        if not self.rc.scratch.act_path_set:
            # <target_tiles> is a list of tile coordinates where the persona may go
            # to execute the current action. The goal is to pick one of them.
            target_tiles = None
            logger.info(f"Role {self.name} plan: {plan}")

            if "<persona>" in plan:
                # Executing persona-persona interaction.
                target_p_tile = roles[plan.split("<persona>")[-1].strip()].scratch.curr_tile
                collision_maze = await self.rc.env.observe(EnvAPIAbstract(api_name="get_collision_maze"))
                potential_path = path_finder(
                    collision_maze, self.rc.scratch.curr_tile, target_p_tile, collision_block_id
                )
                if len(potential_path) <= 2:
                    # NOTE(review): assumes path_finder always returns at least
                    # one tile here — confirm for unreachable targets.
                    target_tiles = [potential_path[0]]
                else:
                    # Head for the midpoint of the path, whichever half is shorter.
                    collision_maze = await self.rc.env.observe(EnvAPIAbstract(api_name="get_collision_maze"))
                    potential_1 = path_finder(
                        collision_maze,
                        self.rc.scratch.curr_tile,
                        potential_path[int(len(potential_path) / 2)],
                        collision_block_id,
                    )
                    potential_2 = path_finder(
                        collision_maze,
                        self.rc.scratch.curr_tile,
                        potential_path[int(len(potential_path) / 2) + 1],
                        collision_block_id,
                    )
                    if len(potential_1) <= len(potential_2):
                        target_tiles = [potential_path[int(len(potential_path) / 2)]]
                    else:
                        target_tiles = [potential_path[int(len(potential_path) / 2 + 1)]]

            elif "<waiting>" in plan:
                # Executing interaction where the persona has decided to wait before
                # executing their action.
                x = int(plan.split()[1])
                y = int(plan.split()[2])
                target_tiles = [[x, y]]

            elif "<random>" in plan:
                # Executing a random location action.
                plan = ":".join(plan.split(":")[:-1])
                address_tiles = await self.rc.env.observe(EnvAPIAbstract(api_name="get_address_tiles"))
                target_tiles = address_tiles[plan]
                target_tiles = random.sample(list(target_tiles), 1)

            else:
                # This is our default execution. We simply take the persona to the
                # location where the current action is taking place.
                # Retrieve the target addresses. Again, plan is an action address in its
                # string form. <maze.address_tiles> takes this and returns candidate
                # coordinates.
                address_tiles = await self.rc.env.observe(EnvAPIAbstract(api_name="get_address_tiles"))
                if plan not in address_tiles:
                    # Fall back to a known-present default location. The original
                    # code evaluated this lookup without assigning it, which left
                    # `target_tiles` unbound and raised NameError below.
                    target_tiles = address_tiles["Johnson Park:park:park garden"]
                else:
                    target_tiles = address_tiles[plan]

            # There are sometimes more than one tile returned from this (e.g., a tabe
            # may stretch many coordinates). So, we sample a few here. And from that
            # random sample, we will take the closest ones.
            if len(target_tiles) < 4:
                target_tiles = random.sample(list(target_tiles), len(target_tiles))
            else:
                target_tiles = random.sample(list(target_tiles), 4)
            # If possible, we want personas to occupy different tiles when they are
            # headed to the same location on the maze. It is ok if they end up on the
            # same time, but we try to lower that probability.
            # We take care of that overlap here.
            persona_name_set = set(roles.keys())
            new_target_tiles = []
            for i in target_tiles:
                access_tile = await self.rc.env.observe(EnvAPIAbstract(api_name="access_tile", kwargs={"tile": i}))
                curr_event_set = access_tile["events"]
                pass_curr_tile = False
                for j in curr_event_set:
                    if j[0] in persona_name_set:
                        pass_curr_tile = True
                if not pass_curr_tile:
                    new_target_tiles += [i]
            if len(new_target_tiles) == 0:
                new_target_tiles = target_tiles
            target_tiles = new_target_tiles

            # Now that we've identified the target tile, we find the shortest path to
            # one of the target tiles.
            curr_tile = self.rc.scratch.curr_tile
            closest_target_tile = None
            path = None
            for i in target_tiles:
                # path_finder takes a collision_mze and the curr_tile coordinate as
                # an input, and returns a list of coordinate tuples that becomes the
                # path.
                # e.g., [(0, 1), (1, 1), (1, 2), (1, 3), (1, 4)...]
                collision_maze = await self.rc.env.observe(EnvAPIAbstract(api_name="get_collision_maze"))
                curr_path = path_finder(collision_maze, curr_tile, i, collision_block_id)
                if not closest_target_tile:
                    closest_target_tile = i
                    path = curr_path
                elif len(curr_path) < len(path):
                    closest_target_tile = i
                    path = curr_path

            # Actually setting the <planned_path> and <act_path_set>. We cut the
            # first element in the planned_path because it includes the curr_tile.
            self.rc.scratch.planned_path = path[1:]
            self.rc.scratch.act_path_set = True

        # Setting up the next immediate step. We stay at our curr_tile if there is
        # no <planned_path> left, but otherwise, we go to the next tile in the path.
        ret = self.rc.scratch.curr_tile
        if self.rc.scratch.planned_path:
            ret = self.rc.scratch.planned_path[0]
            self.rc.scratch.planned_path = self.rc.scratch.planned_path[1:]
        description = f"{self.rc.scratch.act_description}"
        description += f" @ {self.rc.scratch.act_address}"

        execution = ret, self.rc.scratch.act_pronunciatio, description
        return execution

    async def update_role_env(self) -> bool:
        """Sync the role with the frontend's environment file for the current step.

        Returns True when the step's environment file was found and applied,
        False when it is not ready yet (after a short non-blocking back-off).
        """
        import asyncio  # local import: keeps the file-level import block untouched

        role_env = get_role_environment(self.sim_code, self.name, self.step)
        ret = True
        if role_env:
            for key, val in self.game_obj_cleanup.items():
                # Turn any object events of the previous step back to idle.
                await self.rc.env.step(
                    EnvAPIAbstract(api_name="turn_event_from_tile_idle", kwargs={"curr_event": key, "tile": val})
                )
            # reset game_obj_cleanup
            self.game_obj_cleanup = dict()
            curr_tile = self.role_tile
            new_tile = (role_env["x"], role_env["y"])
            await self.rc.env.step(
                EnvAPIAbstract(
                    api_name="remove_subject_events_from_tile", kwargs={"subject": self.name, "tile": curr_tile}
                )
            )
            await self.rc.env.step(
                EnvAPIAbstract(
                    api_name="add_event_from_tile",
                    kwargs={"curr_event": self.scratch.get_curr_event_and_desc(), "tile": new_tile},
                )
            )
            # the persona will travel to get to their destination. *Once*
            # the persona gets there, we activate the object action.
            if not self.scratch.planned_path:
                self.game_obj_cleanup[self.scratch.get_curr_event_and_desc()] = new_tile
                await self.rc.env.step(
                    EnvAPIAbstract(
                        api_name="add_event_from_tile",
                        kwargs={"curr_event": self.scratch.get_curr_event_and_desc(), "tile": new_tile},
                    )
                )
                blank = (self.scratch.get_curr_obj_event_and_desc()[0], None, None, None)
                await self.rc.env.step(
                    EnvAPIAbstract(api_name="remove_event_from_tile", kwargs={"curr_event": blank, "tile": new_tile})
                )
            # update role's new tile
            self.rc.scratch.curr_tile = new_tile
        else:
            ret = False
            # Non-blocking back-off: time.sleep here would stall every other
            # role sharing the event loop.
            await asyncio.sleep(1)
            logger.warning(
                f"{self.sim_code}/environment/{self.step}.json not exist or parses failed, " f"sleep 1s and re-check"
            )
        return ret

    async def _react(self) -> Message:
        import asyncio  # local import: keeps the file-level import block untouched

        # update role env. The coroutine must be awaited: the original
        # `ret = self.update_role_env()` left an un-run (and always truthy)
        # coroutine object, so the environment never advanced.
        ret = await self.update_role_env()
        if not ret:
            # TODO add message
            logger.info(f"Role: {self.name} update_role_env return False")
            return DummyMessage()

        new_day = False
        if not self.scratch.curr_time or self.inner_voice:
            new_day = "First day"
        elif self.scratch.curr_time.strftime("%A %B %d") != self.curr_time.strftime("%A %B %d"):
            new_day = "New day"
        logger.info(f"Role: {self.name} new_day: {new_day}")
        self.rc.scratch.curr_time = self.curr_time

        # get maze_env from self.rc.env, and observe env info
        observed = await self.observe()
        # use self.rc.memory 's retrieve functions
        retrieved = self.retrieve(observed)
        plans = await plan(self, self.rc.env.get_roles(), new_day, retrieved)
        await self.reflect()

        # feed-back into maze_env
        next_tile, pronunciatio, description = await self.execute(plans)
        role_move = {
            "movement": next_tile,
            "pronunciatio": pronunciatio,
            "description": description,
            "chat": self.scratch.chat,
        }
        save_movement(self.name, role_move, step=self.step, sim_code=self.sim_code, curr_time=self.curr_time)

        # step update
        logger.info(f"Role: {self.name} run at {self.step} step on {self.curr_time} at tile: {self.scratch.curr_tile}")
        self.step += 1
        save_environment(self.name, self.step, self.sim_code, next_tile)
        self.curr_time += timedelta(seconds=self.sec_per_step)
        self.inner_voice = False
        await asyncio.sleep(0.5)
        return DummyMessage()
# Resolve the "StanfordTownEnv" forward reference now that the module is fully loaded.
STRoleContext.model_rebuild()

View file

@ -0,0 +1,75 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : entry of Stanford Town(ST/st) game
import asyncio
import fire
from examples.st_game.roles.st_role import STRole
from examples.st_game.stanford_town import StanfordTown
from examples.st_game.utils.const import STORAGE_PATH
from examples.st_game.utils.mg_ga_transform import (
get_reverie_meta,
write_curr_sim_code,
write_curr_step,
)
from examples.st_game.utils.utils import copy_folder
from metagpt.logs import logger
async def startup(idea: str, fork_sim_code: str, sim_code: str, investment: float = 30.0, n_round: int = 500):
    """Initialize and run a Stanford Town simulation.

    Forks an existing simulation's storage, creates one ``STRole`` per persona
    listed in the fork's reverie meta file (only the first persona receives the
    user's ``idea`` as an inner voice), then runs the town for ``n_round`` rounds.

    Args:
        idea: user's "inner voice" delivered to the first agent.
        fork_sim_code: name of the existing simulation to fork from.
        sim_code: name under which the new simulation's results are saved.
        investment: budget invested into the running agents.
        n_round: number of rounds to run the town.
    """
    town = StanfordTown()
    logger.info("StanfordTown init environment")

    # Fork simulation data: copy `storage/{fork_sim_code}` to `storage/{sim_code}`.
    copy_folder(str(STORAGE_PATH.joinpath(fork_sim_code)), str(STORAGE_PATH.joinpath(sim_code)))

    # Role names come from `storage/{fork_sim_code}/reverie/meta.json`.
    reverie_meta = get_reverie_meta(fork_sim_code)
    # Hoisted: the forked step is reused for every role and for temp storage.
    step = reverie_meta.get("step", 0)
    sim_path = STORAGE_PATH.joinpath(sim_code)
    sim_path.mkdir(exist_ok=True)
    roles = []
    for idx, role_name in enumerate(reverie_meta["persona_names"]):
        # Only the first persona hears the user's idea as an inner voice.
        has_inner_voice = idx == 0
        role = STRole(
            name=role_name,
            profile=role_name,
            sim_code=sim_code,
            step=step,
            start_time=reverie_meta.get("start_date"),
            curr_time=reverie_meta.get("curr_time"),
            sec_per_step=reverie_meta.get("sec_per_step"),
            has_inner_voice=has_inner_voice,
        )
        roles.append(role)

    # Init temp_storage so the GA frontend can locate the current sim and step.
    write_curr_sim_code({"sim_code": sim_code})
    write_curr_step({"step": step})

    await town.hire(roles)
    town.invest(investment)
    town.run_project(idea)
    await town.run(n_round)
def main(idea: str, fork_sim_code: str, sim_code: str, investment: float = 30.0, n_round: int = 500):
    """CLI entry point; forwards its arguments to the async ``startup`` runner.

    Args:
        idea: idea works as an `inner voice` to the first agent.
        fork_sim_code: old simulation name to start with
        sim_code: new simulation name to save simulation result
        investment: the investment of running agents
        n_round: rounds to run agents
    """
    simulation = startup(
        idea=idea,
        fork_sim_code=fork_sim_code,
        sim_code=sim_code,
        investment=investment,
        n_round=n_round,
    )
    asyncio.run(simulation)
if __name__ == "__main__":
fire.Fire(main)

View file

@ -0,0 +1,48 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : StanfordTown to works like SoftwareCompany
from typing import Any
from pydantic import Field
from metagpt.context import Context
from metagpt.environment.stanford_town_env.stanford_town_env import StanfordTownEnv
from metagpt.logs import logger
from metagpt.team import Team
from examples.st_game.roles.st_role import STRole
from examples.st_game.utils.const import MAZE_ASSET_PATH
class StanfordTown(Team):
    """A town of generative agents, operated like a SoftwareCompany.

    Wraps a `StanfordTownEnv` maze environment and drives the hired `STRole`
    agents round by round, persisting everything at the end of a run.
    """

    # Maze environment; may arrive pre-populated via deserialization.
    env: StanfordTownEnv = Field(default=None)

    def __init__(self, context: Context = None, **data: Any):
        """Create the town, building its maze env unless one was deserialized."""
        super().__init__(**data)  # zero-argument super is the Python 3 idiom
        ctx = context or Context()
        if not self.env:
            self.env = StanfordTownEnv(context=ctx, maze_asset_path=MAZE_ASSET_PATH)
        else:
            # The `env` object is allocated by deserialization; rebind context.
            self.env.context = ctx

    async def hire(self, roles: list[STRole]):
        """Register roles with the town and place each on its starting tile."""
        logger.warning(f"The Town add {len(roles)} roles, and start to operate.")
        super().hire(roles)
        for role in roles:
            await role.init_curr_tile()

    async def run(self, n_round: int = 3):
        """Run company until target round or no money.

        Returns:
            The environment's accumulated history after all rounds.
        """
        while n_round > 0:
            n_round -= 1
            logger.debug(f"{n_round=}")
            # Presumably aborts when the town's budget is exhausted — see
            # Team._check_balance.
            self._check_balance()
            await self.env.run()

        # Save simulation result (roles' state) after all rounds; the role
        # profiles keying the mapping are not needed here.
        for role in self.env.get_roles().values():
            role.save_into()

        return self.env.history

View file

@ -0,0 +1,26 @@
Name,Whisper
Latoya Williams,"Rajiv Patel is your housemate whom you've known for about a year; You and Rajiv Patel sometimes talk about politics and local elections; Abigail Chan is your housemate whom you've known for about a year; Francisco Lopez is your housemate whom you've known for about a year; Haily Johnson is your housemate whom you've known for about a year but you don't really find her too comfortable; In terms of your daily plans, you sometimes spend time at The Rose and Crown Pub when it's late; You have known the bartender at The Rose and Crown Pub, Arthur Burton for about half a year; You like to talk about politics and local elections"
Rajiv Patel,"Latoya Williams is your housemate whom you've known for about a year; You and Latoya Williams sometimes talk about politics and local elections; Abigail Chan is your housemate whom you've known for about a year and you think she is kind of cute; Francisco Lopez is your housemate whom you've known for about a year; Haily Johnson is your housemate whom you've known for about a year but you don't really find her too comfortable; You sometimes try to spend time at Hobbs Cafe but you haven't really talked to anyone there yet; In terms of your daily plans, you sometimes spend time at The Rose and Crown Pub when it's late and you have known the bartender, Arthur Burton for about a year; You like to talk about politics and local elections"
Abigail Chen,You think you are kind of cute; You and Latoya Williams are housemates and have known each other for about a year; You and Rajiv Patel are housemates and have known each other for about a year and you two are somewhat close; You and Francisco Lopez are housemates and have known each other for about a year; You and Hailey Johnson are housemates and have known each other for about a year
Francisco Lopez,"You and Latoya Williams are housemates and have known each other for about a year; You and Rajiv Patel are housemates and have known each other for about a year; You and Abigail Chen are housemates and have known each other for about a year; You and Hailey Johnson are housemates and have known each other for about a year but you don't really like her too much; You have a secret crush on Abigail Chen but so far, you haven't had the courage to ask her out; You think Abigail Chen is cute; You frequent Hobbs Cafe"
Hailey Johnson,"You want to be acknowledged by everyone; You've known Isabella Rodriguez for about a year and you talk to her often about the piece you are currently writing; Latoya Williams is your housemate whom you've known for about a year and you think she is somewhat cute; Rajiv Patel is your housemate whom you've known for about a year, but you don't know much about him; Abigail Chen is a housemate whom you've known for about a year, but you haven't really talked to her yet; Francisco Lopez is your housemate and you two have known each other for quite awhile because you are from the same town; You are interested in meeting someone; You are interested in romantically dating someone; You are not seeing anyone right now "
Arthur Burton,"You are generally nice to all your customers and listen to their story; You and Isabella Rodriguez are friends; You've known Isabella Rodriguez for a few years since she runs Hobbs Cafe right next to your bar; You know Latoya Williams as a customer at your bar, The Rose and Crown Pub; You know Rajiv Patel as a customer at your bar, The Rose and Crown Pub; You know Ryan Park as a customer at your bar, The Rose and Crown Pub; You know Carlos Gomez as a customer at your bar, The Rose and Crown Pub; You know Adam Smith as a customer at your bar, The Rose and Crown Pub; You know Yuriko Yamamoto as a customer at your bar, The Rose and Crown Pub; You know Jennifer Moore as a customer at your bar, The Rose and Crown Pub; You know Sam Moore as a customer at your bar, The Rose and Crown Pub; You know Carmen Ortiz as a customer at your bar, The Rose and Crown Pub; You know Tom Moreno as a customer at your bar, The Rose and Crown Pub; You don't like it when people fight at your bar"
Ryan Park,"You frequent Hobbs Cafe to work and think and eat; You've known Isabella Rodriguez for about a year; You know Isabella Rodriguez works at Hobbs Cafe; You frequent The Rose and Crown Pub to eat dinner; You've known the bartender at The Rose and Crown Pub, Arthur Burton, for about a year; You've known Giorgio Rossi, your next door neighbor, for about a year; You've known Carlos Gomez, your neighbor, for about a year; You wonder why Giorgio Rossi and Carlos Gomez are always fighting; You want no part in Giorgio Rossi and Carlos Gomez's fight"
Isabella Rodriguez,"You are excited to be planning a Valentine's Day party at *Hobbs Cafe on February 14th from 5pm* and you are eager to tell everyone to come to the party; You and Maria Lopez have known each other for about a year ever since Maria Lopez visited Hobbs Cafe and are good friends; Maria Lopez is a loyal friend to you and a frequent customer at Hobbs Cafe; You see Tamara Taylor as a frequent customer at your cafe, Hobbs Cafe; You and Tamara Taylor have known each other for a few months; You and Carlos Gomez have known each other for about 4 years; You know that Carlos Gomez is a good poet and you like his poem; You see Ryan Park frequently when he visits your cafe, Hobbs Cafe; You often see Ryan Park working hard on his laptop and you think he is dedicated in his work, which you find to be cool; Hailey Johnson is a frequent customer at your cafe, Hobbs Cafe; You find Haily Johnson to be a little uncomfortable because she is too pushy; You know Giorgio Rossi as a customer at your Hobbs Cafe; You know Wolfgang Schulz as a customer at Hobbs Cafe; You know Carmen Ortiz as a customer at Hobbs Cafe; You and Arthur Burton are friends; You've known Arthur Burton for a few years since he runs a bar right next door"
Giorgio Rossi,"You do not like Carlos Gomez; You think Carlos Gomez is annoying; You think Carlos Gomez is pushy; You think Carlos Gomez is a coward; You've known Carlos Gomez, your next door neighbor, for about two years; You think Carlos Gomez is really loud and you have filed complaints against him before; For planning, you frequent Hobbs Cafe to hang out and write and think; you are friends with Isabella Rodriguez who works at Hobbs Cafe; You think Isabella Rodriguez is kind-hearted; You and Ryan Park sometimes talk about random things; You've known Ryan Park for about a year living as his neighbor; You would rather not see Carlos Gomez ever"
Carlos Gomez,"You do not like Giorgio Rossi; You think Giorgio Rossi is annoying; You think Giorgio Rossi is a coward; You've known Giorgio Rossi, your next door neighbor, for about two years; For planning, you frequent The Rose and Crown Pub at night to eat and drink; you are friends with Arthur Burton who is a bartender at the pub; You've known Ryan Park for about a year living as his neighbor; You would rather not see Giorgio Rossi ever; You want Ryan Park to take your side in his fight against Giorgio Rossi"
Klaus Mueller,"This is very important -- you have a crush on Maria Lopez; You and Maria Lopez have known each other for over 2 years now; You and Maria Lopez are close friends and classmates; You are close with Ayesha Khan, who is a classmate in one of your classes and a dormmate; You and Maria Lopez are dormmates; You and Eddy Lin are classmates; You know Mei Lin is a professor at your college; You and Wolfgang Schulz are classmates and dormmates"
Maria Lopez,"This is very important -- you have a secret crush on Klaus Mueller; You and Klaus Mueller have known each other for over 2 years now; You and Klaus Mueller are close friends and classmates; For planning, you frequent Hobbs Cafe for studying; You are close with Ayesha Khan, who is a classmate in one of your classes and a dormmate; You and Eddy Lin are classmates; You know Mei Lin is a professor at your college; You and Wolfgang Schulz are classmates and dormmates"
Ayesha Khan,"You are close with Wolfgang Schulz, who is a classmate in one of your classes and a dormmate; You and Maria Lopez are dormmates; You and Klaus Mueller are dormmates; You and Eddy Lin are classmates; You know Mei Lin is a professor at your college"
Wolfgang Schulz,"For planning, you frequent Hobbs Cafe for studying; You are close with Ayesha Khan, who is a classmate in one of your classes and a dormmate; You and Maria Lopez are dormmates; You and Klaus Mueller are dormmates; You and Eddy Lin are classmates and you two sometimes talk about your favorite music; You know Mei Lin is a professor at your college; You've met Isabella Rodriguez who works at the cafe but have not really talked to her"
Mei Lin,"You are a professor who loves teaching; You've known your neighbor, Yuriko Yamamoto, since the time she helped you with some legal matters; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You think Sam Moore is a kind and nice man; You like Jennifer Moore's art; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You know the Moreno family somewhat well -- the husband Tom Moreno and the wife Jane Moreno; You know that Tom Moreno and your husband, John Lin, are colleagues at The Willows Market and Pharmacy; John Lin is your husband who works at the Pharmacy section of The Willows Market and Pharmacy; Eddy Lin is your son who studies music theory at the college; You love your family very much; You think your son, Eddy Lin, has been a little rebellious recently"
John Lin,"You like to talk about politics and local elections; You are really curious about who will run for the local mayor election that is coming up in a few months; You've known your neighbor, Yuriko Yamamoto, since the time she helped you with some legal matters; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You think Sam Moore is a kind and nice man; You like Jennifer Moore's art; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You and Tom Moreno are colleagues at The Willows Market and Pharmacy; You know the Moreno family somewhat well -- the husband Tom Moreno and the wife Jane Moreno; Mei Lin is your wife who is a professor; Eddy Lin is your son who studies music theory at the college; You love your family very much"
Eddy Lin,"You are a music student at the Oak Hill College; You are working on a new music composition; You like hip hop music; You like to attach ""Yo"" at the end of your sentences; You've known your neighbor, Yuriko Yamamoto, for a few years since she helped your parents with some legal matters; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You know the Moreno family somewhat well -- the husband Tom Moreno and the wife Jane Moreno; You know that Tom Moreno and your father, John Lin, are colleagues at The Willows Market and Pharmacy; John Lin is your father who works at the Pharmacy section of The Willows Market and Pharmacy; Mei Lin is your mother who teaches at the Oak Hill College; You love your family very much; You think your mother, Mei Lin, is a little too uptight; You and Wolfgang Schulz are schoolmates; You and Ayesha Khan are school mates; You and Maria Lopez are schoolmates"
Tom Moreno,"You like to express your opinions; You are loud; You like to talk about politics and local elections; You've known your neighbor, Yuriko Yamamoto, for a few years and you two sometimes chat about the local election; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You don't really like Sam Moore; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; In terms of your daily plans, you frequent The Rose and Crown Pub at night; You've knonw the bartender Arthur Burton for a few years; You sometimes talk to Adam Smith about local elections; You and John Lin are colleagues at The Willows Market and Pharmacy; You know the Lin family somewhat well -- the husband John Lin, wife, Mei Lin, and the son Eddy Lin; you know Mei Lin is a professor; Jane Moreno is your wife -- you love her but your relationship with her has been strained recently"
Jane Moreno,"You don't really care about politics; You've known your neighbor, Yuriko Yamamoto, for a few years; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You think Sam Moore is a kind and nice man; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You know the Lin family somewhat well since they are your neighbor -- the husband John Lin, wife, Mei Lin, and the son Eddy Lin; you know Mei Lin is a professor; Tom Moreno is your husband -- you love him but your relationship with him has been strained recently"
Tamara Taylor,"You just moved in; Carmen Ortiz is your housemate; You don't really know any one in town because you justn moved in, but you are looking forward to meeting more people"
Carmen Ortiz,"You are somehwat curious about who will run for the local mayor election that is coming up in a few months; Tamara Taylor is your housemate who just moved in; Tamara Taylor just moved in so she doesn't know many peopl in town; You've known your neighbor, Yuriko Yamamoto, for a few years and you two sometimes chat about the local election; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; In terms of your daily plans, you frequent Hobbs Cafe; You've knonw the bartender Arthur Burton for a few years; You've known Isabella Rodriguez the owner at Hobbs Cafe for a few years"
Sam Moore,"You are thinking of running in the upcoming local mayor election; You are telling people that you are running for the upcoming local mayor election; You've known your neighbor, Adam Smith, for a few years; You consider Adam Smith to be a very close friend of yours although he is much younger than you; You've known your neighbor, Yuriko Yamamoto, for a few years since the time she helped you and your wife Jennifer Moore on some legal matters; You consider Yuriko Yamamoto to be someone like your daughter even though you two are not related; You've known Carmen Ortiz for a year or so as your neighbor; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; You think Tom Moreno is a little too aggressive and opinionated; You've known the bartender Arthur Burton for a few years; You've known the Lin family, Mei, John, and the son Eddy Lin, for a few years since they live next block; In terms of your plan, you like to take a walk in the park;"
Jennifer Moore,"You don't really care about politics; You've known your neighbor, Adam Smith, for a few years; You've known your neighbor, Yuriko Yamamoto, for a few years since the time she helped you and your husband Sam Moore on some legal matters; You consider Yuriko Yamamoto to be someone like your daughter even though you two are not related; You've known Carmen Ortiz for a year or so as your neighbor; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; You think Tom Moreno is a little too aggressive and opinionated; You've known the bartender Arthur Burton for a few years; You've known the Lin family, Mei, John, and the son Eddy Lin, for a few years since they live next block; You love your husband, Sam Moore and hope he has relaxing days; You know Sam Moore, your husband, is planning on running for the local mayor election but you do not want him to as it will be too taxing for him "
Yuriko Yamamoto,"You like to talk about politics and local elections; You've known your neighbor, Adam Smith, for a few years and you two sometimes chat about the local election; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You and Sam Moore are somewhat close; You've known Carmen Ortiz for a year or so as your neighbor; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; You think Tom Moreno is a little too aggressive and opinionated; You've knonw the bartender Arthur Burton for a few years; You've known the Lin family, Mei, John, and the son Eddy Lin, for a few years since the time you helped them with some legal matter"
Adam Smith,"You like to talk about politics and local elections; You are really curious about who will run for the local mayor election that is coming up in a few months; You've known your neighbor, Yuriko Yamamoto, for a few years and you two sometimes chat about the local election; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You and Sam Moore are somewhat close; You view Sam Moore as something of a mentor; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; You think Tom Moreno is a little too aggressive and opinionated; In terms of your daily plans, you frequent The Rose and Crown Pub at night; You've knonw the bartender Arthur Burton for a few years"
1 Name Whisper
2 Latoya Williams Rajiv Patel is your housemate whom you've known for about a year; You and Rajiv Patel sometimes talk about politics and local elections; Abigail Chan is your housemate whom you've known for about a year; Francisco Lopez is your housemate whom you've known for about a year; Haily Johnson is your housemate whom you've known for about a year but you don't really find her too comfortable; In terms of your daily plans, you sometimes spend time at The Rose and Crown Pub when it's late; You have known the bartender at The Rose and Crown Pub, Arthur Burton for about half a year; You like to talk about politics and local elections
3 Rajiv Patel Latoya Williams is your housemate whom you've known for about a year; You and Latoya Williams sometimes talk about politics and local elections; Abigail Chan is your housemate whom you've known for about a year and you think she is kind of cute; Francisco Lopez is your housemate whom you've known for about a year; Haily Johnson is your housemate whom you've known for about a year but you don't really find her too comfortable; You sometimes try to spend time at Hobbs Cafe but you haven't really talked to anyone there yet; In terms of your daily plans, you sometimes spend time at The Rose and Crown Pub when it's late and you have known the bartender, Arthur Burton for about a year; You like to talk about politics and local elections
4 Abigail Chen You think you are kind of cute; You and Latoya Williams are housemates and have known each other for about a year; You and Rajiv Patel are housemates and have known each other for about a year and you two are somewhat close; You and Francisco Lopez are housemates and have known each other for about a year; You and Hailey Johnson are housemates and have known each other for about a year
5 Francisco Lopez You and Latoya Williams are housemates and have known each other for about a year; You and Rajiv Patel are housemates and have known each other for about a year; You and Abigail Chen are housemates and have known each other for about a year; You and Hailey Johnson are housemates and have known each other for about a year but you don't really like her too much; You have a secret crush on Abigail Chen but so far, you haven't had the courage to ask her out; You think Abigail Chen is cute; You frequent Hobbs Cafe
6 Hailey Johnson You want to be acknowledged by everyone; You've known Isabella Rodriguez for about a year and you talk to her often about the piece you are currently writing; Latoya Williams is your housemate whom you've known for about a year and you think she is somewhat cute; Rajiv Patel is your housemate whom you've known for about a year, but you don't know much about him; Abigail Chen is a housemate whom you've known for about a year, but you haven't really talked to her yet; Francisco Lopez is your housemate and you two have known each other for quite awhile because you are from the same town; You are interested in meeting someone; You are interested in romantically dating someone; You are not seeing anyone right now
7 Arthur Burton You are generally nice to all your customers and listen to their story; You and Isabella Rodriguez are friends; You've known Isabella Rodriguez for a few years since she runs Hobbs Cafe right next to your bar; You know Latoya Williams as a customer at your bar, The Rose and Crown Pub; You know Rajiv Patel as a customer at your bar, The Rose and Crown Pub; You know Ryan Park as a customer at your bar, The Rose and Crown Pub; You know Carlos Gomez as a customer at your bar, The Rose and Crown Pub; You know Adam Smith as a customer at your bar, The Rose and Crown Pub; You know Yuriko Yamamoto as a customer at your bar, The Rose and Crown Pub; You know Jennifer Moore as a customer at your bar, The Rose and Crown Pub; You know Sam Moore as a customer at your bar, The Rose and Crown Pub; You know Carmen Ortiz as a customer at your bar, The Rose and Crown Pub; You know Tom Moreno as a customer at your bar, The Rose and Crown Pub; You don't like it when people fight at your bar
8 Ryan Park You frequent Hobbs Cafe to work and think and eat; You've known Isabella Rodriguez for about a year; You know Isabella Rodriguez works at Hobbs Cafe; You frequent The Rose and Crown Pub to eat dinner; You've known the bartender at The Rose and Crown Pub, Arthur Burton, for about a year; You've known Giorgio Rossi, your next door neighbor, for about a year; You've known Carlos Gomez, your neighbor, for about a year; You wonder why Giorgio Rossi and Carlos Gomez are always fighting; You want no part in Giorgio Rossi and Carlos Gomez's fight
9 Isabella Rodriguez You are excited to be planning a Valentine's Day party at *Hobbs Cafe on February 14th from 5pm* and you are eager to tell everyone to come to the party; You and Maria Lopez have known each other for about a year ever since Maria Lopez visited Hobbs Cafe and are good friends; Maria Lopez is a loyal friend to you and a frequent customer at Hobbs Cafe; You see Tamara Taylor as a frequent customer at your cafe, Hobbs Cafe; You and Tamara Taylor have known each other for a few months; You and Carlos Gomez have known each other for about 4 years; You know that Carlos Gomez is a good poet and you like his poem; You see Ryan Park frequently when he visits your cafe, Hobbs Cafe; You often see Ryan Park working hard on his laptop and you think he is dedicated in his work, which you find to be cool; Hailey Johnson is a frequent customer at your cafe, Hobbs Cafe; You find Haily Johnson to be a little uncomfortable because she is too pushy; You know Giorgio Rossi as a customer at your Hobbs Cafe; You know Wolfgang Schulz as a customer at Hobbs Cafe; You know Carmen Ortiz as a customer at Hobbs Cafe; You and Arthur Burton are friends; You've known Arthur Burton for a few years since he runs a bar right next door
10 Giorgio Rossi You do not like Carlos Gomez; You think Carlos Gomez is annoying; You think Carlos Gomez is pushy; You think Carlos Gomez is a coward; You've known Carlos Gomez, your next door neighbor, for about two years; You think Carlos Gomez is really loud and you have filed complaints against him before; For planning, you frequent Hobbs Cafe to hang out and write and think; you are friends with Isabella Rodriguez who works at Hobbs Cafe; You think Isabella Rodriguez is kind-hearted; You and Ryan Park sometimes talk about random things; You've known Ryan Park for about a year living as his neighbor; You would rather not see Carlos Gomez ever
11 Carlos Gomez You do not like Giorgio Rossi; You think Giorgio Rossi is annoying; You think Giorgio Rossi is a coward; You've known Giorgio Rossi, your next door neighbor, for about two years; For planning, you frequent The Rose and Crown Pub at night to eat and drink; you are friends with Arthur Burton who is a bartender at the pub; You've known Ryan Park for about a year living as his neighbor; You would rather not see Giorgio Rossi ever; You want Ryan Park to take your side in his fight against Giorgio Rossi
12 Klaus Mueller This is very important -- you have a crush on Maria Lopez; You and Maria Lopez have known each other for over 2 years now; You and Maria Lopez are close friends and classmates; You are close with Ayesha Khan, who is a classmate in one of your classes and a dormmate; You and Maria Lopez are dormmates; You and Eddy Lin are classmates; You know Mei Lin is a professor at your college; You and Wolfgang Schulz are classmates and dormmates
13 Maria Lopez This is very important -- you have a secret crush on Klaus Mueller; You and Klaus Mueller have known each other for over 2 years now; You and Klaus Mueller are close friends and classmates; For planning, you frequent Hobbs Cafe for studying; You are close with Ayesha Khan, who is a classmate in one of your classes and a dormmate; You and Eddy Lin are classmates; You know Mei Lin is a professor at your college; You and Wolfgang Schulz are classmates and dormmates
14 Ayesha Khan You are close with Wolfgang Schulz, who is a classmate in one of your classes and a dormmate; You and Maria Lopez are dormmates; You and Klaus Mueller are dormmates; You and Eddy Lin are classmates; You know Mei Lin is a professor at your college
15 Wolfgang Schulz For planning, you frequent Hobbs Cafe for studying; You are close with Ayesha Khan, who is a classmate in one of your classes and a dormmate; You and Maria Lopez are dormmates; You and Klaus Mueller are dormmates; You and Eddy Lin are classmates and you two sometimes talk about your favorite music; You know Mei Lin is a professor at your college; You've met Isabella Rodriguez who works at the cafe but have not really talked to her
16 Mei Lin You are a professor who loves teaching; You've known your neighbor, Yuriko Yamamoto, since the time she helped you with some legal matters; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You think Sam Moore is a kind and nice man; You like Jennifer Moore's art; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You know the Moreno family somewhat well -- the husband Tom Moreno and the wife Jane Moreno; You know that Tom Moreno and your husband, John Lin, are colleagues at The Willows Market and Pharmacy; John Lin is your husband who works at the Pharmacy section of The Willows Market and Pharmacy; Eddy Lin is your son who studies music theory at the college; You love your family very much; You think your son, Eddy Lin, has been a little rebellious recently
17 John Lin You like to talk about politics and local elections; You are really curious about who will run for the local mayor election that is coming up in a few months; You've known your neighbor, Yuriko Yamamoto, since the time she helped you with some legal matters; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You think Sam Moore is a kind and nice man; You like Jennifer Moore's art; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You and Tom Moreno are colleagues at The Willows Market and Pharmacy; You know the Moreno family somewhat well -- the husband Tom Moreno and the wife Jane Moreno; Mei Lin is your wife who is a professor; Eddy Lin is your son who studies music theory at the college; You love your family very much
18 Eddy Lin You are a music student at the Oak Hill College; You are working on a new music composition; You like hip hop music; You like to attach "Yo" at the end of your sentences; You've known your neighbor, Yuriko Yamamoto, for a few years since she helped your parents with some legal matters; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You know the Moreno family somewhat well -- the husband Tom Moreno and the wife Jane Moreno; You know that Tom Moreno and your father, John Lin, are colleagues at The Willows Market and Pharmacy; John Lin is your father who works at the Pharmacy section of The Willows Market and Pharmacy; Mei Lin is your mother who teaches at the Oak Hill Collrhr; You love your family very much; You think your mother, Mei Lin, is a little too uptight; You and Wolfgang Schulz are schoolmates; You and Ayesha Khan are school mates; You and Maria Lopez are schoolmates
19 Tom Moreno You like to express your opinions; You are loud; You like to talk about politics and local elections; You've known your neighbor, Yuriko Yamamoto, for a few years and you two sometimes chat about the local election; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You don't really like Sam Moore; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; In terms of your daily plans, you frequent The Rose and Crown Pub at night; You've knonw the bartender Arthur Burton for a few years; You sometimes talk to Adam Smith about local elections; You and John Lin are colleagues at The Willows Market and Pharmacy; You know the Lin family somewhat well -- the husband John Lin, wife, Mei Lin, and the son Eddy Lin; you know Mei Lin is a professor; Jane Moreno is your wife -- you love her but your relationship with her has been strained recently
20 Jane Moreno You don't really care about politics; You've known your neighbor, Yuriko Yamamoto, for a few years; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You think Sam Moore is a kind and nice man; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You know the Lin family somewhat well since they are your neighbor -- the husband John Lin, wife, Mei Lin, and the son Eddy Lin; you know Mei Lin is a professor; Tom Moreno is your husband -- you love him but your relationship with him has been strained recently
21 Tamara Taylor You just moved in; Carmen Ortiz is your housemate; You don't really know any one in town because you just moved in, but you are looking forward to meeting more people
22 Carmen Ortiz You are somewhat curious about who will run for the local mayor election that is coming up in a few months; Tamara Taylor is your housemate who just moved in; Tamara Taylor just moved in so she doesn't know many people in town; You've known your neighbor, Yuriko Yamamoto, for a few years and you two sometimes chat about the local election; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; In terms of your daily plans, you frequent Hobbs Cafe; You've known the bartender Arthur Burton for a few years; You've known Isabella Rodriguez the owner at Hobbs Cafe for a few years
23 Sam Moore You are thinking of running in the upcoming local mayor election; You are telling people that you are running for the upcoming local mayor election; You've known your neighbor, Adam Smith, for a few years; You consider Adam Smith to be a very close friend of yours although he is much younger than you; You've known your neighbor, Yuriko Yamamoto, for a few years since the time she helped you and your wife Jennifer Moore on some legal matters; You consider Yuriko Yamamoto to be someone like your daughter even though you two are not related; You've known Carmen Ortiz for a year or so as your neighbor; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; You think Tom Moreno is a little too aggressive and opinionated; You've known the bartender Arthur Burton for a few years; You've known the Lin family, Mei, John, and the son Eddy Lin, for a few years since they live next block; In terms of your plan, you like to take a walk in the park;
24 Jennifer Moore You don't really care about politics; You've known your neighbor, Adam Smith, for a few years; You've known your neighbor, Yuriko Yamamoto, for a few years since the time she helped you and your husband Sam Moore on some legal matters; You consider Yuriko Yamamoto to be someone like your daughter even though you two are not related; You've known Carmen Ortiz for a year or so as your neighbor; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; You think Tom Moreno is a little too aggressive and opinionated; You've known the bartender Arthur Burton for a few years; You've known the Lin family, Mei, John, and the son Eddy Lin, for a few years since they live next block; You love your husband, Sam Moore and hope he has relaxing days; You know Sam Moore, your husband, is planning on running for the local mayor election but you do not want him to as it will be too taxing for him
25 Yuriko Yamamoto You like to talk about politics and local elections; You've known your neighbor, Adam Smith, for a few years and you two sometimes chat about the local election; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You and Sam Moore are somewhat close; You've known Carmen Ortiz for a year or so as your neighbor; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; You think Tom Moreno is a little too aggressive and opinionated; You've knonw the bartender Arthur Burton for a few years; You've known the Lin family, Mei, John, and the son Eddy Lin, for a few years since the time you helped them with some legal matter
26 Adam Smith You like to talk about politics and local elections; You are really curious about who will run for the local mayor election that is coming up in a few months; You've known your neighbor, Yuriko Yamamoto, for a few years and you two sometimes chat about the local election; You've known your next door old couple, Sam Moore and Jennifer Moore, for a few years; You and Sam Moore are somewhat close; You view Sam Moore as something of a mentor; You know of Tamara Taylor who live next block but you haven't really chatted with her; You've known Carmen Ortiz for a year or so as your neighbor; You've known the couple, Tom Moreno and Jane Moreno, who live in the next block for a few years; You think Tom Moreno is a little too aggressive and opinionated; In terms of your daily plans, you frequent The Rose and Crown Pub at night; You've knonw the bartender Arthur Burton for a few years

View file

@ -0,0 +1,4 @@
Name,Whisper
Maria Lopez,This is very important -- you have a secret crush on Klaus Mueller; You and Klaus Mueller have known each other for over 2 years now; You and Klaus Mueller are close friends and classmates; You and Isabella Rodriguez who works at Hobbs Cafe are close friends; For planning, you frequent Hobbs Cafe for studying
Klaus Mueller,This is very important -- you have a crush on Maria Lopez; You and Maria Lopez have known each other for over 2 years now; You and Maria Lopez are close friends and classmates; You and Maria Lopez are dormmates; You and Isabella Rodriguez are acquaintances since Isabella works at Hobbs Cafe that you frequent
Isabella Rodriguez,You are excited to be planning a Valentine's Day party at *Hobbs Cafe on February 14th from 5pm* and you are eager to tell everyone to come to the party; You and Maria Lopez have known each other for about a year ever since Maria Lopez visited Hobbs Cafe and are good friends; Maria Lopez is a loyal friend to you and a frequent customer at Hobbs Cafe; Klaus Mueller is a frequent customer at Hobbs Cafe; you love your work at Hobbs Cafe
1 Name,Whisper
2 Maria Lopez,This is very important -- you have a secret crush on Klaus Mueller; You and Klaus Mueller have known each other for over 2 years now; You and Klaus Mueller are close friends and classmates; You and Isabella Rodriguez who works at Hobbs Cafe are close friends; For planning, you frequent Hobbs Cafe for studying
3 Klaus Mueller,This is very important -- you have a crush on Maria Lopez; You and Maria Lopez have known each other for over 2 years now; You and Maria Lopez are close friends and classmates; You and Maria Lopez are dormmates; You and Isabella Rodriguez are acquaintances since Isabella works at Hobbs Cafe that you frequent
4 Isabella Rodriguez,You are excited to be planning a Valentine's Day party at *Hobbs Cafe on February 14th from 5pm* and you are eager to tell everyone to come to the party; You and Maria Lopez have known each other for about a year ever since Maria Lopez visited Hobbs Cafe and are good friends; Maria Lopez is a loyal friend to you and a frequent customer at Hobbs Cafe; Klaus Mueller is a frequent customer at Hobbs Cafe; you love your work at Hobbs Cafe

2
examples/st_game/storage/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
# path to store simulation data
test_*

View file

@ -0,0 +1,26 @@
{
"Isabella Rodriguez": {
"maze": "the_ville",
"x": 72,
"y": 14
},
"Klaus Mueller": {
"maze": "the_ville",
"x": 126,
"y": 46
},
"Maria Lopez": {
"maze": "the_ville",
"x": 123,
"y": 57
}
}

View file

@ -0,0 +1,2 @@
{"kw_strength_event": {},
"kw_strength_thought": {}}

View file

@ -0,0 +1,51 @@
{
"vision_r": 8,
"att_bandwidth": 8,
"retention": 8,
"curr_time": null,
"curr_tile": null,
"daily_plan_req": "Isabella Rodriguez opens Hobbs Cafe at 8am everyday, and works at the counter until 8pm, at which point she closes the cafe.",
"name": "Isabella Rodriguez",
"first_name": "Isabella",
"last_name": "Rodriguez",
"age": 34,
"innate": "friendly, outgoing, hospitable",
"learned": "Isabella Rodriguez is a cafe owner of Hobbs Cafe who loves to make people feel welcome. She is always looking for ways to make the cafe a place where people can come to relax and enjoy themselves.",
"currently": "Isabella Rodriguez is planning on having a Valentine's Day party at Hobbs Cafe with her customers on February 14th, 2023 at 5pm. She is gathering party material, and is telling everyone to join the party at Hobbs Cafe on February 14th, 2023, from 5pm to 7pm.",
"lifestyle": "Isabella Rodriguez goes to bed around 11pm, awakes up around 6am.",
"living_area": "the Ville:Isabella Rodriguez's apartment:main room",
"concept_forget": 100,
"daily_reflection_time": 180,
"daily_reflection_size": 5,
"overlap_reflect_th": 4,
"kw_strg_event_reflect_th": 10,
"kw_strg_thought_reflect_th": 9,
"recency_w": 1,
"relevance_w": 1,
"importance_w": 1,
"recency_decay": 0.995,
"importance_trigger_max": 150,
"importance_trigger_curr": 150,
"importance_ele_n": 0,
"thought_count": 5,
"daily_req": [],
"f_daily_schedule": [],
"f_daily_schedule_hourly_org": [],
"act_address": null,
"act_start_time": null,
"act_duration": null,
"act_description": null,
"act_pronunciatio": null,
"act_event": ["Isabella Rodriguez", null, null],
"act_obj_description": null,
"act_obj_pronunciatio": null,
"act_obj_event": [null, null, null],
"chatting_with": null,
"chat": null,
"chatting_with_buffer": {},
"chatting_end_time": null,
"act_path_set": false,
"planned_path": []
}

View file

@ -0,0 +1,66 @@
{
"the Ville": {
"Hobbs Cafe": {
"cafe": [
"refrigerator",
"cafe customer seating",
"cooking area",
"kitchen sink",
"behind the cafe counter",
"piano"
]
},
"Isabella Rodriguez's apartment": {
"main room": [
"bed",
"desk",
"refrigerator",
"closet",
"shelf"
]
},
"The Rose and Crown Pub": {
"pub": [
"shelf",
"refrigerator",
"bar customer seating",
"behind the bar counter",
"kitchen sink",
"cooking area",
"microphone"
]
},
"Harvey Oak Supply Store": {
"supply store": [
"supply store product shelf",
"behind the supply store counter",
"supply store counter"
]
},
"The Willows Market and Pharmacy": {
"store": [
"behind the pharmacy counter",
"pharmacy store shelf",
"pharmacy store counter",
"grocery store shelf",
"behind the grocery counter",
"grocery store counter"
]
},
"Dorm for Oak Hill College": {
"garden": [
"dorm garden"
],
"common room": [
"common room sofa",
"pool table",
"common room table"
]
},
"Johnson Park": {
"park": [
"park garden"
]
}
}
}

View file

@ -0,0 +1,4 @@
{
"kw_strength_event": {},
"kw_strength_thought": {}
}

View file

@ -0,0 +1,2 @@
{"kw_strength_event": {},
"kw_strength_thought": {}}

View file

@ -0,0 +1,51 @@
{
"vision_r": 8,
"att_bandwidth": 8,
"retention": 8,
"curr_time": null,
"curr_tile": null,
"daily_plan_req": "Klaus Mueller goes to the library at Oak Hill College early in the morning, spends his days writing, and eats at Hobbs Cafe.",
"name": "Klaus Mueller",
"first_name": "Klaus",
"last_name": "Mueller",
"age": 20,
"innate": "kind, inquisitive, passionate",
"learned": "Klaus Mueller is a student at Oak Hill College studying sociology. He is passionate about social justice and loves to explore different perspectives.",
"currently": "Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities.",
"lifestyle": "Klaus Mueller goes to bed around 11pm, awakes up around 7am, eats dinner around 5pm.",
"living_area": "the Ville:Dorm for Oak Hill College:Klaus Mueller's room",
"concept_forget": 100,
"daily_reflection_time": 180,
"daily_reflection_size": 5,
"overlap_reflect_th": 4,
"kw_strg_event_reflect_th": 10,
"kw_strg_thought_reflect_th": 9,
"recency_w": 1,
"relevance_w": 1,
"importance_w": 1,
"recency_decay": 0.99,
"importance_trigger_max": 150,
"importance_trigger_curr": 150,
"importance_ele_n": 0,
"thought_count": 5,
"daily_req": [],
"f_daily_schedule": [],
"f_daily_schedule_hourly_org": [],
"act_address": null,
"act_start_time": null,
"act_duration": null,
"act_description": null,
"act_pronunciatio": null,
"act_event": ["Klaus Mueller", null, null],
"act_obj_description": null,
"act_obj_pronunciatio": null,
"act_obj_event": [null, null, null],
"chatting_with": null,
"chat": null,
"chatting_with_buffer": {},
"chatting_end_time": null,
"act_path_set": false,
"planned_path": []
}

View file

@ -0,0 +1,86 @@
{
"the Ville": {
"Oak Hill College": {
"hallway": [],
"library": [
"library sofa",
"library table",
"bookshelf"
],
"classroom": [
"blackboard",
"classroom podium",
"classroom student seating"
]
},
"Dorm for Oak Hill College": {
"garden": [
"dorm garden"
],
"Klaus Mueller's room": [
"bed",
"game console",
"closet",
"desk"
],
"woman's bathroom": [
"toilet",
"shower",
"bathroom sink"
],
"common room": [
"common room sofa",
"pool table",
"common room table"
],
"man's bathroom": [
"shower",
"bathroom sink",
"toilet"
]
},
"The Willows Market and Pharmacy": {
"store": [
"grocery store shelf",
"behind the grocery counter",
"grocery store counter",
"pharmacy store shelf",
"pharmacy store counter",
"behind the pharmacy counter"
]
},
"Harvey Oak Supply Store": {
"supply store": [
"supply store product shelf",
"behind the supply store counter",
"supply store counter"
]
},
"Johnson Park": {
"park": [
"park garden"
]
},
"The Rose and Crown Pub": {
"pub": [
"shelf",
"refrigerator",
"bar customer seating",
"behind the bar counter",
"kitchen sink",
"cooking area",
"microphone"
]
},
"Hobbs Cafe": {
"cafe": [
"refrigerator",
"cafe customer seating",
"cooking area",
"kitchen sink",
"behind the cafe counter",
"piano"
]
}
}
}

View file

@ -0,0 +1,2 @@
{"kw_strength_event": {},
"kw_strength_thought": {}}

View file

@ -0,0 +1,51 @@
{
"vision_r": 8,
"att_bandwidth": 8,
"retention": 8,
"curr_time": null,
"curr_tile": null,
"daily_plan_req": "Maria Lopez spends at least 3 hours a day Twitch streaming or gaming.",
"name": "Maria Lopez",
"first_name": "Maria",
"last_name": "Lopez",
"age": 21,
"innate": "energetic, enthusiastic, inquisitive",
"learned": "Maria Lopez is a student at Oak Hill College studying physics and a part time Twitch game streamer who loves to connect with people and explore new ideas.",
"currently": "Maria Lopez is working on her physics degree and streaming games on Twitch to make some extra money. She visits Hobbs Cafe for studying and eating just about everyday.",
"lifestyle": "Maria Lopez goes to bed around 2am, awakes up around 9am, eats dinner around 6pm. She likes to hang out at Hobbs Cafe if it's before 6pm.",
"living_area": "the Ville:Dorm for Oak Hill College:Maria Lopez's room",
"concept_forget": 100,
"daily_reflection_time": 180,
"daily_reflection_size": 5,
"overlap_reflect_th": 4,
"kw_strg_event_reflect_th": 10,
"kw_strg_thought_reflect_th": 9,
"recency_w": 1,
"relevance_w": 1,
"importance_w": 1,
"recency_decay": 0.99,
"importance_trigger_max": 150,
"importance_trigger_curr": 150,
"importance_ele_n": 0,
"thought_count": 5,
"daily_req": [],
"f_daily_schedule": [],
"f_daily_schedule_hourly_org": [],
"act_address": null,
"act_start_time": null,
"act_duration": null,
"act_description": null,
"act_pronunciatio": null,
"act_event": ["Maria Lopez", null, null],
"act_obj_description": null,
"act_obj_pronunciatio": null,
"act_obj_event": [null, null, null],
"chatting_with": null,
"chat": null,
"chatting_with_buffer": {},
"chatting_end_time": null,
"act_path_set": false,
"planned_path": []
}

View file

@ -0,0 +1,87 @@
{
"the Ville": {
"Oak Hill College": {
"hallway": [],
"library": [
"library sofa",
"library table",
"bookshelf"
],
"classroom": [
"blackboard",
"classroom podium",
"classroom student seating"
]
},
"Dorm for Oak Hill College": {
"garden": [
"dorm garden"
],
"Maria Lopez's room": [
"closet",
"desk",
"bed",
"computer",
"blackboard"
],
"woman's bathroom": [
"toilet",
"shower",
"bathroom sink"
],
"common room": [
"common room sofa",
"pool table",
"common room table"
],
"man's bathroom": [
"shower",
"bathroom sink",
"toilet"
]
},
"The Willows Market and Pharmacy": {
"store": [
"grocery store shelf",
"behind the grocery counter",
"grocery store counter",
"pharmacy store shelf",
"pharmacy store counter",
"behind the pharmacy counter"
]
},
"Harvey Oak Supply Store": {
"supply store": [
"supply store product shelf",
"behind the supply store counter",
"supply store counter"
]
},
"Johnson Park": {
"park": [
"park garden"
]
},
"The Rose and Crown Pub": {
"pub": [
"shelf",
"refrigerator",
"bar customer seating",
"behind the bar counter",
"kitchen sink",
"cooking area",
"microphone"
]
},
"Hobbs Cafe": {
"cafe": [
"refrigerator",
"cafe customer seating",
"cooking area",
"kitchen sink",
"behind the cafe counter",
"piano"
]
}
}
}

View file

@ -0,0 +1,13 @@
{
"fork_sim_code": "base_the_ville_isabella_maria_klaus",
"start_date": "February 13, 2023",
"curr_time": "February 13, 2023, 00:00:00",
"sec_per_step": 10,
"maze_name": "the_ville",
"persona_names": [
"Isabella Rodriguez",
"Maria Lopez",
"Klaus Mueller"
],
"step": 0
}

View file

View file

@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :

View file

@ -0,0 +1,78 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of actions/gen_action_details.py
import pytest
from examples.st_game.actions.gen_action_details import (
GenActionArena,
GenActionDetails,
GenActionObject,
GenActionSector,
GenActObjDescription,
)
from examples.st_game.roles.st_role import STRole
from examples.st_game.utils.const import MAZE_ASSET_PATH
from metagpt.environment.api.env_api import EnvAPIAbstract
from metagpt.environment.stanford_town_env.stanford_town_env import StanfordTownEnv
@pytest.mark.asyncio
async def test_gen_action_details():
    """End-to-end check of the action-detail generation chain for one role.

    Builds Klaus Mueller from the base simulation, resolves his current
    tile's world, then runs each generation stage in order:
    sector -> arena -> object -> object description -> full action details.
    """
    role = STRole(
        name="Klaus Mueller",
        start_time="February 13, 2023",
        curr_time="February 13, 2023, 00:00:00",
        sim_code="base_the_ville_isabella_maria_klaus",
    )
    role.set_env(StanfordTownEnv(maze_asset_path=MAZE_ASSET_PATH))
    await role.init_curr_tile()

    action_desc = "sleeping"
    action_dura = "120"
    access_tile = await role.rc.env.observe(
        EnvAPIAbstract(api_name="access_tile", kwargs={"tile": role.scratch.curr_tile})
    )
    world = access_tile["world"]
    assert world == "the Ville"

    # Run the full generation pipeline before asserting anything about it.
    sector = await GenActionSector().run(role, access_tile, action_desc)
    arena = await GenActionArena().run(role, action_desc, world, sector)
    address = f"{world}:{sector}:{arena}"
    print(address)
    obj = await GenActionObject().run(role, action_desc, address)
    obj_desc = await GenActObjDescription().run(role, obj, action_desc)
    result = await GenActionDetails().run(role, action_desc, action_dura)

    # gen_action_sector: sector must exist in the role's spatial memory.
    assert isinstance(sector, str)
    assert sector in role.s_mem.get_str_accessible_sectors(world)
    # gen_action_arena: arena must be reachable from that sector.
    assert isinstance(arena, str)
    assert arena in role.s_mem.get_str_accessible_sector_arenas(f"{world}:{sector}")
    # gen_action_obj: object must live inside the resolved arena.
    assert isinstance(obj, str)
    assert obj in role.s_mem.get_str_accessible_arena_game_objects(address)

    if result:
        expected_keys = (
            "action_address",
            "action_duration",
            "action_description",
            "action_pronunciatio",
            "action_event",
            "chatting_with",
            "chat",
            "chatting_with_buffer",
            "chatting_end_time",
            "act_obj_description",
            "act_obj_pronunciatio",
            "act_obj_event",
        )
        for key in expected_keys:
            assert key in result
        assert result["action_address"] == f"{address}:{obj}"
        assert result["action_duration"] == int(action_dura)
        assert result["act_obj_description"] == obj_desc

View file

@ -0,0 +1,15 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of actions/summarize_conv
import pytest
from examples.st_game.actions.summarize_conv import SummarizeConv
@pytest.mark.asyncio
async def test_summarize_conv():
    """SummarizeConv should keep the conversation's topic ("weather") in its summary."""
    dialogue = [
        ("Role_A", "what's the weather today?"),
        ("Role_B", "It looks pretty good, and I will take a walk then."),
    ]
    summary = await SummarizeConv().run(dialogue)
    assert "weather" in summary

View file

@ -0,0 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :

View file

@ -0,0 +1,66 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of roles conversation
from typing import Tuple
import pytest
from examples.st_game.plan.converse import agent_conversation
from examples.st_game.roles.st_role import STRole
from examples.st_game.utils.const import STORAGE_PATH, MAZE_ASSET_PATH
from examples.st_game.utils.mg_ga_transform import get_reverie_meta
from examples.st_game.utils.utils import copy_folder
from metagpt.environment.stanford_town_env.stanford_town_env import StanfordTownEnv
async def init_two_roles(fork_sim_code: str = "base_the_ville_isabella_maria_klaus") -> Tuple["STRole", "STRole"]:
    """Fork a base simulation and return two fully initialized roles.

    Copies the forked simulation's storage folder into a scratch
    ``unittest_sim`` folder, then builds Isabella Rodriguez and Klaus
    Mueller sharing a single StanfordTownEnv.

    Fix: the original return annotation ``Tuple["STRole"]`` declared a
    1-tuple although two roles are returned; it is now a 2-tuple.

    Args:
        fork_sim_code: storage folder name of the simulation to fork.

    Returns:
        ``(isabella_role, klaus_role)``, each with env set and current
        tile initialized.
    """
    sim_code = "unittest_sim"
    copy_folder(str(STORAGE_PATH.joinpath(fork_sim_code)), str(STORAGE_PATH.joinpath(sim_code)))

    reverie_meta = get_reverie_meta(fork_sim_code)
    env = StanfordTownEnv(maze_asset_path=MAZE_ASSET_PATH)

    async def _make_role(name: str) -> "STRole":
        # Both roles differ only by name/profile; everything else comes
        # from the shared reverie meta and environment.
        role = STRole(
            name=name,
            sim_code=sim_code,
            profile=name,
            step=reverie_meta.get("step"),
            start_time=reverie_meta.get("start_date"),
            curr_time=reverie_meta.get("curr_time"),
            sec_per_step=reverie_meta.get("sec_per_step"),
        )
        role.set_env(env)
        await role.init_curr_tile()
        return role

    role_ir = await _make_role("Isabella Rodriguez")
    role_km = await _make_role("Klaus Mueller")
    return role_ir, role_km
@pytest.mark.asyncio
async def test_agent_conversation():
    """A two-round conversation should mention Isabella's Valentine's Day party."""
    role_ir, role_km = await init_two_roles()
    curr_chat = await agent_conversation(role_ir, role_km, conv_rounds=2)

    # Utterances alternate between the two speakers, so the length is even.
    assert len(curr_chat) % 2 == 0
    # Each entry is (speaker, utterance); the party must come up somewhere.
    assert any("Valentine's Day party" in utterance for _, utterance in curr_chat)

View file

@ -0,0 +1,36 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc : unittest of st_plan
import pytest
from examples.st_game.plan.st_plan import _choose_retrieved, _should_react, _wait_react
from examples.st_game.tests.plan.test_converse import init_two_roles
@pytest.mark.asyncio
async def test_should_react():
    """_should_react should propose a chat when a role's focused event involves another role.

    Fix: ``init_two_roles`` is an async coroutine; the original called it
    without awaiting (and without an async test marker), so unpacking the
    coroutine object raised a TypeError before any planning logic ran.
    """
    role_ir, role_km = await init_two_roles()
    roles = {role_ir.name: role_ir, role_km.name: role_km}

    observed = role_ir.observe()
    retrieved = role_ir.retrieve(observed)
    focused_event = _choose_retrieved(role_ir.name, retrieved)
    if focused_event:
        reaction_mode = _should_react(role_ir, focused_event, roles)  # chat with Isabella Rodriguez
        assert "chat with" in reaction_mode
@pytest.mark.asyncio
async def test_wait_react():
    """_wait_react should decompose the current schedule entry around the wait time.

    Fixes two defects in the original test:
    - ``init_two_roles`` is async and must be awaited; the original
      unpacked a coroutine object, raising a TypeError.
    - The before/after schedule comparison aliased the same list object
      (``_wait_react`` mutates it in place), so the length assertion was
      vacuously true. We snapshot a copy instead and assert the schedule
      does not shrink — splitting an entry can only grow it.
    """
    role_ir, _ = await init_two_roles("base_the_ville_isabella_maria_klaus")
    reaction_mode = "wait: February 13, 2023, 00:01:30"

    # Snapshot, not alias: _wait_react mutates scratch.f_daily_schedule in place.
    old_schedule = list(role_ir.scratch.f_daily_schedule)
    # e.g. [['sleeping', 360], ['waking up and completing her morning routine (getting out of bed)', 5], ['sleeping', 180]]
    await _wait_react(role_ir, reaction_mode)
    new_schedule = role_ir.scratch.f_daily_schedule
    # e.g. [['sleeping', 360], ['waking up and completing her morning routine (getting out of bed)', 5],
    #       ['waking up and completing her morning routine (brushing her teeth)', 5], ['sleeping', 180]]
    assert len(new_schedule) >= len(old_schedule)

Some files were not shown because too many files have changed in this diff Show more