mirror of
https://github.com/FoundationAgents/MetaGPT.git
synced 2026-04-29 10:56:22 +02:00
为了方便各位尽快使用,我们现在提交第一版新版记忆格式,以及retrive和reflect,方便编写
This commit is contained in:
parent
565dedd2c6
commit
0d0656a125
9 changed files with 412 additions and 0 deletions
91
metagpt/reflect_and_retrieve/GA_memory_storage.py
Normal file
91
metagpt/reflect_and_retrieve/GA_memory_storage.py
Normal file
|
|
@ -0,0 +1,91 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# @Desc : 对应 GA中 concept node 实现 & AssociativeMemory 实现
|
||||
# author: didi
|
||||
# Date:9.24
|
||||
|
||||
from run_gpt import run_gpt_prompt_chat_poignancy,run_gpt_random_concept
|
||||
from gpt_structure import embedding
|
||||
from retrive import agent_retrive
|
||||
from reflect import *
|
||||
import time
|
||||
import json
|
||||
|
||||
# Meomry_basic 类
|
||||
class Meomry_basic:
    """A single memory record (GA "concept node").

    NOTE(review): the class name keeps the original (misspelled) identifier
    so existing callers are unaffected.
    """

    def __init__(
            self, created_time, accessed_time,
            description,
            poignancy,
            embedding_key=None) -> None:
        self.created_time = created_time    # when the memory was created
        self.accessed_time = accessed_time  # when the memory was last retrieved
        self.description = description      # natural-language description of the memory
        self.poignancy = poignancy          # emotional-weight score of the memory
        # Reuse a precomputed embedding when one is supplied, to avoid paying
        # for a duplicate embedding API call.  (`is None` is the idiomatic
        # identity test; `== None` invokes __eq__.)
        if embedding_key is None:
            self.embedding_key = embedding(self.description)
        else:
            self.embedding_key = embedding_key
|
||||
|
||||
# Agent Memory 类
|
||||
class Agent_memeory:
    """Holds an agent's identity and its list of memory records.

    NOTE(review): the class name keeps the original (misspelled) identifier
    so existing callers are unaffected.
    """

    def __init__(self, name, iss,
                 memory_forget=0.99,
                 memories_list=None, memory_path=None) -> None:
        self.name = name                     # agent name
        self.iss = iss                       # identity stable set (personality traits)
        # BUG FIX: the original used a mutable default argument (`=[]`),
        # which is shared across every instance; build a fresh list instead.
        self.memories_list = [] if memories_list is None else memories_list
        self.concept_forget = memory_forget  # forgetting rate (used for recency decay)
        self.memory_path = memory_path       # JSON file path for persisted memories
        # Wall-clock time for now; switch to in-game time once the
        # environment is ready (per the original author's note).
        self.curr_time = time.time()
        # Initialise memories from disk when a path is provided.
        if memory_path:
            self.memories_list = self.memory_load(memory_path)

    def memory_save(self, PATH):
        # Persist memories to PATH as a JSON list of plain attribute dicts,
        # i.e. "{self.name}'s memory" file.
        with open(PATH, 'w') as file:
            memory_data = [mem.__dict__ for mem in self.memories_list]
            json.dump(memory_data, file)

    def memory_load(self, PATH):
        """
        Load memories from the JSON file at PATH and return them as a list;
        returns an empty list when the file is missing or malformed.
        """
        try:
            with open(PATH, 'r') as file:
                memory_data = json.load(file)
            self.memories_list = [Meomry_basic(**mem) for mem in memory_data]
            return self.memories_list
        except (OSError, json.JSONDecodeError, TypeError):
            # Narrowed from a bare `except:`; missing/unreadable file or a
            # bad payload means "start with no memories".
            return []
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Example: build the "John" agent and exercise retrieval.
    John_iss = "John Lin is a pharmacy shopkeeper at the Willow Market and Pharmacy who loves to help people. He is always looking for ways to make the process of getting medication easier for his customers; John Lin is living with his wife, Mei Lin, who is a college professor, and son, Eddy Lin, who is a student studying music theory; John Lin loves his family very much; John Lin has known the old couple next-door, Sam Moore and Jennifer Moore, for a few years; John Lin thinks Sam Moore is a kind and nice man; John Lin knows his neighbor, Yuriko Yamamoto, well; John Lin knows of his neighbors, Tamara Taylor and Carmen Ortiz, but has not met them before; John Lin and Tom Moreno are colleagues at The Willows Market and Pharmacy; John Lin and Tom Moreno are friends and like to discuss local politics together; John Lin knows the Moreno family somewhat well — the husband Tom Moreno and the wife Jane Moreno."
    John = Agent_memeory("John", John_iss, memory_path="agent_memories/John_memory.json")

    # One-time memory seeding, kept for reference:
    # for i in range(3):
    #     memory = run_gpt_random_concept()
    #     curr_time = time.time()
    #     poignancy = run_gpt_prompt_chat_poignancy(John, memory)
    #     M = Meomry_basic(curr_time, curr_time, memory, poignancy)
    #     John.memories_list.append(M)

    # John.memory_save(John.memory_path)

    # Dump every loaded memory with its poignancy score.
    for mem in John.memories_list:
        print(f"John记忆为:{mem.description}")
        print(f"心酸程度为:{mem.poignancy}")
    query = "How has John's personal connection with his neighbors, such as the Moores and Yuriko, influenced his role as a pharmacy shopkeeper?"

    Top_v = agent_retrive(John, query, 10, 3)
    print(f"John的相关信息:{Top_v}")

    # Sample output:
    # {'Had a friendly chat with Yuriko about her garden.': 2.4992317730827667, 'Helped Mrs. Moore carry groceries into her house.': 1.957656720441911, 'Discussed local politics with Tom Moreno.': 1.9458268038234035}
    A = generate_focus_point(John.memories_list)
    B = generate_insights_and_evidence(John, John.memories_list, question=A[0])
|
||||
|
||||
|
||||
|
|
@ -0,0 +1,17 @@
|
|||
poignancy_chat_v1.txt
|
||||
|
||||
!<INPUT 0>!: agent name
|
||||
!<INPUT 1>!: iss
|
||||
!<INPUT 2>!: name
|
||||
!<INPUT 3>!: event description
|
||||
|
||||
<commentblockmarker>###</commentblockmarker>
|
||||
Here is a brief description of !<INPUT 0>!.
|
||||
!<INPUT 1>!
|
||||
|
||||
On the scale of 1 to 10, where 1 is purely mundane (e.g., routine morning greetings) and 10 is extremely poignant (e.g., a conversation about breaking up, a fight), rate the likely poignancy of the following conversation for !<INPUT 2>!.
|
||||
|
||||
Conversation:
|
||||
!<INPUT 3>!
|
||||
|
||||
Rate (return a number between 1 to 10):
|
||||
Binary file not shown.
Binary file not shown.
File diff suppressed because one or more lines are too long
67
metagpt/reflect_and_retrieve/gpt_structure.py
Normal file
67
metagpt/reflect_and_retrieve/gpt_structure.py
Normal file
|
|
@ -0,0 +1,67 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# @Desc : 调用GPT
|
||||
# author: didi
|
||||
# Date:9.25
|
||||
|
||||
import os

import openai

# SECURITY: the original commit hard-coded an OpenAI API key here. Any key
# committed to a repository must be treated as leaked and revoked. Read the
# key from the environment instead of embedding it in source.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
|
||||
# 直接调用Prompt生成
|
||||
def response_generate(prompt):
    """Send a raw prompt to the completion endpoint and return its text."""
    request_params = dict(
        model="gpt-3.5-turbo-instruct",
        prompt=prompt,
        temperature=0,
        max_tokens=20,
        top_p=1,
        stream=False,
        frequency_penalty=0,
        presence_penalty=0,
    )
    completion = openai.Completion.create(**request_params)
    return completion.choices[0].text
|
||||
|
||||
# 特殊指令加入Prompt生成
|
||||
def final_response(prompt, special_instruction, example_output=None):
    """Wrap *prompt* with a JSON-output instruction and query the model.

    Optionally appends an example of the expected {"output": ...} payload.
    """
    pieces = [
        '"""\n', prompt, '\n"""\n',
        f"Output the response to the prompt above in json. {special_instruction}\n",
    ]
    if example_output:
        pieces.append("Example output json:\n")
        pieces.append('{"output": "' + str(example_output) + '"}')
    return response_generate("".join(pieces))
|
||||
|
||||
# prompt填充模板
|
||||
def prompt_generate(curr_input, prompt_lib_file):
    """
    Takes in the current input (e.g. comment that you want to classify) and
    the path to a prompt file. The prompt file contains the raw str prompt
    that will be used, containing substrings of the form !<INPUT i>! -- this
    function replaces each such substring with the corresponding element of
    curr_input to produce the final prompt sent to the GPT server.
    ARGS:
      curr_input: the input we want to feed in (a single str, or a list of
        inputs filling slots 0, 1, ... in order).
      prompt_lib_file: the path to the prompt file.
    RETURNS:
      a str prompt that will be sent to OpenAI's GPT server.
    """
    # Normalise to a list of strings so a lone str fills slot 0 only.
    # (isinstance replaces the original `type(x) == type("string")` check.)
    if isinstance(curr_input, str):
        curr_input = [curr_input]
    curr_input = [str(i) for i in curr_input]

    # Context manager guarantees the template file is closed even on error
    # (the original left the handle open if read() raised).
    with open(prompt_lib_file, "r") as f:
        prompt = f.read()
    for count, i in enumerate(curr_input):
        prompt = prompt.replace(f"!<INPUT {count}>!", i)
    # Templates may carry a comment header above the marker; keep the body.
    if "<commentblockmarker>###</commentblockmarker>" in prompt:
        prompt = prompt.split("<commentblockmarker>###</commentblockmarker>")[1]
    return prompt.strip()
|
||||
|
||||
# 使用OpenAI embedding库进行存储
|
||||
def embedding(query):
    """Embed *query* with OpenAI's ada-002 model and return the vector."""
    response = openai.Embedding.create(
        model="text-embedding-ada-002",
        input=query,
    )
    return response['data'][0]["embedding"]
|
||||
46
metagpt/reflect_and_retrieve/reflect.py
Normal file
46
metagpt/reflect_and_retrieve/reflect.py
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
|
||||
import json
|
||||
from gpt_structure import final_response
|
||||
from retrive import agent_retrive
|
||||
'''
Reflection utilities: generate focus questions from recent memories, then
derive high-level insights grounded in retrieved evidence.
'''
|
||||
def agent_reflect(agent):
    '''
    Reflection entry point (not yet implemented — placeholder stub).

    agent: the agent itself
    '''
    pass
|
||||
|
||||
def generate_focus_point(memories_list, n=3):
    """Ask the model for the *n* most salient high-level questions.

    Memory descriptions are presented oldest-accessed first. Returns a list
    of question strings when the model's JSON answer parses, otherwise the
    raw model output text.
    """
    # Sort directly on the node attribute instead of building throwaway
    # [key, node] pairs as the original did.
    ordered = sorted(memories_list, key=lambda mem: mem.accessed_time)
    statements = "".join(mem.description + "\n" for mem in ordered)
    prompt = '''
{statements}
Given only the information above, what are {num_question} most salient high-level questions we can answer about the subjects grounded in the statements?
'''
    example_output = '["What should Jane do for lunch", "Does Jane like strawberry", "Who is Jane"]'
    out = final_response(prompt.format(statements=statements, num_question=n),
                         "Output must be a list of str.", example_output)
    try:
        poi_dict = json.loads(out)
        return poi_dict['output']
    except (json.JSONDecodeError, KeyError, TypeError):
        # Narrowed from a bare `except:`; fall back to the raw model text
        # when it is not the expected {"output": [...]} JSON.
        return out
|
||||
|
||||
def generate_insights_and_evidence(agent, memories_list, question, n=5):
    """Ask the model for *n* high-level insights over *memories_list*.

    BUG FIX: the original only printed the model response and implicitly
    returned None, yet the caller assigns the result; return it as well
    (printing is kept for parity).
    """
    # NOTE(review): the retrieval result is discarded here; kept to preserve
    # the original call — confirm whether it was meant to filter the
    # statements fed to the prompt.
    agent_retrive(agent, question, 20, 10)
    statements = ""
    for count, mem in enumerate(memories_list):
        statements += f'{str(count)}. {mem.description}\n'
    prompt = '''
Input:
{statements}

What {n} high-level insights can you infer from the above statements? (example format: insight (because of 1, 5, 3))
1.'''

    ret = final_response(prompt.format(question=question, statements=statements, n=n),
                         "['insightA',(1,2,3)]")
    print(ret)
    return ret
|
||||
132
metagpt/reflect_and_retrieve/retrive.py
Normal file
132
metagpt/reflect_and_retrieve/retrive.py
Normal file
|
|
@ -0,0 +1,132 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# @Desc : 实现GA中检索函数
|
||||
# author: didi
|
||||
# Date:9.25
|
||||
|
||||
from numpy import dot
|
||||
from numpy.linalg import norm
|
||||
from gpt_structure import embedding
|
||||
|
||||
# 实现三(2)合一搜索
|
||||
def agent_retrive(agent, query, n, topk):
    """Score the agent's *n* most recently accessed memories against *query*
    and return the *topk* best as {description: combined_score}.

    Combined score = normalised importance + recency + relevance with equal
    weights. Returns {} when the agent has no memories.
    """
    # Most recently accessed first. Slicing already copes with short lists,
    # so the original `if len(...) >= n` conditional was redundant.
    nodes = sorted(agent.memories_list,
                   key=lambda node: node.accessed_time, reverse=True)[:n]
    if not nodes:
        # Guard: normalisation below would call min()/max() on empty lists.
        return {}

    # Each score entry has the shape:
    # {"memory": node, "importance": poignancy, "recency": decay, "relevance": similarity}
    score_list = extract_importance(nodes, [])
    score_list = extract_recency(score_list)     # decay not implemented yet; always 1
    score_list = extract_relevance(score_list, query)

    score_list = normalize_Socre_floats(score_list, 0, 1)
    gw = [1, 1, 1]  # weights: importance, recency, relevance
    total_dict = {}
    for entry in score_list:
        total_score = (entry['importance'] * gw[0] +
                       entry['recency'] * gw[1] +
                       entry['relevance'] * gw[2])
        # NOTE(review): keyed by description, so memories sharing a
        # description collapse to one entry — confirm that is acceptable.
        total_dict[entry['memory'].description] = total_score

    return top_highest_x_values(total_dict, topk)
|
||||
|
||||
def top_highest_x_values(d, x):
    """Return a dict of the *x* highest-valued items of *d*, best first."""
    ranked = sorted(d.items(), key=lambda item: item[1], reverse=True)
    return dict(ranked[:x])
|
||||
# 抽取重要性
|
||||
def extract_importance(Nodes, Score_list):
    """Append one {"memory", "importance"} entry per node to Score_list.

    Importance is the node's poignancy score; the (mutated) list is returned.
    """
    for node in Nodes:
        Score_list.append({"memory": node, "importance": node.poignancy})
    return Score_list
|
||||
|
||||
# 抽取相关性
|
||||
def extract_relevance(Score_list, query):
    """Attach cosine similarity between each memory's embedding and *query*.

    The query is embedded once; the (mutated) list is returned.
    """
    query_embedding = embedding(query)
    for entry in Score_list:
        entry['relevance'] = cos_sim(entry["memory"].embedding_key, query_embedding)
    return Score_list
|
||||
|
||||
# 抽取近因性
|
||||
def extract_recency(Score_list):
    """Placeholder recency: every entry scores 1 until decay is implemented."""
    for entry in Score_list:
        entry['recency'] = 1
    return Score_list
|
||||
|
||||
# 计算余弦相似度
|
||||
def cos_sim(a, b):
    """Cosine similarity of vectors *a* and *b*."""
    denominator = norm(a) * norm(b)
    return dot(a, b) / denominator
|
||||
|
||||
# 单个列表归一化
|
||||
def normalize_List_floats(Single_list, target_min, target_max):
    """Linearly rescale *Single_list* in place to [target_min, target_max].

    A constant list has zero range, so every element maps to the midpoint of
    the target interval. Raises ValueError on an empty list (min/max).
    Returns the (mutated) list.
    """
    min_val = min(Single_list)
    max_val = max(Single_list)
    range_val = max_val - min_val

    if range_val == 0:
        # BUG FIX: the midpoint is (min + max) / 2. The original used
        # (target_max - target_min) / 2, which lands outside the target
        # interval whenever target_min > 0 (e.g. [2, 4] -> 1 instead of 3).
        # For the 0..1 range used by callers the two coincide, so existing
        # behaviour is unchanged there.
        midpoint = (target_max + target_min) / 2
        for i in range(len(Single_list)):
            Single_list[i] = midpoint
    else:
        for i in range(len(Single_list)):
            Single_list[i] = ((Single_list[i] - min_val) * (target_max - target_min)
                              / range_val + target_min)
    return Single_list
|
||||
|
||||
# 整体归一化
|
||||
def normalize_Socre_floats(Score_list, target_min, target_max):
    """Normalise each score channel across all entries to the target range.

    The importance, relevance and recency values are each rescaled as a
    group via normalize_List_floats; the (mutated) list is returned.
    """
    for key in ('importance', 'relevance', 'recency'):
        channel = [entry[key] for entry in Score_list]
        channel = normalize_List_floats(channel, target_min, target_max)
        for entry, value in zip(Score_list, channel):
            entry[key] = value
    return Score_list
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
58
metagpt/reflect_and_retrieve/run_gpt.py
Normal file
58
metagpt/reflect_and_retrieve/run_gpt.py
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
# @Desc : 调用PROMPT
|
||||
# author: didi
|
||||
# Date:9.25
|
||||
|
||||
import random
|
||||
import json
|
||||
from gpt_structure import final_response,prompt_generate
|
||||
|
||||
# 使用GPT衡量心酸程度
|
||||
def run_gpt_prompt_chat_poignancy(agent, event_description):
    """
    Rate how poignant *event_description* is for *agent* on a 1-10 scale.

    Modelled on GA's run-GPT construction; the agent's ISS is its identity
    stable set. Returns the parsed value when the model answers with JSON
    containing a 'poignancy' key, otherwise the raw model text.
    """
    def create_prompt_input(agent, event_description):
        # Order must match the !<INPUT i>! slots in the template:
        # 0 = agent name, 1 = iss, 2 = name again, 3 = event description.
        return [agent.name,
                agent.iss,
                agent.name,
                event_description]

    # 1. Build the prompt from the template file.
    # 2. Constrain the output format via the special instruction.
    prompt_template = "Prompt_template/poignancy_chat_v1.txt"
    prompt_input = create_prompt_input(agent, event_description)
    prompt = prompt_generate(prompt_input, prompt_template)
    special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10."
    poignancy = final_response(prompt, special_instruction)
    try:
        poi_dict = json.loads(poignancy)
        # NOTE(review): final_response's example payload uses an "output"
        # key; confirm the model really answers with a 'poignancy' key here.
        return poi_dict['poignancy']
    except (json.JSONDecodeError, KeyError, TypeError):
        # Narrowed from a bare `except:`; hand back the raw text when it is
        # not the expected JSON.
        return poignancy
|
||||
|
||||
# 返回John随机记忆
|
||||
def run_gpt_random_concept():
    """Return one of John's canned sample memories, chosen at random.

    The duplicated entries are preserved from the original so the selection
    distribution is unchanged.
    """
    random_memories = (
        "Helped Mrs. Moore carry groceries into her house.",
        "Had a friendly chat with Yuriko about her garden.",
        "Met Tom Moreno for coffee during our lunch break.",
        "Talked to Mei about their upcoming vacation plans.",
        "Eddy played his new music composition for me.",
        "Helped a customer find a specific medication.",
        "John divorced his wife because he was in love with someone else",
        "Helped Mrs. Moore carry groceries into her house.",
        "Had a friendly chat with Yuriko about her garden.",
        "Met Tom Moreno for coffee during our lunch break.",
        "Talked to Mei about their upcoming vacation plans.",
        "Eddy played his new music composition for me.",
        "Helped a customer find a specific medication.",
        "Wished Carmen a good day as she passed by the pharmacy.",
        "Discussed local politics with Tom Moreno.",
        "Gave gardening tips to Mrs. Yamamoto.",
        "Saw Jane Moreno jogging in the morning.",
    )
    return random.choice(random_memories)
|
||||
Loading…
Add table
Add a link
Reference in a new issue