s09
记忆回写
仿真层仿真行为实时更新知识图谱
Agent 行为→自然语言→图谱更新的反馈闭环:后台线程按批次将智能体行为转化为自然语言描述写回 Zep 图谱,让报告生成时能同时检索原始知识和仿真产生的新知识。
仿真行为实时更新知识图谱
Agent 行为→自然语言→图谱更新的反馈闭环:后台线程按批次将智能体行为转化为自然语言描述写回 Zep 图谱,让报告生成时能同时检索原始知识和仿真产生的新知识。
仿真过程中智能体会产生大量互动行为(发帖、点赞、转发、评论等),这些行为包含了原始文档中不存在的新信息。如果只在报告阶段检索原始图谱,会丢失仿真产生的洞察。记忆回写模块将智能体行为实时转化为自然语言写回 Zep,形成知识增强闭环。
行为→自然语言:12 种动作类型的描述模板
class AgentActivity:
    """A single agent action (post, like, repost, ...) that can be rendered
    as a natural-language episode for write-back into the Zep knowledge graph.

    Reads instance attributes (set elsewhere): `agent_name`, `action_type`,
    and `action_args` (a dict of action-specific fields).
    """

    def to_episode_text(self) -> str:
        """Render this activity as ``"<agent_name>: <description>"``.

        Dispatches on `action_type` to a per-action describer. The mapping
        stores method *names* and resolves them lazily via ``getattr`` so the
        dict is cheap to build and only the describer actually needed must
        exist (the original eagerly bound all describer methods, which raised
        ``AttributeError`` for any describer not yet defined).
        """
        action_descriptions = {
            "CREATE_POST": "_describe_create_post",
            "LIKE_POST": "_describe_like_post",
            "REPOST": "_describe_repost",
            "FOLLOW": "_describe_follow",
            "CREATE_COMMENT": "_describe_create_comment",
            "MUTE": "_describe_mute",
            # ... 12 action types in total (remaining describers elided here)
        }
        method_name = action_descriptions.get(self.action_type, "_describe_generic")
        describe_func = getattr(self, method_name)
        return f"{self.agent_name}: {describe_func()}"

    def _describe_create_post(self):
        # NOTE: returned strings are runtime data written into the graph,
        # so the Chinese text is preserved exactly.
        content = self.action_args.get("content", "")
        return f"发布了一条帖子:「{content}」"

    def _describe_like_post(self):
        post_author = self.action_args.get("post_author_name", "")
        post_content = self.action_args.get("post_content", "")
        return f"点赞了{post_author}的帖子:「{post_content}」"

# 批量写入:按平台分组,累积到 BATCH_SIZE 后合并发送
# (Batched write-back: group activities by platform; flush once BATCH_SIZE accumulates.)
class ZepGraphMemoryUpdater:
    """Background writer that flushes agent activities to the Zep graph
    in per-platform batches.

    Reads instance attributes (set elsewhere): `_running` (bool flag),
    `_activity_queue` (queue.Queue of activities), `_platform_buffers`
    (dict-like of platform -> list), `client` (Zep client), `graph_id`.
    """

    BATCH_SIZE = 5       # activities buffered per platform before a flush
    SEND_INTERVAL = 0.5  # seconds between retry attempts of a failed send
    MAX_RETRIES = 3      # send attempts per batch before giving up

    def _worker_loop(self):
        """Drain the activity queue, buffering per platform and flushing
        each platform's buffer whenever it reaches BATCH_SIZE.

        Runs until `_running` is cleared AND the queue is empty, so pending
        items are still drained during shutdown.
        """
        import queue  # stdlib; local import keeps the excerpt self-contained
        while self._running or not self._activity_queue.empty():
            try:
                activity = self._activity_queue.get(timeout=1)
            except queue.Empty:
                # Timed out waiting for work; re-check the running flag.
                # (The original let this propagate, killing the worker thread.)
                continue
            platform = activity.platform.lower()
            buffer = self._platform_buffers[platform]
            buffer.append(activity)
            if len(buffer) >= self.BATCH_SIZE:
                batch = buffer[:self.BATCH_SIZE]
                # Remove the sent slice so it is not re-sent on the next flush
                # (the original never trimmed the buffer, re-sending the same
                # first BATCH_SIZE items and growing the buffer without bound).
                del buffer[:self.BATCH_SIZE]
                self._send_batch_activities(batch, platform)

    def _send_batch_activities(self, activities, platform):
        """Serialize a batch to newline-joined episode text and write it to
        the Zep graph, retrying up to MAX_RETRIES times.

        Waits SEND_INTERVAL seconds between attempts; re-raises the last
        error if every attempt fails. `platform` is accepted for interface
        compatibility but not used by the send itself.
        """
        import time  # stdlib; local import keeps the excerpt self-contained
        texts = [a.to_episode_text() for a in activities]
        combined = "\n".join(texts)
        for attempt in range(1, self.MAX_RETRIES + 1):
            try:
                self.client.graph.add(
                    graph_id=self.graph_id, type="text", data=combined)
                return
            except Exception:
                if attempt == self.MAX_RETRIES:
                    raise  # out of retries: surface the failure to the caller
                time.sleep(self.SEND_INTERVAL)