333 changes: 332 additions & 1 deletion docs/tutorial/en/src/task_long_term_memory.py
@@ -141,6 +141,328 @@ async def retrieve_preferences():
asyncio.run(retrieve_preferences())


# %%
# Using ReMe Long-Term Memory
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. note:: We provide an example of using ReMe long-term memory in the GitHub repository under the ``examples/long_term_memory/reme`` directory.
#

from agentscope.memory import ReMePersonalLongTermMemory

# Create ReMe personal long-term memory instance
reme_long_term_memory = ReMePersonalLongTermMemory(
agent_name="Friday",
user_name="user_123",
model=DashScopeChatModel(
model_name="qwen3-max",
api_key=os.environ.get("DASHSCOPE_API_KEY"),
stream=False,
),
embedding_model=DashScopeTextEmbedding(
model_name="text-embedding-v4",
api_key=os.environ.get("DASHSCOPE_API_KEY"),
dimensions=1024,
),
)

# %%
# The ``ReMePersonalLongTermMemory`` class provides four main methods for long-term memory operations.
# They include ``record_to_memory`` and ``retrieve_from_memory`` for tool calls,
# as well as ``record`` and ``retrieve`` for direct calls.
#
# As an example, we use ``record_to_memory`` to record user preferences.
#


async def test_record_to_memory():
"""Test record_to_memory tool function interface"""
async with reme_long_term_memory:
result = await reme_long_term_memory.record_to_memory(
thinking="The user is sharing their travel preferences and habits",
content=[
"I prefer to stay in homestays when traveling to Hangzhou",
"I like to visit the West Lake in the morning",
"I enjoy drinking Longjing tea",
],
)
# Extract result text
result_text = " ".join(
block.get("text", "")
for block in result.content
if block.get("type") == "text"
)
print(f"Recording result: {result_text}")

asyncio.run(test_record_to_memory())


# %%
# output
#
# .. code-block:: console
#
# Recording result: Successfully recorded 3 memory/memories to personal memory.
#


# %%
# Then we use ``retrieve_from_memory`` to retrieve related memories.
#


async def test_retrieve_from_memory():
"""Test retrieve_from_memory tool function interface"""
async with reme_long_term_memory:
# First record some content
await reme_long_term_memory.record_to_memory(
thinking="User is sharing travel preferences",
content=[
"I prefer to stay in homestays when traveling to Hangzhou",
],
)

# Then retrieve
result = await reme_long_term_memory.retrieve_from_memory(
keywords=["Hangzhou travel", "tea preference"],
)
retrieved_text = " ".join(
block.get("text", "")
for block in result.content
if block.get("type") == "text"
)
print(f"Retrieved memories: {retrieved_text}")

asyncio.run(test_retrieve_from_memory())


# %%
# output
#
# .. code-block:: console
#
# Retrieved memories: Keyword 'Hangzhou travel':
# user prefers to stay in homestays when traveling to Hangzhou
#
# Keyword 'tea preference':
# user prefers to stay in homestays when traveling to Hangzhou
#

# %%
# Besides the tool function interface, we can also use the ``record`` method to directly record message conversations.


async def test_record_direct():
"""Test record direct recording method"""
async with reme_long_term_memory:
await reme_long_term_memory.record(
msgs=[
Msg(
role="user",
content="I work as a software engineer and prefer remote work",
name="user",
),
Msg(
role="assistant",
content="Understood! You're a software engineer who values remote work flexibility.",
name="assistant",
),
Msg(
role="user",
content="I usually start my day at 9 AM with a cup of coffee",
name="user",
),
],
)
print("Successfully recorded conversation messages")

asyncio.run(test_record_direct())


# %%
# output
#
# .. code-block:: console
#
# Successfully recorded conversation messages
#

# %%
# Similarly, we use the ``retrieve`` method to retrieve related memories.
#


async def test_retrieve_direct():
"""Test retrieve direct retrieval method"""
async with reme_long_term_memory:
# First record some content
await reme_long_term_memory.record(
msgs=[
Msg(
role="user",
content="I work as a software engineer and prefer remote work",
name="user",
),
],
)

# Then retrieve
memories = await reme_long_term_memory.retrieve(
msg=Msg(
role="user",
content="What do you know about my work preferences?",
name="user",
),
)
print(
f"Retrieved memories: {memories if memories else 'No memories found'}",
)

asyncio.run(test_retrieve_direct())


# %%
# output
#
# .. code-block:: console
#
# Retrieved memories: user works as a software engineer and prefers remote work

# %%
# Integration with ReAct Agent
# ----------------------------------------
# In AgentScope, the ``ReActAgent`` class accepts a ``long_term_memory``
# parameter in its constructor, together with a ``long_term_memory_mode`` parameter.
#
# If ``long_term_memory_mode`` is set to ``"agent_control"`` or ``"both"``, the
# ``record_to_memory`` and ``retrieve_from_memory`` tool functions are
# registered automatically, allowing the agent to manage its long-term memory through tool calls.
#
# .. note:: To achieve the best results, the ``"agent_control"`` mode may require
# additional instructions in the system prompt.
#


# Create ReAct agent with long-term memory (agent_control mode)
async def test_react_agent_with_reme():
"""Test ReActAgent integration with ReMe personal memory"""
async with reme_long_term_memory:
agent_with_reme = ReActAgent(
name="Friday",
sys_prompt=(
"You are a helpful assistant named Friday with long-term memory capabilities. "
"\n\n## Memory Management Guidelines:\n"
"1. **Recording Memories**: When users share personal information, preferences, "
"habits, or facts about themselves, ALWAYS record them using `record_to_memory` "
"for future reference.\n"
"\n2. **Retrieving Memories**: BEFORE answering questions about the user's preferences, "
"past information, or personal details, you MUST FIRST call `retrieve_from_memory` "
"to check if you have any relevant stored information. Do NOT rely solely on the "
"current conversation context.\n"
"\n3. **When to Retrieve**: Call `retrieve_from_memory` when:\n"
" - User asks questions like 'what do I like?', 'what are my preferences?', "
"'what do you know about me?'\n"
" - User asks about their past behaviors, habits, or preferences\n"
" - User refers to information they mentioned before\n"
" - You need context about the user to provide personalized responses\n"
"\nAlways check your memory first before claiming you don't know something about the user."
),
model=DashScopeChatModel(
model_name="qwen3-max",
api_key=os.environ.get("DASHSCOPE_API_KEY"),
stream=False,
),
formatter=DashScopeChatFormatter(),
toolkit=Toolkit(),
memory=InMemoryMemory(),
long_term_memory=reme_long_term_memory,
long_term_memory_mode="agent_control", # Use agent_control mode
)

# User shares preferences
msg = Msg(
role="user",
content="When I travel to Hangzhou, I prefer to stay in a homestay",
name="user",
)
response = await agent_with_reme(msg)
print(f"Agent response: {response.get_text_content()}")

# Clear short-term memory to test long-term memory
await agent_with_reme.memory.clear()

# Query preferences
msg2 = Msg(
role="user",
content="what preference do I have?",
name="user",
)
response2 = await agent_with_reme(msg2)
print(f"Agent response: {response2.get_text_content()}")

asyncio.run(test_react_agent_with_reme())


# %%
# output
#
# .. code-block:: console
#
# system: {
# "type": "tool_result",
# "id": "call_7de95a3382e241309b16cb9e",
# "name": "retrieve_from_memory",
# "output": [
# {
# "type": "text",
# "text": "Keyword 'preference':\nuser prefers to stay in a homestay when traveling to Hangzhou"
# }
# ]
# }
# Friday: Based on what I know, you prefer to stay in a homestay when traveling to Hangzhou. Let me know if there's anything else you'd like me to remember or clarify!
# Agent response: Based on what I know, you prefer to stay in a homestay when traveling to Hangzhou. Let me know if there's anything else you'd like me to remember or clarify!
#

# %%
# Next, we create a fresh agent that shares the same long-term memory instance,
# clear its short-term memory, and ask it about the user's preferences.
#


async def retrieve_reme_preferences():
"""Retrieve user preferences from long-term memory"""
async with reme_long_term_memory:
        # Create a fresh agent that reuses the same long-term memory instance
agent_with_reme = ReActAgent(
name="Friday",
sys_prompt="You are an assistant with long-term memory capabilities.",
model=DashScopeChatModel(
api_key=os.environ.get("DASHSCOPE_API_KEY"),
model_name="qwen3-max",
stream=False,
),
formatter=DashScopeChatFormatter(),
toolkit=Toolkit(),
memory=InMemoryMemory(),
long_term_memory=reme_long_term_memory,
long_term_memory_mode="agent_control",
)

# Clear short-term memory
await agent_with_reme.memory.clear()
        # Ask the agent about the previously recorded preferences
msg2 = Msg("user", "What are my preferences? Answer briefly.", "user")
await agent_with_reme(msg2)

asyncio.run(retrieve_reme_preferences())


# %%
# output
#
# .. code-block:: console
#
# system: {
# "type": "tool_result",
# "id": "call_e7d354e450684ceca331a8d9",
# "name": "retrieve_from_memory",
# "output": [
# {
# "type": "text",
# "text": "No memories found for the given keywords."
# }
# ]
# }
# Friday: I don't have any recorded information about your preferences yet. Let me know what you like or need, and I can remember it for future reference!
#
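
# %%
# The examples above use the ``agent_control`` mode, where the agent decides when
# to call the memory tool functions. The following sketch (not part of the original
# example) shows the same setup with ``long_term_memory_mode="static_control"``,
# where the agent is expected to retrieve from and record to long-term memory
# automatically on every reply instead of exposing the memory tools. Treat the
# exact runtime behavior as an assumption to verify against your AgentScope version.


async def sketch_static_control_agent():
    """Sketch: a ReAct agent that manages ReMe memory without tool calls."""
    async with reme_long_term_memory:
        agent_static = ReActAgent(
            name="Friday",
            sys_prompt="You are a helpful assistant named Friday.",
            model=DashScopeChatModel(
                model_name="qwen3-max",
                api_key=os.environ.get("DASHSCOPE_API_KEY"),
                stream=False,
            ),
            formatter=DashScopeChatFormatter(),
            toolkit=Toolkit(),
            memory=InMemoryMemory(),
            long_term_memory=reme_long_term_memory,
            # In "static_control" mode the memory tool functions are not
            # registered; the agent is expected to retrieve before replying
            # and record afterwards by itself.
            long_term_memory_mode="static_control",
        )

        msg = Msg(
            role="user",
            content="I prefer window seats on long flights",
            name="user",
        )
        response = await agent_static(msg)
        print(f"Agent response: {response.get_text_content()}")
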

# %%
# Customizing Long-Term Memory
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -163,8 +485,17 @@ async def retrieve_preferences()
# - - For ``"static_control"`` mode, you must implement the ``record`` and ``retrieve`` methods.
# - For ``"agent_control"`` mode, the ``record_to_memory`` and ``retrieve_from_memory`` methods must be implemented.
# * - ``Mem0LongTermMemory``
# - | ``record``
# | ``retrieve``
# | ``record_to_memory``
# | ``retrieve_from_memory``
# - Long-term memory implementation based on the mem0 library, supporting vector storage and retrieval.
# * - ``ReMePersonalLongTermMemory``
# - | ``record``
# | ``retrieve``
# | ``record_to_memory``
# | ``retrieve_from_memory``
# - Personal memory implementation based on the ReMe framework, providing powerful memory management and retrieval capabilities.
#
#
#
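
# %%
# As a rough illustration of the requirements in the table above, the following
# sketch subclasses ``LongTermMemoryBase`` with a naive in-memory keyword store.
# The method signatures mirror the calls shown earlier in this tutorial; the
# import paths of ``LongTermMemoryBase``, ``ToolResponse`` and ``TextBlock``,
# the base-class constructor, and the exact return types are assumptions to
# check against your AgentScope version.


from agentscope.memory import LongTermMemoryBase
from agentscope.message import Msg, TextBlock
from agentscope.tool import ToolResponse


class NaiveLongTermMemory(LongTermMemoryBase):
    """A toy long-term memory that keeps plain-text notes in a Python list."""

    def __init__(self) -> None:
        super().__init__()
        self._notes: list[str] = []

    # Required for the "static_control" mode
    async def record(self, msgs: list[Msg], **kwargs) -> None:
        """Store the text content of the given messages."""
        self._notes.extend(
            msg.get_text_content()
            for msg in msgs
            if msg.get_text_content()
        )

    async def retrieve(self, msg: Msg, **kwargs) -> str:
        """Return notes that share at least one word with the query message."""
        words = (msg.get_text_content() or "").lower().split()
        hits = [
            note
            for note in self._notes
            if any(word in note.lower() for word in words)
        ]
        return "\n".join(hits)

    # Required for the "agent_control" mode
    async def record_to_memory(
        self,
        thinking: str,
        content: list[str],
        **kwargs,
    ) -> ToolResponse:
        """Tool function: store a list of strings as notes."""
        self._notes.extend(content)
        return ToolResponse(
            content=[
                TextBlock(type="text", text=f"Recorded {len(content)} note(s)."),
            ],
        )

    async def retrieve_from_memory(
        self,
        keywords: list[str],
        **kwargs,
    ) -> ToolResponse:
        """Tool function: return notes matching any of the keywords."""
        hits = [
            note
            for note in self._notes
            if any(keyword.lower() in note.lower() for keyword in keywords)
        ]
        return ToolResponse(
            content=[
                TextBlock(type="text", text="\n".join(hits) or "No memories found."),
            ],
        )
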