Skip to content
  • Auto
  • Light
  • Dark
DiscordForumGitHubSign up
Building Agents
Memory
View as Markdown
Copy Markdown

Open in Claude
Open in ChatGPT

Exporting Archival Memories

You can export all archival memories (passages) from an agent programmatically using the Letta SDK. This is useful for:

  • Backing up agent knowledge
  • Analyzing what an agent has learned
  • Migrating memories between agents
  • Auditing archival content

Below is a Python script that paginates through all of an agent’s archival memories and exports them to a JSON file:

#!/usr/bin/env python3
"""
Utility script to export all archival memories (passages) from a Letta agent.
Usage:
python export_agent_memories.py <agent_id> [--output <file>] [--limit <limit>]
Example:
python export_agent_memories.py agent-123e4567-e89b-42d3-8456-426614174000 --output memories.json
"""
import argparse
import json
import os
import sys
from typing import Any, Dict, List
from letta_client import Letta
def export_agent_memories(
    client: Letta,
    agent_id: str,
    page_limit: int = 100,
) -> List[Dict[str, Any]]:
    """Collect every archival passage stored on an agent.

    Pages through the passages endpoint oldest-first, following the
    cursor of the last passage on each page, until a short or empty
    page signals the end of the data.

    Args:
        client: Initialized Letta client
        agent_id: The agent ID in format 'agent-<uuid4>'
        page_limit: Number of results per page (default 100)

    Returns:
        List of passage dictionaries with embedding and embedding_config removed
    """

    def _as_clean_dict(passage) -> Dict[str, Any]:
        # Support both pydantic v2 (model_dump) and v1 (dict) passage objects.
        raw = passage.model_dump() if hasattr(passage, "model_dump") else passage.dict()
        for bulky_key in ("embedding", "embedding_config"):
            raw.pop(bulky_key, None)
        return raw

    exported: List[Dict[str, Any]] = []
    cursor = None
    page = 1

    print(f"Exporting archival memories for agent: {agent_id}")
    print(f"Using pagination with limit: {page_limit}")
    print("-" * 60)

    while True:
        print(f"Fetching page {page}...", end=" ", flush=True)
        try:
            batch = client.agents.passages.list(
                agent_id=agent_id,
                after=cursor,
                limit=page_limit,
                ascending=True,  # oldest to newest
            )
        except Exception as e:
            print(f"\nError fetching memories: {e}")
            raise

        if not batch:
            print("(no more results)")
            break

        print(f"got {len(batch)} passages")
        exported.extend(_as_clean_dict(p) for p in batch)

        # A page shorter than the limit is necessarily the last one.
        if len(batch) < page_limit:
            break

        # Resume the next request after the final passage of this page.
        tail = batch[-1]
        cursor = tail.id if hasattr(tail, "id") else tail["id"]
        page += 1

    print("-" * 60)
    print(f"Total passages exported: {len(exported)}")
    return exported
def main():
    """CLI entry point: parse arguments, export memories, write JSON.

    Returns:
        Process exit code — 0 on success, 1 on any failure.
    """
    parser = argparse.ArgumentParser(
        description="Export archival memories from a Letta agent"
    )
    parser.add_argument(
        "agent_id",
        help="Agent ID in format 'agent-<uuid4>'",
    )
    parser.add_argument(
        "--output",
        "-o",
        help="Output JSON file path (default: <agent_id>_memories.json)",
    )
    parser.add_argument(
        "--limit",
        "-l",
        type=int,
        default=100,
        help="Number of results per page (default: 100)",
    )
    args = parser.parse_args()

    # Fail fast on missing credentials instead of erroring mid-export.
    api_key = os.getenv("LETTA_API_KEY")
    if not api_key:
        print("Error: LETTA_API_KEY environment variable not set", file=sys.stderr)
        print("Please export LETTA_API_KEY with your API key", file=sys.stderr)
        return 1

    destination = args.output if args.output else f"{args.agent_id}_memories.json"

    try:
        memories = export_agent_memories(
            client=Letta(token=api_key),
            agent_id=args.agent_id,
            page_limit=args.limit,
        )
        # default=str keeps non-JSON-native values (e.g. datetimes) writable.
        with open(destination, "w") as out_file:
            json.dump(memories, out_file, indent=2, default=str)
        print(f"\nMemories exported successfully to: {destination}")
        return 0
    except Exception as e:
        print(f"\nError: {e}", file=sys.stderr)
        return 1


if __name__ == "__main__":
    sys.exit(main())

Install the Letta Python SDK:

Terminal window
pip install letta-client

Set your API key:

Terminal window
export LETTA_API_KEY="your-api-key-here"

Export all memories from an agent:

Terminal window
python export_agent_memories.py agent-123e4567-e89b-42d3-8456-426614174000

Specify a custom output file:

Terminal window
python export_agent_memories.py agent-123e4567-e89b-42d3-8456-426614174000 --output my_memories.json

Adjust pagination size:

Terminal window
python export_agent_memories.py agent-123e4567-e89b-42d3-8456-426614174000 --limit 50

The script exports passages as a JSON array. Each passage contains all fields except embedding and embedding_config:

[
{
"id": "passage-123e4567-e89b-42d3-8456-426614174000",
"text": "The user prefers Python for data science projects",
"created_at": "2025-01-15T10:30:00Z",
"updated_at": null,
"tags": ["preference", "programming"],
"metadata": {},
"file_id": null,
"file_name": null,
"source_id": null,
"archive_id": "archive-abc123",
"created_by_id": "user-xyz789",
"last_updated_by_id": null,
"is_deleted": false
}
]