Code Examples
Practical code examples to help you integrate Memphora into your applications.
Quick Start
Get started with Memphora in seconds
from memphora_sdk import Memphora
# Initialize with your API key
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Store memories
memory.store("I love Python programming")
memory.store("I work at Google")
# Search memories
results = memory.search("What do I love?")
for mem in results:
    print(mem['content'])

Decorator Pattern
Automatic memory integration with decorators
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
@memory.remember
def chat(message: str, memory_context: str = "") -> str:
"""Your chatbot with automatic memory!"""
prompt = f"Context: {memory_context}\n\nUser: {message}"
return your_ai_model(prompt)
# Just call it - memory is automatic!
response = chat("What are my interests?")
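
In the snippet above, your_ai_model is a placeholder for whatever model call you use. One possible way to back it, assuming the official OpenAI Python SDK (v1+) and an OPENAI_API_KEY in the environment, is sketched below; any chat-capable model client could be substituted.

import os
from openai import OpenAI

# Illustrative implementation of the `your_ai_model` placeholder above.
# Assumes OPENAI_API_KEY is set; the model name is only an example.
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

def your_ai_model(prompt: str) -> str:
    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content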

Advanced Search
Semantic search with filtering and scoring
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Advanced search with filters
results = memory.client.search_memories_advanced(
    user_id="user123",
    query="programming languages",
    filters={"type": "preference"},
    include_related=True,
    min_score=0.7,
    limit=10
)
# Results are sorted by relevance
for mem in results:
    print(f"Score: {mem.get('score', 0):.2f}")
    print(f"Content: {mem['content']}\n")

Memory Extraction
Automatically extract memories from conversations
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Extract memories from a conversation
conversation = [
    {"role": "user", "content": "I work at Google as a software engineer"},
    {"role": "assistant", "content": "That's great! What do you work on?"},
    {"role": "user", "content": "I work on Python backend systems"}
]
# Automatically extract and store memories
extracted = memory.extract_from_conversation(conversation)
print(f"Extracted {len(extracted)} memories")
# Memories are automatically stored!
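
As a usage sketch, extraction fits naturally at the end of a chat session: accumulate the turns while the conversation happens, then extract once it ends. The loop below is illustrative only; run_chat_turn is a placeholder for whatever generates your assistant replies, like your_ai_model earlier on this page.

def run_chat_session(memory):
    """Hypothetical session loop: collect turns, then extract memories once."""
    conversation = []
    while True:
        user_message = input("You: ")
        if user_message.lower() in {"quit", "exit"}:
            break
        conversation.append({"role": "user", "content": user_message})
        reply = run_chat_turn(user_message)  # placeholder for your model call
        conversation.append({"role": "assistant", "content": reply})
        print(f"Assistant: {reply}")
    # Persist whatever the session revealed about the user
    extracted = memory.extract_from_conversation(conversation)
    print(f"Stored {len(extracted)} memories from this session")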

Memory Merging
Automatically merge similar memories
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Store similar memories
memory.store("I love Python")
memory.store("Python is my favorite language")
# Automatically merge similar memories
merged = memory.client.merge_memories(
    user_id="user123",
    similarity_threshold=0.8
)
print(f"Merged {len(merged)} memories")Context Compression
Get compressed context for efficient LLM usage
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Get optimized/compressed context
context = memory.get_compressed_context(
    query="user preferences",
    max_tokens=500
)
# Use compressed context in your LLM prompt
prompt = f"""User Context:
{context}
User Question: What do I like?"""
response = your_llm(prompt)

Batch Operations
Efficiently manage multiple memories
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Store multiple memories at once
memories_to_store = [
    "I love Python programming",
    "I work at Google",
    "I have a pet dog named Max"
]
for content in memories_to_store:
    memory.store(content)
# Or use batch operations
memory.client.batch_add_memories(
    user_id="user123",
    memories=[
        {"content": "Memory 1", "metadata": {"type": "fact"}},
        {"content": "Memory 2", "metadata": {"type": "preference"}}
    ]
)
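
For large imports it can be worth sending the batch call in fixed-size chunks rather than as one request. The helper below is a plain-Python sketch wrapped around the batch_add_memories call shown above; the name add_in_chunks and the chunk size are illustrative, not part of the SDK.

def add_in_chunks(memory, user_id, memories, chunk_size=100):
    """Send memories to batch_add_memories in fixed-size chunks (sketch)."""
    for start in range(0, len(memories), chunk_size):
        chunk = memories[start:start + chunk_size]
        memory.client.batch_add_memories(user_id=user_id, memories=chunk)

add_in_chunks(memory, "user123", [
    {"content": "Memory 1", "metadata": {"type": "fact"}},
    {"content": "Memory 2", "metadata": {"type": "preference"}}
])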

Graph Relationships
Link memories with semantic relationships
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Store memories
memory1 = memory.store("I work at Google")
memory2 = memory.store("I use Python at work")
# Link related memories
memory.client.link_memories(
    source_id=memory1['id'],
    target_id=memory2['id'],
    relationship_type="related"
)
# Get related memories
related = memory.client.get_related_memories(
    memory_id=memory1['id']
)
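
The relationship graph can also be walked hop by hop. The sketch below assumes, as in the search examples above, that each memory dict exposes 'id' and 'content'; the traversal itself is an ordinary breadth-first walk over repeated get_related_memories calls.

from collections import deque

def walk_related(memory, start_id, max_depth=2):
    """Breadth-first walk over linked memories, up to max_depth hops (sketch).

    Assumes related memories carry 'id' and 'content' keys, as in the
    search results shown earlier on this page.
    """
    seen = {start_id}
    queue = deque([(start_id, 0)])
    while queue:
        memory_id, depth = queue.popleft()
        if depth >= max_depth:
            continue
        for related in memory.client.get_related_memories(memory_id=memory_id):
            if related['id'] in seen:
                continue
            seen.add(related['id'])
            print("  " * (depth + 1) + related['content'])
            queue.append((related['id'], depth + 1))

walk_related(memory, memory1['id'])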

Multi-Agent Support
Track memories by agent_id and run_id for AutoGPT/CrewAI
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Store memories with agent tracking
memory.store(
    content="Researched competitor pricing",
    metadata={
        "agent_id": "research_agent",
        "run_id": "run_2024_001",
        "task": "market_research"
    }
)
memory.store(
    content="Generated social media content",
    metadata={
        "agent_id": "content_agent",
        "run_id": "run_2024_001",
        "task": "content_creation"
    }
)
# Search memories by specific agent
agent_memories = memory.search(
    query="agent tasks",
    metadata_filter={"agent_id": "research_agent"}
)
# Or search by run_id to get all agents' work from that run
run_memories = memory.search(
    query="all tasks",
    metadata_filter={"run_id": "run_2024_001"}
)
print(f"Research agent completed {len(agent_memories)} tasks")Group Chat (Multi-User)

Group Chat (Multi-User)
Multi-user conversations with shared and private memories
from memphora_sdk import Memphora
memory = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)
# Pick a group identifier shared by all participants
group_id = "team_standup_2024"
# Store shared memory (visible to all group members)
memory.store(
    content="Team decided to use Python for the backend",
    metadata={
        "group_id": group_id,
        "visibility": "shared",
        "type": "decision"
    }
)
# Store private memory (only for specific user)
memory.store(
    content="I need to review the database schema tomorrow",
    metadata={
        "group_id": group_id,
        "visibility": "private",
        "user_id": "user123",  # Only this user can see it
        "type": "todo"
    }
)
# Search group memories (gets both shared + user's private)
group_context = memory.search(
    query="team decisions and my tasks",
    metadata_filter={
        "AND": [
            {"group_id": group_id},
            {"OR": [
                {"visibility": "shared"},
                {"AND": [
                    {"visibility": "private"},
                    {"user_id": "user123"}
                ]}
            ]}
        ]
    }
)
print(f"Group context: {len(group_context)} relevant memories")