SDK Examples

Copy-paste ready code examples for the Memphora Python SDK.

Quick Start

Get started with Memphora in seconds

from memphora_sdk import Memphora

# Create a client scoped to a single user; the API key authenticates requests.
client = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)

# Persist a couple of facts about the user.
client.store("I love Python programming")
client.store("I work at Google")

# Retrieve memories relevant to a natural-language question.
matches = client.search("What do I love?")
for match in matches:
    print(match['content'])

Decorator Pattern

Automatic memory integration with decorators

from memphora_sdk import Memphora

client = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)

@client.remember
def chat(message: str, memory_context: str = "") -> str:
    """Chatbot entry point; the decorator fills in memory_context automatically."""
    prompt = f"Context: {memory_context}\n\nUser: {message}"
    return your_ai_model(prompt)

# No manual retrieval needed — the decorator injects relevant memories per call.
response = chat("What are my interests?")

Search with Reranking

Improve result relevance by reordering search hits with an external reranking provider

from memphora_sdk import Memphora

client = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)

# Fetch a wider candidate set, then let an external reranker order it by relevance.
hits = client.search(
    query="programming languages",
    limit=10,
    rerank=True,
    rerank_provider="cohere"  # or "jina"
)

for hit in hits:
    print(hit['content'])

Multi-Agent Support

Track memories per agent via agent_id — useful for multi-agent frameworks such as AutoGPT or CrewAI

from memphora_sdk import Memphora

client = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)

# Record a memory attributed to a specific agent and run.
client.store_agent_memory(
    agent_id="research_agent",
    content="Found competitor pricing data",
    run_id="run_001"
)

# Query only that agent's memories, scoped to the same run.
agent_hits = client.search_agent_memories(
    agent_id="research_agent",
    query="competitor info",
    run_id="run_001"
)

Group Chat Memory

Store and search shared memories for multi-user (group) conversations

from memphora_sdk import Memphora

client = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)

# Write a memory shared by every member of the group.
client.store_group_memory(
    group_id="team_project",
    content="Decided to use FastAPI for backend"
)

# Any participant can later search the shared pool.
group_hits = client.search_group_memories(
    group_id="team_project",
    query="What tech stack did we choose?"
)

Get Context for LLM

Retrieve relevant context for your prompts

from memphora_sdk import Memphora

client = Memphora(
    user_id="user123",
    api_key="your-api-key-here"
)

# Pull the top-5 memories relevant to the query, formatted for prompt injection.
context = client.get_context(
    query="user preferences",
    limit=5
)

# Splice the retrieved context into the LLM prompt.
prompt = f"""Context: {context}

User: What do I like?"""

response = your_llm(prompt)