# Quick Start

This guide gets you up and running with Reeflect in just a few minutes.
## Basic Setup

```python
from reeflect import Reeflect
from reeflect.adapters.openai import OpenAIAdapter

# Initialize with the OpenAI adapter and local on-disk storage
memory = Reeflect(
    adapter=OpenAIAdapter(
        api_key="your_openai_api_key",
        embedding_model="text-embedding-3-small",
        completion_model="gpt-4-turbo-preview"
    ),
    storage_config={
        "type": "local",
        "path": "./memory_storage"
    }
)
```
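Hardcoding API keys is easy to leak. A common alternative is to read the key from an environment variable; the sketch below is plain Python and uses only the constructor arguments shown above (the `OPENAI_API_KEY` variable name is a conventional choice, not something Reeflect requires):

```python
import os

from reeflect import Reeflect
from reeflect.adapters.openai import OpenAIAdapter

# Same setup as above, but the key is read from the environment instead of hardcoded
memory = Reeflect(
    adapter=OpenAIAdapter(
        api_key=os.environ["OPENAI_API_KEY"],
        embedding_model="text-embedding-3-small",
        completion_model="gpt-4-turbo-preview"
    ),
    storage_config={"type": "local", "path": "./memory_storage"}
)
```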
## Storing Memories

```python
# Store a single memory
memory_id = memory.create(
    content="The user prefers dark mode in all applications.",
    namespace="user_preferences",
    importance=0.8
)

# Store multiple memories in one batch
memory_ids = memory.batch_create([
    {
        "content": "The user is allergic to peanuts.",
        "namespace": "user_health",
        "importance": 0.9
    },
    {
        "content": "The user's favorite color is blue.",
        "namespace": "user_preferences",
        "importance": 0.6
    }
])
```
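`batch_create` hands back the identifiers of the newly stored memories (assumed here to be a list, one ID per entry, mirroring the single `create` call), which you can keep for later lookups:

```python
# memory_ids is assumed to be one ID per stored memory; keep them for later retrieval
print(f"Stored {len(memory_ids)} memories: {memory_ids}")
```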
## Retrieving Memories

```python
# Retrieve a specific memory by ID
memory_obj = memory.retrieve(memory_id)

# Search memories by semantic similarity
results = memory.search(
    query="What are the user's color preferences?",
    namespace="user_preferences",
    limit=5
)

for memory_obj, similarity in results:
    print(f"Memory: {memory_obj.content} (Score: {similarity:.2f})")
## Enhancing LLM Prompts

```python
# Enhance a prompt with relevant memories
enhanced_prompt = memory.enhance_prompt(
    prompt="What theme should I use for the dashboard?",
    namespace="user_preferences",
    max_memories=3
)

# Generate a response using OpenAI with the enhanced prompt
client = memory.adapter.client
response = client.chat.completions.create(
    model="gpt-4-turbo-preview",
    messages=[
        {"role": "system", "content": "You are a helpful assistant with memory."},
        {"role": "user", "content": enhanced_prompt}
    ]
)

print(response.choices[0].message.content)
```
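To close the loop, you can write facts from the conversation back into Reeflect using the same `create` call introduced earlier, so later prompts can draw on them. The namespace and importance below are illustrative choices, not required values:

```python
# Persist the assistant's reply so future prompts can retrieve it
assistant_reply = response.choices[0].message.content
memory.create(
    content=f"Assistant recommended: {assistant_reply}",
    namespace="user_preferences",
    importance=0.5
)
```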
## Complete Examples

Check out our complete example applications to see Reeflect in action.
## Next Steps

Now that you understand the basics, read Basic Concepts for a deeper understanding of Reeflect's architecture and capabilities.