Chat Integration
One of the most common use cases for Reeflect is enhancing chat applications with persistent memory capabilities. This guide shows how to integrate Reeflect with various chat interfaces.
Basic Chat Integration
Here's a simple example of integrating Reeflect with a chat application:
from reeflect import Reeflect
from reeflect.adapters.openai import OpenAIAdapter
class MemoryEnabledChat:
    """Simple chat application with memory capabilities.

    Each turn: the user's message is enhanced with relevant stored
    memories, a completion is generated via the OpenAI adapter, and new
    memories are extracted from the most recent exchanges.
    """

    def __init__(
        self,
        user_id: str = "default_user",
        api_key: str = "your_openai_api_key",
        completion_model: str = "gpt-4-turbo-preview",
    ):
        """Create a chat session with per-user persistent memory.

        Args:
            user_id: Namespace and on-disk storage key isolating this
                user's memories.
            api_key: OpenAI API key (previously hard-coded in the body).
            completion_model: Chat model used both for the adapter and the
                completion call (previously duplicated as two literals that
                could drift apart).
        """
        self.user_id = user_id
        self.completion_model = completion_model
        # Initialize memory system with local on-disk storage per user
        self.memory = Reeflect(
            adapter=OpenAIAdapter(
                api_key=api_key,
                embedding_model="text-embedding-3-small",
                completion_model=completion_model,
            ),
            storage_config={
                "type": "local",
                "path": f"./memory/{user_id}",
            },
        )
        self.conversation_history = []

    def chat(self, user_input: str) -> str:
        """Process user message and generate response with memory.

        Returns the assistant's reply and appends both sides of the
        exchange to ``self.conversation_history``.
        """
        # Add user message to conversation history
        self.conversation_history.append({
            "role": "user",
            "content": user_input,
        })
        # Enhance prompt with relevant memories
        enhanced_prompt = self.memory.enhance_prompt(
            prompt=user_input,
            conversation_history=self.conversation_history[-5:],  # Use recent context
            namespace=self.user_id,
            max_memories=3,
        )
        # BUG FIX: exclude the just-appended user message from the context
        # slice. The original sent it twice — once via the history slice and
        # again inside enhanced_prompt as the final user message.
        context = self.conversation_history[:-1][-5:]
        # Generate response using OpenAI with enhanced prompt
        client = self.memory.adapter.client
        response = client.chat.completions.create(
            model=self.completion_model,
            messages=[
                {"role": "system", "content": "You are a helpful assistant with memory."},
                *[{"role": msg["role"], "content": msg["content"]}
                  for msg in context],
                {"role": "user", "content": enhanced_prompt},
            ],
        )
        assistant_response = response.choices[0].message.content
        # Add assistant message to conversation history
        self.conversation_history.append({
            "role": "assistant",
            "content": assistant_response,
        })
        # Extract and store memories once we have at least 2 exchanges
        if len(self.conversation_history) >= 4:
            self.memory.extract_memories(
                conversation=self.conversation_history[-4:],
                namespace=self.user_id,
                min_importance=0.6,
                auto_store=True,
            )
        return assistant_response
Integration with Popular Frameworks
Reeflect can be integrated with various chat frameworks and platforms:
LangChain Integration
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, AIMessage, SystemMessage
from reeflect import Reeflect
from reeflect.adapters.openai import OpenAIAdapter
from reeflect.integrations.langchain import ReeflectMemoryCallbackHandler

# Build the Reeflect memory system backed by local storage
memory_system = Reeflect(
    adapter=OpenAIAdapter(api_key="your_openai_api_key"),
    storage_config={"type": "local", "path": "./memory"},
)

# Bridge Reeflect into LangChain: this callback handler both extracts
# memories from each exchange and enhances outgoing prompts.
memory_callback = ReeflectMemoryCallbackHandler(
    memory_system=memory_system,
    namespace="langchain_chat",
    extract_memories=True,
    enhance_prompts=True,
)

# A LangChain chat model that fires the memory callback on every call
chat = ChatOpenAI(
    model_name="gpt-4-turbo-preview",
    temperature=0.7,
    callbacks=[memory_callback],
)

# Seed the conversation with a system prompt and a first user turn
messages = [
    SystemMessage(content="You are a helpful assistant with memory of past interactions."),
    HumanMessage(content="My name is Alice and I prefer dark mode interfaces."),
]
response = chat(messages)
messages.append(AIMessage(content=response.content))

# Later in the conversation, the system will remember user preferences
messages.append(HumanMessage(content="What theme should you use when showing me data?"))
response = chat(messages)
# Response will reference dark mode preference
Streamlit Integration
import uuid

import streamlit as st
from reeflect import Reeflect
from reeflect.adapters.openai import OpenAIAdapter
from reeflect.integrations.streamlit import initialize_memory_chat

# Initialize Reeflect (cached so it only happens once per process)
@st.cache_resource
def get_memory_system():
    """Build the shared Reeflect instance backed by local storage."""
    return Reeflect(
        adapter=OpenAIAdapter(api_key=st.secrets["OPENAI_API_KEY"]),
        storage_config={"type": "local", "path": "./streamlit_memory"},
    )

memory_system = get_memory_system()

# Set up Streamlit app
st.title("Memory-Enhanced Chat")

# Initialize session state on first run.
# BUG FIX: the original used f"user_{hash(st.session_state)}" — the
# SessionState object is not reliably hashable and its hash is not stable
# across processes. A random UUID gives each session a unique id instead.
if "messages" not in st.session_state:
    st.session_state.messages = []
    st.session_state.user_id = f"user_{uuid.uuid4().hex}"

# Display chat history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Initialize memory chat component
memory_chat = initialize_memory_chat(
    memory_system=memory_system,
    user_id=st.session_state.user_id,
    messages=st.session_state.messages,
    extract_memories=True,
    visualization=True,
)

# Get user input
user_input = st.chat_input("What would you like to talk about?")
if user_input:
    # Add user message to chat
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.write(user_input)

    # Generate assistant response with memory
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response = memory_chat.generate_response(user_input)
            st.write(response)

    # Add assistant response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})

# Display memory visualization (if enabled)
if memory_chat.visualization_enabled:
    with st.expander("Memory Visualization"):
        memory_chat.render_memory_visualization()
React Integration
import React, { useState, useEffect } from 'react';
import { MemoryChat } from 'reeflect-react';
const ChatApp = () => {
const [messages, setMessages] = useState([]);
const [userId] = useState(`user_${Math.random().toString(36).substring(7)}`);
const handleNewMessage = (newMessage) => {
setMessages((prevMessages) => [...prevMessages, newMessage]);
};
return (
<div className="chat-container">
<h1>Memory-Enhanced Chat</h1>
<MemoryChat
apiKey="your_api_key"
userId={userId}
messages={messages}
onNewMessage={handleNewMessage}
extractMemories={true}
enhancePrompts={true}
model="gpt-4-turbo-preview"
showMemoryIndicator={true}
memoryVisualization="expandable" // Options: none, inline, expandable, sidebar
/>
{/* Optional memory explorer component */}
<MemoryExplorer
apiKey="your_api_key"
userId={userId}
height={300}
width="100%"
/>
</div>
);
};
Advanced Chat Patterns
Here are some advanced patterns for integrating memory in chat applications:
Memory Prompting
Allow users to explicitly prompt the system to remember specific information:
# Detect memory command in user input
if "remember that" in user_input.lower():
importance = 0.9 # High importance for explicit memory requests
memory_system.create(
content=user_input.replace("Remember that ", ""),
namespace=user_id,
importance=importance
)
# Acknowledge the memory was stored
return "I'll remember that."
Memory Reflection
Periodically reflect on conversation to consolidate memories:
# After every 10 exchanges, consolidate the conversation into one memory
if len(conversation_history) % 10 == 0:
    # Ask the memory system to summarize the conversation so far
    reflection = memory_system.reason(
        query="What are the key points from this conversation?",
        conversation_history=conversation_history,
        instruction="Summarize the main points, preferences, and decisions."
    )
    # Store the reflection as a synthetic memory
    # NOTE(review): MemoryType is not imported in any snippet on this page —
    # confirm the correct import path from the reeflect package.
    memory_system.create(
        content=reflection,
        namespace=user_id,
        memory_type=MemoryType.SYNTHETIC,
        importance=0.8
    )
Handling Multi-User Scenarios
For applications that serve multiple users, you'll need to implement proper isolation between user memories:
# Cache of per-user Reeflect instances, keyed by user_id.
# BUG FIX: the original referenced user_memory_systems without ever
# defining it, which raises NameError on the first call.
user_memory_systems = {}

def get_user_memory(user_id: str) -> Reeflect:
    """Get or create a memory system for a specific user.

    Instances are cached in ``user_memory_systems`` so repeated requests
    for the same user reuse one connection; each user's tables are
    prefixed to isolate their memories.
    """
    if user_id not in user_memory_systems:
        # Create a new memory system for this user
        user_memory_systems[user_id] = Reeflect(
            adapter=OpenAIAdapter(api_key="your_openai_api_key"),
            storage_config={
                "type": "postgres",  # Use a scalable backend for multi-user
                "connection_string": "postgresql://user:password@localhost:5432/reeflect",
                "table_prefix": f"user_{user_id}_"  # Namespace tables by user
            }
        )
    return user_memory_systems[user_id]
# In the request handler
def handle_chat_request(user_id, message):
    """Entry point for one chat request: resolve the caller's memory system.

    NOTE(review): the body is a deliberate stub in this guide — the actual
    chat processing is left to the application.
    """
    memory = get_user_memory(user_id)
    # Process the chat with this user's memory...
Memory Privacy and Transparency
Always provide transparency about memory usage in chat applications. Let users know what information is being stored and give them control over their memory data. Consider adding memory viewing and deletion capabilities to build trust.
Example privacy controls:
# Allow users to view their stored memories
def get_user_memories(user_id: str, limit: int = 20):
    """Return up to *limit* of the user's memories, newest first."""
    return get_user_memory(user_id).query(
        namespace=user_id,
        limit=limit,
        sort_by="created_at",
        sort_order="desc",
    )
# Allow users to delete specific memories
def delete_user_memory(user_id: str, memory_id: str):
    """Remove a single memory by id from the user's store."""
    user_memory = get_user_memory(user_id)
    user_memory.delete(memory_id)
    return {"success": True}
# Allow users to clear all their memories
def clear_user_memories(user_id: str):
    """Wipe every memory stored under this user's namespace."""
    user_memory = get_user_memory(user_id)
    user_memory.clear(namespace=user_id)
    return {"success": True}
Next Steps
Learn about related topics to enhance your chat integrations:
- Memory Visualization - Add visual memory exploration to your chat
- Memory Analytics - Analyze chat patterns and memory usage
- Enterprise Setup - Scale your memory-enabled chat for enterprise use