-
Notifications
You must be signed in to change notification settings - Fork 2.2k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Unable to Use Mem0 with Ollama Locally: Facing Errors with Qdrant and GraphRag Configurations #2030
Comments
Solution for Qdrant and Ollama Integration
import os
import subprocess
from mem0 import Memory
# Ensure the path exists
os.makedirs("./qdrant_data", exist_ok=True)
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"collection_name": "test_collection",
"path": "./qdrant_data", # Local persistent storage
"host": "localhost",
"port": 6333,
"embedding_model_dims": 768, # Verified dimension for nomic-embed-text
},
},
"llm": {
"provider": "ollama",
"config": {
"model": "llama3:latest", # Updated to a more reliable model
"temperature": 0,
"max_tokens": 8000,
"ollama_base_url": "http://localhost:11434",
},
},
"embedder": {
"provider": "ollama",
"config": {
"model": "nomic-embed-text:latest",
"ollama_base_url": "http://localhost:11434",
},
},
}
# Proper Memory initialization
m = Memory.from_config(config)
# Adding memories with proper metadata
m.add("I'm visiting Paris", user_id="john", metadata={"type": "travel"})
m.add("I'm listening to music", user_id="john", metadata={"type": "activity"})
# Retrieve memories
memories = m.get_all(user_id="john")
print(memories)
Solution for GraphRAG and Neo4j Integration
from mem0 import Memory
config = {
"llm": {
"provider": "ollama",
"config": {
"model": "llama3:latest", # Updated model name
"temperature": 0,
"max_tokens": 8000,
"ollama_base_url": "http://localhost:11434",
},
},
"graph_store": {
"provider": "neo4j",
"config": {
"url": "neo4j+s://your-neo4j-instance.databases.neo4j.io",
"username": "neo4j",
# IMPORTANT: Use environment variables for sensitive credentials
"password": os.getenv("NEO4J_PASSWORD")
}
},
"version": "v1.1"
}
# Initialize Memory
m = Memory.from_config(config)
# Add memory with user context
m.add("I like pizza", user_id="alice")
# Retrieve and search memories
memories = m.get_all(user_id="alice")
search_result = m.search("tell me my name.", user_id="alice")
Key Recommendations:
Specific Error Resolutions:
Let me know if any of that works. If there's any other data you can provide I can probably help you troubleshoot further. |
This error was not fixed with the given resolutions @AbhigyaWangoo Full code: from mem0 import Memory
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"collection_name": "memory_test",
"host": "localhost",
"path": "qdrant_storage",
"port": 6333,
"embedding_model_dims": 384, # Change this according to your local model's dimensions
},
},
"llm": {
"provider": "ollama",
"config": {
"model": "llama3.1:8b",
"temperature": 0.5,
"max_tokens": 8000,
"ollama_base_url": "http://localhost:11434", # Ensure this URL is correct
},
},
"embedder": {
"provider": "huggingface",
"config": {
"model": "all-MiniLM-L6-v2"
}
},
}
m = Memory.from_config(config)
statements = [
"I love hiking on weekends to enjoy nature.",
"My favorite color is blue.",
"I recently started learning how to play the guitar.",
"Cooking is my way of relaxing after a long day.",
"I have a black Labrador named Max.",
"Reading historical fiction is my favorite pastime.",
"I can’t start my day without a cup of coffee.",
"I enjoy painting landscapes during my free time.",
"One of my goals is to visit all seven continents.",
"I’m a huge fan of mystery novels.",
"I love experimenting with new recipes in the kitchen.",
"Photography is a hobby I picked up during the pandemic.",
"I’m currently training for a half-marathon.",
"My favorite sport to watch is basketball.",
"I enjoy building and flying model airplanes.",
"I speak three languages fluently.",
"Baking desserts for friends and family makes me happy.",
"I have a small collection of vintage records.",
"Gardening is a hobby that helps me unwind.",
"I love binge-watching documentaries about space."
]
for s in statements:
    m.add(s, user_id="test", metadata={"category": "hobbies"})
Errors: |
It has added two memories, wont go further |
Hi there! I understand you're having issues with Mem0 when using it with Ollama locally. Let me help you resolve these problems. The issues you're experiencing can be broken down into two main problems:
Here's how to fix these issues:
import os
from mem0 import Memory
# Ensure qdrant directory exists
os.makedirs("./qdrant_data", exist_ok=True)
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"collection_name": "test_collection",
"path": "./qdrant_data", # Add local storage path
"host": "localhost",
"port": 6333,
"embedding_model_dims": 384 # Correct dimension for nomic-embed-text
},
},
"llm": {
"provider": "ollama",
"config": {
"model": "llama2", # Use a stable model
"temperature": 0,
"max_tokens": 8000,
"ollama_base_url": "http://localhost:11434",
},
},
"embedder": {
"provider": "ollama",
"config": {
"model": "nomic-embed-text",
"ollama_base_url": "http://localhost:11434",
},
},
"version": "v1.1"
}
# Initialize Memory
m = Memory.from_config(config)
# Add memories with proper metadata
m.add("I'm visiting Paris", user_id="john", metadata={"type": "event", "category": "travel"})
config = {
"llm": {
"provider": "ollama",
"config": {
"model": "llama2",
"temperature": 0,
"max_tokens": 8000,
"ollama_base_url": "http://localhost:11434",
},
},
"graph_store": {
"provider": "neo4j",
"config": {
"url": "neo4j://localhost:7687", # Use local Neo4j or your actual URL
"username": "neo4j",
"password": os.getenv("NEO4J_PASSWORD") # Use environment variable
}
},
"version": "v1.1"
} Key Points to Check:
ollama serve
ollama list
ollama pull llama2
ollama pull nomic-embed-text
Common Issues and Solutions:
Let me know if you need any clarification or run into other issues! |
Didn't work |
Provided a quick fix if the prompt is too long for the model to follow in #2043 |
If you want to prevent the ERROR:root:Error in new_memories_with_actions: 'event', add chat messages with a role field: from mem0 import Memory
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"collection_name": "test",
"host": "localhost",
"port": 6333,
"embedding_model_dims": 768, # Change this according to your local model's dimensions
},
},
"llm": {
"provider": "ollama",
"config": {
"model": "llama3.2:latest",
"temperature": 0,
"max_tokens": 8000,
"ollama_base_url": "http://localhost:11434", # Ensure this URL is correct
},
},
"embedder": {
"provider": "ollama",
"config": {
"model": "nomic-embed-text:latest",
# Alternatively, you can use "snowflake-arctic-embed:latest"
"ollama_base_url": "http://localhost:11434",
},
},
"version": "v1.1"
}
m = Memory.from_config(config)
messages = [
{"role": "user", "content": "Hi, I'm Alex. I like to play cricket on weekends."},
{"role": "assistant", "content": "Hello Alex! It's great to know that you enjoy playing cricket on weekends. I'll remember that for future reference."}
]
m.add(messages, user_id="alice") |
🐛 Describe the bug
##First sample is for Qdrant server and ollama
import os
import subprocess
from collections import deque
from mem0 import Memory
# Configuration for Mem0 and Ollama
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"collection_name": "test",
"host": "localhost",
"port": 6333,
"embedding_model_dims": 768, # Match this to your local model's embedding dimensions
},
},
"llm": {
"provider": "ollama",
"config": {
"model": "llama3.2:3b-instruct-q4_K_M",
"temperature": 0,
"max_tokens": 8000,
"ollama_base_url": "http://localhost:11434", # Make sure Ollama is running at this URL
},
},
"embedder": {
"provider": "ollama",
"config": {
"model": "nomic-embed-text:latest",
"ollama_base_url": "http://localhost:11434",
},
},
"version": "v1.1"
}
m = Memory.from_config(config)
# Add a memory
m.add("I'm visiting Paris", user_id="john")
m.add("I'm listening to music", user_id="john")
m.add("I'm trying to learn French", user_id="john")
m.add("I'm going to the beach", user_id="john")
m.add("I'm going to the gym", user_id="john")
# Retrieve memories
memories = m.get_all(user_id="john")
print(memories)
##Above code is giving error as """ERROR:root:Error in new_memories_with_actions: 'event'"""
##Second sample is for graphrag and ollama
from mem0 import Memory
config = {
"llm": {
"provider": "ollama",
"config": {
"model": "llama3.2:3b-instruct-q4_K_M",
"temperature": 0,
"max_tokens": 8000,
"ollama_base_url": "http://localhost:11434", # Make sure Ollama is running at this URL,
# "api_key": "",
},
},
"graph_store": {
"provider": "neo4j",
"config": {
"url": "neo4j+s://170d45e6.databases.neo4j.io",
"username": "neo4j",
"password": "<REDACTED>"  # Never post real credentials publicly — rotate this password and load it from an environment variable
}
},
}
m = Memory.from_config(config_dict=config)
m.add("I like pizza", user_id="alice")
m.get_all(user_id="alice")
m.search("tell me my name.", user_id="alice")
##Above sample giving error as """openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable
"""
The text was updated successfully, but these errors were encountered: