Unable to Use Mem0 with Ollama Locally: Facing Errors with Qdrant and GraphRAG Configurations #2030

abhijeetsuryawanshi12 opened this issue Nov 15, 2024 · 7 comments


@abhijeetsuryawanshi12

🐛 Describe the bug

# First sample is for Qdrant server and Ollama

import os
import subprocess
from collections import deque
from mem0 import Memory

# Configuration for Mem0 and Ollama
config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test",
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # Match this to your local model's embedding dimensions
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.2:3b-instruct-q4_K_M",
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",  # Make sure Ollama is running at this URL
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text:latest",
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "version": "v1.1"
}

m = Memory.from_config(config)

# Add a memory
m.add("I'm visiting Paris", user_id="john")
m.add("I'm listening to music", user_id="john")
m.add("I'm trying to learn French", user_id="john")
m.add("I'm going to the beach", user_id="john")
m.add("I'm going to the gym", user_id="john")

# Retrieve memories
memories = m.get_all(user_id="john")
print(memories)

The above code fails with: ERROR:root:Error in new_memories_with_actions: 'event'

# Second sample is for GraphRAG and Ollama

from mem0 import Memory

config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.2:3b-instruct-q4_K_M",
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",  # Make sure Ollama is running at this URL
            # "api_key": "",
        },
    },
    "graph_store": {
        "provider": "neo4j",
        "config": {
            "url": "neo4j+s://170d45e6.databases.neo4j.io",
            "username": "neo4j",
            "password": "F8plXttQXj19K4YoVdJyIHGRUdmEKXvIt47ff9v6dP4"
        },
        "llm": {
            "provider": "ollama",
            "config": {
                "model": "llama3.2:3b-instruct-q4_K_M",
                "temperature": 0,
                "max_tokens": 8000,
                "ollama_base_url": "http://localhost:11434",  # Make sure Ollama is running at this URL
                # "api_key": "",
            },
        },
    },
    "version": "v1.1"
}

m = Memory.from_config(config_dict=config)

m.add("I like pizza", user_id="alice")

m.get_all(user_id="alice")

m.search("tell me my name.", user_id="alice")

The above sample fails with: openai.OpenAIError: The api_key client option must be set either by passing api_key to the client or by setting the OPENAI_API_KEY environment variable

@AbhigyaWangoo

Solution for Qdrant and Ollama Integration

  1. Corrected Configuration for Qdrant and Ollama:
import os
import subprocess
from mem0 import Memory

# Ensure the path exists
os.makedirs("./qdrant_data", exist_ok=True)

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test_collection",
            "path": "./qdrant_data",  # Local persistent storage
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # Verified dimension for nomic-embed-text
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3:latest",  # Updated to a more reliable model
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text:latest",
            "ollama_base_url": "http://localhost:11434",
        },
    },
}

# Proper Memory initialization
m = Memory.from_config(config)

# Adding memories with proper metadata
m.add("I'm visiting Paris", user_id="john", metadata={"type": "travel"})
m.add("I'm listening to music", user_id="john", metadata={"type": "activity"})

# Retrieve memories
memories = m.get_all(user_id="john")
print(memories)

Solution for GraphRAG and Neo4j Integration

  1. Corrected Configuration for GraphRAG with Ollama:
import os
from mem0 import Memory

config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3:latest",  # Updated model name
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "graph_store": {
        "provider": "neo4j",
        "config": {
            "url": "neo4j+s://your-neo4j-instance.databases.neo4j.io",
            "username": "neo4j",
            # IMPORTANT: Use environment variables for sensitive credentials
            "password": os.getenv("NEO4J_PASSWORD")
        }
    },
    "version": "v1.1"
}

# Initialize Memory
m = Memory.from_config(config)

# Add memory with user context
m.add("I like pizza", user_id="alice")

# Retrieve and search memories
memories = m.get_all(user_id="alice")
search_result = m.search("tell me my name.", user_id="alice")

Key Recommendations:

  1. For Qdrant:

    • Use a local path for persistent storage
    • Ensure correct embedding model dimensions
    • Add metadata to memories for better tracking
  2. For GraphRAG and Neo4j:

    • NEVER hardcode passwords (use environment variables)
    • Use the latest Ollama models
    • Verify Neo4j connection details
  3. General Troubleshooting:

    • Confirm Ollama is running (ollama serve)
    • Verify model availability (ollama list)
    • Check Qdrant and Neo4j services
    • Set logging to DEBUG for more insights
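
For the DEBUG-logging tip, a minimal sketch using only the standard library (mem0 reports these failures through the root logger, as the ERROR:root: prefix shows):

    import logging

    # Show mem0's debug output and full tracebacks instead of one-line root-logger errors
    logging.basicConfig(
        level=logging.DEBUG,
        format="%(asctime)s %(name)s %(levelname)s %(message)s",
    )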

Specific Error Resolutions:

  • "ERROR:root:Error in new_memories_with_actions: 'event'":

    • Add explicit metadata
    • Ensure consistent path and permissions
    • Verify Qdrant configuration
  • "OpenAI API key error":

    • Remove OpenAI-specific configurations
    • Use Ollama as the primary provider
    • Set up local LLM entirely through Ollama

Let me know if any of that works. If there's any other data you can provide, I can probably help you troubleshoot further.

@microcoder-py

microcoder-py commented Nov 21, 2024

ERROR:root:Error in new_memories_with_actions: 'event'

This error was not fixed with the given resolutions @AbhigyaWangoo

Full code:

from mem0 import Memory

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "memory_test",
            "host": "localhost",
            "path": "qdrant_storage",
            "port": 6333,
            "embedding_model_dims": 384,  # Change this according to your local model's dimensions
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.1:8b",
            "temperature": 0.5,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",  # Ensure this URL is correct
        },
    },
    "embedder": {
        "provider": "huggingface",
        "config": {
            "model": "all-MiniLM-L6-v2"
        }
    },
}

m = Memory.from_config(config)

statements = [
    "I love hiking on weekends to enjoy nature.",
    "My favorite color is blue.",
    "I recently started learning how to play the guitar.",
    "Cooking is my way of relaxing after a long day.",
    "I have a black Labrador named Max.",
    "Reading historical fiction is my favorite pastime.",
    "I can’t start my day without a cup of coffee.",
    "I enjoy painting landscapes during my free time.",
    "One of my goals is to visit all seven continents.",
    "I’m a huge fan of mystery novels.",
    "I love experimenting with new recipes in the kitchen.",
    "Photography is a hobby I picked up during the pandemic.",
    "I’m currently training for a half-marathon.",
    "My favorite sport to watch is basketball.",
    "I enjoy building and flying model airplanes.",
    "I speak three languages fluently.",
    "Baking desserts for friends and family makes me happy.",
    "I have a small collection of vintage records.",
    "Gardening is a hobby that helps me unwind.",
    "I love binge-watching documentaries about space."
]

for s in statements:
    m.add(s, user_id="test", metadata={"category": "hobbies"})

Errors:

ERROR:root:Error in new_memories_with_actions: 'event'
ERROR:root:Error in new_memories_with_actions: 'event'
/home/remotessh/memtest/test.py:72: DeprecationWarning: The current add API output format is deprecated. To use the latest format, set api_version='v1.1'. The current format will be removed in mem0ai 1.1.0 and later versions.
  m.add(s, user_id = "test",  metadata={"category": "hobbies"})
ERROR:root:Error in new_memories_with_actions: 'event'
ERROR:root:Error in new_memories_with_actions: 'event'
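
Side note: the DeprecationWarning in that log suggests this config omits the "version": "v1.1" key that the other configs in this thread set. A minimal sketch of the change (whether it resolves the 'event' error is untested):

    config["version"] = "v1.1"  # opt in to the v1.1 add API output format the warning asks for
    m = Memory.from_config(config)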

@microcoder-py

It has added two memories, but won't go further

@Cirr0e

Cirr0e commented Nov 22, 2024

Hi there! I understand you're having issues with Mem0 when using it with Ollama locally. Let me help you resolve these problems.

The issues you're experiencing can be broken down into two main problems:

  1. Error with new_memories_with_actions: 'event'
  2. OpenAI API key error in the GraphRAG configuration

Here's how to fix these issues:

  1. First, let's correct the Qdrant configuration:
import os
from mem0 import Memory

# Ensure qdrant directory exists
os.makedirs("./qdrant_data", exist_ok=True)

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test_collection",
            "path": "./qdrant_data",  # Add local storage path
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 384  # Correct dimension for nomic-embed-text
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama2",  # Use a stable model
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text",
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "version": "v1.1"
}

# Initialize Memory
m = Memory.from_config(config)

# Add memories with proper metadata
m.add("I'm visiting Paris", user_id="john", metadata={"type": "event", "category": "travel"})
  2. For the GraphRAG configuration, remove OpenAI-specific settings:
config = {
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama2",
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "graph_store": {
        "provider": "neo4j",
        "config": {
            "url": "neo4j://localhost:7687",  # Use local Neo4j or your actual URL
            "username": "neo4j",
            "password": os.getenv("NEO4J_PASSWORD")  # Use environment variable
        }
    },
    "version": "v1.1"
}

Key Points to Check:

  1. Make sure Ollama is running:
ollama serve
  2. Verify model availability:
ollama list
  3. Pull required models:
ollama pull llama2
ollama pull nomic-embed-text
  4. Ensure Qdrant is running and accessible at localhost:6333
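
To verify point 4 from a script, a quick sketch with the qdrant-client package (a dependency of mem0's qdrant provider; assumes the default REST port 6333):

    from qdrant_client import QdrantClient

    # Raises a connection error if Qdrant is not reachable at localhost:6333
    client = QdrantClient(host="localhost", port=6333)
    print(client.get_collections())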

Common Issues and Solutions:

  • If memories aren't being added: Make sure the proper metadata is included and the embedding dimensions match your model (see the sketch after this list)
  • If you get OpenAI errors: Remove any OpenAI-specific configurations and ensure you're using Ollama consistently
  • If storage issues persist: Check that the Qdrant data directory has proper permissions
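
For the embedding-dimension check in the first bullet, one option is to ask Ollama for a vector and measure it; a sketch assuming the official ollama Python package is installed:

    import ollama

    # embedding_model_dims in the config must equal the length of this vector
    resp = ollama.embeddings(model="nomic-embed-text", prompt="dimension check")
    print(len(resp["embedding"]))  # 768 for nomic-embed-text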

Let me know if you need any clarification or run into other issues!

@microcoder-py

Didn't work

@microcoder-py

Provided a quick fix in #2043 for cases where the prompt is too long for the model to follow

@ShuvoDAI

If you want to prevent the ERROR:root:Error in new_memories_with_actions: 'event' error, add the chat messages with roles:

from mem0 import Memory

config = {
    "vector_store": {
        "provider": "qdrant",
        "config": {
            "collection_name": "test",
            "host": "localhost",
            "port": 6333,
            "embedding_model_dims": 768,  # Change this according to your local model's dimensions
        },
    },
    "llm": {
        "provider": "ollama",
        "config": {
            "model": "llama3.2:latest",
            "temperature": 0,
            "max_tokens": 8000,
            "ollama_base_url": "http://localhost:11434",  # Ensure this URL is correct
        },
    },
    "embedder": {
        "provider": "ollama",
        "config": {
            "model": "nomic-embed-text:latest",
            # Alternatively, you can use "snowflake-arctic-embed:latest"
            "ollama_base_url": "http://localhost:11434",
        },
    },
    "version": "v1.1"
}

m = Memory.from_config(config)
messages = [
    {"role": "user", "content": "Hi, I'm Alex. I like to play cricket on weekends."},
    {"role": "assistant", "content": "Hello Alex! It's great to know that you enjoy playing cricket on weekends. I'll remember that for future reference."}
]
m.add(messages, user_id="alice")
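
To confirm the role-tagged messages were extracted into memories, a quick follow-up (same user_id as in the snippet above):

    # Should list memories derived from the conversation, e.g. about cricket
    print(m.get_all(user_id="alice"))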
