diff --git a/embedchain/embedchain/embedder/ollama.py b/embedchain/embedchain/embedder/ollama.py
index 9e4ada473e..448f11e475 100644
--- a/embedchain/embedchain/embedder/ollama.py
+++ b/embedchain/embedchain/embedder/ollama.py
@@ -21,7 +21,7 @@ def __init__(self, config: Optional[OllamaEmbedderConfig] = None):
 
         client = Client(host=config.base_url)
         local_models = client.list()["models"]
-        if not any(model.get("name") == self.config.model for model in local_models):
+        if not any(model.get("model") == self.config.model for model in local_models):
             logger.info(f"Pulling {self.config.model} from Ollama!")
             client.pull(self.config.model)
         embeddings = OllamaEmbeddings(model=self.config.model, base_url=config.base_url)
diff --git a/embedchain/embedchain/llm/ollama.py b/embedchain/embedchain/llm/ollama.py
index e34ff38e1d..f2d2d19d19 100644
--- a/embedchain/embedchain/llm/ollama.py
+++ b/embedchain/embedchain/llm/ollama.py
@@ -28,7 +28,7 @@ def __init__(self, config: Optional[BaseLlmConfig] = None):
 
         client = Client(host=config.base_url)
         local_models = client.list()["models"]
-        if not any(model.get("name") == self.config.model for model in local_models):
+        if not any(model.get("model") == self.config.model for model in local_models):
             logger.info(f"Pulling {self.config.model} from Ollama!")
             client.pull(self.config.model)
 
diff --git a/mem0/embeddings/ollama.py b/mem0/embeddings/ollama.py
index 30034365c4..2cab342d04 100644
--- a/mem0/embeddings/ollama.py
+++ b/mem0/embeddings/ollama.py
@@ -36,7 +36,7 @@ def _ensure_model_exists(self):
         Ensure the specified model exists locally. If not, pull it from Ollama.
         """
         local_models = self.client.list()["models"]
-        if not any(model.get("name") == self.config.model for model in local_models):
+        if not any(model.get("model") == self.config.model for model in local_models):
             self.client.pull(self.config.model)
 
     def embed(self, text):
diff --git a/mem0/llms/ollama.py b/mem0/llms/ollama.py
index 54d8b719fb..87b66211aa 100644
--- a/mem0/llms/ollama.py
+++ b/mem0/llms/ollama.py
@@ -23,7 +23,7 @@ def _ensure_model_exists(self):
         Ensure the specified model exists locally. If not, pull it from Ollama.
         """
         local_models = self.client.list()["models"]
-        if not any(model.get("name") == self.config.model for model in local_models):
+        if not any(model.get("model") == self.config.model for model in local_models):
             self.client.pull(self.config.model)
 
     def _parse_response(self, response, tools):
diff --git a/tests/embeddings/test_ollama_embeddings.py b/tests/embeddings/test_ollama_embeddings.py
index 0aa428b742..b4dac102f1 100644
--- a/tests/embeddings/test_ollama_embeddings.py
+++ b/tests/embeddings/test_ollama_embeddings.py
@@ -8,7 +8,7 @@ def mock_ollama_client():
     with patch("mem0.embeddings.ollama.Client") as mock_ollama:
         mock_client = Mock()
-        mock_client.list.return_value = {"models": [{"name": "nomic-embed-text"}]}
+        mock_client.list.return_value = {"models": [{"model": "nomic-embed-text"}]}
         mock_ollama.return_value = mock_client
         yield mock_client