From 068cc0baa7dfda4c1e339ba17a1b4afa61f6b59e Mon Sep 17 00:00:00 2001 From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Wed, 9 Apr 2025 10:31:57 +0000 Subject: [PATCH] Fix #2549: Improve error handling for Ollama connection errors Co-Authored-By: Joe Moura --- src/crewai/llm.py | 4 ++++ .../exceptions/ollama_connection_exception.py | 18 ++++++++++++++++++ tests/test_ollama_connection.py | 17 +++++++++++++++++ 3 files changed, 39 insertions(+) create mode 100644 src/crewai/utilities/exceptions/ollama_connection_exception.py create mode 100644 tests/test_ollama_connection.py diff --git a/src/crewai/llm.py b/src/crewai/llm.py index 5d6a0ccf5..bd93ad28c 100644 --- a/src/crewai/llm.py +++ b/src/crewai/llm.py @@ -177,6 +177,10 @@ class LLM: response = litellm.completion(**params) return response["choices"][0]["message"]["content"] except Exception as e: + if "ollama" in str(self.model).lower() and ("connection refused" in str(e).lower() or "ollamaexception" in str(e).lower()): + from crewai.utilities.exceptions.ollama_connection_exception import OllamaConnectionException + raise OllamaConnectionException(str(e)) + if not LLMContextLengthExceededException( str(e) )._is_context_limit_error(str(e)): diff --git a/src/crewai/utilities/exceptions/ollama_connection_exception.py b/src/crewai/utilities/exceptions/ollama_connection_exception.py new file mode 100644 index 000000000..df8c1b808 --- /dev/null +++ b/src/crewai/utilities/exceptions/ollama_connection_exception.py @@ -0,0 +1,18 @@ +class OllamaConnectionException(Exception): + """Exception raised when there's a connection issue with Ollama. + + This typically happens when Ollama is not running or is not accessible + at the expected URL. + """ + + def __init__(self, error_message: str): + self.original_error_message = error_message + super().__init__(self._get_error_message(error_message)) + + def _get_error_message(self, error_message: str): + return ( + f"Failed to connect to Ollama. Original error: {error_message}\n" + "Please make sure Ollama is installed and running. " + "You can install Ollama from https://ollama.com/download and " + "start it by running 'ollama serve' in your terminal." + ) diff --git a/tests/test_ollama_connection.py b/tests/test_ollama_connection.py new file mode 100644 index 000000000..4b4bd7838 --- /dev/null +++ b/tests/test_ollama_connection.py @@ -0,0 +1,17 @@ +import pytest +from unittest.mock import patch, MagicMock +from crewai.llm import LLM +from crewai.utilities.exceptions.ollama_connection_exception import OllamaConnectionException + +class TestOllamaConnection: + def test_ollama_connection_error(self): + with patch('litellm.completion') as mock_completion: + mock_completion.side_effect = Exception("OllamaException - [Errno 111] Connection refused") + + llm = LLM(model="ollama/llama3") + + with pytest.raises(OllamaConnectionException) as exc_info: + llm.call([{"role": "user", "content": "Hello"}]) + + assert "Failed to connect to Ollama" in str(exc_info.value) + assert "Please make sure Ollama is installed and running" in str(exc_info.value)