Compare commits


1 Commit

Author: Devin AI
SHA1: 068cc0baa7
Message: Fix #2549: Improve error handling for Ollama connection errors
         Co-Authored-By: Joe Moura <joao@crewai.com>
Date: 2025-04-09 10:31:57 +00:00
5 changed files with 42 additions and 65 deletions

View File: crewai.cli.reset_memories_command

@@ -12,7 +12,7 @@ from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
 def reset_memories_command(
     long,
     short,
-    entities,  # Changed from entity to entities to match CLI parameter
+    entity,
     knowledge,
     kickoff_outputs,
     all,
@@ -23,7 +23,7 @@ def reset_memories_command(
     Args:
         long (bool): Whether to reset the long-term memory.
         short (bool): Whether to reset the short-term memory.
-        entities (bool): Whether to reset the entity memory.
+        entity (bool): Whether to reset the entity memory.
         kickoff_outputs (bool): Whether to reset the latest kickoff task outputs.
         all (bool): Whether to reset all memories.
         knowledge (bool): Whether to reset the knowledge.
@@ -45,7 +45,7 @@ def reset_memories_command(
     if short:
         ShortTermMemory().reset()
         click.echo("Short term memory has been reset.")
-    if entities:  # Changed from entity to entities
+    if entity:
         EntityMemory().reset()
         click.echo("Entity memory has been reset.")
     if kickoff_outputs:

View File: crewai.llm

@@ -177,6 +177,10 @@ class LLM:
             response = litellm.completion(**params)
             return response["choices"][0]["message"]["content"]
         except Exception as e:
+            if "ollama" in str(self.model).lower() and ("connection refused" in str(e).lower() or "ollamaexception" in str(e).lower()):
+                from crewai.utilities.exceptions.ollama_connection_exception import OllamaConnectionException
+                raise OllamaConnectionException(str(e))
             if not LLMContextLengthExceededException(
                 str(e)
             )._is_context_limit_error(str(e)):
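
With this change, code that drives an Ollama-backed model can catch a dedicated exception instead of pattern-matching a generic one. A minimal usage sketch (not part of this commit), assuming the LLM constructor and call signature exercised by the new test at the bottom of this diff:

from crewai.llm import LLM
from crewai.utilities.exceptions.ollama_connection_exception import OllamaConnectionException

llm = LLM(model="ollama/llama3")
try:
    answer = llm.call([{"role": "user", "content": "Hello"}])
except OllamaConnectionException as e:
    # The exception message already carries install-and-start guidance for Ollama.
    print(e)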

View File: crewai.utilities.exceptions.ollama_connection_exception

@@ -0,0 +1,18 @@
+class OllamaConnectionException(Exception):
+    """Exception raised when there's a connection issue with Ollama.
+
+    This typically happens when Ollama is not running or is not accessible
+    at the expected URL.
+    """
+
+    def __init__(self, error_message: str):
+        self.original_error_message = error_message
+        super().__init__(self._get_error_message(error_message))
+
+    def _get_error_message(self, error_message: str):
+        return (
+            f"Failed to connect to Ollama. Original error: {error_message}\n"
+            "Please make sure Ollama is installed and running. "
+            "You can install Ollama from https://ollama.com/download and "
+            "start it by running 'ollama serve' in your terminal."
+        )
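
For illustration (not part of the commit), instantiating the exception directly shows the message that _get_error_message assembles:

exc = OllamaConnectionException("[Errno 111] Connection refused")
print(str(exc))
# Failed to connect to Ollama. Original error: [Errno 111] Connection refused
# Please make sure Ollama is installed and running. You can install Ollama from
# https://ollama.com/download and start it by running 'ollama serve' in your terminal.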

View File (deleted test file)

@@ -1,62 +0,0 @@
-import os
-import tempfile
-from unittest.mock import patch, MagicMock
-
-import pytest
-from click.testing import CliRunner
-
-from crewai.cli.cli import reset_memories
-from crewai.cli.reset_memories_command import reset_memories_command
-
-
-def test_reset_memories_command_parameters():
-    """Test that the CLI parameters match the function parameters."""
-    # Create a mock for reset_memories_command
-    with patch('crewai.cli.cli.reset_memories_command') as mock_reset:
-        runner = CliRunner()
-
-        # Test with entities flag
-        result = runner.invoke(reset_memories, ['--entities'])
-        assert result.exit_code == 0
-
-        # Check that the function was called with the correct parameters
-        # The third parameter should be True for entities
-        mock_reset.assert_called_once_with(False, False, True, False, False, False)
-
-
-def test_reset_memories_all_flag():
-    """Test that the --all flag resets all memories."""
-    with patch('crewai.cli.cli.reset_memories_command') as mock_reset:
-        runner = CliRunner()
-
-        # Test with all flag
-        result = runner.invoke(reset_memories, ['--all'])
-        assert result.exit_code == 0
-
-        # Check that the function was called with the correct parameters
-        # The last parameter should be True for all
-        mock_reset.assert_called_once_with(False, False, False, False, False, True)
-
-
-def test_reset_memories_knowledge_flag():
-    """Test that the --knowledge flag resets knowledge storage."""
-    with patch('crewai.cli.cli.reset_memories_command') as mock_reset:
-        runner = CliRunner()
-
-        # Test with knowledge flag
-        result = runner.invoke(reset_memories, ['--knowledge'])
-        assert result.exit_code == 0
-
-        # Check that the function was called with the correct parameters
-        # The fourth parameter should be True for knowledge
-        mock_reset.assert_called_once_with(False, False, False, True, False, False)
-
-
-def test_reset_memories_no_flags():
-    """Test that an error message is shown when no flags are provided."""
-    runner = CliRunner()
-
-    # Test with no flags
-    result = runner.invoke(reset_memories, [])
-    assert result.exit_code == 0
-    assert "Please specify at least one memory type" in result.output

View File (new test file)

@@ -0,0 +1,17 @@
+import pytest
+from unittest.mock import patch, MagicMock
+
+from crewai.llm import LLM
+from crewai.utilities.exceptions.ollama_connection_exception import OllamaConnectionException
+
+
+class TestOllamaConnection:
+    def test_ollama_connection_error(self):
+        with patch('litellm.completion') as mock_completion:
+            mock_completion.side_effect = Exception("OllamaException - [Errno 111] Connection refused")
+
+            llm = LLM(model="ollama/llama3")
+            with pytest.raises(OllamaConnectionException) as exc_info:
+                llm.call([{"role": "user", "content": "Hello"}])
+            assert "Failed to connect to Ollama" in str(exc_info.value)
+            assert "Please make sure Ollama is installed and running" in str(exc_info.value)