Compare commits

...

4 Commits

Author SHA1 Message Date
Devin AI
674c3463ca Test: Improve empty messages tests with proper mocking
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-05-02 08:14:47 +00:00
Devin AI
c9912e9979 Refactor: Extract message validation into helper method with type hints
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-05-02 08:12:31 +00:00
Devin AI
5238ac2683 Fix lint: Sort imports in test_empty_messages.py
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-05-02 08:07:20 +00:00
Devin AI
4de80f51e7 Fix #2740: Add validation for empty messages lists to prevent IndexError in LiteLLM's ollama_pt()
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-05-02 08:05:12 +00:00
2 changed files with 90 additions and 5 deletions

View File

@@ -322,6 +322,23 @@ class LLM(BaseLLM):
ANTHROPIC_PREFIXES = ("anthropic/", "claude-", "claude/")
return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES)
def _validate_messages(
self,
messages: Union[str, List[Dict[str, str]], None]
) -> None:
"""Validate that messages list is not empty or None.
Args:
messages: Input messages for the LLM
Raises:
ValueError: If messages is None or an empty list
"""
if messages is None:
raise ValueError("Messages list cannot be empty. At least one message is required.")
if isinstance(messages, list) and len(messages) == 0:
raise ValueError("Messages list cannot be empty. At least one message is required.")
def _prepare_completion_params(
self,
messages: Union[str, List[Dict[str, str]]],
@@ -337,8 +354,14 @@ class LLM(BaseLLM):
Returns:
Dict[str, Any]: Parameters for the completion call
Raises:
ValueError: If messages is None or an empty list
"""
# --- 1) Format messages according to provider requirements
# --- 1) Ensure messages list is not empty
self._validate_messages(messages)
# --- 2) Format messages according to provider requirements
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
formatted_messages = self._format_messages_for_provider(messages)
@@ -842,10 +865,13 @@ class LLM(BaseLLM):
Raises:
TypeError: If messages format is invalid
ValueError: If response format is not supported
ValueError: If messages is None or an empty list, or if response format is not supported
LLMContextLengthExceededException: If input exceeds model's context limit
"""
# --- 1) Emit call started event
# --- 1) Validate messages is not None or empty to prevent IndexError in LiteLLM's ollama_pt()
self._validate_messages(messages)
# --- 2) Emit call started event
assert hasattr(crewai_event_bus, "emit")
crewai_event_bus.emit(
self,
@@ -857,10 +883,10 @@ class LLM(BaseLLM):
),
)
# --- 2) Validate parameters before proceeding with the call
# --- 3) Validate parameters before proceeding with the call
self._validate_call_params()
# --- 3) Convert string messages to proper format if needed
# --- 4) Convert string messages to proper format if needed
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]

View File

@@ -0,0 +1,59 @@
from unittest.mock import patch, MagicMock
import pytest
from crewai.llm import LLM
@patch('crewai.llm.LLM._prepare_completion_params')
def test_empty_messages_validation(mock_prepare):
    """LLM.call() must reject an empty or missing messages list up front.

    This guards against the IndexError raised inside LiteLLM's ollama_pt()
    when it is handed an empty messages list.
    """
    llm = LLM(model="gpt-3.5-turbo")  # any model works; validation is model-agnostic
    for bad_input in ([], None):
        with pytest.raises(ValueError, match="Messages list cannot be empty"):
            llm.call(messages=bad_input)
    # Validation must fire before any completion params are prepared.
    mock_prepare.assert_not_called()
@patch('crewai.llm.LLM._prepare_completion_params')
def test_empty_string_message(mock_prepare):
    """An empty prompt string is rejected the same way as an empty list."""
    llm = LLM(model="gpt-3.5-turbo")
    with pytest.raises(ValueError) as excinfo:
        llm.call(messages="")
    assert "Messages list cannot be empty" in str(excinfo.value)
    # The call must short-circuit before building completion params.
    mock_prepare.assert_not_called()
@patch('crewai.llm.LLM._prepare_completion_params')
def test_invalid_message_format(mock_prepare):
    """A structurally invalid message dict surfaces as a TypeError."""
    # Simulate the downstream format check rejecting the message.
    mock_prepare.side_effect = TypeError("Invalid message format")
    llm = LLM(model="gpt-3.5-turbo")
    with pytest.raises(TypeError) as excinfo:
        llm.call(messages=[{}])
    assert "Invalid message format" in str(excinfo.value)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_ollama_model_empty_messages():
    """Empty messages are rejected for Ollama models before any request is made.

    This is the model family whose LiteLLM prompt template (ollama_pt)
    raised IndexError on an empty list.
    """
    ollama_llm = LLM(model="ollama/llama3")
    with pytest.raises(ValueError) as excinfo:
        ollama_llm.call(messages=[])
    assert "Messages list cannot be empty" in str(excinfo.value)