diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index c8c456297..82e48c8d6 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -338,7 +338,13 @@ class LLM(BaseLLM):
         Returns:
             Dict[str, Any]: Parameters for the completion call
         """
-        # --- 1) Format messages according to provider requirements
+        # --- 1) Ensure messages list is not None or empty (additional safeguard)
+        if messages is None:
+            raise ValueError("Messages list cannot be empty. At least one message is required.")
+        if isinstance(messages, list) and len(messages) == 0:
+            raise ValueError("Messages list cannot be empty. At least one message is required.")
+
+        # --- 2) Format messages according to provider requirements
         if isinstance(messages, str):
             messages = [{"role": "user", "content": messages}]
         formatted_messages = self._format_messages_for_provider(messages)
@@ -845,7 +851,13 @@ class LLM(BaseLLM):
             ValueError: If response format is not supported
             LLMContextLengthExceededException: If input exceeds model's context limit
         """
-        # --- 1) Emit call started event
+        # --- 1) Validate messages is not None or empty to prevent IndexError in LiteLLM's ollama_pt()
+        if messages is None:
+            raise ValueError("Messages list cannot be empty. At least one message is required.")
+        if isinstance(messages, list) and len(messages) == 0:
+            raise ValueError("Messages list cannot be empty. At least one message is required.")
+
+        # --- 2) Emit call started event
         assert hasattr(crewai_event_bus, "emit")
         crewai_event_bus.emit(
             self,
@@ -857,10 +869,10 @@ class LLM(BaseLLM):
             ),
         )
 
-        # --- 2) Validate parameters before proceeding with the call
+        # --- 3) Validate parameters before proceeding with the call
         self._validate_call_params()
 
-        # --- 3) Convert string messages to proper format if needed
+        # --- 4) Convert string messages to proper format if needed
         if isinstance(messages, str):
             messages = [{"role": "user", "content": messages}]
 
diff --git a/tests/test_empty_messages.py b/tests/test_empty_messages.py
new file mode 100644
index 000000000..96cc5e743
--- /dev/null
+++ b/tests/test_empty_messages.py
@@ -0,0 +1,30 @@
+import pytest
+from unittest.mock import patch
+
+from crewai.llm import LLM
+
+
+def test_empty_messages_validation():
+    """
+    Test that LLM.call() raises a ValueError when an empty messages list is passed.
+    This prevents the IndexError in LiteLLM's ollama_pt() function.
+    """
+    llm = LLM(model="gpt-3.5-turbo")  # Any model will do for this test
+
+    with pytest.raises(ValueError, match="Messages list cannot be empty"):
+        llm.call(messages=[])
+
+    with pytest.raises(ValueError, match="Messages list cannot be empty"):
+        llm.call(messages=None)
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_ollama_model_empty_messages():
+    """
+    Test that LLM.call() with an Ollama model raises a ValueError
+    when an empty messages list is passed.
+    """
+    llm = LLM(model="ollama/llama3")
+
+    with pytest.raises(ValueError, match="Messages list cannot be empty"):
+        llm.call(messages=[])
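
A minimal sketch of the fail-fast behavior this patch enforces, seen from the caller's side. It assumes only the LLM constructor and call() signature exercised by the tests above; the Ollama model name is illustrative, since any model hits the same guard before a request is made.

    from crewai.llm import LLM

    llm = LLM(model="ollama/llama3")  # illustrative; the guard is provider-agnostic

    try:
        llm.call(messages=[])
    except ValueError as exc:
        # Before this patch, an empty list reached LiteLLM's ollama_pt()
        # and surfaced as an opaque IndexError; now it fails fast here,
        # before the call-started event is emitted.
        print(exc)  # Messages list cannot be empty. At least one message is required.

Because the check runs as step 1 of call(), before event emission and parameter validation, no network access or running Ollama server is needed to reproduce it.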