mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-10 16:48:30 +00:00
Fix #2740: Add validation for empty messages lists to prevent IndexError in LiteLLM's ollama_pt()
Co-Authored-By: Joe Moura <joao@crewai.com>
This commit is contained in:
@@ -338,7 +338,13 @@ class LLM(BaseLLM):
|
||||
Returns:
|
||||
Dict[str, Any]: Parameters for the completion call
|
||||
"""
|
||||
# --- 1) Format messages according to provider requirements
|
||||
# --- 1) Ensure messages list is not None or empty (additional safeguard)
|
||||
if messages is None:
|
||||
raise ValueError("Messages list cannot be empty. At least one message is required.")
|
||||
if isinstance(messages, list) and len(messages) == 0:
|
||||
raise ValueError("Messages list cannot be empty. At least one message is required.")
|
||||
|
||||
# --- 2) Format messages according to provider requirements
|
||||
if isinstance(messages, str):
|
||||
messages = [{"role": "user", "content": messages}]
|
||||
formatted_messages = self._format_messages_for_provider(messages)
|
||||
@@ -845,7 +851,13 @@ class LLM(BaseLLM):
|
||||
ValueError: If response format is not supported
|
||||
LLMContextLengthExceededException: If input exceeds model's context limit
|
||||
"""
|
||||
# --- 1) Emit call started event
|
||||
# --- 1) Validate messages is not None or empty to prevent IndexError in LiteLLM's ollama_pt()
|
||||
if messages is None:
|
||||
raise ValueError("Messages list cannot be empty. At least one message is required.")
|
||||
if isinstance(messages, list) and len(messages) == 0:
|
||||
raise ValueError("Messages list cannot be empty. At least one message is required.")
|
||||
|
||||
# --- 2) Emit call started event
|
||||
assert hasattr(crewai_event_bus, "emit")
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
@@ -857,10 +869,10 @@ class LLM(BaseLLM):
|
||||
),
|
||||
)
|
||||
|
||||
# --- 2) Validate parameters before proceeding with the call
|
||||
# --- 3) Validate parameters before proceeding with the call
|
||||
self._validate_call_params()
|
||||
|
||||
# --- 3) Convert string messages to proper format if needed
|
||||
# --- 4) Convert string messages to proper format if needed
|
||||
if isinstance(messages, str):
|
||||
messages = [{"role": "user", "content": messages}]
|
||||
|
||||
|
||||
tests/test_empty_messages.py — new file, 30 lines added
@@ -0,0 +1,30 @@
|
||||
import pytest
|
||||
from unittest.mock import patch
|
||||
|
||||
from crewai.llm import LLM
|
||||
|
||||
|
||||
def test_empty_messages_validation():
    """
    Verify that LLM.call() rejects empty or missing messages with a ValueError
    rather than letting LiteLLM's ollama_pt() fail later with an IndexError.
    """
    # The concrete model is irrelevant here: validation fires before any API call.
    model = LLM(model="gpt-3.5-turbo")

    # Both an empty list and None must be rejected with the same error message.
    for bad_messages in ([], None):
        with pytest.raises(ValueError, match="Messages list cannot be empty"):
            model.call(messages=bad_messages)
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
def test_ollama_model_empty_messages():
    """
    Verify that an Ollama-backed LLM also raises ValueError for an empty
    messages list — the original trigger of the IndexError in LiteLLM's
    ollama_pt().
    """
    ollama_llm = LLM(model="ollama/llama3")

    with pytest.raises(ValueError, match="Messages list cannot be empty"):
        ollama_llm.call(messages=[])
|
||||
Reference in New Issue
Block a user