Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-22 22:58:13 +00:00)
fix: improve LiteLLM fallback error message for non-native providers
Fixes #4262

When using non-native providers like Groq with models that require LiteLLM, the error message was confusing: 'LiteLLM is not available, falling back to LiteLLM'.

This commit:
- Fixes the contradictory error message
- Provides a clear, actionable error message that includes:
  - The model name that requires LiteLLM
  - Instructions on how to install LiteLLM (pip install 'crewai[litellm]')
- Adds tests to verify the error message is helpful and includes the model name

Co-Authored-By: João <joao@crewai.com>
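For illustration only (a hypothetical caller, not part of this commit's diff): the snippet below sketches how the improved error surfaces, assuming crewai is installed without the litellm extra. The model string is the one used in the new tests.

    # Hypothetical usage sketch; not part of the commit.
    from crewai import LLM

    # With litellm installed (pip install 'crewai[litellm]' or pip install litellm),
    # a non-native provider model like this one falls back to LiteLLM.
    # Without litellm, constructing the LLM now raises the descriptive ImportError
    # added in this commit instead of the old contradictory message.
    llm = LLM(model="groq/llama-3.1-70b-versatile")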
@@ -410,8 +410,14 @@ class LLM(BaseLLM):
         # FALLBACK to LiteLLM
         if not LITELLM_AVAILABLE:
-            logger.error("LiteLLM is not available, falling back to LiteLLM")
-            raise ImportError("Fallback to LiteLLM is not available") from None
+            logger.error(
+                f"Model '{model}' requires LiteLLM but it is not installed. "
+                "Install it with: pip install 'crewai[litellm]' or pip install litellm"
+            )
+            raise ImportError(
+                f"Model '{model}' requires LiteLLM for inference but LiteLLM is not installed. "
+                "Please install it with: pip install 'crewai[litellm]' or pip install litellm"
+            ) from None

         instance = object.__new__(cls)
         super(LLM, instance).__init__(model=model, is_litellm=True, **kwargs)
@@ -1144,7 +1150,7 @@ class LLM(BaseLLM):
         if response_model:
             params["response_model"] = response_model
         response = litellm.completion(**params)

         if hasattr(response,"usage") and not isinstance(response.usage, type) and response.usage:
             usage_info = response.usage
             self._track_token_usage_internal(usage_info)
@@ -1363,7 +1369,7 @@ class LLM(BaseLLM):
         """
         full_response = ""
         chunk_count = 0

         usage_info = None

         accumulated_tool_args: defaultdict[int, AccumulatedToolArgs] = defaultdict(
@@ -737,6 +737,33 @@ def test_native_provider_falls_back_to_litellm_when_not_in_supported_list():
     assert llm.model == "groq/llama-3.1-70b-versatile"


+def test_litellm_fallback_raises_helpful_error_when_litellm_not_available():
+    """Test that when LiteLLM is not available, a helpful error message is raised."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", False):
+        with patch("crewai.llm.SUPPORTED_NATIVE_PROVIDERS", ["openai", "anthropic"]):
+            with pytest.raises(ImportError) as excinfo:
+                LLM(model="groq/llama-3.1-70b-versatile", is_litellm=False)
+
+    error_message = str(excinfo.value)
+    assert "groq/llama-3.1-70b-versatile" in error_message
+    assert "LiteLLM" in error_message
+    assert "pip install" in error_message
+    assert "crewai[litellm]" in error_message
+
+
+def test_litellm_fallback_error_includes_model_name():
+    """Test that the LiteLLM fallback error includes the model name for clarity."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", False):
+        with patch("crewai.llm.SUPPORTED_NATIVE_PROVIDERS", ["openai", "anthropic"]):
+            test_model = "together/meta-llama/Llama-3-70b"
+            with pytest.raises(ImportError) as excinfo:
+                LLM(model=test_model, is_litellm=False)
+
+    error_message = str(excinfo.value)
+    assert test_model in error_message
+    assert "requires LiteLLM for inference" in error_message
+
+
 def test_prefixed_models_with_valid_constants_use_native_sdk():
     """Test that models with native provider prefixes use native SDK when model is in constants."""
     # Test openai/ prefix with actual OpenAI model in constants → Native SDK
@@ -989,4 +1016,4 @@ async def test_usage_info_streaming_with_acall():
     assert llm._token_usage["completion_tokens"] > 0
     assert llm._token_usage["total_tokens"] > 0

-    assert len(result) > 0
+    assert len(result) > 0