diff --git a/lib/crewai/src/crewai/llm.py b/lib/crewai/src/crewai/llm.py
index 8bc1fe648..a69df75f7 100644
--- a/lib/crewai/src/crewai/llm.py
+++ b/lib/crewai/src/crewai/llm.py
@@ -410,8 +410,14 @@ class LLM(BaseLLM):
 
         # FALLBACK to LiteLLM
         if not LITELLM_AVAILABLE:
-            logger.error("LiteLLM is not available, falling back to LiteLLM")
-            raise ImportError("Fallback to LiteLLM is not available") from None
+            logger.error(
+                f"Model '{model}' requires LiteLLM but it is not installed. "
+                "Install it with: pip install 'crewai[litellm]' or pip install litellm"
+            )
+            raise ImportError(
+                f"Model '{model}' requires LiteLLM for inference but LiteLLM is not installed. "
+                "Please install it with: pip install 'crewai[litellm]' or pip install litellm"
+            ) from None
 
         instance = object.__new__(cls)
         super(LLM, instance).__init__(model=model, is_litellm=True, **kwargs)
@@ -1144,7 +1150,7 @@ class LLM(BaseLLM):
             if response_model:
                 params["response_model"] = response_model
             response = litellm.completion(**params)
-
+
            if hasattr(response,"usage") and not isinstance(response.usage, type) and response.usage:
                 usage_info = response.usage
                 self._track_token_usage_internal(usage_info)
@@ -1363,7 +1369,7 @@ class LLM(BaseLLM):
         """
         full_response = ""
         chunk_count = 0
-
+
         usage_info = None
 
         accumulated_tool_args: defaultdict[int, AccumulatedToolArgs] = defaultdict(
diff --git a/lib/crewai/tests/test_llm.py b/lib/crewai/tests/test_llm.py
index a8b6a7a3f..71c1c8fc0 100644
--- a/lib/crewai/tests/test_llm.py
+++ b/lib/crewai/tests/test_llm.py
@@ -737,6 +737,33 @@ def test_native_provider_falls_back_to_litellm_when_not_in_supported_list():
     assert llm.model == "groq/llama-3.1-70b-versatile"
 
 
+def test_litellm_fallback_raises_helpful_error_when_litellm_not_available():
+    """Test that when LiteLLM is not available, a helpful error message is raised."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", False):
+        with patch("crewai.llm.SUPPORTED_NATIVE_PROVIDERS", ["openai", "anthropic"]):
+            with pytest.raises(ImportError) as excinfo:
+                LLM(model="groq/llama-3.1-70b-versatile", is_litellm=False)
+
+            error_message = str(excinfo.value)
+            assert "groq/llama-3.1-70b-versatile" in error_message
+            assert "LiteLLM" in error_message
+            assert "pip install" in error_message
+            assert "crewai[litellm]" in error_message
+
+
+def test_litellm_fallback_error_includes_model_name():
+    """Test that the LiteLLM fallback error includes the model name for clarity."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", False):
+        with patch("crewai.llm.SUPPORTED_NATIVE_PROVIDERS", ["openai", "anthropic"]):
+            test_model = "together/meta-llama/Llama-3-70b"
+            with pytest.raises(ImportError) as excinfo:
+                LLM(model=test_model, is_litellm=False)
+
+            error_message = str(excinfo.value)
+            assert test_model in error_message
+            assert "requires LiteLLM for inference" in error_message
+
+
 def test_prefixed_models_with_valid_constants_use_native_sdk():
     """Test that models with native provider prefixes use native SDK when model is in constants."""
     # Test openai/ prefix with actual OpenAI model in constants → Native SDK
@@ -989,4 +1016,4 @@ async def test_usage_info_streaming_with_acall():
 
     assert llm._token_usage["completion_tokens"] > 0
     assert llm._token_usage["total_tokens"] > 0
-    assert len(result) > 0
\ No newline at end of file
+    assert len(result) > 0