diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index 47295303b..893588772 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -117,18 +117,42 @@ def suppress_warnings():


 class LLM:
+    """LLM class for handling model interactions.
+
+    Args:
+        model: The model identifier; should not start with 'models/'.
+            Examples: 'gemini/gemini-1.5-pro', 'anthropic/claude-3'
+        timeout: Optional timeout for model calls
+        temperature: Optional temperature parameter
+        max_tokens: Optional maximum tokens for completion
+        max_completion_tokens: Optional maximum completion tokens
+        logprobs: Optional log probabilities
+        top_p: Optional nucleus sampling parameter
+        n: Optional number of completions
+        stop: Optional stop sequences
+        presence_penalty: Optional presence penalty
+        frequency_penalty: Optional frequency penalty
+        logit_bias: Optional token biasing
+        user: Optional user identifier
+        response_format: Optional response format configuration
+        seed: Optional random seed
+        tools: Optional list of tools
+        tool_choice: Optional tool choice configuration
+        api_base: Optional API base URL
+        api_key: Optional API key
+        api_version: Optional API version
+        base_url: Optional base URL
+        top_logprobs: Optional top log probabilities
+        callbacks: Optional list of callbacks
+        reasoning_effort: Optional reasoning effort level
+
+    Raises:
+        ValueError: If the model name starts with 'models/' or is empty
+        TypeError: If model is not a string
+    """
     def __init__(
         self,
-        model: Union[str, 'BaseLanguageModel'],
-        """Initialize LLM instance.
-
-        Args:
-            model: The model identifier; should not start with 'models/'.
-                Examples: 'gemini/gemini-1.5-pro', 'anthropic/claude-3'
-
-        Raises:
-            ValueError: If the model name starts with 'models/'.
-        """
+        model: str,
         timeout: Optional[Union[float, int]] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
@@ -155,7 +179,11 @@ class LLM:
         INVALID_MODEL_PREFIX = "models/"

         # Validate model name
-        if isinstance(model, str) and model.startswith(INVALID_MODEL_PREFIX):
+        if not isinstance(model, str):
+            raise TypeError("Model name must be a string")
+        if not model:
+            raise ValueError("Model name cannot be empty")
+        if model.startswith(INVALID_MODEL_PREFIX):
             raise ValueError(
                 f'Invalid model name "{model}": Model names should not start with "{INVALID_MODEL_PREFIX}". '
                 'Use the provider prefix instead (e.g., "gemini/model-name").'
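For reference, a minimal usage sketch of the constructor-time validation introduced above. It assumes the class is importable as `crewai.llm.LLM` (the path shown in this diff) and that instantiation itself performs no network calls; the expected error behaviors are taken directly from the new checks.

```python
# Sketch of the new validation behavior; assumes crewai.llm.LLM is importable
# and that constructing an instance does not contact any provider.
import pytest

from crewai.llm import LLM

llm = LLM(model="gemini/gemini-1.5-pro")   # accepted: provider-prefixed name

with pytest.raises(ValueError):            # "models/" prefix is rejected
    LLM(model="models/gemini-1.5-pro")

with pytest.raises(ValueError):            # empty names are rejected
    LLM(model="")

with pytest.raises(TypeError):             # non-string names are rejected
    LLM(model=None)
```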
diff --git a/tests/llm_test.py b/tests/llm_test.py
index 44d8d6a52..4e9094a55 100644
--- a/tests/llm_test.py
+++ b/tests/llm_test.py
@@ -269,9 +269,9 @@ class TestModelNameValidation:

     def test_edge_cases(self):
         """Test edge cases for model name validation."""
-        with pytest.raises(ValueError):
+        with pytest.raises(ValueError, match="cannot be empty"):
             LLM(model="")  # Empty string
-        with pytest.raises(TypeError):
+        with pytest.raises(TypeError, match="must be a string"):
             LLM(model=None)  # None value
@@ -347,13 +347,16 @@ def test_anthropic_model_detection():
         ("claude-instant", True),
         ("claude/v1", True),
         ("gpt-4", False),
-        ("", False),
         ("anthropomorphic", False),  # Should not match partial words
     ]

     for model, expected in models:
         llm = LLM(model=model)
-        assert llm.is_anthropic == expected, f"Failed for model: {model}"
+        assert llm._is_anthropic_model(model) == expected, f"Failed for model: {model}"
+
+    # Test empty model name separately since it raises ValueError
+    with pytest.raises(ValueError, match="cannot be empty"):
+        LLM(model="")

 def test_anthropic_message_formatting(anthropic_llm, system_message, user_message):
     """Test Anthropic message formatting with fixtures."""
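The updated test calls an `_is_anthropic_model` helper instead of reading the old `is_anthropic` attribute. That helper is not shown in this diff, so the sketch below is only an assumed matching rule that would satisfy the listed cases (prefix matches on "claude"/"anthropic", no partial-word matches such as "anthropomorphic"); it is not the project's actual implementation.

```python
# Hypothetical sketch of the rule behind LLM._is_anthropic_model.
# The method name comes from the test diff; the logic is an assumption
# chosen only to satisfy the test cases listed above.
ANTHROPIC_PREFIXES = ("anthropic/", "claude-", "claude/")

def _is_anthropic_model(model: str) -> bool:
    """Return True only for names that clearly target an Anthropic model."""
    return model.lower().startswith(ANTHROPIC_PREFIXES)

assert _is_anthropic_model("claude-instant")
assert _is_anthropic_model("claude/v1")
assert not _is_anthropic_model("gpt-4")
assert not _is_anthropic_model("anthropomorphic")  # no partial-word match
```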