diff --git a/lib/crewai/src/crewai/llm.py b/lib/crewai/src/crewai/llm.py
index 66eebec6f..5ba3d6514 100644
--- a/lib/crewai/src/crewai/llm.py
+++ b/lib/crewai/src/crewai/llm.py
@@ -498,7 +498,11 @@ class LLM(BaseLLM):
         }
 
         # Remove None values from params
-        return {k: v for k, v in params.items() if v is not None}
+        params = {k: v for k, v in params.items() if v is not None}
+
+        params = self._apply_additional_drop_params(params)
+
+        return params
 
     def _handle_streaming_response(
         self,
diff --git a/lib/crewai/src/crewai/llms/base_llm.py b/lib/crewai/src/crewai/llms/base_llm.py
index ae6865d8b..bae69101c 100644
--- a/lib/crewai/src/crewai/llms/base_llm.py
+++ b/lib/crewai/src/crewai/llms/base_llm.py
@@ -223,6 +223,49 @@ class BaseLLM(ABC):
 
         return content
 
+    def _apply_additional_drop_params(self, params: dict[str, Any]) -> dict[str, Any]:
+        """Apply additional_drop_params filtering to remove unwanted parameters.
+
+        This method provides consistent parameter filtering across all LLM providers.
+        It should be called after building the final params dict and before making
+        the provider API call.
+
+        Args:
+            params: The parameters dictionary to filter
+
+        Returns:
+            Filtered parameters dictionary with specified params removed
+
+        Example:
+            >>> llm = LLM(model="o1-mini", additional_drop_params=["stop"])
+            >>> params = {"model": "o1-mini", "messages": [...], "stop": ["\\n"]}
+            >>> filtered = llm._apply_additional_drop_params(params)
+            >>> "stop" in filtered
+            False
+        """
+        drop_params = (
+            self.additional_params.get("additional_drop_params")
+            or self.additional_params.get("drop_additionnal_params")
+            or []
+        )
+
+        if not drop_params:
+            return params
+
+        filtered_params = params.copy()
+
+        for param_name in drop_params:
+            if param_name in filtered_params:
+                logging.debug(
+                    f"Dropping parameter '{param_name}' as specified in additional_drop_params"
+                )
+                filtered_params.pop(param_name)
+
+        filtered_params.pop("additional_drop_params", None)
+        filtered_params.pop("drop_additionnal_params", None)
+
+        return filtered_params
+
     def get_context_window_size(self) -> int:
         """Get the context window size for the LLM.
diff --git a/lib/crewai/src/crewai/llms/providers/anthropic/completion.py b/lib/crewai/src/crewai/llms/providers/anthropic/completion.py
index fad6f1904..247664998 100644
--- a/lib/crewai/src/crewai/llms/providers/anthropic/completion.py
+++ b/lib/crewai/src/crewai/llms/providers/anthropic/completion.py
@@ -201,6 +201,8 @@ class AnthropicCompletion(BaseLLM):
         if tools and self.supports_tools:
             params["tools"] = self._convert_tools_for_interference(tools)
 
+        params = self._apply_additional_drop_params(params)
+
         return params
 
     def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
diff --git a/lib/crewai/src/crewai/llms/providers/azure/completion.py b/lib/crewai/src/crewai/llms/providers/azure/completion.py
index 0d52143de..32b89410b 100644
--- a/lib/crewai/src/crewai/llms/providers/azure/completion.py
+++ b/lib/crewai/src/crewai/llms/providers/azure/completion.py
@@ -273,6 +273,8 @@ class AzureCompletion(BaseLLM):
             params["tools"] = self._convert_tools_for_interference(tools)
             params["tool_choice"] = "auto"
 
+        params = self._apply_additional_drop_params(params)
+
         return params
 
     def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
diff --git a/lib/crewai/src/crewai/llms/providers/openai/completion.py b/lib/crewai/src/crewai/llms/providers/openai/completion.py
index 5fbde3eab..bd8b33d50 100644
--- a/lib/crewai/src/crewai/llms/providers/openai/completion.py
+++ b/lib/crewai/src/crewai/llms/providers/openai/completion.py
@@ -249,7 +249,11 @@ class OpenAICompletion(BaseLLM):
             "timeout",
         }
 
-        return {k: v for k, v in params.items() if k not in crewai_specific_params}
+        params = {k: v for k, v in params.items() if k not in crewai_specific_params}
+
+        params = self._apply_additional_drop_params(params)
+
+        return params
 
     def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
         """Convert CrewAI tool format to OpenAI function calling format."""
diff --git a/lib/crewai/tests/test_llm.py b/lib/crewai/tests/test_llm.py
index 3555ee8c5..556329809 100644
--- a/lib/crewai/tests/test_llm.py
+++ b/lib/crewai/tests/test_llm.py
@@ -725,3 +725,108 @@ def test_native_provider_falls_back_to_litellm_when_not_in_supported_list():
     # Should fall back to LiteLLM
     assert llm.is_litellm is True
     assert llm.model == "groq/llama-3.1-70b-versatile"
+
+
+def test_additional_drop_params_filters_parameters_in_litellm():
+    """Test that additional_drop_params correctly filters out specified parameters in LiteLLM path."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", True), patch("crewai.llm.litellm"):
+        llm = LLM(
+            model="o1-mini",
+            stop=["stop_sequence"],
+            additional_drop_params=["stop"],
+            is_litellm=True,
+        )
+
+        messages = [{"role": "user", "content": "Hello"}]
+        params = llm._prepare_completion_params(messages)
+
+        assert "stop" not in params
+        assert "additional_drop_params" not in params
+        assert params["model"] == "o1-mini"
+
+
+def test_additional_drop_params_with_agent():
+    """Test that additional_drop_params works when LLM is used with an Agent."""
+    from unittest.mock import patch, MagicMock
+    from crewai import Agent, Task, Crew
+
+    with patch("crewai.llm.LITELLM_AVAILABLE", True), patch("crewai.llm.litellm") as mock_litellm:
+        llm = LLM(
+            model="o1-mini",
+            stop=["stop_sequence"],
+            additional_drop_params=["stop"],
+            is_litellm=True,
+        )
+
+        agent = Agent(
+            role="Test Agent",
+            goal="Test the LLM response format functionality.",
+            backstory="An AI developer testing LLM integrations.",
+            llm=llm,
+        )
+
+        task = Task(
+            description="Say hello",
+            expected_output="A greeting",
+            agent=agent,
+        )
+
+        mock_response = MagicMock()
+        mock_response.choices = [MagicMock()]
+        mock_response.choices[0].message.content = "Hello!"
+        mock_response.choices[0].message.tool_calls = None
+        mock_response.usage = MagicMock()
+        mock_response.usage.prompt_tokens = 10
+        mock_response.usage.completion_tokens = 5
+        mock_response.usage.total_tokens = 15
+        mock_litellm.completion.return_value = mock_response
+
+        crew = Crew(agents=[agent], tasks=[task], verbose=False)
+        crew.kickoff()
+
+        # Verify that litellm.completion was called
+        assert mock_litellm.completion.called
+
+        # Get the kwargs passed to litellm.completion
+        call_kwargs = mock_litellm.completion.call_args[1]
+
+        assert "stop" not in call_kwargs
+        assert "additional_drop_params" not in call_kwargs
+
+
+def test_additional_drop_params_supports_misspelled_variant():
+    """Test that drop_additionnal_params (misspelled) is also supported for backwards compatibility."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", True), patch("crewai.llm.litellm"):
+        llm = LLM(
+            model="o1-mini",
+            stop=["stop_sequence"],
+            drop_additionnal_params=["stop"],
+            is_litellm=True,
+        )
+
+        messages = [{"role": "user", "content": "Hello"}]
+        params = llm._prepare_completion_params(messages)
+
+        assert "stop" not in params
+        assert "drop_additionnal_params" not in params
+        assert params["model"] == "o1-mini"
+
+
+def test_additional_drop_params_filters_multiple_parameters():
+    """Test that additional_drop_params can filter multiple parameters."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", True), patch("crewai.llm.litellm"):
+        llm = LLM(
+            model="o1-mini",
+            stop=["stop_sequence"],
+            temperature=0.7,
+            additional_drop_params=["stop", "temperature"],
+            is_litellm=True,
+        )
+
+        messages = [{"role": "user", "content": "Hello"}]
+        params = llm._prepare_completion_params(messages)
+
+        assert "stop" not in params
+        assert "temperature" not in params
+        assert "additional_drop_params" not in params
+        assert params["model"] == "o1-mini"