Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-16 04:18:35 +00:00
Compare commits: 1.6.1...devin/1761 (2 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 312c6d73bc |  |
|  | 26906113fe |  |
@@ -498,7 +498,9 @@ class LLM(BaseLLM):
         }

         # Remove None values from params
-        return {k: v for k, v in params.items() if v is not None}
+        params = {k: v for k, v in params.items() if v is not None}
+
+        return self._apply_additional_drop_params(params)

     def _handle_streaming_response(
         self,
@@ -223,6 +223,49 @@ class BaseLLM(ABC):
         return content

+    def _apply_additional_drop_params(self, params: dict[str, Any]) -> dict[str, Any]:
+        """Apply additional_drop_params filtering to remove unwanted parameters.
+
+        This method provides consistent parameter filtering across all LLM providers.
+        It should be called after building the final params dict and before making
+        the provider API call.
+
+        Args:
+            params: The parameters dictionary to filter
+
+        Returns:
+            Filtered parameters dictionary with specified params removed
+
+        Example:
+            >>> llm = LLM(model="o1-mini", additional_drop_params=["stop"])
+            >>> params = {"model": "o1-mini", "messages": [...], "stop": ["\\n"]}
+            >>> filtered = llm._apply_additional_drop_params(params)
+            >>> "stop" in filtered
+            False
+        """
+        drop_params = (
+            self.additional_params.get("additional_drop_params")
+            or self.additional_params.get("drop_additionnal_params")
+            or []
+        )
+
+        if not drop_params:
+            return params
+
+        filtered_params = params.copy()
+
+        for param_name in drop_params:
+            if param_name in filtered_params:
+                logging.debug(
+                    f"Dropping parameter '{param_name}' as specified in additional_drop_params"
+                )
+                filtered_params.pop(param_name)
+
+        filtered_params.pop("additional_drop_params", None)
+        filtered_params.pop("drop_additionnal_params", None)
+
+        return filtered_params
+
     def get_context_window_size(self) -> int:
         """Get the context window size for the LLM.
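For readers skimming the diff, here is a minimal standalone sketch of what the new helper does, written against a plain dict so it runs without CrewAI installed; the function name `apply_drop_params` is illustrative and not part of the change:

```python
import logging
from typing import Any


def apply_drop_params(params: dict[str, Any], drop_params: list[str]) -> dict[str, Any]:
    """Illustrative stand-in for BaseLLM._apply_additional_drop_params."""
    if not drop_params:
        return params
    filtered = params.copy()
    for name in drop_params:
        if name in filtered:
            logging.debug("Dropping parameter '%s'", name)
            filtered.pop(name)
    # The real helper also strips the control keys so they never reach the provider.
    filtered.pop("additional_drop_params", None)
    filtered.pop("drop_additionnal_params", None)
    return filtered


params = {"model": "o1-mini", "stop": ["\n"], "additional_drop_params": ["stop"]}
print(apply_drop_params(params, params["additional_drop_params"]))
# -> {'model': 'o1-mini'}
```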
@@ -201,7 +201,7 @@ class AnthropicCompletion(BaseLLM):
         if tools and self.supports_tools:
             params["tools"] = self._convert_tools_for_interference(tools)

-        return params
+        return self._apply_additional_drop_params(params)

     def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
         """Convert CrewAI tool format to Anthropic tool use format."""
@@ -273,7 +273,7 @@ class AzureCompletion(BaseLLM):
             params["tools"] = self._convert_tools_for_interference(tools)
             params["tool_choice"] = "auto"

-        return params
+        return self._apply_additional_drop_params(params)

     def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
         """Convert CrewAI tool format to Azure OpenAI function calling format."""
@@ -249,7 +249,9 @@ class OpenAICompletion(BaseLLM):
             "timeout",
         }

-        return {k: v for k, v in params.items() if k not in crewai_specific_params}
+        params = {k: v for k, v in params.items() if k not in crewai_specific_params}
+
+        return self._apply_additional_drop_params(params)

     def _convert_tools_for_interference(self, tools: list[dict]) -> list[dict]:
         """Convert CrewAI tool format to OpenAI function calling format."""
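All three provider hunks follow the same convention: build the params dict as before, then return it through the shared hook instead of returning it directly. A hypothetical sketch of that convention follows; the `ExampleCompletion` class and its attributes are invented for illustration and are not part of the diff:

```python
from typing import Any


class ExampleCompletion:
    """Toy stand-in for a BaseLLM provider, showing the new return pattern."""

    def __init__(self, model: str, temperature: float | None = None, **kwargs: Any) -> None:
        self.model = model
        self.temperature = temperature
        self.additional_params = kwargs  # assumed home of additional_drop_params

    def _apply_additional_drop_params(self, params: dict[str, Any]) -> dict[str, Any]:
        # Simplified version of the BaseLLM helper added in this change.
        drop = set(self.additional_params.get("additional_drop_params") or [])
        drop.update({"additional_drop_params", "drop_additionnal_params"})
        return {k: v for k, v in params.items() if k not in drop}

    def _prepare_completion_params(self, messages: list[dict[str, str]]) -> dict[str, Any]:
        params = {"model": self.model, "messages": messages, "temperature": self.temperature}
        params = {k: v for k, v in params.items() if v is not None}
        # New convention: every provider routes its final dict through the shared filter.
        return self._apply_additional_drop_params(params)


llm = ExampleCompletion("o1-mini", temperature=0.2, additional_drop_params=["temperature"])
print(llm._prepare_completion_params([{"role": "user", "content": "Hello"}]))
# -> {'model': 'o1-mini', 'messages': [{'role': 'user', 'content': 'Hello'}]}
```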
@@ -725,3 +725,108 @@ def test_native_provider_falls_back_to_litellm_when_not_in_supported_list():
     # Should fall back to LiteLLM
     assert llm.is_litellm is True
     assert llm.model == "groq/llama-3.1-70b-versatile"
+
+
+def test_additional_drop_params_filters_parameters_in_litellm():
+    """Test that additional_drop_params correctly filters out specified parameters in LiteLLM path."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", True), patch("crewai.llm.litellm"):
+        llm = LLM(
+            model="o1-mini",
+            stop=["stop_sequence"],
+            additional_drop_params=["stop"],
+            is_litellm=True,
+        )
+
+        messages = [{"role": "user", "content": "Hello"}]
+        params = llm._prepare_completion_params(messages)
+
+        assert "stop" not in params
+        assert "additional_drop_params" not in params
+        assert params["model"] == "o1-mini"
+
+
+def test_additional_drop_params_with_agent():
+    """Test that additional_drop_params works when LLM is used with an Agent."""
+    from unittest.mock import patch, MagicMock
+    from crewai import Agent, Task, Crew
+
+    with patch("crewai.llm.LITELLM_AVAILABLE", True), patch("crewai.llm.litellm") as mock_litellm:
+        llm = LLM(
+            model="o1-mini",
+            stop=["stop_sequence"],
+            additional_drop_params=["stop"],
+            is_litellm=True,
+        )
+
+        agent = Agent(
+            role="Test Agent",
+            goal="Test the LLM response format functionality.",
+            backstory="An AI developer testing LLM integrations.",
+            llm=llm,
+        )
+
+        task = Task(
+            description="Say hello",
+            expected_output="A greeting",
+            agent=agent,
+        )
+
+        mock_response = MagicMock()
+        mock_response.choices = [MagicMock()]
+        mock_response.choices[0].message.content = "Hello!"
+        mock_response.choices[0].message.tool_calls = None
+        mock_response.usage = MagicMock()
+        mock_response.usage.prompt_tokens = 10
+        mock_response.usage.completion_tokens = 5
+        mock_response.usage.total_tokens = 15
+        mock_litellm.completion.return_value = mock_response
+
+        crew = Crew(agents=[agent], tasks=[task], verbose=False)
+        crew.kickoff()
+
+        # Verify that litellm.completion was called
+        assert mock_litellm.completion.called
+
+        # Get the kwargs passed to litellm.completion
+        call_kwargs = mock_litellm.completion.call_args[1]
+
+        assert "stop" not in call_kwargs
+        assert "additional_drop_params" not in call_kwargs
+
+
+def test_additional_drop_params_supports_misspelled_variant():
+    """Test that drop_additionnal_params (misspelled) is also supported for backwards compatibility."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", True), patch("crewai.llm.litellm"):
+        llm = LLM(
+            model="o1-mini",
+            stop=["stop_sequence"],
+            drop_additionnal_params=["stop"],
+            is_litellm=True,
+        )
+
+        messages = [{"role": "user", "content": "Hello"}]
+        params = llm._prepare_completion_params(messages)
+
+        assert "stop" not in params
+        assert "drop_additionnal_params" not in params
+        assert params["model"] == "o1-mini"
+
+
+def test_additional_drop_params_filters_multiple_parameters():
+    """Test that additional_drop_params can filter multiple parameters."""
+    with patch("crewai.llm.LITELLM_AVAILABLE", True), patch("crewai.llm.litellm"):
+        llm = LLM(
+            model="o1-mini",
+            stop=["stop_sequence"],
+            temperature=0.7,
+            additional_drop_params=["stop", "temperature"],
+            is_litellm=True,
+        )
+
+        messages = [{"role": "user", "content": "Hello"}]
+        params = llm._prepare_completion_params(messages)
+
+        assert "stop" not in params
+        assert "temperature" not in params
+        assert "additional_drop_params" not in params
+        assert params["model"] == "o1-mini"
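In end-user code, these tests suggest the option would be exercised roughly like this (a sketch, assuming the public LLM constructor forwards additional_drop_params as the tests above do; the agent and task details are placeholders):

```python
from crewai import Agent, Crew, LLM, Task

# o1-style models reject some common completion parameters (e.g. "stop"),
# so we ask CrewAI to drop them before the request is sent.
llm = LLM(
    model="o1-mini",
    stop=["Observation:"],
    additional_drop_params=["stop"],
)

agent = Agent(
    role="Researcher",
    goal="Answer briefly",
    backstory="A placeholder agent for this example.",
    llm=llm,
)
task = Task(description="Say hello", expected_output="A greeting", agent=agent)

Crew(agents=[agent], tasks=[task]).kickoff()
```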