Compare commits

...

2 Commits

Author SHA1 Message Date
Devin AI
cfb9d55b05 test: Fix o4-mini test to use mocking instead of real API calls
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-22 16:06:25 +00:00
Devin AI
130ed3481f fix: Remove stop parameter for o4-mini model
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-04-22 16:00:41 +00:00
2 changed files with 42 additions and 1 deletion

View File

@@ -358,7 +358,12 @@ class LLM(BaseLLM):
}
# Remove None values from params
return {k: v for k, v in params.items() if v is not None}
params = {k: v for k, v in params.items() if v is not None}
if "o4-mini" in self.model:
params.pop("stop", None)
return params
def _handle_streaming_response(
self,

View File

@@ -533,3 +533,39 @@ def test_handle_streaming_tool_calls_no_tools(mock_emit):
expected_completed_llm_call=1,
expected_final_chunk_result=response,
)
def test_llm_o4_mini_stop_parameter():
    """Test that o4-mini model works correctly without stop parameter."""
    llm = LLM(model="o4-mini", stop=["STOP", "END"])

    # The stop words are still retained on the instance itself...
    assert llm.stop == ["STOP", "END"]

    # ...but must be stripped out of the prepared completion parameters.
    prepared = llm._prepare_completion_params(
        messages=[{"role": "user", "content": "Hello"}]
    )
    assert "stop" not in prepared

    with patch("litellm.completion") as completion_mock:
        # Build a canned litellm-style response object.
        canned = MagicMock()
        canned.choices = [MagicMock()]
        canned.choices[0].message = MagicMock()
        canned.choices[0].message.content = "Test response"
        canned.usage = {
            "prompt_tokens": 5,
            "completion_tokens": 5,
            "total_tokens": 10,
        }
        completion_mock.return_value = canned

        answer = llm.call(messages=[{"role": "user", "content": "Hello, world!"}])
        assert answer == "Test response"

        # litellm must never have been handed a "stop" keyword argument.
        kwargs_passed = completion_mock.call_args[1]
        assert "stop" not in kwargs_passed