Fix #3242: Add reasoning parameter to LLM class to disable reasoning mode

- Add reasoning parameter to LLM.__init__() method
- Implement logic in _prepare_completion_params to handle reasoning=False
- Add comprehensive tests for reasoning parameter functionality
- Ensure reasoning=False overrides reasoning_effort parameter
- Add integration test with Agent class

The fix ensures that when reasoning=False is set, the reasoning_effort
parameter is not included in the LLM completion call, effectively
disabling reasoning mode for models like Qwen and Cogito with Ollama.

Co-Authored-By: João <joao@crewai.com>
This commit is contained in:
Devin AI
2025-07-30 14:37:53 +00:00
parent 498e8dc6e8
commit 22761d74ba
3 changed files with 133 additions and 1 deletion

View File

@@ -308,6 +308,7 @@ class LLM(BaseLLM):
api_version: Optional[str] = None,
api_key: Optional[str] = None,
callbacks: List[Any] = [],
reasoning: Optional[bool] = None,
reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
stream: bool = False,
**kwargs,
@@ -332,6 +333,7 @@ class LLM(BaseLLM):
self.api_key = api_key
self.callbacks = callbacks
self.context_window_size = 0
self.reasoning = reasoning
self.reasoning_effort = reasoning_effort
self.additional_params = kwargs
self.is_anthropic = self._is_anthropic_model(model)
@@ -406,10 +408,15 @@ class LLM(BaseLLM):
"api_key": self.api_key,
"stream": self.stream,
"tools": tools,
"reasoning_effort": self.reasoning_effort,
**self.additional_params,
}
if self.reasoning is False:
# When reasoning is explicitly disabled, don't include reasoning_effort
pass
elif self.reasoning is True or self.reasoning_effort is not None:
params["reasoning_effort"] = self.reasoning_effort
# Remove None values from params
return {k: v for k, v in params.items() if v is not None}