mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-25 16:18:13 +00:00
Fix #3242: Add reasoning parameter to LLM class to disable reasoning mode
- Add a `reasoning` parameter to the `LLM.__init__()` method
- Implement logic in `_prepare_completion_params` to handle `reasoning=False`
- Add comprehensive tests for the `reasoning` parameter functionality
- Ensure `reasoning=False` overrides the `reasoning_effort` parameter
- Add an integration test with the `Agent` class

The fix ensures that when `reasoning=False` is set, the `reasoning_effort` parameter is not included in the LLM completion call, effectively disabling reasoning mode for models like Qwen and Cogito with Ollama.

Co-Authored-By: João <joao@crewai.com>
This commit is contained in:
@@ -2,6 +2,7 @@
|
||||
|
||||
import json
|
||||
import pytest
|
||||
from unittest.mock import patch
|
||||
|
||||
from crewai import Agent, Task
|
||||
from crewai.llm import LLM
|
||||
@@ -259,3 +260,31 @@ def test_agent_with_function_calling_fallback():
|
||||
assert result == "4"
|
||||
assert "Reasoning Plan:" in task.description
|
||||
assert "Invalid JSON that will trigger fallback" in task.description
|
||||
|
||||
|
||||
def test_agent_with_llm_reasoning_disabled():
    """Verify that an agent whose LLM is built with reasoning=False produces
    the mocked answer and does not inject a reasoning plan into the task
    description."""
    # LLM constructed with reasoning explicitly turned off.
    non_reasoning_llm = LLM("gpt-3.5-turbo", reasoning=False)

    test_agent = Agent(
        role="Test Agent",
        goal="To test the LLM reasoning parameter",
        backstory="I am a test agent created to verify the LLM reasoning parameter works correctly.",
        llm=non_reasoning_llm,
        reasoning=False,
        verbose=True
    )

    math_task = Task(
        description="Simple math task: What's 3+3?",
        expected_output="The answer should be a number.",
        agent=test_agent
    )

    # Stub the LLM call so the test never hits a real model endpoint.
    with patch.object(test_agent.llm, 'call', return_value="6"):
        result = test_agent.execute_task(math_task)

        assert result == "6"
        # Reasoning is disabled, so no plan should be appended to the task.
        assert "Reasoning Plan:" not in math_task.description
|
||||
|
||||
Reference in New Issue
Block a user