Fix tests to properly mock LLM calls to avoid authentication errors

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date: 2025-05-26 19:07:41 +00:00
Committed by: João Moura
parent 757910439a
commit e947cb26ee

@@ -22,7 +22,11 @@ def mock_llm_responses():
 def test_agent_with_reasoning_interval(mock_llm_responses):
     """Test agent with reasoning interval."""
-    llm = LLM("gpt-3.5-turbo")
+    with patch('crewai.llm.LLM.call') as mock_llm_call:
+        mock_llm_call.return_value = mock_llm_responses["initial_reasoning"]
+    llm = MagicMock()
+    llm.call.return_value = mock_llm_responses["initial_reasoning"]
     agent = Agent(
         role="Test Agent",
@@ -55,7 +59,11 @@ def test_agent_with_reasoning_interval(mock_llm_responses):
 def test_agent_with_adaptive_reasoning(mock_llm_responses):
     """Test agent with adaptive reasoning."""
-    llm = LLM("gpt-3.5-turbo")
+    with patch('crewai.llm.LLM.call') as mock_llm_call:
+        mock_llm_call.return_value = mock_llm_responses["initial_reasoning"]
+    llm = MagicMock()
+    llm.call.return_value = mock_llm_responses["initial_reasoning"]
     agent = Agent(
         role="Test Agent",
@@ -91,7 +99,8 @@ def test_agent_with_adaptive_reasoning(mock_llm_responses):
 def test_mid_execution_reasoning_handler():
     """Test the mid-execution reasoning handler."""
-    llm = LLM("gpt-3.5-turbo")
+    llm = MagicMock()
+    llm.call.return_value = "Based on progress, I'll adjust my approach.\n\nREADY: I am ready to continue executing the task."
     agent = Agent(
         role="Test Agent",