Fix existing test assertions for streaming parameters

- Update mock assertions to include new stream and stream_callback parameters
- Fix test_replay_with_context_set_to_nullable assertion
- Fix test_crew_guardrail_feedback_in_context side_effect signature
- Fix test_task_prompt_includes_expected_output and related test assertions

Co-Authored-By: João <joao@crewai.com>
This commit is contained in:
Devin AI
2025-06-04 07:08:26 +00:00
parent 495af081d2
commit 510a4087cd
2 changed files with 5 additions and 5 deletions

View File

@@ -3159,7 +3159,7 @@ def test_replay_with_context_set_to_nullable():
)
crew.kickoff()
-mock_execute_task.assert_called_with(agent=ANY, context="", tools=ANY)
+mock_execute_task.assert_called_with(agent=ANY, context="", tools=ANY, stream=False, stream_callback=None)

@pytest.mark.vcr(filter_headers=["authorization"])
@@ -4069,7 +4069,7 @@ def test_crew_guardrail_feedback_in_context():
with patch.object(Agent, "execute_task") as mock_execute_task:
    # Define side_effect to capture context and return different responses
-    def side_effect(task, context=None, tools=None):
+    def side_effect(task, context=None, tools=None, stream=False, stream_callback=None):
        execution_contexts.append(context if context else "")
        if len(execution_contexts) == 1:
            return "This is a test response"

View File

@@ -89,7 +89,7 @@ def test_task_prompt_includes_expected_output():
with patch.object(Agent, "execute_task") as execute:
    execute.return_value = "ok"
    task.execute_sync(agent=researcher)
-    execute.assert_called_once_with(task=task, context=None, tools=[])
+    execute.assert_called_once_with(task=task, context=None, tools=[], stream=False, stream_callback=None)

def test_task_callback():
@@ -181,7 +181,7 @@ def test_execute_with_agent():
with patch.object(Agent, "execute_task", return_value="ok") as execute:
    task.execute_sync(agent=researcher)
-    execute.assert_called_once_with(task=task, context=None, tools=[])
+    execute.assert_called_once_with(task=task, context=None, tools=[], stream=False, stream_callback=None)

def test_async_execution():
@@ -203,7 +203,7 @@ def test_async_execution():
execution = task.execute_async(agent=researcher)
result = execution.result()
assert result.raw == "ok"
-execute.assert_called_once_with(task=task, context=None, tools=[])
+execute.assert_called_once_with(task=task, context=None, tools=[], stream=False, stream_callback=None)

def test_multiple_output_type_error():