Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 16:18:30 +00:00
fix linting issues in new tests (#2089)
Co-authored-by: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com>
parent 90b3145e92
commit c408368267
committed by GitHub
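Most of the diff is a mechanical restyling of assert statements that carry a failure message: instead of parenthesizing the message after the condition, the condition itself is wrapped and the message stays on the closing line. Condensed from the hunks below (illustrative excerpt, indentation elided):

    # before
    assert any(isinstance(tool, TestTool) for tool in tools), (
        "TestTool should be present"
    )

    # after
    assert any(
        isinstance(tool, TestTool) for tool in tools
    ), "TestTool should be present"

The remaining hunks add missing blank lines between function definitions and drop the mock_skipped TaskOutput fixtures that the conditional-task tests appear never to reference.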
@@ -51,6 +51,7 @@ writer = Agent(
 
 def test_crew_with_only_conditional_tasks_raises_error():
     """Test that creating a crew with only conditional tasks raises an error."""
+
     def condition_func(task_output: TaskOutput) -> bool:
         return True
 
@@ -82,6 +83,7 @@ def test_crew_with_only_conditional_tasks_raises_error():
             tasks=[conditional1, conditional2, conditional3],
         )
 
+
 def test_crew_config_conditional_requirement():
     with pytest.raises(ValueError):
         Crew(process=Process.sequential)
@@ -589,12 +591,12 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():
         _, kwargs = mock_execute_sync.call_args
         tools = kwargs["tools"]
 
-        assert any(isinstance(tool, TestTool) for tool in tools), (
-            "TestTool should be present"
-        )
-        assert any("delegate" in tool.name.lower() for tool in tools), (
-            "Delegation tool should be present"
-        )
+        assert any(
+            isinstance(tool, TestTool) for tool in tools
+        ), "TestTool should be present"
+        assert any(
+            "delegate" in tool.name.lower() for tool in tools
+        ), "Delegation tool should be present"
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -653,12 +655,12 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():
         _, kwargs = mock_execute_sync.call_args
         tools = kwargs["tools"]
 
-        assert any(isinstance(tool, TestTool) for tool in new_ceo.tools), (
-            "TestTool should be present"
-        )
-        assert any("delegate" in tool.name.lower() for tool in tools), (
-            "Delegation tool should be present"
-        )
+        assert any(
+            isinstance(tool, TestTool) for tool in new_ceo.tools
+        ), "TestTool should be present"
+        assert any(
+            "delegate" in tool.name.lower() for tool in tools
+        ), "Delegation tool should be present"
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -782,17 +784,17 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
         used_tools = kwargs["tools"]
 
         # Confirm AnotherTestTool is present but TestTool is not
-        assert any(isinstance(tool, AnotherTestTool) for tool in used_tools), (
-            "AnotherTestTool should be present"
-        )
-        assert not any(isinstance(tool, TestTool) for tool in used_tools), (
-            "TestTool should not be present among used tools"
-        )
+        assert any(
+            isinstance(tool, AnotherTestTool) for tool in used_tools
+        ), "AnotherTestTool should be present"
+        assert not any(
+            isinstance(tool, TestTool) for tool in used_tools
+        ), "TestTool should not be present among used tools"
 
         # Confirm delegation tool(s) are present
-        assert any("delegate" in tool.name.lower() for tool in used_tools), (
-            "Delegation tool should be present"
-        )
+        assert any(
+            "delegate" in tool.name.lower() for tool in used_tools
+        ), "Delegation tool should be present"
 
         # Finally, make sure the agent's original tools remain unchanged
         assert len(researcher_with_delegation.tools) == 1
@@ -1593,9 +1595,9 @@ def test_code_execution_flag_adds_code_tool_upon_kickoff():
 
         # Verify that exactly one tool was used and it was a CodeInterpreterTool
         assert len(used_tools) == 1, "Should have exactly one tool"
-        assert isinstance(used_tools[0], CodeInterpreterTool), (
-            "Tool should be CodeInterpreterTool"
-        )
+        assert isinstance(
+            used_tools[0], CodeInterpreterTool
+        ), "Tool should be CodeInterpreterTool"
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -1952,6 +1954,7 @@ def test_task_callback_on_crew():
 
 def test_task_callback_both_on_task_and_crew():
     from unittest.mock import MagicMock, patch
+
     mock_callback_on_task = MagicMock()
     mock_callback_on_crew = MagicMock()
 
@@ -2101,21 +2104,22 @@ def test_conditional_task_uses_last_output():
         expected_output="First output",
         agent=researcher,
     )
+
     def condition_fails(task_output: TaskOutput) -> bool:
         # This condition will never be met
         return "never matches" in task_output.raw.lower()
 
     def condition_succeeds(task_output: TaskOutput) -> bool:
         # This condition will match first task's output
         return "first success" in task_output.raw.lower()
 
     conditional_task1 = ConditionalTask(
         description="Second task - conditional that fails condition",
         expected_output="Second output",
         agent=researcher,
         condition=condition_fails,
     )
 
     conditional_task2 = ConditionalTask(
         description="Third task - conditional that succeeds using first task output",
         expected_output="Third output",
@@ -2134,35 +2138,37 @@ def test_conditional_task_uses_last_output():
         raw="First success output",  # Will be used by third task's condition
         agent=researcher.role,
     )
-    mock_skipped = TaskOutput(
-        description="Second task output",
-        raw="",  # Empty output since condition fails
-        agent=researcher.role,
-    )
     mock_third = TaskOutput(
         description="Third task output",
         raw="Third task executed",  # Output when condition succeeds using first task output
         agent=writer.role,
     )
 
     # Set up mocks for task execution and conditional logic
     with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
         # First conditional fails, second succeeds
         mock_should_execute.side_effect = [False, True]
 
         with patch.object(Task, "execute_sync") as mock_execute:
             mock_execute.side_effect = [mock_first, mock_third]
             result = crew.kickoff()
 
     # Verify execution behavior
     assert mock_execute.call_count == 2  # Only first and third tasks execute
     assert mock_should_execute.call_count == 2  # Both conditionals checked
 
-    # Verify outputs collection
+    # Verify outputs collection:
+    # First executed task output, followed by an automatically generated (skipped) output, then the conditional execution
     assert len(result.tasks_output) == 3
-    assert result.tasks_output[0].raw == "First success output"  # First task succeeded
-    assert result.tasks_output[1].raw == ""  # Second task skipped (condition failed)
-    assert result.tasks_output[2].raw == "Third task executed"  # Third task used first task's output
+    assert (
+        result.tasks_output[0].raw == "First success output"
+    )  # First task succeeded
+    assert (
+        result.tasks_output[1].raw == ""
+    )  # Second task skipped (condition failed)
+    assert (
+        result.tasks_output[2].raw == "Third task executed"
+    )  # Third task used first task's output
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_conditional_tasks_result_collection():
@@ -2172,20 +2178,20 @@ def test_conditional_tasks_result_collection():
         expected_output="First output",
         agent=researcher,
     )
 
     def condition_never_met(task_output: TaskOutput) -> bool:
         return "never matches" in task_output.raw.lower()
 
     def condition_always_met(task_output: TaskOutput) -> bool:
         return "success" in task_output.raw.lower()
 
     task2 = ConditionalTask(
         description="Conditional task that never executes",
         expected_output="Second output",
         agent=researcher,
         condition=condition_never_met,
     )
 
     task3 = ConditionalTask(
         description="Conditional task that always executes",
         expected_output="Third output",
@@ -2204,35 +2210,46 @@ def test_conditional_tasks_result_collection():
         raw="Success output",  # Triggers third task's condition
         agent=researcher.role,
     )
-    mock_skipped = TaskOutput(
-        description="Skipped output",
-        raw="",  # Empty output for skipped task
-        agent=researcher.role,
-    )
     mock_conditional = TaskOutput(
         description="Conditional output",
         raw="Conditional task executed",
         agent=writer.role,
     )
 
     # Set up mocks for task execution and conditional logic
     with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
         # First conditional fails, second succeeds
         mock_should_execute.side_effect = [False, True]
 
         with patch.object(Task, "execute_sync") as mock_execute:
             mock_execute.side_effect = [mock_success, mock_conditional]
             result = crew.kickoff()
 
     # Verify execution behavior
     assert mock_execute.call_count == 2  # Only first and third tasks execute
     assert mock_should_execute.call_count == 2  # Both conditionals checked
 
+    # Verify task output collection:
+    # There should be three outputs: normal task, skipped conditional task (empty output),
+    # and the conditional task that executed.
+    assert len(result.tasks_output) == 3
+    assert (
+        result.tasks_output[0].raw == "Success output"
+    )  # Normal task executed
+    assert result.tasks_output[1].raw == ""  # Second task skipped
+    assert (
+        result.tasks_output[2].raw == "Conditional task executed"
+    )  # Third task executed
 
     # Verify task output collection
     assert len(result.tasks_output) == 3
-    assert result.tasks_output[0].raw == "Success output"  # Normal task executed
-    assert result.tasks_output[1].raw == ""  # Second task skipped
-    assert result.tasks_output[2].raw == "Conditional task executed"  # Third task executed
+    assert (
+        result.tasks_output[0].raw == "Success output"
+    )  # Normal task executed
+    assert result.tasks_output[1].raw == ""  # Second task skipped
+    assert (
+        result.tasks_output[2].raw == "Conditional task executed"
+    )  # Third task executed
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_multiple_conditional_tasks():
@@ -2242,20 +2259,20 @@ def test_multiple_conditional_tasks():
         expected_output="Research output",
         agent=researcher,
     )
 
     def condition1(task_output: TaskOutput) -> bool:
         return "success" in task_output.raw.lower()
 
     def condition2(task_output: TaskOutput) -> bool:
         return "proceed" in task_output.raw.lower()
 
     task2 = ConditionalTask(
         description="First conditional task",
         expected_output="Conditional output 1",
         agent=writer,
         condition=condition1,
     )
 
     task3 = ConditionalTask(
         description="Second conditional task",
         expected_output="Conditional output 2",
@@ -2274,7 +2291,7 @@ def test_multiple_conditional_tasks():
         raw="Success and proceed output",
         agent=researcher.role,
     )
 
     # Set up mocks for task execution
     with patch.object(Task, "execute_sync", return_value=mock_success) as mock_execute:
         result = crew.kickoff()
||||||
@@ -2282,6 +2299,7 @@ def test_multiple_conditional_tasks():
|
|||||||
assert mock_execute.call_count == 3
|
assert mock_execute.call_count == 3
|
||||||
assert len(result.tasks_output) == 3
|
assert len(result.tasks_output) == 3
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
def test_using_contextual_memory():
|
def test_using_contextual_memory():
|
||||||
from unittest.mock import patch
|
from unittest.mock import patch
|
||||||
@@ -3400,9 +3418,9 @@ def test_fetch_inputs():
|
|||||||
expected_placeholders = {"role_detail", "topic", "field"}
|
expected_placeholders = {"role_detail", "topic", "field"}
|
||||||
actual_placeholders = crew.fetch_inputs()
|
actual_placeholders = crew.fetch_inputs()
|
||||||
|
|
||||||
assert actual_placeholders == expected_placeholders, (
|
assert (
|
||||||
f"Expected {expected_placeholders}, but got {actual_placeholders}"
|
actual_placeholders == expected_placeholders
|
||||||
)
|
), f"Expected {expected_placeholders}, but got {actual_placeholders}"
|
||||||
|
|
||||||
|
|
||||||
def test_task_tools_preserve_code_execution_tools():
|
def test_task_tools_preserve_code_execution_tools():
|
||||||
@@ -3475,20 +3493,20 @@ def test_task_tools_preserve_code_execution_tools():
|
|||||||
used_tools = kwargs["tools"]
|
used_tools = kwargs["tools"]
|
||||||
|
|
||||||
# Verify all expected tools are present
|
# Verify all expected tools are present
|
||||||
assert any(isinstance(tool, TestTool) for tool in used_tools), (
|
assert any(
|
||||||
"Task's TestTool should be present"
|
isinstance(tool, TestTool) for tool in used_tools
|
||||||
)
|
), "Task's TestTool should be present"
|
||||||
assert any(isinstance(tool, CodeInterpreterTool) for tool in used_tools), (
|
assert any(
|
||||||
"CodeInterpreterTool should be present"
|
isinstance(tool, CodeInterpreterTool) for tool in used_tools
|
||||||
)
|
), "CodeInterpreterTool should be present"
|
||||||
assert any("delegate" in tool.name.lower() for tool in used_tools), (
|
assert any(
|
||||||
"Delegation tool should be present"
|
"delegate" in tool.name.lower() for tool in used_tools
|
||||||
)
|
), "Delegation tool should be present"
|
||||||
|
|
||||||
# Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
|
# Verify the total number of tools (TestTool + CodeInterpreter + 2 delegation tools)
|
||||||
assert len(used_tools) == 4, (
|
assert (
|
||||||
"Should have TestTool, CodeInterpreter, and 2 delegation tools"
|
len(used_tools) == 4
|
||||||
)
|
), "Should have TestTool, CodeInterpreter, and 2 delegation tools"
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||||
@@ -3532,9 +3550,9 @@ def test_multimodal_flag_adds_multimodal_tools():
         used_tools = kwargs["tools"]
 
         # Check that the multimodal tool was added
-        assert any(isinstance(tool, AddImageTool) for tool in used_tools), (
-            "AddImageTool should be present when agent is multimodal"
-        )
+        assert any(
+            isinstance(tool, AddImageTool) for tool in used_tools
+        ), "AddImageTool should be present when agent is multimodal"
 
         # Verify we have exactly one tool (just the AddImageTool)
         assert len(used_tools) == 1, "Should only have the AddImageTool"
@@ -3760,9 +3778,9 @@ def test_crew_guardrail_feedback_in_context():
     assert len(execution_contexts) > 1, "Task should have been executed multiple times"
 
     # Verify that the second execution included the guardrail feedback
-    assert "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1], (
-        "Guardrail feedback should be included in retry context"
-    )
+    assert (
+        "Output must contain the keyword 'IMPORTANT'" in execution_contexts[1]
+    ), "Guardrail feedback should be included in retry context"
 
     # Verify final output meets guardrail requirements
     assert "IMPORTANT" in result.raw, "Final output should contain required keyword"
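Both layouts above keep the message as the second operand of the assert statement, so the reformatting does not change test behaviour. The layout worth avoiding, and the reason the message is never folded into the condition's parentheses, is a fully parenthesized (condition, message) pair, which is a non-empty tuple and therefore always truthy. A minimal illustration with hypothetical values, not taken from the crewAI test suite:

    # Hypothetical data for illustration only.
    tools = ["delegate work to coworker"]

    # Anti-pattern: a parenthesized (condition, message) pair is a 2-tuple, which is
    # always truthy, so this "assertion" passes even when the condition is false.
    # CPython emits a SyntaxWarning ("assertion is always true") for this layout.
    assert ("delegate" in " ".join(tools), "Delegation tool should be present")

    # Correct: condition and message are the two operands of the assert statement,
    # as in both the old and the new style in the diff above.
    assert "delegate" in " ".join(tools), "Delegation tool should be present"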