mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-11 00:58:30 +00:00
Merge branch 'main' into devin/1738752192-fix-memory-reset-openai-dependency
@@ -49,6 +49,39 @@ writer = Agent(
)


def test_crew_with_only_conditional_tasks_raises_error():
    """Test that creating a crew with only conditional tasks raises an error."""

    def condition_func(task_output: TaskOutput) -> bool:
        return True

    conditional1 = ConditionalTask(
        description="Conditional task 1",
        expected_output="Output 1",
        agent=researcher,
        condition=condition_func,
    )
    conditional2 = ConditionalTask(
        description="Conditional task 2",
        expected_output="Output 2",
        agent=researcher,
        condition=condition_func,
    )
    conditional3 = ConditionalTask(
        description="Conditional task 3",
        expected_output="Output 3",
        agent=researcher,
        condition=condition_func,
    )

    with pytest.raises(
        pydantic_core._pydantic_core.ValidationError,
        match="Crew must include at least one non-conditional task",
    ):
        Crew(
            agents=[researcher],
            tasks=[conditional1, conditional2, conditional3],
        )


def test_crew_config_conditional_requirement():
    with pytest.raises(ValueError):
        Crew(process=Process.sequential)
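For context, here is a minimal sketch of the validation the first test exercises, assuming a Pydantic v2 model validator; the underscored names are illustrative, not the actual crewAI source:

from typing import Callable, List

from pydantic import BaseModel, model_validator


class _Task(BaseModel):
    description: str


class _ConditionalTask(_Task):
    condition: Callable[..., bool]


class _Crew(BaseModel):
    tasks: List[_Task]

    @model_validator(mode="after")
    def _require_non_conditional_task(self):
        # A ValueError raised inside a model validator surfaces as the
        # pydantic ValidationError that the test matches on.
        if self.tasks and all(isinstance(t, _ConditionalTask) for t in self.tasks):
            raise ValueError("Crew must include at least one non-conditional task")
        return self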
@@ -2060,6 +2093,195 @@ def test_tools_with_custom_caching():
    assert result.raw == "3"


@pytest.mark.vcr(filter_headers=["authorization"])
def test_conditional_task_uses_last_output():
    """Test that conditional tasks use the last task output for condition evaluation."""
    task1 = Task(
        description="First task",
        expected_output="First output",
        agent=researcher,
    )

    def condition_fails(task_output: TaskOutput) -> bool:
        # This condition will never be met
        return "never matches" in task_output.raw.lower()

    def condition_succeeds(task_output: TaskOutput) -> bool:
        # This condition will match the first task's output
        return "first success" in task_output.raw.lower()

    conditional_task1 = ConditionalTask(
        description="Second task - conditional that fails condition",
        expected_output="Second output",
        agent=researcher,
        condition=condition_fails,
    )

    conditional_task2 = ConditionalTask(
        description="Third task - conditional that succeeds using first task output",
        expected_output="Third output",
        agent=writer,
        condition=condition_succeeds,
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[task1, conditional_task1, conditional_task2],
    )

    # Mock outputs for tasks
    mock_first = TaskOutput(
        description="First task output",
        raw="First success output",  # Will be used by third task's condition
        agent=researcher.role,
    )
    mock_skipped = TaskOutput(
        description="Second task output",
        raw="",  # Empty output since condition fails
        agent=researcher.role,
    )
    mock_third = TaskOutput(
        description="Third task output",
        raw="Third task executed",  # Output when condition succeeds using first task output
        agent=writer.role,
    )

    # Set up mocks for task execution and conditional logic
    with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
        # First conditional fails, second succeeds
        mock_should_execute.side_effect = [False, True]

        with patch.object(Task, "execute_sync") as mock_execute:
            mock_execute.side_effect = [mock_first, mock_third]
            result = crew.kickoff()

            # Verify execution behavior
            assert mock_execute.call_count == 2  # Only first and third tasks execute
            assert mock_should_execute.call_count == 2  # Both conditionals checked

            # Verify outputs collection
            assert len(result.tasks_output) == 3
            assert result.tasks_output[0].raw == "First success output"  # First task succeeded
            assert result.tasks_output[1].raw == ""  # Second task skipped (condition failed)
            assert result.tasks_output[2].raw == "Third task executed"  # Third task used first task's output
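The mocked should_execute above stands in for the condition check. A hedged sketch of what that check plausibly does (an assumption drawn from how the tests call it, not the library source):

class ConditionalTaskSketch:
    """Illustrative stand-in for ConditionalTask's condition handling."""

    def __init__(self, condition):
        self.condition = condition

    def should_execute(self, context) -> bool:
        # `context` is the previous task's TaskOutput; the stored condition
        # callable decides whether this task runs at all.
        return self.condition(context)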
@pytest.mark.vcr(filter_headers=["authorization"])
def test_conditional_tasks_result_collection():
    """Test that task outputs are properly collected based on execution status."""
    task1 = Task(
        description="Normal task that always executes",
        expected_output="First output",
        agent=researcher,
    )

    def condition_never_met(task_output: TaskOutput) -> bool:
        return "never matches" in task_output.raw.lower()

    def condition_always_met(task_output: TaskOutput) -> bool:
        return "success" in task_output.raw.lower()

    task2 = ConditionalTask(
        description="Conditional task that never executes",
        expected_output="Second output",
        agent=researcher,
        condition=condition_never_met,
    )

    task3 = ConditionalTask(
        description="Conditional task that always executes",
        expected_output="Third output",
        agent=writer,
        condition=condition_always_met,
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[task1, task2, task3],
    )

    # Mock outputs for different execution paths
    mock_success = TaskOutput(
        description="Success output",
        raw="Success output",  # Triggers third task's condition
        agent=researcher.role,
    )
    mock_skipped = TaskOutput(
        description="Skipped output",
        raw="",  # Empty output for skipped task
        agent=researcher.role,
    )
    mock_conditional = TaskOutput(
        description="Conditional output",
        raw="Conditional task executed",
        agent=writer.role,
    )

    # Set up mocks for task execution and conditional logic
    with patch.object(ConditionalTask, "should_execute") as mock_should_execute:
        # First conditional fails, second succeeds
        mock_should_execute.side_effect = [False, True]

        with patch.object(Task, "execute_sync") as mock_execute:
            mock_execute.side_effect = [mock_success, mock_conditional]
            result = crew.kickoff()

            # Verify execution behavior
            assert mock_execute.call_count == 2  # Only first and third tasks execute
            assert mock_should_execute.call_count == 2  # Both conditionals checked

            # Verify task output collection
            assert len(result.tasks_output) == 3
            assert result.tasks_output[0].raw == "Success output"  # Normal task executed
            assert result.tasks_output[1].raw == ""  # Second task skipped
            assert result.tasks_output[2].raw == "Conditional task executed"  # Third task executed
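These assertions rely on a skipped conditional still contributing an entry to tasks_output. A minimal sketch of that placeholder, assuming a helper along these lines (illustrative, not the actual crewAI helper):

def skipped_task_output(task) -> TaskOutput:
    # An empty raw string marks the task as skipped while keeping
    # tasks_output aligned one-to-one with the crew's task list.
    return TaskOutput(
        description=task.description,
        raw="",
        agent=task.agent.role if task.agent else "",
    )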
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multiple_conditional_tasks():
    """Test that having multiple conditional tasks in sequence works correctly."""
    task1 = Task(
        description="Initial research task",
        expected_output="Research output",
        agent=researcher,
    )

    def condition1(task_output: TaskOutput) -> bool:
        return "success" in task_output.raw.lower()

    def condition2(task_output: TaskOutput) -> bool:
        return "proceed" in task_output.raw.lower()

    task2 = ConditionalTask(
        description="First conditional task",
        expected_output="Conditional output 1",
        agent=writer,
        condition=condition1,
    )

    task3 = ConditionalTask(
        description="Second conditional task",
        expected_output="Conditional output 2",
        agent=writer,
        condition=condition2,
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[task1, task2, task3],
    )

    # Mock different task outputs to test conditional logic
    mock_success = TaskOutput(
        description="Mock success",
        raw="Success and proceed output",
        agent=researcher.role,
    )

    # Set up mocks for task execution
    with patch.object(Task, "execute_sync", return_value=mock_success) as mock_execute:
        result = crew.kickoff()
        # Verify all tasks were executed (no IndexError)
        assert mock_execute.call_count == 3
        assert len(result.tasks_output) == 3
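The regression this test guards against is a driver loop that indexes outputs by task position. A sketch of the last-output-based sequencing the assertions imply (an assumed shape, not the actual kickoff implementation):

def run_sequential(tasks):
    outputs = []
    previous = None
    for task in tasks:
        is_conditional = isinstance(task, ConditionalTask)
        if is_conditional and previous is not None and not task.should_execute(previous):
            # Record a placeholder instead of indexing into `outputs`, so
            # several conditional tasks in a row cannot raise an IndexError.
            outputs.append(task.get_skipped_task_output())  # assumed helper
            continue
        previous = task.execute_sync(agent=task.agent)
        outputs.append(previous)
    return outputs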
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory():
    from unittest.mock import patch
@@ -286,6 +286,79 @@ def test_o3_mini_reasoning_effort_medium():
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.fixture
def anthropic_llm():
    """Fixture providing an Anthropic LLM instance."""
    return LLM(model="anthropic/claude-3-sonnet")


@pytest.fixture
def system_message():
    """Fixture providing a system message."""
    return {"role": "system", "content": "test"}


@pytest.fixture
def user_message():
    """Fixture providing a user message."""
    return {"role": "user", "content": "test"}


def test_anthropic_message_formatting_edge_cases(anthropic_llm):
    """Test edge cases for Anthropic message formatting."""
    # Test None messages
    with pytest.raises(TypeError, match="Messages cannot be None"):
        anthropic_llm._format_messages_for_provider(None)

    # Test empty message list
    formatted = anthropic_llm._format_messages_for_provider([])
    assert len(formatted) == 1
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."

    # Test invalid message format
    with pytest.raises(TypeError, match="Invalid message format"):
        anthropic_llm._format_messages_for_provider([{"invalid": "message"}])
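Taken together, these edge cases pin down the formatting contract. A minimal sketch consistent with them, assuming the function is invoked for Anthropic models, which require the conversation to start with a user turn (a sketch of the contract, not the actual implementation):

def format_messages_for_anthropic(messages):
    if messages is None:
        raise TypeError("Messages cannot be None")
    if any(not isinstance(m, dict) or "role" not in m or "content" not in m for m in messages):
        raise TypeError("Invalid message format")
    if not messages or messages[0]["role"] != "user":
        # Prepend a placeholder user turn, matching the "." content asserted above.
        return [{"role": "user", "content": "."}] + messages
    return messages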
def test_anthropic_model_detection():
    """Test Anthropic model detection with various formats."""
    models = [
        ("anthropic/claude-3", True),
        ("claude-instant", True),
        ("claude/v1", True),
        ("gpt-4", False),
        ("", False),
        ("anthropomorphic", False),  # Should not match partial words
    ]

    for model, expected in models:
        llm = LLM(model=model)
        assert llm.is_anthropic == expected, f"Failed for model: {model}"
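The "anthropomorphic" case suggests prefix matching rather than a bare substring search. A sketch consistent with the table above (an assumption about the detection logic, not the library's code):

def looks_like_anthropic_model(model: str) -> bool:
    # Prefix checks accept "anthropic/...", "claude-...", and "claude/..."
    # while rejecting lookalikes such as "anthropomorphic".
    return model.lower().startswith(("anthropic/", "claude-", "claude/"))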
def test_anthropic_message_formatting(anthropic_llm, system_message, user_message):
    """Test Anthropic message formatting with fixtures."""
    # Test when first message is system
    formatted = anthropic_llm._format_messages_for_provider([system_message])
    assert len(formatted) == 2
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."
    assert formatted[1] == system_message

    # Test when first message is already user
    formatted = anthropic_llm._format_messages_for_provider([user_message])
    assert len(formatted) == 1
    assert formatted[0] == user_message

    # Test with empty message list
    formatted = anthropic_llm._format_messages_for_provider([])
    assert len(formatted) == 1
    assert formatted[0]["role"] == "user"
    assert formatted[0]["content"] == "."

    # Test with non-Anthropic model (should not modify messages)
    non_anthropic_llm = LLM(model="gpt-4")
    formatted = non_anthropic_llm._format_messages_for_provider([system_message])
    assert len(formatted) == 1
    assert formatted[0] == system_message
def test_deepseek_r1_with_open_router():
    if not os.getenv("OPEN_ROUTER_API_KEY"):
        pytest.skip("OPEN_ROUTER_API_KEY not set; skipping test.")