Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-15 20:08:29 +00:00
pytest improvements to handle flaky test (#2726)
* build(dev): add pytest-randomly dependency. Randomizing the test execution order helps identify tests that unintentionally depend on shared state or a specific execution order, which can lead to flaky or unreliable test behavior.
* build(dev): add pytest-timeout. This prevents any single test from running indefinitely.
* test: block external requests in CI and set a default 10s timeout per test.
* test: add missing cassettes. We noticed these cassettes were missing after enabling block-network on CI.
* test: increase the test timeout on CI.
* test: fix flaky test (ValueError: Circular reference detected (id repeated)).
* fix: prevent a crash when an event handler raises an exception. Previously, if a registered event handler raised an exception during execution, it could crash the entire application or interrupt the event dispatch process. This change wraps handler execution in a try/except block within the `emit` method, ensuring that exceptions are caught and logged without affecting other handlers or the flow. This improves the resilience of the event bus, especially when handling third-party or temporary listeners.
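A note for anyone chasing one of these order-dependent failures locally: pytest-randomly prints the seed it used in the run header, and replaying that seed replays the order. A short sketch of the relevant invocations (standard pytest-randomly and pytest-timeout flags; the seed value is illustrative, and the uv prefix mirrors the CI step below):

# Replay the exact order from a failing run (seed appears in that run's header)
uv run pytest --randomly-seed=1234567890 -vv
# Or reuse whatever seed the previous run used
uv run pytest --randomly-seed=last -vv
# Disable randomization entirely to confirm ordering is the culprit
uv run pytest -p no:randomly -vv
# Loosen the per-test timeout for a slow local machine
uv run pytest --timeout=120 -vv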
.github/workflows/tests.yml (2 changes, vendored)
@@ -31,4 +31,4 @@ jobs:
         run: uv sync --dev --all-extras

       - name: Run tests
-        run: uv run pytest tests -vv
+        run: uv run pytest --block-network --timeout=60 -vv
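These defaults need not live only on the CI command line; pytest-timeout also reads a `timeout` ini option, so the 10-second per-test default mentioned in the commit message would look like this in pyproject.toml (a sketch; the commit only shows the CI flag, so treat the exact placement as an assumption):

[tool.pytest.ini_options]
timeout = 10  # default per-test timeout in seconds (pytest-timeout)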
pyproject.toml

@@ -85,6 +85,8 @@ dev-dependencies = [
     "pytest-asyncio>=0.23.7",
     "pytest-subprocess>=1.5.2",
     "pytest-recording>=0.13.2",
+    "pytest-randomly>=3.16.0",
+    "pytest-timeout>=2.3.1",
 ]

 [project.scripts]
@@ -70,7 +70,12 @@ class CrewAIEventsBus:
         for event_type, handlers in self._handlers.items():
             if isinstance(event, event_type):
                 for handler in handlers:
-                    handler(source, event)
+                    try:
+                        handler(source, event)
+                    except Exception as e:
+                        print(
+                            f"[EventBus Error] Handler '{handler.__name__}' failed for event '{event_type.__name__}': {e}"
+                        )

         self._signal.send(source, event=event)
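The effect of this change is easier to see in isolation; a minimal, self-contained sketch of the same fault-isolated dispatch pattern (TinyBus and its names are illustrative, not the crewAI API):

# Minimal sketch of fault-isolated event dispatch (illustrative, not crewAI's API).
class TinyBus:
    def __init__(self):
        self._handlers = []  # callables taking (source, event)

    def on(self, handler):
        self._handlers.append(handler)
        return handler

    def emit(self, source, event):
        for handler in self._handlers:
            try:
                handler(source, event)
            except Exception as e:
                # A failing handler is logged and skipped; later handlers still run.
                print(f"[EventBus Error] Handler '{handler.__name__}' failed: {e}")


bus = TinyBus()

@bus.on
def broken(source, event):
    raise ValueError("boom")

@bus.on
def healthy(source, event):
    print(f"healthy handler saw {event!r} from {source!r}")

bus.emit("demo", {"type": "test"})  # prints the error, then the healthy handler's output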
tests/cassettes/test_docling_source.yaml (new file, 1899 lines)
File diff suppressed because it is too large.

tests/cassettes/test_multiple_docling_sources.yaml (new file, 3321 lines)
File diff suppressed because it is too large.
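These cassettes are pytest-recording/VCR fixtures: with --block-network on, a test whose HTTP traffic has no cassette fails instead of silently reaching the internet, which is how the missing files were caught. A sketch of how such cassettes are typically (re)recorded (the record-mode flag is pytest-recording's; the -k filter here is illustrative):

# Record once against the live services, writing the cassette YAML files
uv run pytest -k docling --record-mode=once
# Then confirm the tests replay offline, the way CI runs them
uv run pytest -k docling --block-network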
@@ -42,29 +42,38 @@ from crewai.utilities.events.event_listener import EventListener
 from crewai.utilities.rpm_controller import RPMController
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler

-ceo = Agent(
-    role="CEO",
-    goal="Make sure the writers in your company produce amazing content.",
-    backstory="You're an long time CEO of a content creation agency with a Senior Writer on the team. You're now working on a new project and want to make sure the content produced is amazing.",
-    allow_delegation=True,
-)
-
-researcher = Agent(
-    role="Researcher",
-    goal="Make the best research and analysis on content about AI and AI agents",
-    backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
-    allow_delegation=False,
-)
-
-writer = Agent(
-    role="Senior Writer",
-    goal="Write the best content about AI and AI agents.",
-    backstory="You're a senior writer, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on writing content for a new customer.",
-    allow_delegation=False,
-)
+@pytest.fixture
+def ceo():
+    return Agent(
+        role="CEO",
+        goal="Make sure the writers in your company produce amazing content.",
+        backstory="You're an long time CEO of a content creation agency with a Senior Writer on the team. You're now working on a new project and want to make sure the content produced is amazing.",
+        allow_delegation=True,
+    )
+
+
+@pytest.fixture
+def researcher():
+    return Agent(
+        role="Researcher",
+        goal="Make the best research and analysis on content about AI and AI agents",
+        backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
+        allow_delegation=False,
+    )
+
+
+@pytest.fixture
+def writer():
+    return Agent(
+        role="Senior Writer",
+        goal="Write the best content about AI and AI agents.",
+        backstory="You're a senior writer, specialized in technology, software engineering, AI and startups. You work as a freelancer and are now working on writing content for a new customer.",
+        allow_delegation=False,
+    )


-def test_crew_with_only_conditional_tasks_raises_error():
+def test_crew_with_only_conditional_tasks_raises_error(researcher):
     """Test that creating a crew with only conditional tasks raises an error."""

     def condition_func(task_output: TaskOutput) -> bool:
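Moving the module-level agents into fixtures is what makes the randomized ordering above safe: the old globals were mutated in place (later in this diff, test__setup_for_training sets researcher.allow_delegation = True), so results depended on which test ran first. A small illustrative sketch of the difference, outside the crewAI codebase:

import pytest

class Agent:
    # Illustrative stand-in, not crewai.Agent.
    def __init__(self, allow_delegation=False):
        self.allow_delegation = allow_delegation

# Shared module-level object: a test that mutates it leaks state into later tests.
shared_agent = Agent()

@pytest.fixture
def agent():
    # Fresh instance per test: mutations cannot leak, so order no longer matters.
    return Agent()

def test_mutates(agent):
    agent.allow_delegation = True
    assert agent.allow_delegation

def test_sees_default(agent):
    # Passes regardless of whether test_mutates ran first.
    assert agent.allow_delegation is False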
@@ -146,7 +155,9 @@ def test_crew_config_conditional_requirement():
     ]


-def test_async_task_cannot_include_sequential_async_tasks_in_context():
+def test_async_task_cannot_include_sequential_async_tasks_in_context(
+    researcher, writer
+):
     task1 = Task(
         description="Task 1",
         async_execution=True,
@@ -194,7 +205,7 @@ def test_async_task_cannot_include_sequential_async_tasks_in_context():
     pytest.fail("Unexpected ValidationError raised")


-def test_context_no_future_tasks():
+def test_context_no_future_tasks(researcher, writer):
     task2 = Task(
         description="Task 2",
         expected_output="output",
@@ -258,7 +269,7 @@ def test_crew_config_with_wrong_keys():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_creation():
+def test_crew_creation(researcher, writer):
     tasks = [
         Task(
             description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
@@ -290,7 +301,7 @@ def test_crew_creation():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_sync_task_execution():
+def test_sync_task_execution(researcher, writer):
     from unittest.mock import patch

     tasks = [
@@ -331,7 +342,7 @@ def test_sync_task_execution():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_hierarchical_process():
+def test_hierarchical_process(researcher, writer):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -352,7 +363,7 @@ def test_hierarchical_process():
 )


-def test_manager_llm_requirement_for_hierarchical_process():
+def test_manager_llm_requirement_for_hierarchical_process(researcher, writer):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -367,7 +378,7 @@ def test_manager_llm_requirement_for_hierarchical_process():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_manager_agent_delegating_to_assigned_task_agent():
+def test_manager_agent_delegating_to_assigned_task_agent(researcher, writer):
     """
     Test that the manager agent delegates to the assigned task agent.
     """
@@ -419,7 +430,7 @@ def test_manager_agent_delegating_to_assigned_task_agent():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_manager_agent_delegating_to_all_agents():
+def test_manager_agent_delegating_to_all_agents(researcher, writer):
     """
     Test that the manager agent delegates to all agents when none are specified.
     """
@@ -529,7 +540,7 @@ def test_manager_agent_delegates_with_varied_role_cases():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_with_delegating_agents():
+def test_crew_with_delegating_agents(ceo, writer):
     tasks = [
         Task(
             description="Produce and amazing 1 paragraph draft of an article about AI Agents.",
@@ -553,7 +564,7 @@ def test_crew_with_delegating_agents():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_with_delegating_agents_should_not_override_task_tools():
+def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer):
     from typing import Type

     from pydantic import BaseModel, Field
@@ -615,7 +626,7 @@ def test_crew_with_delegating_agents_should_not_override_task_tools():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_with_delegating_agents_should_not_override_agent_tools():
+def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer):
     from typing import Type

     from pydantic import BaseModel, Field
@@ -679,7 +690,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_task_tools_override_agent_tools():
+def test_task_tools_override_agent_tools(researcher):
     from typing import Type

     from pydantic import BaseModel, Field
@@ -734,7 +745,7 @@ def test_task_tools_override_agent_tools():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_task_tools_override_agent_tools_with_allow_delegation():
+def test_task_tools_override_agent_tools_with_allow_delegation(researcher, writer):
     """
     Test that task tools override agent tools while preserving delegation tools when allow_delegation=True
     """
@@ -817,7 +828,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_verbose_output(capsys):
+def test_crew_verbose_output(researcher, writer, capsys):
     tasks = [
         Task(
             description="Research AI advancements.",
@@ -877,7 +888,7 @@ def test_crew_verbose_output(capsys):


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_cache_hitting_between_agents():
+def test_cache_hitting_between_agents(researcher, writer, ceo):
     from unittest.mock import call, patch

     from crewai.tools import tool
@@ -1050,7 +1061,7 @@ def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_sequential_async_task_execution_completion():
+def test_sequential_async_task_execution_completion(researcher, writer):
     list_ideas = Task(
         description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
         expected_output="Bullet point list of 5 important events.",
@@ -1204,7 +1215,7 @@ async def test_crew_async_kickoff():

 @pytest.mark.asyncio
 @pytest.mark.vcr(filter_headers=["authorization"])
-async def test_async_task_execution_call_count():
+async def test_async_task_execution_call_count(researcher, writer):
     from unittest.mock import MagicMock, patch

     list_ideas = Task(
@@ -1707,7 +1718,7 @@ def test_agents_do_not_get_delegation_tools_with_there_is_only_one_agent():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_sequential_crew_creation_tasks_without_agents():
+def test_sequential_crew_creation_tasks_without_agents(researcher):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -1757,7 +1768,7 @@ def test_agent_usage_metrics_are_captured_for_hierarchical_process():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_hierarchical_crew_creation_tasks_with_agents():
+def test_hierarchical_crew_creation_tasks_with_agents(researcher, writer):
     """
     Agents are not required for tasks in a hierarchical process but sometimes they are still added
     This test makes sure that the manager still delegates the task to the agent even if the agent is passed in the task
@@ -1810,7 +1821,7 @@ def test_hierarchical_crew_creation_tasks_with_agents():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_hierarchical_crew_creation_tasks_with_async_execution():
+def test_hierarchical_crew_creation_tasks_with_async_execution(researcher, writer, ceo):
     """
     Tests that async tasks in hierarchical crews are handled correctly with proper delegation tools
     """
@@ -1867,7 +1878,7 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_hierarchical_crew_creation_tasks_with_sync_last():
+def test_hierarchical_crew_creation_tasks_with_sync_last(researcher, writer, ceo):
     """
     Agents are not required for tasks in a hierarchical process but sometimes they are still added
     This test makes sure that the manager still delegates the task to the agent even if the agent is passed in the task
@@ -2170,7 +2181,7 @@ def test_tools_with_custom_caching():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_conditional_task_uses_last_output():
+def test_conditional_task_uses_last_output(researcher, writer):
     """Test that conditional tasks use the last task output for condition evaluation."""
     task1 = Task(
         description="First task",
@@ -2244,7 +2255,7 @@ def test_conditional_task_uses_last_output():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_conditional_tasks_result_collection():
+def test_conditional_tasks_result_collection(researcher, writer):
     """Test that task outputs are properly collected based on execution status."""
     task1 = Task(
         description="Normal task that always executes",
@@ -2325,7 +2336,7 @@ def test_conditional_tasks_result_collection():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_multiple_conditional_tasks():
+def test_multiple_conditional_tasks(researcher, writer):
     """Test that having multiple conditional tasks in sequence works correctly."""
     task1 = Task(
         description="Initial research task",
@@ -2560,7 +2571,7 @@ def test_disabled_memory_using_contextual_memory():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_log_file_output(tmp_path):
+def test_crew_log_file_output(tmp_path, researcher):
     test_file = tmp_path / "logs.txt"
     tasks = [
         Task(
@@ -2658,7 +2669,7 @@ def test_crew_output_file_validation_failures():
         Crew(agents=[agent], tasks=[task]).kickoff()


-def test_manager_agent():
+def test_manager_agent(researcher, writer):
     from unittest.mock import patch

     task = Task(
@@ -2696,7 +2707,7 @@ def test_manager_agent():
     mock_execute_sync.assert_called()


-def test_manager_agent_in_agents_raises_exception():
+def test_manager_agent_in_agents_raises_exception(researcher, writer):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
||||
@@ -2718,7 +2729,7 @@ def test_manager_agent_in_agents_raises_exception():
|
||||
)
|
||||
|
||||
|
||||
def test_manager_agent_with_tools_raises_exception():
|
||||
def test_manager_agent_with_tools_raises_exception(researcher, writer):
|
||||
from crewai.tools import tool
|
||||
|
||||
@tool
|
||||
@@ -2755,7 +2766,7 @@ def test_manager_agent_with_tools_raises_exception():
 @patch("crewai.crew.TaskEvaluator")
 @patch("crewai.crew.Crew.copy")
 def test_crew_train_success(
-    copy_mock, task_evaluator, crew_training_handler, kickoff_mock
+    copy_mock, task_evaluator, crew_training_handler, kickoff_mock, researcher, writer
 ):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
@@ -2831,7 +2842,7 @@ def test_crew_train_success(
     assert isinstance(received_events[1], CrewTrainCompletedEvent)


-def test_crew_train_error():
+def test_crew_train_error(researcher, writer):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -2850,7 +2861,7 @@ def test_crew_train_error():
     )


-def test__setup_for_training():
+def test__setup_for_training(researcher, writer):
     researcher.allow_delegation = True
     writer.allow_delegation = True
     agents = [researcher, writer]
@@ -2881,7 +2892,7 @@ def test__setup_for_training():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_replay_feature():
+def test_replay_feature(researcher, writer):
     list_ideas = Task(
         description="Generate a list of 5 interesting ideas to explore for an article, where each bulletpoint is under 15 words.",
         expected_output="Bullet point list of 5 important events. No additional commentary.",
@@ -2918,7 +2929,7 @@ def test_replay_feature():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_replay_error():
+def test_crew_replay_error(researcher, writer):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -3314,7 +3325,7 @@ def test_replay_setup_context():
     assert crew.tasks[1].prompt_context == "context raw output"


-def test_key():
+def test_key(researcher, writer):
     tasks = [
         Task(
             description="Give me a list of 5 interesting ideas to explore for na article, what makes them unique and interesting.",
@@ -3383,7 +3394,9 @@ def test_key_with_interpolated_inputs():
     assert crew.key == curr_key


-def test_conditional_task_requirement_breaks_when_singular_conditional_task():
+def test_conditional_task_requirement_breaks_when_singular_conditional_task(
+    researcher, writer
+):
     def condition_fn(output) -> bool:
         return output.raw.startswith("Andrew Ng has!!")

@@ -3401,7 +3414,7 @@ def test_conditional_task_requirement_breaks_when_singular_conditional_task():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_conditional_task_last_task_when_conditional_is_true():
+def test_conditional_task_last_task_when_conditional_is_true(researcher, writer):
     def condition_fn(output) -> bool:
         return True

@@ -3428,7 +3441,7 @@ def test_conditional_task_last_task_when_conditional_is_true():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_conditional_task_last_task_when_conditional_is_false():
+def test_conditional_task_last_task_when_conditional_is_false(researcher, writer):
     def condition_fn(output) -> bool:
         return False

@@ -3452,7 +3465,7 @@ def test_conditional_task_last_task_when_conditional_is_false():
     assert result.raw == "Hi"


-def test_conditional_task_requirement_breaks_when_task_async():
+def test_conditional_task_requirement_breaks_when_task_async(researcher, writer):
     def my_condition(context):
         return context.get("some_value") > 10

@@ -3477,7 +3490,7 @@ def test_conditional_task_requirement_breaks_when_task_async():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_conditional_should_skip():
+def test_conditional_should_skip(researcher, writer):
     task1 = Task(description="Return hello", expected_output="say hi", agent=researcher)

     condition_mock = MagicMock(return_value=False)
@@ -3509,7 +3522,7 @@ def test_conditional_should_skip():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_conditional_should_execute():
+def test_conditional_should_execute(researcher, writer):
     task1 = Task(description="Return hello", expected_output="say hi", agent=researcher)

     condition_mock = MagicMock(
@@ -3542,7 +3555,7 @@ def test_conditional_should_execute():
 @mock.patch("crewai.crew.CrewEvaluator")
 @mock.patch("crewai.crew.Crew.copy")
 @mock.patch("crewai.crew.Crew.kickoff")
-def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
+def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator, researcher):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
||||
@@ -3592,7 +3605,7 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
def test_hierarchical_verbose_manager_agent():
|
||||
def test_hierarchical_verbose_manager_agent(researcher, writer):
|
||||
task = Task(
|
||||
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
|
||||
expected_output="5 bullet points with a paragraph for each idea.",
|
||||
@@ -3613,7 +3626,7 @@ def test_hierarchical_verbose_manager_agent():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_hierarchical_verbose_false_manager_agent():
+def test_hierarchical_verbose_false_manager_agent(researcher, writer):
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
         expected_output="5 bullet points with a paragraph for each idea.",
@@ -4186,7 +4199,7 @@ def test_before_kickoff_without_inputs():


 @pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_with_knowledge_sources_works_with_copy():
+def test_crew_with_knowledge_sources_works_with_copy(researcher, writer):
     content = "Brandon's favorite color is red and he likes Mexican food."
     string_source = StringKnowledgeSource(content=content)

@@ -4195,7 +4208,6 @@ def test_crew_with_knowledge_sources_works_with_copy():
         tasks=[Task(description="test", expected_output="test", agent=researcher)],
         knowledge_sources=[string_source],
     )

     crew_copy = crew.copy()
-
     assert crew_copy.knowledge_sources == crew.knowledge_sources
@@ -547,6 +547,7 @@ def test_excel_knowledge_source(mock_vector_db, tmpdir):
     mock_vector_db.query.assert_called_once()


+@pytest.mark.vcr
 def test_docling_source(mock_vector_db):
     docling_source = CrewDoclingSource(
         file_paths=[
@@ -567,6 +568,7 @@ def test_docling_source(mock_vector_db):
     mock_vector_db.query.assert_called_once()


+@pytest.mark.vcr
 def test_multiple_docling_sources():
     urls: List[Union[Path, str]] = [
         "https://lilianweng.github.io/posts/2024-11-28-reward-hacking/",
@@ -32,3 +32,16 @@ def test_wildcard_event_handler():
     crewai_event_bus.emit("source_object", event)

     mock_handler.assert_called_once_with("source_object", event)
+
+
+def test_event_bus_error_handling(capfd):
+    @crewai_event_bus.on(BaseEvent)
+    def broken_handler(source, event):
+        raise ValueError("Simulated handler failure")
+
+    event = TestEvent(type="test_event")
+    crewai_event_bus.emit("source_object", event)
+
+    out, err = capfd.readouterr()
+    assert "Simulated handler failure" in out
+    assert "Handler 'broken_handler' failed" in out
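One caveat on the new test: broken_handler is registered on the global bus and never removed, which is itself the kind of leaked state this PR is fighting. A hedged sketch of keeping a throwaway listener test-local, assuming the bus exposes a scoped_handlers() context manager (recent crewAI versions appear to, but verify against the installed release; imports as in the test file above):

def test_error_is_isolated(capfd):
    # Handlers registered inside the scope are dropped when the block exits,
    # so no other test can observe broken_handler.
    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(BaseEvent)
        def broken_handler(source, event):
            raise ValueError("Simulated handler failure")

        crewai_event_bus.emit("source_object", TestEvent(type="test_event"))

    out, _ = capfd.readouterr()
    assert "Simulated handler failure" in out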
uv.lock (28 changes, generated)
@@ -811,8 +811,10 @@ dev = [
     { name = "pre-commit" },
     { name = "pytest" },
     { name = "pytest-asyncio" },
+    { name = "pytest-randomly" },
     { name = "pytest-recording" },
     { name = "pytest-subprocess" },
+    { name = "pytest-timeout" },
     { name = "python-dotenv" },
     { name = "ruff" },
 ]
@@ -867,8 +869,10 @@ dev = [
     { name = "pre-commit", specifier = ">=3.6.0" },
     { name = "pytest", specifier = ">=8.0.0" },
     { name = "pytest-asyncio", specifier = ">=0.23.7" },
+    { name = "pytest-randomly", specifier = ">=3.16.0" },
     { name = "pytest-recording", specifier = ">=0.13.2" },
     { name = "pytest-subprocess", specifier = ">=1.5.2" },
+    { name = "pytest-timeout", specifier = ">=2.3.1" },
     { name = "python-dotenv", specifier = ">=1.0.0" },
     { name = "ruff", specifier = ">=0.8.2" },
 ]
@@ -4228,6 +4232,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/96/31/6607dab48616902f76885dfcf62c08d929796fc3b2d2318faf9fd54dbed9/pytest_asyncio-0.24.0-py3-none-any.whl", hash = "sha256:a811296ed596b69bf0b6f3dc40f83bcaf341b155a269052d82efa2b25ac7037b", size = 18024 },
 ]

+[[package]]
+name = "pytest-randomly"
+version = "3.16.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/c0/68/d221ed7f4a2a49a664da721b8e87b52af6dd317af2a6cb51549cf17ac4b8/pytest_randomly-3.16.0.tar.gz", hash = "sha256:11bf4d23a26484de7860d82f726c0629837cf4064b79157bd18ec9d41d7feb26", size = 13367 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/22/70/b31577d7c46d8e2f9baccfed5067dd8475262a2331ffb0bfdf19361c9bde/pytest_randomly-3.16.0-py3-none-any.whl", hash = "sha256:8633d332635a1a0983d3bba19342196807f6afb17c3eef78e02c2f85dade45d6", size = 8396 },
+]
+
 [[package]]
 name = "pytest-recording"
 version = "0.13.2"
@@ -4254,6 +4270,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/10/77/a80e8f9126b95ffd5ad4d04bd14005c68dcbf0d88f53b2b14893f6cc7232/pytest_subprocess-1.5.2-py3-none-any.whl", hash = "sha256:23ac7732aa8bd45f1757265b1316eb72a7f55b41fb21e2ca22e149ba3629fa46", size = 20886 },
 ]

+[[package]]
+name = "pytest-timeout"
+version = "2.3.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "pytest" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/93/0d/04719abc7a4bdb3a7a1f968f24b0f5253d698c9cc94975330e9d3145befb/pytest-timeout-2.3.1.tar.gz", hash = "sha256:12397729125c6ecbdaca01035b9e5239d4db97352320af155b3f5de1ba5165d9", size = 17697 }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/03/27/14af9ef8321f5edc7527e47def2a21d8118c6f329a9342cc61387a0c0599/pytest_timeout-2.3.1-py3-none-any.whl", hash = "sha256:68188cb703edfc6a18fad98dc25a3c61e9f24d644b0b70f33af545219fc7813e", size = 14148 },
+]
+
 [[package]]
 name = "python-bidi"
 version = "0.6.3"