Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 00:28:31 +00:00
Merge branch 'feature/procedure_v2' into brandon/cre-107-pipeline-conditional-routing
@@ -135,29 +135,29 @@ def test_version_command_with_tools(runner):
     )
 
 
-@mock.patch("crewai.cli.cli.test_crew")
-def test_test_default_iterations(test_crew, runner):
+@mock.patch("crewai.cli.cli.evaluate_crew")
+def test_test_default_iterations(evaluate_crew, runner):
     result = runner.invoke(test)
 
-    test_crew.assert_called_once_with(3, "gpt-4o-mini")
+    evaluate_crew.assert_called_once_with(3, "gpt-4o-mini")
     assert result.exit_code == 0
     assert "Testing the crew for 3 iterations with model gpt-4o-mini" in result.output
 
 
-@mock.patch("crewai.cli.cli.test_crew")
-def test_test_custom_iterations(test_crew, runner):
+@mock.patch("crewai.cli.cli.evaluate_crew")
+def test_test_custom_iterations(evaluate_crew, runner):
     result = runner.invoke(test, ["--n_iterations", "5", "--model", "gpt-4o"])
 
-    test_crew.assert_called_once_with(5, "gpt-4o")
+    evaluate_crew.assert_called_once_with(5, "gpt-4o")
     assert result.exit_code == 0
     assert "Testing the crew for 5 iterations with model gpt-4o" in result.output
 
 
-@mock.patch("crewai.cli.cli.test_crew")
-def test_test_invalid_string_iterations(test_crew, runner):
+@mock.patch("crewai.cli.cli.evaluate_crew")
+def test_test_invalid_string_iterations(evaluate_crew, runner):
     result = runner.invoke(test, ["--n_iterations", "invalid"])
 
-    test_crew.assert_not_called()
+    evaluate_crew.assert_not_called()
     assert result.exit_code == 2
     assert (
         "Usage: test [OPTIONS]\nTry 'test --help' for help.\n\nError: Invalid value for '-n' / '--n_iterations': 'invalid' is not a valid integer.\n"
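For orientation, this is roughly the shape of the test command the CLI tests above drive after the rename: a click command with -n/--n_iterations (default 3) and --model (default gpt-4o-mini) that echoes the progress message and delegates to evaluate_crew. The option names, defaults, and echoed message come from the assertions above; the import path and the exact body in crewai's CLI are assumptions.

import click

from crewai.cli.evaluate_crew import evaluate_crew  # import path assumed


@click.command()
@click.option("-n", "--n_iterations", type=int, default=3, help="Number of iterations to evaluate the crew.")
@click.option("--model", type=str, default="gpt-4o-mini", help="Model to run the evaluation with.")
def test(n_iterations: int, model: str) -> None:
    """Evaluate the crew for the given number of iterations."""
    click.echo(f"Testing the crew for {n_iterations} iterations with model {model}")
    evaluate_crew(n_iterations, model)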
@@ -3,7 +3,7 @@ from unittest import mock
 
 import pytest
 
-from crewai.cli import test_crew
+from crewai.cli import evaluate_crew
 
 
 @pytest.mark.parametrize(
@@ -14,13 +14,13 @@ from crewai.cli import test_crew
         (10, "gpt-4"),
     ],
 )
-@mock.patch("crewai.cli.test_crew.subprocess.run")
+@mock.patch("crewai.cli.evaluate_crew.subprocess.run")
 def test_crew_success(mock_subprocess_run, n_iterations, model):
     """Test the crew function for successful execution."""
     mock_subprocess_run.return_value = subprocess.CompletedProcess(
         args=f"poetry run test {n_iterations} {model}", returncode=0
     )
-    result = test_crew.test_crew(n_iterations, model)
+    result = evaluate_crew.evaluate_crew(n_iterations, model)
 
     mock_subprocess_run.assert_called_once_with(
         ["poetry", "run", "test", str(n_iterations), model],
@@ -31,26 +31,26 @@ def test_crew_success(mock_subprocess_run, n_iterations, model):
     assert result is None
 
 
-@mock.patch("crewai.cli.test_crew.click")
+@mock.patch("crewai.cli.evaluate_crew.click")
 def test_test_crew_zero_iterations(click):
-    test_crew.test_crew(0, "gpt-4o")
+    evaluate_crew.evaluate_crew(0, "gpt-4o")
     click.echo.assert_called_once_with(
         "An unexpected error occurred: The number of iterations must be a positive integer.",
         err=True,
     )
 
 
-@mock.patch("crewai.cli.test_crew.click")
+@mock.patch("crewai.cli.evaluate_crew.click")
 def test_test_crew_negative_iterations(click):
-    test_crew.test_crew(-2, "gpt-4o")
+    evaluate_crew.evaluate_crew(-2, "gpt-4o")
     click.echo.assert_called_once_with(
         "An unexpected error occurred: The number of iterations must be a positive integer.",
         err=True,
     )
 
 
-@mock.patch("crewai.cli.test_crew.click")
-@mock.patch("crewai.cli.test_crew.subprocess.run")
+@mock.patch("crewai.cli.evaluate_crew.click")
+@mock.patch("crewai.cli.evaluate_crew.subprocess.run")
 def test_test_crew_called_process_error(mock_subprocess_run, click):
     n_iterations = 5
     mock_subprocess_run.side_effect = subprocess.CalledProcessError(
@@ -59,7 +59,7 @@ def test_test_crew_called_process_error(mock_subprocess_run, click):
         output="Error",
         stderr="Some error occurred",
     )
-    test_crew.test_crew(n_iterations, "gpt-4o")
+    evaluate_crew.evaluate_crew(n_iterations, "gpt-4o")
 
     mock_subprocess_run.assert_called_once_with(
         ["poetry", "run", "test", "5", "gpt-4o"],
@@ -78,13 +78,13 @@ def test_test_crew_called_process_error(mock_subprocess_run, click):
     )
 
 
-@mock.patch("crewai.cli.test_crew.click")
-@mock.patch("crewai.cli.test_crew.subprocess.run")
+@mock.patch("crewai.cli.evaluate_crew.click")
+@mock.patch("crewai.cli.evaluate_crew.subprocess.run")
 def test_test_crew_unexpected_exception(mock_subprocess_run, click):
     # Arrange
     n_iterations = 5
     mock_subprocess_run.side_effect = Exception("Unexpected error")
-    test_crew.test_crew(n_iterations, "gpt-4o")
+    evaluate_crew.evaluate_crew(n_iterations, "gpt-4o")
 
     mock_subprocess_run.assert_called_once_with(
         ["poetry", "run", "test", "5", "gpt-4o"],
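The renamed helper itself is not part of this diff, but the tests above pin down most of its contract: it builds the command ["poetry", "run", "test", str(n_iterations), model], rejects non-positive iteration counts with the message asserted in the zero/negative tests, and reports failures via click.echo(..., err=True). Below is a sketch consistent with those assertions; the subprocess keyword arguments and the CalledProcessError message are assumptions, since the corresponding assertions fall outside the hunks shown here.

import subprocess

import click


def evaluate_crew(n_iterations: int, model: str) -> None:
    """Run `poetry run test <n_iterations> <model>` and surface errors through click."""
    command = ["poetry", "run", "test", str(n_iterations), model]
    try:
        if n_iterations <= 0:
            raise ValueError("The number of iterations must be a positive integer.")
        # Keyword arguments here are assumed; only the command list is asserted above.
        subprocess.run(command, capture_output=False, text=True, check=True)
    except subprocess.CalledProcessError as e:
        # Message assumed; the echo assertions for this branch are not in the diff.
        click.echo(f"An error occurred while testing the crew: {e}", err=True)
    except Exception as e:
        click.echo(f"An unexpected error occurred: {e}", err=True)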
@@ -629,21 +629,18 @@ def test_sequential_async_task_execution_completion():
     list_ideas = Task(
         description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
         expected_output="Bullet point list of 5 important events.",
-        max_retry_limit=3,
         agent=researcher,
         async_execution=True,
     )
     list_important_history = Task(
         description="Research the history of AI and give me the 5 most important events that shaped the technology.",
         expected_output="Bullet point list of 5 important events.",
-        max_retry_limit=3,
         agent=researcher,
         async_execution=True,
     )
     write_article = Task(
         description="Write an article about the history of AI and its most important events.",
         expected_output="A 4 paragraph article about AI.",
-        max_retry_limit=3,
         agent=writer,
         context=[list_ideas, list_important_history],
     )
@@ -23,10 +23,7 @@ def short_term_memory():
         expected_output="A list of relevant URLs based on the search query.",
         agent=agent,
     )
-    return ShortTermMemory(crew=Crew(
-        agents=[agent],
-        tasks=[task]
-    ))
+    return ShortTermMemory(crew=Crew(agents=[agent], tasks=[task]))
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
@@ -38,7 +35,11 @@ def test_save_and_search(short_term_memory):
         agent="test_agent",
         metadata={"task": "test_task"},
     )
-    short_term_memory.save(memory)
+    short_term_memory.save(
+        value=memory.data,
+        metadata=memory.metadata,
+        agent=memory.agent,
+    )
 
     find = short_term_memory.search("test value", score_threshold=0.01)[0]
     assert find["context"] == memory.data, "Data value mismatch."
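The rewritten call above implies that ShortTermMemory.save now accepts the item's fields as keyword arguments rather than a ShortTermMemoryItem instance. Below is a hypothetical stand-in showing only that implied signature; the real method body is not part of this diff.

class ShortTermMemorySketch:
    """Toy stand-in illustrating the keyword-based save() signature implied by the test."""

    def __init__(self):
        self._items = []

    def save(self, value, metadata=None, agent=None) -> None:
        # The real implementation presumably rebuilds a memory item from these
        # fields and hands it to its storage backend; here they are just recorded.
        self._items.append({"value": value, "metadata": metadata or {}, "agent": agent})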
@@ -387,12 +387,6 @@ async def test_pipeline_with_parallel_stages_end_in_single_stage(mock_crew_factory):
     assert pipeline_result_1.token_usage == expected_token_usage
 
 
-@pytest.mark.asyncio
-async def test_pipeline_with_parallel_stages_multiple_inputs(mock_crew_factory):
-    # TODO: implement
-    pass
-
-
 def test_pipeline_rshift_operator(mock_crew_factory):
     """
     Test that the >> operator correctly creates a Pipeline from Crews and lists of Crews.
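The docstring above describes the >> operator building a Pipeline from Crews and lists of Crews. As a purely illustrative aside, that composition style can be sketched with toy classes and __rshift__; this is not the crewAI Pipeline implementation, and every name below is a placeholder.

class ToyPipeline:
    def __init__(self, stages):
        self.stages = list(stages)

    def __rshift__(self, other):
        # Appending keeps single crews and lists (parallel stages) in order.
        return ToyPipeline(self.stages + [other])


class ToyCrew:
    def __init__(self, name):
        self.name = name

    def __rshift__(self, other):
        # crew >> crew-or-list starts a new two-stage pipeline.
        return ToyPipeline([self, other])


pipeline = ToyCrew("research") >> [ToyCrew("write"), ToyCrew("edit")] >> ToyCrew("publish")
print(len(pipeline.stages))  # 3 stages: single crew, parallel list, single crew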
@@ -84,6 +84,10 @@ class TestCrewEvaluator:
             1: [10, 9, 8],
             2: [9, 8, 7],
         }
+        crew_planner.run_execution_times = {
+            1: [24, 45, 66],
+            2: [55, 33, 67],
+        }
 
         crew_planner.print_crew_evaluation_result()
@@ -98,6 +102,7 @@ class TestCrewEvaluator:
                 mock.call().add_row("Task 2", "9", "8", "8.5"),
                 mock.call().add_row("Task 3", "8", "7", "7.5"),
                 mock.call().add_row("Crew", "9.0", "8.0", "8.5"),
+                mock.call().add_row("Execution Time (s)", "135", "155", "145"),
             ]
         )
         console.assert_has_calls([mock.call(), mock.call().print(table())])
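The values in the new "Execution Time (s)" row follow directly from the run_execution_times fixture added above: each run's times are summed, and the final column averages the runs. This is just the arithmetic behind the expected values, not the evaluator's code.

run_execution_times = {1: [24, 45, 66], 2: [55, 33, 67]}

per_run_totals = [sum(times) for times in run_execution_times.values()]
crew_average = sum(per_run_totals) / len(per_run_totals)

print(per_run_totals, crew_average)  # [135, 155] 145.0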