feat: Add execution time to both task and testing feature (#1031)

* feat: Add execution time to both task and testing feature

* feat: Remove unused functions

* feat: change test_crew to evaluate_crew to avoid issues with testing libs

* feat: fix tests
Author: Eduardo Chiarotti
Date: 2024-07-29 23:17:07 -03:00
Committed by: GitHub
Parent: c82f612c07
Commit: 18d867aa47
7 changed files with 62 additions and 33 deletions

Changed file: CLI command tests

@@ -135,29 +135,29 @@ def test_version_command_with_tools(runner):
     )


-@mock.patch("crewai.cli.cli.test_crew")
-def test_test_default_iterations(test_crew, runner):
+@mock.patch("crewai.cli.cli.evaluate_crew")
+def test_test_default_iterations(evaluate_crew, runner):
     result = runner.invoke(test)

-    test_crew.assert_called_once_with(3, "gpt-4o-mini")
+    evaluate_crew.assert_called_once_with(3, "gpt-4o-mini")
     assert result.exit_code == 0
     assert "Testing the crew for 3 iterations with model gpt-4o-mini" in result.output


-@mock.patch("crewai.cli.cli.test_crew")
-def test_test_custom_iterations(test_crew, runner):
+@mock.patch("crewai.cli.cli.evaluate_crew")
+def test_test_custom_iterations(evaluate_crew, runner):
     result = runner.invoke(test, ["--n_iterations", "5", "--model", "gpt-4o"])

-    test_crew.assert_called_once_with(5, "gpt-4o")
+    evaluate_crew.assert_called_once_with(5, "gpt-4o")
     assert result.exit_code == 0
     assert "Testing the crew for 5 iterations with model gpt-4o" in result.output


-@mock.patch("crewai.cli.cli.test_crew")
-def test_test_invalid_string_iterations(test_crew, runner):
+@mock.patch("crewai.cli.cli.evaluate_crew")
+def test_test_invalid_string_iterations(evaluate_crew, runner):
     result = runner.invoke(test, ["--n_iterations", "invalid"])

-    test_crew.assert_not_called()
+    evaluate_crew.assert_not_called()
     assert result.exit_code == 2
     assert (
         "Usage: test [OPTIONS]\nTry 'test --help' for help.\n\nError: Invalid value for '-n' / '--n_iterations': 'invalid' is not a valid integer.\n"

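For context, the assertions in the hunk above are consistent with a click command roughly like the following. This is a minimal sketch only: the option names, defaults, and echoed message are inferred from the test expectations, while the import path for `evaluate_crew` and the help texts are assumptions.

```python
# Sketch of the `test` command in crewai/cli/cli.py, inferred from the tests
# above; the actual crewai implementation may differ.
import click

from crewai.cli.evaluate_crew import evaluate_crew  # assumed import path


@click.command()
@click.option("-n", "--n_iterations", type=int, default=3, help="Number of iterations to test the crew")
@click.option("--model", type=str, default="gpt-4o-mini", help="LLM model to run the tests on")
def test(n_iterations: int, model: str) -> None:
    """Test the crew and evaluate the results."""
    click.echo(f"Testing the crew for {n_iterations} iterations with model {model}")
    evaluate_crew(n_iterations, model)
```

Because `--n_iterations` is declared with `type=int`, click rejects a non-integer value with exit code 2 before the callback runs, which is why `evaluate_crew.assert_not_called()` holds in the invalid-input test.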
Changed file: evaluate_crew helper tests

@@ -3,7 +3,7 @@ from unittest import mock

 import pytest

-from crewai.cli import test_crew
+from crewai.cli import evaluate_crew


 @pytest.mark.parametrize(
@@ -14,13 +14,13 @@ from crewai.cli import test_crew
         (10, "gpt-4"),
     ],
 )
-@mock.patch("crewai.cli.test_crew.subprocess.run")
+@mock.patch("crewai.cli.evaluate_crew.subprocess.run")
 def test_crew_success(mock_subprocess_run, n_iterations, model):
     """Test the crew function for successful execution."""
     mock_subprocess_run.return_value = subprocess.CompletedProcess(
         args=f"poetry run test {n_iterations} {model}", returncode=0
     )
-    result = test_crew.test_crew(n_iterations, model)
+    result = evaluate_crew.evaluate_crew(n_iterations, model)

     mock_subprocess_run.assert_called_once_with(
         ["poetry", "run", "test", str(n_iterations), model],
@@ -31,26 +31,26 @@ def test_crew_success(mock_subprocess_run, n_iterations, model):
     assert result is None


-@mock.patch("crewai.cli.test_crew.click")
+@mock.patch("crewai.cli.evaluate_crew.click")
 def test_test_crew_zero_iterations(click):
-    test_crew.test_crew(0, "gpt-4o")
+    evaluate_crew.evaluate_crew(0, "gpt-4o")
     click.echo.assert_called_once_with(
         "An unexpected error occurred: The number of iterations must be a positive integer.",
         err=True,
     )


-@mock.patch("crewai.cli.test_crew.click")
+@mock.patch("crewai.cli.evaluate_crew.click")
 def test_test_crew_negative_iterations(click):
-    test_crew.test_crew(-2, "gpt-4o")
+    evaluate_crew.evaluate_crew(-2, "gpt-4o")
     click.echo.assert_called_once_with(
         "An unexpected error occurred: The number of iterations must be a positive integer.",
         err=True,
     )


-@mock.patch("crewai.cli.test_crew.click")
-@mock.patch("crewai.cli.test_crew.subprocess.run")
+@mock.patch("crewai.cli.evaluate_crew.click")
+@mock.patch("crewai.cli.evaluate_crew.subprocess.run")
 def test_test_crew_called_process_error(mock_subprocess_run, click):
     n_iterations = 5
     mock_subprocess_run.side_effect = subprocess.CalledProcessError(
@@ -59,7 +59,7 @@ def test_test_crew_called_process_error(mock_subprocess_run, click):
         output="Error",
         stderr="Some error occurred",
     )
-    test_crew.test_crew(n_iterations, "gpt-4o")
+    evaluate_crew.evaluate_crew(n_iterations, "gpt-4o")

     mock_subprocess_run.assert_called_once_with(
         ["poetry", "run", "test", "5", "gpt-4o"],
@@ -78,13 +78,13 @@ def test_test_crew_called_process_error(mock_subprocess_run, click):
     )


-@mock.patch("crewai.cli.test_crew.click")
-@mock.patch("crewai.cli.test_crew.subprocess.run")
+@mock.patch("crewai.cli.evaluate_crew.click")
+@mock.patch("crewai.cli.evaluate_crew.subprocess.run")
 def test_test_crew_unexpected_exception(mock_subprocess_run, click):
     # Arrange
     n_iterations = 5
     mock_subprocess_run.side_effect = Exception("Unexpected error")
-    test_crew.test_crew(n_iterations, "gpt-4o")
+    evaluate_crew.evaluate_crew(n_iterations, "gpt-4o")

     mock_subprocess_run.assert_called_once_with(
         ["poetry", "run", "test", "5", "gpt-4o"],
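Taken together, these tests pin down the shape of the renamed helper: it validates the iteration count, shells out to `poetry run test <n_iterations> <model>`, and reports failures through `click.echo(..., err=True)`. A minimal sketch of crewai/cli/evaluate_crew.py consistent with that behavior follows; the exact subprocess keyword arguments and any error wording beyond the asserted messages are assumptions.

```python
# Sketch of crewai/cli/evaluate_crew.py, inferred from the tests above;
# the real implementation may differ in details.
import subprocess

import click


def evaluate_crew(n_iterations: int, model: str) -> None:
    """Evaluate the crew by running `poetry run test <n_iterations> <model>`."""
    command = ["poetry", "run", "test", str(n_iterations), model]

    try:
        if n_iterations <= 0:
            raise ValueError("The number of iterations must be a positive integer.")

        # check=True raises CalledProcessError on a non-zero exit code.
        subprocess.run(command, capture_output=False, text=True, check=True)

    except subprocess.CalledProcessError as e:
        click.echo(f"An error occurred while testing the crew: {e}", err=True)
        click.echo(e.output, err=True)

    except Exception as e:
        click.echo(f"An unexpected error occurred: {e}", err=True)
```

Raising `ValueError` inside the `try` block is one way to make the zero and negative iteration cases surface as the asserted "An unexpected error occurred: ..." message through the generic exception handler.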

Changed file: CrewEvaluator tests

@@ -84,6 +84,10 @@ class TestCrewEvaluator:
             1: [10, 9, 8],
             2: [9, 8, 7],
         }
+        crew_planner.run_execution_times = {
+            1: [24, 45, 66],
+            2: [55, 33, 67],
+        }

         crew_planner.print_crew_evaluation_result()

@@ -98,6 +102,7 @@ class TestCrewEvaluator:
                 mock.call().add_row("Task 2", "9", "8", "8.5"),
                 mock.call().add_row("Task 3", "8", "7", "7.5"),
                 mock.call().add_row("Crew", "9.0", "8.0", "8.5"),
+                mock.call().add_row("Execution Time (s)", "135", "155", "145"),
             ]
         )
         console.assert_has_calls([mock.call(), mock.call().print(table())])
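The expected table row shows how the new execution-time data is aggregated: the per-task times of each run are summed (24 + 45 + 66 = 135 and 55 + 33 + 67 = 155) and the final column is the average of those totals ((135 + 155) / 2 = 145). A minimal sketch of that arithmetic, using the fixture values above; the real CrewEvaluator's table-building code is not part of this diff.

```python
# Reproduce the "Execution Time (s)" row asserted above from the fixture data.
run_execution_times = {1: [24, 45, 66], 2: [55, 33, 67]}

# Total execution time per run: [135, 155].
run_totals = [sum(times) for times in run_execution_times.values()]

# Average of the run totals: 145.0, shown without decimals in the table.
avg_total = sum(run_totals) / len(run_totals)

row = ["Execution Time (s)", *[str(t) for t in run_totals], str(int(avg_total))]
assert row == ["Execution Time (s)", "135", "155", "145"]
```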