test: add tests for Evaluator Experiment

Lucas Gomide
2025-07-10 23:37:40 -03:00
parent bb2a0f95ba
commit 3fee619798
5 changed files with 338 additions and 19 deletions


@@ -97,11 +97,10 @@ class ExperimentResults:
             baseline_result = baseline_lookup[test_identifier]
             baseline_passed = baseline_result.get("passed", False)
             if result.passed and not baseline_passed:
-                improved.append((test_identifier, result.score, baseline_result.get("score", 0)))
+                improved.append(test_identifier)
             elif not result.passed and baseline_passed:
-                regressed.append((test_identifier, result.score, baseline_result.get("score", 0)))
+                regressed.append(test_identifier)
             else:
                 unchanged.append(test_identifier)


@@ -34,7 +34,7 @@ class ExperimentResultsDisplay:
         improved = comparison.get("improved", [])
         if improved:
-            details = ", ".join([f"{test_identifier}" for test_identifier, _, _ in improved[:3]])
+            details = ", ".join([f"{test_identifier}" for test_identifier in improved[:3]])
             if len(improved) > 3:
                 details += f" and {len(improved) - 3} more"
             table.add_row("✅ Improved", str(len(improved)), details)
@@ -43,7 +43,7 @@ class ExperimentResultsDisplay:
         regressed = comparison.get("regressed", [])
         if regressed:
-            details = ", ".join([f"{test_identifier}" for test_identifier, _, _ in regressed[:3]])
+            details = ", ".join([f"{test_identifier}" for test_identifier in regressed[:3]])
             if len(regressed) > 3:
                 details += f" and {len(regressed) - 3} more"
             table.add_row("❌ Regressed", str(len(regressed)), details, style="red")


@@ -12,7 +12,7 @@ from crewai.evaluation.evaluation_display import AgentAggregatedEvaluationResult
 class ExperimentRunner:
     def __init__(self, dataset: List[Dict[str, Any]]):
         self.dataset = dataset or []
-        self.evaluator = None
+        self.evaluator: AgentEvaluator | None = None
         self.display = ExperimentResultsDisplay()

     def run(self, crew: Crew, print_summary: bool = False) -> ExperimentResults:
@@ -86,27 +86,32 @@ class ExperimentRunner:
         Compare expected and actual scores, and return whether the test case passed.
         The rules for comparison are as follows:
-        - If both expected and actual scores are single numbers, the actual score must be greater than or equal to the expected score.
-        - If the expected score is a single number and the actual score is a dict, the test case fails.
-        - If the expected score is a dict and the actual score is a single number, the test case fails.
-        - If both expected and actual scores are dicts, the actual score must have all the same keys as the expected score, and the value for each key must be greater than or equal to the expected score.
+        - If both expected and actual scores are single numbers, the actual score must be >= expected.
+        - If expected is a single number and actual is a dict, compare against the average of actual values.
+        - If expected is a dict and actual is a single number, actual must be >= all expected values.
+        - If both are dicts, actual must have matching keys with values >= expected values.
         """
         if isinstance(expected, (int, float)) and isinstance(actual, (int, float)):
             return actual >= expected
-        elif isinstance(expected, dict) and isinstance(actual, (int, float)):
-            return False
+        if isinstance(expected, dict) and isinstance(actual, (int, float)):
+            return all(actual >= exp_score for exp_score in expected.values())
-        elif isinstance(expected, (int, float)) and isinstance(actual, dict):
+        if isinstance(expected, (int, float)) and isinstance(actual, dict):
             if not actual:
                 return False
             avg_score = sum(actual.values()) / len(actual)
             return avg_score >= expected
-        elif isinstance(expected, dict) and isinstance(actual, dict):
-            for metric, exp_score in expected.items():
-                if metric not in actual or actual[metric] < exp_score:
-                    return False
-            return True
+        if isinstance(expected, dict) and isinstance(actual, dict):
+            if not expected:
+                return True
+            matching_keys = set(expected.keys()) & set(actual.keys())
+            if not matching_keys:
+                return False
+            # All matching keys must have actual >= expected
+            return all(actual[key] >= expected[key] for key in matching_keys)
         return False
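
For reference, the comparison rules described in the docstring above can be exercised in isolation with a small standalone sketch. The helper name scores_meet_expectation is hypothetical; in the diff the logic lives inside ExperimentRunner, but the branches below mirror the added lines.

from typing import Any

def scores_meet_expectation(expected: Any, actual: Any) -> bool:
    # Both single numbers: the actual score must meet or beat the expectation.
    if isinstance(expected, (int, float)) and isinstance(actual, (int, float)):
        return actual >= expected
    # Expected is a dict, actual a single number: the single score must beat every expected value.
    if isinstance(expected, dict) and isinstance(actual, (int, float)):
        return all(actual >= exp_score for exp_score in expected.values())
    # Expected is a single number, actual a dict: compare against the average of the actual values.
    if isinstance(expected, (int, float)) and isinstance(actual, dict):
        if not actual:
            return False
        return sum(actual.values()) / len(actual) >= expected
    # Both dicts: every metric present in both must meet its expectation.
    if isinstance(expected, dict) and isinstance(actual, dict):
        if not expected:
            return True
        matching_keys = set(expected) & set(actual)
        if not matching_keys:
            return False
        return all(actual[key] >= expected[key] for key in matching_keys)
    return False

assert scores_meet_expectation(7, 8) is True
assert scores_meet_expectation({"goal_alignment": 7}, 9) is True
assert scores_meet_expectation(8, {"relevance": 9, "factuality": 6}) is False   # average 7.5 < 8
assert scores_meet_expectation({"tool_selection": 9}, {"goal_alignment": 9}) is False  # no matching keys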


@@ -0,0 +1,111 @@
import pytest
from unittest.mock import MagicMock, patch

from crewai.evaluation.experiment.result import ExperimentResult, ExperimentResults


class TestExperimentResult:
    @pytest.fixture
    def mock_results(self):
        return [
            ExperimentResult(
                identifier="test-1",
                inputs={"query": "What is the capital of France?"},
                score=10,
                expected_score=7,
                passed=True
            ),
            ExperimentResult(
                identifier="test-2",
                inputs={"query": "Who wrote Hamlet?"},
                score={"relevance": 9, "factuality": 8},
                expected_score={"relevance": 7, "factuality": 7},
                passed=True,
                agent_evaluations={"agent1": {"metrics": {"goal_alignment": {"score": 9}}}}
            ),
            ExperimentResult(
                identifier="test-3",
                inputs={"query": "Any query"},
                score={"relevance": 9, "factuality": 8},
                expected_score={"relevance": 7, "factuality": 7},
                passed=False,
                agent_evaluations={"agent1": {"metrics": {"goal_alignment": {"score": 9}}}}
            ),
            ExperimentResult(
                identifier="test-4",
                inputs={"query": "Another query"},
                score={"relevance": 9, "factuality": 8},
                expected_score={"relevance": 7, "factuality": 7},
                passed=True,
                agent_evaluations={"agent1": {"metrics": {"goal_alignment": {"score": 9}}}}
            ),
            ExperimentResult(
                identifier="test-6",
                inputs={"query": "Yet another query"},
                score={"relevance": 9, "factuality": 8},
                expected_score={"relevance": 7, "factuality": 7},
                passed=True,
                agent_evaluations={"agent1": {"metrics": {"goal_alignment": {"score": 9}}}}
            )
        ]

    @patch('os.path.exists', return_value=True)
    @patch('os.path.getsize', return_value=1)
    @patch('json.load')
    @patch('builtins.open', new_callable=MagicMock)
    def test_experiment_results_compare_with_baseline(self, mock_open, mock_json_load, mock_path_getsize, mock_path_exists, mock_results):
        baseline_data = {
            "timestamp": "2023-01-01T00:00:00+00:00",
            "results": [
                {
                    "identifier": "test-1",
                    "inputs": {"query": "What is the capital of France?"},
                    "score": 7,
                    "expected_score": 7,
                    "passed": False
                },
                {
                    "identifier": "test-2",
                    "inputs": {"query": "Who wrote Hamlet?"},
                    "score": {"relevance": 8, "factuality": 7},
                    "expected_score": {"relevance": 7, "factuality": 7},
                    "passed": True
                },
                {
                    "identifier": "test-3",
                    "inputs": {"query": "Any query"},
                    "score": {"relevance": 8, "factuality": 7},
                    "expected_score": {"relevance": 7, "factuality": 7},
                    "passed": True
                },
                {
                    "identifier": "test-4",
                    "inputs": {"query": "Another query"},
                    "score": {"relevance": 8, "factuality": 7},
                    "expected_score": {"relevance": 7, "factuality": 7},
                    "passed": True
                },
                {
                    "identifier": "test-5",
                    "inputs": {"query": "Another query"},
                    "score": {"relevance": 8, "factuality": 7},
                    "expected_score": {"relevance": 7, "factuality": 7},
                    "passed": True
                }
            ]
        }
        mock_json_load.return_value = baseline_data

        results = ExperimentResults(results=mock_results)
        results.display = MagicMock()

        comparison = results.compare_with_baseline(baseline_filepath="baseline.json")

        assert "baseline_timestamp" in comparison
        assert comparison["baseline_timestamp"] == "2023-01-01T00:00:00+00:00"
        assert comparison["improved"] == ["test-1"]
        assert comparison["regressed"] == ["test-3"]
        assert comparison["unchanged"] == ["test-2", "test-4"]
        assert comparison["new_tests"] == ["test-6"]
        assert comparison["missing_tests"] == ["test-5"]
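
For context, the categorization this test exercises can be sketched as a standalone helper. The categorize function below is hypothetical; the real compare_with_baseline additionally loads the baseline JSON file and reports baseline_timestamp, but the bucketing logic mirrors the result.py diff above.

from types import SimpleNamespace

def categorize(current_results, baseline_results):
    # Index the baseline by identifier, then bucket each current result.
    baseline_lookup = {entry["identifier"]: entry for entry in baseline_results}
    current_ids = {result.identifier for result in current_results}
    improved, regressed, unchanged = [], [], []
    for result in current_results:
        baseline = baseline_lookup.get(result.identifier)
        if baseline is None:
            continue  # counted as a new test below
        baseline_passed = baseline.get("passed", False)
        if result.passed and not baseline_passed:
            improved.append(result.identifier)
        elif not result.passed and baseline_passed:
            regressed.append(result.identifier)
        else:
            unchanged.append(result.identifier)
    return {
        "improved": improved,
        "regressed": regressed,
        "unchanged": unchanged,
        "new_tests": sorted(current_ids - set(baseline_lookup)),
        "missing_tests": sorted(set(baseline_lookup) - current_ids),
    }

# Mirrors the expectations asserted in the test above.
current = [
    SimpleNamespace(identifier="test-1", passed=True),
    SimpleNamespace(identifier="test-3", passed=False),
    SimpleNamespace(identifier="test-6", passed=True),
]
baseline = [
    {"identifier": "test-1", "passed": False},
    {"identifier": "test-3", "passed": True},
    {"identifier": "test-5", "passed": True},
]
summary = categorize(current, baseline)
assert summary["improved"] == ["test-1"]
assert summary["regressed"] == ["test-3"]
assert summary["new_tests"] == ["test-6"]
assert summary["missing_tests"] == ["test-5"]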


@@ -0,0 +1,204 @@
import pytest
from unittest.mock import MagicMock, patch
import json

from crewai import llm
from crewai.agent import Agent
from crewai.evaluation import metrics
from crewai.task import Task
from crewai.crew import Crew
from crewai.evaluation.experiment.runner import ExperimentRunner
from crewai.evaluation.experiment.result import ExperimentResult, ExperimentResults
from crewai.evaluation.agent_evaluator import AgentEvaluator
from crewai.evaluation.evaluation_display import AgentAggregatedEvaluationResult
from crewai.evaluation.base_evaluator import MetricCategory, EvaluationScore


class TestExperimentRunner:
    @pytest.fixture
    def mock_crew(self):
        return MagicMock(llm=Crew)

    @pytest.fixture
    def mock_evaluator_results(self):
        agent_evaluation = AgentAggregatedEvaluationResult(
            agent_id="Test Agent",
            agent_role="Test Agent Role",
            metrics={
                MetricCategory.GOAL_ALIGNMENT: EvaluationScore(
                    score=9,
                    feedback="Test feedback for goal alignment",
                    raw_response="Test raw response for goal alignment"
                ),
                MetricCategory.REASONING_EFFICIENCY: EvaluationScore(
                    score=None,
                    feedback="Reasoning efficiency not applicable",
                    raw_response="Reasoning efficiency not applicable"
                ),
                MetricCategory.PARAMETER_EXTRACTION: EvaluationScore(
                    score=7,
                    feedback="Test parameter extraction explanation",
                    raw_response="Test raw output"
                ),
                MetricCategory.TOOL_SELECTION: EvaluationScore(
                    score=8,
                    feedback="Test tool selection explanation",
                    raw_response="Test raw output"
                )
            }
        )
        return {"Test Agent": agent_evaluation}

    @patch('crewai.evaluation.experiment.runner.create_default_evaluator')
    def test_run_success(self, mock_create_evaluator, mock_crew, mock_evaluator_results):
        dataset = [
            {
                "identifier": "test-case-1",
                "inputs": {"query": "Test query 1"},
                "expected_score": 8
            },
            {
                "identifier": "test-case-2",
                "inputs": {"query": "Test query 2"},
                "expected_score": {"goal_alignment": 7}
            },
            {
                "identifier": "test-case-3",
                "inputs": {"query": "Test query 3"},
                "expected_score": {"tool_selection": 9}
            }
        ]

        mock_evaluator = MagicMock()
        mock_evaluator.get_agent_evaluation.return_value = mock_evaluator_results
        mock_evaluator.reset_iterations_results = MagicMock()
        mock_create_evaluator.return_value = mock_evaluator

        runner = ExperimentRunner(dataset=dataset)
        results = runner.run(crew=mock_crew)

        assert isinstance(results, ExperimentResults)
        result_1, result_2, result_3 = results.results
        assert len(results.results) == 3

        assert result_1.identifier == "test-case-1"
        assert result_1.inputs == {"query": "Test query 1"}
        assert result_1.expected_score == 8
        assert result_1.passed is True

        assert result_2.identifier == "test-case-2"
        assert result_2.inputs == {"query": "Test query 2"}
        assert isinstance(result_2.expected_score, dict)
        assert "goal_alignment" in result_2.expected_score
        assert result_2.passed is True

        assert result_3.identifier == "test-case-3"
        assert result_3.inputs == {"query": "Test query 3"}
        assert isinstance(result_3.expected_score, dict)
        assert "tool_selection" in result_3.expected_score
        assert result_3.passed is False

        assert mock_crew.kickoff.call_count == 3
        mock_crew.kickoff.assert_any_call(inputs={"query": "Test query 1"})
        mock_crew.kickoff.assert_any_call(inputs={"query": "Test query 2"})
        mock_crew.kickoff.assert_any_call(inputs={"query": "Test query 3"})
        assert mock_evaluator.reset_iterations_results.call_count == 3
        assert mock_evaluator.get_agent_evaluation.call_count == 3

    @patch('crewai.evaluation.experiment.runner.create_default_evaluator')
    def test_run_success_with_unknown_metric(self, mock_create_evaluator, mock_crew, mock_evaluator_results):
        dataset = [
            {
                "identifier": "test-case-2",
                "inputs": {"query": "Test query 2"},
                "expected_score": {"goal_alignment": 7, "unknown_metric": 8}
            }
        ]

        mock_evaluator = MagicMock()
        mock_evaluator.get_agent_evaluation.return_value = mock_evaluator_results
        mock_evaluator.reset_iterations_results = MagicMock()
        mock_create_evaluator.return_value = mock_evaluator

        runner = ExperimentRunner(dataset=dataset)
        results = runner.run(crew=mock_crew)

        result, = results.results
        assert result.identifier == "test-case-2"
        assert result.inputs == {"query": "Test query 2"}
        assert isinstance(result.expected_score, dict)
        assert "goal_alignment" in result.expected_score.keys()
        assert "unknown_metric" in result.expected_score.keys()
        assert result.passed is True

    @patch('crewai.evaluation.experiment.runner.create_default_evaluator')
    def test_run_success_with_single_metric_evaluator_and_expected_specific_metric(self, mock_create_evaluator, mock_crew, mock_evaluator_results):
        dataset = [
            {
                "identifier": "test-case-2",
                "inputs": {"query": "Test query 2"},
                "expected_score": {"goal_alignment": 7}
            }
        ]

        mock_evaluator = MagicMock()
        mock_create_evaluator["Test Agent"].metrics = {
            MetricCategory.GOAL_ALIGNMENT: EvaluationScore(
                score=9,
                feedback="Test feedback for goal alignment",
                raw_response="Test raw response for goal alignment"
            )
        }
        mock_evaluator.get_agent_evaluation.return_value = mock_evaluator_results
        mock_evaluator.reset_iterations_results = MagicMock()
        mock_create_evaluator.return_value = mock_evaluator

        runner = ExperimentRunner(dataset=dataset)
        results = runner.run(crew=mock_crew)

        result, = results.results
        assert result.identifier == "test-case-2"
        assert result.inputs == {"query": "Test query 2"}
        assert isinstance(result.expected_score, dict)
        assert "goal_alignment" in result.expected_score.keys()
        assert result.passed is True

    @patch('crewai.evaluation.experiment.runner.create_default_evaluator')
    def test_run_success_when_expected_metric_is_not_available(self, mock_create_evaluator, mock_crew, mock_evaluator_results):
        dataset = [
            {
                "identifier": "test-case-2",
                "inputs": {"query": "Test query 2"},
                "expected_score": {"unknown_metric": 7}
            }
        ]

        mock_evaluator = MagicMock()
        mock_create_evaluator["Test Agent"].metrics = {
            MetricCategory.GOAL_ALIGNMENT: EvaluationScore(
                score=5,
                feedback="Test feedback for goal alignment",
                raw_response="Test raw response for goal alignment"
            )
        }
        mock_evaluator.get_agent_evaluation.return_value = mock_evaluator_results
        mock_evaluator.reset_iterations_results = MagicMock()
        mock_create_evaluator.return_value = mock_evaluator

        runner = ExperimentRunner(dataset=dataset)
        results = runner.run(crew=mock_crew)

        result, = results.results
        assert result.identifier == "test-case-2"
        assert result.inputs == {"query": "Test query 2"}
        assert isinstance(result.expected_score, dict)
        assert "unknown_metric" in result.expected_score.keys()
        assert result.passed is False
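
Taken together, these tests imply a usage pattern along the following lines. This is a hedged sketch that assumes crew is an already-configured Crew instance and that the test identifiers are placeholders; the dataset keys, the run signature, and compare_with_baseline all appear verbatim in the diffs and tests above.

from crewai.evaluation.experiment.runner import ExperimentRunner

# Hypothetical dataset entries; "identifier", "inputs", and "expected_score"
# are the keys the runner reads in the tests above.
dataset = [
    {
        "identifier": "capital-question",
        "inputs": {"query": "What is the capital of France?"},
        "expected_score": 8,
    },
    {
        "identifier": "author-question",
        "inputs": {"query": "Who wrote Hamlet?"},
        "expected_score": {"goal_alignment": 7, "tool_selection": 7},
    },
]

runner = ExperimentRunner(dataset=dataset)
results = runner.run(crew=crew, print_summary=True)  # `crew` is assumed to be a configured Crew

# Optional: compare against a previously saved baseline run.
comparison = results.compare_with_baseline(baseline_filepath="baseline.json")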