fix: address flaky tests (#3363)

fix: resolve flaky tests and race conditions in test suite

- Fix telemetry/event tests by patching class methods instead of instances (see the patching sketch below)
- Use unique temp files/directories to prevent CI race conditions (see the isolation-fixture sketch below)
- Reset singleton state between tests (see the singleton-reset sketch below)
- Mock embedchain.Client.setup() to prevent JSON corruption (also covered in the isolation-fixture sketch)
- Rename test files to test_*.py convention
- Move agent tests to tests/agents directory
- Fix repeated tool usage detection
- Remove database-dependent tools causing initialization errors
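
The patching fix is the standard unittest.mock pattern of patching the method on the class rather than on a single instance, so every instance created during the test (including ones the library constructs internally) sees the mock. A minimal sketch, assuming a crewai.telemetry.Telemetry class with a crew_creation method; the exact targets patched in this commit may differ.

from unittest.mock import patch

from crewai.telemetry import Telemetry  # assumed import path


def test_telemetry_call_is_recorded():
    # patch.object on the class swaps the method for every instance; patching
    # telemetry.crew_creation on one instance would miss objects created later
    # inside library code, which is one way such tests go flaky.
    with patch.object(Telemetry, "crew_creation") as mock_crew_creation:
        telemetry = Telemetry()
        telemetry.crew_creation("crew-id")  # hypothetical call, for illustration only
        mock_crew_creation.assert_called_once()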
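
The race-condition fixes reduce to giving each test its own scratch space and stubbing the one call that writes shared JSON state. A sketch of an autouse isolation fixture along those lines; the CREWAI_STORAGE_DIR environment variable is an assumption, while embedchain.Client.setup() is the call named above.

import uuid
from unittest.mock import patch

import pytest


@pytest.fixture(autouse=True)
def isolated_storage(tmp_path, monkeypatch):
    # tmp_path is already unique per test; the uuid suffix additionally guards
    # against collisions when several CI workers share a runner.
    storage_dir = tmp_path / f"storage_{uuid.uuid4().hex}"
    storage_dir.mkdir()
    monkeypatch.setenv("CREWAI_STORAGE_DIR", str(storage_dir))  # assumed env var name

    # embedchain.Client.setup() touches shared on-disk JSON config; stubbing it
    # out prevents the corruption mentioned in the commit message.
    with patch("embedchain.Client.setup"):
        yield

Because the fixture is autouse, every test in the affected packages picks it up without opting in.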
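
Singleton reset is the same idea applied to cached global state. A generic sketch, with EventBus standing in for whatever singleton the suite actually caches (the real names are not visible in this diff).

import pytest


class EventBus:
    """Stand-in for a library singleton that caches its only instance on the class."""

    _instance = None

    @classmethod
    def get(cls):
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance


@pytest.fixture(autouse=True)
def reset_singletons():
    # Run the test, then drop the cached instance so the next test builds a fresh
    # one and never observes state registered by an earlier test.
    yield
    EventBus._instance = None
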
Greyson LaLonde, 2025-08-20 13:34:09 -04:00 (committed by GitHub)
parent 7fdf9f9290
commit 641c156c17
35 changed files with 670 additions and 527 deletions

View File

View File

@@ -3,6 +3,7 @@ from unittest.mock import MagicMock
from crewai.agent import Agent
from crewai.task import Task
class BaseEvaluationMetricsTest:
@pytest.fixture
def mock_agent(self):
@@ -24,5 +25,5 @@ class BaseEvaluationMetricsTest:
def execution_trace(self):
return {
"thinking": ["I need to analyze this data carefully"],
"actions": ["Gathered information", "Analyzed data"]
}
"actions": ["Gathered information", "Analyzed data"],
}

View File

@@ -1,5 +1,7 @@
from unittest.mock import patch, MagicMock
-from tests.experimental.evaluation.metrics.base_evaluation_metrics_test import BaseEvaluationMetricsTest
+from tests.experimental.evaluation.metrics.test_base_evaluation_metrics import (
+BaseEvaluationMetricsTest,
+)
from crewai.experimental.evaluation.base_evaluator import EvaluationScore
from crewai.experimental.evaluation.metrics.goal_metrics import GoalAlignmentEvaluator
@@ -8,7 +10,9 @@ from crewai.utilities.llm_utils import LLM
class TestGoalAlignmentEvaluator(BaseEvaluationMetricsTest):
@patch("crewai.utilities.llm_utils.create_llm")
-def test_evaluate_success(self, mock_create_llm, mock_agent, mock_task, execution_trace):
+def test_evaluate_success(
+self, mock_create_llm, mock_agent, mock_task, execution_trace
+):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = """
{
@@ -24,7 +28,7 @@ class TestGoalAlignmentEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="This is the final output"
final_output="This is the final output",
)
assert isinstance(result, EvaluationScore)
@@ -40,7 +44,9 @@ class TestGoalAlignmentEvaluator(BaseEvaluationMetricsTest):
assert mock_task.description in prompt[1]["content"]
@patch("crewai.utilities.llm_utils.create_llm")
-def test_evaluate_error_handling(self, mock_create_llm, mock_agent, mock_task, execution_trace):
+def test_evaluate_error_handling(
+self, mock_create_llm, mock_agent, mock_task, execution_trace
+):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = "Invalid JSON response"
mock_create_llm.return_value = mock_llm
@@ -51,7 +57,7 @@ class TestGoalAlignmentEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="This is the final output"
final_output="This is the final output",
)
assert isinstance(result, EvaluationScore)

View File

@@ -6,10 +6,13 @@ from crewai.tasks.task_output import TaskOutput
from crewai.experimental.evaluation.metrics.reasoning_metrics import (
ReasoningEfficiencyEvaluator,
)
-from tests.experimental.evaluation.metrics.base_evaluation_metrics_test import BaseEvaluationMetricsTest
+from tests.experimental.evaluation.metrics.test_base_evaluation_metrics import (
+BaseEvaluationMetricsTest,
+)
from crewai.utilities.llm_utils import LLM
from crewai.experimental.evaluation.base_evaluator import EvaluationScore
class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
@pytest.fixture
def mock_output(self):
@@ -23,18 +26,18 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
{
"prompt": "How should I approach this task?",
"response": "I'll first research the topic, then compile findings.",
"timestamp": 1626987654
"timestamp": 1626987654,
},
{
"prompt": "What resources should I use?",
"response": "I'll use relevant academic papers and reliable websites.",
"timestamp": 1626987754
"timestamp": 1626987754,
},
{
"prompt": "How should I structure the output?",
"response": "I'll organize information clearly with headings and bullet points.",
"timestamp": 1626987854
}
"timestamp": 1626987854,
},
]
def test_insufficient_llm_calls(self, mock_agent, mock_task, mock_output):
@@ -45,7 +48,7 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
-final_output=mock_output
+final_output=mock_output,
)
assert isinstance(result, EvaluationScore)
@@ -53,7 +56,9 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
assert "Insufficient LLM calls" in result.feedback
@patch("crewai.utilities.llm_utils.create_llm")
-def test_successful_evaluation(self, mock_create_llm, mock_agent, mock_task, mock_output, llm_calls):
+def test_successful_evaluation(
+self, mock_create_llm, mock_agent, mock_task, mock_output, llm_calls
+):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = """
{
@@ -83,7 +88,7 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
-final_output=mock_output
+final_output=mock_output,
)
# Assertions
@@ -97,7 +102,9 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
mock_llm.call.assert_called_once()
@patch("crewai.utilities.llm_utils.create_llm")
-def test_parse_error_handling(self, mock_create_llm, mock_agent, mock_task, mock_output, llm_calls):
+def test_parse_error_handling(
+self, mock_create_llm, mock_agent, mock_task, mock_output, llm_calls
+):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = "Invalid JSON response"
mock_create_llm.return_value = mock_llm
@@ -114,7 +121,7 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
-final_output=mock_output
+final_output=mock_output,
)
# Assertions for error handling
@@ -126,11 +133,31 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
def test_loop_detection(self, mock_create_llm, mock_agent, mock_task, mock_output):
# Setup LLM calls with a repeating pattern
repetitive_llm_calls = [
{"prompt": "How to solve?", "response": "I'll try method A", "timestamp": 1000},
{"prompt": "Let me try method A", "response": "It didn't work", "timestamp": 1100},
{"prompt": "How to solve?", "response": "I'll try method A again", "timestamp": 1200},
{"prompt": "Let me try method A", "response": "It didn't work", "timestamp": 1300},
{"prompt": "How to solve?", "response": "I'll try method A one more time", "timestamp": 1400}
{
"prompt": "How to solve?",
"response": "I'll try method A",
"timestamp": 1000,
},
{
"prompt": "Let me try method A",
"response": "It didn't work",
"timestamp": 1100,
},
{
"prompt": "How to solve?",
"response": "I'll try method A again",
"timestamp": 1200,
},
{
"prompt": "Let me try method A",
"response": "It didn't work",
"timestamp": 1300,
},
{
"prompt": "How to solve?",
"response": "I'll try method A one more time",
"timestamp": 1400,
},
]
mock_llm = MagicMock(spec=LLM)
@@ -158,7 +185,7 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
-final_output=mock_output
+final_output=mock_output,
)
assert isinstance(result, EvaluationScore)

View File

@@ -1,13 +1,20 @@
from unittest.mock import patch, MagicMock
from crewai.experimental.evaluation.base_evaluator import EvaluationScore
-from crewai.experimental.evaluation.metrics.semantic_quality_metrics import SemanticQualityEvaluator
-from tests.experimental.evaluation.metrics.base_evaluation_metrics_test import BaseEvaluationMetricsTest
+from crewai.experimental.evaluation.metrics.semantic_quality_metrics import (
+SemanticQualityEvaluator,
+)
+from tests.experimental.evaluation.metrics.test_base_evaluation_metrics import (
+BaseEvaluationMetricsTest,
+)
from crewai.utilities.llm_utils import LLM
class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
@patch("crewai.utilities.llm_utils.create_llm")
-def test_evaluate_success(self, mock_create_llm, mock_agent, mock_task, execution_trace):
+def test_evaluate_success(
+self, mock_create_llm, mock_agent, mock_task, execution_trace
+):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = """
{
@@ -23,7 +30,7 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="This is a well-structured analysis of the data."
final_output="This is a well-structured analysis of the data.",
)
assert isinstance(result, EvaluationScore)
@@ -39,7 +46,9 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
assert mock_task.description in prompt[1]["content"]
@patch("crewai.utilities.llm_utils.create_llm")
-def test_evaluate_with_empty_output(self, mock_create_llm, mock_agent, mock_task, execution_trace):
+def test_evaluate_with_empty_output(
+self, mock_create_llm, mock_agent, mock_task, execution_trace
+):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = """
{
@@ -55,7 +64,7 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output=""
final_output="",
)
assert isinstance(result, EvaluationScore)
@@ -63,7 +72,9 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
assert "empty or minimal" in result.feedback
@patch("crewai.utilities.llm_utils.create_llm")
-def test_evaluate_error_handling(self, mock_create_llm, mock_agent, mock_task, execution_trace):
+def test_evaluate_error_handling(
+self, mock_create_llm, mock_agent, mock_task, execution_trace
+):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = "Invalid JSON response"
mock_create_llm.return_value = mock_llm
@@ -74,9 +85,9 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="This is the output."
final_output="This is the output.",
)
assert isinstance(result, EvaluationScore)
assert result.score is None
assert "Failed to parse" in result.feedback
assert "Failed to parse" in result.feedback

View File

@@ -3,10 +3,13 @@ from unittest.mock import patch, MagicMock
from crewai.experimental.evaluation.metrics.tools_metrics import (
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
-ToolInvocationEvaluator
+ToolInvocationEvaluator,
)
from crewai.utilities.llm_utils import LLM
-from tests.experimental.evaluation.metrics.base_evaluation_metrics_test import BaseEvaluationMetricsTest
+from tests.experimental.evaluation.metrics.test_base_evaluation_metrics import (
+BaseEvaluationMetricsTest,
+)
class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
def test_no_tools_available(self, mock_task, mock_agent):
@@ -20,7 +23,7 @@ class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score is None
@@ -35,7 +38,7 @@ class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score is None
@@ -56,8 +59,12 @@ class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
# Setup execution trace with tool uses
execution_trace = {
"tool_uses": [
{"tool": "search_tool", "input": {"query": "test query"}, "output": "search results"},
{"tool": "calculator", "input": {"expression": "2+2"}, "output": "4"}
{
"tool": "search_tool",
"input": {"query": "test query"},
"output": "search results",
},
{"tool": "calculator", "input": {"expression": "2+2"}, "output": "4"},
]
}
@@ -66,7 +73,7 @@ class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score == 8.5
@@ -90,7 +97,7 @@ class TestParameterExtractionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score is None
@@ -117,14 +124,14 @@ class TestParameterExtractionEvaluator(BaseEvaluationMetricsTest):
"tool": "search_tool",
"input": {"query": "test query"},
"output": "search results",
"error": None
"error": None,
},
{
"tool": "calculator",
"input": {"expression": "2+2"},
"output": "4",
"error": None
}
"error": None,
},
]
}
@@ -133,7 +140,7 @@ class TestParameterExtractionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score == 9.0
@@ -149,7 +156,7 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score is None
@@ -171,8 +178,12 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
# Setup execution trace with tool uses
execution_trace = {
"tool_uses": [
{"tool": "search_tool", "input": {"query": "test query"}, "output": "search results"},
{"tool": "calculator", "input": {"expression": "2+2"}, "output": "4"}
{
"tool": "search_tool",
"input": {"query": "test query"},
"output": "search results",
},
{"tool": "calculator", "input": {"expression": "2+2"}, "output": "4"},
]
}
@@ -181,7 +192,7 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score == 8.0
@@ -207,14 +218,14 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
"tool": "search_tool",
"input": {"query": "test query"},
"output": "search results",
"error": None
"error": None,
},
{
"tool": "calculator",
"input": {"expression": "2+"},
"output": None,
"error": "Invalid expression"
}
"error": "Invalid expression",
},
]
}
@@ -223,7 +234,7 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score == 5.5