Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 08:38:30 +00:00
test: add Agent eval tests
tests/evaluation/metrics/base_evaluation_metrics_test.py (new file, 28 lines added)
@@ -0,0 +1,28 @@
import pytest
from unittest.mock import MagicMock
from crewai.agent import Agent
from crewai.task import Task

class BaseEvaluationMetricsTest:
    @pytest.fixture
    def mock_agent(self):
        agent = MagicMock(spec=Agent)
        agent.id = "test_agent_id"
        agent.role = "Test Agent"
        agent.goal = "Test goal"
        agent.tools = []
        return agent

    @pytest.fixture
    def mock_task(self):
        task = MagicMock(spec=Task)
        task.description = "Test task description"
        task.expected_output = "Test expected output"
        return task

    @pytest.fixture
    def execution_trace(self):
        return {
            "thinking": ["I need to analyze this data carefully"],
            "actions": ["Gathered information", "Analyzed data"]
        }
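For illustration only, and not part of this commit: a concrete metrics test could inherit from BaseEvaluationMetricsTest so that pytest injects the shared fixtures by name. The test class name and the import path below are assumptions; the sketch presumes the tests/ directory is importable as a package.

# Hypothetical usage sketch (not in the commit): a metric test suite
# reusing the shared fixtures defined on the inherited base class.
from tests.evaluation.metrics.base_evaluation_metrics_test import (
    BaseEvaluationMetricsTest,
)


class TestExampleMetric(BaseEvaluationMetricsTest):
    def test_fixtures_expose_expected_data(self, mock_agent, mock_task, execution_trace):
        # pytest resolves mock_agent, mock_task, and execution_trace from the
        # fixtures declared on BaseEvaluationMetricsTest.
        assert mock_agent.role == "Test Agent"
        assert mock_task.expected_output == "Test expected output"
        assert execution_trace["actions"] == ["Gathered information", "Analyzed data"]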