chore: add logging for evaluation process

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date: 2025-02-09 20:59:11 +00:00
Parent: df6cb60ec7
Commit: 0bb44690e3

@@ -210,6 +210,7 @@ class CrewEvaluator:
         Raises:
             ValueError: If task to evaluate or task output is missing, or if evaluation result is invalid
         """
-        current_task = None
-        for task in self.crew.tasks:
-            if task.description == task_output.description:
+        try:
+            current_task = None
+            for task in self.crew.tasks:
+                if task.description == task_output.description:
@@ -221,6 +222,7 @@
-                "Task to evaluate and task output are required for evaluation"
-            )
+                    "Task to evaluate and task output are required for evaluation"
+                )
 
-        evaluator_agent = self._evaluator_agent()
-        evaluation_task = self._evaluation_task(
-            evaluator_agent, current_task, task_output.raw
+            self._logger.log("info", f"Starting evaluation for task: {task_output.description}")
+            evaluator_agent = self._evaluator_agent()
+            evaluation_task = self._evaluation_task(
+                evaluator_agent, current_task, task_output.raw
@@ -239,5 +241,9 @@
-            self.run_execution_times[self.iteration].append(
-                current_task._execution_time
-            )
-        else:
-            raise ValueError("Evaluation result is not in the expected format")
+                self.run_execution_times[self.iteration].append(
+                    current_task._execution_time
+                )
+                self._logger.log("info", f"Evaluation completed with score: {evaluation_result.pydantic.quality}")
+            else:
+                raise ValueError("Evaluation result is not in the expected format")
+        except Exception as e:
+            self._logger.log("error", f"Evaluation failed: {str(e)}")
+            raise
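
The change wraps the evaluation flow in a try/except block and logs three events: the start of each task evaluation, the quality score on success, and the error on failure before re-raising it. Below is a minimal, self-contained sketch of that pattern; SimpleLogger and evaluate_stub are hypothetical stand-ins for illustration, not crewAI APIs.

# Hypothetical stand-ins for illustration; crewAI's internal Logger exposes a
# similar log(level, message) method, but this is not the real implementation.
class SimpleLogger:
    def log(self, level: str, message: str) -> None:
        print(f"[{level.upper()}] {message}")


def evaluate_stub(task_description: str, quality: float | None) -> float:
    """Mirror the commit's control flow: log start, log the score on
    success, log and re-raise on failure."""
    logger = SimpleLogger()
    try:
        logger.log("info", f"Starting evaluation for task: {task_description}")
        if quality is None:
            raise ValueError("Evaluation result is not in the expected format")
        logger.log("info", f"Evaluation completed with score: {quality}")
        return quality
    except Exception as e:
        # Log the failure, then re-raise so callers still see the original error.
        logger.log("error", f"Evaluation failed: {str(e)}")
        raise

For example, evaluate_stub("Summarize the report", 8.5) logs the start and completion messages and returns 8.5, while evaluate_stub("Summarize the report", None) logs the error and propagates the ValueError, matching how the commit re-raises after logging.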