Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
chore: add logging for evaluation process
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -210,6 +210,7 @@ class CrewEvaluator:
        Raises:
            ValueError: If task to evaluate or task output is missing, or if evaluation result is invalid
        """
        try:
            current_task = None
            for task in self.crew.tasks:
                if task.description == task_output.description:
@@ -221,6 +222,7 @@ class CrewEvaluator:
                    "Task to evaluate and task output are required for evaluation"
                )

            self._logger.log("info", f"Starting evaluation for task: {task_output.description}")
            evaluator_agent = self._evaluator_agent()
            evaluation_task = self._evaluation_task(
                evaluator_agent, current_task, task_output.raw
@@ -239,5 +241,9 @@ class CrewEvaluator:
                self.run_execution_times[self.iteration].append(
                    current_task._execution_time
                )
                self._logger.log("info", f"Evaluation completed with score: {evaluation_result.pydantic.quality}")
            else:
                raise ValueError("Evaluation result is not in the expected format")
        except Exception as e:
            self._logger.log("error", f"Evaluation failed: {str(e)}")
            raise
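For readers skimming the diff: the change follows a simple pattern, log before the evaluator agent runs, log the resulting quality score on success, and log-and-re-raise on failure. Below is a minimal, self-contained sketch of that pattern only; it uses Python's standard logging module in place of crewAI's internal Logger, and evaluate_task / EvaluationResult are hypothetical stand-ins for the evaluator agent call and its pydantic output, not the library's API.

# Sketch of the logging pattern added in this commit, with hypothetical stand-ins.
import logging
from dataclasses import dataclass

logging.basicConfig(level=logging.INFO, format="%(levelname)s %(message)s")
logger = logging.getLogger("crew_evaluator_sketch")


@dataclass
class EvaluationResult:
    # Stands in for evaluation_result.pydantic in the real evaluator.
    quality: float


def evaluate_task(description: str) -> EvaluationResult:
    # Placeholder for running the evaluator agent against the task output.
    return EvaluationResult(quality=8.5)


def evaluate(description: str) -> EvaluationResult:
    # Log the start of the evaluation, mirroring the first added log line.
    logger.info("Starting evaluation for task: %s", description)
    try:
        result = evaluate_task(description)
        # On success, log the score, mirroring the "completed with score" line.
        logger.info("Evaluation completed with score: %s", result.quality)
        return result
    except Exception as exc:
        # On failure, log and re-raise so callers still see the original error.
        logger.error("Evaluation failed: %s", exc)
        raise


if __name__ == "__main__":
    evaluate("Summarize the quarterly report")

Logging the failure and then re-raising (the bare raise in the diff) preserves the original exception and the caller's error handling while still leaving a trace in the evaluator's logs.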