Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-10 08:38:30 +00:00)
fix: improve error handling, logging, and test coverage
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -1081,7 +1081,7 @@ class Crew(BaseModel):
         openai_model_name: Optional[Union[str, LLM]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures.
+        """Test and evaluate the Crew with the given inputs for n iterations.
 
         Args:
             n_iterations: The number of iterations to run the test.

@@ -1089,6 +1089,9 @@ class Crew(BaseModel):
                 the performance of the agents. If a string is provided, it will be used to create
                 an LLM instance.
             inputs: The inputs to use for the test.
+
+        Raises:
+            ValueError: If openai_model_name is not a string or LLM instance.
         """
         test_crew = self.copy()
 
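For orientation, here is a minimal sketch of calling the updated test() signature with either a bare model name or a pre-configured LLM instance. The agent, task, and crew below are illustrative placeholders and are not part of this commit.

from crewai import Agent, Crew, Task
from crewai.llm import LLM

# Illustrative crew; any existing Crew instance works the same way.
researcher = Agent(
    role="Researcher",
    goal="Answer questions about a topic",
    backstory="A careful analyst.",
)
summary_task = Task(
    description="Summarize the given topic in three sentences.",
    expected_output="A three-sentence summary.",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[summary_task])

# A plain string: an evaluator LLM is created internally with default settings.
crew.test(n_iterations=2, openai_model_name="gpt-4o-mini")

# An LLM instance: its settings (e.g. temperature) are preserved for evaluation.
evaluator_llm = LLM(model="gpt-4o-mini", temperature=0.3)
crew.test(n_iterations=2, openai_model_name=evaluator_llm, inputs={"topic": "CrewAI"})
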
@@ -4,6 +4,7 @@ from crewai.llm import LLM
 from collections import defaultdict
 
 from pydantic import BaseModel, Field
+from crewai.utilities.logger import Logger
 from rich.box import HEAVY_EDGE
 from rich.console import Console
 from rich.table import Table

@@ -42,11 +43,22 @@ class CrewEvaluator:
             crew (Crew): The crew to evaluate
             openai_model_name (Union[str, LLM]): Either a model name string or an LLM instance
                 to use for evaluation. If a string is provided, it will be used to create an
-                LLM instance.
+                LLM instance with default settings. If an LLM instance is provided, its settings
+                (like temperature) will be preserved.
+
+        Raises:
+            ValueError: If openai_model_name is not a string or LLM instance.
         """
         self.crew = crew
-        self.llm = openai_model_name if isinstance(openai_model_name, LLM) else LLM(model=openai_model_name)
+        if not isinstance(openai_model_name, (str, LLM)):
+            raise ValueError(f"Invalid model type '{type(openai_model_name)}'. Expected str or LLM instance.")
+        self.model_instance = openai_model_name if isinstance(openai_model_name, LLM) else LLM(model=openai_model_name)
         self._telemetry = Telemetry()
+        self._logger = Logger()
+        self._logger.log(
+            "info",
+            f"Initializing CrewEvaluator with model: {openai_model_name if isinstance(openai_model_name, str) else openai_model_name.model}"
+        )
         self._setup_for_evaluating()
 
     def _setup_for_evaluating(self) -> None:

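The constructor change above follows a validate-then-normalize pattern: reject anything that is neither a str nor an LLM, then wrap bare strings so the rest of the class always holds an LLM in model_instance. Below is a standalone sketch of that pattern; resolve_model is a hypothetical helper used only for illustration, not part of the commit.

from typing import Union

from crewai.llm import LLM


def resolve_model(openai_model_name: Union[str, LLM]) -> LLM:
    """Hypothetical helper mirroring the validation in CrewEvaluator.__init__."""
    if not isinstance(openai_model_name, (str, LLM)):
        raise ValueError(
            f"Invalid model type '{type(openai_model_name)}'. Expected str or LLM instance."
        )
    # Strings get an LLM with default settings; LLM instances keep their own settings.
    return openai_model_name if isinstance(openai_model_name, LLM) else LLM(model=openai_model_name)


resolve_model("gpt-4")                               # wrapped into LLM(model="gpt-4")
resolve_model(LLM(model="gpt-4", temperature=0.5))   # returned as-is, temperature preserved
# resolve_model(123)                                 # would raise ValueError("Invalid model type ...")
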
@@ -62,7 +74,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.llm,
+            llm=self.model_instance,
         )
 
     def _evaluation_task(

@@ -192,7 +204,11 @@ class CrewEvaluator:
                 self.crew,
                 evaluation_result.pydantic.quality,
                 current_task._execution_time,
-                self.llm.model,
+                self.model_instance.model,
+            )
+            self._logger.log(
+                "info",
+                f"Task evaluation completed with quality score: {evaluation_result.pydantic.quality}"
             )
             self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
             self.run_execution_times[self.iteration].append(

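Both new log calls go through the same Logger utility imported earlier in this diff: one records the model chosen at construction time, the other records each task's quality score. A minimal sketch of that call shape, using a stand-in value for the quality score:

from crewai.utilities.logger import Logger

logger = Logger()

# Stand-in for evaluation_result.pydantic.quality in the hunk above.
quality = 8.5
logger.log("info", f"Task evaluation completed with quality score: {quality}")
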
@@ -136,14 +136,25 @@ class TestCrewEvaluator:
         """Test that CrewEvaluator correctly handles custom LLM instances."""
         custom_llm = LLM(model="gpt-4", temperature=0.5)
         evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
-        assert evaluator.llm == custom_llm
-        assert evaluator.llm.temperature == 0.5
+        assert evaluator.model_instance == custom_llm
+        assert evaluator.model_instance.temperature == 0.5
 
+    def test_evaluator_with_invalid_model_type(self, crew_planner):
+        """Test that CrewEvaluator raises error for invalid model type."""
+        with pytest.raises(ValueError, match="Invalid model type"):
+            CrewEvaluator(crew_planner.crew, 123)
+
+    def test_evaluator_preserves_model_settings(self, crew_planner):
+        """Test that CrewEvaluator preserves model settings."""
+        custom_llm = LLM(model="gpt-4", temperature=0.7)
+        evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
+        assert evaluator.model_instance.temperature == 0.7
+
     def test_evaluator_with_model_name(self, crew_planner):
         """Test that CrewEvaluator correctly handles string model names."""
         evaluator = CrewEvaluator(crew_planner.crew, "gpt-4")
-        assert isinstance(evaluator.llm, LLM)
-        assert evaluator.llm.model == "gpt-4"
+        assert isinstance(evaluator.model_instance, LLM)
+        assert evaluator.model_instance.model == "gpt-4"
 
     def test_evaluate(self, crew_planner):
         task_output = TaskOutput(