diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index f36df2a03..55981fb64 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -1125,19 +1125,20 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: Optional[str] = None,
+        eval_llm: Optional[Union[str, InstanceOf[LLM]]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
         test_crew = self.copy()
+        eval_llm = create_llm(eval_llm)
 
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
+            eval_llm.model,  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, eval_llm)  # type: ignore[arg-type]
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
diff --git a/src/crewai/utilities/evaluators/crew_evaluator_handler.py b/src/crewai/utilities/evaluators/crew_evaluator_handler.py
index ef9b908e1..1393cdf56 100644
--- a/src/crewai/utilities/evaluators/crew_evaluator_handler.py
+++ b/src/crewai/utilities/evaluators/crew_evaluator_handler.py
@@ -1,6 +1,6 @@
 from collections import defaultdict
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, InstanceOf
 from rich.box import HEAVY_EDGE
 from rich.console import Console
 from rich.table import Table
@@ -9,6 +9,7 @@ from crewai.agent import Agent
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
+from crewai.llm import LLM
 
 
 class TaskEvaluationPydanticOutput(BaseModel):
@@ -32,9 +33,9 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, eval_llm: InstanceOf[LLM]):
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        self.llm = eval_llm
         self._telemetry = Telemetry()
         self._setup_for_evaluating()
 
@@ -51,7 +52,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(
@@ -181,7 +182,7 @@ class CrewEvaluator:
             self.crew,
             evaluation_result.pydantic.quality,
             current_task.execution_duration,
-            self.openai_model_name,
+            self.llm.model,
         )
         self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
         self.run_execution_times[self.iteration].append(
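For reviewers, a minimal usage sketch of the new `eval_llm` parameter follows. The agent, task, and `inputs` values are hypothetical placeholders, and the sketch assumes `create_llm` is imported in `crew.py` (in crewAI it lives in `crewai.utilities.llm_utils`) and that credentials for the chosen models are configured:

```python
# Usage sketch (hypothetical crew; not part of the diff).
from crewai import Agent, Crew, Task
from crewai.llm import LLM

# A one-agent, one-task crew, just to exercise the new signature.
analyst = Agent(
    role="Analyst",
    goal="Summarize the input text",
    backstory="A concise technical writer.",
)
summarize = Task(
    description="Summarize {text} in two sentences.",
    expected_output="A two-sentence summary.",
    agent=analyst,
)
crew = Crew(agents=[analyst], tasks=[summarize])

# Previously, only an OpenAI model name could be passed:
#   crew.test(n_iterations=2, openai_model_name="gpt-4o")

# A plain model string still works, since create_llm() normalizes it...
crew.test(n_iterations=2, eval_llm="gpt-4o", inputs={"text": "CrewAI release notes"})

# ...and a configured LLM instance now works too, e.g. a local model.
crew.test(
    n_iterations=2,
    eval_llm=LLM(model="ollama/llama3.1", base_url="http://localhost:11434"),
    inputs={"text": "CrewAI release notes"},
)
```

Accepting either a string or an `LLM` instance keeps existing string-based callers on a one-line migration while opening evaluation to non-OpenAI providers; telemetry continues to record only the model name via `eval_llm.model`.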