diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index 9ae9ce2c0..10d440955 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -1147,20 +1147,35 @@ class Crew(BaseModel):
     def test(
         self,
-        n_iterations: int,
+        n_iterations: int = 1,
         openai_model_name: Optional[str] = None,
+        llm: Optional[Union[str, LLM]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
+        """Test and evaluate the Crew with the given inputs for n iterations.
+
+        Args:
+            n_iterations: Number of iterations to run the test
+            openai_model_name: OpenAI model name to use for evaluation (deprecated)
+            llm: LLM instance or model name to use for evaluation
+            inputs: Optional dictionary of inputs to pass to the crew
+        """
+        if not llm and not openai_model_name:
+            raise ValueError("Either llm or openai_model_name must be provided")
+
+        model_to_use = llm or openai_model_name
+        if isinstance(model_to_use, str):
+            model_to_use = LLM(model=model_to_use)
+
         test_crew = self.copy()
 
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
+            str(model_to_use.model),  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, model_to_use)
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
diff --git a/src/crewai/utilities/evaluators/crew_evaluator_handler.py b/src/crewai/utilities/evaluators/crew_evaluator_handler.py
index ef9b908e1..41496a1fc 100644
--- a/src/crewai/utilities/evaluators/crew_evaluator_handler.py
+++ b/src/crewai/utilities/evaluators/crew_evaluator_handler.py
@@ -1,4 +1,5 @@
 from collections import defaultdict
+from typing import Union
 
 from pydantic import BaseModel, Field
 from rich.box import HEAVY_EDGE
@@ -6,6 +7,7 @@ from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
@@ -32,9 +34,12 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, llm: Union[str, LLM]):
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        if isinstance(llm, str):
+            self.llm = LLM(model=llm)
+        else:
+            self.llm = llm
         self._telemetry = Telemetry()
         self._setup_for_evaluating()
 
@@ -51,7 +56,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(
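For context, a minimal usage sketch of the `Crew.test()` signature changed above. The `researcher`/`summary_task` setup and the `"gpt-4o"` model name are illustrative assumptions, not part of this change; only the `llm` parameter and the deprecated `openai_model_name` path come from the diff.

```python
# Sketch of the updated evaluation API (assumes an OpenAI-compatible
# "gpt-4o" model is configured in the environment).
from crewai import Agent, Crew, Task
from crewai.llm import LLM

# Illustrative crew; any existing crew would do.
researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="An analyst who writes concise summaries.",
)
summary_task = Task(
    description="Summarize the topic {topic} in three bullet points.",
    expected_output="Three bullet points.",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[summary_task])

# New-style call: pass an LLM instance (or a plain model-name string);
# strings are wrapped in LLM(model=...) before reaching CrewEvaluator.
crew.test(n_iterations=2, llm=LLM(model="gpt-4o"), inputs={"topic": "AI"})

# Legacy keyword is still accepted but superseded by `llm`.
crew.test(n_iterations=2, openai_model_name="gpt-4o")
```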