feat: enable custom LLM support for Crew.test()

- Add an llm parameter to Crew.test() that accepts a string or an LLM instance (see the usage sketch below)
- Maintain backward compatibility with the existing openai_model_name parameter
- Update CrewEvaluator to handle any LLM implementation
- Improve docstrings and type hints

Fixes #2080

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date:   2025-02-09 22:42:05 +00:00
Parent: d6d98ee969
Commit: 9bd39464cc
2 changed files with 27 additions and 7 deletions
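A minimal usage sketch of the new parameter (assumes a crew already configured with agents and tasks; the "topic" input key and model names are illustrative only):

from crewai import Crew, LLM

# crew = Crew(agents=[...], tasks=[...])  # assumed to be configured elsewhere

# Pass a model name as a string; Crew.test() wraps it in an LLM instance internally
crew.test(n_iterations=2, llm="gpt-4o-mini", inputs={"topic": "AI"})

# Or pass a fully configured LLM instance for finer control over evaluation
evaluation_llm = LLM(model="gpt-4o", temperature=0)
crew.test(n_iterations=2, llm=evaluation_llm)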

@@ -1147,20 +1147,35 @@ class Crew(BaseModel):
     def test(
         self,
-        n_iterations: int,
+        n_iterations: int = 1,
         openai_model_name: Optional[str] = None,
+        llm: Optional[Union[str, LLM]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
+        """Test and evaluate the Crew with the given inputs for n iterations.
+
+        Args:
+            n_iterations: Number of iterations to run the test
+            openai_model_name: OpenAI model name to use for evaluation (deprecated)
+            llm: LLM instance or model name to use for evaluation
+            inputs: Optional dictionary of inputs to pass to the crew
+        """
+        if not llm and not openai_model_name:
+            raise ValueError("Either llm or openai_model_name must be provided")
+
+        model_to_use = llm or openai_model_name
+        if isinstance(model_to_use, str):
+            model_to_use = LLM(model=model_to_use)
+
         test_crew = self.copy()
 
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
+            str(model_to_use.model),  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, model_to_use)
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
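For reference, a short sketch of the backward-compatibility behavior introduced above (assumes the same configured crew object as before):

# The deprecated parameter still works; the model name is resolved the same way
crew.test(n_iterations=1, openai_model_name="gpt-4o-mini")

# Omitting both parameters now raises an error
try:
    crew.test(n_iterations=1)
except ValueError as exc:
    print(exc)  # Either llm or openai_model_name must be provided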

@@ -1,4 +1,5 @@
 from collections import defaultdict
+from typing import Union
 
 from pydantic import BaseModel, Field
 from rich.box import HEAVY_EDGE
@@ -6,6 +7,7 @@ from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
@@ -32,9 +34,12 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, llm: Union[str, LLM]):
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        if isinstance(llm, str):
+            self.llm = LLM(model=llm)
+        else:
+            self.llm = llm
         self._telemetry = Telemetry()
         self._setup_for_evaluating()
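A small sketch of the normalization this constructor performs; the import path for CrewEvaluator is assumed, and crew stands in for any configured Crew instance:

from crewai.llm import LLM
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator  # assumed module path

# A plain string is wrapped in an LLM instance
evaluator = CrewEvaluator(crew, "gpt-4o-mini")
assert isinstance(evaluator.llm, LLM)

# An LLM instance is stored as-is, preserving custom settings such as temperature
evaluator = CrewEvaluator(crew, LLM(model="gpt-4o", temperature=0))
assert isinstance(evaluator.llm, LLM)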
@@ -51,7 +56,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(