Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-10 16:48:30 +00:00)
feat: enable custom LLM support for Crew.test()
- Add llm parameter to Crew.test() that accepts string or LLM instance
- Maintain backward compatibility with openai_model_name parameter
- Update CrewEvaluator to handle any LLM implementation
- Improve docstrings and type hints

Fixes #2080

Co-Authored-By: Joe Moura <joao@crewai.com>
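For context, a minimal usage sketch of the new signature. The crew setup, model names, and inputs here are illustrative only; nothing below is taken from the change itself except the `test()` keyword arguments.

```python
from crewai import Agent, Crew, Task
from crewai.llm import LLM

# Hypothetical one-agent crew, used only to illustrate the call shapes.
researcher = Agent(
    role="Researcher",
    goal="Summarize a topic in a few sentences",
    backstory="A concise research assistant.",
)
task = Task(
    description="Summarize {topic}",
    expected_output="A short summary",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task])

# Old path, still supported but deprecated:
crew.test(n_iterations=2, openai_model_name="gpt-4o", inputs={"topic": "LLMs"})

# New: a plain model-name string is wrapped in an LLM instance internally ...
crew.test(n_iterations=2, llm="gpt-4o", inputs={"topic": "LLMs"})

# ... or pass a fully configured LLM instance directly.
crew.test(n_iterations=2, llm=LLM(model="gpt-4o", temperature=0), inputs={"topic": "LLMs"})
```

The first hunk below changes Crew.test() itself; the remaining hunks update the CrewEvaluator it drives.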
```diff
@@ -1147,20 +1147,35 @@ class Crew(BaseModel):
     def test(
         self,
-        n_iterations: int,
+        n_iterations: int = 1,
         openai_model_name: Optional[str] = None,
+        llm: Optional[Union[str, LLM]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
+        """Test and evaluate the Crew with the given inputs for n iterations.
+
+        Args:
+            n_iterations: Number of iterations to run the test
+            openai_model_name: OpenAI model name to use for evaluation (deprecated)
+            llm: LLM instance or model name to use for evaluation
+            inputs: Optional dictionary of inputs to pass to the crew
+        """
+        if not llm and not openai_model_name:
+            raise ValueError("Either llm or openai_model_name must be provided")
+
+        model_to_use = llm or openai_model_name
+        if isinstance(model_to_use, str):
+            model_to_use = LLM(model=model_to_use)
+
         test_crew = self.copy()
 
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
+            str(model_to_use.model),  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, model_to_use)
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
```
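Read in isolation, the resolution the new body performs can be sketched as a small helper (the function name is ours, not part of the change): prefer `llm`, fall back to the deprecated `openai_model_name`, and normalize strings into LLM instances.

```python
from typing import Optional, Union

from crewai.llm import LLM


def resolve_eval_llm(
    llm: Optional[Union[str, LLM]],
    openai_model_name: Optional[str],
) -> LLM:
    """Standalone mirror of the resolution logic added to Crew.test()."""
    if not llm and not openai_model_name:
        raise ValueError("Either llm or openai_model_name must be provided")
    model_to_use = llm or openai_model_name
    if isinstance(model_to_use, str):
        model_to_use = LLM(model=model_to_use)
    return model_to_use


# The three call paths this enables:
assert isinstance(resolve_eval_llm("gpt-4o", None), LLM)        # new string path
assert isinstance(resolve_eval_llm(None, "gpt-4o-mini"), LLM)   # deprecated path still works
custom = LLM(model="gpt-4o", temperature=0)
assert resolve_eval_llm(custom, None) is custom                  # instances pass through unchanged
```

The hunks below are in the CrewEvaluator module, which now accepts the normalized value.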
```diff
@@ -1,4 +1,5 @@
 from collections import defaultdict
+from typing import Union
 
 from pydantic import BaseModel, Field
 from rich.box import HEAVY_EDGE
@@ -6,6 +7,7 @@ from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
```
```diff
@@ -32,9 +34,12 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, llm: Union[str, LLM]):
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        if isinstance(llm, str):
+            self.llm = LLM(model=llm)
+        else:
+            self.llm = llm
         self._telemetry = Telemetry()
         self._setup_for_evaluating()
 
```
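Because the evaluator now stores whatever LLM it is given, evaluation is no longer tied to OpenAI model names. A hedged sketch of configuring a non-OpenAI backend for evaluation; the provider string and base_url are illustrative, not prescribed by this change.

```python
from crewai.llm import LLM

# Assumed LiteLLM-style provider/model string and a local endpoint;
# substitute whatever your deployment actually exposes.
local_eval_llm = LLM(
    model="ollama/llama3.1",
    base_url="http://localhost:11434",
    temperature=0,
)

# This is the object CrewEvaluator keeps on self.llm and hands to its
# evaluator Agent, e.g. via crew.test(n_iterations=2, llm=local_eval_llm).
```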
```diff
@@ -51,7 +56,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(
```
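The only behavioral change in the last hunk is that the evaluator Agent now receives an LLM instance rather than a bare model name; the diff shows Agent's `llm` field accepts either form, so a configured instance flows through unchanged. A minimal sketch, where the role, goal, and model are placeholders rather than the evaluator's real configuration:

```python
from crewai import Agent
from crewai.llm import LLM

evaluator_agent = Agent(
    role="Task Execution Evaluator",                              # placeholder role text
    goal="Score how well each task was completed from 1 to 10",  # placeholder goal text
    backstory="Evaluator agent for crew evaluation",
    verbose=False,
    llm=LLM(model="gpt-4o", temperature=0),                       # any configured LLM instance
)
```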