Compare commits

...

2 Commits

Author SHA1 Message Date
Devin AI
206ad7c954 chore: address code review feedback
- Fix Crew import warning with TYPE_CHECKING
- Add performance monitoring with @track_agent
- Enhance docstrings and type hints
- Improve test organization

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 22:37:38 +00:00
Devin AI
93ce2ae55d feat: enable custom LLM support for Crew.test()
This PR enables the Crew.test() method to work with any LLM implementation through the LLM class while maintaining backward compatibility with the openai_model_name parameter.

Changes:
- Added a new llm parameter to Crew.test() that accepts a string or an LLM instance
- Maintained backward compatibility with the openai_model_name parameter
- Updated CrewEvaluator to handle any LLM implementation
- Added comprehensive test coverage for both new functionality and backward compatibility

Fixes #2078

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 22:29:06 +00:00
4 changed files with 197 additions and 28 deletions
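For context, here is a minimal usage sketch of the API described in the second commit message above. The agent, task, and model names are illustrative placeholders, not taken from the diff:

    from crewai import Agent, Crew, Task
    from crewai.llm import LLM

    # Hypothetical crew setup, for illustration only.
    researcher = Agent(role="Researcher", goal="Research a topic", backstory="...")
    task = Task(
        description="Summarize the topic",
        expected_output="A short summary",
        agent=researcher,
    )
    crew = Crew(agents=[researcher], tasks=[task])

    # New style: pass a model-name string or an LLM instance.
    crew.test(n_iterations=2, llm="gpt-4o-mini", inputs={"topic": "AI"})
    crew.test(n_iterations=2, llm=LLM(model="gpt-4o-mini"), inputs={"topic": "AI"})

    # Old style still works; llm takes precedence if both are provided.
    crew.test(n_iterations=2, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})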

View File

@@ -4,8 +4,11 @@ import uuid
import warnings
from concurrent.futures import Future
from hashlib import md5
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from crewai.llm import LLM
from pydantic import (
UUID4,
BaseModel,
@@ -1076,18 +1079,35 @@ class Crew(BaseModel):
self,
n_iterations: int,
openai_model_name: Optional[str] = None,
llm: Optional[Union[str, LLM]] = None,
inputs: Optional[Dict[str, Any]] = None,
) -> None:
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures.
Args:
n_iterations: Number of test iterations to run
openai_model_name: (Deprecated) OpenAI model name to use for evaluation. Will be ignored if llm is provided.
llm: Language model to use for evaluation, can be a string (model name) or LLM instance.
Takes precedence over openai_model_name if both are provided.
inputs: Optional dictionary of inputs to pass to the crew
Raises:
ValueError: If neither openai_model_name nor llm is provided
"""
if openai_model_name and llm:
warnings.warn("Both openai_model_name and llm provided. Using llm parameter.")
elif not (openai_model_name or llm):
raise ValueError("Either openai_model_name or llm must be provided")
test_crew = self.copy()
self._test_execution_span = test_crew._telemetry.test_execution_span(
test_crew,
n_iterations,
inputs,
openai_model_name, # type: ignore[arg-type]
) # type: ignore[arg-type]
evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
str(llm) if llm else openai_model_name,
)
evaluator = CrewEvaluator(test_crew, llm or openai_model_name)
for i in range(1, n_iterations + 1):
evaluator.set_iteration(i)
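The argument handling in this hunk follows a simple precedence pattern: prefer llm, warn if both parameters are set, and fail if neither is. A standalone sketch of that logic, with a hypothetical helper name not present in the diff:

    import warnings
    from typing import Optional, Union

    from crewai.llm import LLM

    def resolve_eval_llm(
        llm: Optional[Union[str, LLM]],
        openai_model_name: Optional[str],
    ) -> Union[str, LLM]:
        # Prefer the new llm parameter; warn when both are given, fail when neither is.
        if openai_model_name and llm:
            warnings.warn("Both openai_model_name and llm provided. Using llm parameter.")
        elif not (openai_model_name or llm):
            raise ValueError("Either openai_model_name or llm must be provided")
        return llm or openai_model_name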

View File

@@ -1,15 +1,28 @@
from collections import defaultdict
from typing import TYPE_CHECKING, Union
from pydantic import BaseModel, Field
from rich.box import HEAVY_EDGE
from rich.console import Console
from rich.table import Table
try:
from agentops import track_agent
except ImportError:
def track_agent():
def noop(f):
return f
return noop
from crewai.agent import Agent
from crewai.llm import LLM
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry
if TYPE_CHECKING:
from crewai.crew import Crew
class TaskEvaluationPydanticOutput(BaseModel):
quality: float = Field(
@@ -17,13 +30,15 @@ class TaskEvaluationPydanticOutput(BaseModel):
)
@track_agent()
class CrewEvaluator:
"""
A class to evaluate the performance of the agents in the crew based on the tasks they have performed.
"""Evaluates the performance of a crew's agents on their tasks.
Handles evaluation of agent performance using specified LLM model.
Attributes:
crew (Crew): The crew of agents to evaluate.
openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
llm (Union[str, LLM]): The language model to use for evaluation. Can be a string (model name) or LLM instance.
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
iteration (int): The current iteration of the evaluation.
"""
@@ -32,18 +47,29 @@ class CrewEvaluator:
run_execution_times: defaultdict = defaultdict(list)
iteration: int = 0
def __init__(self, crew, openai_model_name: str):
def __init__(self, crew: "Crew", llm: Union[str, LLM]) -> None:
"""Initialize CrewEvaluator with crew and language model.
Args:
crew: The crew to evaluate
llm: Language model to use for evaluation, can be a string (model name) or LLM instance
"""
self.crew = crew
self.openai_model_name = openai_model_name
self.llm = llm if isinstance(llm, LLM) else LLM(model=llm)
self._telemetry = Telemetry()
self._setup_for_evaluating()
def _setup_for_evaluating(self) -> None:
"""Sets up the crew for evaluating."""
"""Sets up the crew for evaluating by assigning evaluation callbacks to tasks."""
for task in self.crew.tasks:
task.callback = self.evaluate
def _evaluator_agent(self):
def _evaluator_agent(self) -> Agent:
"""Creates an agent specialized in evaluating task performance.
Returns:
Agent: An agent configured to evaluate task execution quality.
"""
return Agent(
role="Task Execution Evaluator",
goal=(
@@ -51,12 +77,22 @@ class CrewEvaluator:
),
backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
verbose=False,
llm=self.openai_model_name,
llm=self.llm,
)
def _evaluation_task(
self, evaluator_agent: Agent, task_to_evaluate: Task, task_output: str
) -> Task:
"""Creates a task for evaluating another task's execution.
Args:
evaluator_agent: The agent that will perform the evaluation
task_to_evaluate: The task whose execution needs to be evaluated
task_output: The output produced by the task execution
Returns:
Task: A task configured to evaluate the execution quality
"""
return Task(
description=(
"Based on the task description and the expected output, compare and evaluate the performance of the agents in the crew based on the Task Output they have performed using score from 1 to 10 evaluating on completion, quality, and overall performance."
@@ -72,25 +108,28 @@ class CrewEvaluator:
)
def set_iteration(self, iteration: int) -> None:
"""Sets the current iteration number for test tracking.
Args:
iteration: The iteration number to set
"""
self.iteration = iteration
def print_crew_evaluation_result(self) -> None:
"""
Prints the evaluation result of the crew in a table.
A Crew with 2 tasks using the command crewai test -n 3
will output the following table:
"""Prints a formatted table showing evaluation results for all tasks and iterations.
Displays task scores (1-10), average scores, execution times, and involved agents
in a rich-formatted table. Each row represents a task or crew-level metric,
with columns for each test iteration and averages.
Example output:
Tasks Scores
(1-10 Higher is better)
┏━━━━━━━━━━━━━━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━┳━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Tasks/Crew/Agents ┃ Run 1 ┃ Run 2 ┃ Run 3 ┃ Avg. Total ┃ Agents ┃
┡━━━━━━━━━━━━━━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━╇━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ Task 1 │ 9.0 │ 10.0 │ 9.0 │ 9.3 │ - AI LLMs Senior Researcher │
│ │ │ │ │ │ - AI LLMs Reporting Analyst │
│ │ │ │ │ │ │
│ Task 2 │ 9.0 │ 9.0 │ 9.0 │ 9.0 │ - AI LLMs Senior Researcher │
│ │ │ │ │ │ - AI LLMs Reporting Analyst │
│ │ │ │ │ │ │
│ Crew │ 9.0 │ 9.5 │ 9.0 │ 9.2 │ │
│ Execution Time (s) │ 42 │ 79 │ 52 │ 57 │ │
└────────────────────┴───────┴───────┴───────┴────────────┴──────────────────────────────┘
@@ -156,8 +195,18 @@ class CrewEvaluator:
console = Console()
console.print(table)
def evaluate(self, task_output: TaskOutput):
"""Evaluates the performance of the agents in the crew based on the tasks they have performed."""
def evaluate(self, task_output: TaskOutput) -> None:
"""Evaluates the performance of the agents in the crew based on task execution.
Evaluates task execution quality using a specialized evaluator agent and
stores the evaluation results for later analysis.
Args:
task_output: The output from the task execution to evaluate
Raises:
ValueError: If task_output is missing or doesn't match any known task
"""
current_task = None
for task in self.crew.tasks:
if task.description == task_output.description:
@@ -181,7 +230,7 @@ class CrewEvaluator:
self.crew,
evaluation_result.pydantic.quality,
current_task._execution_time,
self.openai_model_name,
str(self.llm),
)
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
self.run_execution_times[self.iteration].append(
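The constructor in this file normalizes either input shape into an LLM instance. A minimal sketch of that pattern in isolation (coerce_llm is a hypothetical name, not part of the diff):

    from typing import Union

    from crewai.llm import LLM

    def coerce_llm(llm: Union[str, LLM]) -> LLM:
        # Accept either a model-name string or an already-built LLM instance.
        return llm if isinstance(llm, LLM) else LLM(model=llm)

    coerce_llm("gpt-4o-mini")             # wraps the string as LLM(model="gpt-4o-mini")
    coerce_llm(LLM(model="gpt-4o-mini"))  # passed through unchanged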

View File

@@ -14,6 +14,7 @@ from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.process import Process
from crewai.task import Task
@@ -1123,7 +1124,7 @@ def test_kickoff_for_each_empty_input():
assert results == []
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr(filter_headeruvs=["authorization"])
def test_kickoff_for_each_invalid_input():
"""Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -2814,8 +2815,8 @@ def test_conditional_should_execute():
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
expected_output="5 bullet points with a paragraph for each idea.",
description="Test task",
expected_output="Expected output",
agent=researcher,
)
@@ -2844,6 +2845,77 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
]
)
class TestCrewTesting:
"""Tests for Crew.test() functionality."""
@pytest.fixture
def task(self):
return Task(
description="Test task",
expected_output="Expected output",
agent=researcher,
)
@pytest.fixture
def crew(self, task):
return Crew(agents=[researcher], tasks=[task])
@pytest.mark.parametrize("llm_input", [
"gpt-4o-mini",
LLM(model="gpt-4o-mini"),
])
@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_test_with_different_llms(self, kickoff_mock, copy_mock, crew_evaluator, crew, llm_input):
"""Test Crew.test() with different LLM inputs."""
copy_mock.return_value = crew
crew.test(n_iterations=2, llm=llm_input, inputs={"topic": "AI"})
kickoff_mock.assert_has_calls([
mock.call(inputs={"topic": "AI"}),
mock.call(inputs={"topic": "AI"})
])
crew_evaluator.assert_has_calls([
mock.call(crew, llm_input),
mock.call().set_iteration(1),
mock.call().set_iteration(2),
mock.call().print_crew_evaluation_result(),
])
@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_test_with_both_llm_and_model_name(self, kickoff_mock, copy_mock, crew_evaluator, crew):
"""Test that llm parameter takes precedence over openai_model_name."""
custom_llm = LLM(model="gpt-4o-mini")
copy_mock.return_value = crew
with pytest.warns(UserWarning, match="Both openai_model_name and llm provided. Using llm parameter."):
crew.test(n_iterations=2, llm=custom_llm, openai_model_name="gpt-4", inputs={"topic": "AI"})
kickoff_mock.assert_has_calls([
mock.call(inputs={"topic": "AI"}),
mock.call(inputs={"topic": "AI"})
])
crew_evaluator.assert_has_calls([
mock.call(crew, custom_llm),
mock.call().set_iteration(1),
mock.call().set_iteration(2),
mock.call().print_crew_evaluation_result(),
])
@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_test_with_no_llm_raises_error(self, kickoff_mock, copy_mock, crew_evaluator, crew):
"""Test that error is raised when no LLM is provided."""
copy_mock.return_value = crew
with pytest.raises(ValueError, match="Either openai_model_name or llm must be provided"):
crew.test(n_iterations=2, inputs={"topic": "AI"})
@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_verbose_manager_agent():
@@ -3125,4 +3197,4 @@ def test_multimodal_agent_live_image_analysis():
# Verify we got a meaningful response
assert isinstance(result.raw, str)
assert len(result.raw) > 100 # Expecting a detailed analysis
assert "error" not in result.raw.lower() # No error messages in response
assert "error" not in result.raw.lower() # No error messages in response

View File

@@ -4,6 +4,7 @@ import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.llm import LLM
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.utilities.evaluators.crew_evaluator_handler import (
@@ -23,7 +24,7 @@ class TestCrewEvaluator:
)
crew = Crew(agents=[agent], tasks=[task])
return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
return CrewEvaluator(crew, "gpt-4o-mini")
def test_setup_for_evaluating(self, crew_planner):
crew_planner._setup_for_evaluating()
@@ -140,3 +141,30 @@ class TestCrewEvaluator:
execute().pydantic = TaskEvaluationPydanticOutput(quality=9.5)
crew_planner.evaluate(task_output)
assert crew_planner.tasks_scores[0] == [9.5]
def test_crew_evaluator_with_custom_llm(self):
agent = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")
task = Task(
description="Task 1",
expected_output="Output 1",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
custom_llm = LLM(model="gpt-4o-mini")
evaluator = CrewEvaluator(crew, custom_llm)
assert evaluator.llm == custom_llm
def test_crew_evaluator_with_model_name(self):
agent = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")
task = Task(
description="Task 1",
expected_output="Output 1",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
model_name = "gpt-4o-mini"
evaluator = CrewEvaluator(crew, model_name)
assert isinstance(evaluator.llm, LLM)
assert evaluator.llm.model == model_name