Compare commits

...

2 Commits

Author SHA1 Message Date
Devin AI
90aea23dd6 feat: improve LLM validation and error handling
- Add descriptive error messages with usage context
- Add LLM instance validation
- Add deprecation warning for openai_model_name
- Add string representation to CrewEvaluator
- Add edge case tests

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 22:48:56 +00:00
Devin AI
3e67a2eca1 feat: enable custom LLM support for Crew.test()
- Add llm parameter to Crew.test() that accepts string or LLM instance
- Maintain backward compatibility with openai_model_name parameter
- Update CrewEvaluator to handle any LLM implementation
- Add comprehensive test coverage

Fixes #2079

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 22:36:41 +00:00
4 changed files with 153 additions and 24 deletions
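
Taken together, the two commits let Crew.test() accept an llm argument that is either a model-name string or an LLM instance, while keeping openai_model_name as a deprecated alias. A minimal usage sketch (illustrative only; the researcher agent and its task are assumed to be defined as in the test suite below):

from crewai.crew import Crew
from crewai.llm import LLM

crew = Crew(agents=[researcher], tasks=[task])  # researcher/task assumed to exist

# New: pass a bare model name; it is wrapped into an LLM internally
crew.test(n_iterations=2, llm="gpt-4o-mini")

# ...or pass a pre-configured LLM instance directly
crew.test(n_iterations=2, llm=LLM(model="gpt-4o-mini"))

# Old style still works, but now emits a DeprecationWarning
crew.test(n_iterations=2, openai_model_name="gpt-4o-mini")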

View File

@@ -1076,18 +1076,50 @@ class Crew(BaseModel):
self,
n_iterations: int,
openai_model_name: Optional[str] = None,
llm: Optional[Union[str, LLM]] = None,
inputs: Optional[Dict[str, Any]] = None,
) -> None:
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
"""Test and evaluate the Crew with the given inputs for n iterations.
Args:
n_iterations: Number of test iterations to run
openai_model_name: (Deprecated) Name of OpenAI model to use for evaluation
llm: LLM instance or model name to use for evaluation
inputs: Optional dictionary of inputs to pass to the crew
"""
if openai_model_name:
warnings.warn(
"openai_model_name is deprecated and will be removed in future versions. Use llm parameter instead.",
DeprecationWarning,
stacklevel=2
)
test_crew = self.copy()
model = llm if llm else openai_model_name
try:
if not model:
raise ValueError(
"Either llm or openai_model_name must be provided. Please provide either "
"a custom LLM instance or an OpenAI model name."
)
if isinstance(model, LLM):
if not hasattr(model, 'model'):
raise ValueError("Provided LLM instance must have a 'model' attribute")
elif isinstance(model, str):
model = LLM(model=model)
else:
raise ValueError("LLM must be either a string model name or an LLM instance")
except Exception as e:
raise ValueError(f"Failed to initialize LLM: {str(e)}")
self._test_execution_span = test_crew._telemetry.test_execution_span(
test_crew,
n_iterations,
inputs,
openai_model_name, # type: ignore[arg-type]
str(model), # type: ignore[arg-type]
) # type: ignore[arg-type]
evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
evaluator = CrewEvaluator(test_crew, model)
for i in range(1, n_iterations + 1):
evaluator.set_iteration(i)

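The branching added to Crew.test() above reduces to a simple rule; the helper below is a hypothetical condensation for readability, not code from this change:

from typing import Optional, Union
from crewai.llm import LLM

def resolve_eval_llm(llm: Optional[Union[str, LLM]], openai_model_name: Optional[str]) -> LLM:
    # Hypothetical helper summarizing the validation performed in Crew.test() above.
    model = llm if llm else openai_model_name
    if not model:
        raise ValueError("Either llm or openai_model_name must be provided.")
    if isinstance(model, LLM):
        return model  # a pre-built instance is used as-is
    if isinstance(model, str):
        return LLM(model=model)  # a bare model name is wrapped
    raise ValueError("LLM must be either a string model name or an LLM instance")
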
View File

@@ -1,6 +1,10 @@
from collections import defaultdict
from typing import Union
from pydantic import BaseModel, Field
from crewai.llm import LLM
from rich.box import HEAVY_EDGE
from rich.console import Console
from rich.table import Table
@@ -23,7 +27,7 @@ class CrewEvaluator:
Attributes:
crew (Crew): The crew of agents to evaluate.
openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
llm (LLM): The language model to use for evaluating the performance of the agents.
tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
iteration (int): The current iteration of the evaluation.
"""
@@ -32,12 +36,20 @@ class CrewEvaluator:
run_execution_times: defaultdict = defaultdict(list)
iteration: int = 0
def __init__(self, crew, openai_model_name: str):
def __init__(self, crew, llm: Union[str, LLM]):
self.crew = crew
self.openai_model_name = openai_model_name
try:
self.llm = llm if isinstance(llm, LLM) else LLM(model=llm)
if not hasattr(self.llm, 'model'):
raise ValueError("Provided LLM instance must have a 'model' attribute")
except Exception as e:
raise ValueError(f"Failed to initialize LLM: {str(e)}")
self._telemetry = Telemetry()
self._setup_for_evaluating()
def __str__(self) -> str:
return f"CrewEvaluator(model={str(self.llm)}, iteration={self.iteration})"
def _setup_for_evaluating(self) -> None:
"""Sets up the crew for evaluating."""
for task in self.crew.tasks:
@@ -51,7 +63,7 @@ class CrewEvaluator:
),
backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
verbose=False,
llm=self.openai_model_name,
llm=self.llm,
)
def _evaluation_task(
@@ -181,7 +193,7 @@ class CrewEvaluator:
self.crew,
evaluation_result.pydantic.quality,
current_task._execution_time,
self.openai_model_name,
str(self.llm),
)
self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
self.run_execution_times[self.iteration].append(

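Since the constructor now performs the same normalization, CrewEvaluator can be built directly with either form (a sketch; the import path is assumed and crew is any existing Crew instance):

from crewai.llm import LLM
# Import path assumed for illustration.
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator

evaluator = CrewEvaluator(crew, "gpt-4o-mini")             # string is wrapped into LLM(model="gpt-4o-mini")
evaluator = CrewEvaluator(crew, LLM(model="gpt-4o-mini"))  # an LLM instance is used as-is
print(evaluator)                                           # CrewEvaluator(model=..., iteration=0) via the new __str__
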
View File

@@ -13,6 +13,7 @@ import pytest
from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
from crewai.llm import LLM
from crewai.crews.crew_output import CrewOutput
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.process import Process
@@ -1123,7 +1124,7 @@ def test_kickoff_for_each_empty_input():
assert results == []
@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_invalid_input():
"""Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -2812,10 +2813,43 @@ def test_conditional_should_execute():
@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
def test_crew_testing_with_custom_llm(kickoff_mock, copy_mock, crew_evaluator):
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
expected_output="5 bullet points with a paragraph for each idea.",
description="Test task",
expected_output="Test output",
agent=researcher,
)
crew = Crew(
agents=[researcher],
tasks=[task],
)
# Create a mock for the copied crew
copy_mock.return_value = crew
custom_llm = LLM(model="gpt-4o-mini")
n_iterations = 2
crew.test(n_iterations, llm=custom_llm)
# Ensure kickoff is called on the copied crew
kickoff_mock.assert_has_calls([mock.call(inputs=None), mock.call(inputs=None)])
# Verify CrewEvaluator was called with custom LLM
crew_evaluator.assert_has_calls([
mock.call(crew, custom_llm),
mock.call().set_iteration(1),
mock.call().set_iteration(2),
mock.call().print_crew_evaluation_result(),
])
@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_testing_backward_compatibility(kickoff_mock, copy_mock, crew_evaluator):
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
@@ -2828,22 +2862,73 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
copy_mock.return_value = crew
n_iterations = 2
crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
with pytest.warns(DeprecationWarning, match="openai_model_name is deprecated"):
crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
# Ensure kickoff is called on the copied crew
kickoff_mock.assert_has_calls(
[mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
kickoff_mock.assert_has_calls([
mock.call(inputs={"topic": "AI"}),
mock.call(inputs={"topic": "AI"})
])
# Verify CrewEvaluator was called with string model name
crew_evaluator.assert_has_calls([
mock.call(crew, mock.ANY),
mock.call().set_iteration(1),
mock.call().set_iteration(2),
mock.call().print_crew_evaluation_result(),
])
@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_testing_missing_llm(kickoff_mock, copy_mock, crew_evaluator):
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
crew_evaluator.assert_has_calls(
[
mock.call(crew, "gpt-4o-mini"),
mock.call().set_iteration(1),
mock.call().set_iteration(2),
mock.call().print_crew_evaluation_result(),
]
crew = Crew(
agents=[researcher],
tasks=[task],
)
# Create a mock for the copied crew
copy_mock.return_value = crew
n_iterations = 2
with pytest.raises(ValueError, match="Either llm or openai_model_name must be provided"):
crew.test(n_iterations)
@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_testing_with_invalid_llm(kickoff_mock, copy_mock, crew_evaluator):
task = Task(
description="Test task",
expected_output="Test output",
agent=researcher,
)
crew = Crew(
agents=[researcher],
tasks=[task],
)
# Create a mock for the copied crew
copy_mock.return_value = crew
# Test invalid LLM type
with pytest.raises(ValueError, match="Failed to initialize LLM"):
crew.test(n_iterations=2, llm={})
# Test LLM without model attribute
class InvalidLLM:
def __init__(self): pass
with pytest.raises(ValueError, match="LLM must be either a string model name or an LLM instance"):
crew.test(n_iterations=2, llm=InvalidLLM())
@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_verbose_manager_agent():
@@ -3125,4 +3210,4 @@ def test_multimodal_agent_live_image_analysis():
# Verify we got a meaningful response
assert isinstance(result.raw, str)
assert len(result.raw) > 100 # Expecting a detailed analysis
assert "error" not in result.raw.lower() # No error messages in response
assert "error" not in result.raw.lower() # No error messages in response

View File

@@ -23,7 +23,7 @@ class TestCrewEvaluator:
)
crew = Crew(agents=[agent], tasks=[task])
return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
return CrewEvaluator(crew, llm="gpt-4o-mini")
def test_setup_for_evaluating(self, crew_planner):
crew_planner._setup_for_evaluating()