Compare commits

...

2 Commits

Author     SHA1         Message                                            Date
Devin AI   90c577fdd0   refactor: improve type safety and test patterns    2025-02-09 21:28:13 +00:00
                        Co-Authored-By: Joe Moura <joao@crewai.com>
Devin AI   b8a15c6115   fix: enable any llm to run test functionality      2025-02-09 21:15:54 +00:00
                        Co-Authored-By: Joe Moura <joao@crewai.com>
4 changed files with 129 additions and 17 deletions

View File

@@ -4,6 +4,7 @@ import uuid
 import warnings
 from concurrent.futures import Future
 from hashlib import md5
+from crewai.llm import LLM
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 from pydantic import (
@@ -1075,19 +1076,36 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: Optional[str] = None,
+        llm: Union[str, LLM],
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
+        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures.
+
+        Args:
+            n_iterations: Number of test iterations to run
+            llm: Language model to use for evaluation. Can be either a model name string (e.g. "gpt-4")
+                or an LLM instance for custom implementations
+            inputs: Optional dictionary of input values to use for task execution
+
+        Example:
+            ```python
+            # Using a model name string
+            crew.test(n_iterations=3, llm="gpt-4")
+
+            # Using a custom LLM implementation
+            custom_llm = LLM(model="custom-model")
+            crew.test(n_iterations=3, llm=custom_llm)
+            ```
+        """
         test_crew = self.copy()

         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
-        )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+            str(llm) if isinstance(llm, LLM) else llm,
+        )
+        evaluator = CrewEvaluator(test_crew, llm)

         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
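
For callers, the practical effect of this hunk is a keyword rename plus a widened type: `openai_model_name: Optional[str] = None` becomes the required `llm: Union[str, LLM]`, so existing calls that omitted the model will need to pass one explicitly. A minimal migration sketch (the `Agent`/`Task`/`Crew` setup below is illustrative only and not taken from this diff):

```python
# Illustrative migration sketch; only the crew.test(...) calls reflect this diff.
from crewai import Agent, Crew, Task
from crewai.llm import LLM

researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="Example agent used only to demonstrate the new test() signature",
)
task = Task(description="Summarize {topic}", expected_output="A short summary", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task])

# Before: crew.test(2, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
# After:  pass either a model name string...
crew.test(n_iterations=2, llm="gpt-4o-mini", inputs={"topic": "AI"})

# ...or an LLM instance, which is handed to CrewEvaluator unchanged
# (telemetry receives str(llm) in that case).
crew.test(n_iterations=2, llm=LLM(model="gpt-4o-mini"), inputs={"topic": "AI"})
```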

View File

@@ -1,10 +1,16 @@
 from collections import defaultdict
+from typing import Any, Dict, List, Optional, TypeVar, Union
+from typing import DefaultDict  # Separate import to avoid circular imports

 from pydantic import BaseModel, Field
 from rich.box import HEAVY_EDGE
 from rich.console import Console
 from rich.table import Table

+from crewai.llm import LLM
+
+T = TypeVar('T', bound=LLM)
+
 from crewai.agent import Agent
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
@@ -28,14 +34,47 @@ class CrewEvaluator:
         iteration (int): The current iteration of the evaluation.
     """

-    tasks_scores: defaultdict = defaultdict(list)
-    run_execution_times: defaultdict = defaultdict(list)
+    _tasks_scores: DefaultDict[int, List[float]] = Field(
+        default_factory=lambda: defaultdict(list))
+    _run_execution_times: DefaultDict[int, List[float]] = Field(
+        default_factory=lambda: defaultdict(list))
     iteration: int = 0

-    def __init__(self, crew, openai_model_name: str):
+    @property
+    def tasks_scores(self) -> DefaultDict[int, List[float]]:
+        return self._tasks_scores
+
+    @tasks_scores.setter
+    def tasks_scores(self, value: Dict[int, List[float]]) -> None:
+        self._tasks_scores = defaultdict(list, value)
+
+    @property
+    def run_execution_times(self) -> DefaultDict[int, List[float]]:
+        return self._run_execution_times
+
+    @run_execution_times.setter
+    def run_execution_times(self, value: Dict[int, List[float]]) -> None:
+        self._run_execution_times = defaultdict(list, value)
+
+    def __init__(self, crew, llm: Union[str, T]):
+        """Initialize the CrewEvaluator.
+
+        Args:
+            crew: The Crew instance to evaluate
+            llm: Language model to use for evaluation. Can be either a model name string
+                or an LLM instance for custom implementations
+
+        Raises:
+            ValueError: If llm is None or invalid
+        """
+        if not llm:
+            raise ValueError("Invalid LLM configuration")
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        self.llm = LLM(model=llm) if isinstance(llm, str) else llm
         self._telemetry = Telemetry()
+        self._tasks_scores = defaultdict(list)
+        self._run_execution_times = defaultdict(list)
         self._setup_for_evaluating()

     def _setup_for_evaluating(self) -> None:
@@ -51,7 +90,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )

     def _evaluation_task(
@@ -181,11 +220,19 @@ class CrewEvaluator:
                 self.crew,
                 evaluation_result.pydantic.quality,
                 current_task._execution_time,
-                self.openai_model_name,
+                self._get_llm_identifier(),
             )
-            self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
-            self.run_execution_times[self.iteration].append(
+            self._tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
+            self._run_execution_times[self.iteration].append(
                 current_task._execution_time
             )
         else:
             raise ValueError("Evaluation result is not in the expected format")
+
+    def _get_llm_identifier(self) -> str:
+        """Get a string identifier for the LLM instance.
+
+        Returns:
+            String representation of the LLM for telemetry
+        """
+        return str(self.llm) if isinstance(self.llm, LLM) else self.llm
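
Both the constructor and `_get_llm_identifier` lean on the same accept-a-string-or-an-instance idiom. A self-contained sketch of that pattern (using a `StubLLM` stand-in rather than the real `crewai.llm.LLM`, so it runs without crewai installed):

```python
from typing import Union


class StubLLM:
    """Stand-in for crewai.llm.LLM, used only to illustrate the normalization idiom."""

    def __init__(self, model: str):
        self.model = model

    def __str__(self) -> str:
        return self.model


def normalize_llm(llm: Union[str, StubLLM]) -> StubLLM:
    # Mirrors the evaluator's constructor: reject falsy input, wrap bare model
    # names in an LLM object, and pass instances through untouched.
    if not llm:
        raise ValueError("Invalid LLM configuration")
    return StubLLM(model=llm) if isinstance(llm, str) else llm


print(normalize_llm("gpt-4o-mini").model)    # gpt-4o-mini
print(str(normalize_llm(StubLLM("gpt-4"))))  # gpt-4, same shape as _get_llm_identifier()
```

Because `self.llm` is always an LLM object after `__init__`, the `isinstance` check in `_get_llm_identifier` is defensive rather than strictly required.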

View File

@@ -10,6 +10,7 @@ import instructor
 import pydantic_core
 import pytest

+from crewai.llm import LLM
 from crewai.agent import Agent
 from crewai.agents.cache import CacheHandler
 from crewai.crew import Crew
@@ -1123,7 +1124,7 @@ def test_kickoff_for_each_empty_input():
     assert results == []


 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_kickoff_for_each_invalid_input():
     """Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -2828,7 +2829,7 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
     copy_mock.return_value = crew

     n_iterations = 2
-    crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
+    crew.test(n_iterations, llm="gpt-4o-mini", inputs={"topic": "AI"})

     # Ensure kickoff is called on the copied crew
     kickoff_mock.assert_has_calls(
@@ -2844,6 +2845,32 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
         ]
     )

+
+@mock.patch("crewai.crew.CrewEvaluator")
+@mock.patch("crewai.crew.Crew.copy")
+@mock.patch("crewai.crew.Crew.kickoff")
+def test_crew_testing_with_custom_llm(kickoff_mock, copy_mock, crew_evaluator):
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+    copy_mock.return_value = crew
+
+    custom_llm = LLM(model="gpt-4")
+    crew.test(2, llm=custom_llm, inputs={"topic": "AI"})
+
+    kickoff_mock.assert_has_calls([
+        mock.call(inputs={"topic": "AI"}),
+        mock.call(inputs={"topic": "AI"})
+    ])
+
+    crew_evaluator.assert_has_calls([
+        mock.call(crew, custom_llm),
+        mock.call().set_iteration(1),
+        mock.call().set_iteration(2),
+        mock.call().print_crew_evaluation_result(),
+    ])
+
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_hierarchical_verbose_manager_agent():
@@ -3125,4 +3152,4 @@ def test_multimodal_agent_live_image_analysis():
     # Verify we got a meaningful response
     assert isinstance(result.raw, str)
     assert len(result.raw) > 100  # Expecting a detailed analysis
     assert "error" not in result.raw.lower()  # No error messages in response

View File

@@ -2,6 +2,7 @@ from unittest import mock
 import pytest

+from crewai.llm import LLM
 from crewai.agent import Agent
 from crewai.crew import Crew
 from crewai.task import Task
@@ -23,7 +24,7 @@ class TestCrewEvaluator:
         )
         crew = Crew(agents=[agent], tasks=[task])

-        return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
+        return CrewEvaluator(crew, llm="gpt-4o-mini")

     def test_setup_for_evaluating(self, crew_planner):
         crew_planner._setup_for_evaluating()
@@ -47,6 +48,25 @@ class TestCrewEvaluator:
         assert agent.verbose is False
         assert agent.llm.model == "gpt-4o-mini"

+    @pytest.mark.parametrize("llm_input,expected_model", [
+        (LLM(model="gpt-4"), "gpt-4"),
+        ("gpt-4", "gpt-4"),
+    ])
+    def test_evaluator_with_llm_types(self, crew_planner, llm_input, expected_model):
+        evaluator = CrewEvaluator(crew_planner.crew, llm_input)
+        agent = evaluator._evaluator_agent()
+        assert agent.llm.model == expected_model
+
+    def test_evaluator_with_invalid_llm(self, crew_planner):
+        with pytest.raises(ValueError, match="Invalid LLM configuration"):
+            CrewEvaluator(crew_planner.crew, None)
+
+    def test_evaluator_with_string_llm(self, crew_planner):
+        evaluator = CrewEvaluator(crew_planner.crew, "gpt-4")
+        agent = evaluator._evaluator_agent()
+        assert isinstance(agent.llm, LLM)
+        assert agent.llm.model == "gpt-4"
+
     def test_evaluation_task(self, crew_planner):
         evaluator_agent = Agent(
             role="Evaluator Agent",