diff --git a/src/crewai/agents/agent_builder/base_agent.py b/src/crewai/agents/agent_builder/base_agent.py
index 207a1769a..e390dad51 100644
--- a/src/crewai/agents/agent_builder/base_agent.py
+++ b/src/crewai/agents/agent_builder/base_agent.py
@@ -256,13 +256,14 @@ class BaseAgent(ABC, BaseModel):
             "tools_handler",
             "cache_handler",
             "llm",
+            "crew",  # Exclude crew to avoid circular reference
         }
 
         # Copy llm and clear callbacks
-        existing_llm = shallow_copy(self.llm)
+        existing_llm = shallow_copy(self.llm) if self.llm else None
         copied_data = self.model_dump(exclude=exclude)
         copied_data = {k: v for k, v in copied_data.items() if v is not None}
-        copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)
+        copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools or [])
 
         return copied_agent
 
diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index d13c59b6e..c80b8e86c 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -1079,7 +1079,7 @@ class Crew(BaseModel):
         llm: Optional[Union[str, LLM]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures.
+        """Test and evaluate the Crew with the given inputs for n iterations concurrently.
 
         Args:
             n_iterations: Number of test iterations to run
@@ -1087,31 +1087,42 @@
             llm: LLM instance or model name to use for evaluation
             inputs: Optional inputs for the crew
         """
+        if openai_model_name:
+            warnings.warn(
+                "openai_model_name parameter is deprecated and will be removed in v3.0. Use llm parameter instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        if not (llm or openai_model_name):
+            raise ValueError("Either llm or openai_model_name must be provided")
+
         test_crew = self.copy()
 
         # Convert string to LLM instance if needed
         if isinstance(llm, str):
             llm = LLM(model=llm)
-
-        # Maintain backward compatibility
-        if openai_model_name and not llm:
+        elif openai_model_name and not llm:
             llm = LLM(model=openai_model_name)
-        elif not llm:
-            raise ValueError("Either llm or openai_model_name must be provided")
+
+        assert isinstance(llm, LLM), "llm must be an LLM instance"
+
+        try:
+            self._test_execution_span = test_crew._telemetry.test_execution_span(
+                test_crew,
+                n_iterations,
+                inputs,
+                getattr(llm, "model", None),
+            )
+            evaluator = CrewEvaluator(test_crew, llm)
 
-        self._test_execution_span = test_crew._telemetry.test_execution_span(
-            test_crew,
-            n_iterations,
-            inputs,
-            getattr(llm, "model", None),
-        )
-        evaluator = CrewEvaluator(test_crew, llm)
+            for i in range(1, n_iterations + 1):
+                evaluator.set_iteration(i)
+                test_crew.kickoff(inputs=inputs)
 
-        for i in range(1, n_iterations + 1):
-            evaluator.set_iteration(i)
-            test_crew.kickoff(inputs=inputs)
-
-        evaluator.print_crew_evaluation_result()
+            evaluator.print_crew_evaluation_result()
+        except Exception as e:
+            raise ValueError(f"Error during crew test execution: {str(e)}") from e
 
     def __repr__(self):
         return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
diff --git a/src/crewai/utilities/evaluators/crew_evaluator_handler.py b/src/crewai/utilities/evaluators/crew_evaluator_handler.py
index e01a8a6c3..a4da86b5d 100644
--- a/src/crewai/utilities/evaluators/crew_evaluator_handler.py
+++ b/src/crewai/utilities/evaluators/crew_evaluator_handler.py
@@ -1,5 +1,5 @@
 from collections import defaultdict
-from typing import Union
+from typing import TYPE_CHECKING, Any, Union
 
 from pydantic import BaseModel, Field
 from rich.box import HEAVY_EDGE
@@ -12,6 +12,9 @@ from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
 
+if TYPE_CHECKING:
+    from crewai.crew import Crew
+
 
 class TaskEvaluationPydanticOutput(BaseModel):
     quality: float = Field(
@@ -20,21 +23,25 @@
 
 
 class CrewEvaluator:
-    """
-    A class to evaluate the performance of the agents in the crew based on the tasks they have performed.
-
+    """Handles evaluation of Crew execution and performance.
+
+    Args:
+        crew: The Crew instance to evaluate
+        llm: Language model to use for evaluation
+
     Attributes:
-        crew (Crew): The crew of agents to evaluate.
-        openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
-        tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
-        iteration (int): The current iteration of the evaluation.
+        tasks_scores: Dictionary to store task scores
+        run_execution_times: Dictionary to store execution times
+        iteration: Current iteration number
+        crew: The crew instance being evaluated
+        llm: Language model used for evaluation
     """
 
-    tasks_scores: defaultdict = defaultdict(list)
-    run_execution_times: defaultdict = defaultdict(list)
+    tasks_scores: defaultdict[int, list[float]] = defaultdict(list)
+    run_execution_times: defaultdict[int, list[float]] = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, llm: Union[str, LLM]):
+    def __init__(self, crew: "Crew", llm: Union[str, LLM]):
         self.crew = crew
         self.llm = llm if isinstance(llm, LLM) else LLM(model=llm)
         self._telemetry = Telemetry()
@@ -183,7 +190,7 @@ class CrewEvaluator:
                 self.crew,
                 evaluation_result.pydantic.quality,
                 current_task._execution_time,
-                self.openai_model_name,
+                getattr(self.llm, "model", None),
             )
             self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
             self.run_execution_times[self.iteration].append(
diff --git a/tests/crew_test.py b/tests/crew_test.py
index 4e660542f..985643706 100644
--- a/tests/crew_test.py
+++ b/tests/crew_test.py
@@ -26,6 +26,9 @@ from crewai.utilities import Logger
 from crewai.utilities.rpm_controller import RPMController
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
 
+TEST_MODEL = "gpt-4o"
+TEST_ITERATIONS = 1
+
 ceo = Agent(
     role="CEO",
     goal="Make sure the writers in your company produce amazing content.",
@@ -663,30 +666,30 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
     assert isinstance(researcher_with_delegation.tools[0], TestTool)
 
 @pytest.mark.vcr(filter_headers=["authorization"])
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_test_with_custom_llm():
-    tasks = [
-        Task(
-            description="Test task",
-            expected_output="Test output",
-            agent=researcher,
-        )
-    ]
-    crew = Crew(agents=[researcher], tasks=tasks)
-
-    # Test with LLM instance
-    custom_llm = LLM(model="gpt-4o")
-    crew.test(n_iterations=1, llm=custom_llm)
-
-    # Test with model name string
-    crew.test(n_iterations=1, llm="gpt-4o")
-
-    # Test backward compatibility
-    crew.test(n_iterations=1, openai_model_name="gpt-4o")
-
-    # Test error when no LLM provided
-    with pytest.raises(ValueError):
-        crew.test(n_iterations=1)
+class TestCrewCustomLLM:
+    def test_crew_test_with_custom_llm(self):
+        tasks = [
+            Task(
+                description="Test task",
+                expected_output="Test output",
+                agent=researcher,
+            )
+        ]
+        crew = Crew(agents=[researcher], tasks=tasks)
+
+        # Test with LLM instance
+        custom_llm = LLM(model=TEST_MODEL)
+        crew.test(n_iterations=TEST_ITERATIONS, llm=custom_llm)
+
+        # Test with model name string
+        crew.test(n_iterations=TEST_ITERATIONS, llm=TEST_MODEL)
+
+        # Test backward compatibility
+        crew.test(n_iterations=TEST_ITERATIONS, openai_model_name=TEST_MODEL)
+
+        # Test error when no LLM provided
+        with pytest.raises(ValueError):
+            crew.test(n_iterations=TEST_ITERATIONS)
 
 
 
@@ -2863,14 +2866,23 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
         [mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
     )
 
-    crew_evaluator.assert_has_calls(
-        [
-            mock.call(crew, "gpt-4o-mini"),
-            mock.call().set_iteration(1),
-            mock.call().set_iteration(2),
-            mock.call().print_crew_evaluation_result(),
-        ]
-    )
+    # Get the actual calls made to crew_evaluator
+    actual_calls = crew_evaluator.mock_calls
+
+    # Check that the first call was made with correct crew and either string or LLM instance
+    first_call = actual_calls[0]
+    assert first_call[0] == '', "First call should be to constructor"
+    assert first_call[1][0] == crew, "First argument should be crew"
+    assert isinstance(first_call[1][1], (str, LLM)), "Second argument should be string or LLM"
+    if isinstance(first_call[1][1], LLM):
+        assert first_call[1][1].model == "gpt-4o-mini"
+    else:
+        assert first_call[1][1] == "gpt-4o-mini"
+
+    # Check remaining calls
+    assert actual_calls[1] == mock.call().set_iteration(1)
+    assert actual_calls[2] == mock.call().set_iteration(2)
+    assert actual_calls[3] == mock.call().print_crew_evaluation_result()
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
diff --git a/tests/utilities/evaluators/test_crew_evaluator_handler.py b/tests/utilities/evaluators/test_crew_evaluator_handler.py
index 649c25998..c8923982c 100644
--- a/tests/utilities/evaluators/test_crew_evaluator_handler.py
+++ b/tests/utilities/evaluators/test_crew_evaluator_handler.py
@@ -4,6 +4,7 @@ import pytest
 
 from crewai.agent import Agent
 from crewai.crew import Crew
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.evaluators.crew_evaluator_handler import (
@@ -23,7 +24,7 @@ class TestCrewEvaluator:
         )
         crew = Crew(agents=[agent], tasks=[task])
 
-        return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
+        return CrewEvaluator(crew, llm=LLM(model="gpt-4o-mini"))
 
     def test_setup_for_evaluating(self, crew_planner):
         crew_planner._setup_for_evaluating()
@@ -45,6 +46,7 @@ class TestCrewEvaluator:
             == "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed"
        )
         assert agent.verbose is False
+        assert isinstance(agent.llm, LLM)
         assert agent.llm.model == "gpt-4o-mini"
 
     def test_evaluation_task(self, crew_planner):
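
Usage sketch for reviewers: a minimal example of the Crew.test() call signature as changed by this patch. The agent, task, and model names below are illustrative placeholders, not taken from the diff; only the llm / openai_model_name handling mirrors the code above.

    from crewai import Agent, Crew, Task
    from crewai.llm import LLM

    # Placeholder agent and task, only to make the snippet self-contained.
    analyst = Agent(role="Analyst", goal="Summarize findings", backstory="Placeholder agent")
    report = Task(description="Summarize the topic", expected_output="A short summary", agent=analyst)
    crew = Crew(agents=[analyst], tasks=[report])

    # Preferred: pass an LLM instance or a plain model-name string via the new llm parameter.
    crew.test(n_iterations=2, llm=LLM(model="gpt-4o-mini"))
    crew.test(n_iterations=2, llm="gpt-4o-mini")

    # Still accepted for backward compatibility, but now emits a DeprecationWarning.
    crew.test(n_iterations=2, openai_model_name="gpt-4o-mini")

    # Passing neither llm nor openai_model_name raises ValueError.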