refactor: improve code quality based on PR feedback

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date: 2025-02-09 22:35:14 +00:00
parent 2a5a1250fb
commit 598702ccdb
5 changed files with 98 additions and 65 deletions


@@ -256,13 +256,14 @@ class BaseAgent(ABC, BaseModel):
"tools_handler", "tools_handler",
"cache_handler", "cache_handler",
"llm", "llm",
"crew", # Exclude crew to avoid circular reference
} }
# Copy llm and clear callbacks # Copy llm and clear callbacks
existing_llm = shallow_copy(self.llm) existing_llm = shallow_copy(self.llm) if self.llm else None
copied_data = self.model_dump(exclude=exclude) copied_data = self.model_dump(exclude=exclude)
copied_data = {k: v for k, v in copied_data.items() if v is not None} copied_data = {k: v for k, v in copied_data.items() if v is not None}
copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools) copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools or [])
return copied_agent return copied_agent
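
Taken together, the two guards make the copy path safe for agents created without an llm or tools. A minimal sketch of the resulting behavior (the Agent fields below are illustrative placeholders, not part of this diff):

# Sketch only: assumes an Agent subclass of BaseAgent with optional llm/tools.
agent = Agent(
    role="Researcher",
    goal="Find facts",
    backstory="No tools or custom LLM configured",
)
clone = agent.copy()      # shallow_copy(self.llm) is skipped when llm is None
assert clone.tools == []  # tools=self.tools or [] normalizes None to []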


@@ -1079,7 +1079,7 @@ class Crew(BaseModel):
         llm: Optional[Union[str, LLM]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures.
+        """Test and evaluate the Crew with the given inputs for n iterations concurrently.

         Args:
             n_iterations: Number of test iterations to run
@@ -1087,31 +1087,42 @@ class Crew(BaseModel):
             llm: LLM instance or model name to use for evaluation
             inputs: Optional inputs for the crew
         """
+        if openai_model_name:
+            warnings.warn(
+                "openai_model_name parameter is deprecated and will be removed in v3.0. Use llm parameter instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        if not (llm or openai_model_name):
+            raise ValueError("Either llm or openai_model_name must be provided")
+
         test_crew = self.copy()

         # Convert string to LLM instance if needed
         if isinstance(llm, str):
             llm = LLM(model=llm)
-        elif openai_model_name and not llm:
+
+        # Maintain backward compatibility
+        if openai_model_name and not llm:
             llm = LLM(model=openai_model_name)
-        elif not llm:
-            raise ValueError("Either llm or openai_model_name must be provided")

-        self._test_execution_span = test_crew._telemetry.test_execution_span(
-            test_crew,
-            n_iterations,
-            inputs,
-            getattr(llm, "model", None),
-        )
-        evaluator = CrewEvaluator(test_crew, llm)
+        assert isinstance(llm, LLM), "llm must be an LLM instance"

-        for i in range(1, n_iterations + 1):
-            evaluator.set_iteration(i)
-            test_crew.kickoff(inputs=inputs)
+        try:
+            self._test_execution_span = test_crew._telemetry.test_execution_span(
+                test_crew,
+                n_iterations,
+                inputs,
+                getattr(llm, "model", None),
+            )
+            evaluator = CrewEvaluator(test_crew, llm)

-        evaluator.print_crew_evaluation_result()
+            for i in range(1, n_iterations + 1):
+                evaluator.set_iteration(i)
+                test_crew.kickoff(inputs=inputs)
+
+            evaluator.print_crew_evaluation_result()
+        except Exception as e:
+            raise ValueError(f"Error during crew test execution: {str(e)}") from e

     def __repr__(self):
         return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"


@@ -1,5 +1,5 @@
 from collections import defaultdict
-from typing import Union
+from typing import TYPE_CHECKING, Any, Union

 from pydantic import BaseModel, Field
 from rich.box import HEAVY_EDGE
@@ -12,6 +12,9 @@ from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry

+if TYPE_CHECKING:
+    from crewai.crew import Crew
+

 class TaskEvaluationPydanticOutput(BaseModel):
     quality: float = Field(
@@ -20,21 +23,25 @@ class TaskEvaluationPydanticOutput(BaseModel):
 class CrewEvaluator:
-    """
-    A class to evaluate the performance of the agents in the crew based on the tasks they have performed.
+    """Handles evaluation of Crew execution and performance.
+
+    Args:
+        crew: The Crew instance to evaluate
+        llm: Language model to use for evaluation

     Attributes:
-        crew (Crew): The crew of agents to evaluate.
-        openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
-        tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
-        iteration (int): The current iteration of the evaluation.
+        tasks_scores: Dictionary to store task scores
+        run_execution_times: Dictionary to store execution times
+        iteration: Current iteration number
+        crew: The crew instance being evaluated
+        llm: Language model used for evaluation
     """

-    tasks_scores: defaultdict = defaultdict(list)
-    run_execution_times: defaultdict = defaultdict(list)
+    tasks_scores: defaultdict[int, list[float]] = defaultdict(list)
+    run_execution_times: defaultdict[int, list[float]] = defaultdict(list)
     iteration: int = 0

-    def __init__(self, crew, llm: Union[str, LLM]):
+    def __init__(self, crew: "Crew", llm: Union[str, LLM]):
         self.crew = crew
         self.llm = llm if isinstance(llm, LLM) else LLM(model=llm)
         self._telemetry = Telemetry()
@@ -183,7 +190,7 @@ class CrewEvaluator:
             self.crew,
             evaluation_result.pydantic.quality,
             current_task._execution_time,
-            self.openai_model_name,
+            getattr(self.llm, "model", None),
         )
         self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
         self.run_execution_times[self.iteration].append(
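
A short sketch of the widened constructor contract (names as in the hunks above): both forms normalize to an LLM instance, which is what getattr(self.llm, "model", None) relies on.

# Either construction style yields an LLM instance on the evaluator.
evaluator = CrewEvaluator(crew, llm="gpt-4o-mini")
assert isinstance(evaluator.llm, LLM)
assert evaluator.llm.model == "gpt-4o-mini"

evaluator = CrewEvaluator(crew, llm=LLM(model="gpt-4o-mini"))
assert isinstance(evaluator.llm, LLM)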


@@ -26,6 +26,9 @@ from crewai.utilities import Logger
 from crewai.utilities.rpm_controller import RPMController
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler

+TEST_MODEL = "gpt-4o"
+TEST_ITERATIONS = 1
+
 ceo = Agent(
     role="CEO",
     goal="Make sure the writers in your company produce amazing content.",
@@ -663,30 +666,30 @@ def test_task_tools_override_agent_tools_with_allow_delegation():
     assert isinstance(researcher_with_delegation.tools[0], TestTool)


 @pytest.mark.vcr(filter_headers=["authorization"])
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_test_with_custom_llm():
-    tasks = [
-        Task(
-            description="Test task",
-            expected_output="Test output",
-            agent=researcher,
-        )
-    ]
-    crew = Crew(agents=[researcher], tasks=tasks)
-
-    # Test with LLM instance
-    custom_llm = LLM(model="gpt-4o")
-    crew.test(n_iterations=1, llm=custom_llm)
-
-    # Test with model name string
-    crew.test(n_iterations=1, llm="gpt-4o")
-
-    # Test backward compatibility
-    crew.test(n_iterations=1, openai_model_name="gpt-4o")
-
-    # Test error when no LLM provided
-    with pytest.raises(ValueError):
-        crew.test(n_iterations=1)
+class TestCrewCustomLLM:
+    def test_crew_test_with_custom_llm(self):
+        tasks = [
+            Task(
+                description="Test task",
+                expected_output="Test output",
+                agent=researcher,
+            )
+        ]
+        crew = Crew(agents=[researcher], tasks=tasks)
+
+        # Test with LLM instance
+        custom_llm = LLM(model=TEST_MODEL)
+        crew.test(n_iterations=TEST_ITERATIONS, llm=custom_llm)
+
+        # Test with model name string
+        crew.test(n_iterations=TEST_ITERATIONS, llm=TEST_MODEL)
+
+        # Test backward compatibility
+        crew.test(n_iterations=TEST_ITERATIONS, openai_model_name=TEST_MODEL)
+
+        # Test error when no LLM provided
+        with pytest.raises(ValueError):
+            crew.test(n_iterations=TEST_ITERATIONS)
@@ -2863,14 +2866,23 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
         [mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
     )

-    crew_evaluator.assert_has_calls(
-        [
-            mock.call(crew, "gpt-4o-mini"),
-            mock.call().set_iteration(1),
-            mock.call().set_iteration(2),
-            mock.call().print_crew_evaluation_result(),
-        ]
-    )
+    # Get the actual calls made to crew_evaluator
+    actual_calls = crew_evaluator.mock_calls
+
+    # Check that the first call was made with correct crew and either string or LLM instance
+    first_call = actual_calls[0]
+    assert first_call[0] == '', "First call should be to constructor"
+    assert first_call[1][0] == crew, "First argument should be crew"
+    assert isinstance(first_call[1][1], (str, LLM)), "Second argument should be string or LLM"
+    if isinstance(first_call[1][1], LLM):
+        assert first_call[1][1].model == "gpt-4o-mini"
+    else:
+        assert first_call[1][1] == "gpt-4o-mini"
+
+    # Check remaining calls
+    assert actual_calls[1] == mock.call().set_iteration(1)
+    assert actual_calls[2] == mock.call().set_iteration(2)
+    assert actual_calls[3] == mock.call().print_crew_evaluation_result()


 @pytest.mark.vcr(filter_headers=["authorization"])
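
For reference, the indexing in the rewritten assertions relies on unittest.mock recording each entry of mock_calls as a (name, args, kwargs) tuple, where the name is '' for a direct call on the mock itself; a standalone sketch:

from unittest import mock

m = mock.MagicMock()
m("crew", "gpt-4o-mini")
name, args, kwargs = m.mock_calls[0]
assert name == ""                       # '' marks a call on the mock itself
assert args == ("crew", "gpt-4o-mini")  # positional args, as in first_call[1]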


@@ -4,6 +4,7 @@ import pytest
 from crewai.agent import Agent
 from crewai.crew import Crew
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.evaluators.crew_evaluator_handler import (
@@ -23,7 +24,7 @@ class TestCrewEvaluator:
         )
         crew = Crew(agents=[agent], tasks=[task])
-        return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
+        return CrewEvaluator(crew, llm=LLM(model="gpt-4o-mini"))

     def test_setup_for_evaluating(self, crew_planner):
         crew_planner._setup_for_evaluating()
@@ -45,6 +46,7 @@ class TestCrewEvaluator:
== "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed" == "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed"
) )
assert agent.verbose is False assert agent.verbose is False
assert isinstance(agent.llm, LLM)
assert agent.llm.model == "gpt-4o-mini" assert agent.llm.model == "gpt-4o-mini"
def test_evaluation_task(self, crew_planner): def test_evaluation_task(self, crew_planner):