Compare commits

...

5 Commits

Author SHA1 Message Date
Devin AI
f53cf838bd chore: address PR feedback for test functionality
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 21:02:22 +00:00
Devin AI
0bb44690e3 chore: add logging for evaluation process
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 20:59:11 +00:00
Devin AI
df6cb60ec7 test: add error handling test cases for crew.test()
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 20:58:40 +00:00
Devin AI
1d7aceb919 chore: add error handling for llm type validation
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 20:57:52 +00:00
Devin AI
a7b050f52f fix: enable any llm to run test functionality
This change enables the Crew.test() method to work with any LLM implementation, not just OpenAI models. It maintains backward compatibility with the openai_model_name parameter while adding support for custom LLMs.

Fixes #2067
Fixes #2071

Co-Authored-By: Joe Moura <joao@crewai.com>
2025-02-09 20:51:16 +00:00
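
For context, a minimal usage sketch of the new parameter, pieced together from the commit description and the tests added in this PR (the agent and task definitions below are illustrative placeholders, not part of the change):

from crewai import Agent, Crew, Task
from crewai.llm import LLM

# Placeholder crew setup for the example
researcher = Agent(role="Researcher", goal="Research a topic", backstory="Example agent")
task = Task(description="Test task", expected_output="Test output", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task])

# New path: pass any LLM, either as a model string or an LLM instance
crew.test(n_iterations=1, llm=LLM(model="gpt-4o-mini"))

# Legacy path still supported: the deprecated openai_model_name parameter
crew.test(n_iterations=1, openai_model_name="gpt-4")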
4 changed files with 207 additions and 44 deletions

View File

@@ -6,6 +6,7 @@ from concurrent.futures import Future
 from hashlib import md5
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
+from langchain_core.language_models.base import BaseLanguageModel
 from pydantic import (
     UUID4,
     BaseModel,
@@ -1075,19 +1076,39 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: Optional[str] = None,
+        llm: Optional[Union[str, InstanceOf[LLM], Any]] = None,
+        openai_model_name: Optional[str] = None,  # Kept for backward compatibility
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
+        """Test and evaluate the Crew with the given inputs for n iterations.
+
+        Args:
+            n_iterations (int): Number of test iterations to run
+            llm (Optional[Union[str, LLM, BaseLanguageModel]]): Language model to use for testing
+            openai_model_name (Optional[str]): Legacy parameter for OpenAI models (deprecated)
+            inputs (Optional[Dict[str, Any]]): Test inputs for the crew
+
+        Raises:
+            ValueError: If n_iterations is less than 1 or if llm type is unsupported
+
+        Returns:
+            None
+        """
+        if n_iterations < 1:
+            raise ValueError("n_iterations must be greater than 0")
+
+        if llm is not None and not isinstance(llm, (str, LLM, BaseLanguageModel)):
+            raise ValueError(f"Unsupported LLM type: {type(llm)}")
+
         test_crew = self.copy()
+        test_llm = llm if llm is not None else openai_model_name
 
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
+            test_llm,  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, test_llm)  # type: ignore[arg-type]
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)

View File

@@ -1,14 +1,19 @@
+import os
 from collections import defaultdict
+from typing import Any, Dict, List, Optional, Union
 
-from pydantic import BaseModel, Field
+from langchain_core.language_models.base import BaseLanguageModel
+from pydantic import BaseModel, Field, InstanceOf
 from rich.box import HEAVY_EDGE
 from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
+from crewai.utilities.logger import Logger
 
 
 class TaskEvaluationPydanticOutput(BaseModel):
@@ -22,22 +27,62 @@ class CrewEvaluator:
     A class to evaluate the performance of the agents in the crew based on the tasks they have performed.
 
     Attributes:
-        crew (Crew): The crew of agents to evaluate.
-        openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
-        tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
-        iteration (int): The current iteration of the evaluation.
+        crew (Crew): The crew of agents to evaluate
+        llm (Union[str, LLM, BaseLanguageModel]): Language model to use for evaluation
+        tasks_scores (defaultdict): Dictionary to store the scores of the agents for each task
+        iteration (int): Current iteration of the evaluation
+        run_execution_times (defaultdict): Dictionary to store execution times for each run
     """
 
     tasks_scores: defaultdict = defaultdict(list)
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, llm: Union[str, InstanceOf[LLM], BaseLanguageModel]):
+        """Initialize the CrewEvaluator.
+
+        Args:
+            crew (Crew): The crew to evaluate
+            llm (Union[str, LLM, BaseLanguageModel]): Language model to use for evaluation
+
+        Raises:
+            ValueError: If llm is of an unsupported type
+        """
+        if not isinstance(llm, (str, LLM, BaseLanguageModel, type(None))):
+            raise ValueError(f"Unsupported LLM type: {type(llm)}")
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        self.llm = llm
         self._telemetry = Telemetry()
+        self._logger = Logger()
+        self._setup_llm()
         self._setup_for_evaluating()
 
+    def _setup_llm(self):
+        """Set up the LLM following the Agent class pattern.
+
+        This method initializes the language model based on the provided llm parameter:
+        - If string: creates new LLM instance with model name
+        - If LLM instance: uses as-is
+        - If None: uses default model from environment or "gpt-4"
+        - Otherwise: attempts to extract model name from object attributes
+        """
+        if isinstance(self.llm, str):
+            self.llm = LLM(model=self.llm)
+        elif isinstance(self.llm, LLM):
+            pass
+        elif self.llm is None:
+            model_name = os.environ.get("OPENAI_MODEL_NAME") or "gpt-4"
+            self.llm = LLM(model=model_name)
+        else:
+            llm_params = {
+                "model": getattr(self.llm, "model_name", None)
+                or getattr(self.llm, "deployment_name", None)
+                or str(self.llm),
+            }
+            self.llm = LLM(**llm_params)
+
     def _setup_for_evaluating(self) -> None:
         """Sets up the crew for evaluating."""
         for task in self.crew.tasks:
@@ -51,7 +96,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(
@@ -157,35 +202,48 @@ class CrewEvaluator:
         console.print(table)
 
     def evaluate(self, task_output: TaskOutput):
-        """Evaluates the performance of the agents in the crew based on the tasks they have performed."""
-        current_task = None
-        for task in self.crew.tasks:
-            if task.description == task_output.description:
-                current_task = task
-                break
-
-        if not current_task or not task_output:
-            raise ValueError(
-                "Task to evaluate and task output are required for evaluation"
-            )
-
-        evaluator_agent = self._evaluator_agent()
-        evaluation_task = self._evaluation_task(
-            evaluator_agent, current_task, task_output.raw
-        )
-
-        evaluation_result = evaluation_task.execute_sync()
-
-        if isinstance(evaluation_result.pydantic, TaskEvaluationPydanticOutput):
-            self._test_result_span = self._telemetry.individual_test_result_span(
-                self.crew,
-                evaluation_result.pydantic.quality,
-                current_task._execution_time,
-                self.openai_model_name,
-            )
-            self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
-            self.run_execution_times[self.iteration].append(
-                current_task._execution_time
-            )
-        else:
-            raise ValueError("Evaluation result is not in the expected format")
+        """Evaluates the performance of the agents in the crew based on the tasks they have performed.
+
+        Args:
+            task_output (TaskOutput): The output from the task to evaluate
+
+        Raises:
+            ValueError: If task to evaluate or task output is missing, or if evaluation result is invalid
+        """
+        try:
+            current_task = None
+            for task in self.crew.tasks:
+                if task.description == task_output.description:
+                    current_task = task
+                    break
+
+            if not current_task or not task_output:
+                raise ValueError(
+                    "Task to evaluate and task output are required for evaluation"
+                )
+
+            self._logger.log("info", f"Starting evaluation for task: {task_output.description}")
+            evaluator_agent = self._evaluator_agent()
+            evaluation_task = self._evaluation_task(
+                evaluator_agent, current_task, task_output.raw
+            )
+
+            evaluation_result = evaluation_task.execute_sync()
+
+            if isinstance(evaluation_result.pydantic, TaskEvaluationPydanticOutput):
+                self._test_result_span = self._telemetry.individual_test_result_span(
+                    self.crew,
+                    evaluation_result.pydantic.quality,
+                    current_task._execution_time,
+                    self.llm,
+                )
+                self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
+                self.run_execution_times[self.iteration].append(
+                    current_task._execution_time
+                )
+                self._logger.log("info", f"Evaluation completed with score: {evaluation_result.pydantic.quality}")
+            else:
+                raise ValueError("Evaluation result is not in the expected format")
+        except Exception as e:
+            self._logger.log("error", f"Evaluation failed: {str(e)}")
+            raise

View File

@@ -24,6 +24,36 @@ from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import Logger
 from crewai.utilities.rpm_controller import RPMController
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
+from crewai.llm import LLM
+
+
+class MockLLM(LLM):
+    """Mock LLM for testing."""
+
+    def __init__(self):
+        super().__init__(model="gpt-4")  # Use a known model name
+
+    def chat_completion(self, messages, tools=None, tool_choice=None, **kwargs):
+        # Mock a proper response that matches the expected format
+        if tools and any('output' in tool.get('function', {}).get('name', '') for tool in tools):
+            return {
+                "choices": [{
+                    "message": {
+                        "content": None,
+                        "role": "assistant",
+                        "function_call": {
+                            "name": "output",
+                            "arguments": '{"quality": 8.5}'
+                        }
+                    }
+                }]
+            }
+        return {
+            "choices": [{
+                "message": {
+                    "content": "Mock LLM Response",
+                    "role": "assistant"
+                }
+            }]
+        }
 
 
 ceo = Agent(
     role="CEO",
@@ -47,6 +77,60 @@ writer = Agent(
 )
 
 
+def test_crew_test_with_custom_llm():
+    """Test that Crew.test() works with a custom LLM implementation."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+
+    # Test with custom LLM
+    custom_llm = MockLLM()
+    crew.test(n_iterations=1, llm=custom_llm)
+    # No assertion needed as we just verify it runs without errors
+
+
+def test_crew_test_backward_compatibility():
+    """Test that Crew.test() maintains backward compatibility with openai_model_name."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+
+    # Test with openai_model_name
+    crew.test(n_iterations=1, openai_model_name="gpt-4")
+    # No assertion needed as we just verify it runs without errors
+
+
+def test_crew_test_with_invalid_llm():
+    """Test that Crew.test() properly handles invalid LLM inputs."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+
+    # Test with invalid LLM type
+    with pytest.raises(ValueError, match="Unsupported LLM type"):
+        crew.test(n_iterations=1, llm=123)  # type: ignore
+
+
+def test_crew_test_with_invalid_iterations():
+    """Test that Crew.test() validates n_iterations."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+
+    # Test with invalid n_iterations
+    with pytest.raises(ValueError, match="n_iterations must be greater than 0"):
+        crew.test(n_iterations=0, llm=MockLLM())
+
+
 def test_crew_config_conditional_requirement():
     with pytest.raises(ValueError):
         Crew(process=Process.sequential)
@@ -1123,7 +1207,7 @@ def test_kickoff_for_each_empty_input():
     assert results == []
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_kickoff_for_each_invalid_input():
     """Tests if kickoff_for_each raises TypeError for invalid input types."""

View File

@@ -23,7 +23,7 @@ class TestCrewEvaluator:
         )
         crew = Crew(agents=[agent], tasks=[task])
 
-        return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
+        return CrewEvaluator(crew, llm="openai/gpt-4o-mini")
 
     def test_setup_for_evaluating(self, crew_planner):
         crew_planner._setup_for_evaluating()
@@ -45,7 +45,7 @@ class TestCrewEvaluator:
== "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed" == "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed"
) )
assert agent.verbose is False assert agent.verbose is False
assert agent.llm.model == "gpt-4o-mini" assert agent.llm.model == "openai/gpt-4o-mini"
def test_evaluation_task(self, crew_planner): def test_evaluation_task(self, crew_planner):
evaluator_agent = Agent( evaluator_agent = Agent(