Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-16 12:28:30 +00:00
Compare commits: bugfix/sup...devin/1739 (5 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | f53cf838bd |  |
|  | 0bb44690e3 |  |
|  | df6cb60ec7 |  |
|  | 1d7aceb919 |  |
|  | a7b050f52f |  |
@@ -6,6 +6,7 @@ from concurrent.futures import Future
 from hashlib import md5
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
+from langchain_core.language_models.base import BaseLanguageModel
 from pydantic import (
     UUID4,
     BaseModel,
@@ -1075,19 +1076,39 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: Optional[str] = None,
+        llm: Optional[Union[str, InstanceOf[LLM], Any]] = None,
+        openai_model_name: Optional[str] = None,  # Kept for backward compatibility
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
+        """Test and evaluate the Crew with the given inputs for n iterations.
+
+        Args:
+            n_iterations (int): Number of test iterations to run
+            llm (Optional[Union[str, LLM, BaseLanguageModel]]): Language model to use for testing
+            openai_model_name (Optional[str]): Legacy parameter for OpenAI models (deprecated)
+            inputs (Optional[Dict[str, Any]]): Test inputs for the crew
+
+        Raises:
+            ValueError: If n_iterations is less than 1 or if llm type is unsupported
+
+        Returns:
+            None
+        """
+        if n_iterations < 1:
+            raise ValueError("n_iterations must be greater than 0")
+        if llm is not None and not isinstance(llm, (str, LLM, BaseLanguageModel)):
+            raise ValueError(f"Unsupported LLM type: {type(llm)}")
+
         test_crew = self.copy()
+        test_llm = llm if llm is not None else openai_model_name
 
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
+            test_llm,  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, test_llm)  # type: ignore[arg-type]
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
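For reference, a minimal usage sketch of the updated `test()` signature. This is illustrative only and not part of the PR: the Agent/Task wiring below is made up, and running it would call a real model unless a stub LLM is configured.

```python
from crewai import Agent, Crew, Task
from crewai.llm import LLM

# Placeholder crew; any real crew works the same way.
researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="An experienced analyst.",
)
task = Task(
    description="Summarize the topic in two sentences.",
    expected_output="A two-sentence summary.",
    agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task])

crew.test(n_iterations=1, llm="gpt-4o-mini")             # model name as a string
crew.test(n_iterations=1, llm=LLM(model="gpt-4o-mini"))  # crewai LLM instance
crew.test(n_iterations=1, openai_model_name="gpt-4o")    # legacy parameter, still accepted

# If langchain-openai is installed, a LangChain chat model should also pass the
# new isinstance check, since it subclasses BaseLanguageModel:
# from langchain_openai import ChatOpenAI
# crew.test(n_iterations=1, llm=ChatOpenAI(model="gpt-4o-mini"))
```

When both parameters are supplied, `llm` wins: `openai_model_name` is only used as a fallback when `llm` is None (`test_llm = llm if llm is not None else openai_model_name`).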
@@ -1,14 +1,19 @@
+import os
 from collections import defaultdict
+from typing import Any, Dict, List, Optional, Union
 
-from pydantic import BaseModel, Field
+from langchain_core.language_models.base import BaseLanguageModel
+from pydantic import BaseModel, Field, InstanceOf
 from rich.box import HEAVY_EDGE
 from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
+from crewai.utilities.logger import Logger
 
 
 class TaskEvaluationPydanticOutput(BaseModel):
@@ -22,22 +27,62 @@ class CrewEvaluator:
     A class to evaluate the performance of the agents in the crew based on the tasks they have performed.
 
     Attributes:
-        crew (Crew): The crew of agents to evaluate.
-        openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
-        tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
-        iteration (int): The current iteration of the evaluation.
+        crew (Crew): The crew of agents to evaluate
+        llm (Union[str, LLM, BaseLanguageModel]): Language model to use for evaluation
+        tasks_scores (defaultdict): Dictionary to store the scores of the agents for each task
+        iteration (int): Current iteration of the evaluation
+        run_execution_times (defaultdict): Dictionary to store execution times for each run
     """
 
     tasks_scores: defaultdict = defaultdict(list)
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, llm: Union[str, InstanceOf[LLM], BaseLanguageModel]):
+        """Initialize the CrewEvaluator.
+
+        Args:
+            crew (Crew): The crew to evaluate
+            llm (Union[str, LLM, BaseLanguageModel]): Language model to use for evaluation
+
+        Raises:
+            ValueError: If llm is of an unsupported type
+        """
+        if not isinstance(llm, (str, LLM, BaseLanguageModel, type(None))):
+            raise ValueError(f"Unsupported LLM type: {type(llm)}")
+
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        self.llm = llm
         self._telemetry = Telemetry()
+        self._logger = Logger()
+        self._setup_llm()
         self._setup_for_evaluating()
 
+    def _setup_llm(self):
+        """Set up the LLM following the Agent class pattern.
+
+        This method initializes the language model based on the provided llm parameter:
+        - If string: creates new LLM instance with model name
+        - If LLM instance: uses as-is
+        - If None: uses default model from environment or "gpt-4"
+        - Otherwise: attempts to extract model name from object attributes
+        """
+        if isinstance(self.llm, str):
+            self.llm = LLM(model=self.llm)
+        elif isinstance(self.llm, LLM):
+            pass
+        elif self.llm is None:
+            model_name = os.environ.get("OPENAI_MODEL_NAME") or "gpt-4"
+            self.llm = LLM(model=model_name)
+        else:
+            llm_params = {
+                "model": getattr(self.llm, "model_name", None)
+                or getattr(self.llm, "deployment_name", None)
+                or str(self.llm),
+            }
+            self.llm = LLM(**llm_params)
+
+
     def _setup_for_evaluating(self) -> None:
         """Sets up the crew for evaluating."""
         for task in self.crew.tasks:
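To make the branching in `_setup_llm` concrete, here is a standalone sketch of the same resolution rules as a pure function. `resolve_llm` is a hypothetical helper for illustration, not something the diff adds; it assumes crewai's `LLM` simply stores the model string it is given.

```python
import os
from typing import Any, Optional

from crewai.llm import LLM


def resolve_llm(llm: Optional[Any]) -> LLM:
    """Mirror of the branching in CrewEvaluator._setup_llm (illustrative only)."""
    if isinstance(llm, str):
        # A bare model name is wrapped in a crewai LLM.
        return LLM(model=llm)
    if isinstance(llm, LLM):
        # An existing LLM instance is used as-is.
        return llm
    if llm is None:
        # Fall back to the environment, then to "gpt-4".
        return LLM(model=os.environ.get("OPENAI_MODEL_NAME") or "gpt-4")
    # Anything else (e.g. a LangChain chat model): best-effort model-name lookup.
    return LLM(
        model=getattr(llm, "model_name", None)
        or getattr(llm, "deployment_name", None)
        or str(llm)
    )


assert resolve_llm("gpt-4o-mini").model == "gpt-4o-mini"
```

Normalizing every input to an `LLM` up front is what lets `_evaluator_agent` pass `self.llm` straight through as the evaluator agent's `llm` in the next hunk.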
@@ -51,7 +96,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(
@@ -157,35 +202,48 @@ class CrewEvaluator:
         console.print(table)
 
     def evaluate(self, task_output: TaskOutput):
-        """Evaluates the performance of the agents in the crew based on the tasks they have performed."""
-        current_task = None
-        for task in self.crew.tasks:
-            if task.description == task_output.description:
-                current_task = task
-                break
-
-        if not current_task or not task_output:
-            raise ValueError(
-                "Task to evaluate and task output are required for evaluation"
-            )
-
-        evaluator_agent = self._evaluator_agent()
-        evaluation_task = self._evaluation_task(
-            evaluator_agent, current_task, task_output.raw
-        )
-
-        evaluation_result = evaluation_task.execute_sync()
-
-        if isinstance(evaluation_result.pydantic, TaskEvaluationPydanticOutput):
-            self._test_result_span = self._telemetry.individual_test_result_span(
-                self.crew,
-                evaluation_result.pydantic.quality,
-                current_task._execution_time,
-                self.openai_model_name,
-            )
-            self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
-            self.run_execution_times[self.iteration].append(
-                current_task._execution_time
-            )
-        else:
-            raise ValueError("Evaluation result is not in the expected format")
+        """Evaluates the performance of the agents in the crew based on the tasks they have performed.
+
+        Args:
+            task_output (TaskOutput): The output from the task to evaluate
+
+        Raises:
+            ValueError: If task to evaluate or task output is missing, or if evaluation result is invalid
+        """
+        try:
+            current_task = None
+            for task in self.crew.tasks:
+                if task.description == task_output.description:
+                    current_task = task
+                    break
+
+            if not current_task or not task_output:
+                raise ValueError(
+                    "Task to evaluate and task output are required for evaluation"
+                )
+
+            self._logger.log("info", f"Starting evaluation for task: {task_output.description}")
+            evaluator_agent = self._evaluator_agent()
+            evaluation_task = self._evaluation_task(
+                evaluator_agent, current_task, task_output.raw
+            )
+
+            evaluation_result = evaluation_task.execute_sync()
+
+            if isinstance(evaluation_result.pydantic, TaskEvaluationPydanticOutput):
+                self._test_result_span = self._telemetry.individual_test_result_span(
+                    self.crew,
+                    evaluation_result.pydantic.quality,
+                    current_task._execution_time,
+                    self.llm,
+                )
+                self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
+                self.run_execution_times[self.iteration].append(
+                    current_task._execution_time
+                )
+                self._logger.log("info", f"Evaluation completed with score: {evaluation_result.pydantic.quality}")
+            else:
+                raise ValueError("Evaluation result is not in the expected format")
+        except Exception as e:
+            self._logger.log("error", f"Evaluation failed: {str(e)}")
+            raise
@@ -24,6 +24,36 @@ from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import Logger
 from crewai.utilities.rpm_controller import RPMController
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
+from crewai.llm import LLM
+
+class MockLLM(LLM):
+    """Mock LLM for testing."""
+    def __init__(self):
+        super().__init__(model="gpt-4")  # Use a known model name
+
+    def chat_completion(self, messages, tools=None, tool_choice=None, **kwargs):
+        # Mock a proper response that matches the expected format
+        if tools and any('output' in tool.get('function', {}).get('name', '') for tool in tools):
+            return {
+                "choices": [{
+                    "message": {
+                        "content": None,
+                        "role": "assistant",
+                        "function_call": {
+                            "name": "output",
+                            "arguments": '{"quality": 8.5}'
+                        }
+                    }
+                }]
+            }
+        return {
+            "choices": [{
+                "message": {
+                    "content": "Mock LLM Response",
+                    "role": "assistant"
+                }
+            }]
+        }
 
 ceo = Agent(
     role="CEO",
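For orientation, a quick illustrative check of what the stub returns when it is asked for the structured `output` tool call. It assumes `MockLLM` as defined above is in scope; the message and tool payloads below are made up for the example.

```python
mock = MockLLM()
response = mock.chat_completion(
    messages=[{"role": "user", "content": "Evaluate this task"}],
    tools=[{"type": "function", "function": {"name": "output", "parameters": {}}}],
)
# The canned function call carries a fixed quality score.
assert response["choices"][0]["message"]["function_call"]["arguments"] == '{"quality": 8.5}'
```

The fixed `{"quality": 8.5}` payload lines up with the `quality` field of `TaskEvaluationPydanticOutput` that `evaluate()` reads from the evaluation result.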
@@ -47,6 +77,60 @@ writer = Agent(
 )
 
 
+def test_crew_test_with_custom_llm():
+    """Test that Crew.test() works with a custom LLM implementation."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+
+    # Test with custom LLM
+    custom_llm = MockLLM()
+    crew.test(n_iterations=1, llm=custom_llm)
+    # No assertion needed as we just verify it runs without errors
+
+def test_crew_test_backward_compatibility():
+    """Test that Crew.test() maintains backward compatibility with openai_model_name."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+
+    # Test with openai_model_name
+    crew.test(n_iterations=1, openai_model_name="gpt-4")
+    # No assertion needed as we just verify it runs without errors
+
+def test_crew_test_with_invalid_llm():
+    """Test that Crew.test() properly handles invalid LLM inputs."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+
+    # Test with invalid LLM type
+    with pytest.raises(ValueError, match="Unsupported LLM type"):
+        crew.test(n_iterations=1, llm=123)  # type: ignore
+
+def test_crew_test_with_invalid_iterations():
+    """Test that Crew.test() validates n_iterations."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task])
+
+    # Test with invalid n_iterations
+    with pytest.raises(ValueError, match="n_iterations must be greater than 0"):
+        crew.test(n_iterations=0, llm=MockLLM())
+
+
 def test_crew_config_conditional_requirement():
     with pytest.raises(ValueError):
         Crew(process=Process.sequential)
@@ -1123,7 +1207,7 @@ def test_kickoff_for_each_empty_input():
     assert results == []
 
 
-@pytest.mark.vcr(filter_headers=["authorization"])
+@pytest.mark.vcr(filter_headeruvs=["authorization"])
 def test_kickoff_for_each_invalid_input():
     """Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -3125,4 +3209,4 @@ def test_multimodal_agent_live_image_analysis():
     # Verify we got a meaningful response
     assert isinstance(result.raw, str)
     assert len(result.raw) > 100  # Expecting a detailed analysis
-    assert "error" not in result.raw.lower()  # No error messages in response
+    assert "error" not in result.raw.lower()  # No error messages in response
@@ -23,7 +23,7 @@ class TestCrewEvaluator:
         )
         crew = Crew(agents=[agent], tasks=[task])
 
-        return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
+        return CrewEvaluator(crew, llm="openai/gpt-4o-mini")
 
     def test_setup_for_evaluating(self, crew_planner):
         crew_planner._setup_for_evaluating()
@@ -45,7 +45,7 @@ class TestCrewEvaluator:
             == "Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed"
         )
        assert agent.verbose is False
-        assert agent.llm.model == "gpt-4o-mini"
+        assert agent.llm.model == "openai/gpt-4o-mini"
 
     def test_evaluation_task(self, crew_planner):
         evaluator_agent = Agent(