Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-16 04:18:35 +00:00

Compare commits: 1.2.0...devin/1739 (2 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 2744af4825 |  |
|  | 81f84cab58 |  |

@@ -1,6 +1,9 @@
 from importlib.metadata import version as get_version
 from typing import Optional
+from typing import Union
+
+from crewai.llm import LLM
 
 import click
 
 from crewai.cli.add_crew_to_flow import add_crew_to_flow

@@ -180,8 +183,15 @@ def reset_memories(
     default="gpt-4o-mini",
     help="LLM Model to run the tests on the Crew. For now only accepting only OpenAI models.",
 )
-def test(n_iterations: int, model: str):
-    """Test the crew and evaluate the results."""
+def test(n_iterations: int, model: Union[str, LLM]):
+    """Test the crew and evaluate the results using either a model name or LLM instance.
+
+    Args:
+        n_iterations: The number of iterations to run the test.
+        model: Either a model name string or an LLM instance to use for evaluating
+            the performance of the agents. If a string is provided, it will be used
+            to create an LLM instance.
+    """
     click.echo(f"Testing the crew for {n_iterations} iterations with model {model}")
     evaluate_crew(n_iterations, model)

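To illustrate the widened CLI parameter documented above, here is a minimal, hypothetical click command that accepts a model name and promotes it to an `LLM` instance before use; `run_test` and its options are assumptions for this sketch, not crewAI's actual CLI wiring.

```python
# Hypothetical sketch, not crewAI's real CLI: shows a string model name (or an LLM
# instance passed programmatically) being promoted to an LLM before evaluation.
from typing import Union

import click

from crewai.llm import LLM


@click.command()
@click.option("-n", "--n_iterations", type=int, default=1, help="Number of test iterations.")
@click.option("-m", "--model", default="gpt-4o-mini", help="Model name to evaluate with.")
def run_test(n_iterations: int, model: Union[str, LLM]) -> None:
    """Promote a bare model name to an LLM instance, mirroring the pattern in the diff."""
    llm = model if isinstance(model, LLM) else LLM(model=model)
    click.echo(f"Testing the crew for {n_iterations} iterations with model {llm.model}")
```
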
@@ -18,6 +18,9 @@ from pydantic import (
 )
 from pydantic_core import PydanticCustomError
+from typing import Union
+
+from crewai.llm import LLM
 
 from crewai.agent import Agent
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.cache import CacheHandler

@@ -1075,19 +1078,30 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: Optional[str] = None,
+        openai_model_name: Optional[Union[str, LLM]] = None,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
-        """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
+        """Test and evaluate the Crew with the given inputs for n iterations.
+
+        Args:
+            n_iterations: The number of iterations to run the test.
+            openai_model_name: Either a model name string or an LLM instance to use for evaluating
+                the performance of the agents. If a string is provided, it will be used to create
+                an LLM instance.
+            inputs: The inputs to use for the test.
+
+        Raises:
+            ValueError: If openai_model_name is not a string or LLM instance.
+        """
         test_crew = self.copy()
 
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
-        )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+            openai_model_name,
+        )
+        evaluator = CrewEvaluator(test_crew, openai_model_name)
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)

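As a usage note on the widened `Crew.test()` signature: both call styles below should be accepted after this change. The agent and task definitions are placeholders for the sketch, not code from the PR.

```python
# Usage sketch with placeholder agent/task definitions; only the two crew.test() calls
# reflect the signature change in the diff.
from crewai import Agent, Crew, Task
from crewai.llm import LLM

researcher = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="Placeholder agent used only for this example.",
)
task = Task(description="Test task", expected_output="Test output", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task])

# Backward compatible: a plain model name string.
crew.test(n_iterations=1, openai_model_name="gpt-4o-mini")

# New: a configured LLM instance, whose settings (e.g. temperature) are preserved.
crew.test(n_iterations=1, openai_model_name=LLM(model="gpt-4o-mini", temperature=0.5))
```
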
@@ -1,6 +1,10 @@
+from typing import Union
+
+from crewai.llm import LLM
 from collections import defaultdict
 
 from pydantic import BaseModel, Field
+from crewai.utilities.logger import Logger
 from rich.box import HEAVY_EDGE
 from rich.console import Console
 from rich.table import Table

@@ -23,7 +27,7 @@ class CrewEvaluator:
 
     Attributes:
         crew (Crew): The crew of agents to evaluate.
-        openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
+        openai_model_name (Union[str, LLM]): Either a model name string or an LLM instance to use for evaluating the performance of the agents.
         tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
         iteration (int): The current iteration of the evaluation.
     """

@@ -32,10 +36,29 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, openai_model_name: Union[str, LLM]):
+        """Initialize the CrewEvaluator.
+
+        Args:
+            crew (Crew): The crew to evaluate
+            openai_model_name (Union[str, LLM]): Either a model name string or an LLM instance
+                to use for evaluation. If a string is provided, it will be used to create an
+                LLM instance with default settings. If an LLM instance is provided, its settings
+                (like temperature) will be preserved.
+
+        Raises:
+            ValueError: If openai_model_name is not a string or LLM instance.
+        """
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        if not isinstance(openai_model_name, (str, LLM)):
+            raise ValueError(f"Invalid model type '{type(openai_model_name)}'. Expected str or LLM instance.")
+        self.model_instance = openai_model_name if isinstance(openai_model_name, LLM) else LLM(model=openai_model_name)
         self._telemetry = Telemetry()
+        self._logger = Logger()
+        self._logger.log(
+            "info",
+            f"Initializing CrewEvaluator with model: {openai_model_name if isinstance(openai_model_name, str) else openai_model_name.model}"
+        )
         self._setup_for_evaluating()
 
     def _setup_for_evaluating(self) -> None:

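The constructor boils down to a validate-and-normalize step; the standalone helper below restates it outside the class (the name `resolve_evaluator_llm` is hypothetical, not part of crewAI).

```python
# Standalone restatement of the __init__ logic above; `resolve_evaluator_llm` is a
# hypothetical helper name, not part of crewAI.
from typing import Union

from crewai.llm import LLM


def resolve_evaluator_llm(openai_model_name: Union[str, LLM]) -> LLM:
    """Return an LLM instance, wrapping a bare model name and rejecting other types."""
    if not isinstance(openai_model_name, (str, LLM)):
        raise ValueError(
            f"Invalid model type '{type(openai_model_name)}'. Expected str or LLM instance."
        )
    # An LLM instance keeps its own settings (e.g. temperature); a string gets defaults.
    return openai_model_name if isinstance(openai_model_name, LLM) else LLM(model=openai_model_name)
```
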
@@ -51,7 +74,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.model_instance,
         )
 
     def _evaluation_task(

@@ -181,7 +204,11 @@ class CrewEvaluator:
                 self.crew,
                 evaluation_result.pydantic.quality,
                 current_task._execution_time,
-                self.openai_model_name,
+                self.model_instance.model,
             )
+            self._logger.log(
+                "info",
+                f"Task evaluation completed with quality score: {evaluation_result.pydantic.quality}"
+            )
             self.tasks_scores[self.iteration].append(evaluation_result.pydantic.quality)
             self.run_execution_times[self.iteration].append(

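Side note on the bookkeeping above: quality scores and execution times accumulate per iteration in `defaultdict(list)` containers, as in this tiny sketch with made-up numbers.

```python
# Minimal sketch of the per-iteration bookkeeping; the scores and timings are invented.
from collections import defaultdict

tasks_scores: defaultdict = defaultdict(list)
run_execution_times: defaultdict = defaultdict(list)

iteration = 1
for quality, seconds in ((8.5, 1.2), (9.0, 0.9)):
    tasks_scores[iteration].append(quality)
    run_execution_times[iteration].append(seconds)

average_quality = sum(tasks_scores[iteration]) / len(tasks_scores[iteration])
print(f"Iteration {iteration}: average quality {average_quality:.2f}")
```
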
@@ -10,6 +10,7 @@ import instructor
 import pydantic_core
 import pytest
 
+from crewai.llm import LLM
 from crewai.agent import Agent
 from crewai.agents.cache import CacheHandler
 from crewai.crew import Crew

@@ -300,6 +301,35 @@ def test_hierarchical_process():
     )
 
 
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_crew_test_with_custom_llm():
+    """Test that Crew.test() works correctly with custom LLM instances."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    custom_llm = LLM(model="gpt-4", temperature=0.5)
+    crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential)
+
+    with mock.patch('crewai.crew.CrewEvaluator') as mock_evaluator:
+        crew.test(n_iterations=1, openai_model_name=custom_llm)
+        mock_evaluator.assert_called_once_with(mock.ANY, custom_llm)
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_crew_test_backward_compatibility():
+    """Test that Crew.test() maintains backward compatibility with string model names."""
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        agent=researcher,
+    )
+    crew = Crew(agents=[researcher], tasks=[task], process=Process.sequential)
+
+    with mock.patch('crewai.crew.CrewEvaluator') as mock_evaluator:
+        crew.test(n_iterations=1, openai_model_name="gpt-4")
+        mock_evaluator.assert_called_once_with(mock.ANY, "gpt-4")
+
+
 def test_manager_llm_requirement_for_hierarchical_process():
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",

@@ -1123,7 +1153,7 @@ def test_kickoff_for_each_empty_input():
     assert results == []
 
 
-@pytest.mark.vcr(filter_headers=["authorization"])
+@pytest.mark.vcr(filter_headeruvs=["authorization"])
 def test_kickoff_for_each_invalid_input():
     """Tests if kickoff_for_each raises TypeError for invalid input types."""

@@ -3125,4 +3155,4 @@ def test_multimodal_agent_live_image_analysis():
     # Verify we got a meaningful response
     assert isinstance(result.raw, str)
     assert len(result.raw) > 100  # Expecting a detailed analysis
-    assert "error" not in result.raw.lower()  # No error messages in response
+    assert "error" not in result.raw.lower()  # No error messages in response

@@ -2,6 +2,7 @@ from unittest import mock
 
 import pytest
 
+from crewai.llm import LLM
 from crewai.agent import Agent
 from crewai.crew import Crew
 from crewai.task import Task

@@ -131,6 +132,30 @@ class TestCrewEvaluator:
         # Ensure the console prints the table
         console.assert_has_calls([mock.call(), mock.call().print(table())])
 
+    def test_evaluator_with_custom_llm(self, crew_planner):
+        """Test that CrewEvaluator correctly handles custom LLM instances."""
+        custom_llm = LLM(model="gpt-4", temperature=0.5)
+        evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
+        assert evaluator.model_instance == custom_llm
+        assert evaluator.model_instance.temperature == 0.5
+
+    def test_evaluator_with_invalid_model_type(self, crew_planner):
+        """Test that CrewEvaluator raises error for invalid model type."""
+        with pytest.raises(ValueError, match="Invalid model type"):
+            CrewEvaluator(crew_planner.crew, 123)
+
+    def test_evaluator_preserves_model_settings(self, crew_planner):
+        """Test that CrewEvaluator preserves model settings."""
+        custom_llm = LLM(model="gpt-4", temperature=0.7)
+        evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
+        assert evaluator.model_instance.temperature == 0.7
+
+    def test_evaluator_with_model_name(self, crew_planner):
+        """Test that CrewEvaluator correctly handles string model names."""
+        evaluator = CrewEvaluator(crew_planner.crew, "gpt-4")
+        assert isinstance(evaluator.model_instance, LLM)
+        assert evaluator.model_instance.model == "gpt-4"
+
     def test_evaluate(self, crew_planner):
         task_output = TaskOutput(
             description="Task 1", agent=str(crew_planner.crew.agents[0])