mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-10 08:38:30 +00:00)
test: fix test assertions for llm parameter
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -6,6 +6,10 @@ from concurrent.futures import Future
 from hashlib import md5
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
+from pydantic import InstanceOf
+
+from crewai.llm import LLM
+
 from pydantic import (
     UUID4,
     BaseModel,
@@ -1075,7 +1079,8 @@ class Crew(BaseModel):
     def test(
         self,
         n_iterations: int,
-        openai_model_name: Optional[str] = None,
+        llm: Optional[Union[str, InstanceOf[LLM], Any]] = None,
+        openai_model_name: Optional[str] = None,  # For backward compatibility
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
@@ -1085,9 +1090,9 @@ class Crew(BaseModel):
             test_crew,
             n_iterations,
             inputs,
-            openai_model_name,  # type: ignore[arg-type]
-        )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
+            llm or openai_model_name or "gpt-4o-mini",
+        )
+        evaluator = CrewEvaluator(test_crew, llm or openai_model_name or "gpt-4o-mini")
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
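Taken together, these two hunks resolve the evaluation model as `llm or openai_model_name or "gpt-4o-mini"`. A minimal usage sketch of the new signature (the researcher/task definitions below are placeholders, and actually running test() requires configured model credentials):

from crewai import Agent, Crew, Task
from crewai.llm import LLM

# Placeholder crew; any valid agent/task combination works the same way.
researcher = Agent(role="Researcher", goal="Research a topic", backstory="Seasoned analyst")
task = Task(description="Summarize the topic", expected_output="A short summary", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task])

# New parameter: a model name string or an LLM instance.
crew.test(n_iterations=2, llm="gpt-4o-mini")
crew.test(n_iterations=2, llm=LLM(model="gpt-4o"))

# Legacy parameter is still accepted and used when llm is not given.
crew.test(n_iterations=2, openai_model_name="gpt-4o-mini")

# Neither given: the "gpt-4o-mini" default applies.
crew.test(n_iterations=2)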
@@ -1,11 +1,14 @@
 from collections import defaultdict
 
-from pydantic import BaseModel, Field
+from typing import Any, Union
+
+from pydantic import BaseModel, Field, InstanceOf
 from rich.box import HEAVY_EDGE
 from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
@@ -23,7 +26,7 @@ class CrewEvaluator:
 
     Attributes:
         crew (Crew): The crew of agents to evaluate.
-        openai_model_name (str): The model to use for evaluating the performance of the agents (for now ONLY OpenAI accepted).
+        llm (Union[str, InstanceOf[LLM], Any]): The language model to use for evaluating the performance of the agents.
        tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
         iteration (int): The current iteration of the evaluation.
     """
@@ -32,9 +35,9 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, openai_model_name: str):
+    def __init__(self, crew, llm: Union[str, InstanceOf[LLM], Any]):
         self.crew = crew
-        self.openai_model_name = openai_model_name
+        self.llm = llm if isinstance(llm, LLM) else LLM(model=llm)
         self._telemetry = Telemetry()
         self._setup_for_evaluating()
 
@@ -51,7 +54,7 @@ class CrewEvaluator:
             ),
             backstory="Evaluator agent for crew evaluation with precise capabilities to evaluate the performance of the agents in the crew based on the tasks they have performed",
             verbose=False,
-            llm=self.openai_model_name,
+            llm=self.llm,
         )
 
     def _evaluation_task(
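The constructor change above normalizes whatever it receives into an LLM instance, so the evaluator agent can always be handed self.llm. A standalone sketch of the same idiom (the resolve_llm helper name is illustrative, not part of the library):

from typing import Any, Union

from crewai.llm import LLM


def resolve_llm(llm: Union[str, LLM, Any]) -> LLM:
    # Same pattern as __init__ above: keep LLM instances, wrap plain model names.
    return llm if isinstance(llm, LLM) else LLM(model=llm)


# Both forms end up as an LLM object exposing .model.
assert resolve_llm("gpt-4o-mini").model == "gpt-4o-mini"
assert resolve_llm(LLM(model="gpt-4o")).model == "gpt-4o"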
@@ -300,6 +300,15 @@ def test_hierarchical_process():
     )
 
 
+@mock.patch("crewai.crew.CrewEvaluator")
+@mock.patch("crewai.crew.Crew.copy")
+def test_crew_test_backward_compatibility(mock_copy, mock_evaluator):
+    crew = Crew(agents=[researcher], tasks=[Task(description="test", agent=researcher)])
+    crew.test(2, openai_model_name="gpt-4")
+    mock_evaluator.assert_called_once()
+    _, kwargs = mock_evaluator.call_args
+    assert kwargs["llm"] == "gpt-4"
+
 def test_manager_llm_requirement_for_hierarchical_process():
     task = Task(
         description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
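The new test asserts that the legacy openai_model_name value is what ultimately reaches CrewEvaluator. A tiny sketch of the precedence expression from the crew.py hunk earlier, with illustrative values (the resolve_eval_model helper is hypothetical):

def resolve_eval_model(llm=None, openai_model_name=None):
    # Same precedence as in Crew.test(): explicit llm, then the legacy name, then the default.
    return llm or openai_model_name or "gpt-4o-mini"


assert resolve_eval_model(openai_model_name="gpt-4") == "gpt-4"
assert resolve_eval_model(llm="gpt-4o", openai_model_name="gpt-4") == "gpt-4o"
assert resolve_eval_model() == "gpt-4o-mini"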
@@ -1123,7 +1132,7 @@ def test_kickoff_for_each_empty_input():
     assert results == []
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_kickoff_for_each_invalid_input():
     """Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -3125,4 +3134,4 @@ def test_multimodal_agent_live_image_analysis():
     # Verify we got a meaningful response
     assert isinstance(result.raw, str)
     assert len(result.raw) > 100  # Expecting a detailed analysis
     assert "error" not in result.raw.lower()  # No error messages in response
@@ -4,6 +4,7 @@ import pytest
 
 from crewai.agent import Agent
 from crewai.crew import Crew
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.evaluators.crew_evaluator_handler import (
@@ -23,7 +24,7 @@ class TestCrewEvaluator:
         )
         crew = Crew(agents=[agent], tasks=[task])
 
-        return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
+        return CrewEvaluator(crew, llm="gpt-4o-mini")
 
     def test_setup_for_evaluating(self, crew_planner):
         crew_planner._setup_for_evaluating()
@@ -46,6 +47,7 @@ class TestCrewEvaluator:
         )
         assert agent.verbose is False
         assert agent.llm.model == "gpt-4o-mini"
+        assert isinstance(agent.llm, LLM)
 
     def test_evaluation_task(self, crew_planner):
         evaluator_agent = Agent(
@@ -131,6 +133,17 @@ class TestCrewEvaluator:
         # Ensure the console prints the table
         console.assert_has_calls([mock.call(), mock.call().print(table())])
 
+    def test_custom_llm_support(self):
+        agent = Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1")
+        task = Task(description="Task 1", expected_output="Output 1", agent=agent)
+        crew = Crew(agents=[agent], tasks=[task])
+
+        custom_llm = LLM(model="custom-model")
+        evaluator = CrewEvaluator(crew, llm=custom_llm)
+
+        assert evaluator.llm.model == "custom-model"
+        assert isinstance(evaluator.llm, LLM)
+
     def test_evaluate(self, crew_planner):
         task_output = TaskOutput(
             description="Task 1", agent=str(crew_planner.crew.agents[0])