fix: enable any llm to run test functionality

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date: 2025-02-09 21:15:54 +00:00
parent 409892d65f
commit b8a15c6115
4 changed files with 56 additions and 12 deletions
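
In practice this change lets crew.test() run with any model: the old openai_model_name parameter is replaced by an llm argument that takes either a model-name string or an LLM instance, as the updated tests below show. A minimal usage sketch based on those tests (the Agent/Task/Crew setup here is illustrative, not part of this commit):

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.llm import LLM
from crewai.task import Task

# Illustrative fixtures, mirroring the ones the tests below rely on.
researcher = Agent(role="Researcher", goal="Research AI", backstory="...")
task = Task(description="Test task", expected_output="Test output", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task])

# Both forms are exercised by the tests in this commit:
crew.test(2, llm="gpt-4o-mini", inputs={"topic": "AI"})       # plain model-name string
crew.test(2, llm=LLM(model="gpt-4"), inputs={"topic": "AI"})  # custom LLM instance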

View File

@@ -10,6 +10,7 @@ import instructor
import pydantic_core
import pytest
from crewai.llm import LLM
from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.crew import Crew
@@ -1123,7 +1124,7 @@ def test_kickoff_for_each_empty_input():
    assert results == []


@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_kickoff_for_each_invalid_input():
    """Tests if kickoff_for_each raises TypeError for invalid input types."""
@@ -2828,7 +2829,7 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
    copy_mock.return_value = crew
    n_iterations = 2
    crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
    crew.test(n_iterations, llm="gpt-4o-mini", inputs={"topic": "AI"})
    # Ensure kickoff is called on the copied crew
    kickoff_mock.assert_has_calls(
@@ -2844,6 +2845,32 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
        ]
    )


@mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff")
def test_crew_testing_with_custom_llm(kickoff_mock, copy_mock, crew_evaluator):
    task = Task(
        description="Test task",
        expected_output="Test output",
        agent=researcher,
    )
    crew = Crew(agents=[researcher], tasks=[task])
    copy_mock.return_value = crew

    custom_llm = LLM(model="gpt-4")
    crew.test(2, llm=custom_llm, inputs={"topic": "AI"})

    kickoff_mock.assert_has_calls([
        mock.call(inputs={"topic": "AI"}),
        mock.call(inputs={"topic": "AI"})
    ])
    crew_evaluator.assert_has_calls([
        mock.call(crew, custom_llm),
        mock.call().set_iteration(1),
        mock.call().set_iteration(2),
        mock.call().print_crew_evaluation_result(),
    ])


@pytest.mark.vcr(filter_headers=["authorization"])
def test_hierarchical_verbose_manager_agent():
@@ -3125,4 +3152,4 @@ def test_multimodal_agent_live_image_analysis():
    # Verify we got a meaningful response
    assert isinstance(result.raw, str)
    assert len(result.raw) > 100  # Expecting a detailed analysis
    assert "error" not in result.raw.lower()  # No error messages in response
    assert "error" not in result.raw.lower()  # No error messages in response

View File

@@ -2,6 +2,7 @@ from unittest import mock
import pytest
from crewai.llm import LLM
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
@@ -23,7 +24,7 @@ class TestCrewEvaluator:
        )
        crew = Crew(agents=[agent], tasks=[task])

        return CrewEvaluator(crew, openai_model_name="gpt-4o-mini")
        return CrewEvaluator(crew, llm="gpt-4o-mini")

    def test_setup_for_evaluating(self, crew_planner):
        crew_planner._setup_for_evaluating()
@@ -47,6 +48,18 @@ class TestCrewEvaluator:
        assert agent.verbose is False
        assert agent.llm.model == "gpt-4o-mini"

    def test_evaluator_with_custom_llm(self, crew_planner):
        custom_llm = LLM(model="gpt-4")
        evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
        agent = evaluator._evaluator_agent()
        assert agent.llm == custom_llm

    def test_evaluator_with_string_llm(self, crew_planner):
        evaluator = CrewEvaluator(crew_planner.crew, "gpt-4")
        agent = evaluator._evaluator_agent()
        assert isinstance(agent.llm, LLM)
        assert agent.llm.model == "gpt-4"

    def test_evaluation_task(self, crew_planner):
        evaluator_agent = Agent(
            role="Evaluator Agent",