test: add tests for custom LLM support in Crew.test() and CrewEvaluator

- Add tests for string model name input
- Add tests for LLM instance input
- Add tests for backward compatibility
- Add tests for error cases
- Add tests for CrewEvaluator LLM handling

Co-Authored-By: Joe Moura <joao@crewai.com>
commit f3704a44b3
parent 9bd39464cc
Author: Devin AI
Date: 2025-02-09 22:42:44 +00:00


@@ -0,0 +1,64 @@
import pytest
from unittest.mock import MagicMock

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.llm import LLM
from crewai.task import Task
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator


def test_crew_test_with_custom_llm():
    # Setup
    agent = Agent(
        role="test",
        goal="test",
        backstory="test",
        llm=LLM(model="gpt-4"),
    )
    task = Task(
        description="test",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])

    # Test with string model name
    crew.test(n_iterations=1, llm="gpt-4")

    # Test with LLM instance
    custom_llm = LLM(model="gpt-4")
    crew.test(n_iterations=1, llm=custom_llm)

    # Test backward compatibility
    crew.test(n_iterations=1, openai_model_name="gpt-4")

    # Test error when neither parameter is provided
    with pytest.raises(ValueError, match="Either llm or openai_model_name must be provided"):
        crew.test(n_iterations=1)


def test_crew_evaluator_with_custom_llm():
    # Setup
    agent = Agent(
        role="test",
        goal="test",
        backstory="test",
        llm=LLM(model="gpt-4"),
    )
    task = Task(
        description="test",
        agent=agent,
    )
    crew = Crew(agents=[agent], tasks=[task])

    # Test with string model name
    evaluator = CrewEvaluator(crew, "gpt-4")
    assert isinstance(evaluator.llm, LLM)
    assert evaluator.llm.model == "gpt-4"

    # Test with LLM instance
    custom_llm = LLM(model="gpt-4")
    evaluator = CrewEvaluator(crew, custom_llm)
    assert evaluator.llm == custom_llm

    # Test that evaluator agent uses the correct LLM
    evaluator_agent = evaluator._evaluator_agent()
    assert evaluator_agent.llm == evaluator.llm
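
For context, below is a minimal sketch of the llm/openai_model_name dispatch these tests pin down. It is inferred from the assertions above, not taken from the actual crewai source; _resolve_llm is a hypothetical helper name.

from typing import Optional, Union

from crewai.llm import LLM


def _resolve_llm(
    llm: Optional[Union[str, LLM]] = None,
    openai_model_name: Optional[str] = None,
) -> LLM:
    # Neither parameter given: the error case the last Crew.test() check expects.
    if llm is None and openai_model_name is None:
        raise ValueError("Either llm or openai_model_name must be provided")
    # LLM instances pass through unchanged (evaluator.llm == custom_llm).
    if isinstance(llm, LLM):
        return llm
    # Strings such as "gpt-4" (or the legacy openai_model_name) are wrapped
    # in an LLM, so evaluator.llm.model == "gpt-4" holds.
    return LLM(model=llm if llm is not None else openai_model_name)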