fix: enable any LLM to run test functionality

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date: 2025-02-09 21:24:58 +00:00
Parent: 409892d65f
Commit: 81f84cab58
5 changed files with 90 additions and 14 deletions


@@ -2,6 +2,7 @@ from unittest import mock
 import pytest
+from crewai.llm import LLM
 from crewai.agent import Agent
 from crewai.crew import Crew
 from crewai.task import Task
@@ -131,6 +132,19 @@ class TestCrewEvaluator:
         # Ensure the console prints the table
         console.assert_has_calls([mock.call(), mock.call().print(table())])
 
+    def test_evaluator_with_custom_llm(self, crew_planner):
+        """Test that CrewEvaluator correctly handles custom LLM instances."""
+        custom_llm = LLM(model="gpt-4", temperature=0.5)
+        evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
+        assert evaluator.llm == custom_llm
+        assert evaluator.llm.temperature == 0.5
+
+    def test_evaluator_with_model_name(self, crew_planner):
+        """Test that CrewEvaluator correctly handles string model names."""
+        evaluator = CrewEvaluator(crew_planner.crew, "gpt-4")
+        assert isinstance(evaluator.llm, LLM)
+        assert evaluator.llm.model == "gpt-4"
+
     def test_evaluate(self, crew_planner):
         task_output = TaskOutput(
             description="Task 1", agent=str(crew_planner.crew.agents[0])
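For context, here is a minimal sketch of the constructor behavior these tests exercise. This is an illustration inferred from the assertions above, not the actual implementation in this commit: it assumes CrewEvaluator simply normalizes its llm argument, passing LLM instances through and wrapping bare model-name strings.

    from crewai.llm import LLM

    class CrewEvaluator:
        """Illustrative only: the accept-LLM-or-string behavior the tests assert."""

        def __init__(self, crew, llm):
            self.crew = crew
            # Keep LLM instances as-is (so settings like temperature survive);
            # wrap a plain model-name string in an LLM so downstream code
            # always works with one type.
            self.llm = llm if isinstance(llm, LLM) else LLM(model=llm)

Under that assumption, both new tests pass: a custom LLM instance is stored unchanged with temperature 0.5, and the string "gpt-4" yields an LLM whose model attribute is "gpt-4", which is what lets any LLM drive the crew test functionality.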