Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-25 16:18:13 +00:00)
fix: enable any llm to run test functionality
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -2,6 +2,7 @@ from unittest import mock
 
 import pytest
 
+from crewai.llm import LLM
 from crewai.agent import Agent
 from crewai.crew import Crew
 from crewai.task import Task
@@ -131,6 +132,19 @@ class TestCrewEvaluator:
         # Ensure the console prints the table
         console.assert_has_calls([mock.call(), mock.call().print(table())])
 
+    def test_evaluator_with_custom_llm(self, crew_planner):
+        """Test that CrewEvaluator correctly handles custom LLM instances."""
+        custom_llm = LLM(model="gpt-4", temperature=0.5)
+        evaluator = CrewEvaluator(crew_planner.crew, custom_llm)
+        assert evaluator.llm == custom_llm
+        assert evaluator.llm.temperature == 0.5
+
+    def test_evaluator_with_model_name(self, crew_planner):
+        """Test that CrewEvaluator correctly handles string model names."""
+        evaluator = CrewEvaluator(crew_planner.crew, "gpt-4")
+        assert isinstance(evaluator.llm, LLM)
+        assert evaluator.llm.model == "gpt-4"
+
     def test_evaluate(self, crew_planner):
         task_output = TaskOutput(
             description="Task 1", agent=str(crew_planner.crew.agents[0])
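
The new tests exercise two ways of handing an LLM to CrewEvaluator: a pre-built LLM instance (preserving settings such as temperature) and a bare model-name string that gets wrapped in an LLM. A minimal sketch of the constructor behavior these tests imply follows; the class name and placement here are assumptions drawn from this diff, not the actual crewai implementation.

# Sketch only: illustrates normalizing a string model name into an LLM instance.
# "CrewEvaluatorSketch" is a hypothetical stand-in for crewai's CrewEvaluator.
from crewai.llm import LLM


class CrewEvaluatorSketch:
    def __init__(self, crew, llm):
        self.crew = crew
        # Accept either an LLM instance (used as-is, so custom temperature and
        # other settings survive) or a plain model-name string wrapped in LLM.
        self.llm = llm if isinstance(llm, LLM) else LLM(model=llm)


# Usage mirroring the tests above (hypothetical):
# evaluator = CrewEvaluatorSketch(crew, LLM(model="gpt-4", temperature=0.5))
# evaluator = CrewEvaluatorSketch(crew, "gpt-4")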