Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
fix: improve error handling and import order
- Add better error handling in _get_llm_instance
- Fix import order in test_custom_llm_support.py

Co-Authored-By: Joe Moura <joao@crewai.com>
crew.py

@@ -1166,21 +1166,6 @@ class Crew(BaseModel):
         model_to_use = self._get_llm_instance(llm, openai_model_name)
         test_crew = self.copy()
 
-    def _get_llm_instance(self, llm: Optional[Union[str, LLM]], openai_model_name: Optional[str]) -> LLM:
-        """Get an LLM instance from either llm or openai_model_name parameter.
-
-        Args:
-            llm: LLM instance or model name
-            openai_model_name: OpenAI model name (deprecated)
-
-        Returns:
-            LLM instance
-        """
-        model = llm if llm is not None else openai_model_name
-        if isinstance(model, str):
-            return LLM(model=model)
-        return model
-
         self._test_execution_span = test_crew._telemetry.test_execution_span(
             test_crew,
             n_iterations,
@@ -1195,6 +1180,28 @@ class Crew(BaseModel):
 
         evaluator.print_crew_evaluation_result()
 
+    def _get_llm_instance(self, llm: Optional[Union[str, LLM]], openai_model_name: Optional[str]) -> LLM:
+        """Get an LLM instance from either llm or openai_model_name parameter.
+
+        Args:
+            llm: LLM instance or model name
+            openai_model_name: OpenAI model name (deprecated)
+
+        Returns:
+            LLM instance
+
+        Raises:
+            ValueError: If neither llm nor openai_model_name is provided
+        """
+        model = llm if llm is not None else openai_model_name
+        if model is None:
+            raise ValueError("Must provide either 'llm' or 'openai_model_name' parameter")
+        if isinstance(model, str):
+            return LLM(model=model)
+        if not isinstance(model, LLM):
+            raise ValueError("Model must be either a string or an LLM instance")
+        return model
+
     def __repr__(self):
         return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
 
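For context, here is a minimal standalone sketch (not the crewAI code itself) of the validation behaviour the rewritten method introduces. StubLLM and resolve_llm are illustrative stand-ins for crewAI's LLM class and Crew._get_llm_instance:

# Standalone illustration only: StubLLM stands in for crewAI's LLM class,
# and resolve_llm mirrors the logic of the new _get_llm_instance.
from typing import Optional, Union


class StubLLM:
    def __init__(self, model: str):
        self.model = model


def resolve_llm(llm: Optional[Union[str, StubLLM]],
                openai_model_name: Optional[str]) -> StubLLM:
    model = llm if llm is not None else openai_model_name
    if model is None:
        # New behaviour: fail fast instead of returning None
        raise ValueError("Must provide either 'llm' or 'openai_model_name' parameter")
    if isinstance(model, str):
        # A bare model name is wrapped into an LLM instance
        return StubLLM(model=model)
    if not isinstance(model, StubLLM):
        # New behaviour: reject unexpected types explicitly
        raise ValueError("Model must be either a string or an LLM instance")
    return model


print(resolve_llm("gpt-4", None).model)    # -> gpt-4
print(resolve_llm(None, "gpt-4").model)    # -> gpt-4 (deprecated path)
try:
    resolve_llm(None, None)
except ValueError as exc:
    print(exc)                             # -> Must provide either ...

Compared with the removed version, which silently returned None when both arguments were missing, the relocated method now surfaces the problem at call time.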
test_custom_llm_support.py

@@ -1,6 +1,5 @@
-from unittest.mock import MagicMock
-
 import pytest
+from unittest.mock import MagicMock
 
 from crewai.agent import Agent
 from crewai.crew import Crew
@@ -37,7 +36,7 @@ def test_crew_test_with_custom_llm():
     crew.test(n_iterations=1, openai_model_name="gpt-4")
 
     # Test error when neither parameter is provided
-    with pytest.raises(ValueError, match="Either llm or openai_model_name must be provided"):
+    with pytest.raises(ValueError, match="Must provide either 'llm' or 'openai_model_name' parameter"):
         crew.test(n_iterations=1)
 
 def test_crew_evaluator_with_custom_llm():
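The updated assertion relies on pytest.raises(..., match=...), which applies the given pattern as a regular expression (via re.search) to the string of the raised exception, so the test had to be updated to the new message. A small self-contained sketch of that mechanism, independent of crewAI; resolve is a hypothetical stand-in:

import pytest


def resolve(llm=None, openai_model_name=None):
    # Hypothetical stand-in raising the same error as the new _get_llm_instance
    if llm is None and openai_model_name is None:
        raise ValueError("Must provide either 'llm' or 'openai_model_name' parameter")
    return llm if llm is not None else openai_model_name


def test_missing_model_raises():
    # match= is searched as a regex inside str(exception); the old pattern
    # ("Either llm or openai_model_name must be provided") would no longer match.
    with pytest.raises(ValueError, match="Must provide either 'llm' or 'openai_model_name' parameter"):
        resolve()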