Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-11 00:58:30 +00:00)
Improve CustomLLM test implementation and error handling
- Fix CustomLLM.call method to avoid modifying messages parameter
- Add better error messages for isinstance checks as suggested in review
- Replace assert with proper exception handling in guardrail validation
- Add type hints to CustomLLM test class
- Add edge case test for invalid LLM type

Co-Authored-By: João <joao@crewai.com>
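The test-side changes are not shown in the diff below, so here is a minimal sketch of what a CustomLLM test double along these lines could look like, assuming crewAI exposes a BaseLLM base class with an abstract call() method (the import path, constructor arguments, and optional overrides are assumptions, not the commit's actual test code):

```python
from typing import Any, Dict, List, Optional, Union

from crewai.llms.base_llm import BaseLLM  # import path is an assumption


class CustomLLM(BaseLLM):
    """Illustrative test double: records calls without mutating its inputs."""

    def __init__(self, response: str = "ok") -> None:
        super().__init__(model="custom-test-llm")  # constructor args are an assumption
        self.response = response
        self.received: List[List[Dict[str, str]]] = []

    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> str:
        # Normalize to a list and work on a copy so the caller's `messages`
        # object is never modified (the behavior this commit fixes).
        if isinstance(messages, str):
            normalized = [{"role": "user", "content": messages}]
        else:
            normalized = [dict(m) for m in messages]
        self.received.append(normalized)
        return self.response

    def supports_function_calling(self) -> bool:
        return False

    def supports_stop_words(self) -> bool:
        return False

    def get_context_window_size(self) -> int:
        return 4096
```

Copying the incoming messages before normalizing them is what keeps call() from mutating the caller's list, which is the first item in the commit message.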
```diff
@@ -211,7 +211,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         """Set up the LLM and other components after initialization."""
         self.llm = create_llm(self.llm)
         if not isinstance(self.llm, BaseLLM):
-            raise ValueError("Unable to create LLM instance")
+            raise ValueError(f"Expected LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")
 
         # Initialize callbacks
         token_callback = TokenCalcHandler(token_cost_process=self._token_process)
```
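The commit message also mentions an edge case test for an invalid LLM type. A hedged sketch of what such a test might look like, assuming LiteAgent accepts role, goal, backstory, and llm keyword arguments and validates the LLM during initialization (the import path, constructor kwargs, and exact exception type are assumptions, not the commit's actual test):

```python
import pytest

from crewai.lite_agent import LiteAgent  # import path is an assumption


def test_lite_agent_rejects_invalid_llm_type() -> None:
    # An object that cannot be coerced into a BaseLLM instance.
    class NotAnLLM:
        pass

    # Expect the descriptive error from the isinstance check above.
    with pytest.raises((ValueError, TypeError), match="LLM"):
        LiteAgent(
            role="tester",
            goal="exercise the invalid-LLM edge case",
            backstory="unit test",
            llm=NotAnLLM(),  # neither a BaseLLM nor a model string
        )
```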
```diff
@@ -233,7 +233,8 @@ class LiteAgent(FlowTrackable, BaseModel):
         elif isinstance(self.guardrail, str):
             from crewai.tasks.llm_guardrail import LLMGuardrail
 
-            assert isinstance(self.llm, BaseLLM)
+            if not isinstance(self.llm, BaseLLM):
+                raise TypeError(f"Guardrail requires LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")
 
             self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)
 
```
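Replacing the assert matters because assertions are stripped when Python runs with optimizations enabled (python -O), so they cannot be relied on for runtime validation, and a bare AssertionError carries no context. A minimal standalone sketch of the pattern, using a stand-in class rather than crewAI's real BaseLLM:

```python
class BaseLLM:  # stand-in for crewai's BaseLLM, for illustration only
    pass


def attach_guardrail(llm: object) -> None:
    # Bad: silently disappears under `python -O` and only yields a bare AssertionError.
    # assert isinstance(llm, BaseLLM)

    # Good: always runs and names the offending type in the error message.
    if not isinstance(llm, BaseLLM):
        raise TypeError(
            f"Guardrail requires LLM instance of type BaseLLM, got {llm.__class__.__name__}"
        )


attach_guardrail(BaseLLM())     # passes
# attach_guardrail("gpt-4o")    # would raise: TypeError: ... got str
```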