Mirror of https://github.com/crewAIInc/crewAI.git
Improve CustomLLM test implementation and error handling
- Fix CustomLLM.call method to avoid modifying messages parameter
- Add better error messages for isinstance checks as suggested in review
- Replace assert with proper exception handling in guardrail validation
- Add type hints to CustomLLM test class
- Add edge case test for invalid LLM type

Co-Authored-By: João <joao@crewai.com>
@@ -211,7 +211,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         """Set up the LLM and other components after initialization."""
         self.llm = create_llm(self.llm)
         if not isinstance(self.llm, BaseLLM):
-            raise ValueError("Unable to create LLM instance")
+            raise ValueError(f"Expected LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")

         # Initialize callbacks
         token_callback = TokenCalcHandler(token_cost_process=self._token_process)
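The first hunk only changes the error text, but the payoff is in what the reader now sees. A minimal sketch of the guard's behavior after this change (BaseLLM stands in for crewai.llms.base_llm.BaseLLM; BadLLM is a hypothetical class, not repository code):

class BaseLLM:  # simplified stand-in for crewai.llms.base_llm.BaseLLM
    pass

class BadLLM:  # hypothetical type that does not subclass BaseLLM
    pass

llm = BadLLM()
if not isinstance(llm, BaseLLM):
    # Raises: ValueError: Expected LLM instance of type BaseLLM, got BadLLM
    # -- the offending type is named, instead of the old generic
    # "Unable to create LLM instance".
    raise ValueError(f"Expected LLM instance of type BaseLLM, got {llm.__class__.__name__}")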
@@ -233,7 +233,8 @@ class LiteAgent(FlowTrackable, BaseModel):
         elif isinstance(self.guardrail, str):
             from crewai.tasks.llm_guardrail import LLMGuardrail

-            assert isinstance(self.llm, BaseLLM)
+            if not isinstance(self.llm, BaseLLM):
+                raise TypeError(f"Guardrail requires LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")

             self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)
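The second hunk fixes more than message quality: assert statements are compiled away entirely when Python runs with the -O flag, so the old check could silently let a non-BaseLLM instance reach LLMGuardrail in optimized mode. An explicit raise always executes. A sketch of the difference (the ensure_base_llm helper is illustrative, not part of the codebase):

def ensure_base_llm(llm, base_cls):
    # Unlike `assert isinstance(llm, base_cls)`, this check survives
    # `python -O`, which strips asserts from the bytecode.
    if not isinstance(llm, base_cls):
        raise TypeError(
            f"Guardrail requires LLM instance of type BaseLLM, got {llm.__class__.__name__}"
        )
    return llm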
@@ -426,18 +426,13 @@ def test_lite_agent_with_custom_llm_and_guardrails():
     from crewai.llms.base_llm import BaseLLM

     class CustomLLM(BaseLLM):
-        def __init__(self, response="Custom response"):
+        def __init__(self, response: str = "Custom response"):
             super().__init__(model="custom-model")
             self.response = response
             self.call_count = 0

-        def call(self, messages, tools=None, callbacks=None, available_functions=None, from_task=None, from_agent=None):
+        def call(self, messages, tools=None, callbacks=None, available_functions=None, from_task=None, from_agent=None) -> str:
             self.call_count += 1
-            if isinstance(messages, str):
-                messages = [{"role": "user", "content": messages}]
-            for message in messages:
-                if isinstance(message["content"], str):
-                    message["content"] = [{"type": "text", "text": message["content"]}]
             return self.response

     custom_llm = CustomLLM(response="Brazilian soccer players are the best!")
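The deleted lines in CustomLLM.call are the substance of the "avoid modifying messages parameter" fix: the loop rewrote each message's content field in place, and because a list of dicts is passed by reference, the caller's own data was silently restructured. A standalone sketch of that aliasing bug (illustrative only, not repository code):

messages = [{"role": "user", "content": "hi"}]

def old_call(msgs):
    # Mirrors the removed loop: it mutates the caller's dicts in place.
    for m in msgs:
        if isinstance(m["content"], str):
            m["content"] = [{"type": "text", "text": m["content"]}]

old_call(messages)
print(messages[0]["content"])  # [{'type': 'text', 'text': 'hi'}] -- caller's list changed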
@@ -470,3 +465,19 @@ def test_lite_agent_with_custom_llm_and_guardrails():

     result2 = agent2.kickoff("Test message")
     assert result2.raw == "Modified by guardrail"
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_lite_agent_with_invalid_llm():
+    """Test that Agent raises proper error with invalid LLM type."""
+    class InvalidLLM:
+        pass
+
+    with pytest.raises(ValueError) as exc_info:
+        Agent(
+            role="Test Agent",
+            goal="Test goal",
+            backstory="Test backstory",
+            llm=InvalidLLM()
+        )
+    assert "Expected LLM instance of type BaseLLM" in str(exc_info.value)