From 5b548d618da7787e5b3b1dd6baecbc5797651f43 Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Mon, 7 Jul 2025 17:27:13 +0000
Subject: [PATCH] Improve CustomLLM test implementation and error handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Fix CustomLLM.call method to avoid modifying messages parameter
- Add better error messages for isinstance checks as suggested in review
- Replace assert with proper exception handling in guardrail validation
- Add type hints to CustomLLM test class
- Add edge case test for invalid LLM type

Co-Authored-By: João
---
 src/crewai/lite_agent.py |  5 +++--
 tests/test_lite_agent.py | 25 ++++++++++++++++++-------
 2 files changed, 21 insertions(+), 9 deletions(-)

diff --git a/src/crewai/lite_agent.py b/src/crewai/lite_agent.py
index fff13cd08..047a7dc83 100644
--- a/src/crewai/lite_agent.py
+++ b/src/crewai/lite_agent.py
@@ -211,7 +211,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         """Set up the LLM and other components after initialization."""
         self.llm = create_llm(self.llm)
         if not isinstance(self.llm, BaseLLM):
-            raise ValueError("Unable to create LLM instance")
+            raise ValueError(f"Expected LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")
 
         # Initialize callbacks
         token_callback = TokenCalcHandler(token_cost_process=self._token_process)
@@ -233,7 +233,8 @@ class LiteAgent(FlowTrackable, BaseModel):
         elif isinstance(self.guardrail, str):
             from crewai.tasks.llm_guardrail import LLMGuardrail
 
-            assert isinstance(self.llm, BaseLLM)
+            if not isinstance(self.llm, BaseLLM):
+                raise TypeError(f"Guardrail requires LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")
 
             self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)
 
diff --git a/tests/test_lite_agent.py b/tests/test_lite_agent.py
index fb5d0d8be..2a18c9e26 100644
--- a/tests/test_lite_agent.py
+++ b/tests/test_lite_agent.py
@@ -426,18 +426,13 @@ def test_lite_agent_with_custom_llm_and_guardrails():
     from crewai.llms.base_llm import BaseLLM
 
     class CustomLLM(BaseLLM):
-        def __init__(self, response="Custom response"):
+        def __init__(self, response: str = "Custom response"):
             super().__init__(model="custom-model")
             self.response = response
             self.call_count = 0
 
-        def call(self, messages, tools=None, callbacks=None, available_functions=None, from_task=None, from_agent=None):
+        def call(self, messages, tools=None, callbacks=None, available_functions=None, from_task=None, from_agent=None) -> str:
             self.call_count += 1
-            if isinstance(messages, str):
-                messages = [{"role": "user", "content": messages}]
-            for message in messages:
-                if isinstance(message["content"], str):
-                    message["content"] = [{"type": "text", "text": message["content"]}]
             return self.response
 
     custom_llm = CustomLLM(response="Brazilian soccer players are the best!")
@@ -470,3 +465,19 @@
 
     result2 = agent2.kickoff("Test message")
     assert result2.raw == "Modified by guardrail"
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_lite_agent_with_invalid_llm():
+    """Test that Agent raises proper error with invalid LLM type."""
+    class InvalidLLM:
+        pass
+
+    with pytest.raises(ValueError) as exc_info:
+        Agent(
+            role="Test Agent",
+            goal="Test goal",
+            backstory="Test backstory",
+            llm=InvalidLLM()
+        )
+    assert "Expected LLM instance of type BaseLLM" in str(exc_info.value)
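
Note for reviewers (illustrative, not part of the patch): the first bullet
exists because the old CustomLLM.call stub rewrote each message's "content"
field in place, so the caller's message list was mutated as a side effect of
calling the test double. A minimal sketch of that hazard, using hypothetical
names:

    # Sketch of the in-place mutation the patch removes (illustrative only).
    def mutating_call(messages: list) -> None:
        for message in messages:
            if isinstance(message["content"], str):
                # Rebinding message["content"] rewrites the caller's dict too,
                # because dicts are shared by reference in Python.
                message["content"] = [{"type": "text", "text": message["content"]}]

    msgs = [{"role": "user", "content": "hi"}]
    mutating_call(msgs)
    assert msgs[0]["content"] == [{"type": "text", "text": "hi"}]  # caller's data changed

The fixed stub simply returns its canned response without touching messages,
so repeated calls observe the input exactly as the agent built it.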
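
Likewise for the assert replacement: a bare assert is stripped when Python
runs with the -O flag, so the guardrail type check could silently pass a
non-BaseLLM object. An explicit check always executes and names the offending
type. A self-contained sketch, assuming only the BaseLLM import shown in the
diff (ensure_base_llm is a hypothetical helper, not part of the patch):

    from crewai.llms.base_llm import BaseLLM

    def ensure_base_llm(llm: object) -> BaseLLM:
        # Unlike `assert isinstance(...)`, this check survives `python -O`.
        if not isinstance(llm, BaseLLM):
            raise TypeError(
                f"Guardrail requires LLM instance of type BaseLLM, "
                f"got {llm.__class__.__name__}"
            )
        return llm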