diff --git a/src/crewai/lite_agent.py b/src/crewai/lite_agent.py
index 047a7dc83..2df245de2 100644
--- a/src/crewai/lite_agent.py
+++ b/src/crewai/lite_agent.py
@@ -211,7 +211,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         """Set up the LLM and other components after initialization."""
         self.llm = create_llm(self.llm)
         if not isinstance(self.llm, BaseLLM):
-            raise ValueError(f"Expected LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")
+            raise ValueError(f"Expected LLM instance of type BaseLLM, got {type(self.llm).__name__}")
 
         # Initialize callbacks
         token_callback = TokenCalcHandler(token_cost_process=self._token_process)
@@ -234,7 +234,7 @@ class LiteAgent(FlowTrackable, BaseModel):
             from crewai.tasks.llm_guardrail import LLMGuardrail
 
             if not isinstance(self.llm, BaseLLM):
-                raise TypeError(f"Guardrail requires LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")
+                raise TypeError(f"Guardrail requires LLM instance of type BaseLLM, got {type(self.llm).__name__}")
 
             self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)
 
diff --git a/tests/test_lite_agent.py b/tests/test_lite_agent.py
index 2a18c9e26..7c266a529 100644
--- a/tests/test_lite_agent.py
+++ b/tests/test_lite_agent.py
@@ -433,8 +433,24 @@ def test_lite_agent_with_custom_llm_and_guardrails():
 
         def call(self, messages, tools=None, callbacks=None, available_functions=None, from_task=None, from_agent=None) -> str:
             self.call_count += 1
+
+            if "valid" in str(messages) and "feedback" in str(messages):
+                return '{"valid": true, "feedback": null}'
+
+            if "Thought:" in str(messages):
+                return f"Thought: I will analyze soccer players\nFinal Answer: {self.response}"
+
             return self.response
 
+        def supports_function_calling(self) -> bool:
+            return False
+
+        def supports_stop_words(self) -> bool:
+            return False
+
+        def get_context_window_size(self) -> int:
+            return 4096
+
     custom_llm = CustomLLM(response="Brazilian soccer players are the best!")
 
     agent = Agent(
@@ -469,12 +485,14 @@ def test_lite_agent_with_custom_llm_and_guardrails():
 
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_lite_agent_with_invalid_llm():
-    """Test that Agent raises proper error with invalid LLM type."""
+    """Test that LiteAgent raises proper error with invalid LLM type."""
+    from crewai.lite_agent import LiteAgent
+
     class InvalidLLM:
         pass
 
     with pytest.raises(ValueError) as exc_info:
-        Agent(
+        LiteAgent(
             role="Test Agent",
             goal="Test goal",
             backstory="Test backstory",