Fix test failures: improve CustomLLM and error handling

- Fix CustomLLM to handle structured output for guardrails via a JSON response
- Add the missing method implementations (supports_function_calling, etc.)
- Handle the 'Thought:' pattern the same way as the working CustomLLM implementation (see the sketch below)
- Change the invalid-LLM test to use LiteAgent instead of Agent
- Improve error messages to use type() instead of __class__
- Address GitHub review feedback for better error handling

Co-Authored-By: João <joao@crewai.com>
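
For reference, a minimal sketch of the CustomLLM shape the first three bullets describe, assuming crewai's documented custom-LLM interface (a BaseLLM subclass implementing call, supports_function_calling, supports_stop_words, and get_context_window_size). The canned JSON guardrail reply and the 'Thought:'-prefixed answer below are illustrative assumptions, not the actual test code:

from crewai import BaseLLM

class CustomLLM(BaseLLM):
    def __init__(self, response: str = "Default response"):
        super().__init__(model="custom-model")
        self.response = response

    def call(self, messages, tools=None, callbacks=None, available_functions=None) -> str:
        # Guardrail evaluations expect structured output, so answer with JSON
        # (the field names here are a hypothetical shape, not LLMGuardrail's
        # confirmed schema).
        if isinstance(messages, str) and "guardrail" in messages.lower():
            return '{"valid": true, "feedback": null}'
        # Lead with 'Thought:' so the ReAct-style executor accepts the reply
        # instead of re-prompting for the missing pattern.
        return f"Thought: I know the answer.\nFinal Answer: {self.response}"

    def supports_function_calling(self) -> bool:
        return False

    def supports_stop_words(self) -> bool:
        return False

    def get_context_window_size(self) -> int:
        return 4096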
commit 6be376f804
parent 5b548d618d
Author: Devin AI
Date: 2025-07-07 18:37:03 +00:00

2 changed files with 22 additions and 4 deletions


@@ -211,7 +211,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         """Set up the LLM and other components after initialization."""
         self.llm = create_llm(self.llm)
         if not isinstance(self.llm, BaseLLM):
-            raise ValueError(f"Expected LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")
+            raise ValueError(f"Expected LLM instance of type BaseLLM, got {type(self.llm).__name__}")

         # Initialize callbacks
         token_callback = TokenCalcHandler(token_cost_process=self._token_process)
@@ -234,7 +234,7 @@ class LiteAgent(FlowTrackable, BaseModel):
             from crewai.tasks.llm_guardrail import LLMGuardrail

             if not isinstance(self.llm, BaseLLM):
-                raise TypeError(f"Guardrail requires LLM instance of type BaseLLM, got {self.llm.__class__.__name__}")
+                raise TypeError(f"Guardrail requires LLM instance of type BaseLLM, got {type(self.llm).__name__}")

             self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)
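
Context for the type() change in both hunks: for ordinary objects, type(x).__name__ and x.__class__.__name__ agree, but __class__ can be overridden per instance — unittest.mock does this deliberately when given a spec — while type() always reports the real runtime type, which matters in error messages produced under test. A small standard-library-only illustration, independent of crewai:

from unittest.mock import Mock

m = Mock(spec=int)
print(m.__class__.__name__)  # 'int'  -- Mock spoofs __class__ to match the spec
print(type(m).__name__)      # 'Mock' -- the actual runtime type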