From ce38a3d70ef8e31943384dd5b9c1cdbc6e77ba7a Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Thu, 27 Mar 2025 09:37:26 +0000
Subject: [PATCH] Improve error handling in kickoff_async with LLMError
 exception class

Co-Authored-By: Joe Moura
---
 src/crewai/crew.py                           | 18 +++++++-
 src/crewai/utilities/exceptions/llm_error.py | 16 ++++++++
 tests/crew_test.py                           | 43 +++++++++++++++++++-
 3 files changed, 73 insertions(+), 4 deletions(-)
 create mode 100644 src/crewai/utilities/exceptions/llm_error.py

diff --git a/src/crewai/crew.py b/src/crewai/crew.py
index 8c7b310df..5c2af882e 100644
--- a/src/crewai/crew.py
+++ b/src/crewai/crew.py
@@ -1,5 +1,6 @@
 import asyncio
 import json
+import logging
 import re
 import uuid
 import warnings
@@ -38,6 +39,7 @@ from crewai.tasks.conditional_task import ConditionalTask
 from crewai.tasks.task_output import TaskOutput
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.tools.base_tool import BaseTool, Tool
+from crewai.utilities.exceptions.llm_error import LLMError
 from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import I18N, FileHandler, Logger, RPMController
 from crewai.utilities.constants import TRAINING_DATA_FILE
@@ -683,11 +685,23 @@ class Crew(BaseModel):
         return results
 
     async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = {}) -> CrewOutput:
-        """Asynchronous kickoff method to start the crew execution."""
+        """Asynchronous kickoff method to start the crew execution.
+
+        Args:
+            inputs (Optional[Dict[str, Any]]): Input parameters for the crew execution.
+
+        Returns:
+            CrewOutput: The result of the crew execution.
+
+        Raises:
+            LLMError: When the crew execution fails; the original exception
+                is preserved on the ``original_error`` attribute.
+        """
         try:
             return await asyncio.to_thread(self.kickoff, inputs)
         except Exception as e:
-            raise
+            logging.error(f"Error during async crew execution: {e}")
+            raise LLMError(f"Crew execution failed: {e}", original_error=e) from e
 
     async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]:
         crew_copies = [self.copy() for _ in inputs]
diff --git a/src/crewai/utilities/exceptions/llm_error.py b/src/crewai/utilities/exceptions/llm_error.py
new file mode 100644
index 000000000..601ab867b
--- /dev/null
+++ b/src/crewai/utilities/exceptions/llm_error.py
@@ -0,0 +1,16 @@
+"""Exception class for LLM-related errors."""
+from typing import Optional
+
+
+class LLMError(Exception):
+    """Base exception class for LLM operation errors."""
+
+    def __init__(self, message: str, original_error: Optional[Exception] = None):
+        """Initialize the LLM error.
+
+        Args:
+            message: The error message to display
+            original_error: The original exception that caused this error, if any
+        """
+        super().__init__(message)
+        self.original_error = original_error
diff --git a/tests/crew_test.py b/tests/crew_test.py
index 48d252e4f..327bfe0d5 100644
--- a/tests/crew_test.py
+++ b/tests/crew_test.py
@@ -1503,6 +1503,7 @@ async def test_async_kickoff_for_each_async_empty_input():
 async def test_kickoff_async_error_handling():
     """Tests error handling in kickoff_async when kickoff raises an error."""
     from unittest.mock import patch
+    from crewai.utilities.exceptions.llm_error import LLMError
 
     inputs = {"topic": "dog"}
 
@@ -1525,10 +1526,48 @@
     )
 
     with patch.object(Crew, "kickoff", side_effect=Exception("Simulated LLM error")) as mock_kickoff:
-        with pytest.raises(Exception, match="Simulated LLM error"):
+        with pytest.raises(LLMError) as excinfo:
             await crew.kickoff_async(inputs)
-
+
+        assert "Crew execution failed: Simulated LLM error" in str(excinfo.value)
+        assert excinfo.value.original_error is not None
+        assert "Simulated LLM error" in str(excinfo.value.original_error)
     mock_kickoff.assert_called_once_with(inputs)
+
+
+@pytest.mark.asyncio
+async def test_kickoff_async_context_length_error_handling():
+    """Tests error handling in kickoff_async when kickoff raises a context length error."""
+    from unittest.mock import patch
+    from crewai.utilities.exceptions.context_window_exceeding_exception import LLMContextLengthExceededException
+    from crewai.utilities.exceptions.llm_error import LLMError
+
+    inputs = {"topic": "dog"}
+
+    agent = Agent(
+        role="{topic} Researcher",
+        goal="Express hot takes on {topic}.",
+        backstory="You have a lot of experience with {topic}.",
+    )
+
+    task = Task(
+        description="Give me an analysis around {topic}.",
+        expected_output="1 bullet point about {topic} that's under 15 words.",
+        agent=agent,
+    )
+
+    # Create the crew
+    crew = Crew(
+        agents=[agent],
+        tasks=[task],
+    )
+
+    with patch.object(Crew, "kickoff", side_effect=LLMContextLengthExceededException("maximum context length exceeded")) as mock_kickoff:
+        with pytest.raises(LLMError) as excinfo:
+            await crew.kickoff_async(inputs)
+
+        assert "Crew execution failed" in str(excinfo.value)
+        assert "maximum context length exceeded" in str(excinfo.value.original_error)
+    mock_kickoff.assert_called_once_with(inputs)
+
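
Usage sketch (reviewer note, not part of the patch): with this change, callers of
kickoff_async can catch LLMError and branch on the wrapped original_error. The
snippet below is a minimal illustration, assuming the import paths introduced by
this patch, the Agent/Task/Crew constructors used in the tests above, and that
CrewOutput exposes a raw attribute; the asyncio driver is illustrative only.

    import asyncio

    from crewai import Agent, Crew, Task
    from crewai.utilities.exceptions.context_window_exceeding_exception import (
        LLMContextLengthExceededException,
    )
    from crewai.utilities.exceptions.llm_error import LLMError

    async def main() -> None:
        agent = Agent(
            role="Researcher",
            goal="Summarize a topic.",
            backstory="You are a concise analyst.",
        )
        task = Task(
            description="Give me an analysis around dogs.",
            expected_output="One bullet point under 15 words.",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        try:
            result = await crew.kickoff_async(inputs={"topic": "dog"})
            print(result.raw)  # assumes CrewOutput.raw holds the final text
        except LLMError as e:
            # original_error carries the exception raised inside kickoff
            if isinstance(e.original_error, LLMContextLengthExceededException):
                print("Context window exceeded; trim inputs and retry.")
            else:
                print(f"Crew execution failed: {e}")

    asyncio.run(main())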