mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-08 23:58:34 +00:00

Improve error handling in kickoff_async with LLMError exception class
Co-Authored-By: Joe Moura <joao@crewai.com>
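What this means for callers: kickoff_async now wraps any failure raised by the synchronous kickoff in an LLMError that keeps the causing exception on original_error. A minimal caller-side sketch (the agent/task definitions are illustrative placeholders modeled on the tests in this commit; actually running it needs a configured LLM):

import asyncio

from crewai import Agent, Crew, Task
from crewai.utilities.exceptions.llm_error import LLMError

# Placeholder crew, mirroring the fixtures used in the tests below.
agent = Agent(
    role="Researcher",
    goal="Express hot takes on a topic.",
    backstory="You have a lot of experience.",
)
task = Task(
    description="Give me an analysis around {topic}.",
    expected_output="1 bullet point that's under 15 words.",
    agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])


async def main() -> None:
    try:
        result = await crew.kickoff_async(inputs={"topic": "dog"})
        print(result.raw)
    except LLMError as e:
        # original_error is the exception raised inside kickoff.
        print(f"Crew failed: {e} (cause: {e.original_error!r})")


asyncio.run(main())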
src/crewai/crew.py
@@ -1,5 +1,6 @@
 import asyncio
 import json
+import logging
 import re
 import uuid
 import warnings
@@ -38,6 +39,7 @@ from crewai.tasks.conditional_task import ConditionalTask
 from crewai.tasks.task_output import TaskOutput
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.tools.base_tool import BaseTool, Tool
+from crewai.utilities.exceptions.llm_error import LLMError
 from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import I18N, FileHandler, Logger, RPMController
 from crewai.utilities.constants import TRAINING_DATA_FILE
@@ -683,11 +685,23 @@ class Crew(BaseModel):
         return results
 
     async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = {}) -> CrewOutput:
-        """Asynchronous kickoff method to start the crew execution."""
+        """Asynchronous kickoff method to start the crew execution.
+
+        Args:
+            inputs (Optional[Dict[str, Any]]): Input parameters for the crew execution
+
+        Returns:
+            CrewOutput: The result of the crew execution
+
+        Raises:
+            LLMError: When LLM-specific errors occur
+            Exception: For other unexpected errors
+        """
         try:
             return await asyncio.to_thread(self.kickoff, inputs)
         except Exception as e:
-            raise
+            logging.error(f"Error during async crew execution: {str(e)}")
+            raise LLMError(f"Crew execution failed: {str(e)}", original_error=e)
 
     async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]:
         crew_copies = [self.copy() for _ in inputs]
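The wrapper works because asyncio.to_thread re-raises, at the await site, any exception the callable raises in its worker thread; that is what routes failures from the synchronous kickoff into the except block above. A standalone sketch of that propagation behavior (the names here are illustrative, not from the diff):

import asyncio


def blocking_work() -> str:
    # Stands in for Crew.kickoff: a synchronous call that may fail.
    raise RuntimeError("boom")


async def main() -> None:
    try:
        # to_thread runs the callable in a worker thread; an exception
        # raised there is re-raised here at the await.
        result = await asyncio.to_thread(blocking_work)
        print(result)
    except RuntimeError as e:
        print(f"caught in the event loop: {e!r}")


asyncio.run(main())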
src/crewai/utilities/exceptions/llm_error.py (new file, 16 lines)
@@ -0,0 +1,16 @@
+"""Exception class for LLM-related errors."""
+from typing import Optional
+
+
+class LLMError(Exception):
+    """Base exception class for LLM operation errors."""
+
+    def __init__(self, message: str, original_error: Optional[Exception] = None):
+        """Initialize the LLM error.
+
+        Args:
+            message: The error message to display
+            original_error: The original exception that caused this error, if any
+        """
+        super().__init__(message)
+        self.original_error = original_error
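On its own, the class is a thin wrapper that keeps the causing exception reachable as an attribute rather than only via __cause__. A minimal sketch of its behavior:

from crewai.utilities.exceptions.llm_error import LLMError

try:
    try:
        raise ValueError("maximum context length exceeded")
    except ValueError as e:
        # Wrap the low-level failure while keeping a reference to it.
        raise LLMError(f"Crew execution failed: {e}", original_error=e)
except LLMError as err:
    assert isinstance(err.original_error, ValueError)
    print(err)                 # -> Crew execution failed: maximum context length exceeded
    print(err.original_error)  # -> maximum context length exceeded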
tests/crew_test.py
@@ -1503,6 +1503,7 @@ async def test_async_kickoff_for_each_async_empty_input():
 async def test_kickoff_async_error_handling():
     """Tests error handling in kickoff_async when kickoff raises an error."""
     from unittest.mock import patch
+    from crewai.utilities.exceptions.llm_error import LLMError
 
     inputs = {"topic": "dog"}
 
@@ -1525,10 +1526,48 @@ async def test_kickoff_async_error_handling():
     )
 
     with patch.object(Crew, "kickoff", side_effect=Exception("Simulated LLM error")) as mock_kickoff:
-        with pytest.raises(Exception, match="Simulated LLM error"):
+        with pytest.raises(LLMError) as excinfo:
             await crew.kickoff_async(inputs)
 
+        assert "Crew execution failed: Simulated LLM error" in str(excinfo.value)
+        assert excinfo.value.original_error is not None
+        assert "Simulated LLM error" in str(excinfo.value.original_error)
         mock_kickoff.assert_called_once_with(inputs)
+
+
+@pytest.mark.asyncio
+async def test_kickoff_async_context_length_error_handling():
+    """Tests error handling in kickoff_async when kickoff raises a context length error."""
+    from unittest.mock import patch
+    from crewai.utilities.exceptions.context_window_exceeding_exception import LLMContextLengthExceededException
+    from crewai.utilities.exceptions.llm_error import LLMError
+
+    inputs = {"topic": "dog"}
+
+    agent = Agent(
+        role="{topic} Researcher",
+        goal="Express hot takes on {topic}.",
+        backstory="You have a lot of experience with {topic}.",
+    )
+
+    task = Task(
+        description="Give me an analysis around {topic}.",
+        expected_output="1 bullet point about {topic} that's under 15 words.",
+        agent=agent,
+    )
+
+    # Create the crew
+    crew = Crew(
+        agents=[agent],
+        tasks=[task],
+    )
+
+    with patch.object(Crew, "kickoff", side_effect=LLMContextLengthExceededException("maximum context length exceeded")) as mock_kickoff:
+        with pytest.raises(LLMError) as excinfo:
+            await crew.kickoff_async(inputs)
+
+        assert "Crew execution failed" in str(excinfo.value)
+        assert "maximum context length exceeded" in str(excinfo.value.original_error)
+        mock_kickoff.assert_called_once_with(inputs)
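A property the second test pins down: because the upstream exception survives on original_error, callers can still branch on the specific failure type. A hedged sketch (the kickoff_with_fallback helper and its trimming strategy are hypothetical, not part of this commit; the exception import paths come from the test above):

from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)
from crewai.utilities.exceptions.llm_error import LLMError


async def kickoff_with_fallback(crew, inputs: dict):
    """Hypothetical helper: retry once with trimmed inputs on context overflow."""
    try:
        return await crew.kickoff_async(inputs=inputs)
    except LLMError as e:
        if isinstance(e.original_error, LLMContextLengthExceededException):
            # Hypothetical recovery: shorten every input value and retry.
            trimmed = {k: str(v)[:1000] for k, v in inputs.items()}
            return await crew.kickoff_async(inputs=trimmed)
        raise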