Compare commits

...

4 Commits

Author SHA1 Message Date
Lorenze Jay
d337862be1 Merge branch 'main' into devin/1743067554-fix-issue-2487 2025-03-27 12:50:26 -07:00
Devin AI
2234672cf3 Fix lint issues: import sorting
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-03-27 09:39:11 +00:00
Devin AI
ce38a3d70e Improve error handling in kickoff_async with LLMError exception class
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-03-27 09:37:26 +00:00
Devin AI
7907c8a147 Fix issue #2487: Ensure LLM errors are properly raised in async context
Co-Authored-By: Joe Moura <joao@crewai.com>
2025-03-27 09:28:27 +00:00
3 changed files with 109 additions and 2 deletions

View File

@@ -1,5 +1,6 @@
import asyncio
import json
import logging
import re
import uuid
import warnings
@@ -56,6 +57,7 @@ from crewai.utilities.events.crew_events import (
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.event_listener import EventListener
from crewai.utilities.exceptions.llm_error import LLMError
from crewai.utilities.formatter import (
aggregate_raw_outputs_from_task_outputs,
aggregate_raw_outputs_from_tasks,
@@ -683,8 +685,23 @@ class Crew(BaseModel):
return results
async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = {}) -> CrewOutput:
    """Asynchronous kickoff method to start the crew execution.

    Runs the synchronous ``kickoff`` in a worker thread via
    ``asyncio.to_thread`` so the event loop is never blocked.

    Args:
        inputs (Optional[Dict[str, Any]]): Input parameters for the crew
            execution. NOTE(review): the mutable default ``{}`` is kept for
            interface compatibility; it is only passed through, never
            mutated here — confirm ``kickoff`` does not mutate it either.

    Returns:
        CrewOutput: The result of the crew execution.

    Raises:
        LLMError: When execution fails; the root cause is attached as
            ``original_error`` and chained via ``__cause__``.
    """
    try:
        return await asyncio.to_thread(self.kickoff, inputs)
    except LLMError:
        # Already the error type promised to callers -- don't double-wrap it.
        raise
    except Exception as e:
        # Lazy %-formatting: the message is only built if the log level emits it.
        logging.error("Error during async crew execution: %s", e)
        # Chain the cause so tracebacks show the original failure.
        raise LLMError(f"Crew execution failed: {str(e)}", original_error=e) from e
async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]:
crew_copies = [self.copy() for _ in inputs]

View File

@@ -0,0 +1,16 @@
"""Exception class for LLM-related errors."""
from typing import Optional
class LLMError(Exception):
    """Base exception class for LLM operation errors."""

    def __init__(self, message: str, original_error: Optional[Exception] = None):
        """Create an LLM error.

        Args:
            message: Human-readable description of the failure.
            original_error: The underlying exception that triggered this
                error, if one exists.
        """
        # Keep a reference to the root cause so callers can inspect it later.
        self.original_error = original_error
        super().__init__(message)

View File

@@ -1501,6 +1501,80 @@ async def test_async_kickoff_for_each_async_empty_input():
# Assertion
assert results == [], "Result should be an empty list when input is empty"
@pytest.mark.asyncio
async def test_kickoff_async_error_handling():
    """Tests error handling in kickoff_async when kickoff raises an error."""
    from unittest.mock import patch

    from crewai.utilities.exceptions.llm_error import LLMError

    kickoff_inputs = {"topic": "dog"}
    researcher = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )
    analysis_task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=researcher,
    )
    test_crew = Crew(agents=[researcher], tasks=[analysis_task])

    # Force the synchronous kickoff path to blow up with a generic error.
    with patch.object(
        Crew, "kickoff", side_effect=Exception("Simulated LLM error")
    ) as mocked_kickoff:
        with pytest.raises(LLMError) as excinfo:
            await test_crew.kickoff_async(kickoff_inputs)
        # Both the wrapper message and the preserved root cause are verified.
        assert "Crew execution failed: Simulated LLM error" in str(excinfo.value)
        assert excinfo.value.original_error is not None
        assert "Simulated LLM error" in str(excinfo.value.original_error)
        mocked_kickoff.assert_called_once_with(kickoff_inputs)
@pytest.mark.asyncio
async def test_kickoff_async_context_length_error_handling():
    """Tests error handling in kickoff_async when kickoff raises a context length error."""
    from unittest.mock import patch

    from crewai.utilities.exceptions.context_window_exceeding_exception import (
        LLMContextLengthExceededException,
    )
    from crewai.utilities.exceptions.llm_error import LLMError

    kickoff_inputs = {"topic": "dog"}
    researcher = Agent(
        role="{topic} Researcher",
        goal="Express hot takes on {topic}.",
        backstory="You have a lot of experience with {topic}.",
    )
    analysis_task = Task(
        description="Give me an analysis around {topic}.",
        expected_output="1 bullet point about {topic} that's under 15 words.",
        agent=researcher,
    )
    test_crew = Crew(agents=[researcher], tasks=[analysis_task])

    # Simulate the provider-specific context-window failure and check it is
    # surfaced to callers as an LLMError with the cause preserved.
    simulated_error = LLMContextLengthExceededException("maximum context length exceeded")
    with patch.object(Crew, "kickoff", side_effect=simulated_error) as mocked_kickoff:
        with pytest.raises(LLMError) as excinfo:
            await test_crew.kickoff_async(kickoff_inputs)
        assert "Crew execution failed" in str(excinfo.value)
        assert "maximum context length exceeded" in str(excinfo.value.original_error)
        mocked_kickoff.assert_called_once_with(kickoff_inputs)
def test_set_agents_step_callback():