Fix error

Brandon Hancock
2025-01-24 14:58:04 -05:00
parent e125b136b9
commit 63ce0c91f9
3 changed files with 28 additions and 15 deletions

View File

@@ -2,7 +2,6 @@ import shutil
 import subprocess
 from typing import Any, Dict, List, Literal, Optional, Union

-from litellm.llms.base_llm.chat.transformation import BaseLLMException
 from pydantic import Field, InstanceOf, PrivateAttr, model_validator

 from crewai.agents import CacheHandler
@@ -260,7 +259,7 @@ class Agent(BaseAgent):
                     }
                 )["output"]
             except Exception as e:
-                if isinstance(e, BaseLLMException):
+                if e.__class__.__module__.startswith("litellm"):
                     # Do not retry on litellm errors
                     raise e
                 self._times_executed += 1
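The change above stops importing litellm's internal BaseLLMException and instead recognizes litellm-raised exceptions by the module their class is defined in. A minimal, self-contained sketch of that detection idea (the helper name and the sample error below are illustrative, not part of this commit):

def is_litellm_error(exc: Exception) -> bool:
    # litellm exception classes are defined in modules whose names start
    # with "litellm", so the module prefix identifies them without
    # importing litellm's internal base classes.
    return exc.__class__.__module__.startswith("litellm")


try:
    raise ValueError("not a litellm error")
except Exception as e:
    if is_litellm_error(e):
        raise  # propagate immediately, do not retry
    print(f"non-litellm error, safe to retry: {e}")

The module-prefix check avoids depending on an internal import path that can move between litellm releases; the trade-off is that it matches on the module name rather than an explicit exception hierarchy.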

View File

@@ -3,9 +3,6 @@ import re
 from dataclasses import dataclass
 from typing import Any, Callable, Dict, List, Optional, Union

-from litellm.exceptions import AuthenticationError as LiteLLMAuthenticationError
-from litellm.llms.base_llm.chat.transformation import BaseLLMException
-
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.parser import (
@@ -104,7 +101,12 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         try:
             formatted_answer = self._invoke_loop()
         except Exception as e:
-            raise e
+            if e.__class__.__module__.startswith("litellm"):
+                # Do not retry on litellm errors
+                raise e
+            else:
+                self._handle_unknown_error(e)
+                raise e

         if self.ask_for_human_input:
             formatted_answer = self._handle_human_feedback(formatted_answer)
@@ -143,9 +145,12 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 self._invoke_step_callback(formatted_answer)
                 self._append_message(formatted_answer.text, role="assistant")
+            except OutputParserException as e:
+                formatted_answer = self._handle_output_parser_exception(e)
             except Exception as e:
-                if isinstance(e, BaseLLMException):
-                    # Stop execution on litellm errors
+                if e.__class__.__module__.startswith("litellm"):
+                    # Do not retry on litellm errors
                     raise e
                 if self._is_context_length_exceeded(e):
                     self._handle_context_length()
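The executor change above routes exceptions two ways: anything raised from a litellm module propagates unchanged (no retry), while everything else is reported through an unknown-error handler before being re-raised. A simplified stand-in for that control flow, assuming a print-based handler, not the real CrewAgentExecutor:

class MiniExecutor:
    """Simplified stand-in for the executor's error routing."""

    def invoke(self) -> str:
        try:
            return self._invoke_loop()
        except Exception as e:
            if e.__class__.__module__.startswith("litellm"):
                # Do not retry on litellm errors
                raise
            # Everything else is reported, then still re-raised.
            self._handle_unknown_error(e)
            raise

    def _invoke_loop(self) -> str:
        raise RuntimeError("something unexpected happened")

    def _handle_unknown_error(self, exc: Exception) -> None:
        print(f"An unknown error occurred: {exc}")


try:
    MiniExecutor().invoke()
except RuntimeError:
    pass  # the non-litellm error is reported and still raised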

View File

@@ -1676,7 +1676,7 @@ def test_crew_agent_executor_litellm_auth_error():
     with (
         patch.object(LLM, "call") as mock_llm_call,
         patch.object(Printer, "print") as mock_printer,
-        pytest.raises(AuthenticationError, match="Invalid API key"),
+        pytest.raises(AuthenticationError) as exc_info,
     ):
         mock_llm_call.side_effect = AuthenticationError(
             message="Invalid API key", llm_provider="openai", model="gpt-4"
@@ -1689,18 +1689,27 @@ def test_crew_agent_executor_litellm_auth_error():
             }
         )

-    # Verify error handling
+    # Verify error handling messages
+    error_message = f"Error during LLM call: {str(mock_llm_call.side_effect)}"
     mock_printer.assert_any_call(
-        content="An unknown error occurred. Please check the details below.",
-        color="red",
-    )
-    mock_printer.assert_any_call(
-        content="Error details: litellm.AuthenticationError: Invalid API key",
+        content=error_message,
         color="red",
     )

     # Verify the call was only made once (no retries)
     mock_llm_call.assert_called_once()

+    # Assert that the exception was raised and has the expected attributes
+    assert exc_info.type is AuthenticationError
+    assert "Invalid API key".lower() in exc_info.value.message.lower()
+    assert exc_info.value.llm_provider == "openai"
+    assert exc_info.value.model == "gpt-4"
+
+    # Optionally, assert that the exception is an instance of BaseLLMException
+    from litellm.llms.base_llm.chat.transformation import BaseLLMException
+
+    assert isinstance(exc_info.value, BaseLLMException)
+

 def test_litellm_anthropic_error_handling():
     """Test that AnthropicError from LiteLLM is handled correctly and not retried."""