feat: remove duplicate print on LLM call error (#3183)
Some checks failed
Notify Downstream / notify-downstream (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled

By improving the LiteLLM handler's error handling and output behavior

Co-authored-by: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com>
This commit is contained in:
Lucas Gomide
2025-07-21 23:08:07 -03:00
committed by GitHub
parent 2593242234
commit 27623a1d01
4 changed files with 10 additions and 23 deletions

View File

@@ -120,11 +120,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
raise raise
except Exception as e: except Exception as e:
handle_unknown_error(self._printer, e) handle_unknown_error(self._printer, e)
if e.__class__.__module__.startswith("litellm"): raise
# Do not retry on litellm errors
raise e
else:
raise e
if self.ask_for_human_input: if self.ask_for_human_input:
formatted_answer = self._handle_human_feedback(formatted_answer) formatted_answer = self._handle_human_feedback(formatted_answer)

View File

@@ -59,6 +59,7 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
load_dotenv() load_dotenv()
litellm.suppress_debug_info = True
class FilteredStream(io.TextIOBase): class FilteredStream(io.TextIOBase):
_lock = None _lock = None
@@ -76,9 +77,7 @@ class FilteredStream(io.TextIOBase):
# Skip common noisy LiteLLM banners and any other lines that contain "litellm" # Skip common noisy LiteLLM banners and any other lines that contain "litellm"
if ( if (
"give feedback / get help" in lower_s "litellm.info:" in lower_s
or "litellm.info:" in lower_s
or "litellm" in lower_s
or "Consider using a smaller input or implementing a text splitting strategy" in lower_s or "Consider using a smaller input or implementing a text splitting strategy" in lower_s
): ):
return 0 return 0
@@ -1005,7 +1004,6 @@ class LLM(BaseLLM):
self, self,
event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent), event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
) )
logging.error(f"LiteLLM call failed: {str(e)}")
raise raise
def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None, messages: str | list[dict[str, Any]] | None = None): def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None, messages: str | list[dict[str, Any]] | None = None):

View File

@@ -157,10 +157,6 @@ def get_llm_response(
from_agent=from_agent, from_agent=from_agent,
) )
except Exception as e: except Exception as e:
printer.print(
content=f"Error during LLM call: {e}",
color="red",
)
raise e raise e
if not answer: if not answer:
printer.print( printer.print(
@@ -232,12 +228,17 @@ def handle_unknown_error(printer: Any, exception: Exception) -> None:
printer: Printer instance for output printer: Printer instance for output
exception: The exception that occurred exception: The exception that occurred
""" """
error_message = str(exception)
if "litellm" in error_message:
return
printer.print( printer.print(
content="An unknown error occurred. Please check the details below.", content="An unknown error occurred. Please check the details below.",
color="red", color="red",
) )
printer.print( printer.print(
content=f"Error details: {exception}", content=f"Error details: {error_message}",
color="red", color="red",
) )

View File

@@ -2010,7 +2010,6 @@ def test_crew_agent_executor_litellm_auth_error():
from litellm.exceptions import AuthenticationError from litellm.exceptions import AuthenticationError
from crewai.agents.tools_handler import ToolsHandler from crewai.agents.tools_handler import ToolsHandler
from crewai.utilities import Printer
# Create an agent and executor # Create an agent and executor
agent = Agent( agent = Agent(
@@ -2043,7 +2042,6 @@ def test_crew_agent_executor_litellm_auth_error():
# Mock the LLM call to raise AuthenticationError # Mock the LLM call to raise AuthenticationError
with ( with (
patch.object(LLM, "call") as mock_llm_call, patch.object(LLM, "call") as mock_llm_call,
patch.object(Printer, "print") as mock_printer,
pytest.raises(AuthenticationError) as exc_info, pytest.raises(AuthenticationError) as exc_info,
): ):
mock_llm_call.side_effect = AuthenticationError( mock_llm_call.side_effect = AuthenticationError(
@@ -2057,13 +2055,6 @@ def test_crew_agent_executor_litellm_auth_error():
} }
) )
# Verify error handling messages
error_message = f"Error during LLM call: {str(mock_llm_call.side_effect)}"
mock_printer.assert_any_call(
content=error_message,
color="red",
)
# Verify the call was only made once (no retries) # Verify the call was only made once (no retries)
mock_llm_call.assert_called_once() mock_llm_call.assert_called_once()