Compare commits

...

3 Commits

Author SHA1 Message Date
lorenzejay  b8eb7dd294  fix import  2025-12-21 22:38:24 -08:00
lorenzejay  f39379ddd5  refactor: replace AgentExecutionErrorEvent with TaskFailedEvent for LLM call handling  2025-12-21 22:05:11 -08:00
- Updated Agent class to emit TaskFailedEvent instead of AgentExecutionErrorEvent when LLM calls are blocked.
- Removed unnecessary LLMCallBlockedError handling from CrewAgentExecutor.
- Enhanced test cases to ensure proper exception handling for blocked LLM calls.
- Improved code clarity and consistency in event handling across agent execution.
lorenzejay  05c42791c9  feat: implement LLMCallBlockedError handling in LLM and Agent classes  2025-12-21 21:48:26 -08:00
- Introduced LLMCallBlockedError to manage blocked LLM calls from before_llm_call hooks.
- Updated LLM class to raise LLMCallBlockedError instead of returning a boolean.
- Enhanced Agent class to emit events and handle LLMCallBlockedError during task execution.
- Added error handling in CrewAgentExecutor and agent utilities to gracefully manage blocked calls.
- Updated tests to verify behavior when LLM calls are blocked.
13 changed files with 162 additions and 38 deletions
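Taken together, the commits replace the old convention, where a blocking hook made the hook-dispatch helper return False and callers raised ValueError, with a dedicated LLMCallBlockedError that is never retried. A minimal sketch of the resulting caller-facing behavior, modeled on the tests added in this compare (the model name and hook body are illustrative):

from crewai.hooks import LLMCallBlockedError, unregister_before_llm_call_hook
from crewai.hooks.llm_hooks import register_before_llm_call_hook
from crewai.llm import LLM

def block_all_calls(context) -> bool:
    # Returning False from a before_llm_call hook blocks the call; after
    # this change it surfaces as LLMCallBlockedError and is not retried.
    return False

register_before_llm_call_hook(block_all_calls)
try:
    llm = LLM(model="gpt-4o-mini")  # illustrative model
    try:
        llm.call([{"role": "user", "content": "Say hello"}])
    except LLMCallBlockedError as e:
        print(f"Blocked: {e}")
finally:
    unregister_before_llm_call_hook(block_all_calls)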

View File

@@ -44,6 +44,8 @@ from crewai.events.types.memory_events import (
     MemoryRetrievalCompletedEvent,
     MemoryRetrievalStartedEvent,
 )
+from crewai.events.types.task_events import TaskFailedEvent
+from crewai.hooks import LLMCallBlockedError
 from crewai.knowledge.knowledge import Knowledge
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.lite_agent import LiteAgent
@@ -409,6 +411,15 @@ class Agent(BaseAgent):
                     ),
                 )
                 raise e
+            if isinstance(e, LLMCallBlockedError):
+                crewai_event_bus.emit(
+                    self,
+                    event=TaskFailedEvent(  # type: ignore[no-untyped-call]
+                        task=task,
+                        error=str(e),
+                    ),
+                )
+                raise e
             self._times_executed += 1
             if self._times_executed > self.max_retry_limit:
                 crewai_event_bus.emit(
@@ -615,6 +626,15 @@ class Agent(BaseAgent):
                     ),
                 )
                 raise e
+            if isinstance(e, LLMCallBlockedError):
+                crewai_event_bus.emit(
+                    self,
+                    event=TaskFailedEvent(  # type: ignore[no-untyped-call]
+                        task=task,
+                        error=str(e),
+                    ),
+                )
+                raise e
             self._times_executed += 1
             if self._times_executed > self.max_retry_limit:
                 crewai_event_bus.emit(
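Both execution paths now emit TaskFailedEvent when a blocked call surfaces, so observers subscribe on the task channel rather than on AgentExecutionErrorEvent. A sketch of such a listener, assuming crewai_event_bus is importable from crewai.events alongside the event types used in this hunk and that its on() decorator registers handlers:

from crewai.events import crewai_event_bus  # assumed import path
from crewai.events.types.task_events import TaskFailedEvent

@crewai_event_bus.on(TaskFailedEvent)
def on_task_failed(source, event) -> None:
    # After this refactor, hook-blocked LLM calls arrive here with
    # error=str(LLMCallBlockedError), not as AgentExecutionErrorEvent.
    print(f"Task failed: {event.error}")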

View File

@@ -34,6 +34,7 @@ from crewai.utilities.agent_utils import (
     get_llm_response,
     handle_agent_action_core,
     handle_context_length,
+    handle_llm_call_blocked_error,
     handle_max_iterations_exceeded,
     handle_output_parser_exception,
     handle_unknown_error,
@@ -284,7 +285,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                     log_error_after=self.log_error_after,
                     printer=self._printer,
                 )
-
             except Exception as e:
                 if e.__class__.__module__.startswith("litellm"):
                     # Do not retry on litellm errors

View File

@@ -7,6 +7,7 @@ from crewai.hooks.decorators import (
     before_tool_call,
 )
 from crewai.hooks.llm_hooks import (
+    LLMCallBlockedError,
     LLMCallHookContext,
     clear_after_llm_call_hooks,
     clear_all_llm_call_hooks,
@@ -74,6 +75,8 @@ def clear_all_global_hooks() -> dict[str, tuple[int, int]]:
 __all__ = [
+    # Exceptions
+    "LLMCallBlockedError",
     # Context classes
     "LLMCallHookContext",
     "ToolCallHookContext",

View File

@@ -14,6 +14,14 @@ if TYPE_CHECKING:
     from crewai.utilities.types import LLMMessage


+class LLMCallBlockedError(Exception):
+    """Raised when a before_llm_call hook blocks the LLM call.
+
+    This exception is intentionally NOT retried by the agent,
+    as it represents an intentional block by the hook.
+    """
+
+
 class LLMCallHookContext:
     """Context object passed to LLM call hooks.
@@ -131,6 +139,7 @@ class LLMCallHookContext:
             ...     if response.lower() == "no":
             ...         print("LLM call skipped by user")
         """
+        # from crewai.events.event_listener import event_listener
         printer = Printer()
         event_listener.formatter.pause_live_updates()
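Returning False is not the only way to block: the dispatch code added in base_llm.py re-raises LLMCallBlockedError coming out of a hook, so a hook can raise it directly to attach a specific reason (the third new test below exercises exactly this). A small illustrative sketch:

from crewai.hooks import LLMCallBlockedError
from crewai.hooks.llm_hooks import LLMCallHookContext, register_before_llm_call_hook

def pause_outbound_llm_traffic(context: LLMCallHookContext) -> bool:
    # Raising (rather than returning False) carries a custom reason to
    # the caller instead of the generic "blocked by hook" message.
    raise LLMCallBlockedError("blocked: outbound LLM traffic is paused")

register_before_llm_call_hook(pause_outbound_llm_traffic)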

View File

@@ -1645,8 +1645,7 @@ class LLM(BaseLLM):
             msg_role: Literal["assistant"] = "assistant"
             message["role"] = msg_role
 
-        if not self._invoke_before_llm_call_hooks(messages, from_agent):
-            raise ValueError("LLM call blocked by before_llm_call hook")
+        self._invoke_before_llm_call_hooks(messages, from_agent)
 
         # --- 5) Set up callbacks if provided
         with suppress_warnings():

View File

@@ -591,7 +591,7 @@ class BaseLLM(ABC):
         self,
         messages: list[LLMMessage],
         from_agent: Agent | None = None,
-    ) -> bool:
+    ) -> None:
         """Invoke before_llm_call hooks for direct LLM calls (no agent context).
 
         This method should be called by native provider implementations before
@@ -601,20 +601,19 @@ class BaseLLM(ABC):
             messages: The messages being sent to the LLM
             from_agent: The agent making the call (None for direct calls)
 
-        Returns:
-            True if LLM call should proceed, False if blocked by hook
+        Raises:
+            LLMCallBlockedError: If any hook returns False to block the LLM call.
 
         Example:
             >>> # In a native provider's call() method:
-            >>> if from_agent is None and not self._invoke_before_llm_call_hooks(
-            ...     messages, from_agent
-            ... ):
-            ...     raise ValueError("LLM call blocked by hook")
+            >>> if from_agent is None:
+            ...     self._invoke_before_llm_call_hooks(messages, from_agent)
         """
         # Only invoke hooks for direct calls (no agent context)
         if from_agent is not None:
-            return True
+            return
 
+        from crewai.hooks import LLMCallBlockedError
         from crewai.hooks.llm_hooks import (
             LLMCallHookContext,
             get_before_llm_call_hooks,
@@ -623,7 +622,7 @@ class BaseLLM(ABC):
         before_hooks = get_before_llm_call_hooks()
         if not before_hooks:
-            return True
+            return
 
         hook_context = LLMCallHookContext(
             executor=None,
@@ -643,15 +642,17 @@ class BaseLLM(ABC):
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
raise LLMCallBlockedError(
"LLM call blocked by before_llm_call hook"
)
except LLMCallBlockedError:
raise
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)
return True
def _invoke_after_llm_call_hooks(
self,
messages: list[LLMMessage],
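For native providers the calling convention collapses to a single statement, as the provider hunks below show. A hypothetical provider sketch (the class, client, and message typing are illustrative; only _invoke_before_llm_call_hooks and its raising contract come from this diff):

from typing import Any

from crewai.llms.base_llm import BaseLLM


class ExampleCompletion(BaseLLM):  # hypothetical native provider
    def call(self, messages: list[dict[str, Any]], from_agent: Any = None, **kwargs: Any) -> str:
        # No boolean to check anymore: if a hook blocks the call,
        # LLMCallBlockedError propagates up to the agent, which emits
        # TaskFailedEvent and does not retry.
        self._invoke_before_llm_call_hooks(messages, from_agent)
        return self._client.complete(messages, **kwargs)  # hypothetical client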

View File

@@ -5,7 +5,6 @@ import logging
 import os
 from typing import TYPE_CHECKING, Any, Literal, cast
 
-from anthropic.types import ThinkingBlock
 from pydantic import BaseModel
 
 from crewai.events.types.llm_events import LLMCallType
@@ -197,8 +196,7 @@ class AnthropicCompletion(BaseLLM):
             messages
         )
 
-        if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
-            raise ValueError("LLM call blocked by before_llm_call hook")
+        self._invoke_before_llm_call_hooks(formatted_messages, from_agent)
 
         # Prepare completion parameters
         completion_params = self._prepare_completion_params(

View File

@@ -302,8 +302,7 @@ class AzureCompletion(BaseLLM):
         # Format messages for Azure
         formatted_messages = self._format_messages_for_azure(messages)
 
-        if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
-            raise ValueError("LLM call blocked by before_llm_call hook")
+        self._invoke_before_llm_call_hooks(formatted_messages, from_agent)
 
         # Prepare completion parameters
         completion_params = self._prepare_completion_params(

View File

@@ -315,10 +315,9 @@ class BedrockCompletion(BaseLLM):
             messages
         )
 
-        if not self._invoke_before_llm_call_hooks(
+        self._invoke_before_llm_call_hooks(
             cast(list[LLMMessage], formatted_messages), from_agent
-        ):
-            raise ValueError("LLM call blocked by before_llm_call hook")
+        )
 
         # Prepare request body
         body: BedrockConverseRequestBody = {

View File

@@ -250,8 +250,7 @@ class GeminiCompletion(BaseLLM):
         messages_for_hooks = self._convert_contents_to_dict(formatted_content)
 
-        if not self._invoke_before_llm_call_hooks(messages_for_hooks, from_agent):
-            raise ValueError("LLM call blocked by before_llm_call hook")
+        self._invoke_before_llm_call_hooks(messages_for_hooks, from_agent)
 
         config = self._prepare_generation_config(
             system_instruction, tools, response_model

View File

@@ -190,8 +190,7 @@ class OpenAICompletion(BaseLLM):
         formatted_messages = self._format_messages(messages)
 
-        if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
-            raise ValueError("LLM call blocked by before_llm_call hook")
+        self._invoke_before_llm_call_hooks(formatted_messages, from_agent)
 
         completion_params = self._prepare_completion_params(
             messages=formatted_messages, tools=tools

View File

@@ -16,6 +16,7 @@ from crewai.agents.parser import (
     parse,
 )
 from crewai.cli.config import Settings
+from crewai.hooks import LLMCallBlockedError
 from crewai.llms.base_llm import BaseLLM
 from crewai.tools import BaseTool as CrewAITool
 from crewai.tools.base_tool import BaseTool
@@ -260,8 +261,7 @@ def get_llm_response(
"""
if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer):
raise ValueError("LLM call blocked by before_llm_call hook")
_setup_before_llm_call_hooks(executor_context, printer) # Raises if blocked
messages = executor_context.messages
try:
@@ -314,8 +314,7 @@ async def aget_llm_response(
         ValueError: If the response is None or empty.
     """
     if executor_context is not None:
-        if not _setup_before_llm_call_hooks(executor_context, printer):
-            raise ValueError("LLM call blocked by before_llm_call hook")
+        _setup_before_llm_call_hooks(executor_context, printer)  # Raises if blocked
         messages = executor_context.messages
 
     try:
@@ -461,6 +460,18 @@ def handle_output_parser_exception(
     return formatted_answer
 
 
+def handle_llm_call_blocked_error(
+    e: LLMCallBlockedError,
+    messages: list[LLMMessage],
+) -> AgentFinish:
+    messages.append({"role": "user", "content": str(e)})
+    return AgentFinish(
+        thought="",
+        output=str(e),
+        text=str(e),
+    )
+
+
 def is_context_length_exceeded(exception: Exception) -> bool:
     """Check if the exception is due to context length exceeding.
@@ -728,15 +739,15 @@ def load_agent_from_repository(from_repository: str) -> dict[str, Any]:
 def _setup_before_llm_call_hooks(
     executor_context: CrewAgentExecutor | LiteAgent | None, printer: Printer
-) -> bool:
+) -> None:
     """Setup and invoke before_llm_call hooks for the executor context.
 
     Args:
         executor_context: The executor context to setup the hooks for.
         printer: Printer instance for error logging.
 
-    Returns:
-        True if LLM execution should proceed, False if blocked by a hook.
+    Raises:
+        LLMCallBlockedError: If any hook returns False to block the LLM call.
     """
     if executor_context and executor_context.before_llm_call_hooks:
         from crewai.hooks.llm_hooks import LLMCallHookContext
@@ -752,7 +763,11 @@ def _setup_before_llm_call_hooks(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
raise LLMCallBlockedError(
"LLM call blocked by before_llm_call hook"
)
except LLMCallBlockedError:
raise
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
@@ -773,8 +788,6 @@ def _setup_before_llm_call_hooks(
     else:
         executor_context.messages = []
 
-    return True
-
 
 def _setup_after_llm_call_hooks(
     executor_context: CrewAgentExecutor | LiteAgent | None,
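handle_llm_call_blocked_error, added above, gives executors an alternative to propagation: it folds the block reason into the conversation and returns a terminal AgentFinish. A minimal demonstration based on its definition in this diff:

from crewai.hooks import LLMCallBlockedError
from crewai.utilities.agent_utils import handle_llm_call_blocked_error

err = LLMCallBlockedError("LLM call blocked by before_llm_call hook")
messages = [{"role": "user", "content": "Say hello"}]

# The helper appends the reason to the transcript and returns an
# AgentFinish whose output and text are the reason, so the agent loop
# can end without retrying.
finish = handle_llm_call_blocked_error(err, messages)
assert finish.output == str(err)
assert messages[-1]["content"] == str(err)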

View File

@@ -4,7 +4,12 @@ from __future__ import annotations
 from unittest.mock import Mock
 
-from crewai.hooks import clear_all_llm_call_hooks, unregister_after_llm_call_hook, unregister_before_llm_call_hook
+from crewai.hooks import (
+    LLMCallBlockedError,
+    clear_all_llm_call_hooks,
+    unregister_after_llm_call_hook,
+    unregister_before_llm_call_hook,
+)
 import pytest
 
 from crewai.hooks.llm_hooks import (
@@ -87,6 +92,86 @@ class TestLLMCallHookContext:
         assert new_message in mock_executor.messages
         assert len(mock_executor.messages) == 2
 
+    def test_before_hook_returning_false_gracefully_finishes(self) -> None:
+        """Test that when before_llm_call hook returns False, agent gracefully finishes."""
+        from crewai import Agent, Crew, Task
+
+        hook_called = {"before": False}
+
+        def blocking_hook(context: LLMCallHookContext) -> bool:
+            """Hook that blocks all LLM calls."""
+            hook_called["before"] = True
+            return False
+
+        register_before_llm_call_hook(blocking_hook)
+
+        try:
+            agent = Agent(
+                role="Test Agent",
+                goal="Answer questions",
+                backstory="You are a test agent",
+                verbose=True,
+            )
+            task = Task(
+                description="Say hello",
+                expected_output="A greeting",
+                agent=agent,
+            )
+            with pytest.raises(LLMCallBlockedError):
+                crew = Crew(agents=[agent], tasks=[task], verbose=True)
+                crew.kickoff()
+        finally:
+            unregister_before_llm_call_hook(blocking_hook)
+
+    def test_direct_llm_call_raises_blocked_error_when_hook_returns_false(self) -> None:
+        """Test that direct LLM.call() raises LLMCallBlockedError when hook returns False."""
+        from crewai.hooks import LLMCallBlockedError
+        from crewai.llm import LLM
+
+        hook_called = {"before": False}
+
+        def blocking_hook(context: LLMCallHookContext) -> bool:
+            """Hook that blocks all LLM calls."""
+            hook_called["before"] = True
+            return False
+
+        register_before_llm_call_hook(blocking_hook)
+
+        try:
+            llm = LLM(model="gpt-4o-mini")
+            with pytest.raises(LLMCallBlockedError) as exc_info:
+                llm.call([{"role": "user", "content": "Say hello"}])
+
+            assert hook_called["before"] is True, "Before hook should have been called"
+            assert "blocked" in str(exc_info.value).lower()
+        finally:
+            unregister_before_llm_call_hook(blocking_hook)
+
+    def test_raises_with_llm_call_blocked_exception(self) -> None:
+        """Test that the LLM call raises an exception when the hook raises an exception."""
+        from crewai.hooks import LLMCallBlockedError
+        from crewai.llm import LLM
+
+        def blocking_hook(context: LLMCallHookContext) -> bool:
+            raise LLMCallBlockedError("llm call blocked")
+
+        register_before_llm_call_hook(blocking_hook)
+
+        try:
+            llm = LLM(model="gpt-4o-mini")
+            with pytest.raises(LLMCallBlockedError) as exc_info:
+                llm.call([{"role": "user", "content": "Say hello"}])
+
+            assert "blocked" in str(exc_info.value).lower()
+        finally:
+            unregister_before_llm_call_hook(blocking_hook)
+
 
 class TestBeforeLLMCallHooks:
     """Test before_llm_call hook registration and execution."""