refactor: replace AgentExecutionErrorEvent with TaskFailedEvent for LLM call handling

- Updated Agent class to emit TaskFailedEvent instead of AgentExecutionErrorEvent when LLM calls are blocked.
- Removed unnecessary LLMCallBlockedError handling from CrewAgentExecutor.
- Updated hook tests to assert that crew.kickoff() raises LLMCallBlockedError when a before-LLM-call hook blocks the call.
- Improved code clarity and consistency in event handling across agent execution.
Author: lorenzejay
Date: 2025-12-21 22:05:11 -08:00
parent 05c42791c9
commit f39379ddd5
3 changed files with 7 additions and 22 deletions
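
For context, the new error path in Agent boils down to the pattern sketched below. This is a condensed illustration of what the diff introduces, not the exact crewai source: the helper name, the surrounding structure, and the crewai_event_bus import path are assumptions.

from crewai.events import crewai_event_bus  # import path assumed
from crewai.events.types.task_events import TaskFailedEvent
from crewai.hooks import LLMCallBlockedError


def _report_blocked_call(agent, task, e: Exception) -> None:
    # Sketch only: a blocked LLM call is now reported as a task failure
    # rather than as a generic AgentExecutionErrorEvent, and is not retried.
    if isinstance(e, LLMCallBlockedError):
        crewai_event_bus.emit(
            agent,
            event=TaskFailedEvent(task=task, error=str(e)),
        )
        raise e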

View File

@@ -44,6 +44,7 @@ from crewai.events.types.memory_events import (
     MemoryRetrievalCompletedEvent,
     MemoryRetrievalStartedEvent,
 )
+from crewai.events.types.task_events import TaskFailedEvent
 from crewai.hooks import LLMCallBlockedError
 from crewai.knowledge.knowledge import Knowledge
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
@@ -413,8 +414,7 @@ class Agent(BaseAgent):
             if isinstance(e, LLMCallBlockedError):
                 crewai_event_bus.emit(
                     self,
-                    event=AgentExecutionErrorEvent(
-                        agent=self,
+                    event=TaskFailedEvent(  # type: ignore[no-untyped-call]
                         task=task,
                         error=str(e),
                     ),
@@ -626,12 +626,10 @@ class Agent(BaseAgent):
                     ),
                 )
                 raise e
             # Don't retry on intentionally blocked LLM calls
             if isinstance(e, LLMCallBlockedError):
                 crewai_event_bus.emit(
                     self,
-                    event=AgentExecutionErrorEvent(
-                        agent=self,
+                    event=TaskFailedEvent(  # type: ignore[no-untyped-call]
                         task=task,
                         error=str(e),
                     ),
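
A practical consequence of the agent change above: consumers subscribed to task events now also see intentionally blocked LLM calls. A rough sketch of such a listener, assuming the decorator-style registration exposed by crewai_event_bus and the same import paths as in the diff:

from crewai.events import crewai_event_bus  # import path assumed
from crewai.events.types.task_events import TaskFailedEvent


# Hypothetical listener: after this commit, a blocked LLM call arrives here
# as a TaskFailedEvent instead of an AgentExecutionErrorEvent.
@crewai_event_bus.on(TaskFailedEvent)
def log_task_failure(source, event):
    print(f"Task failed: {event.error}")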

View File

@@ -9,7 +9,6 @@ from __future__ import annotations
 from collections.abc import Callable
 from typing import TYPE_CHECKING, Any, Literal, cast
-from crewai.hooks import LLMCallBlockedError
 from pydantic import BaseModel, GetCoreSchemaHandler
 from pydantic_core import CoreSchema, core_schema
@@ -286,12 +285,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 log_error_after=self.log_error_after,
                 printer=self._printer,
             )
-        except LLMCallBlockedError as e:
-            formatted_answer = handle_llm_call_blocked_error(
-                e=e,
-                messages=self.messages,
-            )
         except Exception as e:
             if e.__class__.__module__.startswith("litellm"):
                 # Do not retry on litellm errors

View File

@@ -5,6 +5,7 @@ from __future__ import annotations
 from unittest.mock import Mock
 from crewai.hooks import (
+    LLMCallBlockedError,
     clear_all_llm_call_hooks,
     unregister_after_llm_call_hook,
     unregister_before_llm_call_hook,
@@ -118,16 +119,9 @@ class TestLLMCallHookContext:
                 agent=agent,
             )
-            crew = Crew(agents=[agent], tasks=[task], verbose=True)
-            result = crew.kickoff()
-            print('result', result)
-            assert 1 + 1 == 3
-            assert hook_called["before"] is True, "Before hook should have been called"
-            assert "blocked" in result.raw.lower(), "Result should indicate LLM call was blocked"  # type: ignore
+            with pytest.raises(LLMCallBlockedError):
+                crew = Crew(agents=[agent], tasks=[task], verbose=True)
+                crew.kickoff()
         finally:
             unregister_before_llm_call_hook(blocking_hook)
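
The updated test treats a blocked call as an exception at the kickoff boundary rather than as a "blocked" result string. A condensed, self-contained version of that expectation is sketched below; register_before_llm_call_hook and the return-False-to-block hook contract are assumptions inferred from the unregister/clear helpers visible above, and the Agent/Task/Crew arguments are placeholders.

import pytest

from crewai import Agent, Crew, Task
from crewai.hooks import (
    LLMCallBlockedError,
    register_before_llm_call_hook,  # assumed counterpart to unregister_before_llm_call_hook
    unregister_before_llm_call_hook,
)


def test_blocked_llm_call_raises() -> None:
    def blocking_hook(context) -> bool:
        return False  # assumed contract: returning False blocks the LLM call

    register_before_llm_call_hook(blocking_hook)
    try:
        agent = Agent(role="analyst", goal="answer", backstory="test agent")
        task = Task(description="say hi", expected_output="a greeting", agent=agent)
        crew = Crew(agents=[agent], tasks=[task])
        # After this commit, the blocked call propagates LLMCallBlockedError
        # out of kickoff() instead of producing a result to assert against.
        with pytest.raises(LLMCallBlockedError):
            crew.kickoff()
    finally:
        unregister_before_llm_call_hook(blocking_hook)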