refactor: Move events module to crewai.events (#3425)

refactor(events): relocate events module & update imports

- Move events from utilities/ to top-level events/ with types/, listeners/, utils/ structure
- Update all source/tests/docs to new import paths
- Add backwards compatibility stubs in crewai.utilities.events with deprecation warnings (a sketch follows below)
- Restore test mocks and fix related test imports
Author: Greyson LaLonde
Date: 2025-09-02 10:06:42 -04:00
Committed by: GitHub
Parent: 1b1a8fdbf4
Commit: 878c1a649a
81 changed files with 1094 additions and 751 deletions
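The compatibility stub itself is not shown in the hunks below, so here is a minimal sketch of what a re-export shim with a deprecation warning can look like (module body and warning text are assumptions, not the actual stub):

```python
# crewai/utilities/events/__init__.py -- hypothetical compatibility stub
import warnings

warnings.warn(
    "crewai.utilities.events is deprecated; import from crewai.events instead.",
    DeprecationWarning,
    stacklevel=2,
)

# Re-export the public API from its new home so legacy imports keep working.
from crewai.events import *  # noqa: E402,F401,F403
```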

View File

@@ -44,12 +44,12 @@ To create a custom event listener, you need to:
Here's a simple example of a custom event listener class:
```python
-from crewai.utilities.events import (
+from crewai.events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
AgentExecutionCompletedEvent,
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener):
def __init__(self):
@@ -146,7 +146,7 @@ my_project/
```python
# my_custom_listener.py
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
# ... import events ...
class MyCustomListener(BaseEventListener):
@@ -279,7 +279,7 @@ Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` i
For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:
```python
-from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent
+from crewai.events import crewai_event_bus, CrewKickoffStartedEvent
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStartedEvent)
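Putting the changed import together with the truncated snippet above, a complete `scoped_handlers` usage under the new path might read like this sketch (the handler body is illustrative):

```python
from crewai.events import crewai_event_bus, CrewKickoffStartedEvent

with crewai_event_bus.scoped_handlers():

    @crewai_event_bus.on(CrewKickoffStartedEvent)
    def temp_handler(source, event):
        # Only active inside this block; prior handlers are restored on exit.
        print("Temporary handler saw a crew kickoff")

    # ... kick off a crew here ...
```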

View File

@@ -681,11 +681,11 @@ CrewAI emits events during the knowledge retrieval process that you can listen f
#### Example: Monitoring Knowledge Retrieval
```python
-from crewai.utilities.events import (
+from crewai.events import (
    KnowledgeRetrievalStartedEvent,
    KnowledgeRetrievalCompletedEvent,
+    BaseEventListener,
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
class KnowledgeMonitorListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):
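For reference, a completed version of this listener under the new import path could look like the following sketch (`event.query` and `event.retrieved_knowledge` match the event fields shown later in this diff; the print formatting is illustrative):

```python
from crewai.events import (
    BaseEventListener,
    KnowledgeRetrievalStartedEvent,
    KnowledgeRetrievalCompletedEvent,
)


class KnowledgeMonitorListener(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(KnowledgeRetrievalStartedEvent)
        def on_retrieval_started(source, event):
            print("Knowledge retrieval started")

        @crewai_event_bus.on(KnowledgeRetrievalCompletedEvent)
        def on_retrieval_completed(source, event):
            # Completed events carry the query and the retrieved text.
            print(f"Query: {event.query}")
            print(f"Retrieved: {event.retrieved_knowledge[:100]}")
```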

View File

@@ -733,10 +733,10 @@ CrewAI supports streaming responses from LLMs, allowing your application to rece
CrewAI emits events for each chunk received during streaming:
```python
-from crewai.utilities.events import (
+from crewai.events import (
LLMStreamChunkEvent
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):
@@ -758,8 +758,8 @@ CrewAI supports streaming responses from LLMs, allowing your application to rece
```python
from crewai import LLM, Agent, Task, Crew
-from crewai.utilities.events import LLMStreamChunkEvent
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import LLMStreamChunkEvent
+from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):
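A complete streaming listener under the new import path might look like this sketch (the `event.chunk` attribute is an assumption based on the docs examples):

```python
from crewai.events import BaseEventListener, LLMStreamChunkEvent


class StreamPrinter(BaseEventListener):
    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(LLMStreamChunkEvent)
        def on_chunk(source, event):
            # Print each chunk as it arrives, without a trailing newline.
            print(event.chunk, end="", flush=True)  # event.chunk assumed
```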

View File

@@ -1041,8 +1041,8 @@ CrewAI emits the following memory-related events:
Track memory operation timing to optimize your application:
```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
MemoryQueryCompletedEvent,
MemorySaveCompletedEvent
)
@@ -1076,8 +1076,8 @@ memory_monitor = MemoryPerformanceMonitor()
Log memory operations for debugging and insights:
```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
MemorySaveStartedEvent,
MemoryQueryStartedEvent,
MemoryRetrievalCompletedEvent
@@ -1117,8 +1117,8 @@ memory_logger = MemoryLogger()
Capture and respond to memory errors:
```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
MemorySaveFailedEvent,
MemoryQueryFailedEvent
)
@@ -1167,8 +1167,8 @@ error_tracker = MemoryErrorTracker(notify_email="admin@example.com")
Memory events can be forwarded to analytics and monitoring platforms to track performance metrics, detect anomalies, and visualize memory usage patterns:
```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
MemoryQueryCompletedEvent,
MemorySaveCompletedEvent
)
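For orientation, a minimal sketch of the `MemoryPerformanceMonitor` these hunks refer to, rewritten against the new namespace (the `*_time_ms` field names are assumptions):

```python
from crewai.events import (
    BaseEventListener,
    MemoryQueryCompletedEvent,
    MemorySaveCompletedEvent,
)


class MemoryPerformanceMonitor(BaseEventListener):
    def __init__(self):
        # Define state before super().__init__(), which wires up listeners.
        self.query_times_ms = []
        self.save_times_ms = []
        super().__init__()

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemoryQueryCompletedEvent)
        def on_query_completed(source, event):
            self.query_times_ms.append(event.query_time_ms)  # assumed field

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def on_save_completed(source, event):
            self.save_times_ms.append(event.save_time_ms)  # assumed field
```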

View File

@@ -44,12 +44,12 @@ With Prompt Tracing, you can do the following:
Below is a simple example of a custom event listener class:
```python
-from crewai.utilities.events import (
+from crewai.events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
AgentExecutionCompletedEvent,
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener):
def __init__(self):
@@ -146,7 +146,7 @@ my_project/
```python
# my_custom_listener.py
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
# ... import events ...
class MyCustomListener(BaseEventListener):
@@ -279,7 +279,7 @@ CrewAI provides a variety of events that you can listen to
For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:
```python
-from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent
+from crewai.events import crewai_event_bus, CrewKickoffStartedEvent
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStartedEvent)

View File

@@ -683,11 +683,11 @@ CrewAI emits events during the knowledge retrieval process that you can listen to
#### Example: Monitoring Knowledge Retrieval
```python
-from crewai.utilities.events import (
+from crewai.events import (
    KnowledgeRetrievalStartedEvent,
    KnowledgeRetrievalCompletedEvent,
+    BaseEventListener,
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
class KnowledgeMonitorListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):

View File

@@ -731,10 +731,10 @@ CrewAI supports streaming responses from LLMs, allowing your application to
CrewAI emits events for each chunk received during streaming:
```python
-from crewai.utilities.events import (
+from crewai.events import (
LLMStreamChunkEvent
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):
@@ -756,8 +756,8 @@ CrewAI supports streaming responses from LLMs, allowing your application to
```python
from crewai import LLM, Agent, Task, Crew
-from crewai.utilities.events import LLMStreamChunkEvent
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import LLMStreamChunkEvent
+from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):

View File

@@ -985,8 +985,8 @@ CrewAI emits the following memory-related events:
Track memory operation timing to optimize your application:
```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
MemoryQueryCompletedEvent,
MemorySaveCompletedEvent
)
@@ -1020,8 +1020,8 @@ memory_monitor = MemoryPerformanceMonitor()
Log memory operations for debugging and insights:
```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
MemorySaveStartedEvent,
MemoryQueryStartedEvent,
MemoryRetrievalCompletedEvent
@@ -1061,8 +1061,8 @@ memory_logger = MemoryLogger()
Capture and respond to memory errors:
```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
MemorySaveFailedEvent,
MemoryQueryFailedEvent
)
@@ -1111,8 +1111,8 @@ error_tracker = MemoryErrorTracker(notify_email="admin@example.com")
Memory events can be forwarded to analytics and monitoring platforms to track performance metrics, detect anomalies, and visualize memory usage patterns:
```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
MemoryQueryCompletedEvent,
MemorySaveCompletedEvent
)

View File

@@ -44,12 +44,12 @@ To create a custom event listener, you need to:
Here is a simple example of a custom event listener class:
```python
-from crewai.utilities.events import (
+from crewai.events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
AgentExecutionCompletedEvent,
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
class MeuListenerPersonalizado(BaseEventListener):
def __init__(self):
@@ -146,7 +146,7 @@ my_project/
```python
# my_custom_listener.py
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
# ... import events ...
class MyCustomListener(BaseEventListener):
@@ -268,7 +268,7 @@ Additional fields vary by event type. For example, `CrewKickoffCompleted
For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:
```python
-from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent
+from crewai.events import crewai_event_bus, CrewKickoffStartedEvent
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStartedEvent)

View File

@@ -681,11 +681,11 @@ CrewAI emits events during the knowledge retrieval process that you
#### Example: Monitoring Knowledge Retrieval
```python
-from crewai.utilities.events import (
+from crewai.events import (
    KnowledgeRetrievalStartedEvent,
    KnowledgeRetrievalCompletedEvent,
+    BaseEventListener,
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
class KnowledgeMonitorListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):

View File

@@ -708,10 +708,10 @@ CrewAI supports streaming responses from LLMs, allowing your application
CrewAI emits events for each chunk received during streaming:
```python
-from crewai.utilities.events import (
+from crewai.events import (
LLMStreamChunkEvent
)
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):

View File

@@ -38,17 +38,17 @@ from crewai.utilities.agent_utils import (
)
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
-from crewai.utilities.events.agent_events import (
+from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
-from crewai.utilities.events.crewai_event_bus import crewai_event_bus
-from crewai.utilities.events.memory_events import (
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.memory_events import (
MemoryRetrievalStartedEvent,
MemoryRetrievalCompletedEvent,
)
-from crewai.utilities.events.knowledge_events import (
+from crewai.events.types.knowledge_events import (
KnowledgeQueryCompletedEvent,
KnowledgeQueryFailedEvent,
KnowledgeQueryStartedEvent,

View File

@@ -1,4 +1,4 @@
-from typing import Any, AsyncIterable, Dict, List, Optional
+from typing import Any, Dict, List, Optional
from pydantic import Field, PrivateAttr
@@ -14,15 +14,14 @@ from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import BaseTool
from crewai.utilities import Logger
from crewai.utilities.converter import Converter
-from crewai.utilities.events import crewai_event_bus
-from crewai.utilities.events.agent_events import (
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
try:
from langchain_core.messages import ToolMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

View File

@@ -10,8 +10,8 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Logger
-from crewai.utilities.events import crewai_event_bus
-from crewai.utilities.events.agent_events import (
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,

View File

@@ -7,7 +7,7 @@ from crewai.utilities import I18N
from crewai.utilities.converter import ConverterError
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.printer import Printer
-from crewai.utilities.events.event_listener import event_listener
+from crewai.events.event_listener import event_listener
if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent

View File

@@ -30,11 +30,11 @@ from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
from crewai.utilities.logger import Logger
from crewai.utilities.tool_utils import execute_tool_and_check_finality
from crewai.utilities.training_handler import CrewTrainingHandler
-from crewai.utilities.events.agent_events import (
+from crewai.events.types.logging_events import (
AgentLogsStartedEvent,
AgentLogsExecutionEvent,
)
-from crewai.utilities.events.crewai_event_bus import crewai_event_bus
+from crewai.events.event_bus import crewai_event_bus
class CrewAgentExecutor(CrewAgentExecutorMixin):
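`AgentLogsStartedEvent` and `AgentLogsExecutionEvent` now come from the dedicated `logging_events` module (defined later in this diff). A hedged sketch of emitting one of them; the call site and argument values are illustrative:

```python
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.logging_events import AgentLogsStartedEvent

crewai_event_bus.emit(
    source=None,  # in the executor this would be `self`
    event=AgentLogsStartedEvent(
        agent_role="Researcher",
        task_description="Summarize the latest findings",
        verbose=True,
    ),
)
```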

View File

@@ -59,7 +59,7 @@ from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import NOT_SPECIFIED, TRAINING_DATA_FILE
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
-from crewai.utilities.events.crew_events import (
+from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
@@ -70,14 +70,14 @@ from crewai.utilities.events.crew_events import (
CrewTrainFailedEvent,
CrewTrainStartedEvent,
)
-from crewai.utilities.events.crewai_event_bus import crewai_event_bus
-from crewai.utilities.events.event_listener import EventListener
-from crewai.utilities.events.listeners.tracing.trace_listener import (
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.event_listener import EventListener
+from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
-from crewai.utilities.events.listeners.tracing.utils import (
+from crewai.events.listeners.tracing.utils import (
is_tracing_enabled,
)
from crewai.utilities.formatter import (

View File

@@ -0,0 +1,56 @@
"""CrewAI events system for monitoring and extending agent behavior.
This module provides the event infrastructure that allows users to:
- Monitor agent, task, and crew execution
- Track memory operations and performance
- Build custom logging and analytics
- Extend CrewAI with custom event handlers
"""
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemoryQueryCompletedEvent,
MemorySaveCompletedEvent,
MemorySaveStartedEvent,
MemoryQueryStartedEvent,
MemoryRetrievalCompletedEvent,
MemorySaveFailedEvent,
MemoryQueryFailedEvent,
)
from crewai.events.types.knowledge_events import (
KnowledgeRetrievalStartedEvent,
KnowledgeRetrievalCompletedEvent,
)
from crewai.events.types.crew_events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
)
from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
)
from crewai.events.types.llm_events import (
LLMStreamChunkEvent,
)
__all__ = [
"BaseEventListener",
"crewai_event_bus",
"MemoryQueryCompletedEvent",
"MemorySaveCompletedEvent",
"MemorySaveStartedEvent",
"MemoryQueryStartedEvent",
"MemoryRetrievalCompletedEvent",
"MemorySaveFailedEvent",
"MemoryQueryFailedEvent",
"KnowledgeRetrievalStartedEvent",
"KnowledgeRetrievalCompletedEvent",
"CrewKickoffStartedEvent",
"CrewKickoffCompletedEvent",
"AgentExecutionCompletedEvent",
"LLMStreamChunkEvent",
]

View File

@@ -0,0 +1,15 @@
from abc import ABC, abstractmethod
from crewai.events.event_bus import CrewAIEventsBus, crewai_event_bus
class BaseEventListener(ABC):
verbose: bool = False
def __init__(self):
super().__init__()
self.setup_listeners(crewai_event_bus)
@abstractmethod
def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
pass

View File

@@ -0,0 +1,117 @@
from __future__ import annotations
import threading
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Type, TypeVar, cast
from blinker import Signal
from crewai.events.base_events import BaseEvent
from crewai.events.event_types import EventTypes
EventT = TypeVar("EventT", bound=BaseEvent)
class CrewAIEventsBus:
"""
A singleton event bus that uses blinker signals for event handling.
Allows both internal (Flow/Crew) and external event handling.
"""
_instance = None
_lock = threading.Lock()
def __new__(cls):
if cls._instance is None:
with cls._lock:
if cls._instance is None: # prevent race condition
cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
cls._instance._initialize()
return cls._instance
def _initialize(self) -> None:
"""Initialize the event bus internal state"""
self._signal = Signal("crewai_event_bus")
self._handlers: Dict[Type[BaseEvent], List[Callable]] = {}
def on(
self, event_type: Type[EventT]
) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
"""
Decorator to register an event handler for a specific event type.
Usage:
@crewai_event_bus.on(AgentExecutionCompletedEvent)
def on_agent_execution_completed(
source: Any, event: AgentExecutionCompletedEvent
):
print(f"👍 Agent '{event.agent}' completed task")
print(f" Output: {event.output}")
"""
def decorator(
handler: Callable[[Any, EventT], None],
) -> Callable[[Any, EventT], None]:
if event_type not in self._handlers:
self._handlers[event_type] = []
self._handlers[event_type].append(
cast(Callable[[Any, EventT], None], handler)
)
return handler
return decorator
def emit(self, source: Any, event: BaseEvent) -> None:
"""
Emit an event to all registered handlers
Args:
source: The object emitting the event
event: The event instance to emit
"""
for event_type, handlers in self._handlers.items():
if isinstance(event, event_type):
for handler in handlers:
try:
handler(source, event)
except Exception as e:
print(
f"[EventBus Error] Handler '{handler.__name__}' failed for event '{event_type.__name__}': {e}"
)
self._signal.send(source, event=event)
def register_handler(
self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
) -> None:
"""Register an event handler for a specific event type"""
if event_type not in self._handlers:
self._handlers[event_type] = []
self._handlers[event_type].append(
cast(Callable[[Any, EventTypes], None], handler)
)
@contextmanager
def scoped_handlers(self):
"""
Context manager for temporary event handling scope.
Useful for testing or temporary event handling.
Usage:
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStarted)
def temp_handler(source, event):
print("Temporary handler")
# Do stuff...
# Handlers are cleared after the context
"""
previous_handlers = self._handlers.copy()
self._handlers.clear()
try:
yield
finally:
self._handlers = previous_handlers
# Global instance
crewai_event_bus = CrewAIEventsBus()
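End to end, the bus above can be exercised like this (a sketch; the custom event and its `type` value are hypothetical, and `BaseEvent` is assumed to be instantiable once `type` has a default):

```python
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus


class DeployStartedEvent(BaseEvent):  # hypothetical custom event
    type: str = "deploy_started"


@crewai_event_bus.on(DeployStartedEvent)
def on_deploy_started(source, event):
    # Handlers receive (source, event), matching emit() below.
    print(f"handled {event.type} from {source!r}")


crewai_event_bus.emit(source=None, event=DeployStartedEvent())
```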

View File

@@ -1,3 +1,5 @@
+from __future__ import annotations
from io import StringIO
from typing import Any, Dict
@@ -7,8 +9,8 @@ from crewai.task import Task
from crewai.telemetry.telemetry import Telemetry
from crewai.utilities import Logger
from crewai.utilities.constants import EMITTER_COLOR
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events.knowledge_events import (
+from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.types.knowledge_events import (
KnowledgeQueryCompletedEvent,
KnowledgeQueryFailedEvent,
KnowledgeQueryStartedEvent,
@@ -16,28 +18,30 @@ from crewai.utilities.events.knowledge_events import (
KnowledgeRetrievalStartedEvent,
KnowledgeSearchQueryFailedEvent,
)
-from crewai.utilities.events.llm_events import (
+from crewai.events.types.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMStreamChunkEvent,
)
-from crewai.utilities.events.llm_guardrail_events import (
+from crewai.events.types.llm_guardrail_events import (
LLMGuardrailStartedEvent,
LLMGuardrailCompletedEvent,
)
-from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
+from crewai.events.utils.console_formatter import ConsoleFormatter
-from .agent_events import (
+from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionStartedEvent,
-    AgentLogsStartedEvent,
-    AgentLogsExecutionEvent,
LiteAgentExecutionCompletedEvent,
LiteAgentExecutionErrorEvent,
LiteAgentExecutionStartedEvent,
)
-from .crew_events import (
+from crewai.events.types.logging_events import (
+    AgentLogsStartedEvent,
+    AgentLogsExecutionEvent,
+)
+from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
@@ -49,7 +53,7 @@ from .crew_events import (
CrewTrainFailedEvent,
CrewTrainStartedEvent,
)
-from .flow_events import (
+from .types.flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
FlowStartedEvent,
@@ -57,13 +61,13 @@ from .flow_events import (
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
-from .task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
-from .tool_usage_events import (
+from .types.task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
+from .types.tool_usage_events import (
ToolUsageErrorEvent,
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
-from .reasoning_events import (
+from .types.reasoning_events import (
AgentReasoningStartedEvent,
AgentReasoningCompletedEvent,
AgentReasoningFailedEvent,
@@ -162,7 +166,7 @@ class EventListener(BaseEventListener):
span = self._telemetry.task_started(crew=source.agent.crew, task=source)
self.execution_spans[source] = span
# Pass both task ID and task name (if set)
-            task_name = source.name if hasattr(source, 'name') and source.name else None
+            task_name = source.name if hasattr(source, "name") and source.name else None
self.formatter.create_task_branch(
self.formatter.current_crew_tree, source.id, task_name
)
@@ -176,13 +180,13 @@ class EventListener(BaseEventListener):
self.execution_spans[source] = None
# Pass task name if it exists
-            task_name = source.name if hasattr(source, 'name') and source.name else None
+            task_name = source.name if hasattr(source, "name") and source.name else None
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source.agent.role,
"completed",
-                task_name
+                task_name,
)
@crewai_event_bus.on(TaskFailedEvent)
@@ -194,13 +198,13 @@ class EventListener(BaseEventListener):
self.execution_spans[source] = None
# Pass task name if it exists
-            task_name = source.name if hasattr(source, 'name') and source.name else None
+            task_name = source.name if hasattr(source, "name") and source.name else None
self.formatter.update_task_status(
self.formatter.current_crew_tree,
source.id,
source.agent.role,
"failed",
-                task_name
+                task_name,
)
# ----------- AGENT EVENTS -----------

View File

@@ -1,12 +1,12 @@
from typing import Union
-from .agent_events import (
+from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
LiteAgentExecutionCompletedEvent,
)
-from .crew_events import (
+from .types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
@@ -17,39 +17,39 @@ from .crew_events import (
CrewTrainFailedEvent,
CrewTrainStartedEvent,
)
-from .flow_events import (
+from .types.flow_events import (
FlowFinishedEvent,
FlowStartedEvent,
MethodExecutionFailedEvent,
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
-from .llm_events import (
+from .types.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMStreamChunkEvent,
)
-from .llm_guardrail_events import (
+from .types.llm_guardrail_events import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
-from .task_events import (
+from .types.task_events import (
TaskCompletedEvent,
TaskFailedEvent,
TaskStartedEvent,
)
-from .tool_usage_events import (
+from .types.tool_usage_events import (
ToolUsageErrorEvent,
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
-from .reasoning_events import (
+from .types.reasoning_events import (
AgentReasoningStartedEvent,
AgentReasoningCompletedEvent,
AgentReasoningFailedEvent,
)
-from .knowledge_events import (
+from .types.knowledge_events import (
KnowledgeRetrievalStartedEvent,
KnowledgeRetrievalCompletedEvent,
KnowledgeQueryStartedEvent,
@@ -58,7 +58,7 @@ from .knowledge_events import (
KnowledgeSearchQueryFailedEvent,
)
-from .memory_events import (
+from .types.memory_events import (
MemorySaveStartedEvent,
MemorySaveCompletedEvent,
MemorySaveFailedEvent,

View File

@@ -0,0 +1,5 @@
"""Event listener implementations for CrewAI.
This module contains various event listener implementations
for handling memory, tracing, and other event-driven functionality.
"""

View File

@@ -1,5 +1,5 @@
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events.memory_events import (
+from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.types.memory_events import (
MemoryRetrievalCompletedEvent,
MemoryRetrievalStartedEvent,
MemoryQueryFailedEvent,
@@ -9,8 +9,8 @@ from crewai.utilities.events.memory_events import (
MemorySaveFailedEvent,
)
class MemoryListener(BaseEventListener):
def __init__(self, formatter):
super().__init__()
self.formatter = formatter
@@ -19,9 +19,7 @@ class MemoryListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus):
@crewai_event_bus.on(MemoryRetrievalStartedEvent)
-        def on_memory_retrieval_started(
-            source, event: MemoryRetrievalStartedEvent
-        ):
+        def on_memory_retrieval_started(source, event: MemoryRetrievalStartedEvent):
if self.memory_retrieval_in_progress:
return
@@ -33,9 +31,7 @@ class MemoryListener(BaseEventListener):
)
@crewai_event_bus.on(MemoryRetrievalCompletedEvent)
-        def on_memory_retrieval_completed(
-            source, event: MemoryRetrievalCompletedEvent
-        ):
+        def on_memory_retrieval_completed(source, event: MemoryRetrievalCompletedEvent):
if not self.memory_retrieval_in_progress:
return
@@ -44,7 +40,7 @@ class MemoryListener(BaseEventListener):
self.formatter.current_agent_branch,
self.formatter.current_crew_tree,
event.memory_content,
-                event.retrieval_time_ms
+                event.retrieval_time_ms,
)
@crewai_event_bus.on(MemoryQueryCompletedEvent)

View File

@@ -11,7 +11,7 @@ from crewai.cli.plus_api import PlusAPI
from rich.console import Console
from rich.panel import Panel
-from crewai.utilities.events.listeners.tracing.types import TraceEvent
+from crewai.events.listeners.tracing.types import TraceEvent
from logging import getLogger
logger = getLogger(__name__)

View File

@@ -3,8 +3,8 @@ import uuid
from typing import Dict, Any, Optional
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events.agent_events import (
+from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionStartedEvent,
LiteAgentExecutionStartedEvent,
@@ -12,34 +12,34 @@ from crewai.utilities.events.agent_events import (
LiteAgentExecutionErrorEvent,
AgentExecutionErrorEvent,
)
-from crewai.utilities.events.listeners.tracing.types import TraceEvent
-from crewai.utilities.events.reasoning_events import (
+from crewai.events.listeners.tracing.types import TraceEvent
+from crewai.events.types.reasoning_events import (
AgentReasoningStartedEvent,
AgentReasoningCompletedEvent,
AgentReasoningFailedEvent,
)
-from crewai.utilities.events.crew_events import (
+from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
)
-from crewai.utilities.events.task_events import (
+from crewai.events.types.task_events import (
TaskCompletedEvent,
TaskFailedEvent,
TaskStartedEvent,
)
-from crewai.utilities.events.tool_usage_events import (
+from crewai.events.types.tool_usage_events import (
ToolUsageErrorEvent,
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
-from crewai.utilities.events.llm_events import (
+from crewai.events.types.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
)
-from crewai.utilities.events.flow_events import (
+from crewai.events.types.flow_events import (
FlowCreatedEvent,
FlowStartedEvent,
FlowFinishedEvent,
@@ -48,7 +48,7 @@ from crewai.utilities.events.flow_events import (
MethodExecutionFailedEvent,
FlowPlotEvent,
)
-from crewai.utilities.events.llm_guardrail_events import (
+from crewai.events.types.llm_guardrail_events import (
LLMGuardrailStartedEvent,
LLMGuardrailCompletedEvent,
)
@@ -57,7 +57,7 @@ from crewai.utilities.serialization import to_serializable
from .trace_batch_manager import TraceBatchManager
-from crewai.utilities.events.memory_events import (
+from crewai.events.types.memory_events import (
MemoryQueryStartedEvent,
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,

View File

@@ -0,0 +1,5 @@
"""Event type definitions for CrewAI.
This module contains all event types used throughout the CrewAI system
for monitoring and extending agent, crew, task, and tool execution.
"""

View File

@@ -1,13 +1,15 @@
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
+"""Agent-related events moved to break circular dependencies."""
+from __future__ import annotations
+from typing import Any, Dict, List, Optional, Sequence, Union
+from pydantic import model_validator
+from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
-from .base_events import BaseEvent
-if TYPE_CHECKING:
-    from crewai.agents.agent_builder.base_agent import BaseAgent
+from crewai.events.base_events import BaseEvent
class AgentExecutionStartedEvent(BaseEvent):
@@ -21,9 +23,9 @@ class AgentExecutionStartedEvent(BaseEvent):
model_config = {"arbitrary_types_allowed": True}
-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
+    @model_validator(mode="after")
+    def set_fingerprint_data(self):
+        """Set fingerprint data from the agent if available."""
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent"
@@ -32,6 +34,7 @@ class AgentExecutionStartedEvent(BaseEvent):
and self.agent.fingerprint.metadata
):
self.fingerprint_metadata = self.agent.fingerprint.metadata
+        return self
class AgentExecutionCompletedEvent(BaseEvent):
@@ -42,9 +45,11 @@ class AgentExecutionCompletedEvent(BaseEvent):
output: str
type: str = "agent_execution_completed"
-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
+    model_config = {"arbitrary_types_allowed": True}
+    @model_validator(mode="after")
+    def set_fingerprint_data(self):
+        """Set fingerprint data from the agent if available."""
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent"
@@ -53,6 +58,7 @@ class AgentExecutionCompletedEvent(BaseEvent):
and self.agent.fingerprint.metadata
):
self.fingerprint_metadata = self.agent.fingerprint.metadata
+        return self
class AgentExecutionErrorEvent(BaseEvent):
@@ -63,9 +69,11 @@ class AgentExecutionErrorEvent(BaseEvent):
error: str
type: str = "agent_execution_error"
-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
+    model_config = {"arbitrary_types_allowed": True}
+    @model_validator(mode="after")
+    def set_fingerprint_data(self):
+        """Set fingerprint data from the agent if available."""
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent"
@@ -74,6 +82,7 @@ class AgentExecutionErrorEvent(BaseEvent):
and self.agent.fingerprint.metadata
):
self.fingerprint_metadata = self.agent.fingerprint.metadata
+        return self
# New event classes for LiteAgent
@@ -104,26 +113,6 @@ class LiteAgentExecutionErrorEvent(BaseEvent):
type: str = "lite_agent_execution_error"
-# New logging events
-class AgentLogsStartedEvent(BaseEvent):
-    """Event emitted when agent logs should be shown at start"""
-    agent_role: str
-    task_description: Optional[str] = None
-    verbose: bool = False
-    type: str = "agent_logs_started"
-class AgentLogsExecutionEvent(BaseEvent):
-    """Event emitted when agent logs should be shown during execution"""
-    agent_role: str
-    formatted_answer: Any
-    verbose: bool = False
-    type: str = "agent_logs_execution"
-    model_config = {"arbitrary_types_allowed": True}
# Agent Eval events
class AgentEvaluationStartedEvent(BaseEvent):
agent_id: str
@@ -132,6 +121,7 @@ class AgentEvaluationStartedEvent(BaseEvent):
iteration: int
type: str = "agent_evaluation_started"
class AgentEvaluationCompletedEvent(BaseEvent):
agent_id: str
agent_role: str
@@ -141,6 +131,7 @@ class AgentEvaluationCompletedEvent(BaseEvent):
score: Any
type: str = "agent_evaluation_completed"
class AgentEvaluationFailedEvent(BaseEvent):
agent_id: str
agent_role: str
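The switch from `__init__` overrides to `@model_validator(mode="after")` seen in this file is the standard pydantic v2 pattern for post-init side effects; in isolation it looks like this sketch (class and fields are illustrative):

```python
from typing import Optional

from pydantic import BaseModel, model_validator


class Example(BaseModel):
    name: str
    label: Optional[str] = None

    @model_validator(mode="after")
    def fill_label(self):
        # Runs after field validation, replacing __init__ side effects.
        if self.label is None:
            self.label = self.name.upper()
        return self  # "after" validators must return the model instance
```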

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
if TYPE_CHECKING:
from crewai.crew import Crew

View File

@@ -2,7 +2,7 @@ from typing import Any, Dict, Optional, Union
from pydantic import BaseModel, ConfigDict
-from .base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
class FlowEvent(BaseEvent):

View File

@@ -1,10 +1,6 @@
-from typing import TYPE_CHECKING, Any
+from crewai.events.base_events import BaseEvent
+from crewai.agents.agent_builder.base_agent import BaseAgent
-from crewai.utilities.events.base_events import BaseEvent
-if TYPE_CHECKING:
-    from crewai.agents.agent_builder.base_agent import BaseAgent
class KnowledgeRetrievalStartedEvent(BaseEvent):
@@ -20,7 +16,7 @@ class KnowledgeRetrievalCompletedEvent(BaseEvent):
query: str
type: str = "knowledge_search_query_completed"
agent: BaseAgent
-    retrieved_knowledge: Any
+    retrieved_knowledge: str
class KnowledgeQueryStartedEvent(BaseEvent):

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel
-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
class LLMEventBase(BaseEvent):

View File

@@ -1,7 +1,7 @@
from inspect import getsource
from typing import Any, Callable, Optional, Union
-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
class LLMGuardrailStartedEvent(BaseEvent):

View File

@@ -0,0 +1,25 @@
"""Agent logging events that don't reference BaseAgent to avoid circular imports."""
from typing import Any, Optional
from crewai.events.base_events import BaseEvent
class AgentLogsStartedEvent(BaseEvent):
"""Event emitted when agent logs should be shown at start"""
agent_role: str
task_description: Optional[str] = None
verbose: bool = False
type: str = "agent_logs_started"
class AgentLogsExecutionEvent(BaseEvent):
"""Event emitted when agent logs should be shown during execution"""
agent_role: str
formatted_answer: Any
verbose: bool = False
type: str = "agent_logs_execution"
model_config = {"arbitrary_types_allowed": True}

View File

@@ -1,6 +1,6 @@
from typing import Any, Dict, Optional
-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
class MemoryBaseEvent(BaseEvent):

View File

@@ -1,4 +1,4 @@
-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
from typing import Any, Optional

View File

@@ -1,7 +1,7 @@
from typing import Any, Optional
from crewai.tasks.task_output import TaskOutput
-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
class TaskStartedEvent(BaseEvent):

View File

@@ -1,7 +1,7 @@
from datetime import datetime
from typing import Any, Callable, Dict, Optional
-from .base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
class ToolUsageEvent(BaseEvent):

View File

@@ -1,18 +1,30 @@
import threading
from typing import Any, Optional
-from crewai.experimental.evaluation.base_evaluator import AgentEvaluationResult, AggregationStrategy
+from crewai.experimental.evaluation.base_evaluator import (
+    AgentEvaluationResult,
+    AggregationStrategy,
+)
from crewai.agent import Agent
from crewai.task import Task
from crewai.experimental.evaluation.evaluation_display import EvaluationDisplayFormatter
-from crewai.utilities.events.agent_events import AgentEvaluationStartedEvent, AgentEvaluationCompletedEvent, AgentEvaluationFailedEvent
+from crewai.events.types.agent_events import (
+    AgentEvaluationStartedEvent,
+    AgentEvaluationCompletedEvent,
+    AgentEvaluationFailedEvent,
+)
from crewai.experimental.evaluation import BaseEvaluator, create_evaluation_callbacks
from collections.abc import Sequence
-from crewai.utilities.events.crewai_event_bus import crewai_event_bus
-from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
-from crewai.utilities.events.task_events import TaskCompletedEvent
-from crewai.utilities.events.agent_events import LiteAgentExecutionCompletedEvent
-from crewai.experimental.evaluation.base_evaluator import AgentAggregatedEvaluationResult, EvaluationScore, MetricCategory
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.utils.console_formatter import ConsoleFormatter
+from crewai.events.types.task_events import TaskCompletedEvent
+from crewai.events.types.agent_events import LiteAgentExecutionCompletedEvent
+from crewai.experimental.evaluation.base_evaluator import (
+    AgentAggregatedEvaluationResult,
+    EvaluationScore,
+    MetricCategory,
+)
class ExecutionState:
current_agent_id: Optional[str] = None
@@ -24,6 +36,7 @@ class ExecutionState:
self.iterations_results = {}
self.agent_evaluators = {}
class AgentEvaluator:
def __init__(
self,
@@ -46,27 +59,45 @@ class AgentEvaluator:
@property
def _execution_state(self) -> ExecutionState:
-        if not hasattr(self._thread_local, 'execution_state'):
+        if not hasattr(self._thread_local, "execution_state"):
self._thread_local.execution_state = ExecutionState()
return self._thread_local.execution_state
def _subscribe_to_events(self) -> None:
from typing import cast
-        crewai_event_bus.register_handler(TaskCompletedEvent, cast(Any, self._handle_task_completed))
-        crewai_event_bus.register_handler(LiteAgentExecutionCompletedEvent, cast(Any, self._handle_lite_agent_completed))
+        crewai_event_bus.register_handler(
+            TaskCompletedEvent, cast(Any, self._handle_task_completed)
+        )
+        crewai_event_bus.register_handler(
+            LiteAgentExecutionCompletedEvent,
+            cast(Any, self._handle_lite_agent_completed),
+        )
def _handle_task_completed(self, source: Any, event: TaskCompletedEvent) -> None:
assert event.task is not None
agent = event.task.agent
-        if agent and str(getattr(agent, 'id', 'unknown')) in self._execution_state.agent_evaluators:
-            self.emit_evaluation_started_event(agent_role=agent.role, agent_id=str(agent.id), task_id=str(event.task.id))
+        if (
+            agent
+            and str(getattr(agent, "id", "unknown"))
+            in self._execution_state.agent_evaluators
+        ):
+            self.emit_evaluation_started_event(
+                agent_role=agent.role,
+                agent_id=str(agent.id),
+                task_id=str(event.task.id),
+            )
state = ExecutionState()
state.current_agent_id = str(agent.id)
state.current_task_id = str(event.task.id)
-            assert state.current_agent_id is not None and state.current_task_id is not None
-            trace = self.callback.get_trace(state.current_agent_id, state.current_task_id)
+            assert (
+                state.current_agent_id is not None and state.current_task_id is not None
+            )
+            trace = self.callback.get_trace(
+                state.current_agent_id, state.current_task_id
+            )
if not trace:
return
@@ -76,19 +107,28 @@ class AgentEvaluator:
task=event.task,
execution_trace=trace,
final_output=event.output,
-                state=state
+                state=state,
)
current_iteration = self._execution_state.iteration
if current_iteration not in self._execution_state.iterations_results:
self._execution_state.iterations_results[current_iteration] = {}
-            if agent.role not in self._execution_state.iterations_results[current_iteration]:
-                self._execution_state.iterations_results[current_iteration][agent.role] = []
+            if (
+                agent.role
+                not in self._execution_state.iterations_results[current_iteration]
+            ):
+                self._execution_state.iterations_results[current_iteration][
+                    agent.role
+                ] = []
-            self._execution_state.iterations_results[current_iteration][agent.role].append(result)
+            self._execution_state.iterations_results[current_iteration][
+                agent.role
+            ].append(result)
-    def _handle_lite_agent_completed(self, source: object, event: LiteAgentExecutionCompletedEvent) -> None:
+    def _handle_lite_agent_completed(
+        self, source: object, event: LiteAgentExecutionCompletedEvent
+    ) -> None:
agent_info = event.agent_info
agent_id = str(agent_info["id"])
@@ -106,8 +146,12 @@ class AgentEvaluator:
if not target_agent:
return
-        assert state.current_agent_id is not None and state.current_task_id is not None
-        trace = self.callback.get_trace(state.current_agent_id, state.current_task_id)
+        assert (
+            state.current_agent_id is not None and state.current_task_id is not None
+        )
+        trace = self.callback.get_trace(
+            state.current_agent_id, state.current_task_id
+        )
if not trace:
return
@@ -116,7 +160,7 @@ class AgentEvaluator:
agent=target_agent,
execution_trace=trace,
final_output=event.output,
-            state=state
+            state=state,
)
current_iteration = self._execution_state.iteration
@@ -124,10 +168,17 @@ class AgentEvaluator:
self._execution_state.iterations_results[current_iteration] = {}
agent_role = target_agent.role
-        if agent_role not in self._execution_state.iterations_results[current_iteration]:
-            self._execution_state.iterations_results[current_iteration][agent_role] = []
+        if (
+            agent_role
+            not in self._execution_state.iterations_results[current_iteration]
+        ):
+            self._execution_state.iterations_results[current_iteration][
+                agent_role
+            ] = []
-        self._execution_state.iterations_results[current_iteration][agent_role].append(result)
+        self._execution_state.iterations_results[current_iteration][
+            agent_role
+        ].append(result)
def set_iteration(self, iteration: int) -> None:
self._execution_state.iteration = iteration
@@ -136,14 +187,26 @@ class AgentEvaluator:
self._execution_state.iterations_results = {}
def get_evaluation_results(self) -> dict[str, list[AgentEvaluationResult]]:
-        if self._execution_state.iterations_results and self._execution_state.iteration in self._execution_state.iterations_results:
-            return self._execution_state.iterations_results[self._execution_state.iteration]
+        if (
+            self._execution_state.iterations_results
+            and self._execution_state.iteration
+            in self._execution_state.iterations_results
+        ):
+            return self._execution_state.iterations_results[
+                self._execution_state.iteration
+            ]
return {}
def display_results_with_iterations(self) -> None:
-        self.display_formatter.display_summary_results(self._execution_state.iterations_results)
+        self.display_formatter.display_summary_results(
+            self._execution_state.iterations_results
+        )
-    def get_agent_evaluation(self, strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE, include_evaluation_feedback: bool = True) -> dict[str, AgentAggregatedEvaluationResult]:
+    def get_agent_evaluation(
+        self,
+        strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE,
+        include_evaluation_feedback: bool = True,
+    ) -> dict[str, AgentAggregatedEvaluationResult]:
agent_results = {}
with crewai_event_bus.scoped_handlers():
task_results = self.get_evaluation_results()
@@ -157,13 +220,16 @@ class AgentEvaluator:
agent_id=agent_id,
agent_role=agent_role,
results=results,
-                    strategy=strategy
+                    strategy=strategy,
)
agent_results[agent_role] = aggregated_result
-        if self._execution_state.iterations_results and self._execution_state.iteration == max(self._execution_state.iterations_results.keys(), default=0):
+        if (
+            self._execution_state.iterations_results
+            and self._execution_state.iteration
+            == max(self._execution_state.iterations_results.keys(), default=0)
+        ):
self.display_results_with_iterations()
if include_evaluation_feedback:
@@ -172,7 +238,9 @@ class AgentEvaluator:
return agent_results
def display_evaluation_with_feedback(self) -> None:
-        self.display_formatter.display_evaluation_with_feedback(self._execution_state.iterations_results)
+        self.display_formatter.display_evaluation_with_feedback(
+            self._execution_state.iterations_results
+        )
def evaluate(
self,
@@ -184,46 +252,91 @@ class AgentEvaluator:
) -> AgentEvaluationResult:
result = AgentEvaluationResult(
agent_id=state.current_agent_id or str(agent.id),
-            task_id=state.current_task_id or (str(task.id) if task else "unknown_task")
+            task_id=state.current_task_id or (str(task.id) if task else "unknown_task"),
)
assert self.evaluators is not None
task_id = str(task.id) if task else None
for evaluator in self.evaluators:
try:
-                self.emit_evaluation_started_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id)
+                self.emit_evaluation_started_event(
+                    agent_role=agent.role, agent_id=str(agent.id), task_id=task_id
+                )
score = evaluator.evaluate(
agent=agent,
task=task,
execution_trace=execution_trace,
-                    final_output=final_output
+                    final_output=final_output,
)
result.metrics[evaluator.metric_category] = score
-                self.emit_evaluation_completed_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id, metric_category=evaluator.metric_category, score=score)
+                self.emit_evaluation_completed_event(
+                    agent_role=agent.role,
+                    agent_id=str(agent.id),
+                    task_id=task_id,
+                    metric_category=evaluator.metric_category,
+                    score=score,
+                )
except Exception as e:
-                self.emit_evaluation_failed_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id, error=str(e))
-                self.console_formatter.print(f"Error in {evaluator.metric_category.value} evaluator: {str(e)}")
+                self.emit_evaluation_failed_event(
+                    agent_role=agent.role,
+                    agent_id=str(agent.id),
+                    task_id=task_id,
+                    error=str(e),
+                )
+                self.console_formatter.print(
+                    f"Error in {evaluator.metric_category.value} evaluator: {str(e)}"
+                )
return result
-    def emit_evaluation_started_event(self, agent_role: str, agent_id: str, task_id: str | None = None):
+    def emit_evaluation_started_event(
+        self, agent_role: str, agent_id: str, task_id: str | None = None
+    ):
crewai_event_bus.emit(
self,
-            AgentEvaluationStartedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration)
+            AgentEvaluationStartedEvent(
+                agent_role=agent_role,
+                agent_id=agent_id,
+                task_id=task_id,
+                iteration=self._execution_state.iteration,
+            ),
)
-    def emit_evaluation_completed_event(self, agent_role: str, agent_id: str, task_id: str | None = None, metric_category: MetricCategory | None = None, score: EvaluationScore | None = None):
+    def emit_evaluation_completed_event(
+        self,
+        agent_role: str,
+        agent_id: str,
+        task_id: str | None = None,
+        metric_category: MetricCategory | None = None,
+        score: EvaluationScore | None = None,
+    ):
crewai_event_bus.emit(
self,
-            AgentEvaluationCompletedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration, metric_category=metric_category, score=score)
+            AgentEvaluationCompletedEvent(
+                agent_role=agent_role,
+                agent_id=agent_id,
+                task_id=task_id,
+                iteration=self._execution_state.iteration,
+                metric_category=metric_category,
+                score=score,
+            ),
)
-    def emit_evaluation_failed_event(self, agent_role: str, agent_id: str, error: str, task_id: str | None = None):
+    def emit_evaluation_failed_event(
+        self, agent_role: str, agent_id: str, error: str, task_id: str | None = None
+    ):
crewai_event_bus.emit(
self,
-            AgentEvaluationFailedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration, error=error)
+            AgentEvaluationFailedEvent(
+                agent_role=agent_role,
+                agent_id=agent_id,
+                task_id=task_id,
+                iteration=self._execution_state.iteration,
+                error=error,
+            ),
def create_default_evaluator(agents: list[Agent], llm: None = None):
from crewai.experimental.evaluation import (
GoalAlignmentEvaluator,
@@ -231,7 +344,7 @@ def create_default_evaluator(agents: list[Agent], llm: None = None):
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
ToolInvocationEvaluator,
-        ReasoningEfficiencyEvaluator
+        ReasoningEfficiencyEvaluator,
)
evaluators = [

View File

@@ -3,18 +3,28 @@ from typing import Dict, Any, List
from rich.table import Table
from rich.box import HEAVY_EDGE, ROUNDED
from collections.abc import Sequence
-from crewai.experimental.evaluation.base_evaluator import AgentAggregatedEvaluationResult, AggregationStrategy, AgentEvaluationResult, MetricCategory
+from crewai.experimental.evaluation.base_evaluator import (
+    AgentAggregatedEvaluationResult,
+    AggregationStrategy,
+    AgentEvaluationResult,
+    MetricCategory,
+)
from crewai.experimental.evaluation import EvaluationScore
-from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
+from crewai.events.utils.console_formatter import ConsoleFormatter
from crewai.utilities.llm_utils import create_llm
class EvaluationDisplayFormatter:
def __init__(self):
self.console_formatter = ConsoleFormatter()
-    def display_evaluation_with_feedback(self, iterations_results: Dict[int, Dict[str, List[Any]]]):
+    def display_evaluation_with_feedback(
+        self, iterations_results: Dict[int, Dict[str, List[Any]]]
+    ):
if not iterations_results:
self.console_formatter.print("[yellow]No evaluation results to display[/yellow]")
self.console_formatter.print(
"[yellow]No evaluation results to display[/yellow]"
)
return
all_agent_roles: set[str] = set()
@@ -22,7 +32,9 @@ class EvaluationDisplayFormatter:
all_agent_roles.update(iter_results.keys())
for agent_role in sorted(all_agent_roles):
self.console_formatter.print(f"\n[bold cyan]Agent: {agent_role}[/bold cyan]")
self.console_formatter.print(
f"\n[bold cyan]Agent: {agent_role}[/bold cyan]"
)
for iter_num, results in sorted(iterations_results.items()):
if agent_role not in results or not results[agent_role]:
@@ -62,9 +74,7 @@ class EvaluationDisplayFormatter:
table.add_section()
table.add_row(
-                    metric.title(),
-                    score_text,
-                    evaluation_score.feedback or ""
+                    metric.title(), score_text, evaluation_score.feedback or ""
)
if aggregated_result.overall_score is not None:
@@ -82,19 +92,26 @@ class EvaluationDisplayFormatter:
table.add_row(
"Overall Score",
f"[{overall_color}]{overall_score:.1f}[/]",
"Overall agent evaluation score"
"Overall agent evaluation score",
)
self.console_formatter.print(table)
-    def display_summary_results(self, iterations_results: Dict[int, Dict[str, List[AgentAggregatedEvaluationResult]]]):
+    def display_summary_results(
+        self,
+        iterations_results: Dict[int, Dict[str, List[AgentAggregatedEvaluationResult]]],
+    ):
if not iterations_results:
self.console_formatter.print("[yellow]No evaluation results to display[/yellow]")
self.console_formatter.print(
"[yellow]No evaluation results to display[/yellow]"
)
return
self.console_formatter.print("\n")
-        table = Table(title="Agent Performance Scores \n (1-10 Higher is better)", box=HEAVY_EDGE)
+        table = Table(
+            title="Agent Performance Scores \n (1-10 Higher is better)", box=HEAVY_EDGE
+        )
table.add_column("Agent/Metric", style="cyan")
@@ -123,11 +140,14 @@ class EvaluationDisplayFormatter:
agent_id=agent_id,
agent_role=agent_role,
results=agent_results,
-                        strategy=AggregationStrategy.SIMPLE_AVERAGE
+                        strategy=AggregationStrategy.SIMPLE_AVERAGE,
)
-                    valid_scores = [score.score for score in aggregated_result.metrics.values()
-                                    if score.score is not None]
+                    valid_scores = [
+                        score.score
+                        for score in aggregated_result.metrics.values()
+                        if score.score is not None
+                    ]
if valid_scores:
avg_score = sum(valid_scores) / len(valid_scores)
agent_scores_by_iteration[iter_num] = avg_score
@@ -137,7 +157,9 @@ class EvaluationDisplayFormatter:
if not agent_scores_by_iteration:
continue
-            avg_across_iterations = sum(agent_scores_by_iteration.values()) / len(agent_scores_by_iteration)
+            avg_across_iterations = sum(agent_scores_by_iteration.values()) / len(
+                agent_scores_by_iteration
+            )
row = [f"[bold]{agent_role}[/bold]"]
@@ -178,9 +200,13 @@ class EvaluationDisplayFormatter:
row = [f" - {metric.title()}"]
for iter_num in sorted(iterations_results.keys()):
-                if (iter_num in agent_metrics_by_iteration and
-                    metric in agent_metrics_by_iteration[iter_num]):
-                    metric_score = agent_metrics_by_iteration[iter_num][metric].score
+                if (
+                    iter_num in agent_metrics_by_iteration
+                    and metric in agent_metrics_by_iteration[iter_num]
+                ):
+                    metric_score = agent_metrics_by_iteration[iter_num][
+                        metric
+                    ].score
if metric_score is not None:
metric_scores.append(metric_score)
if metric_score >= 8.0:
@@ -225,7 +251,9 @@ class EvaluationDisplayFormatter:
results: Sequence[AgentEvaluationResult],
strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE,
) -> AgentAggregatedEvaluationResult:
-        metrics_by_category: dict[MetricCategory, list[EvaluationScore]] = defaultdict(list)
+        metrics_by_category: dict[MetricCategory, list[EvaluationScore]] = defaultdict(
+            list
+        )
for result in results:
for metric_name, evaluation_score in result.metrics.items():
@@ -246,19 +274,20 @@ class EvaluationDisplayFormatter:
metric=category.title(),
feedbacks=feedbacks,
scores=[s.score for s in scores],
-                    strategy=strategy
+                    strategy=strategy,
)
else:
feedback_summary = feedbacks[0]
aggregated_metrics[category] = EvaluationScore(
-                    score=avg_score,
-                    feedback=feedback_summary
+                    score=avg_score, feedback=feedback_summary
)
overall_score = None
if aggregated_metrics:
-            valid_scores = [m.score for m in aggregated_metrics.values() if m.score is not None]
+            valid_scores = [
+                m.score for m in aggregated_metrics.values() if m.score is not None
+            ]
if valid_scores:
overall_score = sum(valid_scores) / len(valid_scores)
@@ -268,7 +297,7 @@ class EvaluationDisplayFormatter:
metrics=aggregated_metrics,
overall_score=overall_score,
task_count=len(results),
-            aggregation_strategy=strategy
+            aggregation_strategy=strategy,
)
def _summarize_feedbacks(
@@ -277,10 +306,12 @@ class EvaluationDisplayFormatter:
metric: str,
feedbacks: List[str],
scores: List[float | None],
-        strategy: AggregationStrategy
+        strategy: AggregationStrategy,
) -> str:
if len(feedbacks) <= 2 and all(len(fb) < 200 for fb in feedbacks):
return "\n\n".join([f"Feedback {i+1}: {fb}" for i, fb in enumerate(feedbacks)])
return "\n\n".join(
[f"Feedback {i+1}: {fb}" for i, fb in enumerate(feedbacks)]
)
try:
llm = create_llm()
@@ -290,20 +321,26 @@ class EvaluationDisplayFormatter:
if len(feedback) > 500:
feedback = feedback[:500] + "..."
score_text = f"{score:.1f}" if score is not None else "N/A"
formatted_feedbacks.append(f"Feedback #{i+1} (Score: {score_text}):\n{feedback}")
formatted_feedbacks.append(
f"Feedback #{i+1} (Score: {score_text}):\n{feedback}"
)
all_feedbacks = "\n\n" + "\n\n---\n\n".join(formatted_feedbacks)
strategy_guidance = ""
if strategy == AggregationStrategy.BEST_PERFORMANCE:
strategy_guidance = "Focus on the highest-scoring aspects and strengths demonstrated."
strategy_guidance = (
"Focus on the highest-scoring aspects and strengths demonstrated."
)
elif strategy == AggregationStrategy.WORST_PERFORMANCE:
strategy_guidance = "Focus on areas that need improvement and common issues across tasks."
else:
strategy_guidance = "Provide a balanced analysis of strengths and weaknesses across all tasks."
prompt = [
{"role": "system", "content": f"""You are an expert evaluator creating a comprehensive summary of agent performance feedback.
{
"role": "system",
"content": f"""You are an expert evaluator creating a comprehensive summary of agent performance feedback.
Your job is to synthesize multiple feedback points about the same metric across different tasks.
Create a concise, insightful summary that captures the key patterns and themes from all feedback.
@@ -315,14 +352,18 @@ class EvaluationDisplayFormatter:
3. Highlighting patterns across tasks
4. 150-250 words in length
-The summary should be directly usable as final feedback for the agent's performance on this metric."""},
-            {"role": "user", "content": f"""I need a synthesized summary of the following feedback for:
+The summary should be directly usable as final feedback for the agent's performance on this metric.""",
+            },
+            {
+                "role": "user",
+                "content": f"""I need a synthesized summary of the following feedback for:
Agent Role: {agent_role}
Metric: {metric.title()}
{all_feedbacks}
"""}
""",
},
]
assert llm is not None
response = llm.call(prompt)
@@ -330,4 +371,6 @@ class EvaluationDisplayFormatter:
return response
except Exception:
return "Synthesized from multiple tasks: " + "\n\n".join([f"- {fb[:500]}..." for fb in feedbacks])
return "Synthesized from multiple tasks: " + "\n\n".join(
[f"- {fb[:500]}..." for fb in feedbacks]
)
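The summarization path reformatted above is a truncate-then-fallback pattern: short feedback lists skip the LLM entirely, long entries are clipped before prompting, and any LLM failure degrades to a plain concatenation. A minimal standalone sketch of the same pattern; `llm_call` is a hypothetical stand-in for the `create_llm().call` round-trip:

```python
# Minimal sketch of the truncate-then-fallback pattern above; `llm_call` is a
# hypothetical stand-in for the create_llm().call round-trip.
from typing import Callable


def summarize_feedbacks(feedbacks: list[str], llm_call: Callable[[str], str]) -> str:
    # Short feedback lists are returned verbatim, skipping the LLM entirely.
    if len(feedbacks) <= 2 and all(len(fb) < 200 for fb in feedbacks):
        return "\n\n".join(f"Feedback {i + 1}: {fb}" for i, fb in enumerate(feedbacks))
    try:
        # Clip long entries before prompting, mirroring the 500-char cap above.
        clipped = [fb[:500] + "..." if len(fb) > 500 else fb for fb in feedbacks]
        return llm_call("\n\n---\n\n".join(clipped))
    except Exception:
        # Any LLM failure degrades to a plain concatenation of clipped entries.
        return "Synthesized from multiple tasks: " + "\n\n".join(
            f"- {fb[:500]}..." for fb in feedbacks
        )
```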

View File

@@ -5,25 +5,23 @@ from collections.abc import Sequence
from crewai.agent import Agent
from crewai.task import Task
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus
from crewai.utilities.events.agent_events import (
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.event_bus import CrewAIEventsBus
from crewai.events.types.agent_events import (
AgentExecutionStartedEvent,
AgentExecutionCompletedEvent,
LiteAgentExecutionStartedEvent,
LiteAgentExecutionCompletedEvent
LiteAgentExecutionCompletedEvent,
)
from crewai.utilities.events.tool_usage_events import (
from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
ToolExecutionErrorEvent,
ToolSelectionErrorEvent,
ToolValidateInputErrorEvent
)
from crewai.utilities.events.llm_events import (
LLMCallStartedEvent,
LLMCallCompletedEvent
ToolValidateInputErrorEvent,
)
from crewai.events.types.llm_events import LLMCallStartedEvent, LLMCallCompletedEvent
class EvaluationTraceCallback(BaseEventListener):
"""Event listener for collecting execution traces for evaluation.
@@ -68,27 +66,49 @@ class EvaluationTraceCallback(BaseEventListener):
@event_bus.on(ToolUsageFinishedEvent)
def on_tool_completed(source, event: ToolUsageFinishedEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.output, success=True)
self.on_tool_use(
event.tool_name, event.tool_args, event.output, success=True
)
@event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="usage_error")
self.on_tool_use(
event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="usage_error",
)
@event_bus.on(ToolExecutionErrorEvent)
def on_tool_execution_error(source, event: ToolExecutionErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="execution_error")
self.on_tool_use(
event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="execution_error",
)
@event_bus.on(ToolSelectionErrorEvent)
def on_tool_selection_error(source, event: ToolSelectionErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="selection_error")
self.on_tool_use(
event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="selection_error",
)
@event_bus.on(ToolValidateInputErrorEvent)
def on_tool_validate_input_error(source, event: ToolValidateInputErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="validation_error")
self.on_tool_use(
event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="validation_error",
)
@event_bus.on(LLMCallStartedEvent)
def on_llm_call_started(source, event: LLMCallStartedEvent):
@@ -99,7 +119,7 @@ class EvaluationTraceCallback(BaseEventListener):
self.on_llm_call_end(event.messages, event.response)
def on_lite_agent_start(self, agent_info: dict[str, Any]):
self.current_agent_id = agent_info['id']
self.current_agent_id = agent_info["id"]
self.current_task_id = "lite_task"
trace_key = f"{self.current_agent_id}_{self.current_task_id}"
@@ -110,7 +130,7 @@ class EvaluationTraceCallback(BaseEventListener):
tool_uses=[],
llm_calls=[],
start_time=datetime.now(),
final_output=None
final_output=None,
)
def _init_trace(self, trace_key: str, **kwargs: Any):
@@ -128,7 +148,7 @@ class EvaluationTraceCallback(BaseEventListener):
tool_uses=[],
llm_calls=[],
start_time=datetime.now(),
final_output=None
final_output=None,
)
def on_agent_finish(self, agent: Agent, task: Task, output: Any):
@@ -151,8 +171,14 @@ class EvaluationTraceCallback(BaseEventListener):
self._reset_current()
def on_tool_use(self, tool_name: str, tool_args: dict[str, Any] | str, result: Any,
success: bool = True, error_type: str | None = None):
def on_tool_use(
self,
tool_name: str,
tool_args: dict[str, Any] | str,
result: Any,
success: bool = True,
error_type: str | None = None,
):
if not self.current_agent_id or not self.current_task_id:
return
@@ -163,7 +189,7 @@ class EvaluationTraceCallback(BaseEventListener):
"args": tool_args,
"result": result,
"success": success,
"timestamp": datetime.now()
"timestamp": datetime.now(),
}
# Add error information if applicable
@@ -173,7 +199,11 @@ class EvaluationTraceCallback(BaseEventListener):
self.traces[trace_key]["tool_uses"].append(tool_use)
def on_llm_call_start(self, messages: str | Sequence[dict[str, Any]] | None, tools: Sequence[dict[str, Any]] | None = None):
def on_llm_call_start(
self,
messages: str | Sequence[dict[str, Any]] | None,
tools: Sequence[dict[str, Any]] | None = None,
):
if not self.current_agent_id or not self.current_task_id:
return
@@ -186,10 +216,12 @@ class EvaluationTraceCallback(BaseEventListener):
"tools": tools,
"start_time": datetime.now(),
"response": None,
"end_time": None
"end_time": None,
}
def on_llm_call_end(self, messages: str | list[dict[str, Any]] | None, response: Any):
def on_llm_call_end(
self, messages: str | list[dict[str, Any]] | None, response: Any
):
if not self.current_agent_id or not self.current_task_id:
return
@@ -213,7 +245,7 @@ class EvaluationTraceCallback(BaseEventListener):
"response": response,
"start_time": start_time,
"end_time": current_time,
"total_tokens": total_tokens
"total_tokens": total_tokens,
}
self.traces[trace_key]["llm_calls"].append(llm_call)
@@ -227,7 +259,7 @@ class EvaluationTraceCallback(BaseEventListener):
def create_evaluation_callbacks() -> EvaluationTraceCallback:
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
callback = EvaluationTraceCallback()
callback.setup_listeners(crewai_event_bus)
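For context on the handlers reformatted above: every tool event funnels into a single `on_tool_use` call, which appends one record per invocation. A minimal sketch of that record, following the fields visible in the hunks above; the `"tool"` key and the module-level `traces` dict are stand-ins for the listener's internal state:

```python
# Sketch of the per-trace tool-use record accumulated above; `traces` and the
# "tool" key are assumptions, the remaining fields appear in the diff.
from datetime import datetime
from typing import Any

traces: dict[str, dict[str, list[Any]]] = {}


def record_tool_use(
    trace_key: str,
    tool_name: str,
    tool_args: dict[str, Any] | str,
    result: Any,
    success: bool = True,
    error_type: str | None = None,
) -> None:
    tool_use: dict[str, Any] = {
        "tool": tool_name,
        "args": tool_args,
        "result": result,
        "success": success,
        "timestamp": datetime.now(),
    }
    # Error metadata is only attached for failed tool calls.
    if not success and error_type:
        tool_use["error_type"] = error_type
    traces.setdefault(trace_key, {"tool_uses": []})["tool_uses"].append(tool_use)
```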

View File

@@ -25,8 +25,8 @@ from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.types import FlowExecutionData
from crewai.flow.utils import get_possible_return_constants
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.flow_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
FlowPlotEvent,
@@ -35,10 +35,10 @@ from crewai.utilities.events.flow_events import (
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
from crewai.utilities.events.listeners.tracing.trace_listener import (
from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.utilities.events.listeners.tracing.utils import (
from crewai.events.listeners.tracing.utils import (
is_tracing_enabled,
)
from crewai.utilities.printer import Printer
@@ -934,12 +934,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
method = self._methods[start_method_name]
enhanced_method = self._inject_trigger_payload_for_start_method(method)
result = await self._execute_method(
start_method_name, enhanced_method
)
result = await self._execute_method(start_method_name, enhanced_method)
await self._execute_listeners(start_method_name, result)
def _inject_trigger_payload_for_start_method(self, original_method: Callable) -> Callable:
def _inject_trigger_payload_for_start_method(
self, original_method: Callable
) -> Callable:
def prepare_kwargs(*args, **kwargs):
inputs = baggage.get_baggage("flow_inputs") or {}
trigger_payload = inputs.get("crewai_trigger_payload")
@@ -952,15 +952,17 @@ class Flow(Generic[T], metaclass=FlowMeta):
elif trigger_payload is not None:
self._log_flow_event(
f"Trigger payload available but {original_method.__name__} doesn't accept crewai_trigger_payload parameter",
color="yellow"
color="yellow",
)
return args, kwargs
if asyncio.iscoroutinefunction(original_method):
async def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs)
return await original_method(*args, **kwargs)
else:
def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs)
return original_method(*args, **kwargs)
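The hunk above keeps one `prepare_kwargs` hook and picks a sync or async wrapper once, at decoration time, rather than branching on every call. A self-contained sketch of the same pattern, assuming an `inspect.signature` check in place of the parameter test elided by the hunk:

```python
# Standalone sketch of the sync/async wrapper pattern above: one shared
# prepare_kwargs hook, two wrappers, chosen once at decoration time.
import asyncio
import inspect
from collections.abc import Callable
from typing import Any


def inject_payload(method: Callable, payload: Any) -> Callable:
    def prepare_kwargs(*args: Any, **kwargs: Any) -> tuple[tuple, dict]:
        # Only pass the payload if the wrapped method declares the parameter,
        # mirroring the signature check implied by the diff.
        if "crewai_trigger_payload" in inspect.signature(method).parameters:
            kwargs.setdefault("crewai_trigger_payload", payload)
        return args, kwargs

    if asyncio.iscoroutinefunction(method):
        async def enhanced(*args: Any, **kwargs: Any) -> Any:
            args, kwargs = prepare_kwargs(*args, **kwargs)
            return await method(*args, **kwargs)
    else:
        def enhanced(*args: Any, **kwargs: Any) -> Any:
            args, kwargs = prepare_kwargs(*args, **kwargs)
            return method(*args, **kwargs)

    return enhanced
```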

View File

@@ -62,13 +62,13 @@ from crewai.utilities.agent_utils import (
render_text_description_and_args,
)
from crewai.utilities.converter import generate_model_description
from crewai.utilities.events.agent_events import (
AgentLogsExecutionEvent,
from crewai.events.types.logging_events import AgentLogsExecutionEvent
from crewai.events.types.agent_events import (
LiteAgentExecutionCompletedEvent,
LiteAgentExecutionErrorEvent,
LiteAgentExecutionStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer

View File

@@ -23,14 +23,14 @@ from dotenv import load_dotenv
from litellm.types.utils import ChatCompletionDeltaToolCall
from pydantic import BaseModel, Field
from crewai.utilities.events.llm_events import (
from crewai.events.types.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMCallType,
LLMStreamChunkEvent,
)
from crewai.utilities.events.tool_usage_events import (
from crewai.events.types.tool_usage_events import (
ToolUsageStartedEvent,
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
@@ -52,7 +52,7 @@ import io
from typing import TextIO
from crewai.llms.base_llm import BaseLLM
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)

View File

@@ -6,8 +6,8 @@ from pydantic import PrivateAttr
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.rag_storage import RAGStorage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemoryQueryStartedEvent,
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,

View File

@@ -4,8 +4,8 @@ import time
from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemoryQueryStartedEvent,
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,

View File

@@ -3,8 +3,8 @@ import time
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.memory.memory import Memory
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemoryQueryStartedEvent,
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,

View File

@@ -6,8 +6,8 @@ from pydantic import PrivateAttr
from crewai.memory.memory import Memory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.memory.storage.rag_storage import RAGStorage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemoryQueryStartedEvent,
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,

View File

@@ -43,12 +43,12 @@ from crewai.utilities.config import process_config
from crewai.utilities.constants import NOT_SPECIFIED, _NotSpecified
from crewai.utilities.guardrail import process_guardrail, GuardrailResult
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.events import (
from crewai.events.event_types import (
TaskCompletedEvent,
TaskFailedEvent,
TaskStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.i18n import I18N
from crewai.utilities.printer import Printer
from crewai.utilities.string_utils import interpolate_only
@@ -160,11 +160,10 @@ class Task(BaseModel):
)
max_retries: Optional[int] = Field(
default=None,
description="[DEPRECATED] Maximum number of retries when guardrail fails. Use guardrail_max_retries instead. Will be removed in v1.0.0"
description="[DEPRECATED] Maximum number of retries when guardrail fails. Use guardrail_max_retries instead. Will be removed in v1.0.0",
)
guardrail_max_retries: int = Field(
default=3,
description="Maximum number of retries when guardrail fails"
default=3, description="Maximum number of retries when guardrail fails"
)
retry_count: int = Field(default=0, description="Current number of retries")
start_time: Optional[datetime.datetime] = Field(
@@ -367,7 +366,7 @@ class Task(BaseModel):
"The 'max_retries' parameter is deprecated and will be removed in CrewAI v1.0.0. "
"Please use 'guardrail_max_retries' instead.",
DeprecationWarning,
stacklevel=2
stacklevel=2,
)
self.guardrail_max_retries = self.max_retries
return self
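With the validator above, the deprecated field keeps working: it warns and is copied into `guardrail_max_retries`. A sketch of the expected behavior, assuming only the documented `Task` constructor arguments:

```python
# Sketch: the deprecated spelling still works but warns, and the validator
# above copies it into guardrail_max_retries.
import warnings

from crewai import Task

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    task = Task(
        description="Summarize the report",
        expected_output="A short summary",
        max_retries=5,  # deprecated; use guardrail_max_retries instead
    )

assert task.guardrail_max_retries == 5
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```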
@@ -532,11 +531,11 @@ class Task(BaseModel):
def _process_guardrail(self, task_output: TaskOutput) -> GuardrailResult:
assert self._guardrail is not None
from crewai.utilities.events import (
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
crewai_event_bus.emit(
self,

View File

@@ -20,8 +20,8 @@ from crewai.utilities.agent_utils import (
get_tool_names,
render_text_description_and_args,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.tool_usage_events import (
ToolSelectionErrorEvent,
ToolUsageErrorEvent,
ToolUsageFinishedEvent,

View File

@@ -9,8 +9,8 @@ from crewai.agent import Agent
from crewai.llm import BaseLLM
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.crew_events import CrewTestResultEvent
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.crew_events import CrewTestResultEvent
class TaskEvaluationPydanticOutput(BaseModel):

View File

@@ -3,7 +3,8 @@ from typing import List
from pydantic import BaseModel, Field
from crewai.utilities import Converter
from crewai.utilities.events import TaskEvaluationEvent, crewai_event_bus
from crewai.events.types.task_events import TaskEvaluationEvent
from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
from crewai.utilities.training_converter import TrainingConverter

View File

@@ -1,124 +1,140 @@
from .crew_events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewTrainStartedEvent,
CrewTrainCompletedEvent,
CrewTrainFailedEvent,
CrewTestStartedEvent,
CrewTestCompletedEvent,
CrewTestFailedEvent,
)
from .llm_guardrail_events import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
from .agent_events import (
AgentExecutionStartedEvent,
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentEvaluationStartedEvent,
AgentEvaluationCompletedEvent,
AgentEvaluationFailedEvent,
)
from .task_events import (
TaskStartedEvent,
TaskCompletedEvent,
TaskFailedEvent,
TaskEvaluationEvent,
)
from .flow_events import (
FlowCreatedEvent,
FlowStartedEvent,
FlowFinishedEvent,
FlowPlotEvent,
MethodExecutionStartedEvent,
MethodExecutionFinishedEvent,
MethodExecutionFailedEvent,
)
from .crewai_event_bus import CrewAIEventsBus, crewai_event_bus
from .tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
ToolExecutionErrorEvent,
ToolSelectionErrorEvent,
ToolUsageEvent,
ToolValidateInputErrorEvent,
)
from .llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMCallType,
LLMStreamChunkEvent,
"""Backwards compatibility - this module has moved to crewai.events."""
import warnings
from abc import ABC
from collections.abc import Callable
from typing import Any, Type, TypeVar
from typing_extensions import deprecated
import crewai.events as new_events
from crewai.events.base_events import BaseEvent
from crewai.events.event_types import EventTypes
EventT = TypeVar("EventT", bound=BaseEvent)
warnings.warn(
"Importing from 'crewai.utilities.events' is deprecated and will be removed in v1.0.0. "
"Please use 'crewai.events' instead.",
DeprecationWarning,
stacklevel=2
)
from .memory_events import (
MemorySaveStartedEvent,
MemorySaveCompletedEvent,
MemorySaveFailedEvent,
MemoryQueryStartedEvent,
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,
MemoryRetrievalStartedEvent,
MemoryRetrievalCompletedEvent,
)
# events
from .event_listener import EventListener
@deprecated("Use 'from crewai.events import BaseEventListener' instead")
class BaseEventListener(new_events.BaseEventListener, ABC):
"""Deprecated: Use crewai.events.BaseEventListener instead."""
pass
@deprecated("Use 'from crewai.events import crewai_event_bus' instead")
class crewai_event_bus: # noqa: N801
"""Deprecated: Use crewai.events.crewai_event_bus instead."""
@classmethod
def on(
cls, event_type: Type[EventT]
) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
"""Delegate to the actual event bus instance."""
return new_events.crewai_event_bus.on(event_type)
@classmethod
def emit(cls, source: Any, event: BaseEvent) -> None:
"""Delegate to the actual event bus instance."""
return new_events.crewai_event_bus.emit(source, event)
@classmethod
def register_handler(
cls, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
) -> None:
"""Delegate to the actual event bus instance."""
return new_events.crewai_event_bus.register_handler(event_type, handler)
@classmethod
def scoped_handlers(cls) -> Any:
"""Delegate to the actual event bus instance."""
return new_events.crewai_event_bus.scoped_handlers()
@deprecated("Use 'from crewai.events import CrewKickoffStartedEvent' instead")
class CrewKickoffStartedEvent(new_events.CrewKickoffStartedEvent):
"""Deprecated: Use crewai.events.CrewKickoffStartedEvent instead."""
pass
@deprecated("Use 'from crewai.events import CrewKickoffCompletedEvent' instead")
class CrewKickoffCompletedEvent(new_events.CrewKickoffCompletedEvent):
"""Deprecated: Use crewai.events.CrewKickoffCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import AgentExecutionCompletedEvent' instead")
class AgentExecutionCompletedEvent(new_events.AgentExecutionCompletedEvent):
"""Deprecated: Use crewai.events.AgentExecutionCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemoryQueryCompletedEvent' instead")
class MemoryQueryCompletedEvent(new_events.MemoryQueryCompletedEvent):
"""Deprecated: Use crewai.events.MemoryQueryCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemorySaveCompletedEvent' instead")
class MemorySaveCompletedEvent(new_events.MemorySaveCompletedEvent):
"""Deprecated: Use crewai.events.MemorySaveCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemorySaveStartedEvent' instead")
class MemorySaveStartedEvent(new_events.MemorySaveStartedEvent):
"""Deprecated: Use crewai.events.MemorySaveStartedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemoryQueryStartedEvent' instead")
class MemoryQueryStartedEvent(new_events.MemoryQueryStartedEvent):
"""Deprecated: Use crewai.events.MemoryQueryStartedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemoryRetrievalCompletedEvent' instead")
class MemoryRetrievalCompletedEvent(new_events.MemoryRetrievalCompletedEvent):
"""Deprecated: Use crewai.events.MemoryRetrievalCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemorySaveFailedEvent' instead")
class MemorySaveFailedEvent(new_events.MemorySaveFailedEvent):
"""Deprecated: Use crewai.events.MemorySaveFailedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemoryQueryFailedEvent' instead")
class MemoryQueryFailedEvent(new_events.MemoryQueryFailedEvent):
"""Deprecated: Use crewai.events.MemoryQueryFailedEvent instead."""
pass
@deprecated("Use 'from crewai.events import KnowledgeRetrievalStartedEvent' instead")
class KnowledgeRetrievalStartedEvent(new_events.KnowledgeRetrievalStartedEvent):
"""Deprecated: Use crewai.events.KnowledgeRetrievalStartedEvent instead."""
pass
@deprecated("Use 'from crewai.events import KnowledgeRetrievalCompletedEvent' instead")
class KnowledgeRetrievalCompletedEvent(new_events.KnowledgeRetrievalCompletedEvent):
"""Deprecated: Use crewai.events.KnowledgeRetrievalCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import LLMStreamChunkEvent' instead")
class LLMStreamChunkEvent(new_events.LLMStreamChunkEvent):
"""Deprecated: Use crewai.events.LLMStreamChunkEvent instead."""
pass
__all__ = [
"EventListener",
"CrewAIEventsBus",
"crewai_event_bus",
"AgentExecutionStartedEvent",
"AgentExecutionCompletedEvent",
"AgentExecutionErrorEvent",
"AgentEvaluationStartedEvent",
"AgentEvaluationCompletedEvent",
"AgentEvaluationFailedEvent",
"TaskStartedEvent",
"TaskCompletedEvent",
"TaskFailedEvent",
"TaskEvaluationEvent",
"FlowCreatedEvent",
"FlowStartedEvent",
"FlowFinishedEvent",
"FlowPlotEvent",
"MethodExecutionStartedEvent",
"MethodExecutionFinishedEvent",
"MethodExecutionFailedEvent",
"LLMCallCompletedEvent",
"LLMCallFailedEvent",
"LLMCallStartedEvent",
"LLMCallType",
"LLMStreamChunkEvent",
"MemorySaveStartedEvent",
"MemorySaveCompletedEvent",
"MemorySaveFailedEvent",
"MemoryQueryStartedEvent",
"MemoryQueryCompletedEvent",
"MemoryQueryFailedEvent",
"MemoryRetrievalStartedEvent",
"MemoryRetrievalCompletedEvent",
"EventListener",
"CrewKickoffStartedEvent",
"CrewKickoffCompletedEvent",
"CrewKickoffFailedEvent",
"CrewTrainStartedEvent",
"CrewTrainCompletedEvent",
"CrewTrainFailedEvent",
"CrewTestStartedEvent",
"CrewTestCompletedEvent",
"CrewTestFailedEvent",
"LLMGuardrailCompletedEvent",
"LLMGuardrailStartedEvent",
"ToolUsageFinishedEvent",
"ToolUsageErrorEvent",
"ToolUsageStartedEvent",
"ToolExecutionErrorEvent",
"ToolSelectionErrorEvent",
"ToolUsageEvent",
"ToolValidateInputErrorEvent",
'BaseEventListener',
'crewai_event_bus',
'CrewKickoffStartedEvent',
'CrewKickoffCompletedEvent',
'AgentExecutionCompletedEvent',
'MemoryQueryCompletedEvent',
'MemorySaveCompletedEvent',
'MemorySaveStartedEvent',
'MemoryQueryStartedEvent',
'MemoryRetrievalCompletedEvent',
'MemorySaveFailedEvent',
'MemoryQueryFailedEvent',
'KnowledgeRetrievalStartedEvent',
'KnowledgeRetrievalCompletedEvent',
'LLMStreamChunkEvent',
]
__deprecated__ = "Use 'crewai.events' instead of 'crewai.utilities.events'"
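A quick sketch of what the stub above guarantees: the old package path keeps importing, warns on first import, and re-exports subclasses of the relocated event types, so existing `isinstance` checks still pass. Assumes a fresh interpreter so the module cache has not already swallowed the warning:

```python
# Sketch only: module-level warnings fire on first import; later imports hit
# sys.modules and stay silent, so run this in a fresh interpreter.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from crewai.utilities.events import CrewKickoffStartedEvent  # old path

assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# The shim re-exports *subclasses* of the relocated events, so isinstance
# checks written against crewai.events keep passing.
from crewai.events import CrewKickoffStartedEvent as RelocatedEvent

assert issubclass(CrewKickoffStartedEvent, RelocatedEvent)
```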

View File

@@ -1,16 +1,13 @@
from abc import ABC, abstractmethod
from logging import Logger
"""Backwards compatibility stub for crewai.utilities.events.base_event_listener."""
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus, crewai_event_bus
import warnings
from crewai.events import BaseEventListener
warnings.warn(
"Importing from 'crewai.utilities.events.base_event_listener' is deprecated and will be removed in v1.0.0. "
"Please use 'from crewai.events import BaseEventListener' instead.",
DeprecationWarning,
stacklevel=2,
)
class BaseEventListener(ABC):
verbose: bool = False
def __init__(self):
super().__init__()
self.setup_listeners(crewai_event_bus)
@abstractmethod
def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
pass
__all__ = ["BaseEventListener"]

View File

@@ -1,115 +1,13 @@
import threading
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Type, TypeVar, cast
"""Backwards compatibility stub for crewai.utilities.events.crewai_event_bus."""
from blinker import Signal
import warnings
from crewai.events import crewai_event_bus
from crewai.utilities.events.base_events import BaseEvent
from crewai.utilities.events.event_types import EventTypes
EventT = TypeVar("EventT", bound=BaseEvent)
class CrewAIEventsBus:
"""
A singleton event bus that uses blinker signals for event handling.
Allows both internal (Flow/Crew) and external event handling.
"""
_instance = None
_lock = threading.Lock()
def __new__(cls):
if cls._instance is None:
with cls._lock:
if cls._instance is None: # prevent race condition
cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
cls._instance._initialize()
return cls._instance
def _initialize(self) -> None:
"""Initialize the event bus internal state"""
self._signal = Signal("crewai_event_bus")
self._handlers: Dict[Type[BaseEvent], List[Callable]] = {}
def on(
self, event_type: Type[EventT]
) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
"""
Decorator to register an event handler for a specific event type.
Usage:
@crewai_event_bus.on(AgentExecutionCompletedEvent)
def on_agent_execution_completed(
source: Any, event: AgentExecutionCompletedEvent
):
print(f"👍 Agent '{event.agent}' completed task")
print(f" Output: {event.output}")
"""
def decorator(
handler: Callable[[Any, EventT], None],
) -> Callable[[Any, EventT], None]:
if event_type not in self._handlers:
self._handlers[event_type] = []
self._handlers[event_type].append(
cast(Callable[[Any, EventT], None], handler)
)
return handler
return decorator
def emit(self, source: Any, event: BaseEvent) -> None:
"""
Emit an event to all registered handlers
Args:
source: The object emitting the event
event: The event instance to emit
"""
for event_type, handlers in self._handlers.items():
if isinstance(event, event_type):
for handler in handlers:
try:
handler(source, event)
except Exception as e:
print(
f"[EventBus Error] Handler '{handler.__name__}' failed for event '{event_type.__name__}': {e}"
warnings.warn(
"Importing from 'crewai.utilities.events.crewai_event_bus' is deprecated and will be removed in v1.0.0. "
"Please use 'from crewai.events import crewai_event_bus' instead.",
DeprecationWarning,
stacklevel=2,
)
self._signal.send(source, event=event)
def register_handler(
self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
) -> None:
"""Register an event handler for a specific event type"""
if event_type not in self._handlers:
self._handlers[event_type] = []
self._handlers[event_type].append(
cast(Callable[[Any, EventTypes], None], handler)
)
@contextmanager
def scoped_handlers(self):
"""
Context manager for temporary event handling scope.
Useful for testing or temporary event handling.
Usage:
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStarted)
def temp_handler(source, event):
print("Temporary handler")
# Do stuff...
# Handlers are cleared after the context
"""
previous_handlers = self._handlers.copy()
self._handlers.clear()
try:
yield
finally:
self._handlers = previous_handlers
# Global instance
crewai_event_bus = CrewAIEventsBus()
__all__ = ["crewai_event_bus"]

View File

@@ -2,6 +2,7 @@ from typing import Any, Callable, Optional, Tuple, Union
from pydantic import BaseModel, field_validator
class GuardrailResult(BaseModel):
"""Result from a task guardrail execution.
@@ -14,6 +15,7 @@ class GuardrailResult(BaseModel):
result (Any, optional): The validated/transformed result if successful
error (str, optional): Error message if validation failed
"""
success: bool
result: Optional[Any] = None
error: Optional[str] = None
@@ -24,9 +26,13 @@ class GuardrailResult(BaseModel):
values = info.data
if "success" in values:
if values["success"] and v and "error" in values and values["error"]:
raise ValueError("Cannot have both result and error when success is True")
raise ValueError(
"Cannot have both result and error when success is True"
)
if not values["success"] and v and "result" in values and values["result"]:
raise ValueError("Cannot have both result and error when success is False")
raise ValueError(
"Cannot have both result and error when success is False"
)
return v
@classmethod
@@ -44,11 +50,13 @@ class GuardrailResult(BaseModel):
return cls(
success=success,
result=data if success else None,
error=data if not success else None
error=data if not success else None,
)
def process_guardrail(output: Any, guardrail: Callable, retry_count: int) -> GuardrailResult:
def process_guardrail(
output: Any, guardrail: Callable, retry_count: int
) -> GuardrailResult:
"""Process the guardrail for the agent output.
Args:
@@ -60,21 +68,21 @@ def process_guardrail(output: Any, guardrail: Callable, retry_count: int) -> Gua
from crewai.task import TaskOutput
from crewai.lite_agent import LiteAgentOutput
assert isinstance(output, TaskOutput) or isinstance(output, LiteAgentOutput), "Output must be a TaskOutput or LiteAgentOutput"
assert isinstance(output, TaskOutput) or isinstance(
output, LiteAgentOutput
), "Output must be a TaskOutput or LiteAgentOutput"
assert guardrail is not None
from crewai.utilities.events import (
from crewai.events.types.llm_guardrail_events import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
crewai_event_bus.emit(
None,
LLMGuardrailStartedEvent(
guardrail=guardrail, retry_count=retry_count
),
LLMGuardrailStartedEvent(guardrail=guardrail, retry_count=retry_count),
)
result = guardrail(output)
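The validator reformatted above enforces mutual exclusion between `result` and `error`. A short sketch of the intended contract; the exact exception surface is an assumption here (pydantic normally wraps the validator's `ValueError` in a `ValidationError`):

```python
# Sketch of the result/error exclusivity contract enforced above; pydantic is
# assumed to wrap the validator's ValueError in a ValidationError.
from pydantic import ValidationError

from crewai.utilities.guardrail import GuardrailResult

ok = GuardrailResult(success=True, result="validated output")
failed = GuardrailResult(success=False, error="schema mismatch")

try:
    GuardrailResult(success=True, result="output", error="boom")
except ValidationError:
    print("rejected: cannot carry both result and error when success is True")
```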

View File

@@ -8,8 +8,8 @@ from crewai.agent import Agent
from crewai.task import Task
from crewai.utilities import I18N
from crewai.llm import LLM
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.reasoning_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.reasoning_events import (
AgentReasoningStartedEvent,
AgentReasoningCompletedEvent,
AgentReasoningFailedEvent,

View File

@@ -19,8 +19,8 @@ from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities import RPMController
from crewai.utilities.errors import AgentRepositoryError
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageFinishedEvent
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.tool_usage_events import ToolUsageFinishedEvent
from crewai.process import Process

View File

@@ -9,9 +9,9 @@ from crewai import LLM, Agent
from crewai.flow import Flow, start
from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai.tools import BaseTool
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.agent_events import LiteAgentExecutionStartedEvent
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import LiteAgentExecutionStartedEvent
from crewai.events.types.tool_usage_events import ToolUsageStartedEvent
from crewai.llms.base_llm import BaseLLM
from unittest.mock import patch
@@ -322,7 +322,7 @@ def test_sets_parent_flow_when_inside_flow():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_is_called_using_string():
guardrail_events = defaultdict(list)
from crewai.utilities.events import (
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
@@ -359,7 +359,7 @@ def test_guardrail_is_called_using_string():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_is_called_using_callable():
guardrail_events = defaultdict(list)
from crewai.utilities.events import (
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
@@ -392,7 +392,7 @@ def test_guardrail_is_called_using_callable():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_reached_attempt_limit():
guardrail_events = defaultdict(list)
from crewai.utilities.events import (
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)

View File

@@ -80,7 +80,7 @@ def auto_mock_telemetry(request):
with (
patch(
"crewai.utilities.events.event_listener.Telemetry",
"crewai.events.event_listener.Telemetry",
mock_telemetry_class,
),
patch("crewai.tools.tool_usage.Telemetry", mock_telemetry_class),

View File

@@ -13,13 +13,18 @@ from crewai.experimental.evaluation import (
ToolInvocationEvaluator,
ReasoningEfficiencyEvaluator,
MetricCategory,
EvaluationScore
EvaluationScore,
)
from crewai.utilities.events.agent_events import AgentEvaluationStartedEvent, AgentEvaluationCompletedEvent, AgentEvaluationFailedEvent
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.types.agent_events import (
AgentEvaluationStartedEvent,
AgentEvaluationCompletedEvent,
AgentEvaluationFailedEvent,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.experimental.evaluation import create_default_evaluator
class TestAgentEvaluator:
@pytest.fixture
def mock_crew(self):
@@ -28,19 +33,16 @@ class TestAgentEvaluator:
goal="Complete test tasks successfully",
backstory="An agent created for testing purposes",
allow_delegation=False,
verbose=False
verbose=False,
)
task = Task(
description="Test task description",
agent=agent,
expected_output="Expected test output"
expected_output="Expected test output",
)
crew = Crew(
agents=[agent],
tasks=[task]
)
crew = Crew(agents=[agent], tasks=[task])
return crew
def test_set_iteration(self):
@@ -51,7 +53,9 @@ class TestAgentEvaluator:
@pytest.mark.vcr(filter_headers=["authorization"])
def test_evaluate_current_iteration(self, mock_crew):
agent_evaluator = AgentEvaluator(agents=mock_crew.agents, evaluators=[GoalAlignmentEvaluator()])
agent_evaluator = AgentEvaluator(
agents=mock_crew.agents, evaluators=[GoalAlignmentEvaluator()]
)
mock_crew.kickoff()
@@ -59,20 +63,20 @@ class TestAgentEvaluator:
assert isinstance(results, dict)
agent, = mock_crew.agents
task, = mock_crew.tasks
(agent,) = mock_crew.agents
(task,) = mock_crew.tasks
assert len(mock_crew.agents) == 1
assert agent.role in results
assert len(results[agent.role]) == 1
result, = results[agent.role]
(result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id)
assert result.task_id == str(task.id)
goal_alignment, = result.metrics.values()
(goal_alignment,) = result.metrics.values()
assert goal_alignment.score == 5.0
expected_feedback = "The agent's output demonstrates an understanding of the need for a comprehensive document outlining task"
@@ -92,7 +96,7 @@ class TestAgentEvaluator:
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
ToolInvocationEvaluator,
ReasoningEfficiencyEvaluator
ReasoningEfficiencyEvaluator,
]
assert len(agent_evaluator.evaluators) == len(expected_types)
@@ -109,6 +113,7 @@ class TestAgentEvaluator:
with crewai_event_bus.scoped_handlers():
events = {}
@crewai_event_bus.on(AgentEvaluationStartedEvent)
def capture_started(source, event):
events["started"] = event
@@ -121,7 +126,9 @@ class TestAgentEvaluator:
def capture_failed(source, event):
events["failed"] = event
agent_evaluator = AgentEvaluator(agents=[agent], evaluators=[GoalAlignmentEvaluator()])
agent_evaluator = AgentEvaluator(
agents=[agent], evaluators=[GoalAlignmentEvaluator()]
)
agent.kickoff(messages="Complete this task successfully")
@@ -143,13 +150,13 @@ class TestAgentEvaluator:
assert isinstance(results, dict)
result, = results[agent.role]
(result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id)
assert result.task_id == "lite_task"
goal_alignment, = result.metrics.values()
(goal_alignment,) = result.metrics.values()
assert goal_alignment.score == 2.0
expected_feedback = "The agent did not demonstrate a clear understanding of the task goal, which is to complete test tasks successfully"
@@ -168,13 +175,14 @@ class TestAgentEvaluator:
task = Task(
description="Test task description",
agent=agent,
expected_output="Expected test output"
expected_output="Expected test output",
)
mock_crew.agents.append(agent)
mock_crew.tasks.append(task)
with crewai_event_bus.scoped_handlers():
events = {}
@crewai_event_bus.on(AgentEvaluationStartedEvent)
def capture_started(source, event):
events["started"] = event
@@ -187,7 +195,9 @@ class TestAgentEvaluator:
def capture_failed(source, event):
events["failed"] = event
agent_evaluator = AgentEvaluator(agents=[agent], evaluators=[GoalAlignmentEvaluator()])
agent_evaluator = AgentEvaluator(
agents=[agent], evaluators=[GoalAlignmentEvaluator()]
)
mock_crew.kickoff()
assert events.keys() == {"started", "completed"}
@@ -208,13 +218,13 @@ class TestAgentEvaluator:
assert isinstance(results, dict)
assert len(results.keys()) == 1
result, = results[agent.role]
(result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id)
assert result.task_id == str(task.id)
goal_alignment, = result.metrics.values()
(goal_alignment,) = result.metrics.values()
assert goal_alignment.score == 5.0
expected_feedback = "The agent provided a thorough guide on how to conduct a test task but failed to produce specific expected output"
@@ -223,11 +233,10 @@ class TestAgentEvaluator:
assert goal_alignment.raw_response is not None
assert '"score": 5' in goal_alignment.raw_response
@pytest.mark.vcr(filter_headers=["authorization"])
def test_failed_evaluation(self, mock_crew):
agent, = mock_crew.agents
task, = mock_crew.tasks
(agent,) = mock_crew.agents
(task,) = mock_crew.tasks
with crewai_event_bus.scoped_handlers():
events = {}
@@ -247,13 +256,16 @@ class TestAgentEvaluator:
# Create a mock evaluator that will raise an exception
from crewai.experimental.evaluation.base_evaluator import BaseEvaluator
from crewai.experimental.evaluation import MetricCategory
class FailingEvaluator(BaseEvaluator):
metric_category = MetricCategory.GOAL_ALIGNMENT
def evaluate(self, agent, task, execution_trace, final_output):
raise ValueError("Forced evaluation failure")
agent_evaluator = AgentEvaluator(agents=[agent], evaluators=[FailingEvaluator()])
agent_evaluator = AgentEvaluator(
agents=[agent], evaluators=[FailingEvaluator()]
)
mock_crew.kickoff()
assert events.keys() == {"started", "failed"}
@@ -269,7 +281,7 @@ class TestAgentEvaluator:
assert events["failed"].error == "Forced evaluation failure"
results = agent_evaluator.get_evaluation_results()
result, = results[agent.role]
(result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id)

View File

@@ -1,7 +1,7 @@
from unittest.mock import MagicMock, patch, ANY
from collections import defaultdict
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.memory_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemorySaveStartedEvent,
MemorySaveCompletedEvent,
MemoryQueryStartedEvent,

View File

@@ -1,10 +1,10 @@
import pytest
from unittest.mock import ANY
from collections import defaultdict
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.utilities.events.memory_events import (
from crewai.events.types.memory_events import (
MemorySaveStartedEvent,
MemorySaveCompletedEvent,
MemoryQueryStartedEvent,

View File

@@ -7,8 +7,8 @@ from crewai.crew import Crew
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.task import Task
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.memory_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemorySaveStartedEvent,
MemorySaveCompletedEvent,
MemoryQueryStartedEvent,

View File

@@ -27,19 +27,17 @@ from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities.events import (
CrewTrainCompletedEvent,
CrewTrainStartedEvent,
crewai_event_bus,
)
from crewai.utilities.events.crew_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.crew_events import (
CrewTestCompletedEvent,
CrewTestStartedEvent,
CrewTrainCompletedEvent,
CrewTrainStartedEvent,
)
from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.utilities.events.memory_events import (
from crewai.events.types.memory_events import (
MemorySaveStartedEvent,
MemorySaveCompletedEvent,
MemorySaveFailedEvent,

View File

@@ -7,14 +7,14 @@ import pytest
from pydantic import BaseModel
from crewai.flow.flow import Flow, and_, listen, or_, router, start
from crewai.utilities.events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.flow_events import (
FlowFinishedEvent,
FlowStartedEvent,
FlowPlotEvent,
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
crewai_event_bus,
)
from crewai.utilities.events.flow_events import FlowPlotEvent
def test_simple_sequential_flow():

View File

@@ -1,6 +1,6 @@
import pytest
from unittest.mock import patch, MagicMock
from crewai.utilities.events.event_listener import event_listener
from crewai.events.event_listener import event_listener
class TestFlowHumanInputIntegration:
@@ -23,10 +23,12 @@ class TestFlowHumanInputIntegration:
finally:
formatter._live_paused = original_paused_state
@patch('builtins.input', return_value='')
@patch("builtins.input", return_value="")
def test_human_input_pauses_flow_updates(self, mock_input):
"""Test that human input pauses Flow status updates."""
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.agent_builder.base_agent_executor_mixin import (
CrewAgentExecutorMixin,
)
executor = CrewAgentExecutorMixin()
executor.crew = MagicMock()
@@ -40,22 +42,25 @@ class TestFlowHumanInputIntegration:
try:
formatter._live_paused = False
with patch.object(formatter, 'pause_live_updates') as mock_pause, \
patch.object(formatter, 'resume_live_updates') as mock_resume:
with (
patch.object(formatter, "pause_live_updates") as mock_pause,
patch.object(formatter, "resume_live_updates") as mock_resume,
):
result = executor._ask_human_input("Test result")
mock_pause.assert_called_once()
mock_resume.assert_called_once()
mock_input.assert_called_once()
assert result == ''
assert result == ""
finally:
formatter._live_paused = original_paused_state
@patch('builtins.input', side_effect=['feedback', ''])
@patch("builtins.input", side_effect=["feedback", ""])
def test_multiple_human_input_rounds(self, mock_input):
"""Test multiple rounds of human input with Flow status management."""
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.agent_builder.base_agent_executor_mixin import (
CrewAgentExecutorMixin,
)
executor = CrewAgentExecutorMixin()
executor.crew = MagicMock()
@@ -76,14 +81,17 @@ class TestFlowHumanInputIntegration:
def track_resume():
resume_calls.append(True)
with patch.object(formatter, 'pause_live_updates', side_effect=track_pause), \
patch.object(formatter, 'resume_live_updates', side_effect=track_resume):
with (
patch.object(formatter, "pause_live_updates", side_effect=track_pause),
patch.object(
formatter, "resume_live_updates", side_effect=track_resume
),
):
result1 = executor._ask_human_input("Test result 1")
assert result1 == 'feedback'
assert result1 == "feedback"
result2 = executor._ask_human_input("Test result 2")
assert result2 == ''
assert result2 == ""
assert len(pause_calls) == 2
assert len(resume_calls) == 2
@@ -111,7 +119,9 @@ class TestFlowHumanInputIntegration:
def test_pause_resume_exception_handling(self):
"""Test that resume is called even if exception occurs during human input."""
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.agent_builder.base_agent_executor_mixin import (
CrewAgentExecutorMixin,
)
executor = CrewAgentExecutorMixin()
executor.crew = MagicMock()
@@ -123,10 +133,13 @@ class TestFlowHumanInputIntegration:
original_paused_state = formatter._live_paused
try:
with patch.object(formatter, 'pause_live_updates') as mock_pause, \
patch.object(formatter, 'resume_live_updates') as mock_resume, \
patch('builtins.input', side_effect=KeyboardInterrupt("Test exception")):
with (
patch.object(formatter, "pause_live_updates") as mock_pause,
patch.object(formatter, "resume_live_updates") as mock_resume,
patch(
"builtins.input", side_effect=KeyboardInterrupt("Test exception")
),
):
with pytest.raises(KeyboardInterrupt):
executor._ask_human_input("Test result")
@@ -137,7 +150,9 @@ class TestFlowHumanInputIntegration:
def test_training_mode_human_input(self):
"""Test human input in training mode."""
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.agent_builder.base_agent_executor_mixin import (
CrewAgentExecutorMixin,
)
executor = CrewAgentExecutorMixin()
executor.crew = MagicMock()
@@ -149,19 +164,25 @@ class TestFlowHumanInputIntegration:
original_paused_state = formatter._live_paused
try:
with patch.object(formatter, 'pause_live_updates') as mock_pause, \
patch.object(formatter, 'resume_live_updates') as mock_resume, \
patch('builtins.input', return_value='training feedback'):
with (
patch.object(formatter, "pause_live_updates") as mock_pause,
patch.object(formatter, "resume_live_updates") as mock_resume,
patch("builtins.input", return_value="training feedback"),
):
result = executor._ask_human_input("Test result")
mock_pause.assert_called_once()
mock_resume.assert_called_once()
assert result == 'training feedback'
assert result == "training feedback"
executor._printer.print.assert_called()
call_args = [call[1]['content'] for call in executor._printer.print.call_args_list]
training_prompt_found = any('TRAINING MODE' in content for content in call_args)
call_args = [
call[1]["content"]
for call in executor._printer.print.call_args_list
]
training_prompt_found = any(
"TRAINING MODE" in content for content in call_args
)
assert training_prompt_found
finally:
formatter._live_paused = original_paused_state

View File

@@ -8,7 +8,7 @@ from pydantic import BaseModel
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM
from crewai.utilities.events import (
from crewai.events.event_types import (
LLMCallCompletedEvent,
LLMStreamChunkEvent,
ToolUsageStartedEvent,
@@ -563,7 +563,7 @@ def assert_event_count(
@pytest.fixture
def mock_emit() -> MagicMock:
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus
from crewai.events.event_bus import CrewAIEventsBus
with patch.object(CrewAIEventsBus, "emit") as mock_emit:
yield mock_emit

View File

@@ -7,11 +7,11 @@ from crewai.llm import LLM
from crewai.tasks.hallucination_guardrail import HallucinationGuardrail
from crewai.tasks.llm_guardrail import LLMGuardrail
from crewai.tasks.task_output import TaskOutput
from crewai.utilities.events import (
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
def test_task_without_guardrail():

View File

@@ -10,8 +10,8 @@ from pydantic import BaseModel, Field
from crewai import Agent, Task
from crewai.tools import BaseTool
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.tool_usage_events import (
ToolSelectionErrorEvent,
ToolUsageFinishedEvent,
ToolValidateInputErrorEvent,

View File

@@ -5,13 +5,13 @@ from unittest.mock import patch, MagicMock
from crewai import Agent, Task, Crew
from crewai.flow.flow import Flow, start
from crewai.utilities.events.listeners.tracing.trace_listener import (
from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.utilities.events.listeners.tracing.trace_batch_manager import (
from crewai.events.listeners.tracing.trace_batch_manager import (
TraceBatchManager,
)
from crewai.utilities.events.listeners.tracing.types import TraceEvent
from crewai.events.listeners.tracing.types import TraceEvent
class TestTraceListenerSetup:
@@ -27,11 +27,11 @@ class TestTraceListenerSetup:
return_value="mock_token_12345",
),
patch(
"crewai.utilities.events.listeners.tracing.trace_listener.get_auth_token",
"crewai.events.listeners.tracing.trace_listener.get_auth_token",
return_value="mock_token_12345",
),
patch(
"crewai.utilities.events.listeners.tracing.trace_batch_manager.get_auth_token",
"crewai.events.listeners.tracing.trace_batch_manager.get_auth_token",
return_value="mock_token_12345",
),
):
@@ -40,7 +40,7 @@ class TestTraceListenerSetup:
@pytest.fixture(autouse=True)
def clear_event_bus(self):
"""Clear event bus listeners before and after each test"""
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
# Store original handlers
original_handlers = crewai_event_bus._handlers.copy()
@@ -123,7 +123,7 @@ class TestTraceListenerSetup:
crew = Crew(agents=[agent], tasks=[task], verbose=True)
trace_listener = TraceCollectionListener()
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
trace_listener.setup_listeners(crewai_event_bus)
@@ -162,7 +162,7 @@ class TestTraceListenerSetup:
crew = Crew(agents=[agent], tasks=[task], verbose=True)
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
trace_listener = None
for handler_list in crewai_event_bus._handlers.values():
@@ -207,7 +207,7 @@ class TestTraceListenerSetup:
)
crew = Crew(agents=[agent], tasks=[task], verbose=True)
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
# Create and setup trace listener explicitly
trace_listener = TraceCollectionListener()
@@ -262,7 +262,7 @@ class TestTraceListenerSetup:
result = crew.kickoff()
assert result is not None
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
trace_handlers = []
for handlers in crewai_event_bus._handlers.values():
@@ -281,9 +281,9 @@ class TestTraceListenerSetup:
):
trace_handlers.append(handler)
assert len(trace_handlers) == 0, (
f"Found {len(trace_handlers)} trace handlers when tracing should be disabled"
)
assert (
len(trace_handlers) == 0
), f"Found {len(trace_handlers)} trace handlers when tracing should be disabled"
def test_trace_listener_setup_correctly_for_crew(self):
"""Test that trace listener is set up correctly when enabled"""
@@ -328,7 +328,7 @@ class TestTraceListenerSetup:
with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
patch(
"crewai.utilities.events.listeners.tracing.trace_listener.TraceCollectionListener._check_authenticated",
"crewai.events.listeners.tracing.trace_listener.TraceCollectionListener._check_authenticated",
return_value=False,
),
):
@@ -357,7 +357,7 @@ class TestTraceListenerSetup:
with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
patch(
"crewai.utilities.events.listeners.tracing.trace_batch_manager.PlusAPI"
"crewai.events.listeners.tracing.trace_batch_manager.PlusAPI"
) as mock_plus_api_class,
):
mock_plus_api_instance = MagicMock()
@@ -393,13 +393,13 @@ class TestTraceListenerSetup:
# Helper method to ensure cleanup
def teardown_method(self):
"""Cleanup after each test method"""
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
crewai_event_bus._handlers.clear()
@classmethod
def teardown_class(cls):
"""Final cleanup after all tests in this class"""
from crewai.utilities.events import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus
crewai_event_bus._handlers.clear()

View File

@@ -1,7 +1,7 @@
from unittest.mock import Mock
from crewai.utilities.events.base_events import BaseEvent
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
class TestEvent(BaseEvent):

View File

@@ -1,7 +1,7 @@
from unittest.mock import MagicMock, patch
from rich.tree import Tree
from rich.live import Live
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
from crewai.events.utils.console_formatter import ConsoleFormatter
class TestConsoleFormatterPauseResume:
@@ -76,7 +76,7 @@ class TestConsoleFormatterPauseResume:
tree = Tree("Test")
with patch('crewai.utilities.events.utils.console_formatter.Live') as mock_live_class:
with patch("crewai.events.utils.console_formatter.Live") as mock_live_class:
mock_live_instance = MagicMock()
mock_live_class.return_value = mock_live_instance
@@ -112,5 +112,5 @@ class TestConsoleFormatterPauseResume:
"""Test that _live_paused is properly initialized."""
formatter = ConsoleFormatter()
assert hasattr(formatter, '_live_paused')
assert hasattr(formatter, "_live_paused")
assert not formatter._live_paused

View File

@@ -11,12 +11,12 @@ from crewai.flow.flow import Flow, listen, start
from crewai.llm import LLM
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.utilities.events.agent_events import (
from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
from crewai.utilities.events.crew_events import (
from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
@@ -24,28 +24,28 @@ from crewai.utilities.events.crew_events import (
CrewTestResultEvent,
CrewTestStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.event_listener import EventListener
from crewai.utilities.events.event_types import ToolUsageFinishedEvent
from crewai.utilities.events.flow_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_listener import EventListener
from crewai.events.types.tool_usage_events import ToolUsageFinishedEvent
from crewai.events.types.flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
FlowStartedEvent,
MethodExecutionFailedEvent,
MethodExecutionStartedEvent,
)
from crewai.utilities.events.llm_events import (
from crewai.events.types.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMStreamChunkEvent,
)
from crewai.utilities.events.task_events import (
from crewai.events.types.task_events import (
TaskCompletedEvent,
TaskFailedEvent,
TaskStartedEvent,
)
from crewai.utilities.events.tool_usage_events import (
from crewai.events.types.tool_usage_events import (
ToolUsageErrorEvent,
)
@@ -114,9 +114,7 @@ def test_crew_emits_start_kickoff_event(
mock_telemetry.task_ended = Mock(return_value=mock_span)
# Patch the Telemetry class to return our mock
with patch(
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
# Now when Crew creates EventListener, it will use our mocked telemetry
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
@@ -241,9 +239,7 @@ def test_crew_emits_end_task_event(
mock_telemetry.crew_execution_span = Mock()
mock_telemetry.end_crew = Mock()
with patch(
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
@@ -455,9 +451,7 @@ def test_flow_emits_start_event(reset_event_listener_singleton):
mock_telemetry.flow_creation_span = Mock()
mock_telemetry.set_tracer = Mock()
with patch(
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
# Force creation of EventListener singleton with mocked telemetry
_ = EventListener()