refactor: Move events module to crewai.events (#3425)

refactor(events): relocate events module & update imports

- Move events from utilities/ to top-level events/ with types/, listeners/, utils/ structure
- Update all source/tests/docs to new import paths
- Add backwards compatibility stubs in crewai.utilities.events with deprecation warnings
- Restore test mocks and fix related test imports
This commit is contained in:
Greyson LaLonde
2025-09-02 10:06:42 -04:00
committed by GitHub
parent 1b1a8fdbf4
commit 878c1a649a
81 changed files with 1094 additions and 751 deletions

View File

@@ -44,12 +44,12 @@ To create a custom event listener, you need to:
Here's a simple example of a custom event listener class: Here's a simple example of a custom event listener class:
```python ```python
from crewai.utilities.events import ( from crewai.events import (
CrewKickoffStartedEvent, CrewKickoffStartedEvent,
CrewKickoffCompletedEvent, CrewKickoffCompletedEvent,
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
) )
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
def __init__(self): def __init__(self):
@@ -146,7 +146,7 @@ my_project/
```python ```python
# my_custom_listener.py # my_custom_listener.py
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
# ... import events ... # ... import events ...
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
@@ -279,7 +279,7 @@ Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` i
For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager: For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:
```python ```python
from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent from crewai.events import crewai_event_bus, CrewKickoffStartedEvent
with crewai_event_bus.scoped_handlers(): with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStartedEvent) @crewai_event_bus.on(CrewKickoffStartedEvent)

View File

@@ -681,11 +681,11 @@ CrewAI emits events during the knowledge retrieval process that you can listen f
#### Example: Monitoring Knowledge Retrieval #### Example: Monitoring Knowledge Retrieval
```python ```python
from crewai.utilities.events import ( from crewai.events import (
KnowledgeRetrievalStartedEvent, KnowledgeRetrievalStartedEvent,
KnowledgeRetrievalCompletedEvent, KnowledgeRetrievalCompletedEvent,
BaseEventListener,
) )
from crewai.utilities.events.base_event_listener import BaseEventListener
class KnowledgeMonitorListener(BaseEventListener): class KnowledgeMonitorListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):

View File

@@ -733,10 +733,10 @@ CrewAI supports streaming responses from LLMs, allowing your application to rece
CrewAI emits events for each chunk received during streaming: CrewAI emits events for each chunk received during streaming:
```python ```python
from crewai.utilities.events import ( from crewai.events import (
LLMStreamChunkEvent LLMStreamChunkEvent
) )
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):
@@ -758,8 +758,8 @@ CrewAI supports streaming responses from LLMs, allowing your application to rece
```python ```python
from crewai import LLM, Agent, Task, Crew from crewai import LLM, Agent, Task, Crew
from crewai.utilities.events import LLMStreamChunkEvent from crewai.events import LLMStreamChunkEvent
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):

View File

@@ -1041,8 +1041,8 @@ CrewAI emits the following memory-related events:
Track memory operation timing to optimize your application: Track memory operation timing to optimize your application:
```python ```python
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import (
from crewai.utilities.events import ( BaseEventListener,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemorySaveCompletedEvent MemorySaveCompletedEvent
) )
@@ -1076,8 +1076,8 @@ memory_monitor = MemoryPerformanceMonitor()
Log memory operations for debugging and insights: Log memory operations for debugging and insights:
```python ```python
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import (
from crewai.utilities.events import ( BaseEventListener,
MemorySaveStartedEvent, MemorySaveStartedEvent,
MemoryQueryStartedEvent, MemoryQueryStartedEvent,
MemoryRetrievalCompletedEvent MemoryRetrievalCompletedEvent
@@ -1117,8 +1117,8 @@ memory_logger = MemoryLogger()
Capture and respond to memory errors: Capture and respond to memory errors:
```python ```python
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import (
from crewai.utilities.events import ( BaseEventListener,
MemorySaveFailedEvent, MemorySaveFailedEvent,
MemoryQueryFailedEvent MemoryQueryFailedEvent
) )
@@ -1167,8 +1167,8 @@ error_tracker = MemoryErrorTracker(notify_email="admin@example.com")
Memory events can be forwarded to analytics and monitoring platforms to track performance metrics, detect anomalies, and visualize memory usage patterns: Memory events can be forwarded to analytics and monitoring platforms to track performance metrics, detect anomalies, and visualize memory usage patterns:
```python ```python
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import (
from crewai.utilities.events import ( BaseEventListener,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemorySaveCompletedEvent MemorySaveCompletedEvent
) )

View File

@@ -44,12 +44,12 @@ Prompt Tracing을 통해 다음과 같은 작업이 가능합니다:
아래는 커스텀 이벤트 리스너 클래스의 간단한 예시입니다: 아래는 커스텀 이벤트 리스너 클래스의 간단한 예시입니다:
```python ```python
from crewai.utilities.events import ( from crewai.events import (
CrewKickoffStartedEvent, CrewKickoffStartedEvent,
CrewKickoffCompletedEvent, CrewKickoffCompletedEvent,
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
) )
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
def __init__(self): def __init__(self):
@@ -146,7 +146,7 @@ my_project/
```python ```python
# my_custom_listener.py # my_custom_listener.py
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
# ... import events ... # ... import events ...
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
@@ -279,7 +279,7 @@ CrewAI는 여러분이 청취할 수 있는 다양한 이벤트를 제공합니
임시 이벤트 처리가 필요한 경우(테스트 또는 특정 작업에 유용함), `scoped_handlers` 컨텍스트 관리자를 사용할 수 있습니다: 임시 이벤트 처리가 필요한 경우(테스트 또는 특정 작업에 유용함), `scoped_handlers` 컨텍스트 관리자를 사용할 수 있습니다:
```python ```python
from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent from crewai.events import crewai_event_bus, CrewKickoffStartedEvent
with crewai_event_bus.scoped_handlers(): with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStartedEvent) @crewai_event_bus.on(CrewKickoffStartedEvent)

View File

@@ -683,11 +683,11 @@ CrewAI는 knowledge 검색 과정에서 이벤트를 발생시키며, 이벤트
#### 예시: Knowledge Retrieval 모니터링 #### 예시: Knowledge Retrieval 모니터링
```python ```python
from crewai.utilities.events import ( from crewai.events import (
KnowledgeRetrievalStartedEvent, KnowledgeRetrievalStartedEvent,
KnowledgeRetrievalCompletedEvent, KnowledgeRetrievalCompletedEvent,
BaseEventListener,
) )
from crewai.utilities.events.base_event_listener import BaseEventListener
class KnowledgeMonitorListener(BaseEventListener): class KnowledgeMonitorListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):

View File

@@ -731,10 +731,10 @@ CrewAI는 LLM의 스트리밍 응답을 지원하여, 애플리케이션이 출
CrewAI는 스트리밍 중 수신되는 각 청크에 대해 이벤트를 발생시킵니다: CrewAI는 스트리밍 중 수신되는 각 청크에 대해 이벤트를 발생시킵니다:
```python ```python
from crewai.utilities.events import ( from crewai.events import (
LLMStreamChunkEvent LLMStreamChunkEvent
) )
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):
@@ -756,8 +756,8 @@ CrewAI는 LLM의 스트리밍 응답을 지원하여, 애플리케이션이 출
```python ```python
from crewai import LLM, Agent, Task, Crew from crewai import LLM, Agent, Task, Crew
from crewai.utilities.events import LLMStreamChunkEvent from crewai.events import LLMStreamChunkEvent
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):

View File

@@ -985,8 +985,8 @@ CrewAI는 다음과 같은 메모리 관련 이벤트를 발생시킵니다:
애플리케이션을 최적화하기 위해 메모리 작업 타이밍을 추적하세요: 애플리케이션을 최적화하기 위해 메모리 작업 타이밍을 추적하세요:
```python ```python
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import (
from crewai.utilities.events import ( BaseEventListener,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemorySaveCompletedEvent MemorySaveCompletedEvent
) )
@@ -1020,8 +1020,8 @@ memory_monitor = MemoryPerformanceMonitor()
디버깅 및 인사이트를 위해 메모리 작업을 로깅합니다: 디버깅 및 인사이트를 위해 메모리 작업을 로깅합니다:
```python ```python
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import (
from crewai.utilities.events import ( BaseEventListener,
MemorySaveStartedEvent, MemorySaveStartedEvent,
MemoryQueryStartedEvent, MemoryQueryStartedEvent,
MemoryRetrievalCompletedEvent MemoryRetrievalCompletedEvent
@@ -1061,8 +1061,8 @@ memory_logger = MemoryLogger()
메모리 오류를 캡처하고 대응합니다: 메모리 오류를 캡처하고 대응합니다:
```python ```python
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import (
from crewai.utilities.events import ( BaseEventListener,
MemorySaveFailedEvent, MemorySaveFailedEvent,
MemoryQueryFailedEvent MemoryQueryFailedEvent
) )
@@ -1111,8 +1111,8 @@ error_tracker = MemoryErrorTracker(notify_email="admin@example.com")
메모리 이벤트는 분석 및 모니터링 플랫폼으로 전달되어 성능 지표를 추적하고, 이상 징후를 감지하며, 메모리 사용 패턴을 시각화할 수 있습니다: 메모리 이벤트는 분석 및 모니터링 플랫폼으로 전달되어 성능 지표를 추적하고, 이상 징후를 감지하며, 메모리 사용 패턴을 시각화할 수 있습니다:
```python ```python
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import (
from crewai.utilities.events import ( BaseEventListener,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemorySaveCompletedEvent MemorySaveCompletedEvent
) )

View File

@@ -44,12 +44,12 @@ Para criar um listener de evento personalizado, você precisa:
Veja um exemplo simples de uma classe de listener de evento personalizado: Veja um exemplo simples de uma classe de listener de evento personalizado:
```python ```python
from crewai.utilities.events import ( from crewai.events import (
CrewKickoffStartedEvent, CrewKickoffStartedEvent,
CrewKickoffCompletedEvent, CrewKickoffCompletedEvent,
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
) )
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
class MeuListenerPersonalizado(BaseEventListener): class MeuListenerPersonalizado(BaseEventListener):
def __init__(self): def __init__(self):
@@ -146,7 +146,7 @@ my_project/
```python ```python
# my_custom_listener.py # my_custom_listener.py
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
# ... importe events ... # ... importe events ...
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
@@ -268,7 +268,7 @@ Campos adicionais variam pelo tipo de evento. Por exemplo, `CrewKickoffCompleted
Para lidar temporariamente com eventos (útil para testes ou operações específicas), você pode usar o context manager `scoped_handlers`: Para lidar temporariamente com eventos (útil para testes ou operações específicas), você pode usar o context manager `scoped_handlers`:
```python ```python
from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent from crewai.events import crewai_event_bus, CrewKickoffStartedEvent
with crewai_event_bus.scoped_handlers(): with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStartedEvent) @crewai_event_bus.on(CrewKickoffStartedEvent)

View File

@@ -681,11 +681,11 @@ O CrewAI emite eventos durante o processo de recuperação de knowledge que voc
#### Exemplo: Monitorando Recuperação de Knowledge #### Exemplo: Monitorando Recuperação de Knowledge
```python ```python
from crewai.utilities.events import ( from crewai.events import (
KnowledgeRetrievalStartedEvent, KnowledgeRetrievalStartedEvent,
KnowledgeRetrievalCompletedEvent, KnowledgeRetrievalCompletedEvent,
BaseEventListener,
) )
from crewai.utilities.events.base_event_listener import BaseEventListener
class KnowledgeMonitorListener(BaseEventListener): class KnowledgeMonitorListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):

View File

@@ -708,10 +708,10 @@ O CrewAI suporta respostas em streaming de LLMs, permitindo que sua aplicação
O CrewAI emite eventos para cada chunk recebido durante o streaming: O CrewAI emite eventos para cada chunk recebido durante o streaming:
```python ```python
from crewai.utilities.events import ( from crewai.events import (
LLMStreamChunkEvent LLMStreamChunkEvent
) )
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events import BaseEventListener
class MyCustomListener(BaseEventListener): class MyCustomListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):

View File

@@ -38,17 +38,17 @@ from crewai.utilities.agent_utils import (
) )
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description from crewai.utilities.converter import generate_model_description
from crewai.utilities.events.agent_events import ( from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
AgentExecutionErrorEvent, AgentExecutionErrorEvent,
AgentExecutionStartedEvent, AgentExecutionStartedEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemoryRetrievalStartedEvent, MemoryRetrievalStartedEvent,
MemoryRetrievalCompletedEvent, MemoryRetrievalCompletedEvent,
) )
from crewai.utilities.events.knowledge_events import ( from crewai.events.types.knowledge_events import (
KnowledgeQueryCompletedEvent, KnowledgeQueryCompletedEvent,
KnowledgeQueryFailedEvent, KnowledgeQueryFailedEvent,
KnowledgeQueryStartedEvent, KnowledgeQueryStartedEvent,

View File

@@ -1,4 +1,4 @@
from typing import Any, AsyncIterable, Dict, List, Optional from typing import Any, Dict, List, Optional
from pydantic import Field, PrivateAttr from pydantic import Field, PrivateAttr
@@ -14,15 +14,14 @@ from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import BaseTool from crewai.tools.base_tool import BaseTool
from crewai.utilities import Logger from crewai.utilities import Logger
from crewai.utilities.converter import Converter from crewai.utilities.converter import Converter
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.agent_events import ( from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
AgentExecutionErrorEvent, AgentExecutionErrorEvent,
AgentExecutionStartedEvent, AgentExecutionStartedEvent,
) )
try: try:
from langchain_core.messages import ToolMessage
from langgraph.checkpoint.memory import MemorySaver from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent from langgraph.prebuilt import create_react_agent

View File

@@ -10,8 +10,8 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools import BaseTool from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Logger from crewai.utilities import Logger
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.agent_events import ( from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
AgentExecutionErrorEvent, AgentExecutionErrorEvent,
AgentExecutionStartedEvent, AgentExecutionStartedEvent,

View File

@@ -7,7 +7,7 @@ from crewai.utilities import I18N
from crewai.utilities.converter import ConverterError from crewai.utilities.converter import ConverterError
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.printer import Printer from crewai.utilities.printer import Printer
from crewai.utilities.events.event_listener import event_listener from crewai.events.event_listener import event_listener
if TYPE_CHECKING: if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent

View File

@@ -30,11 +30,11 @@ from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
from crewai.utilities.logger import Logger from crewai.utilities.logger import Logger
from crewai.utilities.tool_utils import execute_tool_and_check_finality from crewai.utilities.tool_utils import execute_tool_and_check_finality
from crewai.utilities.training_handler import CrewTrainingHandler from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.utilities.events.agent_events import ( from crewai.events.types.logging_events import (
AgentLogsStartedEvent, AgentLogsStartedEvent,
AgentLogsExecutionEvent, AgentLogsExecutionEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
class CrewAgentExecutor(CrewAgentExecutorMixin): class CrewAgentExecutor(CrewAgentExecutorMixin):

View File

@@ -59,7 +59,7 @@ from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import NOT_SPECIFIED, TRAINING_DATA_FILE from crewai.utilities.constants import NOT_SPECIFIED, TRAINING_DATA_FILE
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.events.crew_events import ( from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent, CrewKickoffCompletedEvent,
CrewKickoffFailedEvent, CrewKickoffFailedEvent,
CrewKickoffStartedEvent, CrewKickoffStartedEvent,
@@ -70,14 +70,14 @@ from crewai.utilities.events.crew_events import (
CrewTrainFailedEvent, CrewTrainFailedEvent,
CrewTrainStartedEvent, CrewTrainStartedEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.event_listener import EventListener from crewai.events.event_listener import EventListener
from crewai.utilities.events.listeners.tracing.trace_listener import ( from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener, TraceCollectionListener,
) )
from crewai.utilities.events.listeners.tracing.utils import ( from crewai.events.listeners.tracing.utils import (
is_tracing_enabled, is_tracing_enabled,
) )
from crewai.utilities.formatter import ( from crewai.utilities.formatter import (

View File

@@ -0,0 +1,56 @@
"""CrewAI events system for monitoring and extending agent behavior.
This module provides the event infrastructure that allows users to:
- Monitor agent, task, and crew execution
- Track memory operations and performance
- Build custom logging and analytics
- Extend CrewAI with custom event handlers
"""
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemoryQueryCompletedEvent,
MemorySaveCompletedEvent,
MemorySaveStartedEvent,
MemoryQueryStartedEvent,
MemoryRetrievalCompletedEvent,
MemorySaveFailedEvent,
MemoryQueryFailedEvent,
)
from crewai.events.types.knowledge_events import (
KnowledgeRetrievalStartedEvent,
KnowledgeRetrievalCompletedEvent,
)
from crewai.events.types.crew_events import (
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent,
)
from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
)
from crewai.events.types.llm_events import (
LLMStreamChunkEvent,
)
__all__ = [
"BaseEventListener",
"crewai_event_bus",
"MemoryQueryCompletedEvent",
"MemorySaveCompletedEvent",
"MemorySaveStartedEvent",
"MemoryQueryStartedEvent",
"MemoryRetrievalCompletedEvent",
"MemorySaveFailedEvent",
"MemoryQueryFailedEvent",
"KnowledgeRetrievalStartedEvent",
"KnowledgeRetrievalCompletedEvent",
"CrewKickoffStartedEvent",
"CrewKickoffCompletedEvent",
"AgentExecutionCompletedEvent",
"LLMStreamChunkEvent",
]

View File

@@ -0,0 +1,15 @@
from abc import ABC, abstractmethod
from crewai.events.event_bus import CrewAIEventsBus, crewai_event_bus
class BaseEventListener(ABC):
    """Abstract base class for CrewAI event listeners.

    Subclasses implement ``setup_listeners`` to attach their handlers to the
    global ``crewai_event_bus``; registration happens automatically when the
    listener is instantiated (``__init__`` calls ``setup_listeners``).
    """

    # Presumably enables extra diagnostic output in subclasses — the flag is
    # only declared here, not read by this class. TODO(review): confirm usage.
    verbose: bool = False

    def __init__(self):
        super().__init__()
        # Wire this listener's handlers onto the shared global bus immediately,
        # so constructing a listener is all a caller needs to do.
        self.setup_listeners(crewai_event_bus)

    @abstractmethod
    def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
        """Register event handlers on the given bus (implemented by subclasses)."""
        pass

View File

@@ -0,0 +1,117 @@
from __future__ import annotations
import threading
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Type, TypeVar, cast
from blinker import Signal
from crewai.events.base_events import BaseEvent
from crewai.events.event_types import EventTypes
EventT = TypeVar("EventT", bound=BaseEvent)
class CrewAIEventsBus:
    """
    A singleton event bus that uses blinker signals for event handling.
    Allows both internal (Flow/Crew) and external event handling.
    """

    _instance = None
    _lock = threading.Lock()

    def __new__(cls):
        # Double-checked locking: only the first caller pays for the lock,
        # and only one instance is ever initialized.
        if cls._instance is None:
            with cls._lock:
                if cls._instance is None:  # prevent race condition
                    cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
                    cls._instance._initialize()
        return cls._instance

    def _initialize(self) -> None:
        """Initialize the event bus internal state"""
        self._signal = Signal("crewai_event_bus")
        self._handlers: Dict[Type[BaseEvent], List[Callable]] = {}

    def on(
        self, event_type: Type[EventT]
    ) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
        """
        Decorator to register an event handler for a specific event type.

        Usage:
            @crewai_event_bus.on(AgentExecutionCompletedEvent)
            def on_agent_execution_completed(
                source: Any, event: AgentExecutionCompletedEvent
            ):
                print(f"👍 Agent '{event.agent}' completed task")
                print(f"   Output: {event.output}")
        """

        def decorator(
            handler: Callable[[Any, EventT], None],
        ) -> Callable[[Any, EventT], None]:
            self._handlers.setdefault(event_type, []).append(
                cast(Callable[[Any, EventT], None], handler)
            )
            return handler

        return decorator

    def emit(self, source: Any, event: BaseEvent) -> None:
        """
        Emit an event to all registered handlers

        Args:
            source: The object emitting the event
            event: The event instance to emit

        Handler exceptions are caught and reported so one failing handler
        cannot prevent the remaining handlers (or the blinker signal) from
        running.
        """
        # Iterate over snapshots: a handler may register new handlers while
        # dispatch is in progress, which would otherwise mutate the dict/list
        # mid-iteration and raise RuntimeError.
        for event_type, handlers in list(self._handlers.items()):
            if isinstance(event, event_type):
                for handler in list(handlers):
                    try:
                        handler(source, event)
                    except Exception as e:
                        # getattr fallback: callables such as functools.partial
                        # have no __name__, and raising here would mask the
                        # original handler error.
                        handler_name = getattr(handler, "__name__", repr(handler))
                        print(
                            f"[EventBus Error] Handler '{handler_name}' failed for event '{event_type.__name__}': {e}"
                        )
        self._signal.send(source, event=event)

    def register_handler(
        self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
    ) -> None:
        """Register an event handler for a specific event type"""
        self._handlers.setdefault(event_type, []).append(
            cast(Callable[[Any, EventTypes], None], handler)
        )

    @contextmanager
    def scoped_handlers(self):
        """
        Context manager for temporary event handling scope.
        Useful for testing or temporary event handling.

        Usage:
            with crewai_event_bus.scoped_handlers():
                @crewai_event_bus.on(CrewKickoffStarted)
                def temp_handler(source, event):
                    print("Temporary handler")
                # Do stuff...
            # Handlers are cleared after the context
        """
        # Shallow copy is sufficient: registrations made inside the scope go
        # into fresh lists (the dict was cleared), so the saved lists are
        # never mutated while the scope is active.
        previous_handlers = self._handlers.copy()
        self._handlers.clear()
        try:
            yield
        finally:
            self._handlers = previous_handlers


# Global instance
crewai_event_bus = CrewAIEventsBus()

View File

@@ -1,3 +1,5 @@
from __future__ import annotations
from io import StringIO from io import StringIO
from typing import Any, Dict from typing import Any, Dict
@@ -7,8 +9,8 @@ from crewai.task import Task
from crewai.telemetry.telemetry import Telemetry from crewai.telemetry.telemetry import Telemetry
from crewai.utilities import Logger from crewai.utilities import Logger
from crewai.utilities.constants import EMITTER_COLOR from crewai.utilities.constants import EMITTER_COLOR
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events.base_event_listener import BaseEventListener
from crewai.utilities.events.knowledge_events import ( from crewai.events.types.knowledge_events import (
KnowledgeQueryCompletedEvent, KnowledgeQueryCompletedEvent,
KnowledgeQueryFailedEvent, KnowledgeQueryFailedEvent,
KnowledgeQueryStartedEvent, KnowledgeQueryStartedEvent,
@@ -16,28 +18,30 @@ from crewai.utilities.events.knowledge_events import (
KnowledgeRetrievalStartedEvent, KnowledgeRetrievalStartedEvent,
KnowledgeSearchQueryFailedEvent, KnowledgeSearchQueryFailedEvent,
) )
from crewai.utilities.events.llm_events import ( from crewai.events.types.llm_events import (
LLMCallCompletedEvent, LLMCallCompletedEvent,
LLMCallFailedEvent, LLMCallFailedEvent,
LLMCallStartedEvent, LLMCallStartedEvent,
LLMStreamChunkEvent, LLMStreamChunkEvent,
) )
from crewai.utilities.events.llm_guardrail_events import ( from crewai.events.types.llm_guardrail_events import (
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
) )
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter from crewai.events.utils.console_formatter import ConsoleFormatter
from .agent_events import ( from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
AgentExecutionStartedEvent, AgentExecutionStartedEvent,
AgentLogsStartedEvent,
AgentLogsExecutionEvent,
LiteAgentExecutionCompletedEvent, LiteAgentExecutionCompletedEvent,
LiteAgentExecutionErrorEvent, LiteAgentExecutionErrorEvent,
LiteAgentExecutionStartedEvent, LiteAgentExecutionStartedEvent,
) )
from .crew_events import ( from crewai.events.types.logging_events import (
AgentLogsStartedEvent,
AgentLogsExecutionEvent,
)
from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent, CrewKickoffCompletedEvent,
CrewKickoffFailedEvent, CrewKickoffFailedEvent,
CrewKickoffStartedEvent, CrewKickoffStartedEvent,
@@ -49,7 +53,7 @@ from .crew_events import (
CrewTrainFailedEvent, CrewTrainFailedEvent,
CrewTrainStartedEvent, CrewTrainStartedEvent,
) )
from .flow_events import ( from .types.flow_events import (
FlowCreatedEvent, FlowCreatedEvent,
FlowFinishedEvent, FlowFinishedEvent,
FlowStartedEvent, FlowStartedEvent,
@@ -57,13 +61,13 @@ from .flow_events import (
MethodExecutionFinishedEvent, MethodExecutionFinishedEvent,
MethodExecutionStartedEvent, MethodExecutionStartedEvent,
) )
from .task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent from .types.task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
from .tool_usage_events import ( from .types.tool_usage_events import (
ToolUsageErrorEvent, ToolUsageErrorEvent,
ToolUsageFinishedEvent, ToolUsageFinishedEvent,
ToolUsageStartedEvent, ToolUsageStartedEvent,
) )
from .reasoning_events import ( from .types.reasoning_events import (
AgentReasoningStartedEvent, AgentReasoningStartedEvent,
AgentReasoningCompletedEvent, AgentReasoningCompletedEvent,
AgentReasoningFailedEvent, AgentReasoningFailedEvent,
@@ -162,7 +166,7 @@ class EventListener(BaseEventListener):
span = self._telemetry.task_started(crew=source.agent.crew, task=source) span = self._telemetry.task_started(crew=source.agent.crew, task=source)
self.execution_spans[source] = span self.execution_spans[source] = span
# Pass both task ID and task name (if set) # Pass both task ID and task name (if set)
task_name = source.name if hasattr(source, 'name') and source.name else None task_name = source.name if hasattr(source, "name") and source.name else None
self.formatter.create_task_branch( self.formatter.create_task_branch(
self.formatter.current_crew_tree, source.id, task_name self.formatter.current_crew_tree, source.id, task_name
) )
@@ -176,13 +180,13 @@ class EventListener(BaseEventListener):
self.execution_spans[source] = None self.execution_spans[source] = None
# Pass task name if it exists # Pass task name if it exists
task_name = source.name if hasattr(source, 'name') and source.name else None task_name = source.name if hasattr(source, "name") and source.name else None
self.formatter.update_task_status( self.formatter.update_task_status(
self.formatter.current_crew_tree, self.formatter.current_crew_tree,
source.id, source.id,
source.agent.role, source.agent.role,
"completed", "completed",
task_name task_name,
) )
@crewai_event_bus.on(TaskFailedEvent) @crewai_event_bus.on(TaskFailedEvent)
@@ -194,13 +198,13 @@ class EventListener(BaseEventListener):
self.execution_spans[source] = None self.execution_spans[source] = None
# Pass task name if it exists # Pass task name if it exists
task_name = source.name if hasattr(source, 'name') and source.name else None task_name = source.name if hasattr(source, "name") and source.name else None
self.formatter.update_task_status( self.formatter.update_task_status(
self.formatter.current_crew_tree, self.formatter.current_crew_tree,
source.id, source.id,
source.agent.role, source.agent.role,
"failed", "failed",
task_name task_name,
) )
# ----------- AGENT EVENTS ----------- # ----------- AGENT EVENTS -----------

View File

@@ -1,12 +1,12 @@
from typing import Union from typing import Union
from .agent_events import ( from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
AgentExecutionErrorEvent, AgentExecutionErrorEvent,
AgentExecutionStartedEvent, AgentExecutionStartedEvent,
LiteAgentExecutionCompletedEvent, LiteAgentExecutionCompletedEvent,
) )
from .crew_events import ( from .types.crew_events import (
CrewKickoffCompletedEvent, CrewKickoffCompletedEvent,
CrewKickoffFailedEvent, CrewKickoffFailedEvent,
CrewKickoffStartedEvent, CrewKickoffStartedEvent,
@@ -17,39 +17,39 @@ from .crew_events import (
CrewTrainFailedEvent, CrewTrainFailedEvent,
CrewTrainStartedEvent, CrewTrainStartedEvent,
) )
from .flow_events import ( from .types.flow_events import (
FlowFinishedEvent, FlowFinishedEvent,
FlowStartedEvent, FlowStartedEvent,
MethodExecutionFailedEvent, MethodExecutionFailedEvent,
MethodExecutionFinishedEvent, MethodExecutionFinishedEvent,
MethodExecutionStartedEvent, MethodExecutionStartedEvent,
) )
from .llm_events import ( from .types.llm_events import (
LLMCallCompletedEvent, LLMCallCompletedEvent,
LLMCallFailedEvent, LLMCallFailedEvent,
LLMCallStartedEvent, LLMCallStartedEvent,
LLMStreamChunkEvent, LLMStreamChunkEvent,
) )
from .llm_guardrail_events import ( from .types.llm_guardrail_events import (
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
) )
from .task_events import ( from .types.task_events import (
TaskCompletedEvent, TaskCompletedEvent,
TaskFailedEvent, TaskFailedEvent,
TaskStartedEvent, TaskStartedEvent,
) )
from .tool_usage_events import ( from .types.tool_usage_events import (
ToolUsageErrorEvent, ToolUsageErrorEvent,
ToolUsageFinishedEvent, ToolUsageFinishedEvent,
ToolUsageStartedEvent, ToolUsageStartedEvent,
) )
from .reasoning_events import ( from .types.reasoning_events import (
AgentReasoningStartedEvent, AgentReasoningStartedEvent,
AgentReasoningCompletedEvent, AgentReasoningCompletedEvent,
AgentReasoningFailedEvent, AgentReasoningFailedEvent,
) )
from .knowledge_events import ( from .types.knowledge_events import (
KnowledgeRetrievalStartedEvent, KnowledgeRetrievalStartedEvent,
KnowledgeRetrievalCompletedEvent, KnowledgeRetrievalCompletedEvent,
KnowledgeQueryStartedEvent, KnowledgeQueryStartedEvent,
@@ -58,7 +58,7 @@ from .knowledge_events import (
KnowledgeSearchQueryFailedEvent, KnowledgeSearchQueryFailedEvent,
) )
from .memory_events import ( from .types.memory_events import (
MemorySaveStartedEvent, MemorySaveStartedEvent,
MemorySaveCompletedEvent, MemorySaveCompletedEvent,
MemorySaveFailedEvent, MemorySaveFailedEvent,

View File

@@ -0,0 +1,5 @@
"""Event listener implementations for CrewAI.
This module contains various event listener implementations
for handling memory, tracing, and other event-driven functionality.
"""

View File

@@ -1,5 +1,5 @@
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events.base_event_listener import BaseEventListener
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemoryRetrievalCompletedEvent, MemoryRetrievalCompletedEvent,
MemoryRetrievalStartedEvent, MemoryRetrievalStartedEvent,
MemoryQueryFailedEvent, MemoryQueryFailedEvent,
@@ -9,8 +9,8 @@ from crewai.utilities.events.memory_events import (
MemorySaveFailedEvent, MemorySaveFailedEvent,
) )
class MemoryListener(BaseEventListener):
class MemoryListener(BaseEventListener):
def __init__(self, formatter): def __init__(self, formatter):
super().__init__() super().__init__()
self.formatter = formatter self.formatter = formatter
@@ -19,9 +19,7 @@ class MemoryListener(BaseEventListener):
def setup_listeners(self, crewai_event_bus): def setup_listeners(self, crewai_event_bus):
@crewai_event_bus.on(MemoryRetrievalStartedEvent) @crewai_event_bus.on(MemoryRetrievalStartedEvent)
def on_memory_retrieval_started( def on_memory_retrieval_started(source, event: MemoryRetrievalStartedEvent):
source, event: MemoryRetrievalStartedEvent
):
if self.memory_retrieval_in_progress: if self.memory_retrieval_in_progress:
return return
@@ -33,9 +31,7 @@ class MemoryListener(BaseEventListener):
) )
@crewai_event_bus.on(MemoryRetrievalCompletedEvent) @crewai_event_bus.on(MemoryRetrievalCompletedEvent)
def on_memory_retrieval_completed( def on_memory_retrieval_completed(source, event: MemoryRetrievalCompletedEvent):
source, event: MemoryRetrievalCompletedEvent
):
if not self.memory_retrieval_in_progress: if not self.memory_retrieval_in_progress:
return return
@@ -44,7 +40,7 @@ class MemoryListener(BaseEventListener):
self.formatter.current_agent_branch, self.formatter.current_agent_branch,
self.formatter.current_crew_tree, self.formatter.current_crew_tree,
event.memory_content, event.memory_content,
event.retrieval_time_ms event.retrieval_time_ms,
) )
@crewai_event_bus.on(MemoryQueryCompletedEvent) @crewai_event_bus.on(MemoryQueryCompletedEvent)
@@ -107,4 +103,4 @@ class MemoryListener(BaseEventListener):
event.error, event.error,
event.source_type, event.source_type,
self.formatter.current_crew_tree, self.formatter.current_crew_tree,
) )

View File

@@ -11,7 +11,7 @@ from crewai.cli.plus_api import PlusAPI
from rich.console import Console from rich.console import Console
from rich.panel import Panel from rich.panel import Panel
from crewai.utilities.events.listeners.tracing.types import TraceEvent from crewai.events.listeners.tracing.types import TraceEvent
from logging import getLogger from logging import getLogger
logger = getLogger(__name__) logger = getLogger(__name__)

View File

@@ -3,8 +3,8 @@ import uuid
from typing import Dict, Any, Optional from typing import Dict, Any, Optional
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events.base_event_listener import BaseEventListener
from crewai.utilities.events.agent_events import ( from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
AgentExecutionStartedEvent, AgentExecutionStartedEvent,
LiteAgentExecutionStartedEvent, LiteAgentExecutionStartedEvent,
@@ -12,34 +12,34 @@ from crewai.utilities.events.agent_events import (
LiteAgentExecutionErrorEvent, LiteAgentExecutionErrorEvent,
AgentExecutionErrorEvent, AgentExecutionErrorEvent,
) )
from crewai.utilities.events.listeners.tracing.types import TraceEvent from crewai.events.listeners.tracing.types import TraceEvent
from crewai.utilities.events.reasoning_events import ( from crewai.events.types.reasoning_events import (
AgentReasoningStartedEvent, AgentReasoningStartedEvent,
AgentReasoningCompletedEvent, AgentReasoningCompletedEvent,
AgentReasoningFailedEvent, AgentReasoningFailedEvent,
) )
from crewai.utilities.events.crew_events import ( from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent, CrewKickoffCompletedEvent,
CrewKickoffFailedEvent, CrewKickoffFailedEvent,
CrewKickoffStartedEvent, CrewKickoffStartedEvent,
) )
from crewai.utilities.events.task_events import ( from crewai.events.types.task_events import (
TaskCompletedEvent, TaskCompletedEvent,
TaskFailedEvent, TaskFailedEvent,
TaskStartedEvent, TaskStartedEvent,
) )
from crewai.utilities.events.tool_usage_events import ( from crewai.events.types.tool_usage_events import (
ToolUsageErrorEvent, ToolUsageErrorEvent,
ToolUsageFinishedEvent, ToolUsageFinishedEvent,
ToolUsageStartedEvent, ToolUsageStartedEvent,
) )
from crewai.utilities.events.llm_events import ( from crewai.events.types.llm_events import (
LLMCallCompletedEvent, LLMCallCompletedEvent,
LLMCallFailedEvent, LLMCallFailedEvent,
LLMCallStartedEvent, LLMCallStartedEvent,
) )
from crewai.utilities.events.flow_events import ( from crewai.events.types.flow_events import (
FlowCreatedEvent, FlowCreatedEvent,
FlowStartedEvent, FlowStartedEvent,
FlowFinishedEvent, FlowFinishedEvent,
@@ -48,7 +48,7 @@ from crewai.utilities.events.flow_events import (
MethodExecutionFailedEvent, MethodExecutionFailedEvent,
FlowPlotEvent, FlowPlotEvent,
) )
from crewai.utilities.events.llm_guardrail_events import ( from crewai.events.types.llm_guardrail_events import (
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
) )
@@ -57,7 +57,7 @@ from crewai.utilities.serialization import to_serializable
from .trace_batch_manager import TraceBatchManager from .trace_batch_manager import TraceBatchManager
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemoryQueryStartedEvent, MemoryQueryStartedEvent,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemoryQueryFailedEvent, MemoryQueryFailedEvent,

View File

@@ -0,0 +1,5 @@
"""Event type definitions for CrewAI.
This module contains all event types used throughout the CrewAI system
for monitoring and extending agent, crew, task, and tool execution.
"""

View File

@@ -1,13 +1,15 @@
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union """Agent-related events moved to break circular dependencies."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Union
from pydantic import model_validator
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.base_tool import BaseTool from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool from crewai.tools.structured_tool import CrewStructuredTool
from crewai.events.base_events import BaseEvent
from .base_events import BaseEvent
if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent
class AgentExecutionStartedEvent(BaseEvent): class AgentExecutionStartedEvent(BaseEvent):
@@ -21,9 +23,9 @@ class AgentExecutionStartedEvent(BaseEvent):
model_config = {"arbitrary_types_allowed": True} model_config = {"arbitrary_types_allowed": True}
def __init__(self, **data): @model_validator(mode="after")
super().__init__(**data) def set_fingerprint_data(self):
# Set fingerprint data from the agent """Set fingerprint data from the agent if available."""
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint: if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent" self.source_type = "agent"
@@ -32,6 +34,7 @@ class AgentExecutionStartedEvent(BaseEvent):
and self.agent.fingerprint.metadata and self.agent.fingerprint.metadata
): ):
self.fingerprint_metadata = self.agent.fingerprint.metadata self.fingerprint_metadata = self.agent.fingerprint.metadata
return self
class AgentExecutionCompletedEvent(BaseEvent): class AgentExecutionCompletedEvent(BaseEvent):
@@ -42,9 +45,11 @@ class AgentExecutionCompletedEvent(BaseEvent):
output: str output: str
type: str = "agent_execution_completed" type: str = "agent_execution_completed"
def __init__(self, **data): model_config = {"arbitrary_types_allowed": True}
super().__init__(**data)
# Set fingerprint data from the agent @model_validator(mode="after")
def set_fingerprint_data(self):
"""Set fingerprint data from the agent if available."""
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint: if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent" self.source_type = "agent"
@@ -53,6 +58,7 @@ class AgentExecutionCompletedEvent(BaseEvent):
and self.agent.fingerprint.metadata and self.agent.fingerprint.metadata
): ):
self.fingerprint_metadata = self.agent.fingerprint.metadata self.fingerprint_metadata = self.agent.fingerprint.metadata
return self
class AgentExecutionErrorEvent(BaseEvent): class AgentExecutionErrorEvent(BaseEvent):
@@ -63,9 +69,11 @@ class AgentExecutionErrorEvent(BaseEvent):
error: str error: str
type: str = "agent_execution_error" type: str = "agent_execution_error"
def __init__(self, **data): model_config = {"arbitrary_types_allowed": True}
super().__init__(**data)
# Set fingerprint data from the agent @model_validator(mode="after")
def set_fingerprint_data(self):
"""Set fingerprint data from the agent if available."""
if hasattr(self.agent, "fingerprint") and self.agent.fingerprint: if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent" self.source_type = "agent"
@@ -74,6 +82,7 @@ class AgentExecutionErrorEvent(BaseEvent):
and self.agent.fingerprint.metadata and self.agent.fingerprint.metadata
): ):
self.fingerprint_metadata = self.agent.fingerprint.metadata self.fingerprint_metadata = self.agent.fingerprint.metadata
return self
# New event classes for LiteAgent # New event classes for LiteAgent
@@ -104,26 +113,6 @@ class LiteAgentExecutionErrorEvent(BaseEvent):
type: str = "lite_agent_execution_error" type: str = "lite_agent_execution_error"
# New logging events
class AgentLogsStartedEvent(BaseEvent):
"""Event emitted when agent logs should be shown at start"""
agent_role: str
task_description: Optional[str] = None
verbose: bool = False
type: str = "agent_logs_started"
class AgentLogsExecutionEvent(BaseEvent):
"""Event emitted when agent logs should be shown during execution"""
agent_role: str
formatted_answer: Any
verbose: bool = False
type: str = "agent_logs_execution"
model_config = {"arbitrary_types_allowed": True}
# Agent Eval events # Agent Eval events
class AgentEvaluationStartedEvent(BaseEvent): class AgentEvaluationStartedEvent(BaseEvent):
agent_id: str agent_id: str
@@ -132,6 +121,7 @@ class AgentEvaluationStartedEvent(BaseEvent):
iteration: int iteration: int
type: str = "agent_evaluation_started" type: str = "agent_evaluation_started"
class AgentEvaluationCompletedEvent(BaseEvent): class AgentEvaluationCompletedEvent(BaseEvent):
agent_id: str agent_id: str
agent_role: str agent_role: str
@@ -141,6 +131,7 @@ class AgentEvaluationCompletedEvent(BaseEvent):
score: Any score: Any
type: str = "agent_evaluation_completed" type: str = "agent_evaluation_completed"
class AgentEvaluationFailedEvent(BaseEvent): class AgentEvaluationFailedEvent(BaseEvent):
agent_id: str agent_id: str
agent_role: str agent_role: str

View File

@@ -1,6 +1,6 @@
from typing import TYPE_CHECKING, Any, Dict, Optional, Union from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from crewai.utilities.events.base_events import BaseEvent from crewai.events.base_events import BaseEvent
if TYPE_CHECKING: if TYPE_CHECKING:
from crewai.crew import Crew from crewai.crew import Crew

View File

@@ -2,7 +2,7 @@ from typing import Any, Dict, Optional, Union
from pydantic import BaseModel, ConfigDict from pydantic import BaseModel, ConfigDict
from .base_events import BaseEvent from crewai.events.base_events import BaseEvent
class FlowEvent(BaseEvent): class FlowEvent(BaseEvent):

View File

@@ -1,10 +1,6 @@
from typing import TYPE_CHECKING, Any from crewai.events.base_events import BaseEvent
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.utilities.events.base_events import BaseEvent
if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent
class KnowledgeRetrievalStartedEvent(BaseEvent): class KnowledgeRetrievalStartedEvent(BaseEvent):
@@ -20,7 +16,7 @@ class KnowledgeRetrievalCompletedEvent(BaseEvent):
query: str query: str
type: str = "knowledge_search_query_completed" type: str = "knowledge_search_query_completed"
agent: BaseAgent agent: BaseAgent
retrieved_knowledge: Any retrieved_knowledge: str
class KnowledgeQueryStartedEvent(BaseEvent): class KnowledgeQueryStartedEvent(BaseEvent):

View File

@@ -3,7 +3,7 @@ from typing import Any, Dict, List, Optional, Union
from pydantic import BaseModel from pydantic import BaseModel
from crewai.utilities.events.base_events import BaseEvent from crewai.events.base_events import BaseEvent
class LLMEventBase(BaseEvent): class LLMEventBase(BaseEvent):

View File

@@ -1,7 +1,7 @@
from inspect import getsource from inspect import getsource
from typing import Any, Callable, Optional, Union from typing import Any, Callable, Optional, Union
from crewai.utilities.events.base_events import BaseEvent from crewai.events.base_events import BaseEvent
class LLMGuardrailStartedEvent(BaseEvent): class LLMGuardrailStartedEvent(BaseEvent):

View File

@@ -0,0 +1,25 @@
"""Agent logging events that don't reference BaseAgent to avoid circular imports."""
from typing import Any, Optional
from crewai.events.base_events import BaseEvent
class AgentLogsStartedEvent(BaseEvent):
"""Event emitted when agent logs should be shown at start"""
agent_role: str
task_description: Optional[str] = None
verbose: bool = False
type: str = "agent_logs_started"
class AgentLogsExecutionEvent(BaseEvent):
"""Event emitted when agent logs should be shown during execution"""
agent_role: str
formatted_answer: Any
verbose: bool = False
type: str = "agent_logs_execution"
model_config = {"arbitrary_types_allowed": True}

View File

@@ -1,6 +1,6 @@
from typing import Any, Dict, Optional from typing import Any, Dict, Optional
from crewai.utilities.events.base_events import BaseEvent from crewai.events.base_events import BaseEvent
class MemoryBaseEvent(BaseEvent): class MemoryBaseEvent(BaseEvent):

View File

@@ -1,4 +1,4 @@
from crewai.utilities.events.base_events import BaseEvent from crewai.events.base_events import BaseEvent
from typing import Any, Optional from typing import Any, Optional

View File

@@ -1,7 +1,7 @@
from typing import Any, Optional from typing import Any, Optional
from crewai.tasks.task_output import TaskOutput from crewai.tasks.task_output import TaskOutput
from crewai.utilities.events.base_events import BaseEvent from crewai.events.base_events import BaseEvent
class TaskStartedEvent(BaseEvent): class TaskStartedEvent(BaseEvent):

View File

@@ -1,7 +1,7 @@
from datetime import datetime from datetime import datetime
from typing import Any, Callable, Dict, Optional from typing import Any, Callable, Dict, Optional
from .base_events import BaseEvent from crewai.events.base_events import BaseEvent
class ToolUsageEvent(BaseEvent): class ToolUsageEvent(BaseEvent):

View File

@@ -227,7 +227,7 @@ class ConsoleFormatter:
return None return None
task_content = Text() task_content = Text()
# Display task name if available, otherwise just the ID # Display task name if available, otherwise just the ID
if task_name: if task_name:
task_content.append("📋 Task: ", style="yellow bold") task_content.append("📋 Task: ", style="yellow bold")
@@ -235,7 +235,7 @@ class ConsoleFormatter:
task_content.append(f" (ID: {task_id})", style="yellow dim") task_content.append(f" (ID: {task_id})", style="yellow dim")
else: else:
task_content.append(f"📋 Task: {task_id}", style="yellow bold") task_content.append(f"📋 Task: {task_id}", style="yellow bold")
task_content.append("\nStatus: ", style="white") task_content.append("\nStatus: ", style="white")
task_content.append("Executing Task...", style="yellow dim") task_content.append("Executing Task...", style="yellow dim")

View File

@@ -1,18 +1,30 @@
import threading import threading
from typing import Any, Optional from typing import Any, Optional
from crewai.experimental.evaluation.base_evaluator import AgentEvaluationResult, AggregationStrategy from crewai.experimental.evaluation.base_evaluator import (
AgentEvaluationResult,
AggregationStrategy,
)
from crewai.agent import Agent from crewai.agent import Agent
from crewai.task import Task from crewai.task import Task
from crewai.experimental.evaluation.evaluation_display import EvaluationDisplayFormatter from crewai.experimental.evaluation.evaluation_display import EvaluationDisplayFormatter
from crewai.utilities.events.agent_events import AgentEvaluationStartedEvent, AgentEvaluationCompletedEvent, AgentEvaluationFailedEvent from crewai.events.types.agent_events import (
AgentEvaluationStartedEvent,
AgentEvaluationCompletedEvent,
AgentEvaluationFailedEvent,
)
from crewai.experimental.evaluation import BaseEvaluator, create_evaluation_callbacks from crewai.experimental.evaluation import BaseEvaluator, create_evaluation_callbacks
from collections.abc import Sequence from collections.abc import Sequence
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter from crewai.events.utils.console_formatter import ConsoleFormatter
from crewai.utilities.events.task_events import TaskCompletedEvent from crewai.events.types.task_events import TaskCompletedEvent
from crewai.utilities.events.agent_events import LiteAgentExecutionCompletedEvent from crewai.events.types.agent_events import LiteAgentExecutionCompletedEvent
from crewai.experimental.evaluation.base_evaluator import AgentAggregatedEvaluationResult, EvaluationScore, MetricCategory from crewai.experimental.evaluation.base_evaluator import (
AgentAggregatedEvaluationResult,
EvaluationScore,
MetricCategory,
)
class ExecutionState: class ExecutionState:
current_agent_id: Optional[str] = None current_agent_id: Optional[str] = None
@@ -24,6 +36,7 @@ class ExecutionState:
self.iterations_results = {} self.iterations_results = {}
self.agent_evaluators = {} self.agent_evaluators = {}
class AgentEvaluator: class AgentEvaluator:
def __init__( def __init__(
self, self,
@@ -46,27 +59,45 @@ class AgentEvaluator:
@property @property
def _execution_state(self) -> ExecutionState: def _execution_state(self) -> ExecutionState:
if not hasattr(self._thread_local, 'execution_state'): if not hasattr(self._thread_local, "execution_state"):
self._thread_local.execution_state = ExecutionState() self._thread_local.execution_state = ExecutionState()
return self._thread_local.execution_state return self._thread_local.execution_state
def _subscribe_to_events(self) -> None: def _subscribe_to_events(self) -> None:
from typing import cast from typing import cast
crewai_event_bus.register_handler(TaskCompletedEvent, cast(Any, self._handle_task_completed))
crewai_event_bus.register_handler(LiteAgentExecutionCompletedEvent, cast(Any, self._handle_lite_agent_completed)) crewai_event_bus.register_handler(
TaskCompletedEvent, cast(Any, self._handle_task_completed)
)
crewai_event_bus.register_handler(
LiteAgentExecutionCompletedEvent,
cast(Any, self._handle_lite_agent_completed),
)
def _handle_task_completed(self, source: Any, event: TaskCompletedEvent) -> None: def _handle_task_completed(self, source: Any, event: TaskCompletedEvent) -> None:
assert event.task is not None assert event.task is not None
agent = event.task.agent agent = event.task.agent
if agent and str(getattr(agent, 'id', 'unknown')) in self._execution_state.agent_evaluators: if (
self.emit_evaluation_started_event(agent_role=agent.role, agent_id=str(agent.id), task_id=str(event.task.id)) agent
and str(getattr(agent, "id", "unknown"))
in self._execution_state.agent_evaluators
):
self.emit_evaluation_started_event(
agent_role=agent.role,
agent_id=str(agent.id),
task_id=str(event.task.id),
)
state = ExecutionState() state = ExecutionState()
state.current_agent_id = str(agent.id) state.current_agent_id = str(agent.id)
state.current_task_id = str(event.task.id) state.current_task_id = str(event.task.id)
assert state.current_agent_id is not None and state.current_task_id is not None assert (
trace = self.callback.get_trace(state.current_agent_id, state.current_task_id) state.current_agent_id is not None and state.current_task_id is not None
)
trace = self.callback.get_trace(
state.current_agent_id, state.current_task_id
)
if not trace: if not trace:
return return
@@ -76,19 +107,28 @@ class AgentEvaluator:
task=event.task, task=event.task,
execution_trace=trace, execution_trace=trace,
final_output=event.output, final_output=event.output,
state=state state=state,
) )
current_iteration = self._execution_state.iteration current_iteration = self._execution_state.iteration
if current_iteration not in self._execution_state.iterations_results: if current_iteration not in self._execution_state.iterations_results:
self._execution_state.iterations_results[current_iteration] = {} self._execution_state.iterations_results[current_iteration] = {}
if agent.role not in self._execution_state.iterations_results[current_iteration]: if (
self._execution_state.iterations_results[current_iteration][agent.role] = [] agent.role
not in self._execution_state.iterations_results[current_iteration]
):
self._execution_state.iterations_results[current_iteration][
agent.role
] = []
self._execution_state.iterations_results[current_iteration][agent.role].append(result) self._execution_state.iterations_results[current_iteration][
agent.role
].append(result)
def _handle_lite_agent_completed(self, source: object, event: LiteAgentExecutionCompletedEvent) -> None: def _handle_lite_agent_completed(
self, source: object, event: LiteAgentExecutionCompletedEvent
) -> None:
agent_info = event.agent_info agent_info = event.agent_info
agent_id = str(agent_info["id"]) agent_id = str(agent_info["id"])
@@ -106,8 +146,12 @@ class AgentEvaluator:
if not target_agent: if not target_agent:
return return
assert state.current_agent_id is not None and state.current_task_id is not None assert (
trace = self.callback.get_trace(state.current_agent_id, state.current_task_id) state.current_agent_id is not None and state.current_task_id is not None
)
trace = self.callback.get_trace(
state.current_agent_id, state.current_task_id
)
if not trace: if not trace:
return return
@@ -116,7 +160,7 @@ class AgentEvaluator:
agent=target_agent, agent=target_agent,
execution_trace=trace, execution_trace=trace,
final_output=event.output, final_output=event.output,
state=state state=state,
) )
current_iteration = self._execution_state.iteration current_iteration = self._execution_state.iteration
@@ -124,10 +168,17 @@ class AgentEvaluator:
self._execution_state.iterations_results[current_iteration] = {} self._execution_state.iterations_results[current_iteration] = {}
agent_role = target_agent.role agent_role = target_agent.role
if agent_role not in self._execution_state.iterations_results[current_iteration]: if (
self._execution_state.iterations_results[current_iteration][agent_role] = [] agent_role
not in self._execution_state.iterations_results[current_iteration]
):
self._execution_state.iterations_results[current_iteration][
agent_role
] = []
self._execution_state.iterations_results[current_iteration][agent_role].append(result) self._execution_state.iterations_results[current_iteration][
agent_role
].append(result)
def set_iteration(self, iteration: int) -> None: def set_iteration(self, iteration: int) -> None:
self._execution_state.iteration = iteration self._execution_state.iteration = iteration
@@ -136,14 +187,26 @@ class AgentEvaluator:
self._execution_state.iterations_results = {} self._execution_state.iterations_results = {}
def get_evaluation_results(self) -> dict[str, list[AgentEvaluationResult]]: def get_evaluation_results(self) -> dict[str, list[AgentEvaluationResult]]:
if self._execution_state.iterations_results and self._execution_state.iteration in self._execution_state.iterations_results: if (
return self._execution_state.iterations_results[self._execution_state.iteration] self._execution_state.iterations_results
and self._execution_state.iteration
in self._execution_state.iterations_results
):
return self._execution_state.iterations_results[
self._execution_state.iteration
]
return {} return {}
def display_results_with_iterations(self) -> None: def display_results_with_iterations(self) -> None:
self.display_formatter.display_summary_results(self._execution_state.iterations_results) self.display_formatter.display_summary_results(
self._execution_state.iterations_results
)
def get_agent_evaluation(self, strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE, include_evaluation_feedback: bool = True) -> dict[str, AgentAggregatedEvaluationResult]: def get_agent_evaluation(
self,
strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE,
include_evaluation_feedback: bool = True,
) -> dict[str, AgentAggregatedEvaluationResult]:
agent_results = {} agent_results = {}
with crewai_event_bus.scoped_handlers(): with crewai_event_bus.scoped_handlers():
task_results = self.get_evaluation_results() task_results = self.get_evaluation_results()
@@ -157,13 +220,16 @@ class AgentEvaluator:
agent_id=agent_id, agent_id=agent_id,
agent_role=agent_role, agent_role=agent_role,
results=results, results=results,
strategy=strategy strategy=strategy,
) )
agent_results[agent_role] = aggregated_result agent_results[agent_role] = aggregated_result
if (
if self._execution_state.iterations_results and self._execution_state.iteration == max(self._execution_state.iterations_results.keys(), default=0): self._execution_state.iterations_results
and self._execution_state.iteration
== max(self._execution_state.iterations_results.keys(), default=0)
):
self.display_results_with_iterations() self.display_results_with_iterations()
if include_evaluation_feedback: if include_evaluation_feedback:
@@ -172,7 +238,9 @@ class AgentEvaluator:
return agent_results return agent_results
def display_evaluation_with_feedback(self) -> None: def display_evaluation_with_feedback(self) -> None:
self.display_formatter.display_evaluation_with_feedback(self._execution_state.iterations_results) self.display_formatter.display_evaluation_with_feedback(
self._execution_state.iterations_results
)
def evaluate( def evaluate(
self, self,
@@ -184,46 +252,91 @@ class AgentEvaluator:
) -> AgentEvaluationResult: ) -> AgentEvaluationResult:
result = AgentEvaluationResult( result = AgentEvaluationResult(
agent_id=state.current_agent_id or str(agent.id), agent_id=state.current_agent_id or str(agent.id),
task_id=state.current_task_id or (str(task.id) if task else "unknown_task") task_id=state.current_task_id or (str(task.id) if task else "unknown_task"),
) )
assert self.evaluators is not None assert self.evaluators is not None
task_id = str(task.id) if task else None task_id = str(task.id) if task else None
for evaluator in self.evaluators: for evaluator in self.evaluators:
try: try:
self.emit_evaluation_started_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id) self.emit_evaluation_started_event(
agent_role=agent.role, agent_id=str(agent.id), task_id=task_id
)
score = evaluator.evaluate( score = evaluator.evaluate(
agent=agent, agent=agent,
task=task, task=task,
execution_trace=execution_trace, execution_trace=execution_trace,
final_output=final_output final_output=final_output,
) )
result.metrics[evaluator.metric_category] = score result.metrics[evaluator.metric_category] = score
self.emit_evaluation_completed_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id, metric_category=evaluator.metric_category, score=score) self.emit_evaluation_completed_event(
agent_role=agent.role,
agent_id=str(agent.id),
task_id=task_id,
metric_category=evaluator.metric_category,
score=score,
)
except Exception as e: except Exception as e:
self.emit_evaluation_failed_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id, error=str(e)) self.emit_evaluation_failed_event(
self.console_formatter.print(f"Error in {evaluator.metric_category.value} evaluator: {str(e)}") agent_role=agent.role,
agent_id=str(agent.id),
task_id=task_id,
error=str(e),
)
self.console_formatter.print(
f"Error in {evaluator.metric_category.value} evaluator: {str(e)}"
)
return result return result
def emit_evaluation_started_event(self, agent_role: str, agent_id: str, task_id: str | None = None): def emit_evaluation_started_event(
self, agent_role: str, agent_id: str, task_id: str | None = None
):
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
AgentEvaluationStartedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration) AgentEvaluationStartedEvent(
agent_role=agent_role,
agent_id=agent_id,
task_id=task_id,
iteration=self._execution_state.iteration,
),
) )
def emit_evaluation_completed_event(self, agent_role: str, agent_id: str, task_id: str | None = None, metric_category: MetricCategory | None = None, score: EvaluationScore | None = None): def emit_evaluation_completed_event(
self,
agent_role: str,
agent_id: str,
task_id: str | None = None,
metric_category: MetricCategory | None = None,
score: EvaluationScore | None = None,
):
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
AgentEvaluationCompletedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration, metric_category=metric_category, score=score) AgentEvaluationCompletedEvent(
agent_role=agent_role,
agent_id=agent_id,
task_id=task_id,
iteration=self._execution_state.iteration,
metric_category=metric_category,
score=score,
),
) )
def emit_evaluation_failed_event(self, agent_role: str, agent_id: str, error: str, task_id: str | None = None): def emit_evaluation_failed_event(
self, agent_role: str, agent_id: str, error: str, task_id: str | None = None
):
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
AgentEvaluationFailedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration, error=error) AgentEvaluationFailedEvent(
agent_role=agent_role,
agent_id=agent_id,
task_id=task_id,
iteration=self._execution_state.iteration,
error=error,
),
) )
def create_default_evaluator(agents: list[Agent], llm: None = None): def create_default_evaluator(agents: list[Agent], llm: None = None):
from crewai.experimental.evaluation import ( from crewai.experimental.evaluation import (
GoalAlignmentEvaluator, GoalAlignmentEvaluator,
@@ -231,7 +344,7 @@ def create_default_evaluator(agents: list[Agent], llm: None = None):
ToolSelectionEvaluator, ToolSelectionEvaluator,
ParameterExtractionEvaluator, ParameterExtractionEvaluator,
ToolInvocationEvaluator, ToolInvocationEvaluator,
ReasoningEfficiencyEvaluator ReasoningEfficiencyEvaluator,
) )
evaluators = [ evaluators = [

View File

@@ -3,18 +3,28 @@ from typing import Dict, Any, List
from rich.table import Table from rich.table import Table
from rich.box import HEAVY_EDGE, ROUNDED from rich.box import HEAVY_EDGE, ROUNDED
from collections.abc import Sequence from collections.abc import Sequence
from crewai.experimental.evaluation.base_evaluator import AgentAggregatedEvaluationResult, AggregationStrategy, AgentEvaluationResult, MetricCategory from crewai.experimental.evaluation.base_evaluator import (
AgentAggregatedEvaluationResult,
AggregationStrategy,
AgentEvaluationResult,
MetricCategory,
)
from crewai.experimental.evaluation import EvaluationScore from crewai.experimental.evaluation import EvaluationScore
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter from crewai.events.utils.console_formatter import ConsoleFormatter
from crewai.utilities.llm_utils import create_llm from crewai.utilities.llm_utils import create_llm
class EvaluationDisplayFormatter: class EvaluationDisplayFormatter:
def __init__(self): def __init__(self):
self.console_formatter = ConsoleFormatter() self.console_formatter = ConsoleFormatter()
def display_evaluation_with_feedback(self, iterations_results: Dict[int, Dict[str, List[Any]]]): def display_evaluation_with_feedback(
self, iterations_results: Dict[int, Dict[str, List[Any]]]
):
if not iterations_results: if not iterations_results:
self.console_formatter.print("[yellow]No evaluation results to display[/yellow]") self.console_formatter.print(
"[yellow]No evaluation results to display[/yellow]"
)
return return
all_agent_roles: set[str] = set() all_agent_roles: set[str] = set()
@@ -22,7 +32,9 @@ class EvaluationDisplayFormatter:
all_agent_roles.update(iter_results.keys()) all_agent_roles.update(iter_results.keys())
for agent_role in sorted(all_agent_roles): for agent_role in sorted(all_agent_roles):
self.console_formatter.print(f"\n[bold cyan]Agent: {agent_role}[/bold cyan]") self.console_formatter.print(
f"\n[bold cyan]Agent: {agent_role}[/bold cyan]"
)
for iter_num, results in sorted(iterations_results.items()): for iter_num, results in sorted(iterations_results.items()):
if agent_role not in results or not results[agent_role]: if agent_role not in results or not results[agent_role]:
@@ -62,9 +74,7 @@ class EvaluationDisplayFormatter:
table.add_section() table.add_section()
table.add_row( table.add_row(
metric.title(), metric.title(), score_text, evaluation_score.feedback or ""
score_text,
evaluation_score.feedback or ""
) )
if aggregated_result.overall_score is not None: if aggregated_result.overall_score is not None:
@@ -82,19 +92,26 @@ class EvaluationDisplayFormatter:
table.add_row( table.add_row(
"Overall Score", "Overall Score",
f"[{overall_color}]{overall_score:.1f}[/]", f"[{overall_color}]{overall_score:.1f}[/]",
"Overall agent evaluation score" "Overall agent evaluation score",
) )
self.console_formatter.print(table) self.console_formatter.print(table)
def display_summary_results(self, iterations_results: Dict[int, Dict[str, List[AgentAggregatedEvaluationResult]]]): def display_summary_results(
self,
iterations_results: Dict[int, Dict[str, List[AgentAggregatedEvaluationResult]]],
):
if not iterations_results: if not iterations_results:
self.console_formatter.print("[yellow]No evaluation results to display[/yellow]") self.console_formatter.print(
"[yellow]No evaluation results to display[/yellow]"
)
return return
self.console_formatter.print("\n") self.console_formatter.print("\n")
table = Table(title="Agent Performance Scores \n (1-10 Higher is better)", box=HEAVY_EDGE) table = Table(
title="Agent Performance Scores \n (1-10 Higher is better)", box=HEAVY_EDGE
)
table.add_column("Agent/Metric", style="cyan") table.add_column("Agent/Metric", style="cyan")
@@ -123,11 +140,14 @@ class EvaluationDisplayFormatter:
agent_id=agent_id, agent_id=agent_id,
agent_role=agent_role, agent_role=agent_role,
results=agent_results, results=agent_results,
strategy=AggregationStrategy.SIMPLE_AVERAGE strategy=AggregationStrategy.SIMPLE_AVERAGE,
) )
valid_scores = [score.score for score in aggregated_result.metrics.values() valid_scores = [
if score.score is not None] score.score
for score in aggregated_result.metrics.values()
if score.score is not None
]
if valid_scores: if valid_scores:
avg_score = sum(valid_scores) / len(valid_scores) avg_score = sum(valid_scores) / len(valid_scores)
agent_scores_by_iteration[iter_num] = avg_score agent_scores_by_iteration[iter_num] = avg_score
@@ -137,7 +157,9 @@ class EvaluationDisplayFormatter:
if not agent_scores_by_iteration: if not agent_scores_by_iteration:
continue continue
avg_across_iterations = sum(agent_scores_by_iteration.values()) / len(agent_scores_by_iteration) avg_across_iterations = sum(agent_scores_by_iteration.values()) / len(
agent_scores_by_iteration
)
row = [f"[bold]{agent_role}[/bold]"] row = [f"[bold]{agent_role}[/bold]"]
@@ -178,9 +200,13 @@ class EvaluationDisplayFormatter:
row = [f" - {metric.title()}"] row = [f" - {metric.title()}"]
for iter_num in sorted(iterations_results.keys()): for iter_num in sorted(iterations_results.keys()):
if (iter_num in agent_metrics_by_iteration and if (
metric in agent_metrics_by_iteration[iter_num]): iter_num in agent_metrics_by_iteration
metric_score = agent_metrics_by_iteration[iter_num][metric].score and metric in agent_metrics_by_iteration[iter_num]
):
metric_score = agent_metrics_by_iteration[iter_num][
metric
].score
if metric_score is not None: if metric_score is not None:
metric_scores.append(metric_score) metric_scores.append(metric_score)
if metric_score >= 8.0: if metric_score >= 8.0:
@@ -225,7 +251,9 @@ class EvaluationDisplayFormatter:
results: Sequence[AgentEvaluationResult], results: Sequence[AgentEvaluationResult],
strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE, strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE,
) -> AgentAggregatedEvaluationResult: ) -> AgentAggregatedEvaluationResult:
metrics_by_category: dict[MetricCategory, list[EvaluationScore]] = defaultdict(list) metrics_by_category: dict[MetricCategory, list[EvaluationScore]] = defaultdict(
list
)
for result in results: for result in results:
for metric_name, evaluation_score in result.metrics.items(): for metric_name, evaluation_score in result.metrics.items():
@@ -246,19 +274,20 @@ class EvaluationDisplayFormatter:
metric=category.title(), metric=category.title(),
feedbacks=feedbacks, feedbacks=feedbacks,
scores=[s.score for s in scores], scores=[s.score for s in scores],
strategy=strategy strategy=strategy,
) )
else: else:
feedback_summary = feedbacks[0] feedback_summary = feedbacks[0]
aggregated_metrics[category] = EvaluationScore( aggregated_metrics[category] = EvaluationScore(
score=avg_score, score=avg_score, feedback=feedback_summary
feedback=feedback_summary
) )
overall_score = None overall_score = None
if aggregated_metrics: if aggregated_metrics:
valid_scores = [m.score for m in aggregated_metrics.values() if m.score is not None] valid_scores = [
m.score for m in aggregated_metrics.values() if m.score is not None
]
if valid_scores: if valid_scores:
overall_score = sum(valid_scores) / len(valid_scores) overall_score = sum(valid_scores) / len(valid_scores)
@@ -268,7 +297,7 @@ class EvaluationDisplayFormatter:
metrics=aggregated_metrics, metrics=aggregated_metrics,
overall_score=overall_score, overall_score=overall_score,
task_count=len(results), task_count=len(results),
aggregation_strategy=strategy aggregation_strategy=strategy,
) )
def _summarize_feedbacks( def _summarize_feedbacks(
@@ -277,10 +306,12 @@ class EvaluationDisplayFormatter:
metric: str, metric: str,
feedbacks: List[str], feedbacks: List[str],
scores: List[float | None], scores: List[float | None],
strategy: AggregationStrategy strategy: AggregationStrategy,
) -> str: ) -> str:
if len(feedbacks) <= 2 and all(len(fb) < 200 for fb in feedbacks): if len(feedbacks) <= 2 and all(len(fb) < 200 for fb in feedbacks):
return "\n\n".join([f"Feedback {i+1}: {fb}" for i, fb in enumerate(feedbacks)]) return "\n\n".join(
[f"Feedback {i+1}: {fb}" for i, fb in enumerate(feedbacks)]
)
try: try:
llm = create_llm() llm = create_llm()
@@ -290,20 +321,26 @@ class EvaluationDisplayFormatter:
if len(feedback) > 500: if len(feedback) > 500:
feedback = feedback[:500] + "..." feedback = feedback[:500] + "..."
score_text = f"{score:.1f}" if score is not None else "N/A" score_text = f"{score:.1f}" if score is not None else "N/A"
formatted_feedbacks.append(f"Feedback #{i+1} (Score: {score_text}):\n{feedback}") formatted_feedbacks.append(
f"Feedback #{i+1} (Score: {score_text}):\n{feedback}"
)
all_feedbacks = "\n\n" + "\n\n---\n\n".join(formatted_feedbacks) all_feedbacks = "\n\n" + "\n\n---\n\n".join(formatted_feedbacks)
strategy_guidance = "" strategy_guidance = ""
if strategy == AggregationStrategy.BEST_PERFORMANCE: if strategy == AggregationStrategy.BEST_PERFORMANCE:
strategy_guidance = "Focus on the highest-scoring aspects and strengths demonstrated." strategy_guidance = (
"Focus on the highest-scoring aspects and strengths demonstrated."
)
elif strategy == AggregationStrategy.WORST_PERFORMANCE: elif strategy == AggregationStrategy.WORST_PERFORMANCE:
strategy_guidance = "Focus on areas that need improvement and common issues across tasks." strategy_guidance = "Focus on areas that need improvement and common issues across tasks."
else: else:
strategy_guidance = "Provide a balanced analysis of strengths and weaknesses across all tasks." strategy_guidance = "Provide a balanced analysis of strengths and weaknesses across all tasks."
prompt = [ prompt = [
{"role": "system", "content": f"""You are an expert evaluator creating a comprehensive summary of agent performance feedback. {
"role": "system",
"content": f"""You are an expert evaluator creating a comprehensive summary of agent performance feedback.
Your job is to synthesize multiple feedback points about the same metric across different tasks. Your job is to synthesize multiple feedback points about the same metric across different tasks.
Create a concise, insightful summary that captures the key patterns and themes from all feedback. Create a concise, insightful summary that captures the key patterns and themes from all feedback.
@@ -315,14 +352,18 @@ class EvaluationDisplayFormatter:
3. Highlighting patterns across tasks 3. Highlighting patterns across tasks
4. 150-250 words in length 4. 150-250 words in length
The summary should be directly usable as final feedback for the agent's performance on this metric."""}, The summary should be directly usable as final feedback for the agent's performance on this metric.""",
{"role": "user", "content": f"""I need a synthesized summary of the following feedback for: },
{
"role": "user",
"content": f"""I need a synthesized summary of the following feedback for:
Agent Role: {agent_role} Agent Role: {agent_role}
Metric: {metric.title()} Metric: {metric.title()}
{all_feedbacks} {all_feedbacks}
"""} """,
},
] ]
assert llm is not None assert llm is not None
response = llm.call(prompt) response = llm.call(prompt)
@@ -330,4 +371,6 @@ class EvaluationDisplayFormatter:
return response return response
except Exception: except Exception:
return "Synthesized from multiple tasks: " + "\n\n".join([f"- {fb[:500]}..." for fb in feedbacks]) return "Synthesized from multiple tasks: " + "\n\n".join(
[f"- {fb[:500]}..." for fb in feedbacks]
)

View File

@@ -5,25 +5,23 @@ from collections.abc import Sequence
from crewai.agent import Agent from crewai.agent import Agent
from crewai.task import Task from crewai.task import Task
from crewai.utilities.events.base_event_listener import BaseEventListener from crewai.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus from crewai.events.event_bus import CrewAIEventsBus
from crewai.utilities.events.agent_events import ( from crewai.events.types.agent_events import (
AgentExecutionStartedEvent, AgentExecutionStartedEvent,
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
LiteAgentExecutionStartedEvent, LiteAgentExecutionStartedEvent,
LiteAgentExecutionCompletedEvent LiteAgentExecutionCompletedEvent,
) )
from crewai.utilities.events.tool_usage_events import ( from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent, ToolUsageFinishedEvent,
ToolUsageErrorEvent, ToolUsageErrorEvent,
ToolExecutionErrorEvent, ToolExecutionErrorEvent,
ToolSelectionErrorEvent, ToolSelectionErrorEvent,
ToolValidateInputErrorEvent ToolValidateInputErrorEvent,
)
from crewai.utilities.events.llm_events import (
LLMCallStartedEvent,
LLMCallCompletedEvent
) )
from crewai.events.types.llm_events import LLMCallStartedEvent, LLMCallCompletedEvent
class EvaluationTraceCallback(BaseEventListener): class EvaluationTraceCallback(BaseEventListener):
"""Event listener for collecting execution traces for evaluation. """Event listener for collecting execution traces for evaluation.
@@ -68,27 +66,49 @@ class EvaluationTraceCallback(BaseEventListener):
@event_bus.on(ToolUsageFinishedEvent) @event_bus.on(ToolUsageFinishedEvent)
def on_tool_completed(source, event: ToolUsageFinishedEvent): def on_tool_completed(source, event: ToolUsageFinishedEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.output, success=True) self.on_tool_use(
event.tool_name, event.tool_args, event.output, success=True
)
@event_bus.on(ToolUsageErrorEvent) @event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent): def on_tool_usage_error(source, event: ToolUsageErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error, self.on_tool_use(
success=False, error_type="usage_error") event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="usage_error",
)
@event_bus.on(ToolExecutionErrorEvent) @event_bus.on(ToolExecutionErrorEvent)
def on_tool_execution_error(source, event: ToolExecutionErrorEvent): def on_tool_execution_error(source, event: ToolExecutionErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error, self.on_tool_use(
success=False, error_type="execution_error") event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="execution_error",
)
@event_bus.on(ToolSelectionErrorEvent) @event_bus.on(ToolSelectionErrorEvent)
def on_tool_selection_error(source, event: ToolSelectionErrorEvent): def on_tool_selection_error(source, event: ToolSelectionErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error, self.on_tool_use(
success=False, error_type="selection_error") event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="selection_error",
)
@event_bus.on(ToolValidateInputErrorEvent) @event_bus.on(ToolValidateInputErrorEvent)
def on_tool_validate_input_error(source, event: ToolValidateInputErrorEvent): def on_tool_validate_input_error(source, event: ToolValidateInputErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error, self.on_tool_use(
success=False, error_type="validation_error") event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="validation_error",
)
@event_bus.on(LLMCallStartedEvent) @event_bus.on(LLMCallStartedEvent)
def on_llm_call_started(source, event: LLMCallStartedEvent): def on_llm_call_started(source, event: LLMCallStartedEvent):
@@ -99,7 +119,7 @@ class EvaluationTraceCallback(BaseEventListener):
self.on_llm_call_end(event.messages, event.response) self.on_llm_call_end(event.messages, event.response)
def on_lite_agent_start(self, agent_info: dict[str, Any]): def on_lite_agent_start(self, agent_info: dict[str, Any]):
self.current_agent_id = agent_info['id'] self.current_agent_id = agent_info["id"]
self.current_task_id = "lite_task" self.current_task_id = "lite_task"
trace_key = f"{self.current_agent_id}_{self.current_task_id}" trace_key = f"{self.current_agent_id}_{self.current_task_id}"
@@ -110,7 +130,7 @@ class EvaluationTraceCallback(BaseEventListener):
tool_uses=[], tool_uses=[],
llm_calls=[], llm_calls=[],
start_time=datetime.now(), start_time=datetime.now(),
final_output=None final_output=None,
) )
def _init_trace(self, trace_key: str, **kwargs: Any): def _init_trace(self, trace_key: str, **kwargs: Any):
@@ -128,7 +148,7 @@ class EvaluationTraceCallback(BaseEventListener):
tool_uses=[], tool_uses=[],
llm_calls=[], llm_calls=[],
start_time=datetime.now(), start_time=datetime.now(),
final_output=None final_output=None,
) )
def on_agent_finish(self, agent: Agent, task: Task, output: Any): def on_agent_finish(self, agent: Agent, task: Task, output: Any):
@@ -151,8 +171,14 @@ class EvaluationTraceCallback(BaseEventListener):
self._reset_current() self._reset_current()
def on_tool_use(self, tool_name: str, tool_args: dict[str, Any] | str, result: Any, def on_tool_use(
success: bool = True, error_type: str | None = None): self,
tool_name: str,
tool_args: dict[str, Any] | str,
result: Any,
success: bool = True,
error_type: str | None = None,
):
if not self.current_agent_id or not self.current_task_id: if not self.current_agent_id or not self.current_task_id:
return return
@@ -163,7 +189,7 @@ class EvaluationTraceCallback(BaseEventListener):
"args": tool_args, "args": tool_args,
"result": result, "result": result,
"success": success, "success": success,
"timestamp": datetime.now() "timestamp": datetime.now(),
} }
# Add error information if applicable # Add error information if applicable
@@ -173,7 +199,11 @@ class EvaluationTraceCallback(BaseEventListener):
self.traces[trace_key]["tool_uses"].append(tool_use) self.traces[trace_key]["tool_uses"].append(tool_use)
def on_llm_call_start(self, messages: str | Sequence[dict[str, Any]] | None, tools: Sequence[dict[str, Any]] | None = None): def on_llm_call_start(
self,
messages: str | Sequence[dict[str, Any]] | None,
tools: Sequence[dict[str, Any]] | None = None,
):
if not self.current_agent_id or not self.current_task_id: if not self.current_agent_id or not self.current_task_id:
return return
@@ -186,10 +216,12 @@ class EvaluationTraceCallback(BaseEventListener):
"tools": tools, "tools": tools,
"start_time": datetime.now(), "start_time": datetime.now(),
"response": None, "response": None,
"end_time": None "end_time": None,
} }
def on_llm_call_end(self, messages: str | list[dict[str, Any]] | None, response: Any): def on_llm_call_end(
self, messages: str | list[dict[str, Any]] | None, response: Any
):
if not self.current_agent_id or not self.current_task_id: if not self.current_agent_id or not self.current_task_id:
return return
@@ -213,7 +245,7 @@ class EvaluationTraceCallback(BaseEventListener):
"response": response, "response": response,
"start_time": start_time, "start_time": start_time,
"end_time": current_time, "end_time": current_time,
"total_tokens": total_tokens "total_tokens": total_tokens,
} }
self.traces[trace_key]["llm_calls"].append(llm_call) self.traces[trace_key]["llm_calls"].append(llm_call)
@@ -227,7 +259,7 @@ class EvaluationTraceCallback(BaseEventListener):
def create_evaluation_callbacks() -> EvaluationTraceCallback: def create_evaluation_callbacks() -> EvaluationTraceCallback:
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
callback = EvaluationTraceCallback() callback = EvaluationTraceCallback()
callback.setup_listeners(crewai_event_bus) callback.setup_listeners(crewai_event_bus)

View File

@@ -25,8 +25,8 @@ from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.persistence.base import FlowPersistence from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.types import FlowExecutionData from crewai.flow.types import FlowExecutionData
from crewai.flow.utils import get_possible_return_constants from crewai.flow.utils import get_possible_return_constants
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.flow_events import ( from crewai.events.types.flow_events import (
FlowCreatedEvent, FlowCreatedEvent,
FlowFinishedEvent, FlowFinishedEvent,
FlowPlotEvent, FlowPlotEvent,
@@ -35,10 +35,10 @@ from crewai.utilities.events.flow_events import (
MethodExecutionFinishedEvent, MethodExecutionFinishedEvent,
MethodExecutionStartedEvent, MethodExecutionStartedEvent,
) )
from crewai.utilities.events.listeners.tracing.trace_listener import ( from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener, TraceCollectionListener,
) )
from crewai.utilities.events.listeners.tracing.utils import ( from crewai.events.listeners.tracing.utils import (
is_tracing_enabled, is_tracing_enabled,
) )
from crewai.utilities.printer import Printer from crewai.utilities.printer import Printer
@@ -934,12 +934,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
method = self._methods[start_method_name] method = self._methods[start_method_name]
enhanced_method = self._inject_trigger_payload_for_start_method(method) enhanced_method = self._inject_trigger_payload_for_start_method(method)
result = await self._execute_method( result = await self._execute_method(start_method_name, enhanced_method)
start_method_name, enhanced_method
)
await self._execute_listeners(start_method_name, result) await self._execute_listeners(start_method_name, result)
def _inject_trigger_payload_for_start_method(self, original_method: Callable) -> Callable: def _inject_trigger_payload_for_start_method(
self, original_method: Callable
) -> Callable:
def prepare_kwargs(*args, **kwargs): def prepare_kwargs(*args, **kwargs):
inputs = baggage.get_baggage("flow_inputs") or {} inputs = baggage.get_baggage("flow_inputs") or {}
trigger_payload = inputs.get("crewai_trigger_payload") trigger_payload = inputs.get("crewai_trigger_payload")
@@ -952,15 +952,17 @@ class Flow(Generic[T], metaclass=FlowMeta):
elif trigger_payload is not None: elif trigger_payload is not None:
self._log_flow_event( self._log_flow_event(
f"Trigger payload available but {original_method.__name__} doesn't accept crewai_trigger_payload parameter", f"Trigger payload available but {original_method.__name__} doesn't accept crewai_trigger_payload parameter",
color="yellow" color="yellow",
) )
return args, kwargs return args, kwargs
if asyncio.iscoroutinefunction(original_method): if asyncio.iscoroutinefunction(original_method):
async def enhanced_method(*args, **kwargs): async def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs) args, kwargs = prepare_kwargs(*args, **kwargs)
return await original_method(*args, **kwargs) return await original_method(*args, **kwargs)
else: else:
def enhanced_method(*args, **kwargs): def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs) args, kwargs = prepare_kwargs(*args, **kwargs)
return original_method(*args, **kwargs) return original_method(*args, **kwargs)

View File

@@ -62,13 +62,13 @@ from crewai.utilities.agent_utils import (
render_text_description_and_args, render_text_description_and_args,
) )
from crewai.utilities.converter import generate_model_description from crewai.utilities.converter import generate_model_description
from crewai.utilities.events.agent_events import ( from crewai.events.types.logging_events import AgentLogsExecutionEvent
AgentLogsExecutionEvent, from crewai.events.types.agent_events import (
LiteAgentExecutionCompletedEvent, LiteAgentExecutionCompletedEvent,
LiteAgentExecutionErrorEvent, LiteAgentExecutionErrorEvent,
LiteAgentExecutionStartedEvent, LiteAgentExecutionStartedEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.llm_utils import create_llm from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer from crewai.utilities.printer import Printer

View File

@@ -23,14 +23,14 @@ from dotenv import load_dotenv
from litellm.types.utils import ChatCompletionDeltaToolCall from litellm.types.utils import ChatCompletionDeltaToolCall
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from crewai.utilities.events.llm_events import ( from crewai.events.types.llm_events import (
LLMCallCompletedEvent, LLMCallCompletedEvent,
LLMCallFailedEvent, LLMCallFailedEvent,
LLMCallStartedEvent, LLMCallStartedEvent,
LLMCallType, LLMCallType,
LLMStreamChunkEvent, LLMStreamChunkEvent,
) )
from crewai.utilities.events.tool_usage_events import ( from crewai.events.types.tool_usage_events import (
ToolUsageStartedEvent, ToolUsageStartedEvent,
ToolUsageFinishedEvent, ToolUsageFinishedEvent,
ToolUsageErrorEvent, ToolUsageErrorEvent,
@@ -52,7 +52,7 @@ import io
from typing import TextIO from typing import TextIO
from crewai.llms.base_llm import BaseLLM from crewai.llms.base_llm import BaseLLM
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.exceptions.context_window_exceeding_exception import ( from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException, LLMContextLengthExceededException,
) )

View File

@@ -6,8 +6,8 @@ from pydantic import PrivateAttr
from crewai.memory.entity.entity_memory_item import EntityMemoryItem from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.memory import Memory from crewai.memory.memory import Memory
from crewai.memory.storage.rag_storage import RAGStorage from crewai.memory.storage.rag_storage import RAGStorage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemoryQueryStartedEvent, MemoryQueryStartedEvent,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemoryQueryFailedEvent, MemoryQueryFailedEvent,

View File

@@ -4,8 +4,8 @@ import time
from crewai.memory.external.external_memory_item import ExternalMemoryItem from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.memory import Memory from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage from crewai.memory.storage.interface import Storage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemoryQueryStartedEvent, MemoryQueryStartedEvent,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemoryQueryFailedEvent, MemoryQueryFailedEvent,

View File

@@ -3,8 +3,8 @@ import time
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.memory.memory import Memory from crewai.memory.memory import Memory
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemoryQueryStartedEvent, MemoryQueryStartedEvent,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemoryQueryFailedEvent, MemoryQueryFailedEvent,

View File

@@ -6,8 +6,8 @@ from pydantic import PrivateAttr
from crewai.memory.memory import Memory from crewai.memory.memory import Memory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.memory.storage.rag_storage import RAGStorage from crewai.memory.storage.rag_storage import RAGStorage
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemoryQueryStartedEvent, MemoryQueryStartedEvent,
MemoryQueryCompletedEvent, MemoryQueryCompletedEvent,
MemoryQueryFailedEvent, MemoryQueryFailedEvent,

View File

@@ -43,12 +43,12 @@ from crewai.utilities.config import process_config
from crewai.utilities.constants import NOT_SPECIFIED, _NotSpecified from crewai.utilities.constants import NOT_SPECIFIED, _NotSpecified
from crewai.utilities.guardrail import process_guardrail, GuardrailResult from crewai.utilities.guardrail import process_guardrail, GuardrailResult
from crewai.utilities.converter import Converter, convert_to_model from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.events import ( from crewai.events.event_types import (
TaskCompletedEvent, TaskCompletedEvent,
TaskFailedEvent, TaskFailedEvent,
TaskStartedEvent, TaskStartedEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.i18n import I18N from crewai.utilities.i18n import I18N
from crewai.utilities.printer import Printer from crewai.utilities.printer import Printer
from crewai.utilities.string_utils import interpolate_only from crewai.utilities.string_utils import interpolate_only
@@ -160,11 +160,10 @@ class Task(BaseModel):
) )
max_retries: Optional[int] = Field( max_retries: Optional[int] = Field(
default=None, default=None,
description="[DEPRECATED] Maximum number of retries when guardrail fails. Use guardrail_max_retries instead. Will be removed in v1.0.0" description="[DEPRECATED] Maximum number of retries when guardrail fails. Use guardrail_max_retries instead. Will be removed in v1.0.0",
) )
guardrail_max_retries: int = Field( guardrail_max_retries: int = Field(
default=3, default=3, description="Maximum number of retries when guardrail fails"
description="Maximum number of retries when guardrail fails"
) )
retry_count: int = Field(default=0, description="Current number of retries") retry_count: int = Field(default=0, description="Current number of retries")
start_time: Optional[datetime.datetime] = Field( start_time: Optional[datetime.datetime] = Field(
@@ -367,7 +366,7 @@ class Task(BaseModel):
"The 'max_retries' parameter is deprecated and will be removed in CrewAI v1.0.0. " "The 'max_retries' parameter is deprecated and will be removed in CrewAI v1.0.0. "
"Please use 'guardrail_max_retries' instead.", "Please use 'guardrail_max_retries' instead.",
DeprecationWarning, DeprecationWarning,
stacklevel=2 stacklevel=2,
) )
self.guardrail_max_retries = self.max_retries self.guardrail_max_retries = self.max_retries
return self return self
@@ -532,11 +531,11 @@ class Task(BaseModel):
def _process_guardrail(self, task_output: TaskOutput) -> GuardrailResult: def _process_guardrail(self, task_output: TaskOutput) -> GuardrailResult:
assert self._guardrail is not None assert self._guardrail is not None
from crewai.utilities.events import ( from crewai.events.event_types import (
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,

View File

@@ -20,8 +20,8 @@ from crewai.utilities.agent_utils import (
get_tool_names, get_tool_names,
render_text_description_and_args, render_text_description_and_args,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ( from crewai.events.types.tool_usage_events import (
ToolSelectionErrorEvent, ToolSelectionErrorEvent,
ToolUsageErrorEvent, ToolUsageErrorEvent,
ToolUsageFinishedEvent, ToolUsageFinishedEvent,

View File

@@ -9,8 +9,8 @@ from crewai.agent import Agent
from crewai.llm import BaseLLM from crewai.llm import BaseLLM
from crewai.task import Task from crewai.task import Task
from crewai.tasks.task_output import TaskOutput from crewai.tasks.task_output import TaskOutput
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.crew_events import CrewTestResultEvent from crewai.events.types.crew_events import CrewTestResultEvent
class TaskEvaluationPydanticOutput(BaseModel): class TaskEvaluationPydanticOutput(BaseModel):

View File

@@ -3,7 +3,8 @@ from typing import List
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from crewai.utilities import Converter from crewai.utilities import Converter
from crewai.utilities.events import TaskEvaluationEvent, crewai_event_bus from crewai.events.types.task_events import TaskEvaluationEvent
from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
from crewai.utilities.training_converter import TrainingConverter from crewai.utilities.training_converter import TrainingConverter

View File

@@ -1,124 +1,140 @@
from .crew_events import ( """Backwards compatibility - this module has moved to crewai.events."""
CrewKickoffStartedEvent,
CrewKickoffCompletedEvent, import warnings
CrewKickoffFailedEvent, from abc import ABC
CrewTrainStartedEvent, from collections.abc import Callable
CrewTrainCompletedEvent, from typing import Any, Type, TypeVar
CrewTrainFailedEvent,
CrewTestStartedEvent, from typing_extensions import deprecated
CrewTestCompletedEvent, import crewai.events as new_events
CrewTestFailedEvent, from crewai.events.base_events import BaseEvent
) from crewai.events.event_types import EventTypes
from .llm_guardrail_events import (
LLMGuardrailCompletedEvent, EventT = TypeVar("EventT", bound=BaseEvent)
LLMGuardrailStartedEvent,
)
from .agent_events import ( warnings.warn(
AgentExecutionStartedEvent, "Importing from 'crewai.utilities.events' is deprecated and will be removed in v1.0.0. "
AgentExecutionCompletedEvent, "Please use 'crewai.events' instead.",
AgentExecutionErrorEvent, DeprecationWarning,
AgentEvaluationStartedEvent, stacklevel=2
AgentEvaluationCompletedEvent,
AgentEvaluationFailedEvent,
)
from .task_events import (
TaskStartedEvent,
TaskCompletedEvent,
TaskFailedEvent,
TaskEvaluationEvent,
)
from .flow_events import (
FlowCreatedEvent,
FlowStartedEvent,
FlowFinishedEvent,
FlowPlotEvent,
MethodExecutionStartedEvent,
MethodExecutionFinishedEvent,
MethodExecutionFailedEvent,
)
from .crewai_event_bus import CrewAIEventsBus, crewai_event_bus
from .tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
ToolUsageStartedEvent,
ToolExecutionErrorEvent,
ToolSelectionErrorEvent,
ToolUsageEvent,
ToolValidateInputErrorEvent,
)
from .llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMCallType,
LLMStreamChunkEvent,
) )
from .memory_events import (
MemorySaveStartedEvent,
MemorySaveCompletedEvent,
MemorySaveFailedEvent,
MemoryQueryStartedEvent,
MemoryQueryCompletedEvent,
MemoryQueryFailedEvent,
MemoryRetrievalStartedEvent,
MemoryRetrievalCompletedEvent,
)
# events @deprecated("Use 'from crewai.events import BaseEventListener' instead")
from .event_listener import EventListener class BaseEventListener(new_events.BaseEventListener, ABC):
"""Deprecated: Use crewai.events.BaseEventListener instead."""
pass
@deprecated("Use 'from crewai.events import crewai_event_bus' instead")
class crewai_event_bus: # noqa: N801
"""Deprecated: Use crewai.events.crewai_event_bus instead."""
@classmethod
def on(
cls, event_type: Type[EventT]
) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
"""Delegate to the actual event bus instance."""
return new_events.crewai_event_bus.on(event_type)
@classmethod
def emit(cls, source: Any, event: BaseEvent) -> None:
"""Delegate to the actual event bus instance."""
return new_events.crewai_event_bus.emit(source, event)
@classmethod
def register_handler(
cls, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
) -> None:
"""Delegate to the actual event bus instance."""
return new_events.crewai_event_bus.register_handler(event_type, handler)
@classmethod
def scoped_handlers(cls) -> Any:
"""Delegate to the actual event bus instance."""
return new_events.crewai_event_bus.scoped_handlers()
@deprecated("Use 'from crewai.events import CrewKickoffStartedEvent' instead")
class CrewKickoffStartedEvent(new_events.CrewKickoffStartedEvent):
"""Deprecated: Use crewai.events.CrewKickoffStartedEvent instead."""
pass
@deprecated("Use 'from crewai.events import CrewKickoffCompletedEvent' instead")
class CrewKickoffCompletedEvent(new_events.CrewKickoffCompletedEvent):
"""Deprecated: Use crewai.events.CrewKickoffCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import AgentExecutionCompletedEvent' instead")
class AgentExecutionCompletedEvent(new_events.AgentExecutionCompletedEvent):
"""Deprecated: Use crewai.events.AgentExecutionCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemoryQueryCompletedEvent' instead")
class MemoryQueryCompletedEvent(new_events.MemoryQueryCompletedEvent):
"""Deprecated: Use crewai.events.MemoryQueryCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemorySaveCompletedEvent' instead")
class MemorySaveCompletedEvent(new_events.MemorySaveCompletedEvent):
"""Deprecated: Use crewai.events.MemorySaveCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemorySaveStartedEvent' instead")
class MemorySaveStartedEvent(new_events.MemorySaveStartedEvent):
"""Deprecated: Use crewai.events.MemorySaveStartedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemoryQueryStartedEvent' instead")
class MemoryQueryStartedEvent(new_events.MemoryQueryStartedEvent):
"""Deprecated: Use crewai.events.MemoryQueryStartedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemoryRetrievalCompletedEvent' instead")
class MemoryRetrievalCompletedEvent(new_events.MemoryRetrievalCompletedEvent):
"""Deprecated: Use crewai.events.MemoryRetrievalCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemorySaveFailedEvent' instead")
class MemorySaveFailedEvent(new_events.MemorySaveFailedEvent):
"""Deprecated: Use crewai.events.MemorySaveFailedEvent instead."""
pass
@deprecated("Use 'from crewai.events import MemoryQueryFailedEvent' instead")
class MemoryQueryFailedEvent(new_events.MemoryQueryFailedEvent):
"""Deprecated: Use crewai.events.MemoryQueryFailedEvent instead."""
pass
@deprecated("Use 'from crewai.events import KnowledgeRetrievalStartedEvent' instead")
class KnowledgeRetrievalStartedEvent(new_events.KnowledgeRetrievalStartedEvent):
"""Deprecated: Use crewai.events.KnowledgeRetrievalStartedEvent instead."""
pass
@deprecated("Use 'from crewai.events import KnowledgeRetrievalCompletedEvent' instead")
class KnowledgeRetrievalCompletedEvent(new_events.KnowledgeRetrievalCompletedEvent):
"""Deprecated: Use crewai.events.KnowledgeRetrievalCompletedEvent instead."""
pass
@deprecated("Use 'from crewai.events import LLMStreamChunkEvent' instead")
class LLMStreamChunkEvent(new_events.LLMStreamChunkEvent):
"""Deprecated: Use crewai.events.LLMStreamChunkEvent instead."""
pass
__all__ = [ __all__ = [
"EventListener", 'BaseEventListener',
"CrewAIEventsBus", 'crewai_event_bus',
"crewai_event_bus", 'CrewKickoffStartedEvent',
"AgentExecutionStartedEvent", 'CrewKickoffCompletedEvent',
"AgentExecutionCompletedEvent", 'AgentExecutionCompletedEvent',
"AgentExecutionErrorEvent", 'MemoryQueryCompletedEvent',
"AgentEvaluationStartedEvent", 'MemorySaveCompletedEvent',
"AgentEvaluationCompletedEvent", 'MemorySaveStartedEvent',
"AgentEvaluationFailedEvent", 'MemoryQueryStartedEvent',
"TaskStartedEvent", 'MemoryRetrievalCompletedEvent',
"TaskCompletedEvent", 'MemorySaveFailedEvent',
"TaskFailedEvent", 'MemoryQueryFailedEvent',
"TaskEvaluationEvent", 'KnowledgeRetrievalStartedEvent',
"FlowCreatedEvent", 'KnowledgeRetrievalCompletedEvent',
"FlowStartedEvent", 'LLMStreamChunkEvent',
"FlowFinishedEvent",
"FlowPlotEvent",
"MethodExecutionStartedEvent",
"MethodExecutionFinishedEvent",
"MethodExecutionFailedEvent",
"LLMCallCompletedEvent",
"LLMCallFailedEvent",
"LLMCallStartedEvent",
"LLMCallType",
"LLMStreamChunkEvent",
"MemorySaveStartedEvent",
"MemorySaveCompletedEvent",
"MemorySaveFailedEvent",
"MemoryQueryStartedEvent",
"MemoryQueryCompletedEvent",
"MemoryQueryFailedEvent",
"MemoryRetrievalStartedEvent",
"MemoryRetrievalCompletedEvent",
"EventListener",
"CrewKickoffStartedEvent",
"CrewKickoffCompletedEvent",
"CrewKickoffFailedEvent",
"CrewTrainStartedEvent",
"CrewTrainCompletedEvent",
"CrewTrainFailedEvent",
"CrewTestStartedEvent",
"CrewTestCompletedEvent",
"CrewTestFailedEvent",
"LLMGuardrailCompletedEvent",
"LLMGuardrailStartedEvent",
"ToolUsageFinishedEvent",
"ToolUsageErrorEvent",
"ToolUsageStartedEvent",
"ToolExecutionErrorEvent",
"ToolSelectionErrorEvent",
"ToolUsageEvent",
"ToolValidateInputErrorEvent",
] ]
__deprecated__ = "Use 'crewai.events' instead of 'crewai.utilities.events'"

View File

@@ -1,16 +1,13 @@
from abc import ABC, abstractmethod """Backwards compatibility stub for crewai.utilities.events.base_event_listener."""
from logging import Logger
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus, crewai_event_bus import warnings
from crewai.events import BaseEventListener
warnings.warn(
"Importing from 'crewai.utilities.events.base_event_listener' is deprecated and will be removed in v1.0.0. "
"Please use 'from crewai.events import BaseEventListener' instead.",
DeprecationWarning,
stacklevel=2,
)
class BaseEventListener(ABC): __all__ = ["BaseEventListener"]
verbose: bool = False
def __init__(self):
super().__init__()
self.setup_listeners(crewai_event_bus)
@abstractmethod
def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
pass

View File

@@ -1,115 +1,13 @@
import threading """Backwards compatibility stub for crewai.utilities.events.crewai_event_bus."""
from contextlib import contextmanager
from typing import Any, Callable, Dict, List, Type, TypeVar, cast
from blinker import Signal import warnings
from crewai.events import crewai_event_bus
from crewai.utilities.events.base_events import BaseEvent warnings.warn(
from crewai.utilities.events.event_types import EventTypes "Importing from 'crewai.utilities.events.crewai_event_bus' is deprecated and will be removed in v1.0.0. "
"Please use 'from crewai.events import crewai_event_bus' instead.",
DeprecationWarning,
stacklevel=2,
)
EventT = TypeVar("EventT", bound=BaseEvent) __all__ = ["crewai_event_bus"]
class CrewAIEventsBus:
"""
A singleton event bus that uses blinker signals for event handling.
Allows both internal (Flow/Crew) and external event handling.
"""
_instance = None
_lock = threading.Lock()
def __new__(cls):
if cls._instance is None:
with cls._lock:
if cls._instance is None: # prevent race condition
cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
cls._instance._initialize()
return cls._instance
def _initialize(self) -> None:
"""Initialize the event bus internal state"""
self._signal = Signal("crewai_event_bus")
self._handlers: Dict[Type[BaseEvent], List[Callable]] = {}
def on(
self, event_type: Type[EventT]
) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
"""
Decorator to register an event handler for a specific event type.
Usage:
@crewai_event_bus.on(AgentExecutionCompletedEvent)
def on_agent_execution_completed(
source: Any, event: AgentExecutionCompletedEvent
):
print(f"👍 Agent '{event.agent}' completed task")
print(f" Output: {event.output}")
"""
def decorator(
handler: Callable[[Any, EventT], None],
) -> Callable[[Any, EventT], None]:
if event_type not in self._handlers:
self._handlers[event_type] = []
self._handlers[event_type].append(
cast(Callable[[Any, EventT], None], handler)
)
return handler
return decorator
def emit(self, source: Any, event: BaseEvent) -> None:
"""
Emit an event to all registered handlers
Args:
source: The object emitting the event
event: The event instance to emit
"""
for event_type, handlers in self._handlers.items():
if isinstance(event, event_type):
for handler in handlers:
try:
handler(source, event)
except Exception as e:
print(
f"[EventBus Error] Handler '{handler.__name__}' failed for event '{event_type.__name__}': {e}"
)
self._signal.send(source, event=event)
def register_handler(
self, event_type: Type[EventTypes], handler: Callable[[Any, EventTypes], None]
) -> None:
"""Register an event handler for a specific event type"""
if event_type not in self._handlers:
self._handlers[event_type] = []
self._handlers[event_type].append(
cast(Callable[[Any, EventTypes], None], handler)
)
@contextmanager
def scoped_handlers(self):
"""
Context manager for temporary event handling scope.
Useful for testing or temporary event handling.
Usage:
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(CrewKickoffStarted)
def temp_handler(source, event):
print("Temporary handler")
# Do stuff...
# Handlers are cleared after the context
"""
previous_handlers = self._handlers.copy()
self._handlers.clear()
try:
yield
finally:
self._handlers = previous_handlers
# Global instance
crewai_event_bus = CrewAIEventsBus()

View File

@@ -2,6 +2,7 @@ from typing import Any, Callable, Optional, Tuple, Union
from pydantic import BaseModel, field_validator from pydantic import BaseModel, field_validator
class GuardrailResult(BaseModel): class GuardrailResult(BaseModel):
"""Result from a task guardrail execution. """Result from a task guardrail execution.
@@ -14,6 +15,7 @@ class GuardrailResult(BaseModel):
result (Any, optional): The validated/transformed result if successful result (Any, optional): The validated/transformed result if successful
error (str, optional): Error message if validation failed error (str, optional): Error message if validation failed
""" """
success: bool success: bool
result: Optional[Any] = None result: Optional[Any] = None
error: Optional[str] = None error: Optional[str] = None
@@ -24,9 +26,13 @@ class GuardrailResult(BaseModel):
values = info.data values = info.data
if "success" in values: if "success" in values:
if values["success"] and v and "error" in values and values["error"]: if values["success"] and v and "error" in values and values["error"]:
raise ValueError("Cannot have both result and error when success is True") raise ValueError(
"Cannot have both result and error when success is True"
)
if not values["success"] and v and "result" in values and values["result"]: if not values["success"] and v and "result" in values and values["result"]:
raise ValueError("Cannot have both result and error when success is False") raise ValueError(
"Cannot have both result and error when success is False"
)
return v return v
@classmethod @classmethod
@@ -44,11 +50,13 @@ class GuardrailResult(BaseModel):
return cls( return cls(
success=success, success=success,
result=data if success else None, result=data if success else None,
error=data if not success else None error=data if not success else None,
) )
def process_guardrail(output: Any, guardrail: Callable, retry_count: int) -> GuardrailResult: def process_guardrail(
output: Any, guardrail: Callable, retry_count: int
) -> GuardrailResult:
"""Process the guardrail for the agent output. """Process the guardrail for the agent output.
Args: Args:
@@ -60,21 +68,21 @@ def process_guardrail(output: Any, guardrail: Callable, retry_count: int) -> Gua
from crewai.task import TaskOutput from crewai.task import TaskOutput
from crewai.lite_agent import LiteAgentOutput from crewai.lite_agent import LiteAgentOutput
assert isinstance(output, TaskOutput) or isinstance(output, LiteAgentOutput), "Output must be a TaskOutput or LiteAgentOutput" assert isinstance(output, TaskOutput) or isinstance(
output, LiteAgentOutput
), "Output must be a TaskOutput or LiteAgentOutput"
assert guardrail is not None assert guardrail is not None
from crewai.utilities.events import ( from crewai.events.types.llm_guardrail_events import (
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
crewai_event_bus.emit( crewai_event_bus.emit(
None, None,
LLMGuardrailStartedEvent( LLMGuardrailStartedEvent(guardrail=guardrail, retry_count=retry_count),
guardrail=guardrail, retry_count=retry_count
),
) )
result = guardrail(output) result = guardrail(output)

View File

@@ -8,8 +8,8 @@ from crewai.agent import Agent
from crewai.task import Task from crewai.task import Task
from crewai.utilities import I18N from crewai.utilities import I18N
from crewai.llm import LLM from crewai.llm import LLM
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.reasoning_events import ( from crewai.events.types.reasoning_events import (
AgentReasoningStartedEvent, AgentReasoningStartedEvent,
AgentReasoningCompletedEvent, AgentReasoningCompletedEvent,
AgentReasoningFailedEvent, AgentReasoningFailedEvent,

View File

@@ -19,8 +19,8 @@ from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage from crewai.tools.tool_usage import ToolUsage
from crewai.utilities import RPMController from crewai.utilities import RPMController
from crewai.utilities.errors import AgentRepositoryError from crewai.utilities.errors import AgentRepositoryError
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageFinishedEvent from crewai.events.types.tool_usage_events import ToolUsageFinishedEvent
from crewai.process import Process from crewai.process import Process

View File

@@ -9,9 +9,9 @@ from crewai import LLM, Agent
from crewai.flow import Flow, start from crewai.flow import Flow, start
from crewai.lite_agent import LiteAgent, LiteAgentOutput from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai.tools import BaseTool from crewai.tools import BaseTool
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.agent_events import LiteAgentExecutionStartedEvent from crewai.events.types.agent_events import LiteAgentExecutionStartedEvent
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent from crewai.events.types.tool_usage_events import ToolUsageStartedEvent
from crewai.llms.base_llm import BaseLLM from crewai.llms.base_llm import BaseLLM
from unittest.mock import patch from unittest.mock import patch
@@ -322,7 +322,7 @@ def test_sets_parent_flow_when_inside_flow():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_is_called_using_string(): def test_guardrail_is_called_using_string():
guardrail_events = defaultdict(list) guardrail_events = defaultdict(list)
from crewai.utilities.events import ( from crewai.events.event_types import (
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
) )
@@ -359,7 +359,7 @@ def test_guardrail_is_called_using_string():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_is_called_using_callable(): def test_guardrail_is_called_using_callable():
guardrail_events = defaultdict(list) guardrail_events = defaultdict(list)
from crewai.utilities.events import ( from crewai.events.event_types import (
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
) )
@@ -392,7 +392,7 @@ def test_guardrail_is_called_using_callable():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_reached_attempt_limit(): def test_guardrail_reached_attempt_limit():
guardrail_events = defaultdict(list) guardrail_events = defaultdict(list)
from crewai.utilities.events import ( from crewai.events.event_types import (
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
) )

View File

@@ -80,7 +80,7 @@ def auto_mock_telemetry(request):
with ( with (
patch( patch(
"crewai.utilities.events.event_listener.Telemetry", "crewai.events.event_listener.Telemetry",
mock_telemetry_class, mock_telemetry_class,
), ),
patch("crewai.tools.tool_usage.Telemetry", mock_telemetry_class), patch("crewai.tools.tool_usage.Telemetry", mock_telemetry_class),

View File

@@ -13,13 +13,18 @@ from crewai.experimental.evaluation import (
ToolInvocationEvaluator, ToolInvocationEvaluator,
ReasoningEfficiencyEvaluator, ReasoningEfficiencyEvaluator,
MetricCategory, MetricCategory,
EvaluationScore EvaluationScore,
) )
from crewai.utilities.events.agent_events import AgentEvaluationStartedEvent, AgentEvaluationCompletedEvent, AgentEvaluationFailedEvent from crewai.events.types.agent_events import (
from crewai.utilities.events.crewai_event_bus import crewai_event_bus AgentEvaluationStartedEvent,
AgentEvaluationCompletedEvent,
AgentEvaluationFailedEvent,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.experimental.evaluation import create_default_evaluator from crewai.experimental.evaluation import create_default_evaluator
class TestAgentEvaluator: class TestAgentEvaluator:
@pytest.fixture @pytest.fixture
def mock_crew(self): def mock_crew(self):
@@ -28,19 +33,16 @@ class TestAgentEvaluator:
goal="Complete test tasks successfully", goal="Complete test tasks successfully",
backstory="An agent created for testing purposes", backstory="An agent created for testing purposes",
allow_delegation=False, allow_delegation=False,
verbose=False verbose=False,
) )
task = Task( task = Task(
description="Test task description", description="Test task description",
agent=agent, agent=agent,
expected_output="Expected test output" expected_output="Expected test output",
) )
crew = Crew( crew = Crew(agents=[agent], tasks=[task])
agents=[agent],
tasks=[task]
)
return crew return crew
def test_set_iteration(self): def test_set_iteration(self):
@@ -51,7 +53,9 @@ class TestAgentEvaluator:
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_evaluate_current_iteration(self, mock_crew): def test_evaluate_current_iteration(self, mock_crew):
agent_evaluator = AgentEvaluator(agents=mock_crew.agents, evaluators=[GoalAlignmentEvaluator()]) agent_evaluator = AgentEvaluator(
agents=mock_crew.agents, evaluators=[GoalAlignmentEvaluator()]
)
mock_crew.kickoff() mock_crew.kickoff()
@@ -59,20 +63,20 @@ class TestAgentEvaluator:
assert isinstance(results, dict) assert isinstance(results, dict)
agent, = mock_crew.agents (agent,) = mock_crew.agents
task, = mock_crew.tasks (task,) = mock_crew.tasks
assert len(mock_crew.agents) == 1 assert len(mock_crew.agents) == 1
assert agent.role in results assert agent.role in results
assert len(results[agent.role]) == 1 assert len(results[agent.role]) == 1
result, = results[agent.role] (result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult) assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id) assert result.agent_id == str(agent.id)
assert result.task_id == str(task.id) assert result.task_id == str(task.id)
goal_alignment, = result.metrics.values() (goal_alignment,) = result.metrics.values()
assert goal_alignment.score == 5.0 assert goal_alignment.score == 5.0
expected_feedback = "The agent's output demonstrates an understanding of the need for a comprehensive document outlining task" expected_feedback = "The agent's output demonstrates an understanding of the need for a comprehensive document outlining task"
@@ -92,7 +96,7 @@ class TestAgentEvaluator:
ToolSelectionEvaluator, ToolSelectionEvaluator,
ParameterExtractionEvaluator, ParameterExtractionEvaluator,
ToolInvocationEvaluator, ToolInvocationEvaluator,
ReasoningEfficiencyEvaluator ReasoningEfficiencyEvaluator,
] ]
assert len(agent_evaluator.evaluators) == len(expected_types) assert len(agent_evaluator.evaluators) == len(expected_types)
@@ -109,6 +113,7 @@ class TestAgentEvaluator:
with crewai_event_bus.scoped_handlers(): with crewai_event_bus.scoped_handlers():
events = {} events = {}
@crewai_event_bus.on(AgentEvaluationStartedEvent) @crewai_event_bus.on(AgentEvaluationStartedEvent)
def capture_started(source, event): def capture_started(source, event):
events["started"] = event events["started"] = event
@@ -121,7 +126,9 @@ class TestAgentEvaluator:
def capture_failed(source, event): def capture_failed(source, event):
events["failed"] = event events["failed"] = event
agent_evaluator = AgentEvaluator(agents=[agent], evaluators=[GoalAlignmentEvaluator()]) agent_evaluator = AgentEvaluator(
agents=[agent], evaluators=[GoalAlignmentEvaluator()]
)
agent.kickoff(messages="Complete this task successfully") agent.kickoff(messages="Complete this task successfully")
@@ -143,13 +150,13 @@ class TestAgentEvaluator:
assert isinstance(results, dict) assert isinstance(results, dict)
result, = results[agent.role] (result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult) assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id) assert result.agent_id == str(agent.id)
assert result.task_id == "lite_task" assert result.task_id == "lite_task"
goal_alignment, = result.metrics.values() (goal_alignment,) = result.metrics.values()
assert goal_alignment.score == 2.0 assert goal_alignment.score == 2.0
expected_feedback = "The agent did not demonstrate a clear understanding of the task goal, which is to complete test tasks successfully" expected_feedback = "The agent did not demonstrate a clear understanding of the task goal, which is to complete test tasks successfully"
@@ -168,13 +175,14 @@ class TestAgentEvaluator:
task = Task( task = Task(
description="Test task description", description="Test task description",
agent=agent, agent=agent,
expected_output="Expected test output" expected_output="Expected test output",
) )
mock_crew.agents.append(agent) mock_crew.agents.append(agent)
mock_crew.tasks.append(task) mock_crew.tasks.append(task)
with crewai_event_bus.scoped_handlers(): with crewai_event_bus.scoped_handlers():
events = {} events = {}
@crewai_event_bus.on(AgentEvaluationStartedEvent) @crewai_event_bus.on(AgentEvaluationStartedEvent)
def capture_started(source, event): def capture_started(source, event):
events["started"] = event events["started"] = event
@@ -187,7 +195,9 @@ class TestAgentEvaluator:
def capture_failed(source, event): def capture_failed(source, event):
events["failed"] = event events["failed"] = event
agent_evaluator = AgentEvaluator(agents=[agent], evaluators=[GoalAlignmentEvaluator()]) agent_evaluator = AgentEvaluator(
agents=[agent], evaluators=[GoalAlignmentEvaluator()]
)
mock_crew.kickoff() mock_crew.kickoff()
assert events.keys() == {"started", "completed"} assert events.keys() == {"started", "completed"}
@@ -208,13 +218,13 @@ class TestAgentEvaluator:
assert isinstance(results, dict) assert isinstance(results, dict)
assert len(results.keys()) == 1 assert len(results.keys()) == 1
result, = results[agent.role] (result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult) assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id) assert result.agent_id == str(agent.id)
assert result.task_id == str(task.id) assert result.task_id == str(task.id)
goal_alignment, = result.metrics.values() (goal_alignment,) = result.metrics.values()
assert goal_alignment.score == 5.0 assert goal_alignment.score == 5.0
expected_feedback = "The agent provided a thorough guide on how to conduct a test task but failed to produce specific expected output" expected_feedback = "The agent provided a thorough guide on how to conduct a test task but failed to produce specific expected output"
@@ -223,11 +233,10 @@ class TestAgentEvaluator:
assert goal_alignment.raw_response is not None assert goal_alignment.raw_response is not None
assert '"score": 5' in goal_alignment.raw_response assert '"score": 5' in goal_alignment.raw_response
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_failed_evaluation(self, mock_crew): def test_failed_evaluation(self, mock_crew):
agent, = mock_crew.agents (agent,) = mock_crew.agents
task, = mock_crew.tasks (task,) = mock_crew.tasks
with crewai_event_bus.scoped_handlers(): with crewai_event_bus.scoped_handlers():
events = {} events = {}
@@ -247,13 +256,16 @@ class TestAgentEvaluator:
# Create a mock evaluator that will raise an exception # Create a mock evaluator that will raise an exception
from crewai.experimental.evaluation.base_evaluator import BaseEvaluator from crewai.experimental.evaluation.base_evaluator import BaseEvaluator
from crewai.experimental.evaluation import MetricCategory from crewai.experimental.evaluation import MetricCategory
class FailingEvaluator(BaseEvaluator): class FailingEvaluator(BaseEvaluator):
metric_category = MetricCategory.GOAL_ALIGNMENT metric_category = MetricCategory.GOAL_ALIGNMENT
def evaluate(self, agent, task, execution_trace, final_output): def evaluate(self, agent, task, execution_trace, final_output):
raise ValueError("Forced evaluation failure") raise ValueError("Forced evaluation failure")
agent_evaluator = AgentEvaluator(agents=[agent], evaluators=[FailingEvaluator()]) agent_evaluator = AgentEvaluator(
agents=[agent], evaluators=[FailingEvaluator()]
)
mock_crew.kickoff() mock_crew.kickoff()
assert events.keys() == {"started", "failed"} assert events.keys() == {"started", "failed"}
@@ -269,7 +281,7 @@ class TestAgentEvaluator:
assert events["failed"].error == "Forced evaluation failure" assert events["failed"].error == "Forced evaluation failure"
results = agent_evaluator.get_evaluation_results() results = agent_evaluator.get_evaluation_results()
result, = results[agent.role] (result,) = results[agent.role]
assert isinstance(result, AgentEvaluationResult) assert isinstance(result, AgentEvaluationResult)
assert result.agent_id == str(agent.id) assert result.agent_id == str(agent.id)

View File

@@ -1,7 +1,7 @@
from unittest.mock import MagicMock, patch, ANY from unittest.mock import MagicMock, patch, ANY
from collections import defaultdict from collections import defaultdict
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemorySaveStartedEvent, MemorySaveStartedEvent,
MemorySaveCompletedEvent, MemorySaveCompletedEvent,
MemoryQueryStartedEvent, MemoryQueryStartedEvent,

View File

@@ -1,10 +1,10 @@
import pytest import pytest
from unittest.mock import ANY from unittest.mock import ANY
from collections import defaultdict from collections import defaultdict
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.memory.long_term.long_term_memory import LongTermMemory from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemorySaveStartedEvent, MemorySaveStartedEvent,
MemorySaveCompletedEvent, MemorySaveCompletedEvent,
MemoryQueryStartedEvent, MemoryQueryStartedEvent,

View File

@@ -7,8 +7,8 @@ from crewai.crew import Crew
from crewai.memory.short_term.short_term_memory import ShortTermMemory from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.task import Task from crewai.task import Task
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemorySaveStartedEvent, MemorySaveStartedEvent,
MemorySaveCompletedEvent, MemorySaveCompletedEvent,
MemoryQueryStartedEvent, MemoryQueryStartedEvent,

View File

@@ -27,19 +27,17 @@ from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.output_format import OutputFormat from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput from crewai.tasks.task_output import TaskOutput
from crewai.types.usage_metrics import UsageMetrics from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities.events import ( from crewai.events.event_bus import crewai_event_bus
CrewTrainCompletedEvent, from crewai.events.types.crew_events import (
CrewTrainStartedEvent,
crewai_event_bus,
)
from crewai.utilities.events.crew_events import (
CrewTestCompletedEvent, CrewTestCompletedEvent,
CrewTestStartedEvent, CrewTestStartedEvent,
CrewTrainCompletedEvent,
CrewTrainStartedEvent,
) )
from crewai.utilities.rpm_controller import RPMController from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.utilities.events.memory_events import ( from crewai.events.types.memory_events import (
MemorySaveStartedEvent, MemorySaveStartedEvent,
MemorySaveCompletedEvent, MemorySaveCompletedEvent,
MemorySaveFailedEvent, MemorySaveFailedEvent,

View File

@@ -7,14 +7,14 @@ import pytest
from pydantic import BaseModel from pydantic import BaseModel
from crewai.flow.flow import Flow, and_, listen, or_, router, start from crewai.flow.flow import Flow, and_, listen, or_, router, start
from crewai.utilities.events import ( from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.flow_events import (
FlowFinishedEvent, FlowFinishedEvent,
FlowStartedEvent, FlowStartedEvent,
FlowPlotEvent,
MethodExecutionFinishedEvent, MethodExecutionFinishedEvent,
MethodExecutionStartedEvent, MethodExecutionStartedEvent,
crewai_event_bus,
) )
from crewai.utilities.events.flow_events import FlowPlotEvent
def test_simple_sequential_flow(): def test_simple_sequential_flow():

View File

@@ -1,6 +1,6 @@
import pytest import pytest
from unittest.mock import patch, MagicMock from unittest.mock import patch, MagicMock
from crewai.utilities.events.event_listener import event_listener from crewai.events.event_listener import event_listener
class TestFlowHumanInputIntegration: class TestFlowHumanInputIntegration:
@@ -9,82 +9,90 @@ class TestFlowHumanInputIntegration:
def test_console_formatter_pause_resume_methods(self): def test_console_formatter_pause_resume_methods(self):
"""Test that ConsoleFormatter pause/resume methods work correctly.""" """Test that ConsoleFormatter pause/resume methods work correctly."""
formatter = event_listener.formatter formatter = event_listener.formatter
original_paused_state = formatter._live_paused original_paused_state = formatter._live_paused
try: try:
formatter._live_paused = False formatter._live_paused = False
formatter.pause_live_updates() formatter.pause_live_updates()
assert formatter._live_paused assert formatter._live_paused
formatter.resume_live_updates() formatter.resume_live_updates()
assert not formatter._live_paused assert not formatter._live_paused
finally: finally:
formatter._live_paused = original_paused_state formatter._live_paused = original_paused_state
@patch('builtins.input', return_value='') @patch("builtins.input", return_value="")
def test_human_input_pauses_flow_updates(self, mock_input): def test_human_input_pauses_flow_updates(self, mock_input):
"""Test that human input pauses Flow status updates.""" """Test that human input pauses Flow status updates."""
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin from crewai.agents.agent_builder.base_agent_executor_mixin import (
CrewAgentExecutorMixin,
)
executor = CrewAgentExecutorMixin() executor = CrewAgentExecutorMixin()
executor.crew = MagicMock() executor.crew = MagicMock()
executor.crew._train = False executor.crew._train = False
executor._printer = MagicMock() executor._printer = MagicMock()
formatter = event_listener.formatter formatter = event_listener.formatter
original_paused_state = formatter._live_paused original_paused_state = formatter._live_paused
try: try:
formatter._live_paused = False formatter._live_paused = False
with patch.object(formatter, 'pause_live_updates') as mock_pause, \ with (
patch.object(formatter, 'resume_live_updates') as mock_resume: patch.object(formatter, "pause_live_updates") as mock_pause,
patch.object(formatter, "resume_live_updates") as mock_resume,
):
result = executor._ask_human_input("Test result") result = executor._ask_human_input("Test result")
mock_pause.assert_called_once() mock_pause.assert_called_once()
mock_resume.assert_called_once() mock_resume.assert_called_once()
mock_input.assert_called_once() mock_input.assert_called_once()
assert result == '' assert result == ""
finally: finally:
formatter._live_paused = original_paused_state formatter._live_paused = original_paused_state
@patch('builtins.input', side_effect=['feedback', '']) @patch("builtins.input", side_effect=["feedback", ""])
def test_multiple_human_input_rounds(self, mock_input): def test_multiple_human_input_rounds(self, mock_input):
"""Test multiple rounds of human input with Flow status management.""" """Test multiple rounds of human input with Flow status management."""
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin from crewai.agents.agent_builder.base_agent_executor_mixin import (
CrewAgentExecutorMixin,
)
executor = CrewAgentExecutorMixin() executor = CrewAgentExecutorMixin()
executor.crew = MagicMock() executor.crew = MagicMock()
executor.crew._train = False executor.crew._train = False
executor._printer = MagicMock() executor._printer = MagicMock()
formatter = event_listener.formatter formatter = event_listener.formatter
original_paused_state = formatter._live_paused original_paused_state = formatter._live_paused
try: try:
pause_calls = [] pause_calls = []
resume_calls = [] resume_calls = []
def track_pause(): def track_pause():
pause_calls.append(True) pause_calls.append(True)
def track_resume(): def track_resume():
resume_calls.append(True) resume_calls.append(True)
with patch.object(formatter, 'pause_live_updates', side_effect=track_pause), \ with (
patch.object(formatter, 'resume_live_updates', side_effect=track_resume): patch.object(formatter, "pause_live_updates", side_effect=track_pause),
patch.object(
formatter, "resume_live_updates", side_effect=track_resume
),
):
result1 = executor._ask_human_input("Test result 1") result1 = executor._ask_human_input("Test result 1")
assert result1 == 'feedback' assert result1 == "feedback"
result2 = executor._ask_human_input("Test result 2") result2 = executor._ask_human_input("Test result 2")
assert result2 == '' assert result2 == ""
assert len(pause_calls) == 2 assert len(pause_calls) == 2
assert len(resume_calls) == 2 assert len(resume_calls) == 2
finally: finally:
@@ -93,17 +101,17 @@ class TestFlowHumanInputIntegration:
def test_pause_resume_with_no_live_session(self): def test_pause_resume_with_no_live_session(self):
"""Test pause/resume methods handle case when no Live session exists.""" """Test pause/resume methods handle case when no Live session exists."""
formatter = event_listener.formatter formatter = event_listener.formatter
original_live = formatter._live original_live = formatter._live
original_paused_state = formatter._live_paused original_paused_state = formatter._live_paused
try: try:
formatter._live = None formatter._live = None
formatter._live_paused = False formatter._live_paused = False
formatter.pause_live_updates() formatter.pause_live_updates()
formatter.resume_live_updates() formatter.resume_live_updates()
assert not formatter._live_paused assert not formatter._live_paused
finally: finally:
formatter._live = original_live formatter._live = original_live
@@ -111,25 +119,30 @@ class TestFlowHumanInputIntegration:
def test_pause_resume_exception_handling(self): def test_pause_resume_exception_handling(self):
"""Test that resume is called even if exception occurs during human input.""" """Test that resume is called even if exception occurs during human input."""
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin from crewai.agents.agent_builder.base_agent_executor_mixin import (
CrewAgentExecutorMixin,
)
executor = CrewAgentExecutorMixin() executor = CrewAgentExecutorMixin()
executor.crew = MagicMock() executor.crew = MagicMock()
executor.crew._train = False executor.crew._train = False
executor._printer = MagicMock() executor._printer = MagicMock()
formatter = event_listener.formatter formatter = event_listener.formatter
original_paused_state = formatter._live_paused original_paused_state = formatter._live_paused
try: try:
with patch.object(formatter, 'pause_live_updates') as mock_pause, \ with (
patch.object(formatter, 'resume_live_updates') as mock_resume, \ patch.object(formatter, "pause_live_updates") as mock_pause,
patch('builtins.input', side_effect=KeyboardInterrupt("Test exception")): patch.object(formatter, "resume_live_updates") as mock_resume,
patch(
"builtins.input", side_effect=KeyboardInterrupt("Test exception")
),
):
with pytest.raises(KeyboardInterrupt): with pytest.raises(KeyboardInterrupt):
executor._ask_human_input("Test result") executor._ask_human_input("Test result")
mock_pause.assert_called_once() mock_pause.assert_called_once()
mock_resume.assert_called_once() mock_resume.assert_called_once()
finally: finally:
@@ -137,31 +150,39 @@ class TestFlowHumanInputIntegration:
def test_training_mode_human_input(self): def test_training_mode_human_input(self):
"""Test human input in training mode.""" """Test human input in training mode."""
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin from crewai.agents.agent_builder.base_agent_executor_mixin import (
CrewAgentExecutorMixin,
)
executor = CrewAgentExecutorMixin() executor = CrewAgentExecutorMixin()
executor.crew = MagicMock() executor.crew = MagicMock()
executor.crew._train = True executor.crew._train = True
executor._printer = MagicMock() executor._printer = MagicMock()
formatter = event_listener.formatter formatter = event_listener.formatter
original_paused_state = formatter._live_paused original_paused_state = formatter._live_paused
try: try:
with patch.object(formatter, 'pause_live_updates') as mock_pause, \ with (
patch.object(formatter, 'resume_live_updates') as mock_resume, \ patch.object(formatter, "pause_live_updates") as mock_pause,
patch('builtins.input', return_value='training feedback'): patch.object(formatter, "resume_live_updates") as mock_resume,
patch("builtins.input", return_value="training feedback"),
):
result = executor._ask_human_input("Test result") result = executor._ask_human_input("Test result")
mock_pause.assert_called_once() mock_pause.assert_called_once()
mock_resume.assert_called_once() mock_resume.assert_called_once()
assert result == 'training feedback' assert result == "training feedback"
executor._printer.print.assert_called() executor._printer.print.assert_called()
call_args = [call[1]['content'] for call in executor._printer.print.call_args_list] call_args = [
training_prompt_found = any('TRAINING MODE' in content for content in call_args) call[1]["content"]
for call in executor._printer.print.call_args_list
]
training_prompt_found = any(
"TRAINING MODE" in content for content in call_args
)
assert training_prompt_found assert training_prompt_found
finally: finally:
formatter._live_paused = original_paused_state formatter._live_paused = original_paused_state

View File

@@ -8,7 +8,7 @@ from pydantic import BaseModel
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM from crewai.llm import CONTEXT_WINDOW_USAGE_RATIO, LLM
from crewai.utilities.events import ( from crewai.events.event_types import (
LLMCallCompletedEvent, LLMCallCompletedEvent,
LLMStreamChunkEvent, LLMStreamChunkEvent,
ToolUsageStartedEvent, ToolUsageStartedEvent,
@@ -563,7 +563,7 @@ def assert_event_count(
@pytest.fixture @pytest.fixture
def mock_emit() -> MagicMock: def mock_emit() -> MagicMock:
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus from crewai.events.event_bus import CrewAIEventsBus
with patch.object(CrewAIEventsBus, "emit") as mock_emit: with patch.object(CrewAIEventsBus, "emit") as mock_emit:
yield mock_emit yield mock_emit

View File

@@ -7,11 +7,11 @@ from crewai.llm import LLM
from crewai.tasks.hallucination_guardrail import HallucinationGuardrail from crewai.tasks.hallucination_guardrail import HallucinationGuardrail
from crewai.tasks.llm_guardrail import LLMGuardrail from crewai.tasks.llm_guardrail import LLMGuardrail
from crewai.tasks.task_output import TaskOutput from crewai.tasks.task_output import TaskOutput
from crewai.utilities.events import ( from crewai.events.event_types import (
LLMGuardrailCompletedEvent, LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent, LLMGuardrailStartedEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
def test_task_without_guardrail(): def test_task_without_guardrail():

View File

@@ -10,8 +10,8 @@ from pydantic import BaseModel, Field
from crewai import Agent, Task from crewai import Agent, Task
from crewai.tools import BaseTool from crewai.tools import BaseTool
from crewai.tools.tool_usage import ToolUsage from crewai.tools.tool_usage import ToolUsage
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ( from crewai.events.types.tool_usage_events import (
ToolSelectionErrorEvent, ToolSelectionErrorEvent,
ToolUsageFinishedEvent, ToolUsageFinishedEvent,
ToolValidateInputErrorEvent, ToolValidateInputErrorEvent,

View File

@@ -5,13 +5,13 @@ from unittest.mock import patch, MagicMock
from crewai import Agent, Task, Crew from crewai import Agent, Task, Crew
from crewai.flow.flow import Flow, start from crewai.flow.flow import Flow, start
from crewai.utilities.events.listeners.tracing.trace_listener import ( from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener, TraceCollectionListener,
) )
from crewai.utilities.events.listeners.tracing.trace_batch_manager import ( from crewai.events.listeners.tracing.trace_batch_manager import (
TraceBatchManager, TraceBatchManager,
) )
from crewai.utilities.events.listeners.tracing.types import TraceEvent from crewai.events.listeners.tracing.types import TraceEvent
class TestTraceListenerSetup: class TestTraceListenerSetup:
@@ -27,11 +27,11 @@ class TestTraceListenerSetup:
return_value="mock_token_12345", return_value="mock_token_12345",
), ),
patch( patch(
"crewai.utilities.events.listeners.tracing.trace_listener.get_auth_token", "crewai.events.listeners.tracing.trace_listener.get_auth_token",
return_value="mock_token_12345", return_value="mock_token_12345",
), ),
patch( patch(
"crewai.utilities.events.listeners.tracing.trace_batch_manager.get_auth_token", "crewai.events.listeners.tracing.trace_batch_manager.get_auth_token",
return_value="mock_token_12345", return_value="mock_token_12345",
), ),
): ):
@@ -40,7 +40,7 @@ class TestTraceListenerSetup:
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def clear_event_bus(self): def clear_event_bus(self):
"""Clear event bus listeners before and after each test""" """Clear event bus listeners before and after each test"""
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
# Store original handlers # Store original handlers
original_handlers = crewai_event_bus._handlers.copy() original_handlers = crewai_event_bus._handlers.copy()
@@ -123,7 +123,7 @@ class TestTraceListenerSetup:
crew = Crew(agents=[agent], tasks=[task], verbose=True) crew = Crew(agents=[agent], tasks=[task], verbose=True)
trace_listener = TraceCollectionListener() trace_listener = TraceCollectionListener()
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
trace_listener.setup_listeners(crewai_event_bus) trace_listener.setup_listeners(crewai_event_bus)
@@ -162,7 +162,7 @@ class TestTraceListenerSetup:
crew = Crew(agents=[agent], tasks=[task], verbose=True) crew = Crew(agents=[agent], tasks=[task], verbose=True)
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
trace_listener = None trace_listener = None
for handler_list in crewai_event_bus._handlers.values(): for handler_list in crewai_event_bus._handlers.values():
@@ -207,7 +207,7 @@ class TestTraceListenerSetup:
) )
crew = Crew(agents=[agent], tasks=[task], verbose=True) crew = Crew(agents=[agent], tasks=[task], verbose=True)
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
# Create and setup trace listener explicitly # Create and setup trace listener explicitly
trace_listener = TraceCollectionListener() trace_listener = TraceCollectionListener()
@@ -262,7 +262,7 @@ class TestTraceListenerSetup:
result = crew.kickoff() result = crew.kickoff()
assert result is not None assert result is not None
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
trace_handlers = [] trace_handlers = []
for handlers in crewai_event_bus._handlers.values(): for handlers in crewai_event_bus._handlers.values():
@@ -281,9 +281,9 @@ class TestTraceListenerSetup:
): ):
trace_handlers.append(handler) trace_handlers.append(handler)
assert len(trace_handlers) == 0, ( assert (
f"Found {len(trace_handlers)} trace handlers when tracing should be disabled" len(trace_handlers) == 0
) ), f"Found {len(trace_handlers)} trace handlers when tracing should be disabled"
def test_trace_listener_setup_correctly_for_crew(self): def test_trace_listener_setup_correctly_for_crew(self):
"""Test that trace listener is set up correctly when enabled""" """Test that trace listener is set up correctly when enabled"""
@@ -328,7 +328,7 @@ class TestTraceListenerSetup:
with ( with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}), patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
patch( patch(
"crewai.utilities.events.listeners.tracing.trace_listener.TraceCollectionListener._check_authenticated", "crewai.events.listeners.tracing.trace_listener.TraceCollectionListener._check_authenticated",
return_value=False, return_value=False,
), ),
): ):
@@ -357,7 +357,7 @@ class TestTraceListenerSetup:
with ( with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}), patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
patch( patch(
"crewai.utilities.events.listeners.tracing.trace_batch_manager.PlusAPI" "crewai.events.listeners.tracing.trace_batch_manager.PlusAPI"
) as mock_plus_api_class, ) as mock_plus_api_class,
): ):
mock_plus_api_instance = MagicMock() mock_plus_api_instance = MagicMock()
@@ -393,13 +393,13 @@ class TestTraceListenerSetup:
# Helper method to ensure cleanup # Helper method to ensure cleanup
def teardown_method(self): def teardown_method(self):
"""Cleanup after each test method""" """Cleanup after each test method"""
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
crewai_event_bus._handlers.clear() crewai_event_bus._handlers.clear()
@classmethod @classmethod
def teardown_class(cls): def teardown_class(cls):
"""Final cleanup after all tests in this class""" """Final cleanup after all tests in this class"""
from crewai.utilities.events import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
crewai_event_bus._handlers.clear() crewai_event_bus._handlers.clear()

View File

@@ -1,7 +1,7 @@
from unittest.mock import Mock from unittest.mock import Mock
from crewai.utilities.events.base_events import BaseEvent from crewai.events.base_events import BaseEvent
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
class TestEvent(BaseEvent): class TestEvent(BaseEvent):

View File

@@ -1,7 +1,7 @@
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
from rich.tree import Tree from rich.tree import Tree
from rich.live import Live from rich.live import Live
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter from crewai.events.utils.console_formatter import ConsoleFormatter
class TestConsoleFormatterPauseResume: class TestConsoleFormatterPauseResume:
@@ -10,78 +10,78 @@ class TestConsoleFormatterPauseResume:
def test_pause_live_updates_with_active_session(self): def test_pause_live_updates_with_active_session(self):
"""Test pausing when Live session is active.""" """Test pausing when Live session is active."""
formatter = ConsoleFormatter() formatter = ConsoleFormatter()
mock_live = MagicMock(spec=Live) mock_live = MagicMock(spec=Live)
formatter._live = mock_live formatter._live = mock_live
formatter._live_paused = False formatter._live_paused = False
formatter.pause_live_updates() formatter.pause_live_updates()
mock_live.stop.assert_called_once() mock_live.stop.assert_called_once()
assert formatter._live_paused assert formatter._live_paused
def test_pause_live_updates_when_already_paused(self): def test_pause_live_updates_when_already_paused(self):
"""Test pausing when already paused does nothing.""" """Test pausing when already paused does nothing."""
formatter = ConsoleFormatter() formatter = ConsoleFormatter()
mock_live = MagicMock(spec=Live) mock_live = MagicMock(spec=Live)
formatter._live = mock_live formatter._live = mock_live
formatter._live_paused = True formatter._live_paused = True
formatter.pause_live_updates() formatter.pause_live_updates()
mock_live.stop.assert_not_called() mock_live.stop.assert_not_called()
assert formatter._live_paused assert formatter._live_paused
def test_pause_live_updates_with_no_session(self): def test_pause_live_updates_with_no_session(self):
"""Test pausing when no Live session exists.""" """Test pausing when no Live session exists."""
formatter = ConsoleFormatter() formatter = ConsoleFormatter()
formatter._live = None formatter._live = None
formatter._live_paused = False formatter._live_paused = False
formatter.pause_live_updates() formatter.pause_live_updates()
assert formatter._live_paused assert formatter._live_paused
def test_resume_live_updates_when_paused(self): def test_resume_live_updates_when_paused(self):
"""Test resuming when paused.""" """Test resuming when paused."""
formatter = ConsoleFormatter() formatter = ConsoleFormatter()
formatter._live_paused = True formatter._live_paused = True
formatter.resume_live_updates() formatter.resume_live_updates()
assert not formatter._live_paused assert not formatter._live_paused
def test_resume_live_updates_when_not_paused(self): def test_resume_live_updates_when_not_paused(self):
"""Test resuming when not paused does nothing.""" """Test resuming when not paused does nothing."""
formatter = ConsoleFormatter() formatter = ConsoleFormatter()
formatter._live_paused = False formatter._live_paused = False
formatter.resume_live_updates() formatter.resume_live_updates()
assert not formatter._live_paused assert not formatter._live_paused
def test_print_after_resume_restarts_live_session(self): def test_print_after_resume_restarts_live_session(self):
"""Test that printing a Tree after resume creates new Live session.""" """Test that printing a Tree after resume creates new Live session."""
formatter = ConsoleFormatter() formatter = ConsoleFormatter()
formatter._live_paused = True formatter._live_paused = True
formatter._live = None formatter._live = None
formatter.resume_live_updates() formatter.resume_live_updates()
assert not formatter._live_paused assert not formatter._live_paused
tree = Tree("Test") tree = Tree("Test")
with patch('crewai.utilities.events.utils.console_formatter.Live') as mock_live_class: with patch("crewai.events.utils.console_formatter.Live") as mock_live_class:
mock_live_instance = MagicMock() mock_live_instance = MagicMock()
mock_live_class.return_value = mock_live_instance mock_live_class.return_value = mock_live_instance
formatter.print(tree) formatter.print(tree)
mock_live_class.assert_called_once() mock_live_class.assert_called_once()
mock_live_instance.start.assert_called_once() mock_live_instance.start.assert_called_once()
assert formatter._live == mock_live_instance assert formatter._live == mock_live_instance
@@ -89,28 +89,28 @@ class TestConsoleFormatterPauseResume:
def test_multiple_pause_resume_cycles(self): def test_multiple_pause_resume_cycles(self):
"""Test multiple pause/resume cycles work correctly.""" """Test multiple pause/resume cycles work correctly."""
formatter = ConsoleFormatter() formatter = ConsoleFormatter()
mock_live = MagicMock(spec=Live) mock_live = MagicMock(spec=Live)
formatter._live = mock_live formatter._live = mock_live
formatter._live_paused = False formatter._live_paused = False
formatter.pause_live_updates() formatter.pause_live_updates()
assert formatter._live_paused assert formatter._live_paused
mock_live.stop.assert_called_once() mock_live.stop.assert_called_once()
assert formatter._live is None # Live session should be cleared assert formatter._live is None # Live session should be cleared
formatter.resume_live_updates() formatter.resume_live_updates()
assert not formatter._live_paused assert not formatter._live_paused
formatter.pause_live_updates() formatter.pause_live_updates()
assert formatter._live_paused assert formatter._live_paused
formatter.resume_live_updates() formatter.resume_live_updates()
assert not formatter._live_paused assert not formatter._live_paused
def test_pause_resume_state_initialization(self): def test_pause_resume_state_initialization(self):
"""Test that _live_paused is properly initialized.""" """Test that _live_paused is properly initialized."""
formatter = ConsoleFormatter() formatter = ConsoleFormatter()
assert hasattr(formatter, '_live_paused') assert hasattr(formatter, "_live_paused")
assert not formatter._live_paused assert not formatter._live_paused

View File

@@ -11,12 +11,12 @@ from crewai.flow.flow import Flow, listen, start
from crewai.llm import LLM from crewai.llm import LLM
from crewai.task import Task from crewai.task import Task
from crewai.tools.base_tool import BaseTool from crewai.tools.base_tool import BaseTool
from crewai.utilities.events.agent_events import ( from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent, AgentExecutionCompletedEvent,
AgentExecutionErrorEvent, AgentExecutionErrorEvent,
AgentExecutionStartedEvent, AgentExecutionStartedEvent,
) )
from crewai.utilities.events.crew_events import ( from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent, CrewKickoffCompletedEvent,
CrewKickoffFailedEvent, CrewKickoffFailedEvent,
CrewKickoffStartedEvent, CrewKickoffStartedEvent,
@@ -24,28 +24,28 @@ from crewai.utilities.events.crew_events import (
CrewTestResultEvent, CrewTestResultEvent,
CrewTestStartedEvent, CrewTestStartedEvent,
) )
from crewai.utilities.events.crewai_event_bus import crewai_event_bus from crewai.events.event_bus import crewai_event_bus
from crewai.utilities.events.event_listener import EventListener from crewai.events.event_listener import EventListener
from crewai.utilities.events.event_types import ToolUsageFinishedEvent from crewai.events.types.tool_usage_events import ToolUsageFinishedEvent
from crewai.utilities.events.flow_events import ( from crewai.events.types.flow_events import (
FlowCreatedEvent, FlowCreatedEvent,
FlowFinishedEvent, FlowFinishedEvent,
FlowStartedEvent, FlowStartedEvent,
MethodExecutionFailedEvent, MethodExecutionFailedEvent,
MethodExecutionStartedEvent, MethodExecutionStartedEvent,
) )
from crewai.utilities.events.llm_events import ( from crewai.events.types.llm_events import (
LLMCallCompletedEvent, LLMCallCompletedEvent,
LLMCallFailedEvent, LLMCallFailedEvent,
LLMCallStartedEvent, LLMCallStartedEvent,
LLMStreamChunkEvent, LLMStreamChunkEvent,
) )
from crewai.utilities.events.task_events import ( from crewai.events.types.task_events import (
TaskCompletedEvent, TaskCompletedEvent,
TaskFailedEvent, TaskFailedEvent,
TaskStartedEvent, TaskStartedEvent,
) )
from crewai.utilities.events.tool_usage_events import ( from crewai.events.types.tool_usage_events import (
ToolUsageErrorEvent, ToolUsageErrorEvent,
) )
@@ -114,9 +114,7 @@ def test_crew_emits_start_kickoff_event(
mock_telemetry.task_ended = Mock(return_value=mock_span) mock_telemetry.task_ended = Mock(return_value=mock_span)
# Patch the Telemetry class to return our mock # Patch the Telemetry class to return our mock
with patch( with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
# Now when Crew creates EventListener, it will use our mocked telemetry # Now when Crew creates EventListener, it will use our mocked telemetry
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew") crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff() crew.kickoff()
@@ -241,9 +239,7 @@ def test_crew_emits_end_task_event(
mock_telemetry.crew_execution_span = Mock() mock_telemetry.crew_execution_span = Mock()
mock_telemetry.end_crew = Mock() mock_telemetry.end_crew = Mock()
with patch( with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew") crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff() crew.kickoff()
@@ -455,9 +451,7 @@ def test_flow_emits_start_event(reset_event_listener_singleton):
mock_telemetry.flow_creation_span = Mock() mock_telemetry.flow_creation_span = Mock()
mock_telemetry.set_tracer = Mock() mock_telemetry.set_tracer = Mock()
with patch( with patch("crewai.events.event_listener.Telemetry", return_value=mock_telemetry):
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
# Force creation of EventListener singleton with mocked telemetry # Force creation of EventListener singleton with mocked telemetry
_ = EventListener() _ = EventListener()