diff --git a/conftest.py b/conftest.py index d63e7c885..f4e035e08 100644 --- a/conftest.py +++ b/conftest.py @@ -1,6 +1,7 @@ """Pytest configuration for crewAI workspace.""" from collections.abc import Generator +import gzip import os from pathlib import Path import tempfile @@ -31,6 +32,21 @@ def cleanup_event_handlers() -> Generator[None, Any, None]: pass +@pytest.fixture(autouse=True, scope="function") +def reset_event_state() -> None: + """Reset event system state before each test for isolation.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import ( + EventContextConfig, + _event_context_config, + _event_id_stack, + ) + + reset_emission_counter() + _event_id_stack.set(()) + _event_context_config.set(EventContextConfig()) + + @pytest.fixture(autouse=True, scope="function") def setup_test_environment() -> Generator[None, Any, None]: """Setup test environment for crewAI workspace.""" @@ -138,9 +154,14 @@ def _filter_request_headers(request: Request) -> Request: # type: ignore[no-any def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any]: """Filter sensitive headers from response before recording.""" - # Remove Content-Encoding to prevent decompression issues on replay + for encoding_header in ["Content-Encoding", "content-encoding"]: - response["headers"].pop(encoding_header, None) + if encoding_header in response["headers"]: + encoding = response["headers"].pop(encoding_header) + if encoding and encoding[0] == "gzip": + body = response.get("body", {}).get("string", b"") + if isinstance(body, bytes) and body.startswith(b"\x1f\x8b"): + response["body"]["string"] = gzip.decompress(body).decode("utf-8") for header_name, replacement in HEADERS_TO_FILTER.items(): for variant in [header_name, header_name.upper(), header_name.title()]: diff --git a/lib/crewai/src/crewai/a2a/types.py b/lib/crewai/src/crewai/a2a/types.py index 90473b669..ea15abd80 100644 --- a/lib/crewai/src/crewai/a2a/types.py +++ b/lib/crewai/src/crewai/a2a/types.py @@ -14,15 +14,25 @@ from typing import ( from pydantic import BeforeValidator, HttpUrl, TypeAdapter from typing_extensions import NotRequired -from crewai.a2a.updates import ( - PollingConfig, - PollingHandler, - PushNotificationConfig, - PushNotificationHandler, - StreamingConfig, - StreamingHandler, - UpdateConfig, -) + +try: + from crewai.a2a.updates import ( + PollingConfig, + PollingHandler, + PushNotificationConfig, + PushNotificationHandler, + StreamingConfig, + StreamingHandler, + UpdateConfig, + ) +except ImportError: + PollingConfig = Any # type: ignore[misc,assignment] + PollingHandler = Any # type: ignore[misc,assignment] + PushNotificationConfig = Any # type: ignore[misc,assignment] + PushNotificationHandler = Any # type: ignore[misc,assignment] + StreamingConfig = Any # type: ignore[misc,assignment] + StreamingHandler = Any # type: ignore[misc,assignment] + UpdateConfig = Any # type: ignore[misc,assignment] TransportType = Literal["JSONRPC", "GRPC", "HTTP+JSON"] diff --git a/lib/crewai/src/crewai/a2a/utils/delegation.py b/lib/crewai/src/crewai/a2a/utils/delegation.py index 0fc9eaec5..f322bbf74 100644 --- a/lib/crewai/src/crewai/a2a/utils/delegation.py +++ b/lib/crewai/src/crewai/a2a/utils/delegation.py @@ -251,30 +251,48 @@ async def aexecute_a2a_delegation( if turn_number is None: turn_number = len([m for m in conversation_history if m.role == Role.user]) + 1 - result = await _aexecute_a2a_delegation_impl( - endpoint=endpoint, - auth=auth, - timeout=timeout, - 
task_description=task_description, - context=context, - context_id=context_id, - task_id=task_id, - reference_task_ids=reference_task_ids, - metadata=metadata, - extensions=extensions, - conversation_history=conversation_history, - is_multiturn=is_multiturn, - turn_number=turn_number, - agent_branch=agent_branch, - agent_id=agent_id, - agent_role=agent_role, - response_model=response_model, - updates=updates, - transport_protocol=transport_protocol, - from_task=from_task, - from_agent=from_agent, - skill_id=skill_id, - ) + try: + result = await _aexecute_a2a_delegation_impl( + endpoint=endpoint, + auth=auth, + timeout=timeout, + task_description=task_description, + context=context, + context_id=context_id, + task_id=task_id, + reference_task_ids=reference_task_ids, + metadata=metadata, + extensions=extensions, + conversation_history=conversation_history, + is_multiturn=is_multiturn, + turn_number=turn_number, + agent_branch=agent_branch, + agent_id=agent_id, + agent_role=agent_role, + response_model=response_model, + updates=updates, + transport_protocol=transport_protocol, + from_task=from_task, + from_agent=from_agent, + skill_id=skill_id, + ) + except Exception as e: + crewai_event_bus.emit( + agent_branch, + A2ADelegationCompletedEvent( + status="failed", + result=None, + error=str(e), + context_id=context_id, + is_multiturn=is_multiturn, + endpoint=endpoint, + metadata=metadata, + extensions=list(extensions.keys()) if extensions else None, + from_task=from_task, + from_agent=from_agent, + ), + ) + raise agent_card_data: dict[str, Any] = result.get("agent_card") or {} crewai_event_bus.emit( diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py index 005d14de8..e00974b41 100644 --- a/lib/crewai/src/crewai/agent/core.py +++ b/lib/crewai/src/crewai/agent/core.py @@ -14,7 +14,14 @@ from typing import ( ) from urllib.parse import urlparse -from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator +from pydantic import ( + BaseModel, + ConfigDict, + Field, + InstanceOf, + PrivateAttr, + model_validator, +) from typing_extensions import Self from crewai.agent.utils import ( @@ -46,6 +53,7 @@ from crewai.events.types.knowledge_events import ( ) from crewai.events.types.memory_events import ( MemoryRetrievalCompletedEvent, + MemoryRetrievalFailedEvent, MemoryRetrievalStartedEvent, ) from crewai.experimental.agent_executor import AgentExecutor @@ -85,17 +93,10 @@ from crewai.utilities.token_counter_callback import TokenCalcHandler from crewai.utilities.training_handler import CrewTrainingHandler -try: - from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig -except ImportError: - A2AClientConfig = Any - A2AConfig = Any - A2AServerConfig = Any - - if TYPE_CHECKING: from crewai_tools import CodeInterpreterTool + from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig from crewai.agents.agent_builder.base_agent import PlatformAppOrAction from crewai.task import Task from crewai.tools.base_tool import BaseTool @@ -141,6 +142,8 @@ class Agent(BaseAgent): mcps: List of MCP server references for tool integration. 
""" + model_config = ConfigDict() + _times_executed: int = PrivateAttr(default=0) _mcp_clients: list[Any] = PrivateAttr(default_factory=list) _last_messages: list[LLMMessage] = PrivateAttr(default_factory=list) @@ -354,30 +357,43 @@ class Agent(BaseAgent): ) start_time = time.time() + memory = "" - contextual_memory = ContextualMemory( - self.crew._short_term_memory, - self.crew._long_term_memory, - self.crew._entity_memory, - self.crew._external_memory, - agent=self, - task=task, - ) - memory = contextual_memory.build_context_for_task(task, context or "") - if memory.strip() != "": - task_prompt += self.i18n.slice("memory").format(memory=memory) + try: + contextual_memory = ContextualMemory( + self.crew._short_term_memory, + self.crew._long_term_memory, + self.crew._entity_memory, + self.crew._external_memory, + agent=self, + task=task, + ) + memory = contextual_memory.build_context_for_task(task, context or "") + if memory.strip() != "": + task_prompt += self.i18n.slice("memory").format(memory=memory) - crewai_event_bus.emit( - self, - event=MemoryRetrievalCompletedEvent( - task_id=str(task.id) if task else None, - memory_content=memory, - retrieval_time_ms=(time.time() - start_time) * 1000, - source_type="agent", - from_agent=self, - from_task=task, - ), - ) + crewai_event_bus.emit( + self, + event=MemoryRetrievalCompletedEvent( + task_id=str(task.id) if task else None, + memory_content=memory, + retrieval_time_ms=(time.time() - start_time) * 1000, + source_type="agent", + from_agent=self, + from_task=task, + ), + ) + except Exception as e: + crewai_event_bus.emit( + self, + event=MemoryRetrievalFailedEvent( + task_id=str(task.id) if task else None, + source_type="agent", + from_agent=self, + from_task=task, + error=str(e), + ), + ) knowledge_config = get_knowledge_config(self) task_prompt = handle_knowledge_retrieval( @@ -563,32 +579,45 @@ class Agent(BaseAgent): ) start_time = time.time() + memory = "" - contextual_memory = ContextualMemory( - self.crew._short_term_memory, - self.crew._long_term_memory, - self.crew._entity_memory, - self.crew._external_memory, - agent=self, - task=task, - ) - memory = await contextual_memory.abuild_context_for_task( - task, context or "" - ) - if memory.strip() != "": - task_prompt += self.i18n.slice("memory").format(memory=memory) + try: + contextual_memory = ContextualMemory( + self.crew._short_term_memory, + self.crew._long_term_memory, + self.crew._entity_memory, + self.crew._external_memory, + agent=self, + task=task, + ) + memory = await contextual_memory.abuild_context_for_task( + task, context or "" + ) + if memory.strip() != "": + task_prompt += self.i18n.slice("memory").format(memory=memory) - crewai_event_bus.emit( - self, - event=MemoryRetrievalCompletedEvent( - task_id=str(task.id) if task else None, - memory_content=memory, - retrieval_time_ms=(time.time() - start_time) * 1000, - source_type="agent", - from_agent=self, - from_task=task, - ), - ) + crewai_event_bus.emit( + self, + event=MemoryRetrievalCompletedEvent( + task_id=str(task.id) if task else None, + memory_content=memory, + retrieval_time_ms=(time.time() - start_time) * 1000, + source_type="agent", + from_agent=self, + from_task=task, + ), + ) + except Exception as e: + crewai_event_bus.emit( + self, + event=MemoryRetrievalFailedEvent( + task_id=str(task.id) if task else None, + source_type="agent", + from_agent=self, + from_task=task, + error=str(e), + ), + ) knowledge_config = get_knowledge_config(self) task_prompt = await ahandle_knowledge_retrieval( @@ -2039,3 +2068,22 @@ 
class Agent(BaseAgent): ), ) raise + + +# Rebuild Agent model to resolve A2A type forward references +try: + from crewai.a2a.config import ( + A2AClientConfig as _A2AClientConfig, + A2AConfig as _A2AConfig, + A2AServerConfig as _A2AServerConfig, + ) + + Agent.model_rebuild( + _types_namespace={ + "A2AConfig": _A2AConfig, + "A2AClientConfig": _A2AClientConfig, + "A2AServerConfig": _A2AServerConfig, + } + ) +except ImportError: + pass diff --git a/lib/crewai/src/crewai/crews/utils.py b/lib/crewai/src/crewai/crews/utils.py index 5694dcda1..4e78a30e5 100644 --- a/lib/crewai/src/crewai/crews/utils.py +++ b/lib/crewai/src/crewai/crews/utils.py @@ -189,9 +189,15 @@ def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any] Returns: The potentially modified inputs dictionary after before callbacks. """ + from crewai.events.base_events import reset_emission_counter from crewai.events.event_bus import crewai_event_bus + from crewai.events.event_context import get_current_parent_id, reset_last_event_id from crewai.events.types.crew_events import CrewKickoffStartedEvent + if get_current_parent_id() is None: + reset_emission_counter() + reset_last_event_id() + for before_callback in crew.before_kickoff_callbacks: if inputs is None: inputs = {} diff --git a/lib/crewai/src/crewai/events/__init__.py b/lib/crewai/src/crewai/events/__init__.py index efbb479cd..61c0ec380 100644 --- a/lib/crewai/src/crewai/events/__init__.py +++ b/lib/crewai/src/crewai/events/__init__.py @@ -75,6 +75,7 @@ from crewai.events.types.memory_events import ( MemoryQueryFailedEvent, MemoryQueryStartedEvent, MemoryRetrievalCompletedEvent, + MemoryRetrievalFailedEvent, MemoryRetrievalStartedEvent, MemorySaveCompletedEvent, MemorySaveFailedEvent, @@ -174,6 +175,7 @@ __all__ = [ "MemoryQueryFailedEvent", "MemoryQueryStartedEvent", "MemoryRetrievalCompletedEvent", + "MemoryRetrievalFailedEvent", "MemoryRetrievalStartedEvent", "MemorySaveCompletedEvent", "MemorySaveFailedEvent", diff --git a/lib/crewai/src/crewai/events/base_events.py b/lib/crewai/src/crewai/events/base_events.py index 4f4e80434..7148e5e1d 100644 --- a/lib/crewai/src/crewai/events/base_events.py +++ b/lib/crewai/src/crewai/events/base_events.py @@ -1,9 +1,46 @@ +from collections.abc import Iterator +import contextvars from datetime import datetime, timezone +import itertools from typing import Any +import uuid from pydantic import BaseModel, Field -from crewai.utilities.serialization import to_serializable +from crewai.utilities.serialization import Serializable, to_serializable + + +_emission_counter: contextvars.ContextVar[Iterator[int]] = contextvars.ContextVar( + "_emission_counter" +) + + +def _get_or_create_counter() -> Iterator[int]: + """Get the emission counter for the current context, creating if needed.""" + try: + return _emission_counter.get() + except LookupError: + counter: Iterator[int] = itertools.count(start=1) + _emission_counter.set(counter) + return counter + + +def get_next_emission_sequence() -> int: + """Get the next emission sequence number. + + Returns: + The next sequence number. + """ + return next(_get_or_create_counter()) + + +def reset_emission_counter() -> None: + """Reset the emission sequence counter to 1. + + Resets for the current context only. 
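+
+    After a reset, get_next_emission_sequence() starts again at 1. The autouse
+    reset_event_state fixture in conftest and prepare_kickoff rely on this for
+    per-test and per-kickoff isolation.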
+ """ + counter: Iterator[int] = itertools.count(start=1) + _emission_counter.set(counter) class BaseEvent(BaseModel): @@ -22,7 +59,13 @@ class BaseEvent(BaseModel): agent_id: str | None = None agent_role: str | None = None - def to_json(self, exclude: set[str] | None = None): + event_id: str = Field(default_factory=lambda: str(uuid.uuid4())) + parent_event_id: str | None = None + previous_event_id: str | None = None + triggered_by_event_id: str | None = None + emission_sequence: int | None = None + + def to_json(self, exclude: set[str] | None = None) -> Serializable: """ Converts the event to a JSON-serializable dictionary. @@ -34,13 +77,13 @@ class BaseEvent(BaseModel): """ return to_serializable(self, exclude=exclude) - def _set_task_params(self, data: dict[str, Any]): + def _set_task_params(self, data: dict[str, Any]) -> None: if "from_task" in data and (task := data["from_task"]): self.task_id = str(task.id) self.task_name = task.name or task.description self.from_task = None - def _set_agent_params(self, data: dict[str, Any]): + def _set_agent_params(self, data: dict[str, Any]) -> None: task = data.get("from_task", None) agent = task.agent if task else data.get("from_agent", None) diff --git a/lib/crewai/src/crewai/events/event_bus.py b/lib/crewai/src/crewai/events/event_bus.py index 9fabace08..5c4bec58f 100644 --- a/lib/crewai/src/crewai/events/event_bus.py +++ b/lib/crewai/src/crewai/events/event_bus.py @@ -16,8 +16,22 @@ from typing import Any, Final, ParamSpec, TypeVar from typing_extensions import Self -from crewai.events.base_events import BaseEvent +from crewai.events.base_events import BaseEvent, get_next_emission_sequence from crewai.events.depends import Depends +from crewai.events.event_context import ( + SCOPE_ENDING_EVENTS, + SCOPE_STARTING_EVENTS, + VALID_EVENT_PAIRS, + get_current_parent_id, + get_enclosing_parent_id, + get_last_event_id, + get_triggering_event_id, + handle_empty_pop, + handle_mismatch, + pop_event_scope, + push_event_scope, + set_last_event_id, +) from crewai.events.handler_graph import build_execution_plan from crewai.events.types.event_bus_types import ( AsyncHandler, @@ -69,6 +83,8 @@ class CrewAIEventsBus: _execution_plan_cache: dict[type[BaseEvent], ExecutionPlan] _console: ConsoleFormatter _shutting_down: bool + _pending_futures: set[Future[Any]] + _futures_lock: threading.Lock def __new__(cls) -> Self: """Create or return the singleton instance. @@ -91,6 +107,8 @@ class CrewAIEventsBus: """ self._shutting_down = False self._rwlock = RWLock() + self._pending_futures: set[Future[Any]] = set() + self._futures_lock = threading.Lock() self._sync_handlers: dict[type[BaseEvent], SyncHandlerSet] = {} self._async_handlers: dict[type[BaseEvent], AsyncHandlerSet] = {} self._handler_dependencies: dict[ @@ -111,6 +129,25 @@ class CrewAIEventsBus: ) self._loop_thread.start() + def _track_future(self, future: Future[Any]) -> Future[Any]: + """Track a future and set up automatic cleanup when it completes. + + Args: + future: The future to track + + Returns: + The same future for chaining + """ + with self._futures_lock: + self._pending_futures.add(future) + + def _cleanup(f: Future[Any]) -> None: + with self._futures_lock: + self._pending_futures.discard(f) + + future.add_done_callback(_cleanup) + return future + def _run_loop(self) -> None: """Run the background async event loop.""" asyncio.set_event_loop(self._loop) @@ -326,6 +363,28 @@ class CrewAIEventsBus: ... await asyncio.wrap_future(future) # In async test ... 
# or future.result(timeout=5.0) in sync code """ + event.previous_event_id = get_last_event_id() + event.triggered_by_event_id = get_triggering_event_id() + event.emission_sequence = get_next_emission_sequence() + if event.parent_event_id is None: + event_type_name = event.type + if event_type_name in SCOPE_ENDING_EVENTS: + event.parent_event_id = get_enclosing_parent_id() + popped = pop_event_scope() + if popped is None: + handle_empty_pop(event_type_name) + else: + _, popped_type = popped + expected_start = VALID_EVENT_PAIRS.get(event_type_name) + if expected_start and popped_type and popped_type != expected_start: + handle_mismatch(event_type_name, popped_type, expected_start) + elif event_type_name in SCOPE_STARTING_EVENTS: + event.parent_event_id = get_current_parent_id() + push_event_scope(event.event_id, event_type_name) + else: + event.parent_event_id = get_current_parent_id() + + set_last_event_id(event.event_id) event_type = type(event) with self._rwlock.r_locked(): @@ -339,9 +398,11 @@ class CrewAIEventsBus: async_handlers = self._async_handlers.get(event_type, frozenset()) if has_dependencies: - return asyncio.run_coroutine_threadsafe( - self._emit_with_dependencies(source, event), - self._loop, + return self._track_future( + asyncio.run_coroutine_threadsafe( + self._emit_with_dependencies(source, event), + self._loop, + ) ) if sync_handlers: @@ -353,16 +414,53 @@ class CrewAIEventsBus: ctx.run, self._call_handlers, source, event, sync_handlers ) if not async_handlers: - return sync_future + return self._track_future(sync_future) if async_handlers: - return asyncio.run_coroutine_threadsafe( - self._acall_handlers(source, event, async_handlers), - self._loop, + return self._track_future( + asyncio.run_coroutine_threadsafe( + self._acall_handlers(source, event, async_handlers), + self._loop, + ) ) return None + def flush(self, timeout: float | None = 30.0) -> bool: + """Block until all pending event handlers complete. + + This method waits for all futures from previously emitted events to + finish executing. Useful at the end of operations (like kickoff) to + ensure all event handlers have completed before returning. + + Args: + timeout: Maximum time in seconds to wait for handlers to complete. + Defaults to 30 seconds. Pass None to wait indefinitely. + + Returns: + True if all handlers completed, False if timeout occurred. + """ + with self._futures_lock: + futures_to_wait = list(self._pending_futures) + + if not futures_to_wait: + return True + + from concurrent.futures import wait as wait_futures + + done, not_done = wait_futures(futures_to_wait, timeout=timeout) + + # Check for exceptions in completed futures + errors = [ + future.exception() for future in done if future.exception() is not None + ] + for error in errors: + self._console.print( + f"[CrewAIEventsBus] Handler exception during flush: {error}" + ) + + return len(not_done) == 0 + async def aemit(self, source: Any, event: BaseEvent) -> None: """Asynchronously emit an event to registered async handlers. @@ -464,6 +562,9 @@ class CrewAIEventsBus: wait: If True, wait for all pending tasks to complete before stopping. If False, cancel all pending tasks immediately. 
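+                When waiting, flush() is called first so that pending handler
+                futures complete before the loop shuts down.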
""" + if wait: + self.flush() + with self._rwlock.w_locked(): self._shutting_down = True loop = getattr(self, "_loop", None) diff --git a/lib/crewai/src/crewai/events/event_context.py b/lib/crewai/src/crewai/events/event_context.py new file mode 100644 index 000000000..672daf786 --- /dev/null +++ b/lib/crewai/src/crewai/events/event_context.py @@ -0,0 +1,334 @@ +"""Event context management for parent-child relationship tracking.""" + +from collections.abc import Generator +from contextlib import contextmanager +import contextvars +from dataclasses import dataclass +from enum import Enum + +from crewai.events.utils.console_formatter import ConsoleFormatter + + +class MismatchBehavior(Enum): + """Behavior when event pairs don't match.""" + + WARN = "warn" + RAISE = "raise" + SILENT = "silent" + + +@dataclass +class EventContextConfig: + """Configuration for event context behavior.""" + + max_stack_depth: int = 100 + mismatch_behavior: MismatchBehavior = MismatchBehavior.WARN + empty_pop_behavior: MismatchBehavior = MismatchBehavior.WARN + + +class StackDepthExceededError(Exception): + """Raised when stack depth limit is exceeded.""" + + +class EventPairingError(Exception): + """Raised when event pairs don't match.""" + + +class EmptyStackError(Exception): + """Raised when popping from empty stack.""" + + +_event_id_stack: contextvars.ContextVar[tuple[tuple[str, str], ...]] = ( + contextvars.ContextVar("_event_id_stack", default=()) +) + +_event_context_config: contextvars.ContextVar[EventContextConfig | None] = ( + contextvars.ContextVar("_event_context_config", default=None) +) + +_last_event_id: contextvars.ContextVar[str | None] = contextvars.ContextVar( + "_last_event_id", default=None +) + +_triggering_event_id: contextvars.ContextVar[str | None] = contextvars.ContextVar( + "_triggering_event_id", default=None +) + +_default_config = EventContextConfig() + +_console = ConsoleFormatter() + + +def get_current_parent_id() -> str | None: + """Get the current parent event ID from the stack.""" + stack = _event_id_stack.get() + return stack[-1][0] if stack else None + + +def get_enclosing_parent_id() -> str | None: + """Get the parent of the current scope (stack[-2]).""" + stack = _event_id_stack.get() + return stack[-2][0] if len(stack) >= 2 else None + + +def get_last_event_id() -> str | None: + """Get the ID of the last emitted event for linear chain tracking. + + Returns: + The event_id of the previously emitted event, or None if no event yet. + """ + return _last_event_id.get() + + +def reset_last_event_id() -> None: + """Reset the last event ID to None. + + Should be called at the start of a new flow or when resetting event state. + """ + _last_event_id.set(None) + + +def set_last_event_id(event_id: str) -> None: + """Set the ID of the last emitted event. + + Args: + event_id: The event_id to set as the last emitted event. + """ + _last_event_id.set(event_id) + + +def get_triggering_event_id() -> str | None: + """Get the ID of the event that triggered the current execution. + + Returns: + The event_id of the triggering event, or None if not in a triggered context. + """ + return _triggering_event_id.get() + + +def set_triggering_event_id(event_id: str | None) -> None: + """Set the ID of the triggering event for causal chain tracking. + + Args: + event_id: The event_id that triggered the current execution, or None. 
+ """ + _triggering_event_id.set(event_id) + + +@contextmanager +def triggered_by_scope(event_id: str) -> Generator[None, None, None]: + """Context manager to set the triggering event ID for causal chain tracking. + + All events emitted within this context will have their triggered_by_event_id + set to the provided event_id. + + Args: + event_id: The event_id that triggered the current execution. + """ + previous = _triggering_event_id.get() + _triggering_event_id.set(event_id) + try: + yield + finally: + _triggering_event_id.set(previous) + + +def push_event_scope(event_id: str, event_type: str = "") -> None: + """Push an event ID and type onto the scope stack.""" + config = _event_context_config.get() or _default_config + stack = _event_id_stack.get() + + if 0 < config.max_stack_depth <= len(stack): + raise StackDepthExceededError( + f"Event stack depth limit ({config.max_stack_depth}) exceeded. " + f"This usually indicates missing ending events." + ) + + _event_id_stack.set((*stack, (event_id, event_type))) + + +def pop_event_scope() -> tuple[str, str] | None: + """Pop an event entry from the scope stack.""" + stack = _event_id_stack.get() + if not stack: + return None + _event_id_stack.set(stack[:-1]) + return stack[-1] + + +def handle_empty_pop(event_type_name: str) -> None: + """Handle a pop attempt on an empty stack.""" + config = _event_context_config.get() or _default_config + msg = ( + f"Ending event '{event_type_name}' emitted with empty scope stack. " + "Missing starting event?" + ) + + if config.empty_pop_behavior == MismatchBehavior.RAISE: + raise EmptyStackError(msg) + if config.empty_pop_behavior == MismatchBehavior.WARN: + _console.print(f"[CrewAIEventsBus] Warning: {msg}") + + +def handle_mismatch( + event_type_name: str, + popped_type: str, + expected_start: str, +) -> None: + """Handle a mismatched event pair.""" + config = _event_context_config.get() or _default_config + msg = ( + f"Event pairing mismatch. 
'{event_type_name}' closed '{popped_type}' " + f"(expected '{expected_start}')" + ) + + if config.mismatch_behavior == MismatchBehavior.RAISE: + raise EventPairingError(msg) + if config.mismatch_behavior == MismatchBehavior.WARN: + _console.print(f"[CrewAIEventsBus] Warning: {msg}") + + +@contextmanager +def event_scope(event_id: str, event_type: str = "") -> Generator[None, None, None]: + """Context manager to establish a parent event scope.""" + stack = _event_id_stack.get() + already_on_stack = any(entry[0] == event_id for entry in stack) + if not already_on_stack: + push_event_scope(event_id, event_type) + try: + yield + finally: + if not already_on_stack: + pop_event_scope() + + +SCOPE_STARTING_EVENTS: frozenset[str] = frozenset( + { + "flow_started", + "method_execution_started", + "crew_kickoff_started", + "crew_train_started", + "crew_test_started", + "agent_execution_started", + "agent_evaluation_started", + "lite_agent_execution_started", + "task_started", + "llm_call_started", + "llm_guardrail_started", + "tool_usage_started", + "mcp_connection_started", + "mcp_tool_execution_started", + "memory_retrieval_started", + "memory_save_started", + "memory_query_started", + "knowledge_query_started", + "knowledge_search_query_started", + "a2a_delegation_started", + "a2a_conversation_started", + "a2a_server_task_started", + "a2a_parallel_delegation_started", + "agent_reasoning_started", + } +) + +SCOPE_ENDING_EVENTS: frozenset[str] = frozenset( + { + "flow_finished", + "flow_paused", + "method_execution_finished", + "method_execution_failed", + "method_execution_paused", + "crew_kickoff_completed", + "crew_kickoff_failed", + "crew_train_completed", + "crew_train_failed", + "crew_test_completed", + "crew_test_failed", + "agent_execution_completed", + "agent_execution_error", + "agent_evaluation_completed", + "agent_evaluation_failed", + "lite_agent_execution_completed", + "lite_agent_execution_error", + "task_completed", + "task_failed", + "llm_call_completed", + "llm_call_failed", + "llm_guardrail_completed", + "llm_guardrail_failed", + "tool_usage_finished", + "tool_usage_error", + "mcp_connection_completed", + "mcp_connection_failed", + "mcp_tool_execution_completed", + "mcp_tool_execution_failed", + "memory_retrieval_completed", + "memory_retrieval_failed", + "memory_save_completed", + "memory_save_failed", + "memory_query_completed", + "memory_query_failed", + "knowledge_query_completed", + "knowledge_query_failed", + "knowledge_search_query_completed", + "knowledge_search_query_failed", + "a2a_delegation_completed", + "a2a_conversation_completed", + "a2a_server_task_completed", + "a2a_server_task_canceled", + "a2a_server_task_failed", + "a2a_parallel_delegation_completed", + "agent_reasoning_completed", + "agent_reasoning_failed", + } +) + +VALID_EVENT_PAIRS: dict[str, str] = { + "flow_finished": "flow_started", + "flow_paused": "flow_started", + "method_execution_finished": "method_execution_started", + "method_execution_failed": "method_execution_started", + "method_execution_paused": "method_execution_started", + "crew_kickoff_completed": "crew_kickoff_started", + "crew_kickoff_failed": "crew_kickoff_started", + "crew_train_completed": "crew_train_started", + "crew_train_failed": "crew_train_started", + "crew_test_completed": "crew_test_started", + "crew_test_failed": "crew_test_started", + "agent_execution_completed": "agent_execution_started", + "agent_execution_error": "agent_execution_started", + "agent_evaluation_completed": "agent_evaluation_started", + 
"agent_evaluation_failed": "agent_evaluation_started", + "lite_agent_execution_completed": "lite_agent_execution_started", + "lite_agent_execution_error": "lite_agent_execution_started", + "task_completed": "task_started", + "task_failed": "task_started", + "llm_call_completed": "llm_call_started", + "llm_call_failed": "llm_call_started", + "llm_guardrail_completed": "llm_guardrail_started", + "llm_guardrail_failed": "llm_guardrail_started", + "tool_usage_finished": "tool_usage_started", + "tool_usage_error": "tool_usage_started", + "mcp_connection_completed": "mcp_connection_started", + "mcp_connection_failed": "mcp_connection_started", + "mcp_tool_execution_completed": "mcp_tool_execution_started", + "mcp_tool_execution_failed": "mcp_tool_execution_started", + "memory_retrieval_completed": "memory_retrieval_started", + "memory_retrieval_failed": "memory_retrieval_started", + "memory_save_completed": "memory_save_started", + "memory_save_failed": "memory_save_started", + "memory_query_completed": "memory_query_started", + "memory_query_failed": "memory_query_started", + "knowledge_query_completed": "knowledge_query_started", + "knowledge_query_failed": "knowledge_query_started", + "knowledge_search_query_completed": "knowledge_search_query_started", + "knowledge_search_query_failed": "knowledge_search_query_started", + "a2a_delegation_completed": "a2a_delegation_started", + "a2a_conversation_completed": "a2a_conversation_started", + "a2a_server_task_completed": "a2a_server_task_started", + "a2a_server_task_canceled": "a2a_server_task_started", + "a2a_server_task_failed": "a2a_server_task_started", + "a2a_parallel_delegation_completed": "a2a_parallel_delegation_started", + "agent_reasoning_completed": "agent_reasoning_started", + "agent_reasoning_failed": "agent_reasoning_started", +} diff --git a/lib/crewai/src/crewai/events/event_types.py b/lib/crewai/src/crewai/events/event_types.py index 78aa11fe0..5fca4bd7d 100644 --- a/lib/crewai/src/crewai/events/event_types.py +++ b/lib/crewai/src/crewai/events/event_types.py @@ -79,6 +79,7 @@ from crewai.events.types.memory_events import ( MemoryQueryFailedEvent, MemoryQueryStartedEvent, MemoryRetrievalCompletedEvent, + MemoryRetrievalFailedEvent, MemoryRetrievalStartedEvent, MemorySaveCompletedEvent, MemorySaveFailedEvent, @@ -173,6 +174,7 @@ EventTypes = ( | MemoryQueryFailedEvent | MemoryRetrievalStartedEvent | MemoryRetrievalCompletedEvent + | MemoryRetrievalFailedEvent | MCPConnectionStartedEvent | MCPConnectionCompletedEvent | MCPConnectionFailedEvent diff --git a/lib/crewai/src/crewai/events/listeners/tracing/trace_batch_manager.py b/lib/crewai/src/crewai/events/listeners/tracing/trace_batch_manager.py index 3c5bf0aa2..6c45f63ef 100644 --- a/lib/crewai/src/crewai/events/listeners/tracing/trace_batch_manager.py +++ b/lib/crewai/src/crewai/events/listeners/tracing/trace_batch_manager.py @@ -267,9 +267,12 @@ class TraceBatchManager: sorted_events = sorted( self.event_buffer, - key=lambda e: e.timestamp - if hasattr(e, "timestamp") and e.timestamp - else "", + key=lambda e: ( + e.emission_sequence + if e.emission_sequence is not None + else float("inf"), + e.timestamp if hasattr(e, "timestamp") and e.timestamp else "", + ), ) self.current_batch.events = sorted_events diff --git a/lib/crewai/src/crewai/events/listeners/tracing/trace_listener.py b/lib/crewai/src/crewai/events/listeners/tracing/trace_listener.py index a4d4cbd31..ee337d7fd 100644 --- a/lib/crewai/src/crewai/events/listeners/tracing/trace_listener.py +++ 
b/lib/crewai/src/crewai/events/listeners/tracing/trace_listener.py @@ -9,6 +9,7 @@ from typing_extensions import Self from crewai.cli.authentication.token import AuthError, get_auth_token from crewai.cli.version import get_crewai_version from crewai.events.base_event_listener import BaseEventListener +from crewai.events.base_events import BaseEvent from crewai.events.event_bus import CrewAIEventsBus from crewai.events.listeners.tracing.first_time_trace_handler import ( FirstTimeTraceHandler, @@ -616,7 +617,7 @@ class TraceCollectionListener(BaseEventListener): if self.batch_manager.is_batch_initialized(): self.batch_manager.finalize_batch() - def _initialize_crew_batch(self, source: Any, event: Any) -> None: + def _initialize_crew_batch(self, source: Any, event: BaseEvent) -> None: """Initialize trace batch. Args: @@ -626,7 +627,7 @@ class TraceCollectionListener(BaseEventListener): user_context = self._get_user_context() execution_metadata = { "crew_name": getattr(event, "crew_name", "Unknown Crew"), - "execution_start": event.timestamp if hasattr(event, "timestamp") else None, + "execution_start": event.timestamp, "crewai_version": get_crewai_version(), } @@ -635,7 +636,7 @@ class TraceCollectionListener(BaseEventListener): self._initialize_batch(user_context, execution_metadata) - def _initialize_flow_batch(self, source: Any, event: Any) -> None: + def _initialize_flow_batch(self, source: Any, event: BaseEvent) -> None: """Initialize trace batch for Flow execution. Args: @@ -645,7 +646,7 @@ class TraceCollectionListener(BaseEventListener): user_context = self._get_user_context() execution_metadata = { "flow_name": getattr(event, "flow_name", "Unknown Flow"), - "execution_start": event.timestamp if hasattr(event, "timestamp") else None, + "execution_start": event.timestamp, "crewai_version": get_crewai_version(), "execution_type": "flow", } @@ -714,18 +715,18 @@ class TraceCollectionListener(BaseEventListener): self.batch_manager.end_event_processing() def _create_trace_event( - self, event_type: str, source: Any, event: Any + self, event_type: str, source: Any, event: BaseEvent ) -> TraceEvent: - """Create a trace event""" - if hasattr(event, "timestamp") and event.timestamp: - trace_event = TraceEvent( - type=event_type, - timestamp=event.timestamp.isoformat(), - ) - else: - trace_event = TraceEvent( - type=event_type, - ) + """Create a trace event with ordering information.""" + trace_event = TraceEvent( + type=event_type, + timestamp=event.timestamp.isoformat() if event.timestamp else "", + event_id=event.event_id, + emission_sequence=event.emission_sequence, + parent_event_id=event.parent_event_id, + previous_event_id=event.previous_event_id, + triggered_by_event_id=event.triggered_by_event_id, + ) trace_event.event_data = self._build_event_data(event_type, event, source) @@ -778,10 +779,8 @@ class TraceCollectionListener(BaseEventListener): } if event_type == "llm_call_started": event_data = safe_serialize_to_dict(event) - event_data["task_name"] = ( - event.task_name or event.task_description - if hasattr(event, "task_name") and event.task_name - else None + event_data["task_name"] = event.task_name or getattr( + event, "task_description", None ) return event_data if event_type == "llm_call_completed": diff --git a/lib/crewai/src/crewai/events/listeners/tracing/types.py b/lib/crewai/src/crewai/events/listeners/tracing/types.py index cdc2b6c26..3468bd3c4 100644 --- a/lib/crewai/src/crewai/events/listeners/tracing/types.py +++ 
b/lib/crewai/src/crewai/events/listeners/tracing/types.py @@ -15,5 +15,10 @@ class TraceEvent: type: str = "" event_data: dict[str, Any] = field(default_factory=dict) + emission_sequence: int | None = None + parent_event_id: str | None = None + previous_event_id: str | None = None + triggered_by_event_id: str | None = None + def to_dict(self) -> dict[str, Any]: return asdict(self) diff --git a/lib/crewai/src/crewai/events/types/memory_events.py b/lib/crewai/src/crewai/events/types/memory_events.py index 7e954427a..0fd57a352 100644 --- a/lib/crewai/src/crewai/events/types/memory_events.py +++ b/lib/crewai/src/crewai/events/types/memory_events.py @@ -14,7 +14,7 @@ class MemoryBaseEvent(BaseEvent): agent_role: str | None = None agent_id: str | None = None - def __init__(self, **data): + def __init__(self, **data: Any) -> None: super().__init__(**data) self._set_agent_params(data) self._set_task_params(data) @@ -93,3 +93,11 @@ class MemoryRetrievalCompletedEvent(MemoryBaseEvent): task_id: str | None = None memory_content: str retrieval_time_ms: float + + +class MemoryRetrievalFailedEvent(MemoryBaseEvent): + """Event emitted when memory retrieval for a task prompt fails.""" + + type: str = "memory_retrieval_failed" + task_id: str | None = None + error: str diff --git a/lib/crewai/src/crewai/flow/flow.py b/lib/crewai/src/crewai/flow/flow.py index a3e5f69ac..36146d009 100644 --- a/lib/crewai/src/crewai/flow/flow.py +++ b/lib/crewai/src/crewai/flow/flow.py @@ -31,7 +31,13 @@ from pydantic import BaseModel, Field, ValidationError from rich.console import Console from rich.panel import Panel +from crewai.events.base_events import reset_emission_counter from crewai.events.event_bus import crewai_event_bus +from crewai.events.event_context import ( + get_current_parent_id, + reset_last_event_id, + triggered_by_scope, +) from crewai.events.listeners.tracing.trace_listener import ( TraceCollectionListener, ) @@ -753,6 +759,7 @@ class Flow(Generic[T], metaclass=FlowMeta): racing_listeners: frozenset[FlowMethodName], other_listeners: list[FlowMethodName], result: Any, + triggering_event_id: str | None = None, ) -> None: """Execute racing listeners with first-wins semantics. @@ -764,10 +771,11 @@ class Flow(Generic[T], metaclass=FlowMeta): racing_listeners: Set of listener names that race for an OR condition. other_listeners: Other listeners to execute in parallel (not racing). result: The result from the triggering method. + triggering_event_id: The event_id of the event that triggered these listeners. """ racing_tasks = [ asyncio.create_task( - self._execute_single_listener(name, result), + self._execute_single_listener(name, result, triggering_event_id), name=str(name), ) for name in racing_listeners @@ -775,7 +783,7 @@ class Flow(Generic[T], metaclass=FlowMeta): other_tasks = [ asyncio.create_task( - self._execute_single_listener(name, result), + self._execute_single_listener(name, result, triggering_event_id), name=str(name), ) for name in other_listeners @@ -1557,6 +1565,10 @@ class Flow(Generic[T], metaclass=FlowMeta): if filtered_inputs: self._initialize_state(filtered_inputs) + if get_current_parent_id() is None: + reset_emission_counter() + reset_last_event_id() + # Emit FlowStartedEvent and log the start of the flow. 
if not self.suppress_flow_events: future = crewai_event_bus.emit( @@ -1736,12 +1748,14 @@ class Flow(Generic[T], metaclass=FlowMeta): method = self._methods[start_method_name] enhanced_method = self._inject_trigger_payload_for_start_method(method) - result = await self._execute_method(start_method_name, enhanced_method) + result, finished_event_id = await self._execute_method( + start_method_name, enhanced_method + ) # If start method is a router, use its result as an additional trigger if start_method_name in self._routers and result is not None: # Execute listeners for the start method name first - await self._execute_listeners(start_method_name, result) + await self._execute_listeners(start_method_name, result, finished_event_id) # Then execute listeners for the router result (e.g., "approved") router_result_trigger = FlowMethodName(str(result)) listeners_for_result = self._find_triggered_methods( @@ -1765,16 +1779,21 @@ class Flow(Generic[T], metaclass=FlowMeta): if name not in racing_members ] await self._execute_racing_listeners( - racing_members, other_listeners, listener_result + racing_members, + other_listeners, + listener_result, + finished_event_id, ) else: tasks = [ - self._execute_single_listener(listener_name, listener_result) + self._execute_single_listener( + listener_name, listener_result, finished_event_id + ) for listener_name in listeners_for_result ] await asyncio.gather(*tasks) else: - await self._execute_listeners(start_method_name, result) + await self._execute_listeners(start_method_name, result, finished_event_id) def _inject_trigger_payload_for_start_method( self, original_method: Callable[..., Any] @@ -1818,7 +1837,14 @@ class Flow(Generic[T], metaclass=FlowMeta): method: Callable[..., Any], *args: Any, **kwargs: Any, - ) -> Any: + ) -> tuple[Any, str | None]: + """Execute a method and emit events. + + Returns: + A tuple of (result, finished_event_id) where finished_event_id is + the event_id of the MethodExecutionFinishedEvent, or None if events + are suppressed. + """ try: dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | ( kwargs or {} @@ -1859,21 +1885,21 @@ class Flow(Generic[T], metaclass=FlowMeta): self._completed_methods.add(method_name) + finished_event_id: str | None = None if not self.suppress_flow_events: - future = crewai_event_bus.emit( - self, - MethodExecutionFinishedEvent( - type="method_execution_finished", - method_name=method_name, - flow_name=self.name or self.__class__.__name__, - state=self._copy_and_serialize_state(), - result=result, - ), + finished_event = MethodExecutionFinishedEvent( + type="method_execution_finished", + method_name=method_name, + flow_name=self.name or self.__class__.__name__, + state=self._copy_and_serialize_state(), + result=result, ) + finished_event_id = finished_event.event_id + future = crewai_event_bus.emit(self, finished_event) if future: self._event_futures.append(future) - return result + return result, finished_event_id except Exception as e: # Check if this is a HumanFeedbackPending exception (paused, not failed) from crewai.flow.async_feedback.types import HumanFeedbackPending @@ -1927,7 +1953,10 @@ class Flow(Generic[T], metaclass=FlowMeta): return state_copy async def _execute_listeners( - self, trigger_method: FlowMethodName, result: Any + self, + trigger_method: FlowMethodName, + result: Any, + triggering_event_id: str | None = None, ) -> None: """Executes all listeners and routers triggered by a method completion. 
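
# ---------------------------------------------------------------------------
# Reviewer sketch (illustrative, not part of the patch): with
# triggering_event_id threaded through the listener-execution path below,
# a hypothetical two-step flow such as
#
#     class Pipeline(Flow):
#         @start()
#         def fetch(self):
#             return "data"
#
#         @listen(fetch)
#         def process(self, data): ...
#
# emits every event inside process() with triggered_by_event_id set to the
# event_id of fetch()'s MethodExecutionFinishedEvent (via triggered_by_scope).
# ---------------------------------------------------------------------------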
@@ -1938,6 +1967,8 @@ class Flow(Generic[T], metaclass=FlowMeta): Args: trigger_method: The name of the method that triggered these listeners. result: The result from the triggering method, passed to listeners that accept parameters. + triggering_event_id: The event_id of the MethodExecutionFinishedEvent that + triggered these listeners, used for causal chain tracking. Note: - Routers are executed sequentially to maintain flow control @@ -1952,6 +1983,7 @@ class Flow(Generic[T], metaclass=FlowMeta): ] = {} # Map outcome -> HumanFeedbackResult current_trigger = trigger_method current_result = result # Track the result to pass to each router + current_triggering_event_id = triggering_event_id while True: routers_triggered = self._find_triggered_methods( @@ -1965,7 +1997,9 @@ class Flow(Generic[T], metaclass=FlowMeta): router_input = router_result_to_feedback.get( str(current_trigger), current_result ) - await self._execute_single_listener(router_name, router_input) + current_triggering_event_id = await self._execute_single_listener( + router_name, router_input, current_triggering_event_id + ) # After executing router, the router's result is the path router_result = ( self._method_outputs[-1] if self._method_outputs else None @@ -2008,12 +2042,15 @@ class Flow(Generic[T], metaclass=FlowMeta): if name not in racing_members ] await self._execute_racing_listeners( - racing_members, other_listeners, listener_result + racing_members, + other_listeners, + listener_result, + triggering_event_id, ) else: tasks = [ self._execute_single_listener( - listener_name, listener_result + listener_name, listener_result, triggering_event_id ) for listener_name in listeners_triggered ] @@ -2192,8 +2229,11 @@ class Flow(Generic[T], metaclass=FlowMeta): return triggered async def _execute_single_listener( - self, listener_name: FlowMethodName, result: Any - ) -> None: + self, + listener_name: FlowMethodName, + result: Any, + triggering_event_id: str | None = None, + ) -> str | None: """Executes a single listener method with proper event handling. This internal method manages the execution of an individual listener, @@ -2202,6 +2242,12 @@ class Flow(Generic[T], metaclass=FlowMeta): Args: listener_name: The name of the listener method to execute. result: The result from the triggering method, which may be passed to the listener if it accepts parameters. + triggering_event_id: The event_id of the event that triggered this listener, + used for causal chain tracking. + + Returns: + The event_id of the MethodExecutionFinishedEvent emitted by this listener, + or None if events are suppressed. 
Note: - Inspects method signature to determine if it accepts the trigger result @@ -2227,7 +2273,7 @@ class Flow(Generic[T], metaclass=FlowMeta): ): # This conditional start was executed, continue its chain await self._execute_start_method(start_method_name) - return + return None # For cyclic flows, clear from completed to allow re-execution self._completed_methods.discard(listener_name) # Also clear from fired OR listeners for cyclic flows @@ -2240,15 +2286,30 @@ class Flow(Generic[T], metaclass=FlowMeta): params = list(sig.parameters.values()) method_params = [p for p in params if p.name != "self"] - if method_params: - listener_result = await self._execute_method( - listener_name, method, result - ) + if triggering_event_id: + with triggered_by_scope(triggering_event_id): + if method_params: + listener_result, finished_event_id = await self._execute_method( + listener_name, method, result + ) + else: + listener_result, finished_event_id = await self._execute_method( + listener_name, method + ) else: - listener_result = await self._execute_method(listener_name, method) + if method_params: + listener_result, finished_event_id = await self._execute_method( + listener_name, method, result + ) + else: + listener_result, finished_event_id = await self._execute_method( + listener_name, method + ) # Execute listeners (and possibly routers) of this listener - await self._execute_listeners(listener_name, listener_result) + await self._execute_listeners( + listener_name, listener_result, finished_event_id + ) # If this listener is also a router (e.g., has @human_feedback with emit), # we need to trigger listeners for the router result as well @@ -2275,15 +2336,22 @@ class Flow(Generic[T], metaclass=FlowMeta): if name not in racing_members ] await self._execute_racing_listeners( - racing_members, other_listeners, feedback_result + racing_members, + other_listeners, + feedback_result, + finished_event_id, ) else: tasks = [ - self._execute_single_listener(name, feedback_result) + self._execute_single_listener( + name, feedback_result, finished_event_id + ) for name in listeners_for_result ] await asyncio.gather(*tasks) + return finished_event_id + except Exception as e: # Don't log HumanFeedbackPending as an error - it's expected control flow from crewai.flow.async_feedback.types import HumanFeedbackPending diff --git a/lib/crewai/src/crewai/tools/tool_usage.py b/lib/crewai/src/crewai/tools/tool_usage.py index ab3d0fc25..780cce32d 100644 --- a/lib/crewai/src/crewai/tools/tool_usage.py +++ b/lib/crewai/src/crewai/tools/tool_usage.py @@ -241,6 +241,9 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() + started_at = time.time() + started_event_emitted = False + if self.agent: event_data = { "agent_key": self.agent.key, @@ -258,151 +261,162 @@ class ToolUsage: event_data["task_name"] = self.task.name or self.task.description event_data["task_id"] = str(self.task.id) crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data)) + started_event_emitted = True - started_at = time.time() from_cache = False result = None # type: ignore + should_retry = False + available_tool = None - if self.tools_handler and self.tools_handler.cache: - input_str = "" - if calling.arguments: - if isinstance(calling.arguments, dict): - input_str = json.dumps(calling.arguments) - else: - input_str = str(calling.arguments) + try: + if self.tools_handler and self.tools_handler.cache: + input_str = "" + if calling.arguments: + if isinstance(calling.arguments, dict): + input_str = json.dumps(calling.arguments) 
+ else: + input_str = str(calling.arguments) - result = self.tools_handler.cache.read( - tool=calling.tool_name, input=input_str - ) # type: ignore - from_cache = result is not None + result = self.tools_handler.cache.read( + tool=calling.tool_name, input=input_str + ) # type: ignore + from_cache = result is not None - available_tool = next( - ( - available_tool - for available_tool in self.tools - if available_tool.name == tool.name - ), - None, - ) + available_tool = next( + ( + available_tool + for available_tool in self.tools + if available_tool.name == tool.name + ), + None, + ) - usage_limit_error = self._check_usage_limit(available_tool, tool.name) - if usage_limit_error: - try: + usage_limit_error = self._check_usage_limit(available_tool, tool.name) + if usage_limit_error: result = usage_limit_error self._telemetry.tool_usage_error(llm=self.function_calling_llm) - return self._format_result(result=result) - except Exception: - if self.task: - self.task.increment_tools_errors() - - if result is None: - try: - if calling.tool_name in [ - "Delegate work to coworker", - "Ask question to coworker", - ]: - coworker = ( - calling.arguments.get("coworker") if calling.arguments else None - ) - if self.task: - self.task.increment_delegations(coworker) - - if calling.arguments: - try: - acceptable_args = tool.args_schema.model_json_schema()[ - "properties" - ].keys() - arguments = { - k: v - for k, v in calling.arguments.items() - if k in acceptable_args - } - arguments = self._add_fingerprint_metadata(arguments) - result = await tool.ainvoke(input=arguments) - except Exception: - arguments = calling.arguments - arguments = self._add_fingerprint_metadata(arguments) - result = await tool.ainvoke(input=arguments) - else: - arguments = self._add_fingerprint_metadata({}) - result = await tool.ainvoke(input=arguments) - except Exception as e: - self.on_tool_error(tool=tool, tool_calling=calling, e=e) - self._run_attempts += 1 - if self._run_attempts > self._max_parsing_attempts: - self._telemetry.tool_usage_error(llm=self.function_calling_llm) - error_message = self._i18n.errors("tool_usage_exception").format( - error=e, tool=tool.name, tool_inputs=tool.description - ) - error = ToolUsageError( - f"\n{error_message}.\nMoving on then. 
{self._i18n.slice('format').format(tool_names=self.tools_names)}" - ).message - if self.task: - self.task.increment_tools_errors() - if self.agent and self.agent.verbose: - self._printer.print( - content=f"\n\n{error_message}\n", color="red" + result = self._format_result(result=result) + # Don't return early - fall through to finally block + elif result is None: + try: + if calling.tool_name in [ + "Delegate work to coworker", + "Ask question to coworker", + ]: + coworker = ( + calling.arguments.get("coworker") + if calling.arguments + else None ) - return error + if self.task: + self.task.increment_delegations(coworker) - if self.task: - self.task.increment_tools_errors() - return await self.ause(calling=calling, tool_string=tool_string) + if calling.arguments: + try: + acceptable_args = tool.args_schema.model_json_schema()[ + "properties" + ].keys() + arguments = { + k: v + for k, v in calling.arguments.items() + if k in acceptable_args + } + arguments = self._add_fingerprint_metadata(arguments) + result = await tool.ainvoke(input=arguments) + except Exception: + arguments = calling.arguments + arguments = self._add_fingerprint_metadata(arguments) + result = await tool.ainvoke(input=arguments) + else: + arguments = self._add_fingerprint_metadata({}) + result = await tool.ainvoke(input=arguments) - if self.tools_handler: - should_cache = True - if ( - hasattr(available_tool, "cache_function") - and available_tool.cache_function - ): - should_cache = available_tool.cache_function( - calling.arguments, result + if self.tools_handler: + should_cache = True + if ( + hasattr(available_tool, "cache_function") + and available_tool.cache_function + ): + should_cache = available_tool.cache_function( + calling.arguments, result + ) + + self.tools_handler.on_tool_use( + calling=calling, output=result, should_cache=should_cache + ) + + self._telemetry.tool_usage( + llm=self.function_calling_llm, + tool_name=tool.name, + attempts=self._run_attempts, ) + result = self._format_result(result=result) + data = { + "result": result, + "tool_name": tool.name, + "tool_args": calling.arguments, + } - self.tools_handler.on_tool_use( - calling=calling, output=result, should_cache=should_cache + if ( + hasattr(available_tool, "result_as_answer") + and available_tool.result_as_answer + ): + result_as_answer = available_tool.result_as_answer + data["result_as_answer"] = result_as_answer + + if self.agent and hasattr(self.agent, "tools_results"): + self.agent.tools_results.append(data) + + if available_tool and hasattr( + available_tool, "current_usage_count" + ): + available_tool.current_usage_count += 1 + if ( + hasattr(available_tool, "max_usage_count") + and available_tool.max_usage_count is not None + ): + self._printer.print( + content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}", + color="blue", + ) + + except Exception as e: + self.on_tool_error(tool=tool, tool_calling=calling, e=e) + self._run_attempts += 1 + if self._run_attempts > self._max_parsing_attempts: + self._telemetry.tool_usage_error(llm=self.function_calling_llm) + error_message = self._i18n.errors( + "tool_usage_exception" + ).format(error=e, tool=tool.name, tool_inputs=tool.description) + result = ToolUsageError( + f"\n{error_message}.\nMoving on then. 
{self._i18n.slice('format').format(tool_names=self.tools_names)}" + ).message + if self.task: + self.task.increment_tools_errors() + if self.agent and self.agent.verbose: + self._printer.print( + content=f"\n\n{error_message}\n", color="red" + ) + else: + if self.task: + self.task.increment_tools_errors() + should_retry = True + else: + result = self._format_result(result=result) + + finally: + if started_event_emitted: + self.on_tool_use_finished( + tool=tool, + tool_calling=calling, + from_cache=from_cache, + started_at=started_at, + result=result, ) - self._telemetry.tool_usage( - llm=self.function_calling_llm, - tool_name=tool.name, - attempts=self._run_attempts, - ) - result = self._format_result(result=result) - data = { - "result": result, - "tool_name": tool.name, - "tool_args": calling.arguments, - } - - self.on_tool_use_finished( - tool=tool, - tool_calling=calling, - from_cache=from_cache, - started_at=started_at, - result=result, - ) - - if ( - hasattr(available_tool, "result_as_answer") - and available_tool.result_as_answer # type: ignore - ): - result_as_answer = available_tool.result_as_answer # type: ignore - data["result_as_answer"] = result_as_answer # type: ignore - - if self.agent and hasattr(self.agent, "tools_results"): - self.agent.tools_results.append(data) - - if available_tool and hasattr(available_tool, "current_usage_count"): - available_tool.current_usage_count += 1 - if ( - hasattr(available_tool, "max_usage_count") - and available_tool.max_usage_count is not None - ): - self._printer.print( - content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}", - color="blue", - ) + # Handle retry after finally block ensures finished event was emitted + if should_retry: + return await self.ause(calling=calling, tool_string=tool_string) return result @@ -412,6 +426,7 @@ class ToolUsage: tool: CrewStructuredTool, calling: ToolCalling | InstructorToolCalling, ) -> str: + # Repeated usage check happens before event emission - safe to return early if self._check_tool_repeated_usage(calling=calling): try: result = self._i18n.errors("task_repeated_usage").format( @@ -428,6 +443,9 @@ class ToolUsage: if self.task: self.task.increment_tools_errors() + started_at = time.time() + started_event_emitted = False + if self.agent: event_data = { "agent_key": self.agent.key, @@ -446,155 +464,162 @@ class ToolUsage: event_data["task_name"] = self.task.name or self.task.description event_data["task_id"] = str(self.task.id) crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data)) + started_event_emitted = True - started_at = time.time() from_cache = False result = None # type: ignore + should_retry = False + available_tool = None - if self.tools_handler and self.tools_handler.cache: - input_str = "" - if calling.arguments: - if isinstance(calling.arguments, dict): - import json + try: + if self.tools_handler and self.tools_handler.cache: + input_str = "" + if calling.arguments: + if isinstance(calling.arguments, dict): + input_str = json.dumps(calling.arguments) + else: + input_str = str(calling.arguments) - input_str = json.dumps(calling.arguments) - else: - input_str = str(calling.arguments) + result = self.tools_handler.cache.read( + tool=calling.tool_name, input=input_str + ) # type: ignore + from_cache = result is not None - result = self.tools_handler.cache.read( - tool=calling.tool_name, input=input_str - ) # type: ignore - from_cache = result is not None + available_tool = next( + ( + available_tool + for 
+                    if available_tool.name == tool.name
+                ),
+                None,
+            )

-        available_tool = next(
-            (
-                available_tool
-                for available_tool in self.tools
-                if available_tool.name == tool.name
-            ),
-            None,
-        )
-
-        usage_limit_error = self._check_usage_limit(available_tool, tool.name)
-        if usage_limit_error:
-            try:
+            usage_limit_error = self._check_usage_limit(available_tool, tool.name)
+            if usage_limit_error:
                 result = usage_limit_error
                 self._telemetry.tool_usage_error(llm=self.function_calling_llm)
-                return self._format_result(result=result)
-            except Exception:
-                if self.task:
-                    self.task.increment_tools_errors()
-
-        if result is None:
-            try:
-                if calling.tool_name in [
-                    "Delegate work to coworker",
-                    "Ask question to coworker",
-                ]:
-                    coworker = (
-                        calling.arguments.get("coworker") if calling.arguments else None
-                    )
-                    if self.task:
-                        self.task.increment_delegations(coworker)
-
-                if calling.arguments:
-                    try:
-                        acceptable_args = tool.args_schema.model_json_schema()[
-                            "properties"
-                        ].keys()
-                        arguments = {
-                            k: v
-                            for k, v in calling.arguments.items()
-                            if k in acceptable_args
-                        }
-                        # Add fingerprint metadata if available
-                        arguments = self._add_fingerprint_metadata(arguments)
-                        result = tool.invoke(input=arguments)
-                    except Exception:
-                        arguments = calling.arguments
-                        # Add fingerprint metadata if available
-                        arguments = self._add_fingerprint_metadata(arguments)
-                        result = tool.invoke(input=arguments)
-                else:
-                    # Add fingerprint metadata even to empty arguments
-                    arguments = self._add_fingerprint_metadata({})
-                    result = tool.invoke(input=arguments)
-            except Exception as e:
-                self.on_tool_error(tool=tool, tool_calling=calling, e=e)
-                self._run_attempts += 1
-                if self._run_attempts > self._max_parsing_attempts:
-                    self._telemetry.tool_usage_error(llm=self.function_calling_llm)
-                    error_message = self._i18n.errors("tool_usage_exception").format(
-                        error=e, tool=tool.name, tool_inputs=tool.description
-                    )
-                    error = ToolUsageError(
-                        f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
-                    ).message
-                    if self.task:
-                        self.task.increment_tools_errors()
-                    if self.agent and self.agent.verbose:
-                        self._printer.print(
-                            content=f"\n\n{error_message}\n", color="red"
+                result = self._format_result(result=result)
+                # Don't return early - fall through to finally block
+            elif result is None:
+                try:
+                    if calling.tool_name in [
+                        "Delegate work to coworker",
+                        "Ask question to coworker",
+                    ]:
+                        coworker = (
+                            calling.arguments.get("coworker")
+                            if calling.arguments
+                            else None
                        )
-                    return error
+                        if self.task:
+                            self.task.increment_delegations(coworker)

-                if self.task:
-                    self.task.increment_tools_errors()
-                return self.use(calling=calling, tool_string=tool_string)
+                    if calling.arguments:
+                        try:
+                            acceptable_args = tool.args_schema.model_json_schema()[
+                                "properties"
+                            ].keys()
+                            arguments = {
+                                k: v
+                                for k, v in calling.arguments.items()
+                                if k in acceptable_args
+                            }
+                            arguments = self._add_fingerprint_metadata(arguments)
+                            result = tool.invoke(input=arguments)
+                        except Exception:
+                            arguments = calling.arguments
+                            arguments = self._add_fingerprint_metadata(arguments)
+                            result = tool.invoke(input=arguments)
+                    else:
+                        arguments = self._add_fingerprint_metadata({})
+                        result = tool.invoke(input=arguments)

-        if self.tools_handler:
-            should_cache = True
-            if (
-                hasattr(available_tool, "cache_function")
-                and available_tool.cache_function
-            ):
-                should_cache = available_tool.cache_function(
-                    calling.arguments, result
+                    if self.tools_handler:
+                        should_cache = True
+                        if (
+                            hasattr(available_tool, "cache_function")
+                            and available_tool.cache_function
+                        ):
+                            should_cache = available_tool.cache_function(
+                                calling.arguments, result
+                            )
+
+                        self.tools_handler.on_tool_use(
+                            calling=calling, output=result, should_cache=should_cache
+                        )
+
+                    self._telemetry.tool_usage(
+                        llm=self.function_calling_llm,
+                        tool_name=tool.name,
+                        attempts=self._run_attempts,
                )
+                    result = self._format_result(result=result)
+                    data = {
+                        "result": result,
+                        "tool_name": tool.name,
+                        "tool_args": calling.arguments,
+                    }

-            self.tools_handler.on_tool_use(
-                calling=calling, output=result, should_cache=should_cache
+                    if (
+                        hasattr(available_tool, "result_as_answer")
+                        and available_tool.result_as_answer
+                    ):
+                        result_as_answer = available_tool.result_as_answer
+                        data["result_as_answer"] = result_as_answer
+
+                    if self.agent and hasattr(self.agent, "tools_results"):
+                        self.agent.tools_results.append(data)
+
+                    if available_tool and hasattr(
+                        available_tool, "current_usage_count"
+                    ):
+                        available_tool.current_usage_count += 1
+                        if (
+                            hasattr(available_tool, "max_usage_count")
+                            and available_tool.max_usage_count is not None
+                        ):
+                            self._printer.print(
+                                content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
+                                color="blue",
+                            )
+
+                except Exception as e:
+                    self.on_tool_error(tool=tool, tool_calling=calling, e=e)
+                    self._run_attempts += 1
+                    if self._run_attempts > self._max_parsing_attempts:
+                        self._telemetry.tool_usage_error(llm=self.function_calling_llm)
+                        error_message = self._i18n.errors(
+                            "tool_usage_exception"
+                        ).format(error=e, tool=tool.name, tool_inputs=tool.description)
+                        result = ToolUsageError(
+                            f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
+                        ).message
+                        if self.task:
+                            self.task.increment_tools_errors()
+                        if self.agent and self.agent.verbose:
+                            self._printer.print(
+                                content=f"\n\n{error_message}\n", color="red"
+                            )
+                    else:
+                        if self.task:
+                            self.task.increment_tools_errors()
+                        should_retry = True
+                else:
+                    result = self._format_result(result=result)
+
+        finally:
+            if started_event_emitted:
+                self.on_tool_use_finished(
+                    tool=tool,
+                    tool_calling=calling,
+                    from_cache=from_cache,
+                    started_at=started_at,
+                    result=result,
                )
-        self._telemetry.tool_usage(
-            llm=self.function_calling_llm,
-            tool_name=tool.name,
-            attempts=self._run_attempts,
-        )
-        result = self._format_result(result=result)
-        data = {
-            "result": result,
-            "tool_name": tool.name,
-            "tool_args": calling.arguments,
-        }
-        self.on_tool_use_finished(
-            tool=tool,
-            tool_calling=calling,
-            from_cache=from_cache,
-            started_at=started_at,
-            result=result,
-        )
-
-        if (
-            hasattr(available_tool, "result_as_answer")
-            and available_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
-        ):
-            result_as_answer = available_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
-            data["result_as_answer"] = result_as_answer  # type: ignore
-
-        if self.agent and hasattr(self.agent, "tools_results"):
-            self.agent.tools_results.append(data)
-
-        if available_tool and hasattr(available_tool, "current_usage_count"):
-            available_tool.current_usage_count += 1
-            if (
-                hasattr(available_tool, "max_usage_count")
-                and available_tool.max_usage_count is not None
-            ):
-                self._printer.print(
-                    content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
-                    color="blue",
-                )
+        # Handle retry after finally block ensures finished event was emitted
+        if should_retry:
+            return self.use(calling=calling, tool_string=tool_string)

         return result

diff --git a/lib/crewai/tests/cassettes/events/TestAgentEventOrdering.test_agent_events_have_event_ids.yaml b/lib/crewai/tests/cassettes/events/TestAgentEventOrdering.test_agent_events_have_event_ids.yaml
new file mode 100644
index 000000000..bd59b950f
--- /dev/null
+++ b/lib/crewai/tests/cassettes/events/TestAgentEventOrdering.test_agent_events_have_event_ids.yaml
@@ -0,0 +1,118 @@
+interactions:
+- request:
+    body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
+      personal goal is: Help with tasks\nTo give my best complete final answer to
+      the task respond using the exact following format:\n\nThought: I now can give
+      a great answer\nFinal Answer: Your final answer must be the great and the most
+      complete as possible, it must be outcome described.\n\nI MUST use these formats,
+      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''done''
+      and nothing else.\n\nThis is the expected criteria for your final answer: The
+      word done.\nyou MUST return the actual complete content as the final answer,
+      not a summary.\n\nBegin!
This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '794' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D087GaV1OkB4Yos5MqLYqRSpLLZkV\",\n \"object\": + \"chat.completion\",\n \"created\": 1768923570,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: done\",\n \"refusal\": null,\n \"annotations\": []\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": + 14,\n \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 15:39:30 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '446' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '472' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/events/TestAgentEventOrdering.test_llm_events_have_parent.yaml b/lib/crewai/tests/cassettes/events/TestAgentEventOrdering.test_llm_events_have_parent.yaml new file mode 100644 index 000000000..a9983226c --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestAgentEventOrdering.test_llm_events_have_parent.yaml @@ -0,0 +1,118 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Helper. 
You help.\nYour + personal goal is: Help with tasks\nTo give my best complete final answer to + the task respond using the exact following format:\n\nThought: I now can give + a great answer\nFinal Answer: Your final answer must be the great and the most + complete as possible, it must be outcome described.\n\nI MUST use these formats, + my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''hi'' + and nothing else.\n\nThis is the expected criteria for your final answer: The + word hi.\nyou MUST return the actual complete content as the final answer, not + a summary.\n\nBegin! This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '790' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D087HNU70QfEltqUwIaR3WflNQJMq\",\n \"object\": + \"chat.completion\",\n \"created\": 1768923571,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: hi\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": 14,\n + \ \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 15:39:31 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '401' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '429' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git 
a/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_crew_completed_after_started.yaml b/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_crew_completed_after_started.yaml new file mode 100644 index 000000000..f49e094cb --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_crew_completed_after_started.yaml @@ -0,0 +1,118 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Responder. You give short + answers.\nYour personal goal is: Respond briefly\nTo give my best complete final + answer to the task respond using the exact following format:\n\nThought: I now + can give a great answer\nFinal Answer: Your final answer must be the great and + the most complete as possible, it must be outcome described.\n\nI MUST use these + formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say + ''yes'' and nothing else.\n\nThis is the expected criteria for your final answer: + The word yes.\nyou MUST return the actual complete content as the final answer, + not a summary.\n\nBegin! This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '809' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D0876LY6Tp1gWmwQ5f2A6EsqdbLOK\",\n \"object\": + \"chat.completion\",\n \"created\": 1768923560,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: yes\",\n \"refusal\": null,\n \"annotations\": []\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\": + 14,\n \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 15:39:21 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '519' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '758' + x-openai-proxy-wasm: + - v0.1 + 
x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_crew_events_have_event_ids.yaml b/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_crew_events_have_event_ids.yaml new file mode 100644 index 000000000..e7b96dae1 --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_crew_events_have_event_ids.yaml @@ -0,0 +1,118 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Responder. You give short + answers.\nYour personal goal is: Respond briefly\nTo give my best complete final + answer to the task respond using the exact following format:\n\nThought: I now + can give a great answer\nFinal Answer: Your final answer must be the great and + the most complete as possible, it must be outcome described.\n\nI MUST use these + formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say + ''hello'' and nothing else.\n\nThis is the expected criteria for your final + answer: The word hello.\nyou MUST return the actual complete content as the + final answer, not a summary.\n\nBegin! This is VERY important to you, use the + tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '813' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D0874asFandwADBjb4DfArsTUyu8K\",\n \"object\": + \"chat.completion\",\n \"created\": 1768923558,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: hello\",\n \"refusal\": null,\n \"annotations\": []\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\": + 14,\n \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 15:39:18 GMT + Server: + - cloudflare + Set-Cookie: + - 
SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '478' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '497' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_task_parent_is_crew.yaml b/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_task_parent_is_crew.yaml new file mode 100644 index 000000000..60e396f56 --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestCrewEventOrdering.test_task_parent_is_crew.yaml @@ -0,0 +1,118 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Responder. You give short + answers.\nYour personal goal is: Respond briefly\nTo give my best complete final + answer to the task respond using the exact following format:\n\nThought: I now + can give a great answer\nFinal Answer: Your final answer must be the great and + the most complete as possible, it must be outcome described.\n\nI MUST use these + formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say + ''ok'' and nothing else.\n\nThis is the expected criteria for your final answer: + The word ok.\nyou MUST return the actual complete content as the final answer, + not a summary.\n\nBegin! 
This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '807' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D0875A6FEJ2ZFKVHohoJdbBgKEMNx\",\n \"object\": + \"chat.completion\",\n \"created\": 1768923559,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: ok\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\": 14,\n + \ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 15:39:19 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '406' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '439' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_crew_parent_is_method.yaml b/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_crew_parent_is_method.yaml new file mode 100644 index 000000000..f8b04a451 --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_crew_parent_is_method.yaml @@ -0,0 +1,118 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Worker. 
You work.\nYour + personal goal is: Do work\nTo give my best complete final answer to the task + respond using the exact following format:\n\nThought: I now can give a great + answer\nFinal Answer: Your final answer must be the great and the most complete + as possible, it must be outcome described.\n\nI MUST use these formats, my job + depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''go'' and nothing + else.\n\nThis is the expected criteria for your final answer: The word go.\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '782' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09c91Qh5LJ73NLwFrcRhThK7zNKS\",\n \"object\": + \"chat.completion\",\n \"created\": 1768929329,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: go\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\": 14,\n + \ \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:15:30 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '521' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '781' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git 
a/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_flow_events_have_ids.yaml b/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_flow_events_have_ids.yaml new file mode 100644 index 000000000..c9ce085fa --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_flow_events_have_ids.yaml @@ -0,0 +1,118 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Worker. You work.\nYour + personal goal is: Do work\nTo give my best complete final answer to the task + respond using the exact following format:\n\nThought: I now can give a great + answer\nFinal Answer: Your final answer must be the great and the most complete + as possible, it must be outcome described.\n\nI MUST use these formats, my job + depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''complete'' + and nothing else.\n\nThis is the expected criteria for your final answer: The + word complete.\nyou MUST return the actual complete content as the final answer, + not a summary.\n\nBegin! This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '794' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09bYGfIe5pA04mBGuMO94KLyKhry\",\n \"object\": + \"chat.completion\",\n \"created\": 1768929292,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: complete\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\": + 14,\n \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:14:53 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '436' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '660' + x-openai-proxy-wasm: + - v0.1 + 
x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_method_parent_is_flow.yaml b/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_method_parent_is_flow.yaml new file mode 100644 index 000000000..fa566df82 --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestFlowWithCrewEventOrdering.test_method_parent_is_flow.yaml @@ -0,0 +1,118 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Worker. You work.\nYour + personal goal is: Do work\nTo give my best complete final answer to the task + respond using the exact following format:\n\nThought: I now can give a great + answer\nFinal Answer: Your final answer must be the great and the most complete + as possible, it must be outcome described.\n\nI MUST use these formats, my job + depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''ready'' and + nothing else.\n\nThis is the expected criteria for your final answer: The word + ready.\nyou MUST return the actual complete content as the final answer, not + a summary.\n\nBegin! This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '788' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09cBIfuX53tF9rWbEKXXIr20uzSv\",\n \"object\": + \"chat.completion\",\n \"created\": 1768929331,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: ready\",\n \"refusal\": null,\n \"annotations\": []\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\": + 14,\n \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:15:32 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + 
Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '517' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '1841' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_second_crew_after_first.yaml b/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_second_crew_after_first.yaml new file mode 100644 index 000000000..629626893 --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_second_crew_after_first.yaml @@ -0,0 +1,234 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are First. You go first.\nYour + personal goal is: Be first\nTo give my best complete final answer to the task + respond using the exact following format:\n\nThought: I now can give a great + answer\nFinal Answer: Your final answer must be the great and the most complete + as possible, it must be outcome described.\n\nI MUST use these formats, my job + depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''a'' and nothing + else.\n\nThis is the expected criteria for your final answer: The letter a.\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nBegin! 
+ This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '786' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09rkgXjWfa1XwICCnLAVV3LXFlUZ\",\n \"object\": + \"chat.completion\",\n \"created\": 1768930296,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: a\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": 14,\n + \ \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:31:37 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '418' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '443' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Second. 
You go second.\nYour + personal goal is: Be second\nTo give my best complete final answer to the task + respond using the exact following format:\n\nThought: I now can give a great + answer\nFinal Answer: Your final answer must be the great and the most complete + as possible, it must be outcome described.\n\nI MUST use these formats, my job + depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''b'' and nothing + else.\n\nThis is the expected criteria for your final answer: The letter b.\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '789' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09rlVUPgS5haPGYgA4RmW9EfPArd\",\n \"object\": + \"chat.completion\",\n \"created\": 1768930297,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: b\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": 14,\n + \ \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:31:38 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '345' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '658' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git 
a/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_tasks_have_correct_crew_parents.yaml b/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_tasks_have_correct_crew_parents.yaml new file mode 100644 index 000000000..5a6dcfe50 --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_tasks_have_correct_crew_parents.yaml @@ -0,0 +1,234 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Alpha. You are alpha.\nYour + personal goal is: Do alpha work\nTo give my best complete final answer to the + task respond using the exact following format:\n\nThought: I now can give a + great answer\nFinal Answer: Your final answer must be the great and the most + complete as possible, it must be outcome described.\n\nI MUST use these formats, + my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''alpha'' + and nothing else.\n\nThis is the expected criteria for your final answer: The + word alpha.\nyou MUST return the actual complete content as the final answer, + not a summary.\n\nBegin! This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '798' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09ri7edf1TcYqD0vAkS3IjNkai3V\",\n \"object\": + \"chat.completion\",\n \"created\": 1768930294,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: alpha\",\n \"refusal\": null,\n \"annotations\": []\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\": + 14,\n \"total_tokens\": 174,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:31:34 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '491' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + 
x-envoy-upstream-service-time: + - '513' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Beta. You are beta.\nYour + personal goal is: Do beta work\nTo give my best complete final answer to the + task respond using the exact following format:\n\nThought: I now can give a + great answer\nFinal Answer: Your final answer must be the great and the most + complete as possible, it must be outcome described.\n\nI MUST use these formats, + my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''beta'' + and nothing else.\n\nThis is the expected criteria for your final answer: The + word beta.\nyou MUST return the actual complete content as the final answer, + not a summary.\n\nBegin! This is VERY important to you, use the tools available + and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '793' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09rj5wiKmsX5P72qH0GEKL5pQEq6\",\n \"object\": + \"chat.completion\",\n \"created\": 1768930295,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: beta\",\n \"refusal\": null,\n \"annotations\": []\n + \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n + \ ],\n \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\": + 14,\n \"total_tokens\": 174,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:31:35 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '506' + openai-project: + - 
OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '741' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_two_crews_have_different_ids.yaml b/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_two_crews_have_different_ids.yaml new file mode 100644 index 000000000..41129121a --- /dev/null +++ b/lib/crewai/tests/cassettes/events/TestFlowWithMultipleCrewsEventOrdering.test_two_crews_have_different_ids.yaml @@ -0,0 +1,234 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are First. You go first.\nYour + personal goal is: Be first\nTo give my best complete final answer to the task + respond using the exact following format:\n\nThought: I now can give a great + answer\nFinal Answer: Your final answer must be the great and the most complete + as possible, it must be outcome described.\n\nI MUST use these formats, my job + depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''1'' and nothing + else.\n\nThis is the expected criteria for your final answer: The number 1.\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nBegin! + This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '786' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09rmoYXlYGqmDh0Ca3r9xunjmE7k\",\n \"object\": + \"chat.completion\",\n \"created\": 1768930298,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: 1\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\": 15,\n + \ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - 
CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:31:38 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '387' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '403' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Second. You go second.\nYour + personal goal is: Be second\nTo give my best complete final answer to the task + respond using the exact following format:\n\nThought: I now can give a great + answer\nFinal Answer: Your final answer must be the great and the most complete + as possible, it must be outcome described.\n\nI MUST use these formats, my job + depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''2'' and nothing + else.\n\nThis is the expected criteria for your final answer: The number 2.\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nBegin! 
+ This is VERY important to you, use the tools available and give your best Final + Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '789' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.10 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D09rnDNZsxICQvSZrx5rlgMdc2Tbp\",\n \"object\": + \"chat.completion\",\n \"created\": 1768930299,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal + Answer: 2\",\n \"refusal\": null,\n \"annotations\": []\n },\n + \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n + \ \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\": 15,\n + \ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 20 Jan 2026 17:31:39 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '560' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-envoy-upstream-service-time: + - '581' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/events/test_event_context.py b/lib/crewai/tests/events/test_event_context.py new file mode 100644 index 000000000..071e1a34d --- /dev/null +++ b/lib/crewai/tests/events/test_event_context.py @@ -0,0 +1,180 @@ +"""Tests for event context management.""" + +import pytest + +from crewai.events.event_context import ( + SCOPE_ENDING_EVENTS, + SCOPE_STARTING_EVENTS, + VALID_EVENT_PAIRS, + EmptyStackError, + EventPairingError, + MismatchBehavior, + StackDepthExceededError, + _event_context_config, + EventContextConfig, + get_current_parent_id, + get_enclosing_parent_id, + 
get_last_event_id, + get_triggering_event_id, + handle_empty_pop, + handle_mismatch, + pop_event_scope, + push_event_scope, + reset_last_event_id, + set_last_event_id, + set_triggering_event_id, + triggered_by_scope, +) + + +class TestStackOperations: + """Tests for stack push/pop operations.""" + + def test_empty_stack_returns_none(self) -> None: + assert get_current_parent_id() is None + assert get_enclosing_parent_id() is None + + def test_push_and_get_parent(self) -> None: + push_event_scope("event-1", "task_started") + assert get_current_parent_id() == "event-1" + + def test_nested_push(self) -> None: + push_event_scope("event-1", "crew_kickoff_started") + push_event_scope("event-2", "task_started") + assert get_current_parent_id() == "event-2" + assert get_enclosing_parent_id() == "event-1" + + def test_pop_restores_parent(self) -> None: + push_event_scope("event-1", "crew_kickoff_started") + push_event_scope("event-2", "task_started") + popped = pop_event_scope() + assert popped == ("event-2", "task_started") + assert get_current_parent_id() == "event-1" + + def test_pop_empty_stack_returns_none(self) -> None: + assert pop_event_scope() is None + + +class TestStackDepthLimit: + """Tests for stack depth limit.""" + + def test_depth_limit_exceeded_raises(self) -> None: + _event_context_config.set(EventContextConfig(max_stack_depth=3)) + + push_event_scope("event-1", "type-1") + push_event_scope("event-2", "type-2") + push_event_scope("event-3", "type-3") + + with pytest.raises(StackDepthExceededError): + push_event_scope("event-4", "type-4") + + +class TestMismatchHandling: + """Tests for mismatch behavior.""" + + def test_handle_mismatch_raises_when_configured(self) -> None: + _event_context_config.set( + EventContextConfig(mismatch_behavior=MismatchBehavior.RAISE) + ) + + with pytest.raises(EventPairingError): + handle_mismatch("task_completed", "llm_call_started", "task_started") + + def test_handle_empty_pop_raises_when_configured(self) -> None: + _event_context_config.set( + EventContextConfig(empty_pop_behavior=MismatchBehavior.RAISE) + ) + + with pytest.raises(EmptyStackError): + handle_empty_pop("task_completed") + + +class TestEventTypeSets: + """Tests for event type set completeness.""" + + def test_all_ending_events_have_pairs(self) -> None: + for ending_event in SCOPE_ENDING_EVENTS: + assert ending_event in VALID_EVENT_PAIRS + + def test_all_pairs_reference_starting_events(self) -> None: + for ending_event, starting_event in VALID_EVENT_PAIRS.items(): + assert starting_event in SCOPE_STARTING_EVENTS + + def test_starting_and_ending_are_disjoint(self) -> None: + overlap = SCOPE_STARTING_EVENTS & SCOPE_ENDING_EVENTS + assert not overlap + + +class TestLastEventIdTracking: + """Tests for linear chain event ID tracking.""" + + def test_initial_last_event_id_is_none(self) -> None: + reset_last_event_id() + assert get_last_event_id() is None + + def test_set_and_get_last_event_id(self) -> None: + reset_last_event_id() + set_last_event_id("event-123") + assert get_last_event_id() == "event-123" + + def test_reset_clears_last_event_id(self) -> None: + set_last_event_id("event-123") + reset_last_event_id() + assert get_last_event_id() is None + + def test_overwrite_last_event_id(self) -> None: + reset_last_event_id() + set_last_event_id("event-1") + set_last_event_id("event-2") + assert get_last_event_id() == "event-2" + + +class TestTriggeringEventIdTracking: + """Tests for causal chain event ID tracking.""" + + def test_initial_triggering_event_id_is_none(self) -> None: + 
set_triggering_event_id(None) + assert get_triggering_event_id() is None + + def test_set_and_get_triggering_event_id(self) -> None: + set_triggering_event_id("trigger-123") + assert get_triggering_event_id() == "trigger-123" + set_triggering_event_id(None) + + def test_set_none_clears_triggering_event_id(self) -> None: + set_triggering_event_id("trigger-123") + set_triggering_event_id(None) + assert get_triggering_event_id() is None + + +class TestTriggeredByScope: + """Tests for triggered_by_scope context manager.""" + + def test_scope_sets_triggering_id(self) -> None: + set_triggering_event_id(None) + with triggered_by_scope("trigger-456"): + assert get_triggering_event_id() == "trigger-456" + + def test_scope_restores_previous_value(self) -> None: + set_triggering_event_id(None) + with triggered_by_scope("trigger-456"): + pass + assert get_triggering_event_id() is None + + def test_nested_scopes(self) -> None: + set_triggering_event_id(None) + with triggered_by_scope("outer"): + assert get_triggering_event_id() == "outer" + with triggered_by_scope("inner"): + assert get_triggering_event_id() == "inner" + assert get_triggering_event_id() == "outer" + assert get_triggering_event_id() is None + + def test_scope_restores_on_exception(self) -> None: + set_triggering_event_id(None) + try: + with triggered_by_scope("trigger-789"): + raise ValueError("test error") + except ValueError: + pass + assert get_triggering_event_id() is None \ No newline at end of file diff --git a/lib/crewai/tests/events/test_event_ordering.py b/lib/crewai/tests/events/test_event_ordering.py new file mode 100644 index 000000000..b9970bf77 --- /dev/null +++ b/lib/crewai/tests/events/test_event_ordering.py @@ -0,0 +1,1649 @@ +"""Tests for event ordering and parent-child relationships.""" + +import pytest + +from crewai.agent import Agent +from crewai.crew import Crew +from crewai.events.base_events import BaseEvent +from crewai.events.event_bus import crewai_event_bus +from crewai.events.types.agent_events import ( + AgentExecutionCompletedEvent, + AgentExecutionStartedEvent, +) +from crewai.events.types.crew_events import ( + CrewKickoffCompletedEvent, + CrewKickoffStartedEvent, +) +from crewai.events.types.flow_events import ( + FlowFinishedEvent, + FlowStartedEvent, + MethodExecutionFinishedEvent, + MethodExecutionStartedEvent, +) +from crewai.events.types.llm_events import ( + LLMCallCompletedEvent, + LLMCallStartedEvent, +) +from crewai.events.types.task_events import ( + TaskCompletedEvent, + TaskStartedEvent, +) +from crewai.flow.flow import Flow, listen, start +from crewai.task import Task + + +class EventCollector: + """Collects events and provides helpers to find related events.""" + + def __init__(self) -> None: + self.events: list[BaseEvent] = [] + + def add(self, event: BaseEvent) -> None: + self.events.append(event) + + def first(self, event_type: type[BaseEvent]) -> BaseEvent | None: + for e in self.events: + if isinstance(e, event_type): + return e + return None + + def all_of(self, event_type: type[BaseEvent]) -> list[BaseEvent]: + return [e for e in self.events if isinstance(e, event_type)] + + def with_parent(self, parent_id: str) -> list[BaseEvent]: + return [e for e in self.events if e.parent_event_id == parent_id] + + +@pytest.fixture +def collector() -> EventCollector: + """Fixture that collects events during test execution.""" + c = EventCollector() + + @crewai_event_bus.on(CrewKickoffStartedEvent) + def h1(source, event): + c.add(event) + + @crewai_event_bus.on(CrewKickoffCompletedEvent) + def 
h2(source, event): + c.add(event) + + @crewai_event_bus.on(TaskStartedEvent) + def h3(source, event): + c.add(event) + + @crewai_event_bus.on(TaskCompletedEvent) + def h4(source, event): + c.add(event) + + @crewai_event_bus.on(AgentExecutionStartedEvent) + def h5(source, event): + c.add(event) + + @crewai_event_bus.on(AgentExecutionCompletedEvent) + def h6(source, event): + c.add(event) + + @crewai_event_bus.on(LLMCallStartedEvent) + def h7(source, event): + c.add(event) + + @crewai_event_bus.on(LLMCallCompletedEvent) + def h8(source, event): + c.add(event) + + @crewai_event_bus.on(FlowStartedEvent) + def h9(source, event): + c.add(event) + + @crewai_event_bus.on(FlowFinishedEvent) + def h10(source, event): + c.add(event) + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def h11(source, event): + c.add(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def h12(source, event): + c.add(event) + + return c + + +class TestCrewEventOrdering: + """Tests for event ordering in crew execution.""" + + @pytest.mark.vcr() + def test_crew_events_have_event_ids(self, collector: EventCollector) -> None: + """Every crew event should have a unique event_id.""" + agent = Agent( + role="Responder", + goal="Respond briefly", + backstory="You give short answers.", + verbose=False, + ) + task = Task( + description="Say 'hello' and nothing else.", + expected_output="The word hello.", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task], verbose=False) + crew.kickoff() + crewai_event_bus.flush() + + started = collector.first(CrewKickoffStartedEvent) + completed = collector.first(CrewKickoffCompletedEvent) + + assert started is not None + assert started.event_id is not None + assert len(started.event_id) > 0 + + assert completed is not None + assert completed.event_id is not None + assert completed.event_id != started.event_id + + @pytest.mark.vcr() + def test_crew_completed_after_started(self, collector: EventCollector) -> None: + """Crew completed event should have higher sequence than started.""" + agent = Agent( + role="Responder", + goal="Respond briefly", + backstory="You give short answers.", + verbose=False, + ) + task = Task( + description="Say 'yes' and nothing else.", + expected_output="The word yes.", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task], verbose=False) + crew.kickoff() + crewai_event_bus.flush() + + started = collector.first(CrewKickoffStartedEvent) + completed = collector.first(CrewKickoffCompletedEvent) + + assert started is not None + assert completed is not None + assert started.emission_sequence is not None + assert completed.emission_sequence is not None + assert completed.emission_sequence > started.emission_sequence + + @pytest.mark.vcr() + def test_task_parent_is_crew(self, collector: EventCollector) -> None: + """Task events should have crew event as parent.""" + agent = Agent( + role="Responder", + goal="Respond briefly", + backstory="You give short answers.", + verbose=False, + ) + task = Task( + description="Say 'ok' and nothing else.", + expected_output="The word ok.", + agent=agent, + ) + crew = Crew(agents=[agent], tasks=[task], verbose=False) + crew.kickoff() + crewai_event_bus.flush() + + crew_started = collector.first(CrewKickoffStartedEvent) + task_started = collector.first(TaskStartedEvent) + + assert crew_started is not None + assert task_started is not None + assert task_started.parent_event_id == crew_started.event_id + + +class TestAgentEventOrdering: + """Tests for event ordering in agent execution.""" + + 
@pytest.mark.vcr() + def test_agent_events_have_event_ids(self, collector: EventCollector) -> None: + """Agent execution events should have event_ids.""" + agent = Agent( + role="Helper", + goal="Help with tasks", + backstory="You help.", + verbose=False, + ) + task = Task( + description="Say 'done' and nothing else.", + expected_output="The word done.", + agent=agent, + ) + agent.execute_task(task) + crewai_event_bus.flush() + + started = collector.first(AgentExecutionStartedEvent) + completed = collector.first(AgentExecutionCompletedEvent) + + if started: + assert started.event_id is not None + + if completed: + assert completed.event_id is not None + + @pytest.mark.vcr() + def test_llm_events_have_parent(self, collector: EventCollector) -> None: + """LLM call events should have a parent event.""" + agent = Agent( + role="Helper", + goal="Help with tasks", + backstory="You help.", + verbose=False, + ) + task = Task( + description="Say 'hi' and nothing else.", + expected_output="The word hi.", + agent=agent, + ) + agent.execute_task(task) + crewai_event_bus.flush() + + llm_started = collector.first(LLMCallStartedEvent) + + if llm_started: + assert llm_started.event_id is not None + # LLM events should have some parent in the hierarchy + assert llm_started.parent_event_id is not None + + +class TestFlowWithCrewEventOrdering: + """Tests for event ordering in flows containing crews.""" + + @pytest.mark.asyncio + @pytest.mark.vcr() + async def test_flow_events_have_ids(self, collector: EventCollector) -> None: + """Flow events should have event_ids.""" + agent = Agent( + role="Worker", + goal="Do work", + backstory="You work.", + verbose=False, + ) + task = Task( + description="Say 'complete' and nothing else.", + expected_output="The word complete.", + agent=agent, + ) + + class SimpleFlow(Flow): + @start() + async def run_crew(self): + c = Crew(agents=[agent], tasks=[task], verbose=False) + return await c.akickoff() + + flow = SimpleFlow() + await flow.akickoff() + crewai_event_bus.flush() + + flow_started = collector.first(FlowStartedEvent) + flow_finished = collector.first(FlowFinishedEvent) + + assert flow_started is not None + assert flow_started.event_id is not None + + assert flow_finished is not None + assert flow_finished.event_id is not None + + @pytest.mark.asyncio + @pytest.mark.vcr() + async def test_method_parent_is_flow(self, collector: EventCollector) -> None: + """Method execution events should have flow as parent.""" + agent = Agent( + role="Worker", + goal="Do work", + backstory="You work.", + verbose=False, + ) + task = Task( + description="Say 'ready' and nothing else.", + expected_output="The word ready.", + agent=agent, + ) + + class FlowWithMethod(Flow): + @start() + async def my_method(self): + c = Crew(agents=[agent], tasks=[task], verbose=False) + return await c.akickoff() + + flow = FlowWithMethod() + await flow.akickoff() + crewai_event_bus.flush() + + flow_started = collector.first(FlowStartedEvent) + method_started = collector.first(MethodExecutionStartedEvent) + + assert flow_started is not None + assert method_started is not None + assert method_started.parent_event_id == flow_started.event_id + + @pytest.mark.asyncio + @pytest.mark.vcr() + async def test_crew_parent_is_method(self, collector: EventCollector) -> None: + """Crew inside flow method should have method as parent.""" + agent = Agent( + role="Worker", + goal="Do work", + backstory="You work.", + verbose=False, + ) + task = Task( + description="Say 'go' and nothing else.", + expected_output="The 
word go.", + agent=agent, + ) + + class FlowWithCrew(Flow): + @start() + async def run_it(self): + c = Crew(agents=[agent], tasks=[task], verbose=False) + return await c.akickoff() + + flow = FlowWithCrew() + await flow.akickoff() + crewai_event_bus.flush() + + method_started = collector.first(MethodExecutionStartedEvent) + crew_started = collector.first(CrewKickoffStartedEvent) + + assert method_started is not None + assert crew_started is not None + assert crew_started.parent_event_id == method_started.event_id + + +class TestFlowWithMultipleCrewsEventOrdering: + """Tests for event ordering in flows with multiple crews.""" + + @pytest.mark.asyncio + @pytest.mark.vcr() + async def test_two_crews_have_different_ids( + self, collector: EventCollector + ) -> None: + """Two crews in a flow should have different event_ids.""" + agent1 = Agent( + role="First", + goal="Be first", + backstory="You go first.", + verbose=False, + ) + agent2 = Agent( + role="Second", + goal="Be second", + backstory="You go second.", + verbose=False, + ) + task1 = Task( + description="Say '1' and nothing else.", + expected_output="The number 1.", + agent=agent1, + ) + task2 = Task( + description="Say '2' and nothing else.", + expected_output="The number 2.", + agent=agent2, + ) + + class TwoCrewFlow(Flow): + @start() + async def first(self): + c = Crew(agents=[agent1], tasks=[task1], verbose=False) + return await c.akickoff() + + @listen(first) + async def second(self, _): + c = Crew(agents=[agent2], tasks=[task2], verbose=False) + return await c.akickoff() + + flow = TwoCrewFlow() + await flow.akickoff() + crewai_event_bus.flush() + + crew_started_events = collector.all_of(CrewKickoffStartedEvent) + + assert len(crew_started_events) >= 2 + assert crew_started_events[0].event_id != crew_started_events[1].event_id + + @pytest.mark.asyncio + @pytest.mark.vcr() + async def test_second_crew_after_first(self, collector: EventCollector) -> None: + """Second crew should have higher sequence than first.""" + agent1 = Agent( + role="First", + goal="Be first", + backstory="You go first.", + verbose=False, + ) + agent2 = Agent( + role="Second", + goal="Be second", + backstory="You go second.", + verbose=False, + ) + task1 = Task( + description="Say 'a' and nothing else.", + expected_output="The letter a.", + agent=agent1, + ) + task2 = Task( + description="Say 'b' and nothing else.", + expected_output="The letter b.", + agent=agent2, + ) + + class SequentialCrewFlow(Flow): + @start() + async def crew_a(self): + c = Crew(agents=[agent1], tasks=[task1], verbose=False) + return await c.akickoff() + + @listen(crew_a) + async def crew_b(self, _): + c = Crew(agents=[agent2], tasks=[task2], verbose=False) + return await c.akickoff() + + flow = SequentialCrewFlow() + await flow.akickoff() + crewai_event_bus.flush() + + crew_started_events = collector.all_of(CrewKickoffStartedEvent) + + assert len(crew_started_events) >= 2 + first = crew_started_events[0] + second = crew_started_events[1] + + assert first.emission_sequence is not None + assert second.emission_sequence is not None + assert second.emission_sequence > first.emission_sequence + + @pytest.mark.asyncio + @pytest.mark.vcr() + async def test_tasks_have_correct_crew_parents( + self, collector: EventCollector + ) -> None: + """Tasks in different crews should have their own crew as parent.""" + agent1 = Agent( + role="Alpha", + goal="Do alpha work", + backstory="You are alpha.", + verbose=False, + ) + agent2 = Agent( + role="Beta", + goal="Do beta work", + backstory="You are 
beta.", + verbose=False, + ) + task1 = Task( + description="Say 'alpha' and nothing else.", + expected_output="The word alpha.", + agent=agent1, + ) + task2 = Task( + description="Say 'beta' and nothing else.", + expected_output="The word beta.", + agent=agent2, + ) + + class ParentTestFlow(Flow): + @start() + async def alpha_crew(self): + c = Crew(agents=[agent1], tasks=[task1], verbose=False) + return await c.akickoff() + + @listen(alpha_crew) + async def beta_crew(self, _): + c = Crew(agents=[agent2], tasks=[task2], verbose=False) + return await c.akickoff() + + flow = ParentTestFlow() + await flow.akickoff() + crewai_event_bus.flush() + + crew_started_events = collector.all_of(CrewKickoffStartedEvent) + task_started_events = collector.all_of(TaskStartedEvent) + + assert len(crew_started_events) >= 2 + assert len(task_started_events) >= 2 + + crew1_id = crew_started_events[0].event_id + crew2_id = crew_started_events[1].event_id + + task1_parent = task_started_events[0].parent_event_id + task2_parent = task_started_events[1].parent_event_id + + assert task1_parent == crew1_id + assert task2_parent == crew2_id + + +class TestPreviousEventIdChain: + """Tests for previous_event_id linear chain tracking.""" + + @pytest.mark.asyncio + async def test_previous_event_id_chain(self) -> None: + """Events should have previous_event_id pointing to the prior event.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class SimpleFlow(Flow): + @start() + async def step_one(self): + return "step_one_done" + + @listen(step_one) + async def step_two(self, result): + return "step_two_done" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(FlowStartedEvent) + def h1(source, event): + events.append(event) + + @crewai_event_bus.on(FlowFinishedEvent) + def h2(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def h3(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def h4(source, event): + events.append(event) + + flow = SimpleFlow() + await flow.akickoff() + crewai_event_bus.flush() + + assert len(events) >= 4 + + all_events = sorted(events, key=lambda e: e.emission_sequence or 0) + all_event_ids = {e.event_id for e in all_events} + + for event in all_events[1:]: + assert event.previous_event_id is not None, ( + f"Event {event.type} (seq {event.emission_sequence}) has no previous_event_id" + ) + if event.previous_event_id in all_event_ids: + prev = next(e for e in all_events if e.event_id == event.previous_event_id) + assert (prev.emission_sequence or 0) < (event.emission_sequence or 0), ( + f"Event {event.type} (seq {event.emission_sequence}) has previous pointing " + f"to {prev.type} (seq {prev.emission_sequence}) which is not earlier" + ) + + @pytest.mark.asyncio + async def test_first_event_has_previous_pointing_back(self) -> None: + """Non-first events should have previous_event_id set.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + events: list[BaseEvent] = [] + + class MinimalFlow(Flow): + @start() + async def do_nothing(self): + return "done" + + reset_emission_counter() + reset_last_event_id() + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(FlowStartedEvent) + def capture1(source, event): + events.append(event) + + 
@crewai_event_bus.on(FlowFinishedEvent) + def capture2(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture3(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture4(source, event): + events.append(event) + + flow = MinimalFlow() + await flow.akickoff() + crewai_event_bus.flush() + + assert len(events) >= 2 + + sorted_events = sorted(events, key=lambda e: e.emission_sequence or 0) + for event in sorted_events[1:]: + assert event.previous_event_id is not None, ( + f"Event {event.type} (seq {event.emission_sequence}) should have previous_event_id set" + ) + + +class TestTriggeredByEventId: + """Tests for triggered_by_event_id causal chain tracking.""" + + @pytest.mark.asyncio + async def test_triggered_by_event_id_for_listeners(self) -> None: + """Listener events should have triggered_by_event_id pointing to the triggering method_execution_finished event.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class ListenerFlow(Flow): + @start() + async def start_method(self): + return "started" + + @listen(start_method) + async def listener_method(self, result): + return "listened" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + flow = ListenerFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)] + + assert len(started_events) >= 2 + assert len(finished_events) >= 2 + + start_method_finished = next( + (e for e in finished_events if e.method_name == "start_method"), None + ) + listener_started = next( + (e for e in started_events if e.method_name == "listener_method"), None + ) + + assert start_method_finished is not None + assert listener_started is not None + assert listener_started.triggered_by_event_id == start_method_finished.event_id + + @pytest.mark.asyncio + async def test_start_method_has_no_triggered_by(self) -> None: + """Start method events should have triggered_by_event_id=None.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class StartOnlyFlow(Flow): + @start() + async def my_start(self): + return "started" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + flow = StartOnlyFlow() + await flow.akickoff() + crewai_event_bus.flush() + + start_event = next( + (e for e in events if e.method_name == "my_start"), None + ) + assert start_event is not None + assert start_event.triggered_by_event_id is None + + @pytest.mark.asyncio + async def test_chained_listeners_triggered_by(self) -> None: + """Chained listeners should have triggered_by_event_id pointing to their triggering method.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + 
reset_last_event_id() + + events: list[BaseEvent] = [] + + class ChainedFlow(Flow): + @start() + async def first(self): + return "first" + + @listen(first) + async def second(self, result): + return "second" + + @listen(second) + async def third(self, result): + return "third" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + flow = ChainedFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)] + + first_finished = next( + (e for e in finished_events if e.method_name == "first"), None + ) + second_started = next( + (e for e in started_events if e.method_name == "second"), None + ) + second_finished = next( + (e for e in finished_events if e.method_name == "second"), None + ) + third_started = next( + (e for e in started_events if e.method_name == "third"), None + ) + + assert first_finished is not None + assert second_started is not None + assert second_finished is not None + assert third_started is not None + + assert second_started.triggered_by_event_id == first_finished.event_id + assert third_started.triggered_by_event_id == second_finished.event_id + + @pytest.mark.asyncio + async def test_parallel_listeners_same_trigger(self) -> None: + """Parallel listeners should all have triggered_by_event_id pointing to the same triggering event.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class ParallelFlow(Flow): + @start() + async def trigger(self): + return "trigger" + + @listen(trigger) + async def listener_a(self, result): + return "a" + + @listen(trigger) + async def listener_b(self, result): + return "b" + + @listen(trigger) + async def listener_c(self, result): + return "c" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + flow = ParallelFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)] + + trigger_finished = next( + (e for e in finished_events if e.method_name == "trigger"), None + ) + listener_a_started = next( + (e for e in started_events if e.method_name == "listener_a"), None + ) + listener_b_started = next( + (e for e in started_events if e.method_name == "listener_b"), None + ) + listener_c_started = next( + (e for e in started_events if e.method_name == "listener_c"), None + ) + + assert trigger_finished is not None + assert listener_a_started is not None + assert listener_b_started is not None + assert listener_c_started is not None + + # All parallel listeners should point to the same triggering event + assert listener_a_started.triggered_by_event_id == trigger_finished.event_id + assert listener_b_started.triggered_by_event_id == trigger_finished.event_id + assert 
listener_c_started.triggered_by_event_id == trigger_finished.event_id + + @pytest.mark.asyncio + async def test_or_condition_triggered_by(self) -> None: + """Listener with OR condition should have triggered_by_event_id pointing to whichever method triggered it.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + from crewai.flow.flow import or_ + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class OrConditionFlow(Flow): + @start() + async def path_a(self): + return "a" + + @listen(or_(path_a, "path_b")) + async def after_either(self, result): + return "done" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + flow = OrConditionFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)] + + path_a_finished = next( + (e for e in finished_events if e.method_name == "path_a"), None + ) + after_either_started = next( + (e for e in started_events if e.method_name == "after_either"), None + ) + + assert path_a_finished is not None + assert after_either_started is not None + + # The OR listener should be triggered by path_a since that's what ran + assert after_either_started.triggered_by_event_id == path_a_finished.event_id + + @pytest.mark.asyncio + async def test_router_triggered_by(self) -> None: + """Events from router-triggered paths should have correct triggered_by_event_id.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + from crewai.flow.flow import router + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class RouterFlow(Flow): + @start() + async def begin(self): + return "begin" + + @router(begin) + async def route_decision(self, result): + return "approved" + + @listen("approved") + async def handle_approved(self): + return "handled" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + flow = RouterFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)] + + begin_finished = next( + (e for e in finished_events if e.method_name == "begin"), None + ) + route_decision_started = next( + (e for e in started_events if e.method_name == "route_decision"), None + ) + route_decision_finished = next( + (e for e in finished_events if e.method_name == "route_decision"), None + ) + handle_approved_started = next( + (e for e in started_events if e.method_name == "handle_approved"), None + ) + + assert begin_finished is not None + assert route_decision_started is not None + assert route_decision_finished is not None + assert handle_approved_started is not None + + # Router should be triggered by begin + assert route_decision_started.triggered_by_event_id == 
begin_finished.event_id + # Handler should be triggered by router's finished event + assert handle_approved_started.triggered_by_event_id == route_decision_finished.event_id + + @pytest.mark.asyncio + async def test_multiple_kickoffs_maintain_chains(self) -> None: + """Multiple akickoff() calls should maintain correct triggered_by chains for each execution.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + first_run_events: list[BaseEvent] = [] + second_run_events: list[BaseEvent] = [] + + class ReusableFlow(Flow): + @start() + async def begin(self): + return "begin" + + @listen(begin) + async def process(self, result): + return "processed" + + with crewai_event_bus.scoped_handlers(): + # Defined before the handlers that close over it; flipped to True + # between the two kickoffs to route events to the right list. + capturing_second = False + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + if capturing_second: + second_run_events.append(event) + else: + first_run_events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + if capturing_second: + second_run_events.append(event) + else: + first_run_events.append(event) + + # First kickoff + flow1 = ReusableFlow() + await flow1.akickoff() + crewai_event_bus.flush() + + # Second kickoff + capturing_second = True + flow2 = ReusableFlow() + await flow2.akickoff() + crewai_event_bus.flush() + + # Should have events from both runs + assert len(first_run_events) >= 4 # 2 started + 2 finished + assert len(second_run_events) >= 4 + + # Check first run's triggered_by chain + first_started = [e for e in first_run_events if isinstance(e, MethodExecutionStartedEvent)] + first_finished = [e for e in first_run_events if isinstance(e, MethodExecutionFinishedEvent)] + + first_begin_finished = next( + (e for e in first_finished if e.method_name == "begin"), None + ) + first_process_started = next( + (e for e in first_started if e.method_name == "process"), None + ) + assert first_begin_finished is not None + assert first_process_started is not None + assert first_process_started.triggered_by_event_id == first_begin_finished.event_id + + # Check second run's triggered_by chain + second_started = [e for e in second_run_events if isinstance(e, MethodExecutionStartedEvent)] + second_finished = [e for e in second_run_events if isinstance(e, MethodExecutionFinishedEvent)] + + second_begin_finished = next( + (e for e in second_finished if e.method_name == "begin"), None + ) + second_process_started = next( + (e for e in second_started if e.method_name == "process"), None + ) + assert second_begin_finished is not None + assert second_process_started is not None + assert second_process_started.triggered_by_event_id == second_begin_finished.event_id + + # Verify the two runs have different event_ids (not reusing) + assert first_begin_finished.event_id != second_begin_finished.event_id + + # Verify each run has its own independent previous_event_id chain + # (chains reset at each top-level execution) + first_sorted = sorted(first_run_events, key=lambda e: e.emission_sequence or 0) + for event in first_sorted[1:]: + assert event.previous_event_id is not None, ( + f"First run event {event.type} (seq {event.emission_sequence}) should have previous_event_id" + ) + + second_sorted = sorted(second_run_events, key=lambda e: e.emission_sequence or 0) + for event in second_sorted[1:]: + assert
event.previous_event_id is not None, ( + f"Second run event {event.type} (seq {event.emission_sequence}) should have previous_event_id" + ) + + @pytest.mark.asyncio + async def test_parallel_flows_maintain_separate_triggered_by_chains(self) -> None: + """Parallel flow executions should maintain correct triggered_by chains independently.""" + import asyncio + + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class ParallelTestFlow(Flow): + def __init__(self, name: str): + super().__init__() + self.flow_name = name + + @start() + async def begin(self): + await asyncio.sleep(0.01) # Small delay to interleave + return self.flow_name + + @listen(begin) + async def process(self, result): + await asyncio.sleep(0.01) + return f"{result}_processed" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + # Run two flows in parallel + flow_a = ParallelTestFlow("flow_a") + flow_b = ParallelTestFlow("flow_b") + await asyncio.gather(flow_a.akickoff(), flow_b.akickoff()) + crewai_event_bus.flush() + + # Should have events from both flows (4 events each = 8 total) + assert len(events) >= 8 + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)] + + # Find each flow's begin event by checking the result contains its name + flow_a_begin_finished = [ + e for e in finished_events + if e.method_name == "begin" and "flow_a" in str(e.result) + ] + # method_name alone cannot tell the two flows apart, so collect the + # process events from both runs + all_process_started = [ + e for e in started_events + if e.method_name == "process" + ] + + flow_b_begin_finished = [ + e for e in finished_events + if e.method_name == "begin" and "flow_b" in str(e.result) + ] + + assert len(flow_a_begin_finished) >= 1 + assert len(flow_b_begin_finished) >= 1 + + # Every process event should be triggered by some begin finished event + for process_event in all_process_started: + trigger_id = process_event.triggered_by_event_id + assert trigger_id is not None + + # The triggering event should be a begin finished event + triggering_event = next( + (e for e in finished_events if e.event_id == trigger_id), None + ) + assert triggering_event is not None + assert triggering_event.method_name == "begin" + + # Verify previous_event_id forms a valid chain across all events + all_sorted = sorted(events, key=lambda e: e.emission_sequence or 0) + for event in all_sorted[1:]: + assert event.previous_event_id is not None + + @pytest.mark.asyncio + async def test_and_condition_triggered_by_last_method(self) -> None: + """AND condition listener should have triggered_by_event_id pointing to the last completing method.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + from crewai.flow.flow import and_ + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class AndConditionFlow(Flow): + @start() + async def method_a(self): + return "a" + + @listen(method_a) + async def method_b(self, result): + return "b" + + @listen(and_(method_a, method_b)) + async def after_both(self, result): + return
"both_done" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + flow = AndConditionFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)] + + method_b_finished = next( + (e for e in finished_events if e.method_name == "method_b"), None + ) + after_both_started = next( + (e for e in started_events if e.method_name == "after_both"), None + ) + + assert method_b_finished is not None + assert after_both_started is not None + + # The AND listener should be triggered by method_b (the last one to complete) + assert after_both_started.triggered_by_event_id == method_b_finished.event_id + + @pytest.mark.asyncio + async def test_exception_handling_triggered_by(self) -> None: + """Events emitted after exception should still have correct triggered_by.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + from crewai.events.types.flow_events import MethodExecutionFailedEvent + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class ExceptionFlow(Flow): + @start() + async def will_fail(self): + raise ValueError("intentional error") + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFailedEvent) + def capture_failed(source, event): + events.append(event) + + @crewai_event_bus.on(FlowStartedEvent) + def capture_flow_started(source, event): + events.append(event) + + flow = ExceptionFlow() + try: + await flow.akickoff() + except ValueError: + pass # Expected + crewai_event_bus.flush() + + # Even with exception, events should have proper previous_event_id chain + all_sorted = sorted(events, key=lambda e: e.emission_sequence or 0) + for event in all_sorted[1:]: + assert event.previous_event_id is not None, ( + f"Event {event.type} (seq {event.emission_sequence}) should have previous_event_id" + ) + + @pytest.mark.asyncio + async def test_sync_method_in_flow_triggered_by(self) -> None: + """Synchronous methods should still have correct triggered_by.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class SyncFlow(Flow): + @start() + def sync_start(self): # Synchronous method + return "sync_done" + + @listen(sync_start) + async def async_listener(self, result): + return "async_done" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + flow = SyncFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, 
MethodExecutionFinishedEvent)] + + sync_start_finished = next( + (e for e in finished_events if e.method_name == "sync_start"), None + ) + async_listener_started = next( + (e for e in started_events if e.method_name == "async_listener"), None + ) + + assert sync_start_finished is not None + assert async_listener_started is not None + assert async_listener_started.triggered_by_event_id == sync_start_finished.event_id + + @pytest.mark.asyncio + async def test_multiple_start_methods_triggered_by(self) -> None: + """Multiple start methods should each have triggered_by_event_id=None.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class MultiStartFlow(Flow): + @start() + async def start_one(self): + return "one" + + @start() + async def start_two(self): + return "two" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + flow = MultiStartFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + + start_one = next( + (e for e in started_events if e.method_name == "start_one"), None + ) + start_two = next( + (e for e in started_events if e.method_name == "start_two"), None + ) + + assert start_one is not None + assert start_two is not None + + # Both start methods should have no triggered_by (they're entry points) + assert start_one.triggered_by_event_id is None + assert start_two.triggered_by_event_id is None + + @pytest.mark.asyncio + async def test_none_return_triggered_by(self) -> None: + """Methods returning None should still have correct triggered_by chain.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class NoneReturnFlow(Flow): + @start() + async def returns_none(self): + return None + + @listen(returns_none) + async def after_none(self, result): + return "got_none" + + with crewai_event_bus.scoped_handlers(): + + @crewai_event_bus.on(MethodExecutionStartedEvent) + def capture_started(source, event): + events.append(event) + + @crewai_event_bus.on(MethodExecutionFinishedEvent) + def capture_finished(source, event): + events.append(event) + + flow = NoneReturnFlow() + await flow.akickoff() + crewai_event_bus.flush() + + started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)] + finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)] + + returns_none_finished = next( + (e for e in finished_events if e.method_name == "returns_none"), None + ) + after_none_started = next( + (e for e in started_events if e.method_name == "after_none"), None + ) + + assert returns_none_finished is not None + assert after_none_started is not None + assert after_none_started.triggered_by_event_id == returns_none_finished.event_id + + @pytest.mark.asyncio + async def test_deeply_nested_chain_triggered_by(self) -> None: + """Deeply nested listener chains (5+) should maintain correct triggered_by.""" + from crewai.events.base_events import reset_emission_counter + from crewai.events.event_context import reset_last_event_id + + reset_emission_counter() + reset_last_event_id() + + events: list[BaseEvent] = [] + + class 
DeepChainFlow(Flow):
+            @start()
+            async def level_0(self):
+                return "0"
+
+            @listen(level_0)
+            async def level_1(self, result):
+                return "1"
+
+            @listen(level_1)
+            async def level_2(self, result):
+                return "2"
+
+            @listen(level_2)
+            async def level_3(self, result):
+                return "3"
+
+            @listen(level_3)
+            async def level_4(self, result):
+                return "4"
+
+            @listen(level_4)
+            async def level_5(self, result):
+                return "5"
+
+        with crewai_event_bus.scoped_handlers():
+
+            @crewai_event_bus.on(MethodExecutionStartedEvent)
+            def capture_started(source, event):
+                events.append(event)
+
+            @crewai_event_bus.on(MethodExecutionFinishedEvent)
+            def capture_finished(source, event):
+                events.append(event)
+
+            flow = DeepChainFlow()
+            await flow.akickoff()
+            crewai_event_bus.flush()
+
+        started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
+        finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
+
+        # Verify each level triggers the next
+        for i in range(5):
+            prev_finished = next(
+                (e for e in finished_events if e.method_name == f"level_{i}"), None
+            )
+            next_started = next(
+                (e for e in started_events if e.method_name == f"level_{i+1}"), None
+            )
+
+            assert prev_finished is not None, f"level_{i} finished event not found"
+            assert next_started is not None, f"level_{i+1} started event not found"
+            assert next_started.triggered_by_event_id == prev_finished.event_id, (
+                f"level_{i+1} should be triggered by level_{i}"
+            )
+
+    @pytest.mark.asyncio
+    async def test_router_conditional_path_triggered_by(self) -> None:
+        """Router with conditional paths should have correct triggered_by for the selected path."""
+        from crewai.events.base_events import reset_emission_counter
+        from crewai.events.event_context import reset_last_event_id
+        from crewai.flow.flow import router
+
+        reset_emission_counter()
+        reset_last_event_id()
+
+        events: list[BaseEvent] = []
+
+        class ConditionalRouterFlow(Flow):
+            @start()
+            async def begin(self):
+                return "begin"
+
+            @router(begin)
+            async def conditional_router(self, result):
+                # Conditionally return one route
+                return "path_a"
+
+            @listen("path_a")
+            async def handle_path_a(self):
+                return "a_done"
+
+            @listen("path_b")
+            async def handle_path_b(self):
+                return "b_done"
+
+        with crewai_event_bus.scoped_handlers():
+
+            @crewai_event_bus.on(MethodExecutionStartedEvent)
+            def capture_started(source, event):
+                events.append(event)
+
+            @crewai_event_bus.on(MethodExecutionFinishedEvent)
+            def capture_finished(source, event):
+                events.append(event)
+
+            flow = ConditionalRouterFlow()
+            await flow.akickoff()
+            crewai_event_bus.flush()
+
+        started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
+        finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
+
+        router_finished = next(
+            (e for e in finished_events if e.method_name == "conditional_router"), None
+        )
+        handle_path_a_started = next(
+            (e for e in started_events if e.method_name == "handle_path_a"), None
+        )
+        handle_path_b_started = next(
+            (e for e in started_events if e.method_name == "handle_path_b"), None
+        )
+
+        assert router_finished is not None
+        assert handle_path_a_started is not None
+        # path_b should NOT be executed since router returned "path_a"
+        assert handle_path_b_started is None
+
+        # The selected path should be triggered by the router
+        assert handle_path_a_started.triggered_by_event_id == router_finished.event_id
+
+
+class TestCrewEventsInFlowTriggeredBy:
+    """Tests for triggered_by in crew events running inside flows."""
+
+    @pytest.mark.asyncio
+    async def test_flow_listener_triggered_by_in_nested_context(self) -> None:
+        """Nested listener contexts should maintain correct triggered_by chains."""
+        from crewai.events.base_events import reset_emission_counter
+        from crewai.events.event_context import reset_last_event_id
+
+        reset_emission_counter()
+        reset_last_event_id()
+
+        events: list[BaseEvent] = []
+
+        class NestedFlow(Flow):
+            @start()
+            async def trigger_method(self):
+                return "trigger"
+
+            @listen(trigger_method)
+            async def middle_method(self, result):
+                return "middle"
+
+            @listen(middle_method)
+            async def final_method(self, result):
+                return "final"
+
+        with crewai_event_bus.scoped_handlers():
+
+            @crewai_event_bus.on(MethodExecutionStartedEvent)
+            def capture_method_started(source, event):
+                events.append(event)
+
+            @crewai_event_bus.on(MethodExecutionFinishedEvent)
+            def capture_method_finished(source, event):
+                events.append(event)
+
+            flow = NestedFlow()
+            await flow.akickoff()
+            crewai_event_bus.flush()
+
+        method_started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
+        method_finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
+
+        trigger_finished = next(
+            (e for e in method_finished_events if e.method_name == "trigger_method"), None
+        )
+        middle_started = next(
+            (e for e in method_started_events if e.method_name == "middle_method"), None
+        )
+        middle_finished = next(
+            (e for e in method_finished_events if e.method_name == "middle_method"), None
+        )
+        final_started = next(
+            (e for e in method_started_events if e.method_name == "final_method"), None
+        )
+
+        assert trigger_finished is not None
+        assert middle_started is not None
+        assert middle_finished is not None
+        assert final_started is not None
+
+        # middle should be triggered by trigger_method
+        assert middle_started.triggered_by_event_id == trigger_finished.event_id
+        # final should be triggered by middle_method
+        assert final_started.triggered_by_event_id == middle_finished.event_id
+
+        # All events should have proper previous_event_id chain
+        all_sorted = sorted(events, key=lambda e: e.emission_sequence or 0)
+        for event in all_sorted[1:]:
+            assert event.previous_event_id is not None
+
+    def test_sync_kickoff_triggered_by(self) -> None:
+        """Synchronous kickoff() should maintain correct triggered_by chains."""
+        from crewai.events.base_events import reset_emission_counter
+        from crewai.events.event_context import reset_last_event_id
+
+        reset_emission_counter()
+        reset_last_event_id()
+
+        events: list[BaseEvent] = []
+
+        class SyncKickoffFlow(Flow):
+            @start()
+            def start_method(self):
+                return "started"
+
+            @listen(start_method)
+            def listener_method(self, result):
+                return "listened"
+
+        with crewai_event_bus.scoped_handlers():
+
+            @crewai_event_bus.on(MethodExecutionStartedEvent)
+            def capture_started(source, event):
+                events.append(event)
+
+            @crewai_event_bus.on(MethodExecutionFinishedEvent)
+            def capture_finished(source, event):
+                events.append(event)
+
+            flow = SyncKickoffFlow()
+            flow.kickoff()  # Synchronous kickoff
+            crewai_event_bus.flush()
+
+        started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
+        finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
+
+        start_finished = next(
+            (e for e in finished_events if e.method_name == "start_method"), None
+        )
+        listener_started = next(
+            (e for e in started_events if e.method_name == "listener_method"), None
+        )
+
+        assert start_finished is not None
+        assert listener_started is not None
+
+        # Listener should be triggered by start_method
+        assert listener_started.triggered_by_event_id == start_finished.event_id
+
+        # Verify previous_event_id chain
+        all_sorted = sorted(events, key=lambda e: e.emission_sequence or 0)
+        for event in all_sorted[1:]:
+            assert event.previous_event_id is not None
diff --git a/lib/crewai/tests/memory/test_external_memory.py b/lib/crewai/tests/memory/test_external_memory.py
index ddd7f0049..8718c5aca 100644
--- a/lib/crewai/tests/memory/test_external_memory.py
+++ b/lib/crewai/tests/memory/test_external_memory.py
@@ -304,6 +304,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "query": "test value",
         "limit": 3,
         "score_threshold": 0.35,
@@ -321,6 +326,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": ANY,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "query": "test value",
         "results": [],
         "limit": 3,
@@ -376,6 +386,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "value": "saving value",
         "metadata": {"task": "test_task"},
     }
@@ -392,6 +407,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": ANY,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "value": "saving value",
         "metadata": {"task": "test_task"},
         "save_time_ms": ANY,
diff --git a/lib/crewai/tests/memory/test_long_term_memory.py b/lib/crewai/tests/memory/test_long_term_memory.py
index 639724329..c33e4469b 100644
--- a/lib/crewai/tests/memory/test_long_term_memory.py
+++ b/lib/crewai/tests/memory/test_long_term_memory.py
@@ -70,6 +70,11 @@
         "from_agent": None,
         "agent_role": "test_agent",
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "value": "test_task",
         "metadata": {"task": "test_task", "quality": 0.5},
     }
@@ -85,6 +90,11 @@
         "from_agent": None,
         "agent_role": "test_agent",
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "value": "test_task",
         "metadata": {
             "task": "test_task",
@@ -139,6 +149,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "query": "test query",
         "limit": 5,
         "score_threshold": None,
@@ -156,6 +171,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": ANY,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "query": "test query",
         "results": None,
         "limit": 5,
diff --git a/lib/crewai/tests/memory/test_short_term_memory.py b/lib/crewai/tests/memory/test_short_term_memory.py
index a619c51ba..8ea64553a 100644
--- a/lib/crewai/tests/memory/test_short_term_memory.py
+++ b/lib/crewai/tests/memory/test_short_term_memory.py
@@ -81,6 +81,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "query": "test value",
         "limit": 3,
         "score_threshold": 0.35,
@@ -98,6 +103,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "query": "test value",
         "results": [],
         "limit": 3,
@@ -150,6 +160,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "value": "test value",
         "metadata": {"task": "test_task"},
     }
@@ -166,6 +181,11 @@
         "from_agent": None,
         "agent_role": None,
         "agent_id": None,
+        "event_id": ANY,
+        "parent_event_id": None,
+        "previous_event_id": ANY,
+        "triggered_by_event_id": None,
+        "emission_sequence": ANY,
         "value": "test value",
         "metadata": {"task": "test_task"},
         "save_time_ms": ANY,
diff --git a/lib/crewai/tests/test_flow.py b/lib/crewai/tests/test_flow.py
index 6926e15d5..50b2316fd 100644
--- a/lib/crewai/tests/test_flow.py
+++ b/lib/crewai/tests/test_flow.py
@@ -1204,7 +1204,8 @@ def test_complex_and_or_branching():
 
     # Final should be after both 2a and 2b
-    assert execution_order[-1] == "final"
+    # Note: we don't assert final is last because branch_1c has no downstream
+    # dependencies and can complete after final due to parallel execution
     assert execution_order.index("final") > execution_order.index("branch_2a")
     assert execution_order.index("final") > execution_order.index("branch_2b")
 
 
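For reviewers, a minimal sketch of the causality contract these tests assert: a method's finished event id becomes the `triggered_by_event_id` of the listener it triggers, and every event after the first carries a `previous_event_id`. This is illustrative only; `Event` below is a stand-in dataclass, not crewAI's real `BaseEvent`, and the ids are made up.

```python
# Illustrative only: stand-in Event type, not crewAI's BaseEvent.
from dataclasses import dataclass


@dataclass
class Event:
    event_id: str
    method_name: str
    triggered_by_event_id: str | None = None
    previous_event_id: str | None = None


# level_0 finishes; its finished-event id becomes the
# triggered_by_event_id (and previous_event_id) of level_1's started event.
level_0_finished = Event(event_id="evt-1", method_name="level_0")
level_1_started = Event(
    event_id="evt-2",
    method_name="level_1",
    triggered_by_event_id=level_0_finished.event_id,
    previous_event_id=level_0_finished.event_id,
)

# The invariants checked by the tests above:
assert level_1_started.triggered_by_event_id == level_0_finished.event_id
assert level_1_started.previous_event_id is not None
```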