Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-20 13:28:13 +00:00

Compare commits (13 commits): lorenze/en ... gl/feat/ev
| Author | SHA1 | Date |
|---|---|---|
| | 9a27fbb36b | |
| | 74b9a7d9ab | |
| | ff9108dac4 | |
| | c8f9547816 | |
| | d78ee915dc | |
| | 5914f8be7d | |
| | 161f9bd063 | |
| | da07bd4d9f | |
| | ae253b4156 | |
| | f7e1bdb64e | |
| | 1707df8785 | |
| | decdefe8f5 | |
| | ceef062426 | |
conftest.py (15)
@@ -31,6 +31,21 @@ def cleanup_event_handlers() -> Generator[None, Any, None]:
        pass


@pytest.fixture(autouse=True, scope="function")
def reset_event_state() -> None:
    """Reset event system state before each test for isolation."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import (
        EventContextConfig,
        _event_context_config,
        _event_id_stack,
    )

    reset_emission_counter()
    _event_id_stack.set(())
    _event_context_config.set(EventContextConfig())


@pytest.fixture(autouse=True, scope="function")
def setup_test_environment() -> Generator[None, Any, None]:
    """Setup test environment for crewAI workspace."""
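Note: the new `reset_event_state` fixture works because the event system keeps nesting state in module-level `ContextVar`s; resetting them between tests prevents one test's events from leaking into the next. A minimal sketch of the same isolation pattern — the names below are illustrative stand-ins, not crewAI's real internals:

```python
from contextvars import ContextVar

import pytest

# Hypothetical module-level context state, modeled on crewai.events.event_context.
_event_id_stack: ContextVar[tuple[str, ...]] = ContextVar("_event_id_stack", default=())


@pytest.fixture(autouse=True, scope="function")
def reset_context_state() -> None:
    # Reset to a known default so one test's event nesting cannot leak into the next.
    _event_id_stack.set(())
```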
@@ -3,9 +3,10 @@
from __future__ import annotations

from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, TypedDict
from typing import TYPE_CHECKING, Any, TypedDict
import uuid

from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
    AgentCard,
    Message,

@@ -20,7 +21,10 @@ from a2a.types import (
from typing_extensions import NotRequired

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AResponseReceivedEvent
from crewai.events.types.a2a_events import (
    A2AConnectionErrorEvent,
    A2AResponseReceivedEvent,
)


if TYPE_CHECKING:

@@ -55,7 +59,8 @@ class TaskStateResult(TypedDict):
    history: list[Message]
    result: NotRequired[str]
    error: NotRequired[str]
    agent_card: NotRequired[AgentCard]
    agent_card: NotRequired[dict[str, Any]]
    a2a_agent_name: NotRequired[str | None]


def extract_task_result_parts(a2a_task: A2ATask) -> list[str]:
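Note: `TaskStateResult.agent_card` changes type from a Pydantic `AgentCard` to a plain `dict[str, Any]`, keeping the result JSON-serializable; producers convert with `model_dump(exclude_none=True)`. A small sketch of the pattern (the `AgentCard` model here is a stand-in for `a2a.types.AgentCard`):

```python
from typing import Any, TypedDict

from pydantic import BaseModel
from typing_extensions import NotRequired


class AgentCard(BaseModel):  # stand-in for a2a.types.AgentCard
    name: str
    url: str
    description: str | None = None


class Result(TypedDict):
    status: str
    agent_card: NotRequired[dict[str, Any]]


card = AgentCard(name="demo", url="https://example.com")
# exclude_none=True drops unset optional fields, keeping the stored dict compact.
result: Result = {"status": "completed", "agent_card": card.model_dump(exclude_none=True)}
assert "description" not in result["agent_card"]
```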
@@ -131,50 +136,69 @@ def process_task_state(
    is_multiturn: bool,
    agent_role: str | None,
    result_parts: list[str] | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    is_final: bool = True,
) -> TaskStateResult | None:
    """Process A2A task state and return result dictionary.

    Shared logic for both polling and streaming handlers.

    Args:
        a2a_task: The A2A task to process
        new_messages: List to collect messages (modified in place)
        agent_card: The agent card
        turn_number: Current turn number
        is_multiturn: Whether multi-turn conversation
        agent_role: Agent role for logging
        a2a_task: The A2A task to process.
        new_messages: List to collect messages (modified in place).
        agent_card: The agent card.
        turn_number: Current turn number.
        is_multiturn: Whether multi-turn conversation.
        agent_role: Agent role for logging.
        result_parts: Accumulated result parts (streaming passes accumulated,
            polling passes None to extract from task)
            polling passes None to extract from task).
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        from_task: Optional CrewAI Task for event metadata.
        from_agent: Optional CrewAI Agent for event metadata.
        is_final: Whether this is the final response in the stream.

    Returns:
        Result dictionary if terminal/actionable state, None otherwise
        Result dictionary if terminal/actionable state, None otherwise.
    """
    should_extract = result_parts is None
    if result_parts is None:
        result_parts = []

    if a2a_task.status.state == TaskState.completed:
        if should_extract:
        if not result_parts:
            extracted_parts = extract_task_result_parts(a2a_task)
            result_parts.extend(extracted_parts)
            if a2a_task.history:
                new_messages.extend(a2a_task.history)

        response_text = " ".join(result_parts) if result_parts else ""
        message_id = None
        if a2a_task.status and a2a_task.status.message:
            message_id = a2a_task.status.message.message_id
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                context_id=a2a_task.context_id,
                message_id=message_id,
                is_multiturn=is_multiturn,
                status="completed",
                final=is_final,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

        return TaskStateResult(
            status=TaskState.completed,
            agent_card=agent_card,
            agent_card=agent_card.model_dump(exclude_none=True),
            result=response_text,
            history=new_messages,
        )

@@ -194,14 +218,24 @@ def process_task_state(
        )
        new_messages.append(agent_message)

        input_message_id = None
        if a2a_task.status and a2a_task.status.message:
            input_message_id = a2a_task.status.message.message_id
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                context_id=a2a_task.context_id,
                message_id=input_message_id,
                is_multiturn=is_multiturn,
                status="input_required",
                final=is_final,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -209,7 +243,7 @@ def process_task_state(
            status=TaskState.input_required,
            error=response_text,
            history=new_messages,
            agent_card=agent_card,
            agent_card=agent_card.model_dump(exclude_none=True),
        )

    if a2a_task.status.state in {TaskState.failed, TaskState.rejected}:
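Note one behavioral detail in the hunk above: the old `should_extract = result_parts is None` only fell back to extracting parts from the task when the caller passed `None`, while the new `if not result_parts:` also extracts when a caller (streaming) passed an accumulated list that is still empty. A tiny sketch of the difference, using a hypothetical extractor:

```python
def merge_parts(parts: list[str] | None, task_parts: list[str]) -> list[str]:
    if parts is None:
        parts = []
    if not parts:  # previously: only when the caller passed None
        parts.extend(task_parts)
    return parts


assert merge_parts(None, ["a"]) == ["a"]   # polling path: extract from the task
assert merge_parts([], ["a"]) == ["a"]     # streaming with zero chunks: now also extracts
assert merge_parts(["x"], ["a"]) == ["x"]  # accumulated chunks still win
```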
@@ -248,6 +282,11 @@ async def send_message_and_get_task_id(
    turn_number: int,
    is_multiturn: bool,
    agent_role: str | None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    context_id: str | None = None,
) -> str | TaskStateResult:
    """Send message and process initial response.

@@ -262,6 +301,11 @@ async def send_message_and_get_task_id(
        turn_number: Current turn number
        is_multiturn: Whether multi-turn conversation
        agent_role: Agent role for logging
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        endpoint: Optional A2A endpoint URL.
        a2a_agent_name: Optional A2A agent name.
        context_id: Optional A2A context ID for correlation.

    Returns:
        Task ID string if agent needs polling/waiting, or TaskStateResult if done.

@@ -280,9 +324,16 @@ async def send_message_and_get_task_id(
                A2AResponseReceivedEvent(
                    response=response_text,
                    turn_number=turn_number,
                    context_id=event.context_id,
                    message_id=event.message_id,
                    is_multiturn=is_multiturn,
                    status="completed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

@@ -290,7 +341,7 @@ async def send_message_and_get_task_id(
                status=TaskState.completed,
                result=response_text,
                history=new_messages,
                agent_card=agent_card,
                agent_card=agent_card.model_dump(exclude_none=True),
            )

        if isinstance(event, tuple):

@@ -304,6 +355,10 @@ async def send_message_and_get_task_id(
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            )
            if result:
                return result

@@ -316,6 +371,99 @@ async def send_message_and_get_task_id(
            history=new_messages,
        )

    except A2AClientHTTPError as e:
        error_msg = f"HTTP Error {e.status_code}: {e!s}"

        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=context_id,
        )
        new_messages.append(error_message)

        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint or "",
                error=str(e),
                error_type="http_error",
                status_code=e.status_code,
                a2a_agent_name=a2a_agent_name,
                operation="send_message",
                context_id=context_id,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=turn_number,
                context_id=context_id,
                is_multiturn=is_multiturn,
                status="failed",
                final=True,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )

    except Exception as e:
        error_msg = f"Unexpected error during send_message: {e!s}"

        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=context_id,
        )
        new_messages.append(error_message)

        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint or "",
                error=str(e),
                error_type="unexpected_error",
                a2a_agent_name=a2a_agent_name,
                operation="send_message",
                context_id=context_id,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=turn_number,
                context_id=context_id,
                is_multiturn=is_multiturn,
                status="failed",
                final=True,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )

    finally:
        aclose = getattr(event_stream, "aclose", None)
        if aclose:
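The two new `except` blocks follow a consistent dual-emit shape: record the failure as a connection-error event for diagnostics, emit a terminal `status="failed"` response event so listeners always see the turn end, then return a failed `TaskStateResult`. A condensed sketch of that shape (the bus and event payloads here are illustrative stand-ins, not crewAI's real signatures):

```python
from typing import Any, Callable

Emit = Callable[[Any, dict[str, Any]], None]


def fail_turn(emit: Emit, endpoint: str | None, exc: Exception, history: list[Any]) -> dict[str, Any]:
    error_msg = f"Unexpected error during send_message: {exc!s}"
    # 1) diagnostic event for observability pipelines
    emit(None, {"kind": "connection_error", "endpoint": endpoint or "", "error": str(exc)})
    # 2) terminal response event so every turn ends with final=True
    emit(None, {"kind": "response_received", "response": error_msg, "status": "failed", "final": True})
    # 3) failed result that callers can handle uniformly
    return {"status": "failed", "error": error_msg, "history": history}
```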
@@ -22,6 +22,13 @@ class BaseHandlerKwargs(TypedDict, total=False):
    turn_number: int
    is_multiturn: bool
    agent_role: str | None
    context_id: str | None
    task_id: str | None
    endpoint: str | None
    agent_branch: Any
    a2a_agent_name: str | None
    from_task: Any
    from_agent: Any


class PollingHandlerKwargs(BaseHandlerKwargs, total=False):

@@ -29,8 +36,6 @@ class PollingHandlerKwargs(BaseHandlerKwargs, total=False):

    polling_interval: float
    polling_timeout: float
    endpoint: str
    agent_branch: Any
    history_length: int
    max_polls: int | None

@@ -38,9 +43,6 @@ class PollingHandlerKwargs(BaseHandlerKwargs, total=False):
class StreamingHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for streaming handler."""

    context_id: str | None
    task_id: str | None


class PushNotificationHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for push notification handler."""

@@ -49,7 +51,6 @@ class PushNotificationHandlerKwargs(BaseHandlerKwargs, total=False):
    result_store: PushNotificationResultStore
    polling_timeout: float
    polling_interval: float
    agent_branch: Any


class PushNotificationResultStore(Protocol):
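Shared keys (`context_id`, `task_id`, `endpoint`, `agent_branch`, `a2a_agent_name`, `from_task`, `from_agent`) move onto `BaseHandlerKwargs` so the per-handler TypedDicts stop redeclaring them. Handlers typically consume such `total=False` kwargs via `Unpack`; the sketch below shows that consumption pattern under that assumption — the call sites are not part of this diff:

```python
from typing import Any, TypedDict

from typing_extensions import Unpack


class BaseHandlerKwargs(TypedDict, total=False):
    turn_number: int
    endpoint: str | None
    from_task: Any
    from_agent: Any


class PollingHandlerKwargs(BaseHandlerKwargs, total=False):
    polling_interval: float
    polling_timeout: float


async def execute(**kwargs: Unpack[PollingHandlerKwargs]) -> None:
    # total=False means any key may be absent, hence .get() with defaults.
    interval = kwargs.get("polling_interval", 5.0)
    turn = kwargs.get("turn_number", 0)
    print(f"poll every {interval}s, turn {turn}")
```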
@@ -31,6 +31,7 @@ from crewai.a2a.task_helpers import (
from crewai.a2a.updates.base import PollingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AConnectionErrorEvent,
    A2APollingStartedEvent,
    A2APollingStatusEvent,
    A2AResponseReceivedEvent,

@@ -49,23 +50,33 @@ async def _poll_task_until_complete(
    agent_branch: Any | None = None,
    history_length: int = 100,
    max_polls: int | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    context_id: str | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
) -> A2ATask:
    """Poll task status until terminal state reached.

    Args:
        client: A2A client instance
        task_id: Task ID to poll
        polling_interval: Seconds between poll attempts
        polling_timeout: Max seconds before timeout
        agent_branch: Agent tree branch for logging
        history_length: Number of messages to retrieve per poll
        max_polls: Max number of poll attempts (None = unlimited)
        client: A2A client instance.
        task_id: Task ID to poll.
        polling_interval: Seconds between poll attempts.
        polling_timeout: Max seconds before timeout.
        agent_branch: Agent tree branch for logging.
        history_length: Number of messages to retrieve per poll.
        max_polls: Max number of poll attempts (None = unlimited).
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        context_id: A2A context ID for correlation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.

    Returns:
        Final task object in terminal state
        Final task object in terminal state.

    Raises:
        A2APollingTimeoutError: If polling exceeds timeout or max_polls
        A2APollingTimeoutError: If polling exceeds timeout or max_polls.
    """
    start_time = time.monotonic()
    poll_count = 0

@@ -77,13 +88,19 @@ async def _poll_task_until_complete(
        )

        elapsed = time.monotonic() - start_time
        effective_context_id = task.context_id or context_id
        crewai_event_bus.emit(
            agent_branch,
            A2APollingStatusEvent(
                task_id=task_id,
                context_id=effective_context_id,
                state=str(task.status.state.value) if task.status.state else "unknown",
                elapsed_seconds=elapsed,
                poll_count=poll_count,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -137,6 +154,9 @@ class PollingHandler:
        max_polls = kwargs.get("max_polls")
        context_id = kwargs.get("context_id")
        task_id = kwargs.get("task_id")
        a2a_agent_name = kwargs.get("a2a_agent_name")
        from_task = kwargs.get("from_task")
        from_agent = kwargs.get("from_agent")

        try:
            result_or_task_id = await send_message_and_get_task_id(

@@ -146,6 +166,11 @@ class PollingHandler:
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                context_id=context_id,
            )

            if not isinstance(result_or_task_id, str):

@@ -157,8 +182,12 @@ class PollingHandler:
                agent_branch,
                A2APollingStartedEvent(
                    task_id=task_id,
                    context_id=context_id,
                    polling_interval=polling_interval,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

@@ -170,6 +199,11 @@ class PollingHandler:
                agent_branch=agent_branch,
                history_length=history_length,
                max_polls=max_polls,
                from_task=from_task,
                from_agent=from_agent,
                context_id=context_id,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
            )

            result = process_task_state(

@@ -179,6 +213,10 @@ class PollingHandler:
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            )
            if result:
                return result

@@ -206,9 +244,15 @@ class PollingHandler:
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(

@@ -229,14 +273,83 @@ class PollingHandler:
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="http_error",
                    status_code=e.status_code,
                    a2a_agent_name=a2a_agent_name,
                    operation="polling",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

        except Exception as e:
            error_msg = f"Unexpected error during polling: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="unexpected_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="polling",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
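`_poll_task_until_complete` follows a standard poll-until-terminal shape: a wall-clock timeout via `time.monotonic()`, an optional attempt cap, and a status event per poll. A generic sketch of that loop with illustrative names:

```python
import asyncio
import time
from typing import Any, Awaitable, Callable

TERMINAL = {"completed", "failed", "rejected", "canceled"}


async def poll_until_done(
    fetch: Callable[[], Awaitable[dict[str, Any]]],
    interval: float = 2.0,
    timeout: float = 300.0,
    max_polls: int | None = None,
) -> dict[str, Any]:
    start = time.monotonic()  # monotonic: immune to system clock changes
    polls = 0
    while True:
        task = await fetch()
        polls += 1
        if task.get("state") in TERMINAL:
            return task
        if time.monotonic() - start > timeout or (max_polls and polls >= max_polls):
            raise TimeoutError(f"gave up after {polls} polls")
        await asyncio.sleep(interval)
```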
@@ -29,6 +29,7 @@ from crewai.a2a.updates.base import (
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AConnectionErrorEvent,
    A2APushNotificationRegisteredEvent,
    A2APushNotificationTimeoutEvent,
    A2AResponseReceivedEvent,

@@ -48,6 +49,11 @@ async def _wait_for_push_result(
    timeout: float,
    poll_interval: float,
    agent_branch: Any | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    context_id: str | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
) -> A2ATask | None:
    """Wait for push notification result.

@@ -57,6 +63,11 @@ async def _wait_for_push_result(
        timeout: Max seconds to wait.
        poll_interval: Seconds between polling attempts.
        agent_branch: Agent tree branch for logging.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        context_id: A2A context ID for correlation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent.

    Returns:
        Final task object, or None if timeout.

@@ -72,7 +83,12 @@ async def _wait_for_push_result(
            agent_branch,
            A2APushNotificationTimeoutEvent(
                task_id=task_id,
                context_id=context_id,
                timeout_seconds=timeout,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -115,18 +131,56 @@ class PushNotificationHandler:
        agent_role = kwargs.get("agent_role")
        context_id = kwargs.get("context_id")
        task_id = kwargs.get("task_id")
        endpoint = kwargs.get("endpoint")
        a2a_agent_name = kwargs.get("a2a_agent_name")
        from_task = kwargs.get("from_task")
        from_agent = kwargs.get("from_agent")

        if config is None:
            error_msg = (
                "PushNotificationConfig is required for push notification handler"
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=error_msg,
                    error_type="configuration_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="push_notification",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error="PushNotificationConfig is required for push notification handler",
                error=error_msg,
                history=new_messages,
            )

        if result_store is None:
            error_msg = (
                "PushNotificationResultStore is required for push notification handler"
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=error_msg,
                    error_type="configuration_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="push_notification",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error="PushNotificationResultStore is required for push notification handler",
                error=error_msg,
                history=new_messages,
            )

@@ -138,6 +192,11 @@ class PushNotificationHandler:
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                context_id=context_id,
            )

            if not isinstance(result_or_task_id, str):

@@ -149,7 +208,12 @@ class PushNotificationHandler:
                agent_branch,
                A2APushNotificationRegisteredEvent(
                    task_id=task_id,
                    context_id=context_id,
                    callback_url=str(config.url),
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )

@@ -165,6 +229,11 @@ class PushNotificationHandler:
                timeout=polling_timeout,
                poll_interval=polling_interval,
                agent_branch=agent_branch,
                from_task=from_task,
                from_agent=from_agent,
                context_id=context_id,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
            )

            if final_task is None:

@@ -181,6 +250,10 @@ class PushNotificationHandler:
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            )
            if result:
                return result

@@ -203,14 +276,83 @@ class PushNotificationHandler:
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="http_error",
                    status_code=e.status_code,
                    a2a_agent_name=a2a_agent_name,
                    operation="push_notification",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

        except Exception as e:
            error_msg = f"Unexpected error during push notification: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="unexpected_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="push_notification",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
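The push-notification flow relies on a webhook receiver writing the finished task into a shared store while `_wait_for_push_result` polls that store until the task appears or the timeout elapses (returning `None` on timeout). A sketch of that idea; the store protocol below is an assumption modeled on `PushNotificationResultStore`:

```python
import asyncio
import time
from typing import Any, Protocol


class ResultStore(Protocol):
    async def get_result(self, task_id: str) -> dict[str, Any] | None: ...


async def wait_for_push_result(
    store: ResultStore, task_id: str, timeout: float, poll_interval: float
) -> dict[str, Any] | None:
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        result = await store.get_result(task_id)
        if result is not None:
            return result
        await asyncio.sleep(poll_interval)
    return None  # caller emits the timeout event (A2APushNotificationTimeoutEvent)
```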
@@ -26,7 +26,13 @@ from crewai.a2a.task_helpers import (
)
from crewai.a2a.updates.base import StreamingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import A2AResponseReceivedEvent
from crewai.events.types.a2a_events import (
    A2AArtifactReceivedEvent,
    A2AConnectionErrorEvent,
    A2AResponseReceivedEvent,
    A2AStreamingChunkEvent,
    A2AStreamingStartedEvent,
)


class StreamingHandler:

@@ -57,19 +63,57 @@ class StreamingHandler:
        turn_number = kwargs.get("turn_number", 0)
        is_multiturn = kwargs.get("is_multiturn", False)
        agent_role = kwargs.get("agent_role")
        endpoint = kwargs.get("endpoint")
        a2a_agent_name = kwargs.get("a2a_agent_name")
        from_task = kwargs.get("from_task")
        from_agent = kwargs.get("from_agent")
        agent_branch = kwargs.get("agent_branch")

        result_parts: list[str] = []
        final_result: TaskStateResult | None = None
        event_stream = client.send_message(message)
        chunk_index = 0

        crewai_event_bus.emit(
            agent_branch,
            A2AStreamingStartedEvent(
                task_id=task_id,
                context_id=context_id,
                endpoint=endpoint or "",
                a2a_agent_name=a2a_agent_name,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

        try:
            async for event in event_stream:
                if isinstance(event, Message):
                    new_messages.append(event)
                    message_context_id = event.context_id or context_id
                    for part in event.parts:
                        if part.root.kind == "text":
                            text = part.root.text
                            result_parts.append(text)
                            crewai_event_bus.emit(
                                agent_branch,
                                A2AStreamingChunkEvent(
                                    task_id=event.task_id or task_id,
                                    context_id=message_context_id,
                                    chunk=text,
                                    chunk_index=chunk_index,
                                    endpoint=endpoint,
                                    a2a_agent_name=a2a_agent_name,
                                    turn_number=turn_number,
                                    is_multiturn=is_multiturn,
                                    from_task=from_task,
                                    from_agent=from_agent,
                                ),
                            )
                            chunk_index += 1

                elif isinstance(event, tuple):
                    a2a_task, update = event

@@ -81,10 +125,51 @@ class StreamingHandler:
                        for part in artifact.parts
                        if part.root.kind == "text"
                    )
                    artifact_size = None
                    if artifact.parts:
                        artifact_size = sum(
                            len(p.root.text.encode("utf-8"))
                            if p.root.kind == "text"
                            else len(getattr(p.root, "data", b""))
                            for p in artifact.parts
                        )
                    effective_context_id = a2a_task.context_id or context_id
                    crewai_event_bus.emit(
                        agent_branch,
                        A2AArtifactReceivedEvent(
                            task_id=a2a_task.id,
                            artifact_id=artifact.artifact_id,
                            artifact_name=artifact.name,
                            artifact_description=artifact.description,
                            mime_type=artifact.parts[0].root.kind
                            if artifact.parts
                            else None,
                            size_bytes=artifact_size,
                            append=update.append or False,
                            last_chunk=update.last_chunk or False,
                            endpoint=endpoint,
                            a2a_agent_name=a2a_agent_name,
                            context_id=effective_context_id,
                            turn_number=turn_number,
                            is_multiturn=is_multiturn,
                            from_task=from_task,
                            from_agent=from_agent,
                        ),
                    )

                    is_final_update = False
                    if isinstance(update, TaskStatusUpdateEvent):
                        is_final_update = update.final
                        if (
                            update.status
                            and update.status.message
                            and update.status.message.parts
                        ):
                            result_parts.extend(
                                part.root.text
                                for part in update.status.message.parts
                                if part.root.kind == "text" and part.root.text
                            )

                    if (
                        not is_final_update

@@ -101,6 +186,11 @@ class StreamingHandler:
                        is_multiturn=is_multiturn,
                        agent_role=agent_role,
                        result_parts=result_parts,
                        endpoint=endpoint,
                        a2a_agent_name=a2a_agent_name,
                        from_task=from_task,
                        from_agent=from_agent,
                        is_final=is_final_update,
                    )
                    if final_result:
                        break

@@ -118,13 +208,82 @@ class StreamingHandler:
            new_messages.append(error_message)

            crewai_event_bus.emit(
                None,
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="http_error",
                    status_code=e.status_code,
                    a2a_agent_name=a2a_agent_name,
                    operation="streaming",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )

        except Exception as e:
            error_msg = f"Unexpected error during streaming: {e!s}"

            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)

            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint or "",
                    error=str(e),
                    error_type="unexpected_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="streaming",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(

@@ -136,7 +295,23 @@ class StreamingHandler:
        finally:
            aclose = getattr(event_stream, "aclose", None)
            if aclose:
                await aclose()
                try:
                    await aclose()
                except Exception as close_error:
                    crewai_event_bus.emit(
                        agent_branch,
                        A2AConnectionErrorEvent(
                            endpoint=endpoint or "",
                            error=str(close_error),
                            error_type="stream_close_error",
                            a2a_agent_name=a2a_agent_name,
                            operation="stream_close",
                            context_id=context_id,
                            task_id=task_id,
                            from_task=from_task,
                            from_agent=from_agent,
                        ),
                    )

        if final_result:
            return final_result

@@ -145,5 +320,5 @@ class StreamingHandler:
            status=TaskState.completed,
            result=" ".join(result_parts) if result_parts else "",
            history=new_messages,
            agent_card=agent_card,
            agent_card=agent_card.model_dump(exclude_none=True),
        )
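The `finally` block now guards the async generator's `aclose()`: closing a stream can itself raise (for example when the transport has already died), and that error should be reported rather than allowed to mask the original exception. A minimal runnable sketch of the pattern:

```python
import asyncio
from typing import AsyncIterator


async def chunks() -> AsyncIterator[str]:
    yield "hello"
    yield "world"


async def consume() -> None:
    stream = chunks()
    try:
        async for chunk in stream:
            print(chunk)
    finally:
        aclose = getattr(stream, "aclose", None)
        if aclose:
            try:
                await aclose()
            except Exception as close_error:
                print(f"stream close failed: {close_error}")  # report, don't raise


asyncio.run(consume())
```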
@@ -23,6 +23,12 @@ from crewai.a2a.auth.utils import (
)
from crewai.a2a.config import A2AServerConfig
from crewai.crew import Crew
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
    A2AAgentCardFetchedEvent,
    A2AAuthenticationFailedEvent,
    A2AConnectionErrorEvent,
)


if TYPE_CHECKING:

@@ -183,6 +189,8 @@ async def _afetch_agent_card_impl(
    timeout: int,
) -> AgentCard:
    """Internal async implementation of AgentCard fetching."""
    start_time = time.perf_counter()

    if "/.well-known/agent-card.json" in endpoint:
        base_url = endpoint.replace("/.well-known/agent-card.json", "")
        agent_card_path = "/.well-known/agent-card.json"

@@ -217,9 +225,29 @@ async def _afetch_agent_card_impl(
            )
            response.raise_for_status()

            return AgentCard.model_validate(response.json())
            agent_card = AgentCard.model_validate(response.json())
            fetch_time_ms = (time.perf_counter() - start_time) * 1000
            agent_card_dict = agent_card.model_dump(exclude_none=True)

            crewai_event_bus.emit(
                None,
                A2AAgentCardFetchedEvent(
                    endpoint=endpoint,
                    a2a_agent_name=agent_card.name,
                    agent_card=agent_card_dict,
                    protocol_version=agent_card.protocol_version,
                    provider=agent_card_dict.get("provider"),
                    cached=False,
                    fetch_time_ms=fetch_time_ms,
                ),
            )

            return agent_card

    except httpx.HTTPStatusError as e:
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        response_body = e.response.text[:1000] if e.response.text else None

        if e.response.status_code == 401:
            error_details = ["Authentication failed"]
            www_auth = e.response.headers.get("WWW-Authenticate")

@@ -228,7 +256,93 @@ async def _afetch_agent_card_impl(
            if not auth:
                error_details.append("No auth scheme provided")
            msg = " | ".join(error_details)

            auth_type = type(auth).__name__ if auth else None
            crewai_event_bus.emit(
                None,
                A2AAuthenticationFailedEvent(
                    endpoint=endpoint,
                    auth_type=auth_type,
                    error=msg,
                    status_code=401,
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "response_body": response_body,
                        "www_authenticate": www_auth,
                        "request_url": str(e.request.url),
                    },
                ),
            )

            raise A2AClientHTTPError(401, msg) from e

        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint,
                error=str(e),
                error_type="http_error",
                status_code=e.response.status_code,
                operation="fetch_agent_card",
                metadata={
                    "elapsed_ms": elapsed_ms,
                    "response_body": response_body,
                    "request_url": str(e.request.url),
                },
            ),
        )
        raise

    except httpx.TimeoutException as e:
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint,
                error=str(e),
                error_type="timeout",
                operation="fetch_agent_card",
                metadata={
                    "elapsed_ms": elapsed_ms,
                    "timeout_config": timeout,
                    "request_url": str(e.request.url) if e.request else None,
                },
            ),
        )
        raise

    except httpx.ConnectError as e:
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint,
                error=str(e),
                error_type="connection_error",
                operation="fetch_agent_card",
                metadata={
                    "elapsed_ms": elapsed_ms,
                    "request_url": str(e.request.url) if e.request else None,
                },
            ),
        )
        raise

    except httpx.RequestError as e:
        elapsed_ms = (time.perf_counter() - start_time) * 1000
        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint,
                error=str(e),
                error_type="request_error",
                operation="fetch_agent_card",
                metadata={
                    "elapsed_ms": elapsed_ms,
                    "request_url": str(e.request.url) if e.request else None,
                },
            ),
        )
        raise
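The `except` ordering above matters: in httpx, `TimeoutException` and `ConnectError` are both subclasses of `RequestError`, so the specific handlers must come first or they would never run. A compact sketch of the taxonomy used to pick the `error_type`:

```python
import httpx


def classify(exc: httpx.HTTPError) -> str:
    if isinstance(exc, httpx.HTTPStatusError):
        return f"http_error:{exc.response.status_code}"  # raised by raise_for_status()
    if isinstance(exc, httpx.TimeoutException):
        return "timeout"
    if isinstance(exc, httpx.ConnectError):
        return "connection_error"
    if isinstance(exc, httpx.RequestError):
        return "request_error"  # catch-all for other transport failures
    return "unknown"
```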
@@ -88,6 +88,9 @@ def execute_a2a_delegation(
    response_model: type[BaseModel] | None = None,
    turn_number: int | None = None,
    updates: UpdateConfig | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
) -> TaskStateResult:
    """Execute a task delegation to a remote A2A agent synchronously.

@@ -129,6 +132,9 @@ def execute_a2a_delegation(
        response_model: Optional Pydantic model for structured outputs.
        turn_number: Optional turn number for multi-turn conversations.
        updates: Update mechanism config from A2AConfig.updates.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        skill_id: Optional skill ID to target a specific agent capability.

    Returns:
        TaskStateResult with status, result/error, history, and agent_card.

@@ -156,10 +162,16 @@ def execute_a2a_delegation(
                transport_protocol=transport_protocol,
                turn_number=turn_number,
                updates=updates,
                from_task=from_task,
                from_agent=from_agent,
                skill_id=skill_id,
            )
        )
    finally:
        loop.close()
        try:
            loop.run_until_complete(loop.shutdown_asyncgens())
        finally:
            loop.close()
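The sync wrapper now drains pending async generators before closing its private event loop; closing a loop with half-finished async generators leaves their `finally` blocks unexecuted. A sketch of the corrected teardown:

```python
import asyncio


def run_sync(coro):
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(coro)
    finally:
        try:
            # Give async generators a chance to run their cleanup (aclose).
            loop.run_until_complete(loop.shutdown_asyncgens())
        finally:
            loop.close()
```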


async def aexecute_a2a_delegation(

@@ -181,6 +193,9 @@ async def aexecute_a2a_delegation(
    response_model: type[BaseModel] | None = None,
    turn_number: int | None = None,
    updates: UpdateConfig | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
) -> TaskStateResult:
    """Execute a task delegation to a remote A2A agent asynchronously.

@@ -222,6 +237,9 @@ async def aexecute_a2a_delegation(
        response_model: Optional Pydantic model for structured outputs.
        turn_number: Optional turn number for multi-turn conversations.
        updates: Update mechanism config from A2AConfig.updates.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        skill_id: Optional skill ID to target a specific agent capability.

    Returns:
        TaskStateResult with status, result/error, history, and agent_card.

@@ -233,17 +251,6 @@ async def aexecute_a2a_delegation(
    if turn_number is None:
        turn_number = len([m for m in conversation_history if m.role == Role.user]) + 1

    crewai_event_bus.emit(
        agent_branch,
        A2ADelegationStartedEvent(
            endpoint=endpoint,
            task_description=task_description,
            agent_id=agent_id,
            is_multiturn=is_multiturn,
            turn_number=turn_number,
        ),
    )

    result = await _aexecute_a2a_delegation_impl(
        endpoint=endpoint,
        auth=auth,

@@ -264,15 +271,28 @@ async def aexecute_a2a_delegation(
        response_model=response_model,
        updates=updates,
        transport_protocol=transport_protocol,
        from_task=from_task,
        from_agent=from_agent,
        skill_id=skill_id,
    )

    agent_card_data: dict[str, Any] = result.get("agent_card") or {}
    crewai_event_bus.emit(
        agent_branch,
        A2ADelegationCompletedEvent(
            status=result["status"],
            result=result.get("result"),
            error=result.get("error"),
            context_id=context_id,
            is_multiturn=is_multiturn,
            endpoint=endpoint,
            a2a_agent_name=result.get("a2a_agent_name"),
            agent_card=agent_card_data,
            provider=agent_card_data.get("provider"),
            metadata=metadata,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )

@@ -299,6 +319,9 @@ async def _aexecute_a2a_delegation_impl(
    agent_role: str | None,
    response_model: type[BaseModel] | None,
    updates: UpdateConfig | None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    skill_id: str | None = None,
) -> TaskStateResult:
    """Internal async implementation of A2A delegation."""
    if auth:

@@ -331,6 +354,28 @@ async def _aexecute_a2a_delegation_impl(
    if agent_card.name:
        a2a_agent_name = agent_card.name

    agent_card_dict = agent_card.model_dump(exclude_none=True)
    crewai_event_bus.emit(
        agent_branch,
        A2ADelegationStartedEvent(
            endpoint=endpoint,
            task_description=task_description,
            agent_id=agent_id or endpoint,
            context_id=context_id,
            is_multiturn=is_multiturn,
            turn_number=turn_number,
            a2a_agent_name=a2a_agent_name,
            agent_card=agent_card_dict,
            protocol_version=agent_card.protocol_version,
            provider=agent_card_dict.get("provider"),
            skill_id=skill_id,
            metadata=metadata,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )

    if turn_number == 1:
        agent_id_for_event = agent_id or endpoint
        crewai_event_bus.emit(

@@ -338,7 +383,17 @@ async def _aexecute_a2a_delegation_impl(
            A2AConversationStartedEvent(
                agent_id=agent_id_for_event,
                endpoint=endpoint,
                context_id=context_id,
                a2a_agent_name=a2a_agent_name,
                agent_card=agent_card_dict,
                protocol_version=agent_card.protocol_version,
                provider=agent_card_dict.get("provider"),
                skill_id=skill_id,
                reference_task_ids=reference_task_ids,
                metadata=metadata,
                extensions=list(extensions.keys()) if extensions else None,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )

@@ -364,6 +419,10 @@ async def _aexecute_a2a_delegation_impl(
        }
    )

    message_metadata = metadata.copy() if metadata else {}
    if skill_id:
        message_metadata["skill_id"] = skill_id

    message = Message(
        role=Role.user,
        message_id=str(uuid.uuid4()),

@@ -371,7 +430,7 @@ async def _aexecute_a2a_delegation_impl(
        context_id=context_id,
        task_id=task_id,
        reference_task_ids=reference_task_ids,
        metadata=metadata,
        metadata=message_metadata if message_metadata else None,
        extensions=extensions,
    )

@@ -381,8 +440,17 @@ async def _aexecute_a2a_delegation_impl(
        A2AMessageSentEvent(
            message=message_text,
            turn_number=turn_number,
            context_id=context_id,
            message_id=message.message_id,
            is_multiturn=is_multiturn,
            agent_role=agent_role,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            skill_id=skill_id,
            metadata=message_metadata if message_metadata else None,
            extensions=list(extensions.keys()) if extensions else None,
            from_task=from_task,
            from_agent=from_agent,
        ),
    )

@@ -397,6 +465,9 @@ async def _aexecute_a2a_delegation_impl(
        "task_id": task_id,
        "endpoint": endpoint,
        "agent_branch": agent_branch,
        "a2a_agent_name": a2a_agent_name,
        "from_task": from_task,
        "from_agent": from_agent,
    }

    if isinstance(updates, PollingConfig):

@@ -434,13 +505,16 @@ async def _aexecute_a2a_delegation_impl(
        use_polling=use_polling,
        push_notification_config=push_config_for_client,
    ) as client:
        return await handler.execute(
        result = await handler.execute(
            client=client,
            message=message,
            new_messages=new_messages,
            agent_card=agent_card,
            **handler_kwargs,
        )
        result["a2a_agent_name"] = a2a_agent_name
        result["agent_card"] = agent_card.model_dump(exclude_none=True)
        return result


@asynccontextmanager
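A sketch of the metadata merge above: per-message metadata is copied before the optional `skill_id` is folded in, so the caller's dict is never mutated and an empty dict still serializes as `None`:

```python
from typing import Any


def build_message_metadata(
    metadata: dict[str, Any] | None, skill_id: str | None
) -> dict[str, Any] | None:
    message_metadata = metadata.copy() if metadata else {}  # never mutate the caller's dict
    if skill_id:
        message_metadata["skill_id"] = skill_id
    return message_metadata if message_metadata else None


assert build_message_metadata(None, None) is None
assert build_message_metadata({"trace": "t1"}, "summarize") == {
    "trace": "t1",
    "skill_id": "summarize",
}
```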
@@ -3,11 +3,14 @@
from __future__ import annotations

import asyncio
import base64
from collections.abc import Callable, Coroutine
from datetime import datetime
from functools import wraps
import logging
import os
from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar, cast
from urllib.parse import urlparse

from a2a.server.agent_execution import RequestContext
from a2a.server.events import EventQueue

@@ -45,7 +48,14 @@ T = TypeVar("T")


def _parse_redis_url(url: str) -> dict[str, Any]:
    from urllib.parse import urlparse
    """Parse a Redis URL into aiocache configuration.

    Args:
        url: Redis connection URL (e.g., redis://localhost:6379/0).

    Returns:
        Configuration dict for aiocache.RedisCache.
    """

    parsed = urlparse(url)
    config: dict[str, Any] = {

@@ -127,7 +137,7 @@ def cancellable(
                async for message in pubsub.listen():
                    if message["type"] == "message":
                        return True
            except Exception as e:
            except (OSError, ConnectionError) as e:
                logger.warning("Cancel watcher error for task_id=%s: %s", task_id, e)
                return await poll_for_cancel()
            return False
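A sketch of what `_parse_redis_url` likely does with the now top-level `urlparse` import: split a `redis://` URL into host/port/db fields. The exact aiocache config keys below are an assumption; only the parsing shape is shown:

```python
from typing import Any
from urllib.parse import urlparse


def parse_redis_url(url: str) -> dict[str, Any]:
    parsed = urlparse(url)
    config: dict[str, Any] = {
        "endpoint": parsed.hostname or "localhost",  # key names assumed, not confirmed
        "port": parsed.port or 6379,
    }
    if parsed.path and parsed.path != "/":
        config["db"] = int(parsed.path.lstrip("/"))
    if parsed.password:
        config["password"] = parsed.password
    return config


assert parse_redis_url("redis://localhost:6379/0")["port"] == 6379
```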
@@ -183,7 +193,12 @@ async def execute(
        msg = "task_id and context_id are required"
        crewai_event_bus.emit(
            agent,
            A2AServerTaskFailedEvent(a2a_task_id="", a2a_context_id="", error=msg),
            A2AServerTaskFailedEvent(
                task_id="",
                context_id="",
                error=msg,
                from_agent=agent,
            ),
        )
        raise ServerError(InvalidParamsError(message=msg)) from None

@@ -195,7 +210,12 @@ async def execute(

    crewai_event_bus.emit(
        agent,
        A2AServerTaskStartedEvent(a2a_task_id=task_id, a2a_context_id=context_id),
        A2AServerTaskStartedEvent(
            task_id=task_id,
            context_id=context_id,
            from_task=task,
            from_agent=agent,
        ),
    )

    try:

@@ -215,20 +235,33 @@ async def execute(
        crewai_event_bus.emit(
            agent,
            A2AServerTaskCompletedEvent(
                a2a_task_id=task_id, a2a_context_id=context_id, result=str(result)
                task_id=task_id,
                context_id=context_id,
                result=str(result),
                from_task=task,
                from_agent=agent,
            ),
        )
    except asyncio.CancelledError:
        crewai_event_bus.emit(
            agent,
            A2AServerTaskCanceledEvent(a2a_task_id=task_id, a2a_context_id=context_id),
            A2AServerTaskCanceledEvent(
                task_id=task_id,
                context_id=context_id,
                from_task=task,
                from_agent=agent,
            ),
        )
        raise
    except Exception as e:
        crewai_event_bus.emit(
            agent,
            A2AServerTaskFailedEvent(
                a2a_task_id=task_id, a2a_context_id=context_id, error=str(e)
                task_id=task_id,
                context_id=context_id,
                error=str(e),
                from_task=task,
                from_agent=agent,
            ),
        )
        raise ServerError(

@@ -282,3 +315,85 @@ async def cancel(
            context.current_task.status = TaskStatus(state=TaskState.canceled)
            return context.current_task
    return None


def list_tasks(
    tasks: list[A2ATask],
    context_id: str | None = None,
    status: TaskState | None = None,
    status_timestamp_after: datetime | None = None,
    page_size: int = 50,
    page_token: str | None = None,
    history_length: int | None = None,
    include_artifacts: bool = False,
) -> tuple[list[A2ATask], str | None, int]:
    """Filter and paginate A2A tasks.

    Provides filtering by context, status, and timestamp, along with
    cursor-based pagination. This is a pure utility function that operates
    on an in-memory list of tasks - storage retrieval is handled separately.

    Args:
        tasks: All tasks to filter.
        context_id: Filter by context ID to get tasks in a conversation.
        status: Filter by task state (e.g., completed, working).
        status_timestamp_after: Filter to tasks updated after this time.
        page_size: Maximum tasks per page (default 50).
        page_token: Base64-encoded cursor from previous response.
        history_length: Limit history messages per task (None = full history).
        include_artifacts: Whether to include task artifacts (default False).

    Returns:
        Tuple of (filtered_tasks, next_page_token, total_count).
        - filtered_tasks: Tasks matching filters, paginated and trimmed.
        - next_page_token: Token for next page, or None if no more pages.
        - total_count: Total number of tasks matching filters (before pagination).
    """
    filtered: list[A2ATask] = []
    for task in tasks:
        if context_id and task.context_id != context_id:
            continue
        if status and task.status.state != status:
            continue
        if status_timestamp_after and task.status.timestamp:
            ts = datetime.fromisoformat(task.status.timestamp.replace("Z", "+00:00"))
            if ts <= status_timestamp_after:
                continue
        filtered.append(task)

    def get_timestamp(t: A2ATask) -> datetime:
        """Extract timestamp from task status for sorting."""
        if t.status.timestamp is None:
            return datetime.min
        return datetime.fromisoformat(t.status.timestamp.replace("Z", "+00:00"))

    filtered.sort(key=get_timestamp, reverse=True)
    total = len(filtered)

    start = 0
    if page_token:
        try:
            cursor_id = base64.b64decode(page_token).decode()
            for idx, task in enumerate(filtered):
                if task.id == cursor_id:
                    start = idx + 1
                    break
        except (ValueError, UnicodeDecodeError):
            pass

    page = filtered[start : start + page_size]

    result: list[A2ATask] = []
    for task in page:
        task = task.model_copy(deep=True)
        if history_length is not None and task.history:
            task.history = task.history[-history_length:]
        if not include_artifacts:
            task.artifacts = None
        result.append(task)

    next_token: str | None = None
    if result and len(result) == page_size:
        next_token = base64.b64encode(result[-1].id.encode()).decode()

    return result, next_token, total
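Usage sketch for the cursor pagination above: the `next_page_token` is simply the last task id, base64-encoded, so a client loops until the token comes back `None`. This assumes the `list_tasks` defined above is in scope:

```python
def iter_all_pages(tasks, page_size=50):
    token = None
    while True:
        # Each call scans forward from the cursor id encoded in `token`.
        page, token, total = list_tasks(tasks, page_size=page_size, page_token=token)
        yield from page
        if token is None:
            break
```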
@@ -6,9 +6,10 @@ Wraps agent classes with A2A delegation capabilities.
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable, Coroutine
|
||||
from collections.abc import Callable, Coroutine, Mapping
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from functools import wraps
|
||||
import json
|
||||
from types import MethodType
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
@@ -189,7 +190,7 @@ def _execute_task_with_a2a(
|
||||
a2a_agents: list[A2AConfig | A2AClientConfig],
|
||||
original_fn: Callable[..., str],
|
||||
task: Task,
|
||||
agent_response_model: type[BaseModel],
|
||||
agent_response_model: type[BaseModel] | None,
|
||||
context: str | None,
|
||||
tools: list[BaseTool] | None,
|
||||
extension_registry: ExtensionRegistry,
|
||||
@@ -277,7 +278,7 @@ def _execute_task_with_a2a(
|
||||
def _augment_prompt_with_a2a(
|
||||
a2a_agents: list[A2AConfig | A2AClientConfig],
|
||||
task_description: str,
|
||||
agent_cards: dict[str, AgentCard],
|
||||
agent_cards: Mapping[str, AgentCard | dict[str, Any]],
|
||||
conversation_history: list[Message] | None = None,
|
||||
turn_num: int = 0,
|
||||
max_turns: int | None = None,
|
||||
@@ -309,7 +310,15 @@ def _augment_prompt_with_a2a(
|
||||
for config in a2a_agents:
|
||||
if config.endpoint in agent_cards:
|
||||
card = agent_cards[config.endpoint]
|
||||
agents_text += f"\n{card.model_dump_json(indent=2, exclude_none=True, include={'description', 'url', 'skills'})}\n"
|
||||
if isinstance(card, dict):
|
||||
filtered = {
|
||||
k: v
|
||||
for k, v in card.items()
|
||||
if k in {"description", "url", "skills"} and v is not None
|
||||
}
|
||||
agents_text += f"\n{json.dumps(filtered, indent=2)}\n"
|
||||
else:
|
||||
agents_text += f"\n{card.model_dump_json(indent=2, exclude_none=True, include={'description', 'url', 'skills'})}\n"
|
||||
|
||||
failed_agents = failed_agents or {}
|
||||
if failed_agents:
|
||||
@@ -377,7 +386,7 @@ IMPORTANT: You have the ability to delegate this task to remote A2A agents.


def _parse_agent_response(
    raw_result: str | dict[str, Any], agent_response_model: type[BaseModel]
    raw_result: str | dict[str, Any], agent_response_model: type[BaseModel] | None
) -> BaseModel | str | dict[str, Any]:
    """Parse LLM output as AgentResponse or return raw agent response."""
    if agent_response_model:
@@ -394,6 +403,11 @@ def _parse_agent_response(
def _handle_max_turns_exceeded(
    conversation_history: list[Message],
    max_turns: int,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> str:
    """Handle the case when max turns is exceeded.

@@ -421,6 +435,11 @@ def _handle_max_turns_exceeded(
                final_result=final_message,
                error=None,
                total_turns=max_turns,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                agent_card=agent_card,
            ),
        )
        return final_message
@@ -432,6 +451,11 @@ def _handle_max_turns_exceeded(
            final_result=None,
            error=f"Conversation exceeded maximum turns ({max_turns})",
            total_turns=max_turns,
            from_task=from_task,
            from_agent=from_agent,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            agent_card=agent_card,
        ),
    )
    raise Exception(f"A2A conversation exceeded maximum turns ({max_turns})")
@@ -442,7 +466,12 @@ def _process_response_result(
    disable_structured_output: bool,
    turn_num: int,
    agent_role: str,
    agent_response_model: type[BaseModel],
    agent_response_model: type[BaseModel] | None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> tuple[str | None, str | None]:
    """Process LLM response and determine next action.

@@ -461,6 +490,10 @@ def _process_response_result(
                turn_number=final_turn_number,
                is_multiturn=True,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
            ),
        )
        crewai_event_bus.emit(
@@ -470,6 +503,11 @@ def _process_response_result(
                final_result=result_text,
                error=None,
                total_turns=final_turn_number,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                agent_card=agent_card,
            ),
        )
        return result_text, None
@@ -490,6 +528,10 @@ def _process_response_result(
                turn_number=final_turn_number,
                is_multiturn=True,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
            ),
        )
        crewai_event_bus.emit(
@@ -499,6 +541,11 @@ def _process_response_result(
                final_result=str(llm_response.message),
                error=None,
                total_turns=final_turn_number,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                agent_card=agent_card,
            ),
        )
        return str(llm_response.message), None
@@ -510,13 +557,15 @@ def _process_response_result(
def _prepare_agent_cards_dict(
    a2a_result: TaskStateResult,
    agent_id: str,
    agent_cards: dict[str, AgentCard] | None,
) -> dict[str, AgentCard]:
    agent_cards: Mapping[str, AgentCard | dict[str, Any]] | None,
) -> dict[str, AgentCard | dict[str, Any]]:
    """Prepare agent cards dictionary from result and existing cards.

    Shared logic for both sync and async response handlers.
    """
    agent_cards_dict = agent_cards or {}
    agent_cards_dict: dict[str, AgentCard | dict[str, Any]] = (
        dict(agent_cards) if agent_cards else {}
    )
    if "agent_card" in a2a_result and agent_id not in agent_cards_dict:
        agent_cards_dict[agent_id] = a2a_result["agent_card"]
    return agent_cards_dict
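The copy here matters: the old `agent_cards or {}` aliased the caller's mapping, so inserting a freshly fetched card mutated shared state; `dict(agent_cards)` makes the function side-effect free. A small sketch of the new behavior, with plain dicts standing in for AgentCard objects:

    from typing import Any

    def prepare(result: dict[str, Any], agent_id: str,
                cards: dict[str, Any] | None) -> dict[str, Any]:
        out = dict(cards) if cards else {}  # defensive copy, never an alias
        if "agent_card" in result and agent_id not in out:
            out[agent_id] = result["agent_card"]
        return out

    shared = {"a1": {"name": "existing"}}
    merged = prepare({"agent_card": {"name": "fetched"}}, "a2", shared)
    assert "a2" in merged and "a2" not in shared  # caller's dict untouched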
@@ -529,7 +578,7 @@ def _prepare_delegation_context(
    original_task_description: str | None,
) -> tuple[
    list[A2AConfig | A2AClientConfig],
    type[BaseModel],
    type[BaseModel] | None,
    str,
    str,
    A2AConfig | A2AClientConfig,
@@ -598,6 +647,11 @@ def _handle_task_completion(
    reference_task_ids: list[str],
    agent_config: A2AConfig | A2AClientConfig,
    turn_num: int,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> tuple[str | None, str | None, list[str]]:
    """Handle task completion state including reference task updates.

@@ -624,6 +678,11 @@ def _handle_task_completion(
                final_result=result_text,
                error=None,
                total_turns=final_turn_number,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                agent_card=agent_card,
            ),
        )
        return str(result_text), task_id_config, reference_task_ids
@@ -645,8 +704,11 @@ def _handle_agent_response_and_continue(
    original_fn: Callable[..., str],
    context: str | None,
    tools: list[BaseTool] | None,
    agent_response_model: type[BaseModel],
    agent_response_model: type[BaseModel] | None,
    remote_task_completed: bool = False,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> tuple[str | None, str | None]:
    """Handle A2A result and get CrewAI agent's response.

@@ -698,6 +760,11 @@ def _handle_agent_response_and_continue(
        turn_num=turn_num,
        agent_role=self.role,
        agent_response_model=agent_response_model,
        from_task=task,
        from_agent=self,
        endpoint=endpoint,
        a2a_agent_name=a2a_agent_name,
        agent_card=agent_card,
    )


@@ -750,6 +817,12 @@ def _delegate_to_a2a(

    conversation_history: list[Message] = []

    current_agent_card = agent_cards.get(agent_id) if agent_cards else None
    current_agent_card_dict = (
        current_agent_card.model_dump() if current_agent_card else None
    )
    current_a2a_agent_name = current_agent_card.name if current_agent_card else None

    try:
        for turn_num in range(max_turns):
            console_formatter = getattr(crewai_event_bus, "_console", None)
@@ -777,6 +850,8 @@ def _delegate_to_a2a(
                turn_number=turn_num + 1,
                updates=agent_config.updates,
                transport_protocol=agent_config.transport_protocol,
                from_task=task,
                from_agent=self,
            )

            conversation_history = a2a_result.get("history", [])
@@ -797,6 +872,11 @@ def _delegate_to_a2a(
                    reference_task_ids,
                    agent_config,
                    turn_num,
                    from_task=task,
                    from_agent=self,
                    endpoint=agent_config.endpoint,
                    a2a_agent_name=current_a2a_agent_name,
                    agent_card=current_agent_card_dict,
                )
            )
            if trusted_result is not None:
@@ -818,6 +898,9 @@ def _delegate_to_a2a(
                tools=tools,
                agent_response_model=agent_response_model,
                remote_task_completed=(a2a_result["status"] == TaskState.completed),
                endpoint=agent_config.endpoint,
                a2a_agent_name=current_a2a_agent_name,
                agent_card=current_agent_card_dict,
            )

            if final_result is not None:
@@ -846,6 +929,9 @@ def _delegate_to_a2a(
                tools=tools,
                agent_response_model=agent_response_model,
                remote_task_completed=False,
                endpoint=agent_config.endpoint,
                a2a_agent_name=current_a2a_agent_name,
                agent_card=current_agent_card_dict,
            )

            if final_result is not None:
@@ -862,11 +948,24 @@ def _delegate_to_a2a(
                        final_result=None,
                        error=error_msg,
                        total_turns=turn_num + 1,
                        from_task=task,
                        from_agent=self,
                        endpoint=agent_config.endpoint,
                        a2a_agent_name=current_a2a_agent_name,
                        agent_card=current_agent_card_dict,
                    ),
                )
                return f"A2A delegation failed: {error_msg}"

        return _handle_max_turns_exceeded(conversation_history, max_turns)
        return _handle_max_turns_exceeded(
            conversation_history,
            max_turns,
            from_task=task,
            from_agent=self,
            endpoint=agent_config.endpoint,
            a2a_agent_name=current_a2a_agent_name,
            agent_card=current_agent_card_dict,
        )

    finally:
        task.description = original_task_description
@@ -916,7 +1015,7 @@ async def _aexecute_task_with_a2a(
    a2a_agents: list[A2AConfig | A2AClientConfig],
    original_fn: Callable[..., Coroutine[Any, Any, str]],
    task: Task,
    agent_response_model: type[BaseModel],
    agent_response_model: type[BaseModel] | None,
    context: str | None,
    tools: list[BaseTool] | None,
    extension_registry: ExtensionRegistry,
@@ -1001,8 +1100,11 @@ async def _ahandle_agent_response_and_continue(
    original_fn: Callable[..., Coroutine[Any, Any, str]],
    context: str | None,
    tools: list[BaseTool] | None,
    agent_response_model: type[BaseModel],
    agent_response_model: type[BaseModel] | None,
    remote_task_completed: bool = False,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> tuple[str | None, str | None]:
    """Async version of _handle_agent_response_and_continue."""
    agent_cards_dict = _prepare_agent_cards_dict(a2a_result, agent_id, agent_cards)
@@ -1032,6 +1134,11 @@ async def _ahandle_agent_response_and_continue(
        turn_num=turn_num,
        agent_role=self.role,
        agent_response_model=agent_response_model,
        from_task=task,
        from_agent=self,
        endpoint=endpoint,
        a2a_agent_name=a2a_agent_name,
        agent_card=agent_card,
    )


@@ -1066,6 +1173,12 @@ async def _adelegate_to_a2a(

    conversation_history: list[Message] = []

    current_agent_card = agent_cards.get(agent_id) if agent_cards else None
    current_agent_card_dict = (
        current_agent_card.model_dump() if current_agent_card else None
    )
    current_a2a_agent_name = current_agent_card.name if current_agent_card else None

    try:
        for turn_num in range(max_turns):
            console_formatter = getattr(crewai_event_bus, "_console", None)
@@ -1093,6 +1206,8 @@ async def _adelegate_to_a2a(
                turn_number=turn_num + 1,
                transport_protocol=agent_config.transport_protocol,
                updates=agent_config.updates,
                from_task=task,
                from_agent=self,
            )

            conversation_history = a2a_result.get("history", [])
@@ -1113,6 +1228,11 @@ async def _adelegate_to_a2a(
                    reference_task_ids,
                    agent_config,
                    turn_num,
                    from_task=task,
                    from_agent=self,
                    endpoint=agent_config.endpoint,
                    a2a_agent_name=current_a2a_agent_name,
                    agent_card=current_agent_card_dict,
                )
            )
            if trusted_result is not None:
@@ -1134,6 +1254,9 @@ async def _adelegate_to_a2a(
                tools=tools,
                agent_response_model=agent_response_model,
                remote_task_completed=(a2a_result["status"] == TaskState.completed),
                endpoint=agent_config.endpoint,
                a2a_agent_name=current_a2a_agent_name,
                agent_card=current_agent_card_dict,
            )

            if final_result is not None:
@@ -1161,6 +1284,9 @@ async def _adelegate_to_a2a(
                context=context,
                tools=tools,
                agent_response_model=agent_response_model,
                endpoint=agent_config.endpoint,
                a2a_agent_name=current_a2a_agent_name,
                agent_card=current_agent_card_dict,
            )

            if final_result is not None:
@@ -1177,11 +1303,24 @@ async def _adelegate_to_a2a(
                        final_result=None,
                        error=error_msg,
                        total_turns=turn_num + 1,
                        from_task=task,
                        from_agent=self,
                        endpoint=agent_config.endpoint,
                        a2a_agent_name=current_a2a_agent_name,
                        agent_card=current_agent_card_dict,
                    ),
                )
                return f"A2A delegation failed: {error_msg}"

        return _handle_max_turns_exceeded(conversation_history, max_turns)
        return _handle_max_turns_exceeded(
            conversation_history,
            max_turns,
            from_task=task,
            from_agent=self,
            endpoint=agent_config.endpoint,
            a2a_agent_name=current_a2a_agent_name,
            agent_card=current_agent_card_dict,
        )

    finally:
        task.description = original_task_description
@@ -189,9 +189,14 @@ def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any]
    Returns:
        The potentially modified inputs dictionary after before callbacks.
    """
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_bus import crewai_event_bus
    from crewai.events.event_context import get_current_parent_id
    from crewai.events.types.crew_events import CrewKickoffStartedEvent

    if get_current_parent_id() is None:
        reset_emission_counter()

    for before_callback in crew.before_kickoff_callbacks:
        if inputs is None:
            inputs = {}
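Resetting the emission counter only when `get_current_parent_id()` is None keeps sequence numbers monotonic across nested executions: a crew kicked off from inside a flow inherits the flow's numbering instead of restarting at 1. A toy illustration of the guard, using the functions this diff introduces (the event id and type strings are invented):

    from crewai.events.base_events import (
        get_next_emission_sequence,
        reset_emission_counter,
    )
    from crewai.events.event_context import (
        get_current_parent_id,
        push_event_scope,
    )

    if get_current_parent_id() is None:  # root execution: number from 1
        reset_emission_counter()
    assert get_next_emission_sequence() == 1

    push_event_scope("flow-evt-1", "flow_started")  # now nested
    if get_current_parent_id() is None:  # False: nested kickoff keeps counting
        reset_emission_counter()
    assert get_next_emission_sequence() == 2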
@@ -1,9 +1,35 @@
from collections.abc import Iterator
from datetime import datetime, timezone
import itertools
from typing import Any
import uuid

from pydantic import BaseModel, Field

from crewai.utilities.serialization import to_serializable
from crewai.utilities.serialization import Serializable, to_serializable


_emission_counter: Iterator[int] = itertools.count(start=1)


def get_next_emission_sequence() -> int:
    """Get the next emission sequence number.

    Thread-safe due to atomic next() on itertools.count under the GIL.

    Returns:
        The next sequence number.
    """
    return next(_emission_counter)


def reset_emission_counter() -> None:
    """Reset the emission sequence counter to 1.

    Useful for test isolation.
    """
    global _emission_counter
    _emission_counter = itertools.count(start=1)


class BaseEvent(BaseModel):
@@ -22,7 +48,11 @@ class BaseEvent(BaseModel):
    agent_id: str | None = None
    agent_role: str | None = None

    def to_json(self, exclude: set[str] | None = None):
    event_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    parent_event_id: str | None = None
    emission_sequence: int | None = None

    def to_json(self, exclude: set[str] | None = None) -> Serializable:
        """
        Converts the event to a JSON-serializable dictionary.

@@ -34,13 +64,13 @@ class BaseEvent(BaseModel):
        """
        return to_serializable(self, exclude=exclude)

    def _set_task_params(self, data: dict[str, Any]):
    def _set_task_params(self, data: dict[str, Any]) -> None:
        if "from_task" in data and (task := data["from_task"]):
            self.task_id = str(task.id)
            self.task_name = task.name or task.description
            self.from_task = None

    def _set_agent_params(self, data: dict[str, Any]):
    def _set_agent_params(self, data: dict[str, Any]) -> None:
        task = data.get("from_task", None)
        agent = task.agent if task else data.get("from_agent", None)
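With these three fields, every event carries a stable identity (`event_id`), a causal link (`parent_event_id`), and a global order (`emission_sequence`). A quick check of the defaults, assuming BaseEvent's remaining fields all have defaults as the diff suggests (the concrete subclass is illustrative):

    from crewai.events.base_events import BaseEvent

    class DemoEvent(BaseEvent):
        type: str = "demo"

    e = DemoEvent()
    assert e.event_id                    # auto-generated UUID string
    assert e.parent_event_id is None     # filled in by the bus on emit
    assert e.emission_sequence is None   # stamped by the bus on emit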
@@ -16,8 +16,19 @@ from typing import Any, Final, ParamSpec, TypeVar

from typing_extensions import Self

from crewai.events.base_events import BaseEvent
from crewai.events.base_events import BaseEvent, get_next_emission_sequence
from crewai.events.depends import Depends
from crewai.events.event_context import (
    SCOPE_ENDING_EVENTS,
    SCOPE_STARTING_EVENTS,
    VALID_EVENT_PAIRS,
    get_current_parent_id,
    get_enclosing_parent_id,
    handle_empty_pop,
    handle_mismatch,
    pop_event_scope,
    push_event_scope,
)
from crewai.events.handler_graph import build_execution_plan
from crewai.events.types.event_bus_types import (
    AsyncHandler,
@@ -69,6 +80,8 @@ class CrewAIEventsBus:
    _execution_plan_cache: dict[type[BaseEvent], ExecutionPlan]
    _console: ConsoleFormatter
    _shutting_down: bool
    _pending_futures: set[Future[Any]]
    _futures_lock: threading.Lock

    def __new__(cls) -> Self:
        """Create or return the singleton instance.
@@ -91,6 +104,8 @@ class CrewAIEventsBus:
        """
        self._shutting_down = False
        self._rwlock = RWLock()
        self._pending_futures: set[Future[Any]] = set()
        self._futures_lock = threading.Lock()
        self._sync_handlers: dict[type[BaseEvent], SyncHandlerSet] = {}
        self._async_handlers: dict[type[BaseEvent], AsyncHandlerSet] = {}
        self._handler_dependencies: dict[
@@ -111,6 +126,25 @@ class CrewAIEventsBus:
        )
        self._loop_thread.start()

    def _track_future(self, future: Future[Any]) -> Future[Any]:
        """Track a future and set up automatic cleanup when it completes.

        Args:
            future: The future to track

        Returns:
            The same future for chaining
        """
        with self._futures_lock:
            self._pending_futures.add(future)

        def _cleanup(f: Future[Any]) -> None:
            with self._futures_lock:
                self._pending_futures.discard(f)

        future.add_done_callback(_cleanup)
        return future
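The bookkeeping pattern here is self-cleaning: the set only ever holds futures that are still running, because each future removes itself through a done-callback. The same idea in isolation (a standalone sketch, not the bus itself):

    import threading
    import time
    from concurrent.futures import Future, ThreadPoolExecutor

    pending: set[Future] = set()
    lock = threading.Lock()

    def track(future: Future) -> Future:
        with lock:
            pending.add(future)

        def _untrack(f: Future) -> None:
            with lock:
                pending.discard(f)  # future unregisters itself on completion

        future.add_done_callback(_untrack)
        return future

    with ThreadPoolExecutor() as pool:
        track(pool.submit(time.sleep, 0.1))
        time.sleep(0.2)
    assert not pending  # completed futures have removed themselves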
    def _run_loop(self) -> None:
        """Run the background async event loop."""
        asyncio.set_event_loop(self._loop)
@@ -326,6 +360,25 @@ class CrewAIEventsBus:
            ... await asyncio.wrap_future(future) # In async test
            ... # or future.result(timeout=5.0) in sync code
        """
        event.emission_sequence = get_next_emission_sequence()
        if event.parent_event_id is None:
            event_type_name = event.type
            if event_type_name in SCOPE_ENDING_EVENTS:
                event.parent_event_id = get_enclosing_parent_id()
                popped = pop_event_scope()
                if popped is None:
                    handle_empty_pop(event_type_name)
                else:
                    _, popped_type = popped
                    expected_start = VALID_EVENT_PAIRS.get(event_type_name)
                    if expected_start and popped_type and popped_type != expected_start:
                        handle_mismatch(event_type_name, popped_type, expected_start)
            elif event_type_name in SCOPE_STARTING_EVENTS:
                event.parent_event_id = get_current_parent_id()
                push_event_scope(event.event_id, event_type_name)
            else:
                event.parent_event_id = get_current_parent_id()

        event_type = type(event)

        with self._rwlock.r_locked():
@@ -339,9 +392,11 @@ class CrewAIEventsBus:
        async_handlers = self._async_handlers.get(event_type, frozenset())

        if has_dependencies:
            return asyncio.run_coroutine_threadsafe(
                self._emit_with_dependencies(source, event),
                self._loop,
            return self._track_future(
                asyncio.run_coroutine_threadsafe(
                    self._emit_with_dependencies(source, event),
                    self._loop,
                )
            )

        if sync_handlers:
@@ -353,16 +408,53 @@ class CrewAIEventsBus:
                ctx.run, self._call_handlers, source, event, sync_handlers
            )
            if not async_handlers:
                return sync_future
                return self._track_future(sync_future)

        if async_handlers:
            return asyncio.run_coroutine_threadsafe(
                self._acall_handlers(source, event, async_handlers),
                self._loop,
            return self._track_future(
                asyncio.run_coroutine_threadsafe(
                    self._acall_handlers(source, event, async_handlers),
                    self._loop,
                )
            )

        return None

    def flush(self, timeout: float | None = None) -> bool:
        """Block until all pending event handlers complete.

        This method waits for all futures from previously emitted events to
        finish executing. Useful at the end of operations (like kickoff) to
        ensure all event handlers have completed before returning.

        Args:
            timeout: Maximum time in seconds to wait for handlers to complete.
                If None, waits indefinitely.

        Returns:
            True if all handlers completed, False if timeout occurred.
        """
        with self._futures_lock:
            futures_to_wait = list(self._pending_futures)

        if not futures_to_wait:
            return True

        from concurrent.futures import wait as wait_futures

        done, not_done = wait_futures(futures_to_wait, timeout=timeout)

        # Check for exceptions in completed futures
        errors = [
            future.exception() for future in done if future.exception() is not None
        ]
        for error in errors:
            self._console.print(
                f"[CrewAIEventsBus] Handler exception during flush: {error}"
            )

        return len(not_done) == 0
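Because handlers run on worker threads and a background event loop, the new flush() gives callers a deterministic join point. Typical use at the end of a run (the timeout value is arbitrary):

    from crewai.events.event_bus import crewai_event_bus

    # ... emit events / run a crew ...
    if not crewai_event_bus.flush(timeout=5.0):
        print("some event handlers were still running after 5s")

Note the race flush() tolerates by design: futures created after the snapshot of `_pending_futures` is taken are not waited on, so it should be called once producers have gone quiet.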
    async def aemit(self, source: Any, event: BaseEvent) -> None:
        """Asynchronously emit an event to registered async handlers.

@@ -464,6 +556,9 @@ class CrewAIEventsBus:
            wait: If True, wait for all pending tasks to complete before stopping.
                If False, cancel all pending tasks immediately.
        """
        if wait:
            self.flush()

        with self._rwlock.w_locked():
            self._shutting_down = True
            loop = getattr(self, "_loop", None)
lib/crewai/src/crewai/events/event_context.py (new file, 260 lines)
@@ -0,0 +1,260 @@
"""Event context management for parent-child relationship tracking."""

from collections.abc import Generator
from contextlib import contextmanager
import contextvars
from dataclasses import dataclass
from enum import Enum

from crewai.events.utils.console_formatter import ConsoleFormatter


class MismatchBehavior(Enum):
    """Behavior when event pairs don't match."""

    WARN = "warn"
    RAISE = "raise"
    SILENT = "silent"


@dataclass
class EventContextConfig:
    """Configuration for event context behavior."""

    max_stack_depth: int = 100
    mismatch_behavior: MismatchBehavior = MismatchBehavior.WARN
    empty_pop_behavior: MismatchBehavior = MismatchBehavior.WARN


class StackDepthExceededError(Exception):
    """Raised when stack depth limit is exceeded."""


class EventPairingError(Exception):
    """Raised when event pairs don't match."""


class EmptyStackError(Exception):
    """Raised when popping from empty stack."""


_event_id_stack: contextvars.ContextVar[tuple[tuple[str, str], ...]] = (
    contextvars.ContextVar("_event_id_stack", default=())
)

_event_context_config: contextvars.ContextVar[EventContextConfig | None] = (
    contextvars.ContextVar("_event_context_config", default=None)
)

_default_config = EventContextConfig()

_console = ConsoleFormatter()


def get_current_parent_id() -> str | None:
    """Get the current parent event ID from the stack."""
    stack = _event_id_stack.get()
    return stack[-1][0] if stack else None


def get_enclosing_parent_id() -> str | None:
    """Get the parent of the current scope (stack[-2])."""
    stack = _event_id_stack.get()
    return stack[-2][0] if len(stack) >= 2 else None


def push_event_scope(event_id: str, event_type: str = "") -> None:
    """Push an event ID and type onto the scope stack."""
    config = _event_context_config.get() or _default_config
    stack = _event_id_stack.get()

    if 0 < config.max_stack_depth <= len(stack):
        raise StackDepthExceededError(
            f"Event stack depth limit ({config.max_stack_depth}) exceeded. "
            f"This usually indicates missing ending events."
        )

    _event_id_stack.set((*stack, (event_id, event_type)))


def pop_event_scope() -> tuple[str, str] | None:
    """Pop an event entry from the scope stack."""
    stack = _event_id_stack.get()
    if not stack:
        return None
    _event_id_stack.set(stack[:-1])
    return stack[-1]


def handle_empty_pop(event_type_name: str) -> None:
    """Handle a pop attempt on an empty stack."""
    config = _event_context_config.get() or _default_config
    msg = (
        f"Ending event '{event_type_name}' emitted with empty scope stack. "
        "Missing starting event?"
    )

    if config.empty_pop_behavior == MismatchBehavior.RAISE:
        raise EmptyStackError(msg)
    if config.empty_pop_behavior == MismatchBehavior.WARN:
        _console.print(f"[CrewAIEventsBus] Warning: {msg}")


def handle_mismatch(
    event_type_name: str,
    popped_type: str,
    expected_start: str,
) -> None:
    """Handle a mismatched event pair."""
    config = _event_context_config.get() or _default_config
    msg = (
        f"Event pairing mismatch. '{event_type_name}' closed '{popped_type}' "
        f"(expected '{expected_start}')"
    )

    if config.mismatch_behavior == MismatchBehavior.RAISE:
        raise EventPairingError(msg)
    if config.mismatch_behavior == MismatchBehavior.WARN:
        _console.print(f"[CrewAIEventsBus] Warning: {msg}")


@contextmanager
def event_scope(event_id: str, event_type: str = "") -> Generator[None, None, None]:
    """Context manager to establish a parent event scope."""
    stack = _event_id_stack.get()
    already_on_stack = any(entry[0] == event_id for entry in stack)
    if not already_on_stack:
        push_event_scope(event_id, event_type)
    try:
        yield
    finally:
        if not already_on_stack:
            pop_event_scope()
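The contextvar holds an immutable tuple, so each push/pop rebinds the stack rather than mutating it, and nested scopes propagate into threads and tasks the way contextvars normally do. A quick demonstration of the parent lookups in a fresh context (event ids invented):

    from crewai.events.event_context import (
        event_scope,
        get_current_parent_id,
        get_enclosing_parent_id,
    )

    assert get_current_parent_id() is None
    with event_scope("crew-1", "crew_kickoff_started"):
        assert get_current_parent_id() == "crew-1"
        with event_scope("task-1", "task_started"):
            assert get_current_parent_id() == "task-1"    # children attach here
            assert get_enclosing_parent_id() == "crew-1"  # what task_completed links to
    assert get_current_parent_id() is None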
SCOPE_STARTING_EVENTS: frozenset[str] = frozenset(
    {
        "flow_started",
        "method_execution_started",
        "crew_kickoff_started",
        "crew_train_started",
        "crew_test_started",
        "agent_execution_started",
        "agent_evaluation_started",
        "lite_agent_execution_started",
        "task_started",
        "llm_call_started",
        "llm_guardrail_started",
        "tool_usage_started",
        "mcp_connection_started",
        "mcp_tool_execution_started",
        "memory_retrieval_started",
        "memory_save_started",
        "memory_query_started",
        "knowledge_query_started",
        "knowledge_search_query_started",
        "a2a_delegation_started",
        "a2a_conversation_started",
        "a2a_server_task_started",
        "a2a_parallel_delegation_started",
        "agent_reasoning_started",
    }
)

SCOPE_ENDING_EVENTS: frozenset[str] = frozenset(
    {
        "flow_finished",
        "flow_paused",
        "method_execution_finished",
        "method_execution_failed",
        "crew_kickoff_completed",
        "crew_kickoff_failed",
        "crew_train_completed",
        "crew_train_failed",
        "crew_test_completed",
        "crew_test_failed",
        "agent_execution_completed",
        "agent_execution_error",
        "agent_evaluation_completed",
        "agent_evaluation_failed",
        "lite_agent_execution_completed",
        "lite_agent_execution_error",
        "task_completed",
        "task_failed",
        "llm_call_completed",
        "llm_call_failed",
        "llm_guardrail_completed",
        "llm_guardrail_failed",
        "tool_usage_finished",
        "tool_usage_error",
        "mcp_connection_completed",
        "mcp_connection_failed",
        "mcp_tool_execution_completed",
        "mcp_tool_execution_failed",
        "memory_retrieval_completed",
        "memory_save_completed",
        "memory_save_failed",
        "memory_query_completed",
        "memory_query_failed",
        "knowledge_query_completed",
        "knowledge_query_failed",
        "knowledge_search_query_completed",
        "knowledge_search_query_failed",
        "a2a_delegation_completed",
        "a2a_conversation_completed",
        "a2a_server_task_completed",
        "a2a_server_task_canceled",
        "a2a_server_task_failed",
        "a2a_parallel_delegation_completed",
        "agent_reasoning_completed",
        "agent_reasoning_failed",
    }
)

VALID_EVENT_PAIRS: dict[str, str] = {
    "flow_finished": "flow_started",
    "flow_paused": "flow_started",
    "method_execution_finished": "method_execution_started",
    "method_execution_failed": "method_execution_started",
    "crew_kickoff_completed": "crew_kickoff_started",
    "crew_kickoff_failed": "crew_kickoff_started",
    "crew_train_completed": "crew_train_started",
    "crew_train_failed": "crew_train_started",
    "crew_test_completed": "crew_test_started",
    "crew_test_failed": "crew_test_started",
    "agent_execution_completed": "agent_execution_started",
    "agent_execution_error": "agent_execution_started",
    "agent_evaluation_completed": "agent_evaluation_started",
    "agent_evaluation_failed": "agent_evaluation_started",
    "lite_agent_execution_completed": "lite_agent_execution_started",
    "lite_agent_execution_error": "lite_agent_execution_started",
    "task_completed": "task_started",
    "task_failed": "task_started",
    "llm_call_completed": "llm_call_started",
    "llm_call_failed": "llm_call_started",
    "llm_guardrail_completed": "llm_guardrail_started",
    "llm_guardrail_failed": "llm_guardrail_started",
    "tool_usage_finished": "tool_usage_started",
    "tool_usage_error": "tool_usage_started",
    "mcp_connection_completed": "mcp_connection_started",
    "mcp_connection_failed": "mcp_connection_started",
    "mcp_tool_execution_completed": "mcp_tool_execution_started",
    "mcp_tool_execution_failed": "mcp_tool_execution_started",
    "memory_retrieval_completed": "memory_retrieval_started",
    "memory_save_completed": "memory_save_started",
    "memory_save_failed": "memory_save_started",
    "memory_query_completed": "memory_query_started",
    "memory_query_failed": "memory_query_started",
    "knowledge_query_completed": "knowledge_query_started",
    "knowledge_query_failed": "knowledge_query_started",
    "knowledge_search_query_completed": "knowledge_search_query_started",
    "knowledge_search_query_failed": "knowledge_search_query_started",
    "a2a_delegation_completed": "a2a_delegation_started",
    "a2a_conversation_completed": "a2a_conversation_started",
    "a2a_server_task_completed": "a2a_server_task_started",
    "a2a_server_task_canceled": "a2a_server_task_started",
    "a2a_server_task_failed": "a2a_server_task_started",
    "a2a_parallel_delegation_completed": "a2a_parallel_delegation_started",
    "agent_reasoning_completed": "agent_reasoning_started",
    "agent_reasoning_failed": "agent_reasoning_started",
}
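These three tables are redundant by construction, which makes them easy to sanity-check: every ending event should map back to a registered starting event. A consistency check one might drop into a test:

    from crewai.events.event_context import (
        SCOPE_ENDING_EVENTS,
        SCOPE_STARTING_EVENTS,
        VALID_EVENT_PAIRS,
    )

    # Every pairing rule refers to known start/end event names.
    assert set(VALID_EVENT_PAIRS) <= SCOPE_ENDING_EVENTS
    assert set(VALID_EVENT_PAIRS.values()) <= SCOPE_STARTING_EVENTS
    # Several endings may share one start (completed/failed variants).
    assert VALID_EVENT_PAIRS["task_failed"] == "task_started"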
@@ -1,19 +1,28 @@
from crewai.events.types.a2a_events import (
    A2AAgentCardFetchedEvent,
    A2AArtifactReceivedEvent,
    A2AAuthenticationFailedEvent,
    A2AConnectionErrorEvent,
    A2AConversationCompletedEvent,
    A2AConversationStartedEvent,
    A2ADelegationCompletedEvent,
    A2ADelegationStartedEvent,
    A2AMessageSentEvent,
    A2AParallelDelegationCompletedEvent,
    A2AParallelDelegationStartedEvent,
    A2APollingStartedEvent,
    A2APollingStatusEvent,
    A2APushNotificationReceivedEvent,
    A2APushNotificationRegisteredEvent,
    A2APushNotificationSentEvent,
    A2APushNotificationTimeoutEvent,
    A2AResponseReceivedEvent,
    A2AServerTaskCanceledEvent,
    A2AServerTaskCompletedEvent,
    A2AServerTaskFailedEvent,
    A2AServerTaskStartedEvent,
    A2AStreamingChunkEvent,
    A2AStreamingStartedEvent,
)
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
@@ -93,7 +102,11 @@ from crewai.events.types.tool_usage_events import (


EventTypes = (
    A2AConversationCompletedEvent
    A2AAgentCardFetchedEvent
    | A2AArtifactReceivedEvent
    | A2AAuthenticationFailedEvent
    | A2AConnectionErrorEvent
    | A2AConversationCompletedEvent
    | A2AConversationStartedEvent
    | A2ADelegationCompletedEvent
    | A2ADelegationStartedEvent
@@ -102,12 +115,17 @@ EventTypes = (
    | A2APollingStatusEvent
    | A2APushNotificationReceivedEvent
    | A2APushNotificationRegisteredEvent
    | A2APushNotificationSentEvent
    | A2APushNotificationTimeoutEvent
    | A2AResponseReceivedEvent
    | A2AServerTaskCanceledEvent
    | A2AServerTaskCompletedEvent
    | A2AServerTaskFailedEvent
    | A2AServerTaskStartedEvent
    | A2AStreamingChunkEvent
    | A2AStreamingStartedEvent
    | A2AParallelDelegationStartedEvent
    | A2AParallelDelegationCompletedEvent
    | CrewKickoffStartedEvent
    | CrewKickoffCompletedEvent
    | CrewKickoffFailedEvent
@@ -267,9 +267,12 @@ class TraceBatchManager:

        sorted_events = sorted(
            self.event_buffer,
            key=lambda e: e.timestamp
            if hasattr(e, "timestamp") and e.timestamp
            else "",
            key=lambda e: (
                e.emission_sequence
                if e.emission_sequence is not None
                else float("inf"),
                e.timestamp if hasattr(e, "timestamp") and e.timestamp else "",
            ),
        )

        self.current_batch.events = sorted_events
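Events that predate the new field (emission_sequence is None) sort after all sequenced events via the float("inf") sentinel, with the timestamp as tiebreaker. The key's behavior in miniature:

    rows = [
        {"seq": 2, "ts": "10:01"},
        {"seq": None, "ts": "09:59"},  # legacy event without a sequence number
        {"seq": 1, "ts": "10:00"},
    ]
    rows.sort(
        key=lambda r: (
            r["seq"] if r["seq"] is not None else float("inf"),
            r["ts"],
        )
    )
    assert [r["seq"] for r in rows] == [1, 2, None]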
@@ -1,7 +1,7 @@
"""Trace collection listener for orchestrating trace collection."""

import os
from typing import Any, ClassVar, cast
from typing import Any, ClassVar
import uuid

from typing_extensions import Self
@@ -9,6 +9,32 @@ from typing_extensions import Self
from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.version import get_crewai_version
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import CrewAIEventsBus
from crewai.events.listeners.tracing.first_time_trace_handler import (
    FirstTimeTraceHandler,
@@ -18,6 +19,32 @@ from crewai.events.listeners.tracing.types import TraceEvent
from crewai.events.listeners.tracing.utils import (
    safe_serialize_to_dict,
)
from crewai.events.types.a2a_events import (
    A2AAgentCardFetchedEvent,
    A2AArtifactReceivedEvent,
    A2AAuthenticationFailedEvent,
    A2AConnectionErrorEvent,
    A2AConversationCompletedEvent,
    A2AConversationStartedEvent,
    A2ADelegationCompletedEvent,
    A2ADelegationStartedEvent,
    A2AMessageSentEvent,
    A2AParallelDelegationCompletedEvent,
    A2AParallelDelegationStartedEvent,
    A2APollingStartedEvent,
    A2APollingStatusEvent,
    A2APushNotificationReceivedEvent,
    A2APushNotificationRegisteredEvent,
    A2APushNotificationSentEvent,
    A2APushNotificationTimeoutEvent,
    A2AResponseReceivedEvent,
    A2AServerTaskCanceledEvent,
    A2AServerTaskCompletedEvent,
    A2AServerTaskFailedEvent,
    A2AServerTaskStartedEvent,
    A2AStreamingChunkEvent,
    A2AStreamingStartedEvent,
)
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionErrorEvent,
@@ -105,7 +132,7 @@ class TraceCollectionListener(BaseEventListener):
        """Create or return singleton instance."""
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cast(Self, cls._instance)
        return cls._instance

    def __init__(
        self,
@@ -160,6 +187,7 @@ class TraceCollectionListener(BaseEventListener):
        self._register_flow_event_handlers(crewai_event_bus)
        self._register_context_event_handlers(crewai_event_bus)
        self._register_action_event_handlers(crewai_event_bus)
        self._register_a2a_event_handlers(crewai_event_bus)
        self._register_system_event_handlers(crewai_event_bus)

        self._listeners_setup = True
@@ -439,6 +467,147 @@ class TraceCollectionListener(BaseEventListener):
    ) -> None:
        self._handle_action_event("knowledge_query_failed", source, event)

    def _register_a2a_event_handlers(self, event_bus: CrewAIEventsBus) -> None:
        """Register handlers for A2A (Agent-to-Agent) events."""

        @event_bus.on(A2ADelegationStartedEvent)
        def on_a2a_delegation_started(
            source: Any, event: A2ADelegationStartedEvent
        ) -> None:
            self._handle_action_event("a2a_delegation_started", source, event)

        @event_bus.on(A2ADelegationCompletedEvent)
        def on_a2a_delegation_completed(
            source: Any, event: A2ADelegationCompletedEvent
        ) -> None:
            self._handle_action_event("a2a_delegation_completed", source, event)

        @event_bus.on(A2AConversationStartedEvent)
        def on_a2a_conversation_started(
            source: Any, event: A2AConversationStartedEvent
        ) -> None:
            self._handle_action_event("a2a_conversation_started", source, event)

        @event_bus.on(A2AMessageSentEvent)
        def on_a2a_message_sent(source: Any, event: A2AMessageSentEvent) -> None:
            self._handle_action_event("a2a_message_sent", source, event)

        @event_bus.on(A2AResponseReceivedEvent)
        def on_a2a_response_received(
            source: Any, event: A2AResponseReceivedEvent
        ) -> None:
            self._handle_action_event("a2a_response_received", source, event)

        @event_bus.on(A2AConversationCompletedEvent)
        def on_a2a_conversation_completed(
            source: Any, event: A2AConversationCompletedEvent
        ) -> None:
            self._handle_action_event("a2a_conversation_completed", source, event)

        @event_bus.on(A2APollingStartedEvent)
        def on_a2a_polling_started(source: Any, event: A2APollingStartedEvent) -> None:
            self._handle_action_event("a2a_polling_started", source, event)

        @event_bus.on(A2APollingStatusEvent)
        def on_a2a_polling_status(source: Any, event: A2APollingStatusEvent) -> None:
            self._handle_action_event("a2a_polling_status", source, event)

        @event_bus.on(A2APushNotificationRegisteredEvent)
        def on_a2a_push_notification_registered(
            source: Any, event: A2APushNotificationRegisteredEvent
        ) -> None:
            self._handle_action_event("a2a_push_notification_registered", source, event)

        @event_bus.on(A2APushNotificationReceivedEvent)
        def on_a2a_push_notification_received(
            source: Any, event: A2APushNotificationReceivedEvent
        ) -> None:
            self._handle_action_event("a2a_push_notification_received", source, event)

        @event_bus.on(A2APushNotificationSentEvent)
        def on_a2a_push_notification_sent(
            source: Any, event: A2APushNotificationSentEvent
        ) -> None:
            self._handle_action_event("a2a_push_notification_sent", source, event)

        @event_bus.on(A2APushNotificationTimeoutEvent)
        def on_a2a_push_notification_timeout(
            source: Any, event: A2APushNotificationTimeoutEvent
        ) -> None:
            self._handle_action_event("a2a_push_notification_timeout", source, event)

        @event_bus.on(A2AStreamingStartedEvent)
        def on_a2a_streaming_started(
            source: Any, event: A2AStreamingStartedEvent
        ) -> None:
            self._handle_action_event("a2a_streaming_started", source, event)

        @event_bus.on(A2AStreamingChunkEvent)
        def on_a2a_streaming_chunk(source: Any, event: A2AStreamingChunkEvent) -> None:
            self._handle_action_event("a2a_streaming_chunk", source, event)

        @event_bus.on(A2AAgentCardFetchedEvent)
        def on_a2a_agent_card_fetched(
            source: Any, event: A2AAgentCardFetchedEvent
        ) -> None:
            self._handle_action_event("a2a_agent_card_fetched", source, event)

        @event_bus.on(A2AAuthenticationFailedEvent)
        def on_a2a_authentication_failed(
            source: Any, event: A2AAuthenticationFailedEvent
        ) -> None:
            self._handle_action_event("a2a_authentication_failed", source, event)

        @event_bus.on(A2AArtifactReceivedEvent)
        def on_a2a_artifact_received(
            source: Any, event: A2AArtifactReceivedEvent
        ) -> None:
            self._handle_action_event("a2a_artifact_received", source, event)

        @event_bus.on(A2AConnectionErrorEvent)
        def on_a2a_connection_error(
            source: Any, event: A2AConnectionErrorEvent
        ) -> None:
            self._handle_action_event("a2a_connection_error", source, event)

        @event_bus.on(A2AServerTaskStartedEvent)
        def on_a2a_server_task_started(
            source: Any, event: A2AServerTaskStartedEvent
        ) -> None:
            self._handle_action_event("a2a_server_task_started", source, event)

        @event_bus.on(A2AServerTaskCompletedEvent)
        def on_a2a_server_task_completed(
            source: Any, event: A2AServerTaskCompletedEvent
        ) -> None:
            self._handle_action_event("a2a_server_task_completed", source, event)

        @event_bus.on(A2AServerTaskCanceledEvent)
        def on_a2a_server_task_canceled(
            source: Any, event: A2AServerTaskCanceledEvent
        ) -> None:
            self._handle_action_event("a2a_server_task_canceled", source, event)

        @event_bus.on(A2AServerTaskFailedEvent)
        def on_a2a_server_task_failed(
            source: Any, event: A2AServerTaskFailedEvent
        ) -> None:
            self._handle_action_event("a2a_server_task_failed", source, event)

        @event_bus.on(A2AParallelDelegationStartedEvent)
        def on_a2a_parallel_delegation_started(
            source: Any, event: A2AParallelDelegationStartedEvent
        ) -> None:
            self._handle_action_event("a2a_parallel_delegation_started", source, event)

        @event_bus.on(A2AParallelDelegationCompletedEvent)
        def on_a2a_parallel_delegation_completed(
            source: Any, event: A2AParallelDelegationCompletedEvent
        ) -> None:
            self._handle_action_event(
                "a2a_parallel_delegation_completed", source, event
            )

    def _register_system_event_handlers(self, event_bus: CrewAIEventsBus) -> None:
        """Register handlers for system signal events (SIGTERM, SIGINT, etc.)."""

@@ -448,7 +617,7 @@ class TraceCollectionListener(BaseEventListener):
        if self.batch_manager.is_batch_initialized():
            self.batch_manager.finalize_batch()

    def _initialize_crew_batch(self, source: Any, event: Any) -> None:
    def _initialize_crew_batch(self, source: Any, event: BaseEvent) -> None:
        """Initialize trace batch.

        Args:
@@ -458,7 +627,7 @@ class TraceCollectionListener(BaseEventListener):
        user_context = self._get_user_context()
        execution_metadata = {
            "crew_name": getattr(event, "crew_name", "Unknown Crew"),
            "execution_start": event.timestamp if hasattr(event, "timestamp") else None,
            "execution_start": event.timestamp,
            "crewai_version": get_crewai_version(),
        }

@@ -467,7 +636,7 @@ class TraceCollectionListener(BaseEventListener):

        self._initialize_batch(user_context, execution_metadata)

    def _initialize_flow_batch(self, source: Any, event: Any) -> None:
    def _initialize_flow_batch(self, source: Any, event: BaseEvent) -> None:
        """Initialize trace batch for Flow execution.

        Args:
@@ -477,7 +646,7 @@ class TraceCollectionListener(BaseEventListener):
        user_context = self._get_user_context()
        execution_metadata = {
            "flow_name": getattr(event, "flow_name", "Unknown Flow"),
            "execution_start": event.timestamp if hasattr(event, "timestamp") else None,
            "execution_start": event.timestamp,
            "crewai_version": get_crewai_version(),
            "execution_type": "flow",
        }
@@ -546,18 +715,16 @@ class TraceCollectionListener(BaseEventListener):
        self.batch_manager.end_event_processing()

    def _create_trace_event(
        self, event_type: str, source: Any, event: Any
        self, event_type: str, source: Any, event: BaseEvent
    ) -> TraceEvent:
        """Create a trace event"""
        if hasattr(event, "timestamp") and event.timestamp:
            trace_event = TraceEvent(
                type=event_type,
                timestamp=event.timestamp.isoformat(),
            )
        else:
            trace_event = TraceEvent(
                type=event_type,
            )
        """Create a trace event with ordering information."""
        trace_event = TraceEvent(
            type=event_type,
            timestamp=event.timestamp.isoformat() if event.timestamp else "",
            event_id=event.event_id,
            emission_sequence=event.emission_sequence,
            parent_event_id=event.parent_event_id,
        )

        trace_event.event_data = self._build_event_data(event_type, event, source)

@@ -570,10 +737,15 @@ class TraceCollectionListener(BaseEventListener):
        if event_type not in self.complex_events:
            return safe_serialize_to_dict(event)
        if event_type == "task_started":
            task_name = event.task.name or event.task.description
            task_display_name = (
                task_name[:80] + "..." if len(task_name) > 80 else task_name
            )
            return {
                "task_description": event.task.description,
                "expected_output": event.task.expected_output,
                "task_name": event.task.name or event.task.description,
                "task_name": task_name,
                "task_display_name": task_display_name,
                "context": event.context,
                "agent_role": source.agent.role,
                "task_id": str(event.task.id),
@@ -605,10 +777,8 @@ class TraceCollectionListener(BaseEventListener):
            }
        if event_type == "llm_call_started":
            event_data = safe_serialize_to_dict(event)
            event_data["task_name"] = (
                event.task_name or event.task_description
                if hasattr(event, "task_name") and event.task_name
                else None
            event_data["task_name"] = event.task_name or getattr(
                event, "task_description", None
            )
            return event_data
        if event_type == "llm_call_completed":

@@ -15,5 +15,8 @@ class TraceEvent:
    type: str = ""
    event_data: dict[str, Any] = field(default_factory=dict)

    emission_sequence: int | None = None
    parent_event_id: str | None = None

    def to_dict(self) -> dict[str, Any]:
        return asdict(self)
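With the two new fields, a serialized trace event carries enough to rebuild both the global order and the parent-child tree offline. A quick construction check, assuming TraceEvent's other fields (timestamp, event_id) have defaults as the dataclass suggests; only names visible in this diff are used:

    from crewai.events.listeners.tracing.types import TraceEvent

    te = TraceEvent(
        type="task_started",
        emission_sequence=7,
        parent_event_id="crew-event-id",
    )
    payload = te.to_dict()  # plain dict via dataclasses.asdict
    assert payload["emission_sequence"] == 7
    assert payload["parent_event_id"] == "crew-event-id"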
@@ -4,68 +4,120 @@ This module defines events emitted during A2A protocol delegation,
including both single-turn and multiturn conversation flows.
"""

from __future__ import annotations

from typing import Any, Literal

from pydantic import model_validator

from crewai.events.base_events import BaseEvent


class A2AEventBase(BaseEvent):
    """Base class for A2A events with task/agent context."""

    from_task: Any | None = None
    from_agent: Any | None = None
    from_task: Any = None
    from_agent: Any = None

    def __init__(self, **data: Any) -> None:
        """Initialize A2A event, extracting task and agent metadata."""
        if data.get("from_task"):
            task = data["from_task"]
    @model_validator(mode="before")
    @classmethod
    def extract_task_and_agent_metadata(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Extract task and agent metadata before validation."""
        if task := data.get("from_task"):
            data["task_id"] = str(task.id)
            data["task_name"] = task.name or task.description
            data.setdefault("source_fingerprint", str(task.id))
            data.setdefault("source_type", "task")
            data.setdefault(
                "fingerprint_metadata",
                {
                    "task_id": str(task.id),
                    "task_name": task.name or task.description,
                },
            )
            data["from_task"] = None

        if data.get("from_agent"):
            agent = data["from_agent"]
        if agent := data.get("from_agent"):
            data["agent_id"] = str(agent.id)
            data["agent_role"] = agent.role
            data.setdefault("source_fingerprint", str(agent.id))
            data.setdefault("source_type", "agent")
            data.setdefault(
                "fingerprint_metadata",
                {
                    "agent_id": str(agent.id),
                    "agent_role": agent.role,
                },
            )
            data["from_agent"] = None

        super().__init__(**data)
        return data

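Moving the extraction out of `__init__` and into a `mode="before"` validator means it runs for every construction path pydantic supports (direct calls, `model_validate`, copies), not just direct instantiation. A toy model showing the same pattern (stand-in classes, not the real Task):

    from typing import Any
    from pydantic import BaseModel, model_validator

    class Evt(BaseModel):
        task_id: str | None = None
        from_task: Any = None

        @model_validator(mode="before")
        @classmethod
        def extract(cls, data: dict[str, Any]) -> dict[str, Any]:
            if task := data.get("from_task"):
                data["task_id"] = str(task.id)  # lift metadata off the object
                data["from_task"] = None        # drop the heavy reference
            return data

    class FakeTask:
        id = "t-1"

    assert Evt(from_task=FakeTask()).task_id == "t-1"
    assert Evt.model_validate({"from_task": FakeTask()}).task_id == "t-1"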
class A2ADelegationStartedEvent(A2AEventBase):
|
||||
"""Event emitted when A2A delegation starts.
|
||||
|
||||
Attributes:
|
||||
endpoint: A2A agent endpoint URL (AgentCard URL)
|
||||
task_description: Task being delegated to the A2A agent
|
||||
agent_id: A2A agent identifier
|
||||
is_multiturn: Whether this is part of a multiturn conversation
|
||||
turn_number: Current turn number (1-indexed, 1 for single-turn)
|
||||
endpoint: A2A agent endpoint URL (AgentCard URL).
|
||||
task_description: Task being delegated to the A2A agent.
|
||||
agent_id: A2A agent identifier.
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
is_multiturn: Whether this is part of a multiturn conversation.
|
||||
turn_number: Current turn number (1-indexed, 1 for single-turn).
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
agent_card: Full A2A agent card metadata.
|
||||
protocol_version: A2A protocol version being used.
|
||||
provider: Agent provider/organization info from agent card.
|
||||
skill_id: ID of the specific skill being invoked.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
extensions: List of A2A extension URIs in use.
|
||||
"""
|
||||
|
||||
type: str = "a2a_delegation_started"
|
||||
endpoint: str
|
||||
task_description: str
|
||||
agent_id: str
|
||||
context_id: str | None = None
|
||||
is_multiturn: bool = False
|
||||
turn_number: int = 1
|
||||
a2a_agent_name: str | None = None
|
||||
agent_card: dict[str, Any] | None = None
|
||||
protocol_version: str | None = None
|
||||
provider: dict[str, Any] | None = None
|
||||
skill_id: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
extensions: list[str] | None = None
|
||||
|
||||
|
||||
class A2ADelegationCompletedEvent(A2AEventBase):
|
||||
"""Event emitted when A2A delegation completes.
|
||||
|
||||
Attributes:
|
||||
status: Completion status (completed, input_required, failed, etc.)
|
||||
result: Result message if status is completed
|
||||
error: Error/response message (error for failed, response for input_required)
|
||||
is_multiturn: Whether this is part of a multiturn conversation
|
||||
status: Completion status (completed, input_required, failed, etc.).
|
||||
result: Result message if status is completed.
|
||||
error: Error/response message (error for failed, response for input_required).
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
is_multiturn: Whether this is part of a multiturn conversation.
|
||||
endpoint: A2A agent endpoint URL.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
agent_card: Full A2A agent card metadata.
|
||||
provider: Agent provider/organization info from agent card.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
extensions: List of A2A extension URIs in use.
|
||||
"""
|
||||
|
||||
type: str = "a2a_delegation_completed"
|
||||
status: str
|
||||
result: str | None = None
|
||||
error: str | None = None
|
||||
context_id: str | None = None
|
||||
is_multiturn: bool = False
|
||||
endpoint: str | None = None
|
||||
a2a_agent_name: str | None = None
|
||||
agent_card: dict[str, Any] | None = None
|
||||
provider: dict[str, Any] | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
extensions: list[str] | None = None
|
||||
|
||||
|
||||
class A2AConversationStartedEvent(A2AEventBase):
|
||||
@@ -75,51 +127,95 @@ class A2AConversationStartedEvent(A2AEventBase):
|
||||
before the first message exchange.
|
||||
|
||||
Attributes:
|
||||
agent_id: A2A agent identifier
|
||||
endpoint: A2A agent endpoint URL
|
||||
a2a_agent_name: Name of the A2A agent from agent card
|
||||
agent_id: A2A agent identifier.
|
||||
endpoint: A2A agent endpoint URL.
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
agent_card: Full A2A agent card metadata.
|
||||
protocol_version: A2A protocol version being used.
|
||||
provider: Agent provider/organization info from agent card.
|
||||
skill_id: ID of the specific skill being invoked.
|
||||
reference_task_ids: Related task IDs for context.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
extensions: List of A2A extension URIs in use.
|
||||
"""
|
||||
|
||||
type: str = "a2a_conversation_started"
|
||||
agent_id: str
|
||||
endpoint: str
|
||||
context_id: str | None = None
|
||||
a2a_agent_name: str | None = None
|
||||
agent_card: dict[str, Any] | None = None
|
||||
protocol_version: str | None = None
|
||||
provider: dict[str, Any] | None = None
|
||||
skill_id: str | None = None
|
||||
reference_task_ids: list[str] | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
extensions: list[str] | None = None
|
||||
|
||||
|
||||
class A2AMessageSentEvent(A2AEventBase):
|
||||
"""Event emitted when a message is sent to the A2A agent.
|
||||
|
||||
Attributes:
|
||||
message: Message content sent to the A2A agent
|
||||
turn_number: Current turn number (1-indexed)
|
||||
is_multiturn: Whether this is part of a multiturn conversation
|
||||
agent_role: Role of the CrewAI agent sending the message
|
||||
message: Message content sent to the A2A agent.
|
||||
turn_number: Current turn number (1-indexed).
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
message_id: Unique A2A message identifier.
|
||||
is_multiturn: Whether this is part of a multiturn conversation.
|
||||
agent_role: Role of the CrewAI agent sending the message.
|
||||
endpoint: A2A agent endpoint URL.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
skill_id: ID of the specific skill being invoked.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
extensions: List of A2A extension URIs in use.
|
||||
"""
|
||||
|
||||
type: str = "a2a_message_sent"
|
||||
message: str
|
||||
turn_number: int
|
||||
context_id: str | None = None
|
||||
message_id: str | None = None
|
||||
is_multiturn: bool = False
|
||||
agent_role: str | None = None
|
||||
endpoint: str | None = None
|
||||
a2a_agent_name: str | None = None
|
||||
skill_id: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
extensions: list[str] | None = None
|
||||
|
||||
|
||||
class A2AResponseReceivedEvent(A2AEventBase):
|
||||
"""Event emitted when a response is received from the A2A agent.
|
||||
|
||||
Attributes:
|
||||
response: Response content from the A2A agent
|
||||
turn_number: Current turn number (1-indexed)
|
||||
is_multiturn: Whether this is part of a multiturn conversation
|
||||
status: Response status (input_required, completed, etc.)
|
||||
agent_role: Role of the CrewAI agent (for display)
|
||||
response: Response content from the A2A agent.
|
||||
turn_number: Current turn number (1-indexed).
|
||||
context_id: A2A context ID grouping related tasks.
|
||||
message_id: Unique A2A message identifier.
|
||||
is_multiturn: Whether this is part of a multiturn conversation.
|
||||
status: Response status (input_required, completed, etc.).
|
||||
final: Whether this is the final response in the stream.
|
||||
agent_role: Role of the CrewAI agent (for display).
|
||||
endpoint: A2A agent endpoint URL.
|
||||
a2a_agent_name: Name of the A2A agent from agent card.
|
||||
metadata: Custom A2A metadata key-value pairs.
|
||||
extensions: List of A2A extension URIs in use.
|
||||
"""
|
||||
|
||||
type: str = "a2a_response_received"
|
||||
response: str
|
||||
turn_number: int
|
||||
context_id: str | None = None
|
||||
message_id: str | None = None
|
||||
is_multiturn: bool = False
|
||||
status: str
|
||||
final: bool = False
|
||||
agent_role: str | None = None
|
||||
endpoint: str | None = None
|
||||
a2a_agent_name: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
extensions: list[str] | None = None
|
||||
|
||||
|
||||
@@ -128,119 +224,433 @@ class A2AConversationCompletedEvent(A2AEventBase):
class A2AConversationCompletedEvent(A2AEventBase):
    """Event emitted when the A2A conversation completes.

    This is emitted once at the end of a multiturn conversation.

    Attributes:
        status: Final status (completed, failed, etc.).
        final_result: Final result if completed successfully.
        error: Error message if failed.
        context_id: A2A context ID grouping related tasks.
        total_turns: Total number of turns in the conversation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        reference_task_ids: Related task IDs for context.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_conversation_completed"
    status: Literal["completed", "failed"]
    final_result: str | None = None
    error: str | None = None
    context_id: str | None = None
    total_turns: int
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    reference_task_ids: list[str] | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None


class A2APollingStartedEvent(A2AEventBase):
    """Event emitted when polling mode begins for A2A delegation.

    Attributes:
        task_id: A2A task ID being polled.
        context_id: A2A context ID grouping related tasks.
        polling_interval: Seconds between poll attempts.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_polling_started"
    task_id: str
    context_id: str | None = None
    polling_interval: float
    endpoint: str
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None


class A2APollingStatusEvent(A2AEventBase):
    """Event emitted on each polling iteration.

    Attributes:
        task_id: A2A task ID being polled.
        context_id: A2A context ID grouping related tasks.
        state: Current task state from remote agent.
        elapsed_seconds: Time since polling started.
        poll_count: Number of polls completed.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_polling_status"
    task_id: str
    context_id: str | None = None
    state: str
    elapsed_seconds: float
    poll_count: int
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None


class A2APushNotificationRegisteredEvent(A2AEventBase):
    """Event emitted when push notification callback is registered.

    Attributes:
        task_id: A2A task ID for which callback is registered.
        context_id: A2A context ID grouping related tasks.
        callback_url: URL where agent will send push notifications.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_push_notification_registered"
    task_id: str
    context_id: str | None = None
    callback_url: str
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None


class A2APushNotificationReceivedEvent(A2AEventBase):
    """Event emitted when a push notification is received.

    This event should be emitted by the user's webhook handler when it receives
    a push notification from the remote A2A agent, before calling
    `result_store.store_result()`.

    Attributes:
        task_id: A2A task ID from the notification.
        context_id: A2A context ID grouping related tasks.
        state: Current task state from the notification.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_push_notification_received"
    task_id: str
    context_id: str | None = None
    state: str
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None
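

# --- Example (editor's sketch, not part of this module) ---------------------
# The docstring above asks webhook authors to emit this event before storing
# the result. A minimal handler body might look like this; the payload field
# names, the None source argument, and the result_store object are assumptions
# for illustration, not crewAI API.
def example_a2a_webhook_handler(payload: dict[str, Any]) -> None:
    """Sketch: emit A2APushNotificationReceivedEvent from a webhook payload."""
    from crewai.events.event_bus import crewai_event_bus

    crewai_event_bus.emit(
        None,  # event source; None assumed acceptable for a module-level emitter
        A2APushNotificationReceivedEvent(
            task_id=payload["task_id"],
            context_id=payload.get("context_id"),
            state=payload["state"],
        ),
    )
    # ...then persist the payload with whatever store your application wires up:
    # result_store.store_result(payload)  # result_store is application-defined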


class A2APushNotificationSentEvent(A2AEventBase):
    """Event emitted when a push notification is sent to a callback URL.

    Emitted by the A2A server when it sends a task status update to the
    client's registered push notification callback URL.

    Attributes:
        task_id: A2A task ID being notified.
        context_id: A2A context ID grouping related tasks.
        callback_url: URL the notification was sent to.
        state: Task state being reported.
        success: Whether the notification was successfully delivered.
        error: Error message if delivery failed.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_push_notification_sent"
    task_id: str
    context_id: str | None = None
    callback_url: str
    state: str
    success: bool = True
    error: str | None = None
    metadata: dict[str, Any] | None = None


class A2APushNotificationTimeoutEvent(A2AEventBase):
    """Event emitted when push notification wait times out.

    Attributes:
        task_id: A2A task ID that timed out.
        context_id: A2A context ID grouping related tasks.
        timeout_seconds: Timeout duration in seconds.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_push_notification_timeout"
    task_id: str
    context_id: str | None = None
    timeout_seconds: float
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None


class A2AStreamingStartedEvent(A2AEventBase):
    """Event emitted when streaming mode begins for A2A delegation.

    Attributes:
        task_id: A2A task ID for the streaming session.
        context_id: A2A context ID grouping related tasks.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        agent_role: Role of the CrewAI agent.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_streaming_started"
    task_id: str | None = None
    context_id: str | None = None
    endpoint: str
    a2a_agent_name: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    agent_role: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None


class A2AStreamingChunkEvent(A2AEventBase):
    """Event emitted when a streaming chunk is received.

    Attributes:
        task_id: A2A task ID for the streaming session.
        context_id: A2A context ID grouping related tasks.
        chunk: The text content of the chunk.
        chunk_index: Index of this chunk in the stream (0-indexed).
        final: Whether this is the final chunk in the stream.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_streaming_chunk"
    task_id: str | None = None
    context_id: str | None = None
    chunk: str
    chunk_index: int
    final: bool = False
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None


class A2AAgentCardFetchedEvent(A2AEventBase):
    """Event emitted when an agent card is successfully fetched.

    Attributes:
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        protocol_version: A2A protocol version from agent card.
        provider: Agent provider/organization info from agent card.
        cached: Whether the agent card was retrieved from cache.
        fetch_time_ms: Time taken to fetch the agent card in milliseconds.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_agent_card_fetched"
    endpoint: str
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    protocol_version: str | None = None
    provider: dict[str, Any] | None = None
    cached: bool = False
    fetch_time_ms: float | None = None
    metadata: dict[str, Any] | None = None


class A2AAuthenticationFailedEvent(A2AEventBase):
    """Event emitted when authentication to an A2A agent fails.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth_type: Type of authentication attempted (e.g., bearer, oauth2, api_key).
        error: Error message describing the failure.
        status_code: HTTP status code if applicable.
        a2a_agent_name: Name of the A2A agent if known.
        protocol_version: A2A protocol version being used.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_authentication_failed"
    endpoint: str
    auth_type: str | None = None
    error: str
    status_code: int | None = None
    a2a_agent_name: str | None = None
    protocol_version: str | None = None
    metadata: dict[str, Any] | None = None


class A2AArtifactReceivedEvent(A2AEventBase):
    """Event emitted when an artifact is received from a remote A2A agent.

    Attributes:
        task_id: A2A task ID the artifact belongs to.
        artifact_id: Unique identifier for the artifact.
        artifact_name: Name of the artifact.
        artifact_description: Purpose description of the artifact.
        mime_type: MIME type of the artifact content.
        size_bytes: Size of the artifact in bytes.
        append: Whether content should be appended to existing artifact.
        last_chunk: Whether this is the final chunk of the artifact.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        context_id: Context ID for correlation.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    type: str = "a2a_artifact_received"
    task_id: str
    artifact_id: str
    artifact_name: str | None = None
    artifact_description: str | None = None
    mime_type: str | None = None
    size_bytes: int | None = None
    append: bool = False
    last_chunk: bool = False
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    context_id: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None


class A2AConnectionErrorEvent(A2AEventBase):
    """Event emitted when a connection error occurs during A2A communication.

    Attributes:
        endpoint: A2A agent endpoint URL.
        error: Error message describing the connection failure.
        error_type: Type of error (e.g., timeout, connection_refused, dns_error).
        status_code: HTTP status code if applicable.
        a2a_agent_name: Name of the A2A agent from agent card.
        operation: The operation being attempted when error occurred.
        context_id: A2A context ID grouping related tasks.
        task_id: A2A task ID if applicable.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_connection_error"
    endpoint: str
    error: str
    error_type: str | None = None
    status_code: int | None = None
    a2a_agent_name: str | None = None
    operation: str | None = None
    context_id: str | None = None
    task_id: str | None = None
    metadata: dict[str, Any] | None = None
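

# --- Example (editor's sketch, not part of this module) ---------------------
# How a call site might emit A2AConnectionErrorEvent when fetching an agent
# card fails. The helper, its httpx usage, and the well-known card path are
# illustrative assumptions; only the event class itself comes from this file.
async def example_fetch_agent_card(endpoint: str) -> dict[str, Any]:
    import httpx

    from crewai.events.event_bus import crewai_event_bus

    try:
        async with httpx.AsyncClient() as client:
            # A2A agent cards are conventionally served at this well-known path.
            response = await client.get(f"{endpoint.rstrip('/')}/.well-known/agent.json")
            response.raise_for_status()
            return response.json()
    except httpx.HTTPError as exc:
        crewai_event_bus.emit(
            None,  # source; None assumed acceptable here
            A2AConnectionErrorEvent(
                endpoint=endpoint,
                error=str(exc),
                error_type=type(exc).__name__,
                operation="fetch_agent_card",
            ),
        )
        raise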


class A2AServerTaskStartedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution starts.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_server_task_started"
    task_id: str
    context_id: str
    metadata: dict[str, Any] | None = None


class A2AServerTaskCompletedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution completes.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        result: The task result.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_server_task_completed"
    task_id: str
    context_id: str
    result: str
    metadata: dict[str, Any] | None = None


class A2AServerTaskCanceledEvent(A2AEventBase):
    """Event emitted when an A2A server task execution is canceled.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_server_task_canceled"
    task_id: str
    context_id: str
    metadata: dict[str, Any] | None = None


class A2AServerTaskFailedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution fails.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        error: Error message describing the failure.
        metadata: Custom A2A metadata key-value pairs.
    """

    type: str = "a2a_server_task_failed"
    task_id: str
    context_id: str
    error: str
    metadata: dict[str, Any] | None = None


class A2AParallelDelegationStartedEvent(A2AEventBase):
    """Event emitted when parallel delegation to multiple A2A agents begins.

    Attributes:
        endpoints: List of A2A agent endpoints being delegated to.
        task_description: Description of the task being delegated.
    """

    type: str = "a2a_parallel_delegation_started"
    endpoints: list[str]
    task_description: str


class A2AParallelDelegationCompletedEvent(A2AEventBase):
    """Event emitted when parallel delegation to multiple A2A agents completes.

    Attributes:
        endpoints: List of A2A agent endpoints that were delegated to.
        success_count: Number of successful delegations.
        failure_count: Number of failed delegations.
        results: Summary of results from each agent.
    """

    type: str = "a2a_parallel_delegation_completed"
    endpoints: list[str]
    success_count: int
    failure_count: int
    results: dict[str, str] | None = None
@@ -30,7 +30,9 @@ from pydantic import BaseModel, Field, ValidationError
from rich.console import Console
from rich.panel import Panel

from crewai.events.base_events import reset_emission_counter
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_context import get_current_parent_id
from crewai.events.listeners.tracing.trace_listener import (
    TraceCollectionListener,
)
@@ -73,6 +75,7 @@ from crewai.flow.utils import (
    is_simple_flow_condition,
)


if TYPE_CHECKING:
    from crewai.flow.async_feedback.types import PendingFeedbackContext
    from crewai.flow.human_feedback import HumanFeedbackResult
@@ -570,7 +573,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        flow_id: str,
        persistence: FlowPersistence | None = None,
        **kwargs: Any,
    ) -> Flow[Any]:
        """Create a Flow instance from a pending feedback state.

        This classmethod is used to restore a flow that was paused waiting
@@ -631,7 +634,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        return instance

    @property
    def pending_feedback(self) -> PendingFeedbackContext | None:
        """Get the pending feedback context if this flow is waiting for feedback.

        Returns:
@@ -716,9 +719,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
        Raises:
            ValueError: If no pending feedback context exists
        """
        from datetime import datetime

        from crewai.flow.human_feedback import HumanFeedbackResult

        if self._pending_feedback_context is None:
            raise ValueError(
                "No pending feedback context. Use from_pending() to restore a paused flow."
@@ -744,7 +748,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
            collapsed_outcome = self._collapse_to_outcome(
                feedback=feedback,
                outcomes=emit,
                llm=llm,  # type: ignore[arg-type]
            )

            # Create result
@@ -792,13 +796,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
            self._method_outputs.append(collapsed_outcome)

            # Then trigger listeners for the outcome (e.g., "approved" triggers @listen("approved"))
            final_result = await self._execute_listeners(  # type: ignore[func-returns-value]
                FlowMethodName(collapsed_outcome),  # Use outcome as trigger
                result,  # Pass HumanFeedbackResult to listeners
            )
        else:
            # Normal behavior - pass the HumanFeedbackResult
            final_result = await self._execute_listeners(  # type: ignore[func-returns-value]
                FlowMethodName(context.method_name),
                result,
            )
@@ -901,11 +905,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
            model_fields = getattr(self.initial_state, "model_fields", None)
            if not model_fields or "id" not in model_fields:
                raise ValueError("Flow state model must have an 'id' field")
            instance = self.initial_state()  # type: ignore[assignment]
            # Ensure id is set - generate UUID if empty
            if not getattr(instance, "id", None):
                object.__setattr__(instance, "id", str(uuid4()))
            return instance  # type: ignore[return-value]
        if self.initial_state is dict:
            return cast(T, {"id": str(uuid4())})

@@ -1326,6 +1330,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if filtered_inputs:
            self._initialize_state(filtered_inputs)

        if get_current_parent_id() is None:
            reset_emission_counter()

        # Emit FlowStartedEvent and log the start of the flow.
        if not self.suppress_flow_events:
            future = crewai_event_bus.emit(
@@ -2053,7 +2060,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if isinstance(llm, str):
            llm_instance = LLM(model=llm)
        elif isinstance(llm, BaseLLMClass):
            llm_instance = llm  # type: ignore[assignment]
        else:
            raise ValueError(f"Invalid llm type: {type(llm)}. Expected str or BaseLLM.")

@@ -2090,7 +2097,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

            try:
                parsed = json.loads(response)
                return parsed.get("outcome", outcomes[0])  # type: ignore[no-any-return]
            except json.JSONDecodeError:
                # Not valid JSON, might be raw outcome string
                response_clean = response.strip()
@@ -2099,9 +2106,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
                    return outcome
            return outcomes[0]
        elif isinstance(response, FeedbackOutcome):
            return response.outcome  # type: ignore[no-any-return]
        elif hasattr(response, "outcome"):
            return response.outcome  # type: ignore[no-any-return]
        else:
            # Unexpected type, fall back to first outcome
            logger.warning(f"Unexpected response type: {type(response)}")
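

# --- Example (editor's sketch) -----------------------------------------------
# Restoring a flow paused for human feedback, using the from_pending() and
# pending_feedback APIs shown in the hunks above. The SQLiteFlowPersistence
# import path, the flow_cls argument, and the method_name attribute access are
# assumptions for illustration.
#
# from crewai.flow.persistence import SQLiteFlowPersistence  # assumed path
#
#
# def restore_paused_flow(flow_cls, flow_id: str):
#     persistence = SQLiteFlowPersistence()
#     flow = flow_cls.from_pending(flow_id=flow_id, persistence=persistence)
#     if flow.pending_feedback is not None:
#         # The flow is still waiting; pending_feedback carries the paused method.
#         print(f"Waiting on: {flow.pending_feedback.method_name}")
#     return flow

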
@@ -241,6 +241,9 @@ class ToolUsage:
        if self.task:
            self.task.increment_tools_errors()

        started_at = time.time()
        started_event_emitted = False

        if self.agent:
            event_data = {
                "agent_key": self.agent.key,
@@ -258,151 +261,162 @@ class ToolUsage:
                event_data["task_name"] = self.task.name or self.task.description
                event_data["task_id"] = str(self.task.id)
            crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
            started_event_emitted = True

        from_cache = False
        result = None  # type: ignore
        should_retry = False
        available_tool = None

        try:
            if self.tools_handler and self.tools_handler.cache:
                input_str = ""
                if calling.arguments:
                    if isinstance(calling.arguments, dict):
                        input_str = json.dumps(calling.arguments)
                    else:
                        input_str = str(calling.arguments)

                result = self.tools_handler.cache.read(
                    tool=calling.tool_name, input=input_str
                )  # type: ignore
                from_cache = result is not None

            available_tool = next(
                (
                    available_tool
                    for available_tool in self.tools
                    if available_tool.name == tool.name
                ),
                None,
            )

            usage_limit_error = self._check_usage_limit(available_tool, tool.name)
            if usage_limit_error:
                result = usage_limit_error
                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                result = self._format_result(result=result)
                # Don't return early - fall through to finally block
            elif result is None:
                try:
                    if calling.tool_name in [
                        "Delegate work to coworker",
                        "Ask question to coworker",
                    ]:
                        coworker = (
                            calling.arguments.get("coworker")
                            if calling.arguments
                            else None
                        )
                        if self.task:
                            self.task.increment_delegations(coworker)

                    if calling.arguments:
                        try:
                            acceptable_args = tool.args_schema.model_json_schema()[
                                "properties"
                            ].keys()
                            arguments = {
                                k: v
                                for k, v in calling.arguments.items()
                                if k in acceptable_args
                            }
                            arguments = self._add_fingerprint_metadata(arguments)
                            result = await tool.ainvoke(input=arguments)
                        except Exception:
                            arguments = calling.arguments
                            arguments = self._add_fingerprint_metadata(arguments)
                            result = await tool.ainvoke(input=arguments)
                    else:
                        arguments = self._add_fingerprint_metadata({})
                        result = await tool.ainvoke(input=arguments)

                    if self.tools_handler:
                        should_cache = True
                        if (
                            hasattr(available_tool, "cache_function")
                            and available_tool.cache_function
                        ):
                            should_cache = available_tool.cache_function(
                                calling.arguments, result
                            )

                        self.tools_handler.on_tool_use(
                            calling=calling, output=result, should_cache=should_cache
                        )

                    self._telemetry.tool_usage(
                        llm=self.function_calling_llm,
                        tool_name=tool.name,
                        attempts=self._run_attempts,
                    )
                    result = self._format_result(result=result)
                    data = {
                        "result": result,
                        "tool_name": tool.name,
                        "tool_args": calling.arguments,
                    }

                    if (
                        hasattr(available_tool, "result_as_answer")
                        and available_tool.result_as_answer
                    ):
                        result_as_answer = available_tool.result_as_answer
                        data["result_as_answer"] = result_as_answer

                    if self.agent and hasattr(self.agent, "tools_results"):
                        self.agent.tools_results.append(data)

                    if available_tool and hasattr(
                        available_tool, "current_usage_count"
                    ):
                        available_tool.current_usage_count += 1
                        if (
                            hasattr(available_tool, "max_usage_count")
                            and available_tool.max_usage_count is not None
                        ):
                            self._printer.print(
                                content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
                                color="blue",
                            )

                except Exception as e:
                    self.on_tool_error(tool=tool, tool_calling=calling, e=e)
                    self._run_attempts += 1
                    if self._run_attempts > self._max_parsing_attempts:
                        self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                        error_message = self._i18n.errors(
                            "tool_usage_exception"
                        ).format(error=e, tool=tool.name, tool_inputs=tool.description)
                        result = ToolUsageError(
                            f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
                        ).message
                        if self.task:
                            self.task.increment_tools_errors()
                        if self.agent and self.agent.verbose:
                            self._printer.print(
                                content=f"\n\n{error_message}\n", color="red"
                            )
                    else:
                        if self.task:
                            self.task.increment_tools_errors()
                        should_retry = True
            else:
                result = self._format_result(result=result)

        finally:
            if started_event_emitted:
                self.on_tool_use_finished(
                    tool=tool,
                    tool_calling=calling,
                    from_cache=from_cache,
                    started_at=started_at,
                    result=result,
                )

        # Handle retry after finally block ensures finished event was emitted
        if should_retry:
            return await self.ause(calling=calling, tool_string=tool_string)

        return result
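

# --- Example (editor's sketch) -----------------------------------------------
# The control flow above defers the retry until after the finally block so a
# "finished" event is emitted exactly once per attempt. The same shape in
# miniature, with generic stand-ins (emit/do_work are assumed stubs, not
# crewAI APIs):
#
# def emit(name: str) -> None:  # stand-in for crewai_event_bus.emit(...)
#     print(name)
#
#
# def do_work() -> str:  # stand-in for the real tool invocation
#     return "ok"
#
#
# def run_with_paired_events(attempt: int = 0) -> str:
#     should_retry = False
#     result = ""
#     emit("started")  # one "started" per attempt
#     try:
#         try:
#             result = do_work()
#         except Exception:
#             if attempt < 2:
#                 should_retry = True  # remember, but do not recurse yet
#             else:
#                 result = "gave up"
#     finally:
#         emit("finished")  # always paired with "started", even on retry paths
#     if should_retry:  # recurse only after the finished event went out
#         return run_with_paired_events(attempt + 1)
#     return result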


@@ -412,6 +426,7 @@ class ToolUsage:
        tool: CrewStructuredTool,
        calling: ToolCalling | InstructorToolCalling,
    ) -> str:
        # Repeated usage check happens before event emission - safe to return early
        if self._check_tool_repeated_usage(calling=calling):
            try:
                result = self._i18n.errors("task_repeated_usage").format(
@@ -428,6 +443,9 @@ class ToolUsage:
        if self.task:
            self.task.increment_tools_errors()

        started_at = time.time()
        started_event_emitted = False

        if self.agent:
            event_data = {
                "agent_key": self.agent.key,
@@ -446,155 +464,162 @@ class ToolUsage:
                event_data["task_name"] = self.task.name or self.task.description
                event_data["task_id"] = str(self.task.id)
            crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
            started_event_emitted = True

        from_cache = False
        result = None  # type: ignore
        should_retry = False
        available_tool = None

        try:
            if self.tools_handler and self.tools_handler.cache:
                input_str = ""
                if calling.arguments:
                    if isinstance(calling.arguments, dict):
                        input_str = json.dumps(calling.arguments)
                    else:
                        input_str = str(calling.arguments)

                result = self.tools_handler.cache.read(
                    tool=calling.tool_name, input=input_str
                )  # type: ignore
                from_cache = result is not None

            available_tool = next(
                (
                    available_tool
                    for available_tool in self.tools
                    if available_tool.name == tool.name
                ),
                None,
            )

            usage_limit_error = self._check_usage_limit(available_tool, tool.name)
            if usage_limit_error:
                result = usage_limit_error
                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                result = self._format_result(result=result)
                # Don't return early - fall through to finally block
            elif result is None:
                try:
                    if calling.tool_name in [
                        "Delegate work to coworker",
                        "Ask question to coworker",
                    ]:
                        coworker = (
                            calling.arguments.get("coworker")
                            if calling.arguments
                            else None
                        )
                        if self.task:
                            self.task.increment_delegations(coworker)

                    if calling.arguments:
                        try:
                            acceptable_args = tool.args_schema.model_json_schema()[
                                "properties"
                            ].keys()
                            arguments = {
                                k: v
                                for k, v in calling.arguments.items()
                                if k in acceptable_args
                            }
                            arguments = self._add_fingerprint_metadata(arguments)
                            result = tool.invoke(input=arguments)
                        except Exception:
                            arguments = calling.arguments
                            arguments = self._add_fingerprint_metadata(arguments)
                            result = tool.invoke(input=arguments)
                    else:
                        arguments = self._add_fingerprint_metadata({})
                        result = tool.invoke(input=arguments)

                    if self.tools_handler:
                        should_cache = True
                        if (
                            hasattr(available_tool, "cache_function")
                            and available_tool.cache_function
                        ):
                            should_cache = available_tool.cache_function(
                                calling.arguments, result
                            )

                        self.tools_handler.on_tool_use(
                            calling=calling, output=result, should_cache=should_cache
                        )

                    self._telemetry.tool_usage(
                        llm=self.function_calling_llm,
                        tool_name=tool.name,
                        attempts=self._run_attempts,
                    )
                    result = self._format_result(result=result)
                    data = {
                        "result": result,
                        "tool_name": tool.name,
                        "tool_args": calling.arguments,
                    }

                    if (
                        hasattr(available_tool, "result_as_answer")
                        and available_tool.result_as_answer
                    ):
                        result_as_answer = available_tool.result_as_answer
                        data["result_as_answer"] = result_as_answer

                    if self.agent and hasattr(self.agent, "tools_results"):
                        self.agent.tools_results.append(data)

                    if available_tool and hasattr(
                        available_tool, "current_usage_count"
                    ):
                        available_tool.current_usage_count += 1
                        if (
                            hasattr(available_tool, "max_usage_count")
                            and available_tool.max_usage_count is not None
                        ):
                            self._printer.print(
                                content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
                                color="blue",
                            )

                except Exception as e:
                    self.on_tool_error(tool=tool, tool_calling=calling, e=e)
                    self._run_attempts += 1
                    if self._run_attempts > self._max_parsing_attempts:
                        self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                        error_message = self._i18n.errors(
                            "tool_usage_exception"
                        ).format(error=e, tool=tool.name, tool_inputs=tool.description)
                        result = ToolUsageError(
                            f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
                        ).message
                        if self.task:
                            self.task.increment_tools_errors()
                        if self.agent and self.agent.verbose:
                            self._printer.print(
                                content=f"\n\n{error_message}\n", color="red"
                            )
                    else:
                        if self.task:
                            self.task.increment_tools_errors()
                        should_retry = True
            else:
                result = self._format_result(result=result)

        finally:
            if started_event_emitted:
                self.on_tool_use_finished(
                    tool=tool,
                    tool_calling=calling,
                    from_cache=from_cache,
                    started_at=started_at,
                    result=result,
                )

        # Handle retry after finally block ensures finished event was emitted
        if should_retry:
            return self.use(calling=calling, tool_string=tool_string)

        return result


@@ -26,9 +26,13 @@ def mock_agent() -> MagicMock:


@pytest.fixture
def mock_task(mock_context: MagicMock) -> MagicMock:
    """Create a mock Task."""
    task = MagicMock()
    task.id = mock_context.task_id
    task.name = "Mock Task"
    task.description = "Mock task description"
    return task


@pytest.fixture
@@ -179,8 +183,8 @@ class TestExecute:
        event = first_call[0][1]

        assert event.type == "a2a_server_task_started"
        assert event.task_id == mock_context.task_id
        assert event.context_id == mock_context.context_id

    @pytest.mark.asyncio
    async def test_emits_completed_event(
@@ -201,7 +205,7 @@ class TestExecute:
        event = second_call[0][1]

        assert event.type == "a2a_server_task_completed"
        assert event.task_id == mock_context.task_id
        assert event.result == "Task completed successfully"

    @pytest.mark.asyncio
@@ -250,7 +254,7 @@ class TestExecute:
        event = canceled_call[0][1]

        assert event.type == "a2a_server_task_canceled"
        assert event.task_id == mock_context.task_id


class TestCancel:


@@ -14,6 +14,16 @@ except ImportError:
    A2A_SDK_INSTALLED = False


def _create_mock_agent_card(name: str = "Test", url: str = "http://test-endpoint.com/"):
    """Create a mock agent card with proper model_dump behavior."""
    mock_card = MagicMock()
    mock_card.name = name
    mock_card.url = url
    mock_card.model_dump.return_value = {"name": name, "url": url}
    mock_card.model_dump_json.return_value = f'{{"name": "{name}", "url": "{url}"}}'
    return mock_card


@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_true_returns_directly():
    """When trust_remote_completion_status=True and A2A returns completed, return result directly."""
@@ -44,8 +54,7 @@ def test_trust_remote_completion_status_true_returns_directly():
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = _create_mock_agent_card()
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})

        # A2A returns completed
@@ -110,8 +119,7 @@ def test_trust_remote_completion_status_false_continues_conversation():
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = _create_mock_agent_card()
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})

        # A2A returns completed
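

# --- Note (editor's) ----------------------------------------------------------
# The four files below are VCR.py cassettes: recorded OpenAI chat-completion
# exchanges with secrets scrubbed (AUTHORIZATION-XXX and similar). A test
# typically replays one via pytest-recording; the decorator and the
# crew_factory fixture below are assumptions about wiring, not part of this
# changeset.
#
# import pytest
#
#
# @pytest.mark.vcr()
# def test_replays_recorded_completion(crew_factory):  # crew_factory is assumed
#     crew = crew_factory(agent_role="Helper", task="Say 'done' and nothing else.")
#     assert "done" in crew.kickoff().raw.lower()

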
@@ -0,0 +1,115 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
      personal goal is: Help with tasks\nTo give my best complete final answer to
      the task respond using the exact following format:\n\nThought: I now can give
      a great answer\nFinal Answer: Your final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''done''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word done.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '794'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAAwAAAP//jFLBbtswDL37Kwid4yJOnTr1regwrMN2C7DDVhisTNtaZUmQ6GZFkX8f
        ZKexu3XALgbMx/f0HsmXBECoWpQgZIcse6fTD+t12B1QfpN+n/Ht/stjc2icvvsaPj1/FqvIsA8/
        SfIr60La3mliZc0ES0/IFFWz4mq3u94Wm3wEeluTjrTWcZpfZGmvjEo36802Xedplp/onVWSgijh
        ewIA8DJ+o1FT0y9Rwnr1WukpBGxJlOcmAOGtjhWBIajAaFisZlBaw2RG7/vODm3HJdyBsQeQaKBV
        TwQIbQwAaMKB/A/zURnUcDP+lVBbQ0tBT80QMKYyg9YLAI2xjHEqY5T7E3I8m9e2dd4+hD+oolFG
        ha7yhMGaaDSwdWJEjwnA/Tik4U1u4bztHVdsH2l8LtteT3piXs4CzU8gW0a9qBeXq3f0qpoYlQ6L
        MQuJsqN6ps47waFWdgEki9R/u3lPe0quTPs/8jMgJTmmunKeaiXfJp7bPMXb/VfbecqjYRHIPylJ
        FSvycRM1NTjo6aBEeA5MfdUo05J3Xk1X1bjqsrjCQkpqMpEck98AAAD//wMAnStaOGQDAAA=
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 07:55:25 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '601'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '628'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,115 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
      personal goal is: Help with tasks\nTo give my best complete final answer to
      the task respond using the exact following format:\n\nThought: I now can give
      a great answer\nFinal Answer: Your final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''hi''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word hi.\nyou MUST return the actual complete content as the final answer, not
      a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '790'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAAwAAAP//jFLBbtswDL37Kwid48K2k7rxLUAxdIdil24osBUGI9M2N1kSJLnpUOTf
        Bzlp7HYdsIsB8/E9vUfyJQEQXIsShOwwyN6q9CbL/Gb7sH/YfZUFP+Pdt1u/VTdfbu8afS8WkWF2
        P0mGV9aFNL1VFNjoIywdYaComhdX19ebdbFcj0BvalKR1tqQri7ytGfN6TJbrtNslearE70zLMmL
        Er4nAAAv4zca1TU9ixKyxWulJ++xJVGemwCEMypWBHrPPqAOYjGB0uhAevR+35mh7UIJn0GbPUjU
        0PITAUIbAwBqvyf3Q39ijQq2418JHc/lHDWDx5hJD0rNANTaBIwzGYM8npDD2boyrXVm599RRcOa
        fVc5Qm90tOmDsWJEDwnA4zii4U1qYZ3pbaiC+UXjc/l6c9QT02pm6OoEBhNQzerF5eIDvaqmgKz8
        bMhCouyonqjTRnCo2cyAZJb6bzcfaR+Ts27/R34CpCQbqK6so5rl28RTm6N4uf9qO095NCw8uSeW
        VAUmFzdRU4ODOp6T8L99oL5qWLfkrOPjTTW2uiyusJCSmlwkh+QPAAAA//8DAASWsy5iAwAA
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 07:55:26 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '369'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '391'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,115 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''yes'' and nothing else.\n\nThis is the expected criteria for your final answer:
      The word yes.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '809'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAAwAAAP//jFJda9wwEHz3r1j0fA72nT9SvxVC0mvfSimUNpiNvLbVypKQ5CRHuP9e
        ZF/OTpNAXwze2RnN7O5TBMBEwypgvEfPByPjqyRxN1efH2+w+DGkn/bF9a7Nv/f7/PA1+8I2gaHv
        fhP3z6wLrgcjyQutZphbQk9BNS2Ly8sPebnbTsCgG5KB1hkfZxdpPAgl4m2yzeMki9PsRO+14ORY
        BT8jAICn6RuMqoYeWQXJ5rkykHPYEavOTQDMahkqDJ0TzqPybLOAXCtPavL+rddj1/sK9qD0A3BU
        0Il7AoQuBABU7oHsL3UtFEr4OP1VcCC31rPUjg5DKDVKuQJQKe0xDGVKcntCjmfvUnfG6jv3D5W1
        QgnX15bQaRV8Oq8Nm9BjBHA7zWh8EZsZqwfja6//0PRcWqSzHlt2s0KzE+i1R7mql/nmDb26IY9C
        utWUGUfeU7NQl5Xg2Ai9AqJV6tdu3tKekwvV/Y/8AnBOxlNTG0uN4C8TL22Wwum+13ae8mSYObL3
        glPtBdmwiYZaHOV8T8wdnKehboXqyBor5qNqTb0rCyw5pzZl0TH6CwAA//8DAPUTEd9jAwAA
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 07:55:33 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '418'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '434'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,115 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''hello'' and nothing else.\n\nThis is the expected criteria for your final
      answer: The word hello.\nyou MUST return the actual complete content as the
      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '813'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA4xSTW/bMAy9+1cQOsdFnOarvm0YtvWyU4EdtsJgZdrWJouCRKcrivz3QU4au1sH
        7GLAfHxP75F8zgCUqVUJSncouvc2/7Bcxk9PX776fs+fe2zu+PYgZLvu/VZu1CIx+OEHaXlhXWnu
        vSUx7E6wDoRCSbXYbff7m83uejUCPddkE631kq+virw3zuSr5WqTL9d5sT7TOzaaoirhWwYA8Dx+
        k1FX0y9VwnLxUukpRmxJlZcmABXYprICGE0UdKIWE6jZCbnR+13HQ9tJCbfg+BE0OmjNgQChTQEA
        XXyk8N19NA4tvBv/SujIWp4rBmqGiCmWG6ydAegcC6axjFnuz8jx4t5y2wM/xD+oqjHOxK4KhJFd
        chqFvRrRYwZwP05peBVc+cC9l0r4J43PFdvipKem7czQ9RkUFrSz+m6zeEOvqknQ2Dibs9KoO6on
        6rQUHGrDMyCbpf7bzVvap+TGtf8jPwFakxeqKx+oNvp14qktUDref7VdpjwaVpHCwWiqxFBIm6ip
        wcGeLkrFpyjUV41xLQUfzOmsGl9d77a405qaQmXH7DcAAAD//wMAQklYDmUDAAA=
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 07:55:32 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '581'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '619'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,115 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''ok'' and nothing else.\n\nThis is the expected criteria for your final answer:
      The word ok.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '807'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAAwAAAP//jFJda9wwEHz3r1j0fA72fTnxW6GU9iglD4FC22A28tpWI0tCWicN4f57
        kX05O20KfTF4Z2c0s7vPCYBQtShByA5Z9k6n77MsfCRyX9ZDsTnsrp/Wh5v++uu37urzoRWryLB3
        P0nyC+tC2t5pYmXNBEtPyBRV82J/eXm1KzabEehtTTrSWsfp9iJPe2VUus7WuzTbpvn2RO+skhRE
        Cd8TAIDn8RuNmpp+iRKy1UulpxCwJVGemwCEtzpWBIagAqNhsZpBaQ2TGb3fdHZoOy7hExj7CBIN
        tOqBAKGNAQBNeCT/w3xQBjW8G/9KsPdLOU/NEDBmMoPWCwCNsYxxJmOQ2xNyPFvXtnXe3oU/qKJR
        RoWu8oTBmmgzsHViRI8JwO04ouFVauG87R1XbO9pfC7f55OemFezQLcnkC2jXtSL3eoNvaomRqXD
        YshCouyonqnzRnColV0AySL1327e0p6SK9P+j/wMSEmOqa6cp1rJ14nnNk/xcv/Vdp7yaFgE8g9K
        UsWKfNxETQ0OejonEZ4CU181yrTknVfTTTWu2hR7LKSkJhfJMfkNAAD//wMAw/X5HWIDAAA=
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 07:55:34 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '499'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '517'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
99
lib/crewai/tests/events/test_event_context.py
Normal file
@@ -0,0 +1,99 @@
"""Tests for event context management."""

import pytest

from crewai.events.event_context import (
    SCOPE_ENDING_EVENTS,
    SCOPE_STARTING_EVENTS,
    VALID_EVENT_PAIRS,
    EmptyStackError,
    EventPairingError,
    MismatchBehavior,
    StackDepthExceededError,
    _event_context_config,
    EventContextConfig,
    get_current_parent_id,
    get_enclosing_parent_id,
    handle_empty_pop,
    handle_mismatch,
    pop_event_scope,
    push_event_scope,
)


class TestStackOperations:
    """Tests for stack push/pop operations."""

    def test_empty_stack_returns_none(self) -> None:
        assert get_current_parent_id() is None
        assert get_enclosing_parent_id() is None

    def test_push_and_get_parent(self) -> None:
        push_event_scope("event-1", "task_started")
        assert get_current_parent_id() == "event-1"

    def test_nested_push(self) -> None:
        push_event_scope("event-1", "crew_kickoff_started")
        push_event_scope("event-2", "task_started")
        assert get_current_parent_id() == "event-2"
        assert get_enclosing_parent_id() == "event-1"

    def test_pop_restores_parent(self) -> None:
        push_event_scope("event-1", "crew_kickoff_started")
        push_event_scope("event-2", "task_started")
        popped = pop_event_scope()
        assert popped == ("event-2", "task_started")
        assert get_current_parent_id() == "event-1"

    def test_pop_empty_stack_returns_none(self) -> None:
        assert pop_event_scope() is None


class TestStackDepthLimit:
    """Tests for stack depth limit."""

    def test_depth_limit_exceeded_raises(self) -> None:
        _event_context_config.set(EventContextConfig(max_stack_depth=3))

        push_event_scope("event-1", "type-1")
        push_event_scope("event-2", "type-2")
        push_event_scope("event-3", "type-3")

        with pytest.raises(StackDepthExceededError):
            push_event_scope("event-4", "type-4")


class TestMismatchHandling:
    """Tests for mismatch behavior."""

    def test_handle_mismatch_raises_when_configured(self) -> None:
        _event_context_config.set(
            EventContextConfig(mismatch_behavior=MismatchBehavior.RAISE)
        )

        with pytest.raises(EventPairingError):
            handle_mismatch("task_completed", "llm_call_started", "task_started")

    def test_handle_empty_pop_raises_when_configured(self) -> None:
        _event_context_config.set(
            EventContextConfig(empty_pop_behavior=MismatchBehavior.RAISE)
        )

        with pytest.raises(EmptyStackError):
            handle_empty_pop("task_completed")


class TestEventTypeSets:
    """Tests for event type set completeness."""

    def test_all_ending_events_have_pairs(self) -> None:
        for ending_event in SCOPE_ENDING_EVENTS:
            assert ending_event in VALID_EVENT_PAIRS

    def test_all_pairs_reference_starting_events(self) -> None:
        for ending_event, starting_event in VALID_EVENT_PAIRS.items():
            assert starting_event in SCOPE_STARTING_EVENTS

    def test_starting_and_ending_are_disjoint(self) -> None:
        overlap = SCOPE_STARTING_EVENTS & SCOPE_ENDING_EVENTS
        assert not overlap
508
lib/crewai/tests/events/test_event_ordering.py
Normal file
@@ -0,0 +1,508 @@
"""Tests for event ordering and parent-child relationships."""

import pytest

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import (
    AgentExecutionCompletedEvent,
    AgentExecutionStartedEvent,
)
from crewai.events.types.crew_events import (
    CrewKickoffCompletedEvent,
    CrewKickoffStartedEvent,
)
from crewai.events.types.flow_events import (
    FlowFinishedEvent,
    FlowStartedEvent,
    MethodExecutionFinishedEvent,
    MethodExecutionStartedEvent,
)
from crewai.events.types.llm_events import (
    LLMCallCompletedEvent,
    LLMCallStartedEvent,
)
from crewai.events.types.task_events import (
    TaskCompletedEvent,
    TaskStartedEvent,
)
from crewai.flow.flow import Flow, listen, start
from crewai.task import Task


class EventCollector:
    """Collects events and provides helpers to find related events."""

    def __init__(self) -> None:
        self.events: list[BaseEvent] = []

    def add(self, event: BaseEvent) -> None:
        self.events.append(event)

    def first(self, event_type: type[BaseEvent]) -> BaseEvent | None:
        for e in self.events:
            if isinstance(e, event_type):
                return e
        return None

    def all_of(self, event_type: type[BaseEvent]) -> list[BaseEvent]:
        return [e for e in self.events if isinstance(e, event_type)]

    def with_parent(self, parent_id: str) -> list[BaseEvent]:
        return [e for e in self.events if e.parent_event_id == parent_id]


@pytest.fixture
def collector() -> EventCollector:
    """Fixture that collects events during test execution."""
    c = EventCollector()

    @crewai_event_bus.on(CrewKickoffStartedEvent)
    def h1(source, event):
        c.add(event)

    @crewai_event_bus.on(CrewKickoffCompletedEvent)
    def h2(source, event):
        c.add(event)

    @crewai_event_bus.on(TaskStartedEvent)
    def h3(source, event):
        c.add(event)

    @crewai_event_bus.on(TaskCompletedEvent)
    def h4(source, event):
        c.add(event)

    @crewai_event_bus.on(AgentExecutionStartedEvent)
    def h5(source, event):
        c.add(event)

    @crewai_event_bus.on(AgentExecutionCompletedEvent)
    def h6(source, event):
        c.add(event)

    @crewai_event_bus.on(LLMCallStartedEvent)
    def h7(source, event):
        c.add(event)

    @crewai_event_bus.on(LLMCallCompletedEvent)
    def h8(source, event):
        c.add(event)

    @crewai_event_bus.on(FlowStartedEvent)
    def h9(source, event):
        c.add(event)

    @crewai_event_bus.on(FlowFinishedEvent)
    def h10(source, event):
        c.add(event)

    @crewai_event_bus.on(MethodExecutionStartedEvent)
    def h11(source, event):
        c.add(event)

    @crewai_event_bus.on(MethodExecutionFinishedEvent)
    def h12(source, event):
        c.add(event)

    return c


class TestCrewEventOrdering:
    """Tests for event ordering in crew execution."""

    @pytest.mark.vcr()
    def test_crew_events_have_event_ids(self, collector: EventCollector) -> None:
        """Every crew event should have a unique event_id."""
        agent = Agent(
            role="Responder",
            goal="Respond briefly",
            backstory="You give short answers.",
            verbose=False,
        )
        task = Task(
            description="Say 'hello' and nothing else.",
            expected_output="The word hello.",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task], verbose=False)
        crew.kickoff()
        crewai_event_bus.flush()

        started = collector.first(CrewKickoffStartedEvent)
        completed = collector.first(CrewKickoffCompletedEvent)

        assert started is not None
        assert started.event_id is not None
        assert len(started.event_id) > 0

        assert completed is not None
        assert completed.event_id is not None
        assert completed.event_id != started.event_id

    @pytest.mark.vcr()
    def test_crew_completed_after_started(self, collector: EventCollector) -> None:
        """Crew completed event should have higher sequence than started."""
        agent = Agent(
            role="Responder",
            goal="Respond briefly",
            backstory="You give short answers.",
            verbose=False,
        )
        task = Task(
            description="Say 'yes' and nothing else.",
            expected_output="The word yes.",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task], verbose=False)
        crew.kickoff()
        crewai_event_bus.flush()

        started = collector.first(CrewKickoffStartedEvent)
        completed = collector.first(CrewKickoffCompletedEvent)

        assert started is not None
        assert completed is not None
        assert started.emission_sequence is not None
        assert completed.emission_sequence is not None
        assert completed.emission_sequence > started.emission_sequence

    @pytest.mark.vcr()
    def test_task_parent_is_crew(self, collector: EventCollector) -> None:
        """Task events should have crew event as parent."""
        agent = Agent(
            role="Responder",
            goal="Respond briefly",
            backstory="You give short answers.",
            verbose=False,
        )
        task = Task(
            description="Say 'ok' and nothing else.",
            expected_output="The word ok.",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task], verbose=False)
        crew.kickoff()
        crewai_event_bus.flush()

        crew_started = collector.first(CrewKickoffStartedEvent)
        task_started = collector.first(TaskStartedEvent)

        assert crew_started is not None
        assert task_started is not None
        assert task_started.parent_event_id == crew_started.event_id


class TestAgentEventOrdering:
    """Tests for event ordering in agent execution."""

    @pytest.mark.vcr()
    def test_agent_events_have_event_ids(self, collector: EventCollector) -> None:
        """Agent execution events should have event_ids."""
        agent = Agent(
            role="Helper",
            goal="Help with tasks",
            backstory="You help.",
            verbose=False,
        )
        task = Task(
            description="Say 'done' and nothing else.",
            expected_output="The word done.",
            agent=agent,
        )
        agent.execute_task(task)
        crewai_event_bus.flush()

        started = collector.first(AgentExecutionStartedEvent)
        completed = collector.first(AgentExecutionCompletedEvent)

        if started:
            assert started.event_id is not None

        if completed:
            assert completed.event_id is not None

    @pytest.mark.vcr()
    def test_llm_events_have_parent(self, collector: EventCollector) -> None:
        """LLM call events should have a parent event."""
        agent = Agent(
            role="Helper",
            goal="Help with tasks",
            backstory="You help.",
            verbose=False,
        )
        task = Task(
            description="Say 'hi' and nothing else.",
            expected_output="The word hi.",
            agent=agent,
        )
        agent.execute_task(task)
        crewai_event_bus.flush()

        llm_started = collector.first(LLMCallStartedEvent)

        if llm_started:
            assert llm_started.event_id is not None
            # LLM events should have some parent in the hierarchy
            assert llm_started.parent_event_id is not None


class TestFlowWithCrewEventOrdering:
    """Tests for event ordering in flows containing crews."""

    @pytest.mark.vcr()
    def test_flow_events_have_ids(self, collector: EventCollector) -> None:
        """Flow events should have event_ids."""
        agent = Agent(
            role="Worker",
            goal="Do work",
            backstory="You work.",
            verbose=False,
        )
        task = Task(
            description="Say 'complete' and nothing else.",
            expected_output="The word complete.",
            agent=agent,
        )

        class SimpleFlow(Flow):
            @start()
            def run_crew(self):
                c = Crew(agents=[agent], tasks=[task], verbose=False)
                return c.kickoff()

        flow = SimpleFlow()
        flow.kickoff()
        crewai_event_bus.flush()

        flow_started = collector.first(FlowStartedEvent)
        flow_finished = collector.first(FlowFinishedEvent)

        assert flow_started is not None
        assert flow_started.event_id is not None

        assert flow_finished is not None
        assert flow_finished.event_id is not None

    @pytest.mark.vcr()
    def test_method_parent_is_flow(self, collector: EventCollector) -> None:
        """Method execution events should have flow as parent."""
        agent = Agent(
            role="Worker",
            goal="Do work",
            backstory="You work.",
            verbose=False,
        )
        task = Task(
            description="Say 'ready' and nothing else.",
            expected_output="The word ready.",
            agent=agent,
        )

        class FlowWithMethod(Flow):
            @start()
            def my_method(self):
                c = Crew(agents=[agent], tasks=[task], verbose=False)
                return c.kickoff()

        flow = FlowWithMethod()
        flow.kickoff()
        crewai_event_bus.flush()

        flow_started = collector.first(FlowStartedEvent)
        method_started = collector.first(MethodExecutionStartedEvent)

        assert flow_started is not None
        assert method_started is not None
        assert method_started.parent_event_id == flow_started.event_id

    @pytest.mark.vcr()
    def test_crew_parent_is_method(self, collector: EventCollector) -> None:
        """Crew inside flow method should have method as parent."""
        agent = Agent(
            role="Worker",
            goal="Do work",
            backstory="You work.",
            verbose=False,
        )
        task = Task(
            description="Say 'go' and nothing else.",
            expected_output="The word go.",
            agent=agent,
        )

        class FlowWithCrew(Flow):
            @start()
            def run_it(self):
                c = Crew(agents=[agent], tasks=[task], verbose=False)
                return c.kickoff()

        flow = FlowWithCrew()
        flow.kickoff()
        crewai_event_bus.flush()

        method_started = collector.first(MethodExecutionStartedEvent)
        crew_started = collector.first(CrewKickoffStartedEvent)

        assert method_started is not None
        assert crew_started is not None
        assert crew_started.parent_event_id == method_started.event_id


class TestFlowWithMultipleCrewsEventOrdering:
    """Tests for event ordering in flows with multiple crews."""

    @pytest.mark.vcr()
    def test_two_crews_have_different_ids(self, collector: EventCollector) -> None:
        """Two crews in a flow should have different event_ids."""
        agent1 = Agent(
            role="First",
            goal="Be first",
            backstory="You go first.",
            verbose=False,
        )
        agent2 = Agent(
            role="Second",
            goal="Be second",
            backstory="You go second.",
            verbose=False,
        )
        task1 = Task(
            description="Say '1' and nothing else.",
            expected_output="The number 1.",
            agent=agent1,
        )
        task2 = Task(
            description="Say '2' and nothing else.",
            expected_output="The number 2.",
            agent=agent2,
        )

        class TwoCrewFlow(Flow):
            @start()
            def first(self):
                c = Crew(agents=[agent1], tasks=[task1], verbose=False)
                return c.kickoff()

            @listen(first)
            def second(self, _):
                c = Crew(agents=[agent2], tasks=[task2], verbose=False)
                return c.kickoff()

        flow = TwoCrewFlow()
        flow.kickoff()
        crewai_event_bus.flush()

        crew_started_events = collector.all_of(CrewKickoffStartedEvent)

        assert len(crew_started_events) >= 2
        assert crew_started_events[0].event_id != crew_started_events[1].event_id

    @pytest.mark.vcr()
    def test_second_crew_after_first(self, collector: EventCollector) -> None:
        """Second crew should have higher sequence than first."""
        agent1 = Agent(
            role="First",
            goal="Be first",
            backstory="You go first.",
            verbose=False,
        )
        agent2 = Agent(
            role="Second",
            goal="Be second",
            backstory="You go second.",
            verbose=False,
        )
        task1 = Task(
            description="Say 'a' and nothing else.",
            expected_output="The letter a.",
            agent=agent1,
        )
        task2 = Task(
            description="Say 'b' and nothing else.",
            expected_output="The letter b.",
            agent=agent2,
        )

        class SequentialCrewFlow(Flow):
            @start()
            def crew_a(self):
                c = Crew(agents=[agent1], tasks=[task1], verbose=False)
                return c.kickoff()

            @listen(crew_a)
            def crew_b(self, _):
                c = Crew(agents=[agent2], tasks=[task2], verbose=False)
                return c.kickoff()

        flow = SequentialCrewFlow()
        flow.kickoff()
        crewai_event_bus.flush()

        crew_started_events = collector.all_of(CrewKickoffStartedEvent)

        assert len(crew_started_events) >= 2
        first = crew_started_events[0]
        second = crew_started_events[1]

        assert first.emission_sequence is not None
        assert second.emission_sequence is not None
        assert second.emission_sequence > first.emission_sequence

    @pytest.mark.vcr()
    def test_tasks_have_correct_crew_parents(self, collector: EventCollector) -> None:
        """Tasks in different crews should have their own crew as parent."""
        agent1 = Agent(
            role="Alpha",
            goal="Do alpha work",
            backstory="You are alpha.",
            verbose=False,
        )
        agent2 = Agent(
            role="Beta",
            goal="Do beta work",
            backstory="You are beta.",
            verbose=False,
        )
        task1 = Task(
            description="Say 'alpha' and nothing else.",
            expected_output="The word alpha.",
            agent=agent1,
        )
        task2 = Task(
            description="Say 'beta' and nothing else.",
            expected_output="The word beta.",
            agent=agent2,
        )

        class ParentTestFlow(Flow):
            @start()
            def alpha_crew(self):
                c = Crew(agents=[agent1], tasks=[task1], verbose=False)
                return c.kickoff()

            @listen(alpha_crew)
            def beta_crew(self, _):
                c = Crew(agents=[agent2], tasks=[task2], verbose=False)
                return c.kickoff()

        flow = ParentTestFlow()
        flow.kickoff()
        crewai_event_bus.flush()

        crew_started_events = collector.all_of(CrewKickoffStartedEvent)
        task_started_events = collector.all_of(TaskStartedEvent)

        assert len(crew_started_events) >= 2
        assert len(task_started_events) >= 2

        crew1_id = crew_started_events[0].event_id
        crew2_id = crew_started_events[1].event_id

        task1_parent = task_started_events[0].parent_event_id
        task2_parent = task_started_events[1].parent_event_id

        assert task1_parent == crew1_id
        assert task2_parent == crew2_id
@@ -70,6 +70,9 @@ def test_long_term_memory_save_events(long_term_memory):
        "from_agent": None,
        "agent_role": "test_agent",
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "emission_sequence": ANY,
        "value": "test_task",
        "metadata": {"task": "test_task", "quality": 0.5},
    }
@@ -85,6 +88,9 @@ def test_long_term_memory_save_events(long_term_memory):
        "from_agent": None,
        "agent_role": "test_agent",
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "emission_sequence": ANY,
        "value": "test_task",
        "metadata": {
            "task": "test_task",
@@ -139,6 +145,9 @@ def test_long_term_memory_search_events(long_term_memory):
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "emission_sequence": ANY,
        "query": "test query",
        "limit": 5,
        "score_threshold": None,
@@ -156,6 +165,9 @@ def test_long_term_memory_search_events(long_term_memory):
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": ANY,
        "emission_sequence": ANY,
        "query": "test query",
        "results": None,
        "limit": 5,

@@ -81,6 +81,9 @@ def test_short_term_memory_search_events(short_term_memory):
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "emission_sequence": ANY,
        "query": "test value",
        "limit": 3,
        "score_threshold": 0.35,
@@ -98,6 +101,9 @@ def test_short_term_memory_search_events(short_term_memory):
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "emission_sequence": ANY,
        "query": "test value",
        "results": [],
        "limit": 3,
@@ -150,6 +156,9 @@ def test_short_term_memory_save_events(short_term_memory):
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "emission_sequence": ANY,
        "value": "test value",
        "metadata": {"task": "test_task"},
    }
@@ -166,6 +175,9 @@ def test_short_term_memory_save_events(short_term_memory):
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "emission_sequence": ANY,
        "value": "test value",
        "metadata": {"task": "test_task"},
        "save_time_ms": ANY,