Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-22 14:48:13 +00:00

Compare commits: devin/1769...main (2 commits: f997b73577, 7a65baeb9c)

conftest.py (25)
@@ -1,6 +1,7 @@
 """Pytest configuration for crewAI workspace."""
 
 from collections.abc import Generator
+import gzip
 import os
 from pathlib import Path
 import tempfile

@@ -31,6 +32,21 @@ def cleanup_event_handlers() -> Generator[None, Any, None]:
         pass
 
 
+@pytest.fixture(autouse=True, scope="function")
+def reset_event_state() -> None:
+    """Reset event system state before each test for isolation."""
+    from crewai.events.base_events import reset_emission_counter
+    from crewai.events.event_context import (
+        EventContextConfig,
+        _event_context_config,
+        _event_id_stack,
+    )
+
+    reset_emission_counter()
+    _event_id_stack.set(())
+    _event_context_config.set(EventContextConfig())
+
+
 @pytest.fixture(autouse=True, scope="function")
 def setup_test_environment() -> Generator[None, Any, None]:
     """Setup test environment for crewAI workspace."""

@@ -138,9 +154,14 @@ def _filter_request_headers(request: Request) -> Request:  # type: ignore[no-any
 
 def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any]:
     """Filter sensitive headers from response before recording."""
     # Remove Content-Encoding to prevent decompression issues on replay
 
     for encoding_header in ["Content-Encoding", "content-encoding"]:
-        response["headers"].pop(encoding_header, None)
+        if encoding_header in response["headers"]:
+            encoding = response["headers"].pop(encoding_header)
+            if encoding and encoding[0] == "gzip":
+                body = response.get("body", {}).get("string", b"")
+                if isinstance(body, bytes) and body.startswith(b"\x1f\x8b"):
+                    response["body"]["string"] = gzip.decompress(body).decode("utf-8")
 
     for header_name, replacement in HEADERS_TO_FILTER.items():
         for variant in [header_name, header_name.upper(), header_name.title()]:
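A minimal sketch (a hypothetical test module, not part of this change) of what the autouse reset_event_state fixture buys: the emission counter is reset before every test, so sequence numbers observed in one test cannot leak into the next.

    # Hypothetical test file relying on the autouse fixture above.
    from crewai.events.base_events import get_next_emission_sequence


    def test_sequence_starts_fresh() -> None:
        # The fixture reset the counter before this test ran.
        assert get_next_emission_sequence() == 1
        assert get_next_emission_sequence() == 2


    def test_previous_test_did_not_leak() -> None:
        # Same process, but the counter was reset again before this test.
        assert get_next_emission_sequence() == 1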
@@ -25,7 +25,7 @@ dependencies = [
     "openpyxl~=3.1.5",
     # Authentication and Security
     "python-dotenv~=1.1.1",
-    "pyjwt~=2.9.0",
+    "pyjwt>=2.9.0,<3",
     # Configuration and Utils
     "click~=8.1.7",
     "appdirs~=1.4.4",

@@ -36,7 +36,7 @@ dependencies = [
     "json5~=0.10.0",
     "portalocker~=2.7.0",
     "pydantic-settings~=2.10.1",
-    "mcp~=1.16.0",
+    "mcp~=1.23.1",
     "uv~=0.9.13",
     "aiosqlite~=0.21.0",
 ]
@@ -14,15 +14,25 @@ from typing import (
 from pydantic import BeforeValidator, HttpUrl, TypeAdapter
 from typing_extensions import NotRequired
 
-from crewai.a2a.updates import (
-    PollingConfig,
-    PollingHandler,
-    PushNotificationConfig,
-    PushNotificationHandler,
-    StreamingConfig,
-    StreamingHandler,
-    UpdateConfig,
-)
+try:
+    from crewai.a2a.updates import (
+        PollingConfig,
+        PollingHandler,
+        PushNotificationConfig,
+        PushNotificationHandler,
+        StreamingConfig,
+        StreamingHandler,
+        UpdateConfig,
+    )
+except ImportError:
+    PollingConfig = Any  # type: ignore[misc,assignment]
+    PollingHandler = Any  # type: ignore[misc,assignment]
+    PushNotificationConfig = Any  # type: ignore[misc,assignment]
+    PushNotificationHandler = Any  # type: ignore[misc,assignment]
+    StreamingConfig = Any  # type: ignore[misc,assignment]
+    StreamingHandler = Any  # type: ignore[misc,assignment]
+    UpdateConfig = Any  # type: ignore[misc,assignment]
 
 
 TransportType = Literal["JSONRPC", "GRPC", "HTTP+JSON"]
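The guard above follows the usual optional-dependency pattern; in isolation (with somepkg as a hypothetical stand-in for the optional extra) it looks like this:

    # Generic form of the fallback: alias missing names to Any so that
    # annotations referencing them still resolve when the extra is absent.
    from typing import Any

    try:
        from somepkg import RealConfig  # hypothetical optional dependency
    except ImportError:
        RealConfig = Any  # type: ignore[misc,assignment]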
@@ -251,30 +251,48 @@ async def aexecute_a2a_delegation(
     if turn_number is None:
         turn_number = len([m for m in conversation_history if m.role == Role.user]) + 1
 
-    result = await _aexecute_a2a_delegation_impl(
-        endpoint=endpoint,
-        auth=auth,
-        timeout=timeout,
-        task_description=task_description,
-        context=context,
-        context_id=context_id,
-        task_id=task_id,
-        reference_task_ids=reference_task_ids,
-        metadata=metadata,
-        extensions=extensions,
-        conversation_history=conversation_history,
-        is_multiturn=is_multiturn,
-        turn_number=turn_number,
-        agent_branch=agent_branch,
-        agent_id=agent_id,
-        agent_role=agent_role,
-        response_model=response_model,
-        updates=updates,
-        transport_protocol=transport_protocol,
-        from_task=from_task,
-        from_agent=from_agent,
-        skill_id=skill_id,
-    )
+    try:
+        result = await _aexecute_a2a_delegation_impl(
+            endpoint=endpoint,
+            auth=auth,
+            timeout=timeout,
+            task_description=task_description,
+            context=context,
+            context_id=context_id,
+            task_id=task_id,
+            reference_task_ids=reference_task_ids,
+            metadata=metadata,
+            extensions=extensions,
+            conversation_history=conversation_history,
+            is_multiturn=is_multiturn,
+            turn_number=turn_number,
+            agent_branch=agent_branch,
+            agent_id=agent_id,
+            agent_role=agent_role,
+            response_model=response_model,
+            updates=updates,
+            transport_protocol=transport_protocol,
+            from_task=from_task,
+            from_agent=from_agent,
+            skill_id=skill_id,
+        )
+    except Exception as e:
+        crewai_event_bus.emit(
+            agent_branch,
+            A2ADelegationCompletedEvent(
+                status="failed",
+                result=None,
+                error=str(e),
+                context_id=context_id,
+                is_multiturn=is_multiturn,
+                endpoint=endpoint,
+                metadata=metadata,
+                extensions=list(extensions.keys()) if extensions else None,
+                from_task=from_task,
+                from_agent=from_agent,
+            ),
+        )
+        raise
 
     agent_card_data: dict[str, Any] = result.get("agent_card") or {}
     crewai_event_bus.emit(
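The shape of this change, reduced to a hedged standalone sketch (names hypothetical): emit a terminal "failed" event, then re-raise so callers still see the exception and started/completed event pairs stay balanced.

    # Hypothetical reduction of the emit-on-failure pattern above.
    def run_with_failure_event(operation, emit_failed):
        try:
            return operation()
        except Exception as exc:
            emit_failed(str(exc))  # observers learn about the failure first
            raise                  # then the exception propagates unchanged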
@@ -14,7 +14,14 @@ from typing import (
 )
 from urllib.parse import urlparse
 
-from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
+from pydantic import (
+    BaseModel,
+    ConfigDict,
+    Field,
+    InstanceOf,
+    PrivateAttr,
+    model_validator,
+)
 from typing_extensions import Self
 
 from crewai.agent.utils import (

@@ -46,6 +53,7 @@ from crewai.events.types.knowledge_events import (
 )
 from crewai.events.types.memory_events import (
     MemoryRetrievalCompletedEvent,
+    MemoryRetrievalFailedEvent,
     MemoryRetrievalStartedEvent,
 )
 from crewai.experimental.agent_executor import AgentExecutor

@@ -85,17 +93,10 @@ from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler
 
 
-try:
-    from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
-except ImportError:
-    A2AClientConfig = Any
-    A2AConfig = Any
-    A2AServerConfig = Any
-
-
 if TYPE_CHECKING:
     from crewai_tools import CodeInterpreterTool
 
+    from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
     from crewai.agents.agent_builder.base_agent import PlatformAppOrAction
     from crewai.task import Task
     from crewai.tools.base_tool import BaseTool

@@ -141,6 +142,8 @@ class Agent(BaseAgent):
         mcps: List of MCP server references for tool integration.
     """
 
+    model_config = ConfigDict()
+
     _times_executed: int = PrivateAttr(default=0)
     _mcp_clients: list[Any] = PrivateAttr(default_factory=list)
     _last_messages: list[LLMMessage] = PrivateAttr(default_factory=list)

@@ -354,30 +357,43 @@ class Agent(BaseAgent):
         )
 
         start_time = time.time()
         memory = ""
 
-        contextual_memory = ContextualMemory(
-            self.crew._short_term_memory,
-            self.crew._long_term_memory,
-            self.crew._entity_memory,
-            self.crew._external_memory,
-            agent=self,
-            task=task,
-        )
-        memory = contextual_memory.build_context_for_task(task, context or "")
-        if memory.strip() != "":
-            task_prompt += self.i18n.slice("memory").format(memory=memory)
+        try:
+            contextual_memory = ContextualMemory(
+                self.crew._short_term_memory,
+                self.crew._long_term_memory,
+                self.crew._entity_memory,
+                self.crew._external_memory,
+                agent=self,
+                task=task,
+            )
+            memory = contextual_memory.build_context_for_task(task, context or "")
+            if memory.strip() != "":
+                task_prompt += self.i18n.slice("memory").format(memory=memory)
 
-        crewai_event_bus.emit(
-            self,
-            event=MemoryRetrievalCompletedEvent(
-                task_id=str(task.id) if task else None,
-                memory_content=memory,
-                retrieval_time_ms=(time.time() - start_time) * 1000,
-                source_type="agent",
-                from_agent=self,
-                from_task=task,
-            ),
-        )
+            crewai_event_bus.emit(
+                self,
+                event=MemoryRetrievalCompletedEvent(
+                    task_id=str(task.id) if task else None,
+                    memory_content=memory,
+                    retrieval_time_ms=(time.time() - start_time) * 1000,
+                    source_type="agent",
+                    from_agent=self,
+                    from_task=task,
+                ),
+            )
+        except Exception as e:
+            crewai_event_bus.emit(
+                self,
+                event=MemoryRetrievalFailedEvent(
+                    task_id=str(task.id) if task else None,
+                    source_type="agent",
+                    from_agent=self,
+                    from_task=task,
+                    error=str(e),
+                ),
+            )
 
         knowledge_config = get_knowledge_config(self)
         task_prompt = handle_knowledge_retrieval(

@@ -563,32 +579,45 @@ class Agent(BaseAgent):
         )
 
         start_time = time.time()
         memory = ""
 
-        contextual_memory = ContextualMemory(
-            self.crew._short_term_memory,
-            self.crew._long_term_memory,
-            self.crew._entity_memory,
-            self.crew._external_memory,
-            agent=self,
-            task=task,
-        )
-        memory = await contextual_memory.abuild_context_for_task(
-            task, context or ""
-        )
-        if memory.strip() != "":
-            task_prompt += self.i18n.slice("memory").format(memory=memory)
+        try:
+            contextual_memory = ContextualMemory(
+                self.crew._short_term_memory,
+                self.crew._long_term_memory,
+                self.crew._entity_memory,
+                self.crew._external_memory,
+                agent=self,
+                task=task,
+            )
+            memory = await contextual_memory.abuild_context_for_task(
+                task, context or ""
+            )
+            if memory.strip() != "":
+                task_prompt += self.i18n.slice("memory").format(memory=memory)
 
-        crewai_event_bus.emit(
-            self,
-            event=MemoryRetrievalCompletedEvent(
-                task_id=str(task.id) if task else None,
-                memory_content=memory,
-                retrieval_time_ms=(time.time() - start_time) * 1000,
-                source_type="agent",
-                from_agent=self,
-                from_task=task,
-            ),
-        )
+            crewai_event_bus.emit(
+                self,
+                event=MemoryRetrievalCompletedEvent(
+                    task_id=str(task.id) if task else None,
+                    memory_content=memory,
+                    retrieval_time_ms=(time.time() - start_time) * 1000,
+                    source_type="agent",
+                    from_agent=self,
+                    from_task=task,
+                ),
+            )
+        except Exception as e:
+            crewai_event_bus.emit(
+                self,
+                event=MemoryRetrievalFailedEvent(
+                    task_id=str(task.id) if task else None,
+                    source_type="agent",
+                    from_agent=self,
+                    from_task=task,
+                    error=str(e),
+                ),
+            )
 
         knowledge_config = get_knowledge_config(self)
         task_prompt = await ahandle_knowledge_retrieval(

@@ -2039,3 +2068,22 @@ class Agent(BaseAgent):
             ),
         )
         raise
+
+
+# Rebuild Agent model to resolve A2A type forward references
+try:
+    from crewai.a2a.config import (
+        A2AClientConfig as _A2AClientConfig,
+        A2AConfig as _A2AConfig,
+        A2AServerConfig as _A2AServerConfig,
+    )
+
+    Agent.model_rebuild(
+        _types_namespace={
+            "A2AConfig": _A2AConfig,
+            "A2AClientConfig": _A2AClientConfig,
+            "A2AServerConfig": _A2AServerConfig,
+        }
+    )
+except ImportError:
+    pass
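Why the rebuild is needed: under Pydantic v2, a model whose annotations name types that exist only under TYPE_CHECKING stays "not fully defined" until model_rebuild() supplies the real classes. A minimal sketch with hypothetical names:

    # Hypothetical minimal model showing the deferred-annotation pattern.
    from typing import TYPE_CHECKING, Optional

    from pydantic import BaseModel

    if TYPE_CHECKING:
        from some_optional_pkg import ExtraConfig  # hypothetical optional import


    class Thing(BaseModel):
        extra: "Optional[ExtraConfig]" = None  # unresolved until rebuild


    try:
        from some_optional_pkg import ExtraConfig as _ExtraConfig

        # Supplying the namespace resolves the string annotation above;
        # without it, instantiating Thing raises "Thing is not fully defined".
        Thing.model_rebuild(_types_namespace={"ExtraConfig": _ExtraConfig})
    except ImportError:
        pass  # optional extra absent; the model stays unrebuilt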
@@ -189,9 +189,15 @@ def prepare_kickoff(crew: Crew, inputs: dict[str, Any] | None) -> dict[str, Any]
     Returns:
         The potentially modified inputs dictionary after before callbacks.
     """
+    from crewai.events.base_events import reset_emission_counter
     from crewai.events.event_bus import crewai_event_bus
+    from crewai.events.event_context import get_current_parent_id, reset_last_event_id
     from crewai.events.types.crew_events import CrewKickoffStartedEvent
 
+    if get_current_parent_id() is None:
+        reset_emission_counter()
+        reset_last_event_id()
+
     for before_callback in crew.before_kickoff_callbacks:
         if inputs is None:
             inputs = {}
@@ -75,6 +75,7 @@ from crewai.events.types.memory_events import (
     MemoryQueryFailedEvent,
     MemoryQueryStartedEvent,
     MemoryRetrievalCompletedEvent,
+    MemoryRetrievalFailedEvent,
     MemoryRetrievalStartedEvent,
     MemorySaveCompletedEvent,
     MemorySaveFailedEvent,

@@ -174,6 +175,7 @@ __all__ = [
     "MemoryQueryFailedEvent",
     "MemoryQueryStartedEvent",
     "MemoryRetrievalCompletedEvent",
+    "MemoryRetrievalFailedEvent",
     "MemoryRetrievalStartedEvent",
     "MemorySaveCompletedEvent",
     "MemorySaveFailedEvent",
@@ -1,9 +1,46 @@
+from collections.abc import Iterator
+import contextvars
 from datetime import datetime, timezone
+import itertools
 from typing import Any
+import uuid
 
 from pydantic import BaseModel, Field
 
-from crewai.utilities.serialization import to_serializable
+from crewai.utilities.serialization import Serializable, to_serializable
+
+
+_emission_counter: contextvars.ContextVar[Iterator[int]] = contextvars.ContextVar(
+    "_emission_counter"
+)
+
+
+def _get_or_create_counter() -> Iterator[int]:
+    """Get the emission counter for the current context, creating if needed."""
+    try:
+        return _emission_counter.get()
+    except LookupError:
+        counter: Iterator[int] = itertools.count(start=1)
+        _emission_counter.set(counter)
+        return counter
+
+
+def get_next_emission_sequence() -> int:
+    """Get the next emission sequence number.
+
+    Returns:
+        The next sequence number.
+    """
+    return next(_get_or_create_counter())
+
+
+def reset_emission_counter() -> None:
+    """Reset the emission sequence counter to 1.
+
+    Resets for the current context only.
+    """
+    counter: Iterator[int] = itertools.count(start=1)
+    _emission_counter.set(counter)
 
 
 class BaseEvent(BaseModel):

@@ -22,7 +59,13 @@ class BaseEvent(BaseModel):
     agent_id: str | None = None
     agent_role: str | None = None
 
-    def to_json(self, exclude: set[str] | None = None):
+    event_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    parent_event_id: str | None = None
+    previous_event_id: str | None = None
+    triggered_by_event_id: str | None = None
+    emission_sequence: int | None = None
+
+    def to_json(self, exclude: set[str] | None = None) -> Serializable:
         """
         Converts the event to a JSON-serializable dictionary.

@@ -34,13 +77,13 @@ class BaseEvent(BaseModel):
         """
         return to_serializable(self, exclude=exclude)
 
-    def _set_task_params(self, data: dict[str, Any]):
+    def _set_task_params(self, data: dict[str, Any]) -> None:
         if "from_task" in data and (task := data["from_task"]):
             self.task_id = str(task.id)
             self.task_name = task.name or task.description
             self.from_task = None
 
-    def _set_agent_params(self, data: dict[str, Any]):
+    def _set_agent_params(self, data: dict[str, Any]) -> None:
         task = data.get("from_task", None)
         agent = task.agent if task else data.get("from_agent", None)
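What the ContextVar buys here, sketched with only the stdlib: each fresh contextvars.Context lazily creates its own itertools.count, so concurrent flows (or isolated tests) get independent, monotonic sequences.

    import contextvars
    import itertools

    _ctr: contextvars.ContextVar = contextvars.ContextVar("_ctr")


    def next_seq() -> int:
        # Mirrors _get_or_create_counter: lazily create a per-context counter.
        try:
            counter = _ctr.get()
        except LookupError:
            counter = itertools.count(start=1)
            _ctr.set(counter)
        return next(counter)


    print(next_seq(), next_seq())               # -> 1 2
    print(contextvars.Context().run(next_seq))  # -> 1 (empty context, fresh counter)
    print(next_seq())                           # -> 3 (original context unaffected)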
@@ -16,8 +16,22 @@ from typing import Any, Final, ParamSpec, TypeVar
 
 from typing_extensions import Self
 
-from crewai.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent, get_next_emission_sequence
 from crewai.events.depends import Depends
+from crewai.events.event_context import (
+    SCOPE_ENDING_EVENTS,
+    SCOPE_STARTING_EVENTS,
+    VALID_EVENT_PAIRS,
+    get_current_parent_id,
+    get_enclosing_parent_id,
+    get_last_event_id,
+    get_triggering_event_id,
+    handle_empty_pop,
+    handle_mismatch,
+    pop_event_scope,
+    push_event_scope,
+    set_last_event_id,
+)
 from crewai.events.handler_graph import build_execution_plan
 from crewai.events.types.event_bus_types import (
     AsyncHandler,

@@ -69,6 +83,8 @@ class CrewAIEventsBus:
     _execution_plan_cache: dict[type[BaseEvent], ExecutionPlan]
     _console: ConsoleFormatter
     _shutting_down: bool
+    _pending_futures: set[Future[Any]]
+    _futures_lock: threading.Lock
 
     def __new__(cls) -> Self:
         """Create or return the singleton instance.

@@ -91,6 +107,8 @@
         """
         self._shutting_down = False
         self._rwlock = RWLock()
+        self._pending_futures: set[Future[Any]] = set()
+        self._futures_lock = threading.Lock()
         self._sync_handlers: dict[type[BaseEvent], SyncHandlerSet] = {}
         self._async_handlers: dict[type[BaseEvent], AsyncHandlerSet] = {}
         self._handler_dependencies: dict[

@@ -111,6 +129,25 @@
         )
         self._loop_thread.start()
 
+    def _track_future(self, future: Future[Any]) -> Future[Any]:
+        """Track a future and set up automatic cleanup when it completes.
+
+        Args:
+            future: The future to track
+
+        Returns:
+            The same future for chaining
+        """
+        with self._futures_lock:
+            self._pending_futures.add(future)
+
+        def _cleanup(f: Future[Any]) -> None:
+            with self._futures_lock:
+                self._pending_futures.discard(f)
+
+        future.add_done_callback(_cleanup)
+        return future
+
     def _run_loop(self) -> None:
         """Run the background async event loop."""
         asyncio.set_event_loop(self._loop)

@@ -326,6 +363,28 @@
             ... await asyncio.wrap_future(future)  # In async test
             ... # or future.result(timeout=5.0) in sync code
         """
+        event.previous_event_id = get_last_event_id()
+        event.triggered_by_event_id = get_triggering_event_id()
+        event.emission_sequence = get_next_emission_sequence()
+        if event.parent_event_id is None:
+            event_type_name = event.type
+            if event_type_name in SCOPE_ENDING_EVENTS:
+                event.parent_event_id = get_enclosing_parent_id()
+                popped = pop_event_scope()
+                if popped is None:
+                    handle_empty_pop(event_type_name)
+                else:
+                    _, popped_type = popped
+                    expected_start = VALID_EVENT_PAIRS.get(event_type_name)
+                    if expected_start and popped_type and popped_type != expected_start:
+                        handle_mismatch(event_type_name, popped_type, expected_start)
+            elif event_type_name in SCOPE_STARTING_EVENTS:
+                event.parent_event_id = get_current_parent_id()
+                push_event_scope(event.event_id, event_type_name)
+            else:
+                event.parent_event_id = get_current_parent_id()
+
+        set_last_event_id(event.event_id)
         event_type = type(event)
 
         with self._rwlock.r_locked():

@@ -339,9 +398,11 @@
         async_handlers = self._async_handlers.get(event_type, frozenset())
 
         if has_dependencies:
-            return asyncio.run_coroutine_threadsafe(
-                self._emit_with_dependencies(source, event),
-                self._loop,
+            return self._track_future(
+                asyncio.run_coroutine_threadsafe(
+                    self._emit_with_dependencies(source, event),
+                    self._loop,
+                )
             )
 
         if sync_handlers:

@@ -353,16 +414,53 @@
                 ctx.run, self._call_handlers, source, event, sync_handlers
             )
             if not async_handlers:
-                return sync_future
+                return self._track_future(sync_future)
 
         if async_handlers:
-            return asyncio.run_coroutine_threadsafe(
-                self._acall_handlers(source, event, async_handlers),
-                self._loop,
+            return self._track_future(
+                asyncio.run_coroutine_threadsafe(
+                    self._acall_handlers(source, event, async_handlers),
+                    self._loop,
+                )
             )
 
         return None
 
+    def flush(self, timeout: float | None = 30.0) -> bool:
+        """Block until all pending event handlers complete.
+
+        This method waits for all futures from previously emitted events to
+        finish executing. Useful at the end of operations (like kickoff) to
+        ensure all event handlers have completed before returning.
+
+        Args:
+            timeout: Maximum time in seconds to wait for handlers to complete.
+                Defaults to 30 seconds. Pass None to wait indefinitely.
+
+        Returns:
+            True if all handlers completed, False if timeout occurred.
+        """
+        with self._futures_lock:
+            futures_to_wait = list(self._pending_futures)
+
+        if not futures_to_wait:
+            return True
+
+        from concurrent.futures import wait as wait_futures
+
+        done, not_done = wait_futures(futures_to_wait, timeout=timeout)
+
+        # Check for exceptions in completed futures
+        errors = [
+            future.exception() for future in done if future.exception() is not None
+        ]
+        for error in errors:
+            self._console.print(
+                f"[CrewAIEventsBus] Handler exception during flush: {error}"
+            )
+
+        return len(not_done) == 0
+
     async def aemit(self, source: Any, event: BaseEvent) -> None:
         """Asynchronously emit an event to registered async handlers.

@@ -464,6 +562,9 @@
             wait: If True, wait for all pending tasks to complete before stopping.
                 If False, cancel all pending tasks immediately.
         """
+        if wait:
+            self.flush()
+
         with self._rwlock.w_locked():
             self._shutting_down = True
             loop = getattr(self, "_loop", None)
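The track-and-flush lifecycle in miniature — a self-contained sketch using only concurrent.futures (the bus's RW lock, event loop, and handler plumbing are omitted; MiniBus is hypothetical):

    import threading
    import time
    from concurrent.futures import Future, ThreadPoolExecutor, wait


    class MiniBus:
        def __init__(self) -> None:
            self._pending: set[Future] = set()
            self._lock = threading.Lock()
            self._pool = ThreadPoolExecutor(max_workers=2)

        def _track(self, fut: Future) -> Future:
            # Register the future; it removes itself once done.
            with self._lock:
                self._pending.add(fut)

            def _cleanup(f: Future) -> None:
                with self._lock:
                    self._pending.discard(f)

            fut.add_done_callback(_cleanup)
            return fut

        def emit(self, handler, *args) -> Future:
            return self._track(self._pool.submit(handler, *args))

        def flush(self, timeout: float | None = 5.0) -> bool:
            # Wait on whatever is still outstanding at this moment.
            with self._lock:
                outstanding = list(self._pending)
            _done, not_done = wait(outstanding, timeout=timeout)
            return not not_done


    bus = MiniBus()
    bus.emit(time.sleep, 0.1)
    assert bus.flush()  # blocks until the sleeping handler finishes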
lib/crewai/src/crewai/events/event_context.py (new file, 334 lines)

@@ -0,0 +1,334 @@
"""Event context management for parent-child relationship tracking."""

from collections.abc import Generator
from contextlib import contextmanager
import contextvars
from dataclasses import dataclass
from enum import Enum

from crewai.events.utils.console_formatter import ConsoleFormatter


class MismatchBehavior(Enum):
    """Behavior when event pairs don't match."""

    WARN = "warn"
    RAISE = "raise"
    SILENT = "silent"


@dataclass
class EventContextConfig:
    """Configuration for event context behavior."""

    max_stack_depth: int = 100
    mismatch_behavior: MismatchBehavior = MismatchBehavior.WARN
    empty_pop_behavior: MismatchBehavior = MismatchBehavior.WARN


class StackDepthExceededError(Exception):
    """Raised when stack depth limit is exceeded."""


class EventPairingError(Exception):
    """Raised when event pairs don't match."""


class EmptyStackError(Exception):
    """Raised when popping from empty stack."""


_event_id_stack: contextvars.ContextVar[tuple[tuple[str, str], ...]] = (
    contextvars.ContextVar("_event_id_stack", default=())
)

_event_context_config: contextvars.ContextVar[EventContextConfig | None] = (
    contextvars.ContextVar("_event_context_config", default=None)
)

_last_event_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "_last_event_id", default=None
)

_triggering_event_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "_triggering_event_id", default=None
)

_default_config = EventContextConfig()

_console = ConsoleFormatter()


def get_current_parent_id() -> str | None:
    """Get the current parent event ID from the stack."""
    stack = _event_id_stack.get()
    return stack[-1][0] if stack else None


def get_enclosing_parent_id() -> str | None:
    """Get the parent of the current scope (stack[-2])."""
    stack = _event_id_stack.get()
    return stack[-2][0] if len(stack) >= 2 else None


def get_last_event_id() -> str | None:
    """Get the ID of the last emitted event for linear chain tracking.

    Returns:
        The event_id of the previously emitted event, or None if no event yet.
    """
    return _last_event_id.get()


def reset_last_event_id() -> None:
    """Reset the last event ID to None.

    Should be called at the start of a new flow or when resetting event state.
    """
    _last_event_id.set(None)


def set_last_event_id(event_id: str) -> None:
    """Set the ID of the last emitted event.

    Args:
        event_id: The event_id to set as the last emitted event.
    """
    _last_event_id.set(event_id)


def get_triggering_event_id() -> str | None:
    """Get the ID of the event that triggered the current execution.

    Returns:
        The event_id of the triggering event, or None if not in a triggered context.
    """
    return _triggering_event_id.get()


def set_triggering_event_id(event_id: str | None) -> None:
    """Set the ID of the triggering event for causal chain tracking.

    Args:
        event_id: The event_id that triggered the current execution, or None.
    """
    _triggering_event_id.set(event_id)


@contextmanager
def triggered_by_scope(event_id: str) -> Generator[None, None, None]:
    """Context manager to set the triggering event ID for causal chain tracking.

    All events emitted within this context will have their triggered_by_event_id
    set to the provided event_id.

    Args:
        event_id: The event_id that triggered the current execution.
    """
    previous = _triggering_event_id.get()
    _triggering_event_id.set(event_id)
    try:
        yield
    finally:
        _triggering_event_id.set(previous)


def push_event_scope(event_id: str, event_type: str = "") -> None:
    """Push an event ID and type onto the scope stack."""
    config = _event_context_config.get() or _default_config
    stack = _event_id_stack.get()

    if 0 < config.max_stack_depth <= len(stack):
        raise StackDepthExceededError(
            f"Event stack depth limit ({config.max_stack_depth}) exceeded. "
            f"This usually indicates missing ending events."
        )

    _event_id_stack.set((*stack, (event_id, event_type)))


def pop_event_scope() -> tuple[str, str] | None:
    """Pop an event entry from the scope stack."""
    stack = _event_id_stack.get()
    if not stack:
        return None
    _event_id_stack.set(stack[:-1])
    return stack[-1]


def handle_empty_pop(event_type_name: str) -> None:
    """Handle a pop attempt on an empty stack."""
    config = _event_context_config.get() or _default_config
    msg = (
        f"Ending event '{event_type_name}' emitted with empty scope stack. "
        "Missing starting event?"
    )

    if config.empty_pop_behavior == MismatchBehavior.RAISE:
        raise EmptyStackError(msg)
    if config.empty_pop_behavior == MismatchBehavior.WARN:
        _console.print(f"[CrewAIEventsBus] Warning: {msg}")


def handle_mismatch(
    event_type_name: str,
    popped_type: str,
    expected_start: str,
) -> None:
    """Handle a mismatched event pair."""
    config = _event_context_config.get() or _default_config
    msg = (
        f"Event pairing mismatch. '{event_type_name}' closed '{popped_type}' "
        f"(expected '{expected_start}')"
    )

    if config.mismatch_behavior == MismatchBehavior.RAISE:
        raise EventPairingError(msg)
    if config.mismatch_behavior == MismatchBehavior.WARN:
        _console.print(f"[CrewAIEventsBus] Warning: {msg}")


@contextmanager
def event_scope(event_id: str, event_type: str = "") -> Generator[None, None, None]:
    """Context manager to establish a parent event scope."""
    stack = _event_id_stack.get()
    already_on_stack = any(entry[0] == event_id for entry in stack)
    if not already_on_stack:
        push_event_scope(event_id, event_type)
    try:
        yield
    finally:
        if not already_on_stack:
            pop_event_scope()


SCOPE_STARTING_EVENTS: frozenset[str] = frozenset(
    {
        "flow_started",
        "method_execution_started",
        "crew_kickoff_started",
        "crew_train_started",
        "crew_test_started",
        "agent_execution_started",
        "agent_evaluation_started",
        "lite_agent_execution_started",
        "task_started",
        "llm_call_started",
        "llm_guardrail_started",
        "tool_usage_started",
        "mcp_connection_started",
        "mcp_tool_execution_started",
        "memory_retrieval_started",
        "memory_save_started",
        "memory_query_started",
        "knowledge_query_started",
        "knowledge_search_query_started",
        "a2a_delegation_started",
        "a2a_conversation_started",
        "a2a_server_task_started",
        "a2a_parallel_delegation_started",
        "agent_reasoning_started",
    }
)

SCOPE_ENDING_EVENTS: frozenset[str] = frozenset(
    {
        "flow_finished",
        "flow_paused",
        "method_execution_finished",
        "method_execution_failed",
        "method_execution_paused",
        "crew_kickoff_completed",
        "crew_kickoff_failed",
        "crew_train_completed",
        "crew_train_failed",
        "crew_test_completed",
        "crew_test_failed",
        "agent_execution_completed",
        "agent_execution_error",
        "agent_evaluation_completed",
        "agent_evaluation_failed",
        "lite_agent_execution_completed",
        "lite_agent_execution_error",
        "task_completed",
        "task_failed",
        "llm_call_completed",
        "llm_call_failed",
        "llm_guardrail_completed",
        "llm_guardrail_failed",
        "tool_usage_finished",
        "tool_usage_error",
        "mcp_connection_completed",
        "mcp_connection_failed",
        "mcp_tool_execution_completed",
        "mcp_tool_execution_failed",
        "memory_retrieval_completed",
        "memory_retrieval_failed",
        "memory_save_completed",
        "memory_save_failed",
        "memory_query_completed",
        "memory_query_failed",
        "knowledge_query_completed",
        "knowledge_query_failed",
        "knowledge_search_query_completed",
        "knowledge_search_query_failed",
        "a2a_delegation_completed",
        "a2a_conversation_completed",
        "a2a_server_task_completed",
        "a2a_server_task_canceled",
        "a2a_server_task_failed",
        "a2a_parallel_delegation_completed",
        "agent_reasoning_completed",
        "agent_reasoning_failed",
    }
)

VALID_EVENT_PAIRS: dict[str, str] = {
    "flow_finished": "flow_started",
    "flow_paused": "flow_started",
    "method_execution_finished": "method_execution_started",
    "method_execution_failed": "method_execution_started",
    "method_execution_paused": "method_execution_started",
    "crew_kickoff_completed": "crew_kickoff_started",
    "crew_kickoff_failed": "crew_kickoff_started",
    "crew_train_completed": "crew_train_started",
    "crew_train_failed": "crew_train_started",
    "crew_test_completed": "crew_test_started",
    "crew_test_failed": "crew_test_started",
    "agent_execution_completed": "agent_execution_started",
    "agent_execution_error": "agent_execution_started",
    "agent_evaluation_completed": "agent_evaluation_started",
    "agent_evaluation_failed": "agent_evaluation_started",
    "lite_agent_execution_completed": "lite_agent_execution_started",
    "lite_agent_execution_error": "lite_agent_execution_started",
    "task_completed": "task_started",
    "task_failed": "task_started",
    "llm_call_completed": "llm_call_started",
    "llm_call_failed": "llm_call_started",
    "llm_guardrail_completed": "llm_guardrail_started",
    "llm_guardrail_failed": "llm_guardrail_started",
    "tool_usage_finished": "tool_usage_started",
    "tool_usage_error": "tool_usage_started",
    "mcp_connection_completed": "mcp_connection_started",
    "mcp_connection_failed": "mcp_connection_started",
    "mcp_tool_execution_completed": "mcp_tool_execution_started",
    "mcp_tool_execution_failed": "mcp_tool_execution_started",
    "memory_retrieval_completed": "memory_retrieval_started",
    "memory_retrieval_failed": "memory_retrieval_started",
    "memory_save_completed": "memory_save_started",
    "memory_save_failed": "memory_save_started",
    "memory_query_completed": "memory_query_started",
    "memory_query_failed": "memory_query_started",
    "knowledge_query_completed": "knowledge_query_started",
    "knowledge_query_failed": "knowledge_query_started",
    "knowledge_search_query_completed": "knowledge_search_query_started",
    "knowledge_search_query_failed": "knowledge_search_query_started",
    "a2a_delegation_completed": "a2a_delegation_started",
    "a2a_conversation_completed": "a2a_conversation_started",
    "a2a_server_task_completed": "a2a_server_task_started",
    "a2a_server_task_canceled": "a2a_server_task_started",
    "a2a_server_task_failed": "a2a_server_task_started",
    "a2a_parallel_delegation_completed": "a2a_parallel_delegation_started",
    "agent_reasoning_completed": "agent_reasoning_started",
    "agent_reasoning_failed": "agent_reasoning_started",
}
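How the two context managers compose — an illustrative snippet (event IDs faked as strings, assumed to run in a fresh context):

    from crewai.events.event_context import (
        event_scope,
        get_current_parent_id,
        get_triggering_event_id,
        triggered_by_scope,
    )

    with event_scope("evt-1", "crew_kickoff_started"):
        assert get_current_parent_id() == "evt-1"
        with triggered_by_scope("evt-0"):
            # Events emitted here record evt-0 as their causal trigger
            # and evt-1 as their structural parent.
            assert get_triggering_event_id() == "evt-0"
        assert get_triggering_event_id() is None  # restored on exit
    assert get_current_parent_id() is None  # scope popped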
@@ -79,6 +79,7 @@ from crewai.events.types.memory_events import (
     MemoryQueryFailedEvent,
     MemoryQueryStartedEvent,
     MemoryRetrievalCompletedEvent,
+    MemoryRetrievalFailedEvent,
     MemoryRetrievalStartedEvent,
     MemorySaveCompletedEvent,
     MemorySaveFailedEvent,

@@ -173,6 +174,7 @@ EventTypes = (
     | MemoryQueryFailedEvent
     | MemoryRetrievalStartedEvent
     | MemoryRetrievalCompletedEvent
+    | MemoryRetrievalFailedEvent
     | MCPConnectionStartedEvent
     | MCPConnectionCompletedEvent
     | MCPConnectionFailedEvent
@@ -267,9 +267,12 @@ class TraceBatchManager:
 
         sorted_events = sorted(
             self.event_buffer,
-            key=lambda e: e.timestamp
-            if hasattr(e, "timestamp") and e.timestamp
-            else "",
+            key=lambda e: (
+                e.emission_sequence
+                if e.emission_sequence is not None
+                else float("inf"),
+                e.timestamp if hasattr(e, "timestamp") and e.timestamp else "",
+            ),
         )
 
         self.current_batch.events = sorted_events
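The fallback ordering in one sentence: events carrying an emission_sequence sort by it, while events without one sink to the end and fall back to timestamp order. A toy check of the composite key:

    # Toy reproduction of the sort key using plain dicts instead of events.
    events = [
        {"seq": 2, "ts": "b"},
        {"seq": None, "ts": "a"},
        {"seq": 1, "ts": "c"},
    ]
    ordered = sorted(
        events,
        key=lambda e: (e["seq"] if e["seq"] is not None else float("inf"), e["ts"]),
    )
    assert [e["ts"] for e in ordered] == ["c", "b", "a"]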
@@ -9,6 +9,7 @@ from typing_extensions import Self
 from crewai.cli.authentication.token import AuthError, get_auth_token
 from crewai.cli.version import get_crewai_version
 from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.base_events import BaseEvent
 from crewai.events.event_bus import CrewAIEventsBus
 from crewai.events.listeners.tracing.first_time_trace_handler import (
     FirstTimeTraceHandler,

@@ -616,7 +617,7 @@ class TraceCollectionListener(BaseEventListener):
         if self.batch_manager.is_batch_initialized():
             self.batch_manager.finalize_batch()
 
-    def _initialize_crew_batch(self, source: Any, event: Any) -> None:
+    def _initialize_crew_batch(self, source: Any, event: BaseEvent) -> None:
         """Initialize trace batch.
 
         Args:

@@ -626,7 +627,7 @@ class TraceCollectionListener(BaseEventListener):
         user_context = self._get_user_context()
         execution_metadata = {
             "crew_name": getattr(event, "crew_name", "Unknown Crew"),
-            "execution_start": event.timestamp if hasattr(event, "timestamp") else None,
+            "execution_start": event.timestamp,
             "crewai_version": get_crewai_version(),
         }

@@ -635,7 +636,7 @@ class TraceCollectionListener(BaseEventListener):
 
         self._initialize_batch(user_context, execution_metadata)
 
-    def _initialize_flow_batch(self, source: Any, event: Any) -> None:
+    def _initialize_flow_batch(self, source: Any, event: BaseEvent) -> None:
         """Initialize trace batch for Flow execution.
 
         Args:

@@ -645,7 +646,7 @@ class TraceCollectionListener(BaseEventListener):
         user_context = self._get_user_context()
         execution_metadata = {
             "flow_name": getattr(event, "flow_name", "Unknown Flow"),
-            "execution_start": event.timestamp if hasattr(event, "timestamp") else None,
+            "execution_start": event.timestamp,
             "crewai_version": get_crewai_version(),
             "execution_type": "flow",
         }

@@ -714,18 +715,18 @@ class TraceCollectionListener(BaseEventListener):
         self.batch_manager.end_event_processing()
 
     def _create_trace_event(
-        self, event_type: str, source: Any, event: Any
+        self, event_type: str, source: Any, event: BaseEvent
     ) -> TraceEvent:
-        """Create a trace event"""
-        if hasattr(event, "timestamp") and event.timestamp:
-            trace_event = TraceEvent(
-                type=event_type,
-                timestamp=event.timestamp.isoformat(),
-            )
-        else:
-            trace_event = TraceEvent(
-                type=event_type,
-            )
+        """Create a trace event with ordering information."""
+        trace_event = TraceEvent(
+            type=event_type,
+            timestamp=event.timestamp.isoformat() if event.timestamp else "",
+            event_id=event.event_id,
+            emission_sequence=event.emission_sequence,
+            parent_event_id=event.parent_event_id,
+            previous_event_id=event.previous_event_id,
+            triggered_by_event_id=event.triggered_by_event_id,
+        )
 
         trace_event.event_data = self._build_event_data(event_type, event, source)

@@ -778,10 +779,8 @@ class TraceCollectionListener(BaseEventListener):
             }
         if event_type == "llm_call_started":
             event_data = safe_serialize_to_dict(event)
-            event_data["task_name"] = (
-                event.task_name or event.task_description
-                if hasattr(event, "task_name") and event.task_name
-                else None
+            event_data["task_name"] = event.task_name or getattr(
+                event, "task_description", None
             )
             return event_data
         if event_type == "llm_call_completed":
@@ -15,5 +15,10 @@ class TraceEvent:
     type: str = ""
     event_data: dict[str, Any] = field(default_factory=dict)
 
+    emission_sequence: int | None = None
+    parent_event_id: str | None = None
+    previous_event_id: str | None = None
+    triggered_by_event_id: str | None = None
+
     def to_dict(self) -> dict[str, Any]:
         return asdict(self)
@@ -14,7 +14,7 @@ class MemoryBaseEvent(BaseEvent):
     agent_role: str | None = None
     agent_id: str | None = None
 
-    def __init__(self, **data):
+    def __init__(self, **data: Any) -> None:
         super().__init__(**data)
         self._set_agent_params(data)
         self._set_task_params(data)

@@ -93,3 +93,11 @@ class MemoryRetrievalCompletedEvent(MemoryBaseEvent):
     task_id: str | None = None
     memory_content: str
     retrieval_time_ms: float
+
+
+class MemoryRetrievalFailedEvent(MemoryBaseEvent):
+    """Event emitted when memory retrieval for a task prompt fails."""
+
+    type: str = "memory_retrieval_failed"
+    task_id: str | None = None
+    error: str
@@ -31,7 +31,13 @@ from pydantic import BaseModel, Field, ValidationError
 from rich.console import Console
 from rich.panel import Panel
 
+from crewai.events.base_events import reset_emission_counter
 from crewai.events.event_bus import crewai_event_bus
+from crewai.events.event_context import (
+    get_current_parent_id,
+    reset_last_event_id,
+    triggered_by_scope,
+)
 from crewai.events.listeners.tracing.trace_listener import (
     TraceCollectionListener,
 )

@@ -753,6 +759,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
         racing_listeners: frozenset[FlowMethodName],
         other_listeners: list[FlowMethodName],
         result: Any,
+        triggering_event_id: str | None = None,
     ) -> None:
         """Execute racing listeners with first-wins semantics.

@@ -764,10 +771,11 @@
             racing_listeners: Set of listener names that race for an OR condition.
             other_listeners: Other listeners to execute in parallel (not racing).
             result: The result from the triggering method.
+            triggering_event_id: The event_id of the event that triggered these listeners.
         """
         racing_tasks = [
             asyncio.create_task(
-                self._execute_single_listener(name, result),
+                self._execute_single_listener(name, result, triggering_event_id),
                 name=str(name),
             )
             for name in racing_listeners

@@ -775,7 +783,7 @@
 
         other_tasks = [
             asyncio.create_task(
-                self._execute_single_listener(name, result),
+                self._execute_single_listener(name, result, triggering_event_id),
                 name=str(name),
             )
             for name in other_listeners

@@ -1557,6 +1565,10 @@
         if filtered_inputs:
             self._initialize_state(filtered_inputs)
 
+        if get_current_parent_id() is None:
+            reset_emission_counter()
+            reset_last_event_id()
+
         # Emit FlowStartedEvent and log the start of the flow.
         if not self.suppress_flow_events:
             future = crewai_event_bus.emit(

@@ -1736,12 +1748,14 @@
         method = self._methods[start_method_name]
         enhanced_method = self._inject_trigger_payload_for_start_method(method)
 
-        result = await self._execute_method(start_method_name, enhanced_method)
+        result, finished_event_id = await self._execute_method(
+            start_method_name, enhanced_method
+        )
 
         # If start method is a router, use its result as an additional trigger
         if start_method_name in self._routers and result is not None:
             # Execute listeners for the start method name first
-            await self._execute_listeners(start_method_name, result)
+            await self._execute_listeners(start_method_name, result, finished_event_id)
             # Then execute listeners for the router result (e.g., "approved")
             router_result_trigger = FlowMethodName(str(result))
             listeners_for_result = self._find_triggered_methods(

@@ -1765,16 +1779,21 @@
                     if name not in racing_members
                 ]
                 await self._execute_racing_listeners(
-                    racing_members, other_listeners, listener_result
+                    racing_members,
+                    other_listeners,
+                    listener_result,
+                    finished_event_id,
                 )
             else:
                 tasks = [
-                    self._execute_single_listener(listener_name, listener_result)
+                    self._execute_single_listener(
+                        listener_name, listener_result, finished_event_id
+                    )
                     for listener_name in listeners_for_result
                 ]
                 await asyncio.gather(*tasks)
         else:
-            await self._execute_listeners(start_method_name, result)
+            await self._execute_listeners(start_method_name, result, finished_event_id)
 
     def _inject_trigger_payload_for_start_method(
         self, original_method: Callable[..., Any]

@@ -1818,7 +1837,14 @@
         method: Callable[..., Any],
         *args: Any,
         **kwargs: Any,
-    ) -> Any:
+    ) -> tuple[Any, str | None]:
+        """Execute a method and emit events.
+
+        Returns:
+            A tuple of (result, finished_event_id) where finished_event_id is
+            the event_id of the MethodExecutionFinishedEvent, or None if events
+            are suppressed.
+        """
         try:
             dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | (
                 kwargs or {}

@@ -1859,21 +1885,21 @@
 
             self._completed_methods.add(method_name)
 
+            finished_event_id: str | None = None
             if not self.suppress_flow_events:
-                future = crewai_event_bus.emit(
-                    self,
-                    MethodExecutionFinishedEvent(
-                        type="method_execution_finished",
-                        method_name=method_name,
-                        flow_name=self.name or self.__class__.__name__,
-                        state=self._copy_and_serialize_state(),
-                        result=result,
-                    ),
+                finished_event = MethodExecutionFinishedEvent(
+                    type="method_execution_finished",
+                    method_name=method_name,
+                    flow_name=self.name or self.__class__.__name__,
+                    state=self._copy_and_serialize_state(),
+                    result=result,
                 )
+                finished_event_id = finished_event.event_id
+                future = crewai_event_bus.emit(self, finished_event)
                 if future:
                     self._event_futures.append(future)
 
-            return result
+            return result, finished_event_id
         except Exception as e:
             # Check if this is a HumanFeedbackPending exception (paused, not failed)
             from crewai.flow.async_feedback.types import HumanFeedbackPending

@@ -1927,7 +1953,10 @@
         return state_copy
 
     async def _execute_listeners(
-        self, trigger_method: FlowMethodName, result: Any
+        self,
+        trigger_method: FlowMethodName,
+        result: Any,
+        triggering_event_id: str | None = None,
     ) -> None:
         """Executes all listeners and routers triggered by a method completion.

@@ -1938,6 +1967,8 @@
         Args:
             trigger_method: The name of the method that triggered these listeners.
             result: The result from the triggering method, passed to listeners that accept parameters.
+            triggering_event_id: The event_id of the MethodExecutionFinishedEvent that
+                triggered these listeners, used for causal chain tracking.
 
         Note:
             - Routers are executed sequentially to maintain flow control

@@ -1952,6 +1983,7 @@
         ] = {}  # Map outcome -> HumanFeedbackResult
         current_trigger = trigger_method
         current_result = result  # Track the result to pass to each router
+        current_triggering_event_id = triggering_event_id
 
         while True:
             routers_triggered = self._find_triggered_methods(

@@ -1965,7 +1997,9 @@
                 router_input = router_result_to_feedback.get(
                     str(current_trigger), current_result
                 )
-                await self._execute_single_listener(router_name, router_input)
+                current_triggering_event_id = await self._execute_single_listener(
+                    router_name, router_input, current_triggering_event_id
+                )
                 # After executing router, the router's result is the path
                 router_result = (
                     self._method_outputs[-1] if self._method_outputs else None

@@ -2008,12 +2042,15 @@
                     if name not in racing_members
                 ]
                 await self._execute_racing_listeners(
-                    racing_members, other_listeners, listener_result
+                    racing_members,
+                    other_listeners,
+                    listener_result,
+                    triggering_event_id,
                 )
             else:
                 tasks = [
                     self._execute_single_listener(
-                        listener_name, listener_result
+                        listener_name, listener_result, triggering_event_id
                     )
                     for listener_name in listeners_triggered
                 ]

@@ -2192,8 +2229,11 @@
         return triggered
 
     async def _execute_single_listener(
-        self, listener_name: FlowMethodName, result: Any
-    ) -> None:
+        self,
+        listener_name: FlowMethodName,
+        result: Any,
+        triggering_event_id: str | None = None,
+    ) -> str | None:
         """Executes a single listener method with proper event handling.
 
         This internal method manages the execution of an individual listener,

@@ -2202,6 +2242,12 @@
         Args:
             listener_name: The name of the listener method to execute.
             result: The result from the triggering method, which may be passed to the listener if it accepts parameters.
+            triggering_event_id: The event_id of the event that triggered this listener,
+                used for causal chain tracking.
+
+        Returns:
+            The event_id of the MethodExecutionFinishedEvent emitted by this listener,
+            or None if events are suppressed.
 
         Note:
             - Inspects method signature to determine if it accepts the trigger result

@@ -2227,7 +2273,7 @@
             ):
                 # This conditional start was executed, continue its chain
                 await self._execute_start_method(start_method_name)
-                return
+                return None
             # For cyclic flows, clear from completed to allow re-execution
             self._completed_methods.discard(listener_name)
             # Also clear from fired OR listeners for cyclic flows

@@ -2240,15 +2286,30 @@
             params = list(sig.parameters.values())
             method_params = [p for p in params if p.name != "self"]
 
-            if method_params:
-                listener_result = await self._execute_method(
-                    listener_name, method, result
-                )
+            if triggering_event_id:
+                with triggered_by_scope(triggering_event_id):
+                    if method_params:
+                        listener_result, finished_event_id = await self._execute_method(
+                            listener_name, method, result
+                        )
+                    else:
+                        listener_result, finished_event_id = await self._execute_method(
+                            listener_name, method
+                        )
             else:
-                listener_result = await self._execute_method(listener_name, method)
+                if method_params:
+                    listener_result, finished_event_id = await self._execute_method(
+                        listener_name, method, result
+                    )
+                else:
+                    listener_result, finished_event_id = await self._execute_method(
+                        listener_name, method
+                    )
 
             # Execute listeners (and possibly routers) of this listener
-            await self._execute_listeners(listener_name, listener_result)
+            await self._execute_listeners(
+                listener_name, listener_result, finished_event_id
+            )
 
             # If this listener is also a router (e.g., has @human_feedback with emit),
             # we need to trigger listeners for the router result as well

@@ -2275,15 +2336,22 @@
                     if name not in racing_members
                 ]
                 await self._execute_racing_listeners(
-                    racing_members, other_listeners, feedback_result
+                    racing_members,
+                    other_listeners,
+                    feedback_result,
+                    finished_event_id,
                 )
             else:
                 tasks = [
-                    self._execute_single_listener(name, feedback_result)
+                    self._execute_single_listener(
+                        name, feedback_result, finished_event_id
+                    )
                     for name in listeners_for_result
                 ]
                 await asyncio.gather(*tasks)
 
+            return finished_event_id
+
         except Exception as e:
             # Don't log HumanFeedbackPending as an error - it's expected control flow
             from crewai.flow.async_feedback.types import HumanFeedbackPending
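The propagation contract threaded through these flow changes, reduced to a toy (synchronous, hypothetical names): every executed method returns (result, finished_event_id), and that id becomes the triggering id for whatever it fans out to.

    import uuid


    def execute(name: str, trigger_id):
        # Stand-in for _execute_method: mint a "finished" id and report lineage.
        finished_id = str(uuid.uuid4())
        print(f"{name}: triggered_by={trigger_id} emitted={finished_id[:8]}")
        return f"{name}-result", finished_id


    result, eid = execute("start", None)
    result, eid = execute("listener_a", eid)  # causally linked to start
    result, eid = execute("listener_b", eid)  # causally linked to listener_a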
@@ -241,6 +241,9 @@ class ToolUsage:
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
|
||||
started_at = time.time()
|
||||
started_event_emitted = False
|
||||
|
||||
if self.agent:
|
||||
event_data = {
|
||||
"agent_key": self.agent.key,
|
||||
@@ -258,151 +261,162 @@ class ToolUsage:
                event_data["task_name"] = self.task.name or self.task.description
                event_data["task_id"] = str(self.task.id)
            crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
            started_event_emitted = True

        from_cache = False
        result = None  # type: ignore
        should_retry = False
        available_tool = None

        try:
            if self.tools_handler and self.tools_handler.cache:
                input_str = ""
                if calling.arguments:
                    if isinstance(calling.arguments, dict):
                        input_str = json.dumps(calling.arguments)
                    else:
                        input_str = str(calling.arguments)

                result = self.tools_handler.cache.read(
                    tool=calling.tool_name, input=input_str
                )  # type: ignore
                from_cache = result is not None

            available_tool = next(
                (
                    available_tool
                    for available_tool in self.tools
                    if available_tool.name == tool.name
                ),
                None,
            )

            usage_limit_error = self._check_usage_limit(available_tool, tool.name)
            if usage_limit_error:
                result = usage_limit_error
                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                result = self._format_result(result=result)
                # Don't return early - fall through to finally block
            elif result is None:
                try:
                    if calling.tool_name in [
                        "Delegate work to coworker",
                        "Ask question to coworker",
                    ]:
                        coworker = (
                            calling.arguments.get("coworker")
                            if calling.arguments
                            else None
                        )
                        if self.task:
                            self.task.increment_delegations(coworker)

                    if calling.arguments:
                        try:
                            acceptable_args = tool.args_schema.model_json_schema()[
                                "properties"
                            ].keys()
                            arguments = {
                                k: v
                                for k, v in calling.arguments.items()
                                if k in acceptable_args
                            }
                            arguments = self._add_fingerprint_metadata(arguments)
                            result = await tool.ainvoke(input=arguments)
                        except Exception:
                            arguments = calling.arguments
                            arguments = self._add_fingerprint_metadata(arguments)
                            result = await tool.ainvoke(input=arguments)
                    else:
                        arguments = self._add_fingerprint_metadata({})
                        result = await tool.ainvoke(input=arguments)

                    if self.tools_handler:
                        should_cache = True
                        if (
                            hasattr(available_tool, "cache_function")
                            and available_tool.cache_function
                        ):
                            should_cache = available_tool.cache_function(
                                calling.arguments, result
                            )

                        self.tools_handler.on_tool_use(
                            calling=calling, output=result, should_cache=should_cache
                        )

                    self._telemetry.tool_usage(
                        llm=self.function_calling_llm,
                        tool_name=tool.name,
                        attempts=self._run_attempts,
                    )
                    result = self._format_result(result=result)
                    data = {
                        "result": result,
                        "tool_name": tool.name,
                        "tool_args": calling.arguments,
                    }

                    if (
                        hasattr(available_tool, "result_as_answer")
                        and available_tool.result_as_answer
                    ):
                        result_as_answer = available_tool.result_as_answer
                        data["result_as_answer"] = result_as_answer

                    if self.agent and hasattr(self.agent, "tools_results"):
                        self.agent.tools_results.append(data)

                    if available_tool and hasattr(
                        available_tool, "current_usage_count"
                    ):
                        available_tool.current_usage_count += 1
                        if (
                            hasattr(available_tool, "max_usage_count")
                            and available_tool.max_usage_count is not None
                        ):
                            self._printer.print(
                                content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
                                color="blue",
                            )

                except Exception as e:
                    self.on_tool_error(tool=tool, tool_calling=calling, e=e)
                    self._run_attempts += 1
                    if self._run_attempts > self._max_parsing_attempts:
                        self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                        error_message = self._i18n.errors(
                            "tool_usage_exception"
                        ).format(error=e, tool=tool.name, tool_inputs=tool.description)
                        result = ToolUsageError(
                            f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
                        ).message
                        if self.task:
                            self.task.increment_tools_errors()
                        if self.agent and self.agent.verbose:
                            self._printer.print(
                                content=f"\n\n{error_message}\n", color="red"
                            )
                    else:
                        if self.task:
                            self.task.increment_tools_errors()
                        should_retry = True
            else:
                result = self._format_result(result=result)

        finally:
            if started_event_emitted:
                self.on_tool_use_finished(
                    tool=tool,
                    tool_calling=calling,
                    from_cache=from_cache,
                    started_at=started_at,
                    result=result,
                )

        # Handle retry after finally block ensures finished event was emitted
        if should_retry:
            return await self.ause(calling=calling, tool_string=tool_string)

        return result
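
The shape of this change is easy to lose in the diff: the started/finished event pair must stay balanced even when the tool raises and the call retries itself, which is why the finished event moved into a finally block and the recursive retry moved after it behind a should_retry flag. A minimal standalone sketch of the same pattern (hypothetical names, not the crewAI API):

import time

MAX_ATTEMPTS = 3


def run_with_paired_events(operation, attempts=0):
    """Sketch of the try/finally + retry-flag pattern used above.

    The finished event is emitted in `finally`, so it fires exactly once per
    started event, even on the error path. Retrying happens *after* the
    finally block, so each attempt gets its own balanced start/finish pair.
    """
    print("event: started")  # stands in for the started event emission
    started_at = time.time()
    result = None
    should_retry = False
    try:
        result = operation()
    except Exception as exc:
        if attempts + 1 >= MAX_ATTEMPTS:
            result = f"error: {exc}"  # give up; report the error as the result
        else:
            should_retry = True  # defer the retry until events are balanced
    finally:
        elapsed = time.time() - started_at
        print(f"event: finished after {elapsed:.3f}s -> {result!r}")

    if should_retry:
        return run_with_paired_events(operation, attempts + 1)
    return result


if __name__ == "__main__":
    calls = iter([RuntimeError("flaky"), "ok"])

    def flaky():
        item = next(calls)
        if isinstance(item, Exception):
            raise item
        return item

    print(run_with_paired_events(flaky))  # two balanced pairs, then "ok"

An early return from inside the try (the structure being replaced here) would still run the finally, but a return placed before the finished emission existed at all, or a recursive retry launched from inside the except, is what produced unmatched started events.
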
@@ -412,6 +426,7 @@ class ToolUsage:
        tool: CrewStructuredTool,
        calling: ToolCalling | InstructorToolCalling,
    ) -> str:
        # Repeated usage check happens before event emission - safe to return early
        if self._check_tool_repeated_usage(calling=calling):
            try:
                result = self._i18n.errors("task_repeated_usage").format(
@@ -428,6 +443,9 @@ class ToolUsage:
        if self.task:
            self.task.increment_tools_errors()

        started_at = time.time()
        started_event_emitted = False

        if self.agent:
            event_data = {
                "agent_key": self.agent.key,
@@ -446,155 +464,162 @@ class ToolUsage:
                event_data["task_name"] = self.task.name or self.task.description
                event_data["task_id"] = str(self.task.id)
            crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
            started_event_emitted = True

        from_cache = False
        result = None  # type: ignore
        should_retry = False
        available_tool = None

        try:
            if self.tools_handler and self.tools_handler.cache:
                input_str = ""
                if calling.arguments:
                    if isinstance(calling.arguments, dict):
                        input_str = json.dumps(calling.arguments)
                    else:
                        input_str = str(calling.arguments)

                result = self.tools_handler.cache.read(
                    tool=calling.tool_name, input=input_str
                )  # type: ignore
                from_cache = result is not None

            available_tool = next(
                (
                    available_tool
                    for available_tool in self.tools
                    if available_tool.name == tool.name
                ),
                None,
            )

            usage_limit_error = self._check_usage_limit(available_tool, tool.name)
            if usage_limit_error:
                result = usage_limit_error
                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                result = self._format_result(result=result)
                # Don't return early - fall through to finally block
            elif result is None:
                try:
                    if calling.tool_name in [
                        "Delegate work to coworker",
                        "Ask question to coworker",
                    ]:
                        coworker = (
                            calling.arguments.get("coworker")
                            if calling.arguments
                            else None
                        )
                        if self.task:
                            self.task.increment_delegations(coworker)

                    if calling.arguments:
                        try:
                            acceptable_args = tool.args_schema.model_json_schema()[
                                "properties"
                            ].keys()
                            arguments = {
                                k: v
                                for k, v in calling.arguments.items()
                                if k in acceptable_args
                            }
                            arguments = self._add_fingerprint_metadata(arguments)
                            result = tool.invoke(input=arguments)
                        except Exception:
                            arguments = calling.arguments
                            arguments = self._add_fingerprint_metadata(arguments)
                            result = tool.invoke(input=arguments)
                    else:
                        arguments = self._add_fingerprint_metadata({})
                        result = tool.invoke(input=arguments)

                    if self.tools_handler:
                        should_cache = True
                        if (
                            hasattr(available_tool, "cache_function")
                            and available_tool.cache_function
                        ):
                            should_cache = available_tool.cache_function(
                                calling.arguments, result
                            )

                        self.tools_handler.on_tool_use(
                            calling=calling, output=result, should_cache=should_cache
                        )

                    self._telemetry.tool_usage(
                        llm=self.function_calling_llm,
                        tool_name=tool.name,
                        attempts=self._run_attempts,
                    )
                    result = self._format_result(result=result)
                    data = {
                        "result": result,
                        "tool_name": tool.name,
                        "tool_args": calling.arguments,
                    }

                    if (
                        hasattr(available_tool, "result_as_answer")
                        and available_tool.result_as_answer
                    ):
                        result_as_answer = available_tool.result_as_answer
                        data["result_as_answer"] = result_as_answer

                    if self.agent and hasattr(self.agent, "tools_results"):
                        self.agent.tools_results.append(data)

                    if available_tool and hasattr(
                        available_tool, "current_usage_count"
                    ):
                        available_tool.current_usage_count += 1
                        if (
                            hasattr(available_tool, "max_usage_count")
                            and available_tool.max_usage_count is not None
                        ):
                            self._printer.print(
                                content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
                                color="blue",
                            )

                except Exception as e:
                    self.on_tool_error(tool=tool, tool_calling=calling, e=e)
                    self._run_attempts += 1
                    if self._run_attempts > self._max_parsing_attempts:
                        self._telemetry.tool_usage_error(llm=self.function_calling_llm)
                        error_message = self._i18n.errors(
                            "tool_usage_exception"
                        ).format(error=e, tool=tool.name, tool_inputs=tool.description)
                        result = ToolUsageError(
                            f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
                        ).message
                        if self.task:
                            self.task.increment_tools_errors()
                        if self.agent and self.agent.verbose:
                            self._printer.print(
                                content=f"\n\n{error_message}\n", color="red"
                            )
                    else:
                        if self.task:
                            self.task.increment_tools_errors()
                        should_retry = True
            else:
                result = self._format_result(result=result)

        finally:
            if started_event_emitted:
                self.on_tool_use_finished(
                    tool=tool,
                    tool_calling=calling,
                    from_cache=from_cache,
                    started_at=started_at,
                    result=result,
                )

        # Handle retry after finally block ensures finished event was emitted
        if should_retry:
            return self.use(calling=calling, tool_string=tool_string)

        return result

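The sync path mirrors the async one line for line, so the invariant the restructuring protects can be stated once for both: every started event gets exactly one finished event, across all retry attempts. A small event-counting check in the same shape (toy names, not the project's actual test code) makes that concrete:

from collections import Counter


class EventLog:
    """Toy event bus: counts started/finished emissions."""

    def __init__(self):
        self.counts = Counter()

    def emit(self, name):
        self.counts[name] += 1


def use_tool(bus, tool, max_attempts=3, attempt=0):
    """Same try/finally + should_retry shape as ToolUsage.use above."""
    bus.emit("started")
    result = None
    should_retry = False
    try:
        result = tool()
    except Exception:
        if attempt + 1 < max_attempts:
            should_retry = True
        else:
            result = "error"
    finally:
        bus.emit("finished")  # always paired with the "started" above

    if should_retry:
        return use_tool(bus, tool, max_attempts, attempt + 1)
    return result


def always_fail():
    raise RuntimeError("boom")


bus = EventLog()
assert use_tool(bus, always_fail) == "error"
# Three attempts -> three balanced pairs; a retry launched from inside the
# except block (the structure this diff removes) could skip the finish.
assert bus.counts["started"] == bus.counts["finished"] == 3

The recorded HTTP fixtures that follow back the tests exercising these paths against real model responses.
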
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
      personal goal is: Help with tasks\nTo give my best complete final answer to
      the task respond using the exact following format:\n\nThought: I now can give
      a great answer\nFinal Answer: Your final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''done''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word done.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '794'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D087GaV1OkB4Yos5MqLYqRSpLLZkV\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768923570,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: done\",\n \"refusal\": null,\n \"annotations\": []\n
        \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
        \ ],\n \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\":
        14,\n \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:30 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '446'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '472'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
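
This and the cassettes that follow are standard vcrpy recordings with sensitive headers already scrubbed to placeholder values. For orientation, a sketch of how such a fixture is typically replayed in a test (illustrative directory, filename, and test body; not this project's actual test code):

import vcr

# Replay the recorded interaction instead of hitting api.openai.com.
# record_mode="none" fails fast if the code under test makes any request
# that is not already present in the cassette.
my_vcr = vcr.VCR(
    cassette_library_dir="tests/cassettes",  # hypothetical location
    record_mode="none",
    match_on=["method", "uri"],
)


@my_vcr.use_cassette("say_done.yaml")  # hypothetical cassette filename
def test_agent_says_done():
    import openai

    client = openai.OpenAI(api_key="test-key")  # never validated on replay
    reply = client.chat.completions.create(
        model="gpt-4.1-mini",
        messages=[{"role": "user", "content": "Say 'done' and nothing else."}],
    )
    assert "done" in reply.choices[0].message.content

On replay the recorded response body is returned verbatim, which is why the scrubbed request headers above never need real credentials.
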
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
      personal goal is: Help with tasks\nTo give my best complete final answer to
      the task respond using the exact following format:\n\nThought: I now can give
      a great answer\nFinal Answer: Your final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''hi''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word hi.\nyou MUST return the actual complete content as the final answer, not
      a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '790'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D087HNU70QfEltqUwIaR3WflNQJMq\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768923571,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: hi\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:31 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '401'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '429'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''yes'' and nothing else.\n\nThis is the expected criteria for your final answer:
      The word yes.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '809'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D0876LY6Tp1gWmwQ5f2A6EsqdbLOK\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768923560,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: yes\",\n \"refusal\": null,\n \"annotations\": []\n
        \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
        \ ],\n \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\":
        14,\n \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:21 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '519'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '758'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''hello'' and nothing else.\n\nThis is the expected criteria for your final
      answer: The word hello.\nyou MUST return the actual complete content as the
      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '813'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D0874asFandwADBjb4DfArsTUyu8K\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768923558,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: hello\",\n \"refusal\": null,\n \"annotations\": []\n
        \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
        \ ],\n \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\":
        14,\n \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:18 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '478'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '497'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''ok'' and nothing else.\n\nThis is the expected criteria for your final answer:
      The word ok.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '807'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D0875A6FEJ2ZFKVHohoJdbBgKEMNx\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768923559,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: ok\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:19 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '406'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '439'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Worker. You work.\nYour
      personal goal is: Do work\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''go'' and nothing
      else.\n\nThis is the expected criteria for your final answer: The word go.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '782'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09c91Qh5LJ73NLwFrcRhThK7zNKS\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768929329,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: go\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:15:30 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '521'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '781'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Worker. You work.\nYour
      personal goal is: Do work\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''complete''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word complete.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '794'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09bYGfIe5pA04mBGuMO94KLyKhry\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768929292,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: complete\",\n \"refusal\": null,\n \"annotations\":
        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\":
        14,\n \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:14:53 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '436'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '660'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Worker. You work.\nYour
      personal goal is: Do work\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''ready'' and
      nothing else.\n\nThis is the expected criteria for your final answer: The word
      ready.\nyou MUST return the actual complete content as the final answer, not
      a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '788'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09cBIfuX53tF9rWbEKXXIr20uzSv\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768929331,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: ready\",\n \"refusal\": null,\n \"annotations\": []\n
        \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
        \ ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\":
        14,\n \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:15:32 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '517'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '1841'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,234 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are First. You go first.\nYour
      personal goal is: Be first\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''a'' and nothing
      else.\n\nThis is the expected criteria for your final answer: The letter a.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '786'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09rkgXjWfa1XwICCnLAVV3LXFlUZ\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768930296,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: a\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:31:37 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '418'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '443'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Second. You go second.\nYour
      personal goal is: Be second\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''b'' and nothing
      else.\n\nThis is the expected criteria for your final answer: The letter b.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '789'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09rlVUPgS5haPGYgA4RmW9EfPArd\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768930297,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: b\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:31:38 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '345'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '658'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,234 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Alpha. You are alpha.\nYour
personal goal is: Do alpha work\nTo give my best complete final answer to the
task respond using the exact following format:\n\nThought: I now can give a
great answer\nFinal Answer: Your final answer must be the great and the most
complete as possible, it must be outcome described.\n\nI MUST use these formats,
my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''alpha''
and nothing else.\n\nThis is the expected criteria for your final answer: The
word alpha.\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '798'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D09ri7edf1TcYqD0vAkS3IjNkai3V\",\n \"object\":
\"chat.completion\",\n \"created\": 1768930294,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
Answer: alpha\",\n \"refusal\": null,\n \"annotations\": []\n
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
\ ],\n \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\":
14,\n \"total_tokens\": 174,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 20 Jan 2026 17:31:34 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '491'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '513'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Beta. You are beta.\nYour
personal goal is: Do beta work\nTo give my best complete final answer to the
task respond using the exact following format:\n\nThought: I now can give a
great answer\nFinal Answer: Your final answer must be the great and the most
complete as possible, it must be outcome described.\n\nI MUST use these formats,
my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''beta''
and nothing else.\n\nThis is the expected criteria for your final answer: The
word beta.\nyou MUST return the actual complete content as the final answer,
not a summary.\n\nBegin! This is VERY important to you, use the tools available
and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '793'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D09rj5wiKmsX5P72qH0GEKL5pQEq6\",\n \"object\":
\"chat.completion\",\n \"created\": 1768930295,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
Answer: beta\",\n \"refusal\": null,\n \"annotations\": []\n
\ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
\ ],\n \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\":
14,\n \"total_tokens\": 174,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 20 Jan 2026 17:31:35 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '506'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '741'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
@@ -0,0 +1,234 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are First. You go first.\nYour
personal goal is: Be first\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''1'' and nothing
else.\n\nThis is the expected criteria for your final answer: The number 1.\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '786'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D09rmoYXlYGqmDh0Ca3r9xunjmE7k\",\n \"object\":
\"chat.completion\",\n \"created\": 1768930298,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
Answer: 1\",\n \"refusal\": null,\n \"annotations\": []\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\": 15,\n
\ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 20 Jan 2026 17:31:38 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '387'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '403'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Second. You go second.\nYour
personal goal is: Be second\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''2'' and nothing
else.\n\nThis is the expected criteria for your final answer: The number 2.\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '789'
content-type:
- application/json
host:
- api.openai.com
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- async:asyncio
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.83.0
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: "{\n \"id\": \"chatcmpl-D09rnDNZsxICQvSZrx5rlgMdc2Tbp\",\n \"object\":
\"chat.completion\",\n \"created\": 1768930299,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
Answer: 2\",\n \"refusal\": null,\n \"annotations\": []\n },\n
\ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
\ \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\": 15,\n
\ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Type:
- application/json
Date:
- Tue, 20 Jan 2026 17:31:39 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '560'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '581'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
180
lib/crewai/tests/events/test_event_context.py
Normal file
@@ -0,0 +1,180 @@
"""Tests for event context management."""

import pytest

from crewai.events.event_context import (
    SCOPE_ENDING_EVENTS,
    SCOPE_STARTING_EVENTS,
    VALID_EVENT_PAIRS,
    EmptyStackError,
    EventPairingError,
    MismatchBehavior,
    StackDepthExceededError,
    _event_context_config,
    EventContextConfig,
    get_current_parent_id,
    get_enclosing_parent_id,
    get_last_event_id,
    get_triggering_event_id,
    handle_empty_pop,
    handle_mismatch,
    pop_event_scope,
    push_event_scope,
    reset_last_event_id,
    set_last_event_id,
    set_triggering_event_id,
    triggered_by_scope,
)


class TestStackOperations:
    """Tests for stack push/pop operations."""

    def test_empty_stack_returns_none(self) -> None:
        assert get_current_parent_id() is None
        assert get_enclosing_parent_id() is None

    def test_push_and_get_parent(self) -> None:
        push_event_scope("event-1", "task_started")
        assert get_current_parent_id() == "event-1"

    def test_nested_push(self) -> None:
        push_event_scope("event-1", "crew_kickoff_started")
        push_event_scope("event-2", "task_started")
        assert get_current_parent_id() == "event-2"
        assert get_enclosing_parent_id() == "event-1"

    def test_pop_restores_parent(self) -> None:
        push_event_scope("event-1", "crew_kickoff_started")
        push_event_scope("event-2", "task_started")
        popped = pop_event_scope()
        assert popped == ("event-2", "task_started")
        assert get_current_parent_id() == "event-1"

    def test_pop_empty_stack_returns_none(self) -> None:
        assert pop_event_scope() is None


class TestStackDepthLimit:
    """Tests for stack depth limit."""

    def test_depth_limit_exceeded_raises(self) -> None:
        _event_context_config.set(EventContextConfig(max_stack_depth=3))

        push_event_scope("event-1", "type-1")
        push_event_scope("event-2", "type-2")
        push_event_scope("event-3", "type-3")

        with pytest.raises(StackDepthExceededError):
            push_event_scope("event-4", "type-4")


class TestMismatchHandling:
    """Tests for mismatch behavior."""

    def test_handle_mismatch_raises_when_configured(self) -> None:
        _event_context_config.set(
            EventContextConfig(mismatch_behavior=MismatchBehavior.RAISE)
        )

        with pytest.raises(EventPairingError):
            handle_mismatch("task_completed", "llm_call_started", "task_started")

    def test_handle_empty_pop_raises_when_configured(self) -> None:
        _event_context_config.set(
            EventContextConfig(empty_pop_behavior=MismatchBehavior.RAISE)
        )

        with pytest.raises(EmptyStackError):
            handle_empty_pop("task_completed")


class TestEventTypeSets:
    """Tests for event type set completeness."""

    def test_all_ending_events_have_pairs(self) -> None:
        for ending_event in SCOPE_ENDING_EVENTS:
            assert ending_event in VALID_EVENT_PAIRS

    def test_all_pairs_reference_starting_events(self) -> None:
        for ending_event, starting_event in VALID_EVENT_PAIRS.items():
            assert starting_event in SCOPE_STARTING_EVENTS

    def test_starting_and_ending_are_disjoint(self) -> None:
        overlap = SCOPE_STARTING_EVENTS & SCOPE_ENDING_EVENTS
        assert not overlap


class TestLastEventIdTracking:
    """Tests for linear chain event ID tracking."""

    def test_initial_last_event_id_is_none(self) -> None:
        reset_last_event_id()
        assert get_last_event_id() is None

    def test_set_and_get_last_event_id(self) -> None:
        reset_last_event_id()
        set_last_event_id("event-123")
        assert get_last_event_id() == "event-123"

    def test_reset_clears_last_event_id(self) -> None:
        set_last_event_id("event-123")
        reset_last_event_id()
        assert get_last_event_id() is None

    def test_overwrite_last_event_id(self) -> None:
        reset_last_event_id()
        set_last_event_id("event-1")
        set_last_event_id("event-2")
        assert get_last_event_id() == "event-2"


class TestTriggeringEventIdTracking:
    """Tests for causal chain event ID tracking."""

    def test_initial_triggering_event_id_is_none(self) -> None:
        set_triggering_event_id(None)
        assert get_triggering_event_id() is None

    def test_set_and_get_triggering_event_id(self) -> None:
        set_triggering_event_id("trigger-123")
        assert get_triggering_event_id() == "trigger-123"
        set_triggering_event_id(None)

    def test_set_none_clears_triggering_event_id(self) -> None:
        set_triggering_event_id("trigger-123")
        set_triggering_event_id(None)
        assert get_triggering_event_id() is None


class TestTriggeredByScope:
    """Tests for triggered_by_scope context manager."""

    def test_scope_sets_triggering_id(self) -> None:
        set_triggering_event_id(None)
        with triggered_by_scope("trigger-456"):
            assert get_triggering_event_id() == "trigger-456"

    def test_scope_restores_previous_value(self) -> None:
        set_triggering_event_id(None)
        with triggered_by_scope("trigger-456"):
            pass
        assert get_triggering_event_id() is None

    def test_nested_scopes(self) -> None:
        set_triggering_event_id(None)
        with triggered_by_scope("outer"):
            assert get_triggering_event_id() == "outer"
            with triggered_by_scope("inner"):
                assert get_triggering_event_id() == "inner"
            assert get_triggering_event_id() == "outer"
        assert get_triggering_event_id() is None

    def test_scope_restores_on_exception(self) -> None:
        set_triggering_event_id(None)
        try:
            with triggered_by_scope("trigger-789"):
                raise ValueError("test error")
        except ValueError:
            pass
        assert get_triggering_event_id() is None
1649
lib/crewai/tests/events/test_event_ordering.py
Normal file
File diff suppressed because it is too large
@@ -304,6 +304,11 @@ def test_external_memory_search_events(
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"query": "test value",
"limit": 3,
"score_threshold": 0.35,
@@ -321,6 +326,11 @@ def test_external_memory_search_events(
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": ANY,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"query": "test value",
"results": [],
"limit": 3,
@@ -376,6 +386,11 @@ def test_external_memory_save_events(
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"value": "saving value",
"metadata": {"task": "test_task"},
}
@@ -392,6 +407,11 @@ def test_external_memory_save_events(
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": ANY,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"value": "saving value",
"metadata": {"task": "test_task"},
"save_time_ms": ANY,

@@ -70,6 +70,11 @@ def test_long_term_memory_save_events(long_term_memory):
"from_agent": None,
"agent_role": "test_agent",
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"value": "test_task",
"metadata": {"task": "test_task", "quality": 0.5},
}
@@ -85,6 +90,11 @@ def test_long_term_memory_save_events(long_term_memory):
"from_agent": None,
"agent_role": "test_agent",
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"value": "test_task",
"metadata": {
"task": "test_task",
@@ -139,6 +149,11 @@ def test_long_term_memory_search_events(long_term_memory):
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"query": "test query",
"limit": 5,
"score_threshold": None,
@@ -156,6 +171,11 @@ def test_long_term_memory_search_events(long_term_memory):
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": ANY,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"query": "test query",
"results": None,
"limit": 5,

@@ -81,6 +81,11 @@ def test_short_term_memory_search_events(short_term_memory):
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"query": "test value",
"limit": 3,
"score_threshold": 0.35,
@@ -98,6 +103,11 @@ def test_short_term_memory_search_events(short_term_memory):
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"query": "test value",
"results": [],
"limit": 3,
@@ -150,6 +160,11 @@ def test_short_term_memory_save_events(short_term_memory):
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"value": "test value",
"metadata": {"task": "test_task"},
}
@@ -166,6 +181,11 @@ def test_short_term_memory_save_events(short_term_memory):
"from_agent": None,
"agent_role": None,
"agent_id": None,
"event_id": ANY,
"parent_event_id": None,
"previous_event_id": ANY,
"triggered_by_event_id": None,
"emission_sequence": ANY,
"value": "test value",
"metadata": {"task": "test_task"},
"save_time_ms": ANY,

@@ -1204,7 +1204,8 @@ def test_complex_and_or_branching():

# Final should be after both 2a and 2b
assert execution_order[-1] == "final"
# Note: we don't assert final is last because branch_1c has no downstream
# dependencies and can complete after final due to parallel execution
assert execution_order.index("final") > execution_order.index("branch_2a")
assert execution_order.index("final") > execution_order.index("branch_2b")
21
uv.lock
generated
@@ -1251,7 +1251,7 @@ requires-dist = [
{ name = "json5", specifier = "~=0.10.0" },
{ name = "jsonref", specifier = "~=1.1.0" },
{ name = "litellm", marker = "extra == 'litellm'", specifier = "~=1.74.9" },
{ name = "mcp", specifier = "~=1.16.0" },
{ name = "mcp", specifier = "~=1.23.1" },
{ name = "mem0ai", marker = "extra == 'mem0'", specifier = "~=0.1.94" },
{ name = "openai", specifier = "~=1.83.0" },
{ name = "openpyxl", specifier = "~=3.1.5" },
@@ -1264,7 +1264,7 @@ requires-dist = [
{ name = "portalocker", specifier = "~=2.7.0" },
{ name = "pydantic", specifier = "~=2.11.9" },
{ name = "pydantic-settings", specifier = "~=2.10.1" },
{ name = "pyjwt", specifier = "~=2.9.0" },
{ name = "pyjwt", specifier = ">=2.9.0,<3" },
{ name = "python-dotenv", specifier = "~=1.1.1" },
{ name = "qdrant-client", extras = ["fastembed"], marker = "extra == 'qdrant'", specifier = "~=1.14.3" },
{ name = "regex", specifier = "~=2024.9.11" },
@@ -3709,7 +3709,7 @@ wheels = [

[[package]]
name = "mcp"
version = "1.16.0"
version = "1.23.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -3718,15 +3718,18 @@ dependencies = [
{ name = "jsonschema" },
{ name = "pydantic" },
{ name = "pydantic-settings" },
{ name = "pyjwt", extra = ["crypto"] },
{ name = "python-multipart" },
{ name = "pywin32", marker = "sys_platform == 'win32'" },
{ name = "sse-starlette" },
{ name = "starlette" },
{ name = "typing-extensions" },
{ name = "typing-inspection" },
{ name = "uvicorn", marker = "sys_platform != 'emscripten'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3d/a1/b1f328da3b153683d2ec34f849b4b6eac2790fb240e3aef06ff2fab3df9d/mcp-1.16.0.tar.gz", hash = "sha256:39b8ca25460c578ee2cdad33feeea122694cfdf73eef58bee76c42f6ef0589df", size = 472918, upload-time = "2025-10-02T16:58:20.631Z" }
sdist = { url = "https://files.pythonhosted.org/packages/a7/a4/d06a303f45997e266f2c228081abe299bbcba216cb806128e2e49095d25f/mcp-1.23.3.tar.gz", hash = "sha256:b3b0da2cc949950ce1259c7bfc1b081905a51916fcd7c8182125b85e70825201", size = 600697, upload-time = "2025-12-09T16:04:37.351Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c9/0e/7cebc88e17daf94ebe28c95633af595ccb2864dc2ee7abd75542d98495cc/mcp-1.16.0-py3-none-any.whl", hash = "sha256:ec917be9a5d31b09ba331e1768aa576e0af45470d657a0319996a20a57d7d633", size = 167266, upload-time = "2025-10-02T16:58:19.039Z" },
{ url = "https://files.pythonhosted.org/packages/32/c6/13c1a26b47b3f3a3b480783001ada4268917c9f42d78a079c336da2e75e5/mcp-1.23.3-py3-none-any.whl", hash = "sha256:32768af4b46a1b4f7df34e2bfdf5c6011e7b63d7f1b0e321d0fdef4cd6082031", size = 231570, upload-time = "2025-12-09T16:04:35.56Z" },
]

[[package]]
@@ -5962,11 +5965,11 @@ wheels = [

[[package]]
name = "pyjwt"
version = "2.9.0"
version = "2.10.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/fb/68/ce067f09fca4abeca8771fe667d89cc347d1e99da3e093112ac329c6020e/pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c", size = 78825, upload-time = "2024-08-01T15:01:08.445Z" }
sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/79/84/0fdf9b18ba31d69877bd39c9cd6052b47f3761e9910c15de788e519f079f/PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850", size = 22344, upload-time = "2024-08-01T15:01:06.481Z" },
{ url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" },
]

[package.optional-dependencies]
@@ -7604,12 +7607,14 @@ dependencies = [
]
sdist = { url = "https://files.pythonhosted.org/packages/be/f9/5e4491e5ccf42f5d9cfc663741d261b3e6e1683ae7812114e7636409fcc6/sqlalchemy-2.0.45.tar.gz", hash = "sha256:1632a4bda8d2d25703fdad6363058d882541bdaaee0e5e3ddfa0cd3229efce88", size = 9869912, upload-time = "2025-12-09T21:05:16.737Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/fe/70/75b1387d72e2847220441166c5eb4e9846dd753895208c13e6d66523b2d9/sqlalchemy-2.0.45-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c64772786d9eee72d4d3784c28f0a636af5b0a29f3fe26ff11f55efe90c0bd85", size = 2154148, upload-time = "2025-12-10T20:03:21.023Z" },
{ url = "https://files.pythonhosted.org/packages/d8/a4/7805e02323c49cb9d1ae5cd4913b28c97103079765f520043f914fca4cb3/sqlalchemy-2.0.45-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7ae64ebf7657395824a19bca98ab10eb9a3ecb026bf09524014f1bb81cb598d4", size = 3233051, upload-time = "2025-12-09T22:06:04.768Z" },
{ url = "https://files.pythonhosted.org/packages/d7/ec/32ae09139f61bef3de3142e85c47abdee8db9a55af2bb438da54a4549263/sqlalchemy-2.0.45-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0f02325709d1b1a1489f23a39b318e175a171497374149eae74d612634b234c0", size = 3232781, upload-time = "2025-12-09T22:09:54.435Z" },
{ url = "https://files.pythonhosted.org/packages/ad/bd/bf7b869b6f5585eac34222e1cf4405f4ba8c3b85dd6b1af5d4ce8bca695f/sqlalchemy-2.0.45-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2c3684fca8a05f0ac1d9a21c1f4a266983a7ea9180efb80ffeb03861ecd01a0", size = 3182096, upload-time = "2025-12-09T22:06:06.169Z" },
{ url = "https://files.pythonhosted.org/packages/21/6a/c219720a241bb8f35c88815ccc27761f5af7fdef04b987b0e8a2c1a6dcaa/sqlalchemy-2.0.45-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:040f6f0545b3b7da6b9317fc3e922c9a98fc7243b2a1b39f78390fc0942f7826", size = 3205109, upload-time = "2025-12-09T22:09:55.969Z" },
{ url = "https://files.pythonhosted.org/packages/bd/c4/6ccf31b2bc925d5d95fab403ffd50d20d7c82b858cf1a4855664ca054dce/sqlalchemy-2.0.45-cp310-cp310-win32.whl", hash = "sha256:830d434d609fe7bfa47c425c445a8b37929f140a7a44cdaf77f6d34df3a7296a", size = 2114240, upload-time = "2025-12-09T21:29:54.007Z" },
{ url = "https://files.pythonhosted.org/packages/de/29/a27a31fca07316def418db6f7c70ab14010506616a2decef1906050a0587/sqlalchemy-2.0.45-cp310-cp310-win_amd64.whl", hash = "sha256:0209d9753671b0da74da2cfbb9ecf9c02f72a759e4b018b3ab35f244c91842c7", size = 2137615, upload-time = "2025-12-09T21:29:55.85Z" },
{ url = "https://files.pythonhosted.org/packages/a2/1c/769552a9d840065137272ebe86ffbb0bc92b0f1e0a68ee5266a225f8cd7b/sqlalchemy-2.0.45-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e90a344c644a4fa871eb01809c32096487928bd2038bf10f3e4515cb688cc56", size = 2153860, upload-time = "2025-12-10T20:03:23.843Z" },
{ url = "https://files.pythonhosted.org/packages/f3/f8/9be54ff620e5b796ca7b44670ef58bc678095d51b0e89d6e3102ea468216/sqlalchemy-2.0.45-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b8c8b41b97fba5f62349aa285654230296829672fc9939cd7f35aab246d1c08b", size = 3309379, upload-time = "2025-12-09T22:06:07.461Z" },
{ url = "https://files.pythonhosted.org/packages/f6/2b/60ce3ee7a5ae172bfcd419ce23259bb874d2cddd44f67c5df3760a1e22f9/sqlalchemy-2.0.45-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:12c694ed6468333a090d2f60950e4250b928f457e4962389553d6ba5fe9951ac", size = 3309948, upload-time = "2025-12-09T22:09:57.643Z" },
{ url = "https://files.pythonhosted.org/packages/a3/42/bac8d393f5db550e4e466d03d16daaafd2bad1f74e48c12673fb499a7fc1/sqlalchemy-2.0.45-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:f7d27a1d977a1cfef38a0e2e1ca86f09c4212666ce34e6ae542f3ed0a33bc606", size = 3261239, upload-time = "2025-12-09T22:06:08.879Z" },