Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-27 17:18:13 +00:00

Merge branch 'main' into gl/feat/native-multimodal-files

conftest.py (25 changed lines)
@@ -1,6 +1,7 @@
 """Pytest configuration for crewAI workspace."""

 from collections.abc import Generator
+import gzip
 import os
 from pathlib import Path
 import tempfile
@@ -31,6 +32,21 @@ def cleanup_event_handlers() -> Generator[None, Any, None]:
         pass


+@pytest.fixture(autouse=True, scope="function")
+def reset_event_state() -> None:
+    """Reset event system state before each test for isolation."""
+    from crewai.events.base_events import reset_emission_counter
+    from crewai.events.event_context import (
+        EventContextConfig,
+        _event_context_config,
+        _event_id_stack,
+    )
+
+    reset_emission_counter()
+    _event_id_stack.set(())
+    _event_context_config.set(EventContextConfig())
+
+
 @pytest.fixture(autouse=True, scope="function")
 def setup_test_environment() -> Generator[None, Any, None]:
     """Setup test environment for crewAI workspace."""
@@ -138,9 +154,14 @@ def _filter_request_headers(request: Request) -> Request:  # type: ignore[no-any]


 def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any]:
     """Filter sensitive headers from response before recording."""
-    # Remove Content-Encoding to prevent decompression issues on replay
     for encoding_header in ["Content-Encoding", "content-encoding"]:
-        response["headers"].pop(encoding_header, None)
+        if encoding_header in response["headers"]:
+            encoding = response["headers"].pop(encoding_header)
+            if encoding and encoding[0] == "gzip":
+                body = response.get("body", {}).get("string", b"")
+                if isinstance(body, bytes) and body.startswith(b"\x1f\x8b"):
+                    response["body"]["string"] = gzip.decompress(body).decode("utf-8")

     for header_name, replacement in HEADERS_TO_FILTER.items():
         for variant in [header_name, header_name.upper(), header_name.title()]:
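The replay fix above sniffs for the gzip magic bytes before decompressing. A minimal standalone sketch of that check (standard library only; values are illustrative):

import gzip

body = gzip.compress(b'{"ok": true}')
assert body.startswith(b"\x1f\x8b")  # same magic-byte sniff as the hunk above
print(gzip.decompress(body).decode("utf-8"))  # {"ok": true}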

pyproject.toml
@@ -25,7 +25,7 @@ dependencies = [
     "openpyxl~=3.1.5",
     # Authentication and Security
     "python-dotenv~=1.1.1",
-    "pyjwt~=2.9.0",
+    "pyjwt>=2.9.0,<3",
     # Configuration and Utils
     "click~=8.1.7",
     "appdirs~=1.4.4",
@@ -36,7 +36,7 @@ dependencies = [
     "json5~=0.10.0",
     "portalocker~=2.7.0",
     "pydantic-settings~=2.10.1",
-    "mcp~=1.16.0",
+    "mcp~=1.23.1",
     "uv~=0.9.13",
     "aiosqlite~=0.21.0",
 ]
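The pyjwt change widens the pin rather than bumping it: under PEP 440, "pyjwt~=2.9.0" means ">=2.9.0,<2.10", while ">=2.9.0,<3" accepts any 2.x release from 2.9 onward. A quick check, assuming the packaging library is available:

from packaging.specifiers import SpecifierSet

assert "2.12.0" in SpecifierSet(">=2.9.0,<3")   # accepted by the new pin
assert "2.12.0" not in SpecifierSet("~=2.9.0")  # rejected by the old pin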

(crewai.a2a types module)
@@ -14,15 +14,25 @@ from typing import (
 from pydantic import BeforeValidator, HttpUrl, TypeAdapter
 from typing_extensions import NotRequired

-from crewai.a2a.updates import (
-    PollingConfig,
-    PollingHandler,
-    PushNotificationConfig,
-    PushNotificationHandler,
-    StreamingConfig,
-    StreamingHandler,
-    UpdateConfig,
-)
+try:
+    from crewai.a2a.updates import (
+        PollingConfig,
+        PollingHandler,
+        PushNotificationConfig,
+        PushNotificationHandler,
+        StreamingConfig,
+        StreamingHandler,
+        UpdateConfig,
+    )
+except ImportError:
+    PollingConfig = Any  # type: ignore[misc,assignment]
+    PollingHandler = Any  # type: ignore[misc,assignment]
+    PushNotificationConfig = Any  # type: ignore[misc,assignment]
+    PushNotificationHandler = Any  # type: ignore[misc,assignment]
+    StreamingConfig = Any  # type: ignore[misc,assignment]
+    StreamingHandler = Any  # type: ignore[misc,assignment]
+    UpdateConfig = Any  # type: ignore[misc,assignment]

 TransportType = Literal["JSONRPC", "GRPC", "HTTP+JSON"]
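Binding each name to Any on ImportError is the usual optional-dependency guard: the module still imports, and annotations referencing these names still evaluate, when the a2a extra is not installed. A minimal sketch of the pattern with a hypothetical extra:

from typing import Any

try:
    from some_optional_extra import PollingConfig  # hypothetical optional package
except ImportError:
    PollingConfig = Any  # annotations below still resolve without the extra


def configure(cfg: PollingConfig) -> None:
    ...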

(crewai.a2a delegation helpers)
@@ -251,30 +251,48 @@ async def aexecute_a2a_delegation(
     if turn_number is None:
         turn_number = len([m for m in conversation_history if m.role == Role.user]) + 1

-    result = await _aexecute_a2a_delegation_impl(
-        endpoint=endpoint,
-        auth=auth,
-        timeout=timeout,
-        task_description=task_description,
-        context=context,
-        context_id=context_id,
-        task_id=task_id,
-        reference_task_ids=reference_task_ids,
-        metadata=metadata,
-        extensions=extensions,
-        conversation_history=conversation_history,
-        is_multiturn=is_multiturn,
-        turn_number=turn_number,
-        agent_branch=agent_branch,
-        agent_id=agent_id,
-        agent_role=agent_role,
-        response_model=response_model,
-        updates=updates,
-        transport_protocol=transport_protocol,
-        from_task=from_task,
-        from_agent=from_agent,
-        skill_id=skill_id,
-    )
+    try:
+        result = await _aexecute_a2a_delegation_impl(
+            endpoint=endpoint,
+            auth=auth,
+            timeout=timeout,
+            task_description=task_description,
+            context=context,
+            context_id=context_id,
+            task_id=task_id,
+            reference_task_ids=reference_task_ids,
+            metadata=metadata,
+            extensions=extensions,
+            conversation_history=conversation_history,
+            is_multiturn=is_multiturn,
+            turn_number=turn_number,
+            agent_branch=agent_branch,
+            agent_id=agent_id,
+            agent_role=agent_role,
+            response_model=response_model,
+            updates=updates,
+            transport_protocol=transport_protocol,
+            from_task=from_task,
+            from_agent=from_agent,
+            skill_id=skill_id,
+        )
+    except Exception as e:
+        crewai_event_bus.emit(
+            agent_branch,
+            A2ADelegationCompletedEvent(
+                status="failed",
+                result=None,
+                error=str(e),
+                context_id=context_id,
+                is_multiturn=is_multiturn,
+                endpoint=endpoint,
+                metadata=metadata,
+                extensions=list(extensions.keys()) if extensions else None,
+                from_task=from_task,
+                from_agent=from_agent,
+            ),
+        )
+        raise

 agent_card_data: dict[str, Any] = result.get("agent_card") or {}
 crewai_event_bus.emit(
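The failure path here is emit-then-raise: listeners (tracing, console output) observe an A2ADelegationCompletedEvent with status="failed" and the error string, while the bare raise preserves the original exception and traceback for the caller. Contrast this with the memory-retrieval hunks further down, which emit a failure event and then swallow the error.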

(Agent class module)
@@ -14,7 +14,14 @@ from typing import (
 )
 from urllib.parse import urlparse

-from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
+from pydantic import (
+    BaseModel,
+    ConfigDict,
+    Field,
+    InstanceOf,
+    PrivateAttr,
+    model_validator,
+)
 from typing_extensions import Self

 from crewai.agent.utils import (
@@ -46,6 +53,7 @@ from crewai.events.types.knowledge_events import (
 )
 from crewai.events.types.memory_events import (
     MemoryRetrievalCompletedEvent,
+    MemoryRetrievalFailedEvent,
     MemoryRetrievalStartedEvent,
 )
 from crewai.experimental.agent_executor import AgentExecutor
@@ -85,17 +93,10 @@ from crewai.utilities.token_counter_callback import TokenCalcHandler
 from crewai.utilities.training_handler import CrewTrainingHandler


-try:
-    from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
-except ImportError:
-    A2AClientConfig = Any  # type: ignore[assignment,misc]
-    A2AConfig = Any  # type: ignore[assignment,misc]
-    A2AServerConfig = Any  # type: ignore[assignment,misc]
-
-
 if TYPE_CHECKING:
     from crewai_tools import CodeInterpreterTool

+    from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
     from crewai.agents.agent_builder.base_agent import PlatformAppOrAction
     from crewai.task import Task
     from crewai.tools.base_tool import BaseTool
@@ -141,6 +142,8 @@ class Agent(BaseAgent):
         mcps: List of MCP server references for tool integration.
     """

+    model_config = ConfigDict()
+
     _times_executed: int = PrivateAttr(default=0)
     _mcp_clients: list[Any] = PrivateAttr(default_factory=list)
     _last_messages: list[LLMMessage] = PrivateAttr(default_factory=list)
@@ -355,30 +358,43 @@ class Agent(BaseAgent):
         )

         start_time = time.time()
+        memory = ""

-        contextual_memory = ContextualMemory(
-            self.crew._short_term_memory,
-            self.crew._long_term_memory,
-            self.crew._entity_memory,
-            self.crew._external_memory,
-            agent=self,
-            task=task,
-        )
-        memory = contextual_memory.build_context_for_task(task, context or "")
-        if memory.strip() != "":
-            task_prompt += self.i18n.slice("memory").format(memory=memory)
-
-        crewai_event_bus.emit(
-            self,
-            event=MemoryRetrievalCompletedEvent(
-                task_id=str(task.id) if task else None,
-                memory_content=memory,
-                retrieval_time_ms=(time.time() - start_time) * 1000,
-                source_type="agent",
-                from_agent=self,
-                from_task=task,
-            ),
-        )
+        try:
+            contextual_memory = ContextualMemory(
+                self.crew._short_term_memory,
+                self.crew._long_term_memory,
+                self.crew._entity_memory,
+                self.crew._external_memory,
+                agent=self,
+                task=task,
+            )
+            memory = contextual_memory.build_context_for_task(task, context or "")
+            if memory.strip() != "":
+                task_prompt += self.i18n.slice("memory").format(memory=memory)
+
+            crewai_event_bus.emit(
+                self,
+                event=MemoryRetrievalCompletedEvent(
+                    task_id=str(task.id) if task else None,
+                    memory_content=memory,
+                    retrieval_time_ms=(time.time() - start_time) * 1000,
+                    source_type="agent",
+                    from_agent=self,
+                    from_task=task,
+                ),
+            )
+        except Exception as e:
+            crewai_event_bus.emit(
+                self,
+                event=MemoryRetrievalFailedEvent(
+                    task_id=str(task.id) if task else None,
+                    source_type="agent",
+                    from_agent=self,
+                    from_task=task,
+                    error=str(e),
+                ),
+            )

         knowledge_config = get_knowledge_config(self)
         task_prompt = handle_knowledge_retrieval(
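Note the shape of this change: memory is pre-initialized to "" and the except block emits MemoryRetrievalFailedEvent without re-raising, so a failing memory store degrades to "no memory context" for the task instead of aborting it. The async variant below mirrors this exactly.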
@@ -564,32 +580,45 @@ class Agent(BaseAgent):
         )

         start_time = time.time()
+        memory = ""

-        contextual_memory = ContextualMemory(
-            self.crew._short_term_memory,
-            self.crew._long_term_memory,
-            self.crew._entity_memory,
-            self.crew._external_memory,
-            agent=self,
-            task=task,
-        )
-        memory = await contextual_memory.abuild_context_for_task(
-            task, context or ""
-        )
-        if memory.strip() != "":
-            task_prompt += self.i18n.slice("memory").format(memory=memory)
-
-        crewai_event_bus.emit(
-            self,
-            event=MemoryRetrievalCompletedEvent(
-                task_id=str(task.id) if task else None,
-                memory_content=memory,
-                retrieval_time_ms=(time.time() - start_time) * 1000,
-                source_type="agent",
-                from_agent=self,
-                from_task=task,
-            ),
-        )
+        try:
+            contextual_memory = ContextualMemory(
+                self.crew._short_term_memory,
+                self.crew._long_term_memory,
+                self.crew._entity_memory,
+                self.crew._external_memory,
+                agent=self,
+                task=task,
+            )
+            memory = await contextual_memory.abuild_context_for_task(
+                task, context or ""
+            )
+            if memory.strip() != "":
+                task_prompt += self.i18n.slice("memory").format(memory=memory)
+
+            crewai_event_bus.emit(
+                self,
+                event=MemoryRetrievalCompletedEvent(
+                    task_id=str(task.id) if task else None,
+                    memory_content=memory,
+                    retrieval_time_ms=(time.time() - start_time) * 1000,
+                    source_type="agent",
+                    from_agent=self,
+                    from_task=task,
+                ),
+            )
+        except Exception as e:
+            crewai_event_bus.emit(
+                self,
+                event=MemoryRetrievalFailedEvent(
+                    task_id=str(task.id) if task else None,
+                    source_type="agent",
+                    from_agent=self,
+                    from_task=task,
+                    error=str(e),
+                ),
+            )

         knowledge_config = get_knowledge_config(self)
         task_prompt = await ahandle_knowledge_retrieval(
@@ -2040,3 +2069,22 @@ class Agent(BaseAgent):
             ),
         )
         raise
+
+
+# Rebuild Agent model to resolve A2A type forward references
+try:
+    from crewai.a2a.config import (
+        A2AClientConfig as _A2AClientConfig,
+        A2AConfig as _A2AConfig,
+        A2AServerConfig as _A2AServerConfig,
+    )
+
+    Agent.model_rebuild(
+        _types_namespace={
+            "A2AConfig": _A2AConfig,
+            "A2AClientConfig": _A2AClientConfig,
+            "A2AServerConfig": _A2AServerConfig,
+        }
+    )
+except ImportError:
+    pass
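Because the A2A config imports now live under TYPE_CHECKING, the Agent model carries unresolved forward references; pydantic v2 tolerates that until validation, so this module-level model_rebuild supplies the real types whenever the a2a extra is importable. A minimal sketch of the same pattern with stand-in names (mypkg.extras is hypothetical):

from typing import TYPE_CHECKING

from pydantic import BaseModel

if TYPE_CHECKING:
    from mypkg.extras import ExtraConfig  # hypothetical optional type


class Thing(BaseModel):
    extra: "ExtraConfig | None" = None


try:
    from mypkg.extras import ExtraConfig as _ExtraConfig

    Thing.model_rebuild(_types_namespace={"ExtraConfig": _ExtraConfig})
except ImportError:
    pass  # without the extra, instantiating Thing would raise until rebuilt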

(crew kickoff preparation utilities)
@@ -228,9 +228,15 @@ def prepare_kickoff(
     Returns:
         The potentially modified inputs dictionary after before callbacks.
     """
+    from crewai.events.base_events import reset_emission_counter
     from crewai.events.event_bus import crewai_event_bus
+    from crewai.events.event_context import get_current_parent_id, reset_last_event_id
     from crewai.events.types.crew_events import CrewKickoffStartedEvent

+    if get_current_parent_id() is None:
+        reset_emission_counter()
+        reset_last_event_id()
+
     # Normalize inputs to dict[str, Any] for internal processing
     normalized: dict[str, Any] | None = dict(inputs) if inputs is not None else None
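The get_current_parent_id() guard makes the reset root-only: a crew kicked off inside a flow (or another crew) runs with a non-empty event scope and keeps the parent's numbering and event chain. Using the event_context helpers introduced in this commit:

from crewai.events.event_context import get_current_parent_id, push_event_scope

assert get_current_parent_id() is None       # root invocation: counters reset
push_event_scope("evt-1", "crew_kickoff_started")
assert get_current_parent_id() == "evt-1"    # nested invocation: reset skipped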

(crewai.events package exports)
@@ -75,6 +75,7 @@ from crewai.events.types.memory_events import (
     MemoryQueryFailedEvent,
     MemoryQueryStartedEvent,
     MemoryRetrievalCompletedEvent,
+    MemoryRetrievalFailedEvent,
     MemoryRetrievalStartedEvent,
     MemorySaveCompletedEvent,
     MemorySaveFailedEvent,
@@ -174,6 +175,7 @@ __all__ = [
     "MemoryQueryFailedEvent",
     "MemoryQueryStartedEvent",
     "MemoryRetrievalCompletedEvent",
+    "MemoryRetrievalFailedEvent",
     "MemoryRetrievalStartedEvent",
     "MemorySaveCompletedEvent",
     "MemorySaveFailedEvent",

crewai/events/base_events.py
@@ -1,9 +1,46 @@
+from collections.abc import Iterator
+import contextvars
 from datetime import datetime, timezone
+import itertools
 from typing import Any
+import uuid

 from pydantic import BaseModel, Field

-from crewai.utilities.serialization import to_serializable
+from crewai.utilities.serialization import Serializable, to_serializable
+
+
+_emission_counter: contextvars.ContextVar[Iterator[int]] = contextvars.ContextVar(
+    "_emission_counter"
+)
+
+
+def _get_or_create_counter() -> Iterator[int]:
+    """Get the emission counter for the current context, creating if needed."""
+    try:
+        return _emission_counter.get()
+    except LookupError:
+        counter: Iterator[int] = itertools.count(start=1)
+        _emission_counter.set(counter)
+        return counter
+
+
+def get_next_emission_sequence() -> int:
+    """Get the next emission sequence number.
+
+    Returns:
+        The next sequence number.
+    """
+    return next(_get_or_create_counter())
+
+
+def reset_emission_counter() -> None:
+    """Reset the emission sequence counter to 1.
+
+    Resets for the current context only.
+    """
+    counter: Iterator[int] = itertools.count(start=1)
+    _emission_counter.set(counter)


 class BaseEvent(BaseModel):
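The counter lives in a ContextVar without a default, so the first get_next_emission_sequence() in a fresh context (for example, a new thread) hits LookupError and lazily creates an independent count. A small sketch of those semantics:

import threading

from crewai.events.base_events import (
    get_next_emission_sequence,
    reset_emission_counter,
)

reset_emission_counter()
print(get_next_emission_sequence())  # 1
print(get_next_emission_sequence())  # 2

results: list[int] = []
t = threading.Thread(target=lambda: results.append(get_next_emission_sequence()))
t.start()
t.join()
print(results)  # [1] -- the new thread's context had no counter yet

print(get_next_emission_sequence())  # 3 -- main context unaffected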
@@ -22,7 +59,13 @@ class BaseEvent(BaseModel):
     agent_id: str | None = None
     agent_role: str | None = None

-    def to_json(self, exclude: set[str] | None = None):
+    event_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
+    parent_event_id: str | None = None
+    previous_event_id: str | None = None
+    triggered_by_event_id: str | None = None
+    emission_sequence: int | None = None
+
+    def to_json(self, exclude: set[str] | None = None) -> Serializable:
         """
         Converts the event to a JSON-serializable dictionary.

@@ -34,13 +77,13 @@ class BaseEvent(BaseModel):
         """
         return to_serializable(self, exclude=exclude)

-    def _set_task_params(self, data: dict[str, Any]):
+    def _set_task_params(self, data: dict[str, Any]) -> None:
         if "from_task" in data and (task := data["from_task"]):
             self.task_id = str(task.id)
             self.task_name = task.name or task.description
             self.from_task = None

-    def _set_agent_params(self, data: dict[str, Any]):
+    def _set_agent_params(self, data: dict[str, Any]) -> None:
         task = data.get("from_task", None)
         agent = task.agent if task else data.get("from_agent", None)

crewai/events/event_bus.py
@@ -16,8 +16,22 @@ from typing import Any, Final, ParamSpec, TypeVar

 from typing_extensions import Self

-from crewai.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent, get_next_emission_sequence
 from crewai.events.depends import Depends
+from crewai.events.event_context import (
+    SCOPE_ENDING_EVENTS,
+    SCOPE_STARTING_EVENTS,
+    VALID_EVENT_PAIRS,
+    get_current_parent_id,
+    get_enclosing_parent_id,
+    get_last_event_id,
+    get_triggering_event_id,
+    handle_empty_pop,
+    handle_mismatch,
+    pop_event_scope,
+    push_event_scope,
+    set_last_event_id,
+)
 from crewai.events.handler_graph import build_execution_plan
 from crewai.events.types.event_bus_types import (
     AsyncHandler,
@@ -69,6 +83,8 @@ class CrewAIEventsBus:
     _execution_plan_cache: dict[type[BaseEvent], ExecutionPlan]
     _console: ConsoleFormatter
     _shutting_down: bool
+    _pending_futures: set[Future[Any]]
+    _futures_lock: threading.Lock

     def __new__(cls) -> Self:
         """Create or return the singleton instance.
@@ -91,6 +107,8 @@ class CrewAIEventsBus:
         """
         self._shutting_down = False
         self._rwlock = RWLock()
+        self._pending_futures: set[Future[Any]] = set()
+        self._futures_lock = threading.Lock()
         self._sync_handlers: dict[type[BaseEvent], SyncHandlerSet] = {}
         self._async_handlers: dict[type[BaseEvent], AsyncHandlerSet] = {}
         self._handler_dependencies: dict[
@@ -111,6 +129,25 @@ class CrewAIEventsBus:
         )
         self._loop_thread.start()

+    def _track_future(self, future: Future[Any]) -> Future[Any]:
+        """Track a future and set up automatic cleanup when it completes.
+
+        Args:
+            future: The future to track
+
+        Returns:
+            The same future for chaining
+        """
+        with self._futures_lock:
+            self._pending_futures.add(future)
+
+        def _cleanup(f: Future[Any]) -> None:
+            with self._futures_lock:
+                self._pending_futures.discard(f)
+
+        future.add_done_callback(_cleanup)
+        return future
+
     def _run_loop(self) -> None:
         """Run the background async event loop."""
         asyncio.set_event_loop(self._loop)
@@ -326,6 +363,28 @@ class CrewAIEventsBus:
         ...     await asyncio.wrap_future(future)  # In async test
         ...     # or future.result(timeout=5.0) in sync code
         """
+        event.previous_event_id = get_last_event_id()
+        event.triggered_by_event_id = get_triggering_event_id()
+        event.emission_sequence = get_next_emission_sequence()
+        if event.parent_event_id is None:
+            event_type_name = event.type
+            if event_type_name in SCOPE_ENDING_EVENTS:
+                event.parent_event_id = get_enclosing_parent_id()
+                popped = pop_event_scope()
+                if popped is None:
+                    handle_empty_pop(event_type_name)
+                else:
+                    _, popped_type = popped
+                    expected_start = VALID_EVENT_PAIRS.get(event_type_name)
+                    if expected_start and popped_type and popped_type != expected_start:
+                        handle_mismatch(event_type_name, popped_type, expected_start)
+            elif event_type_name in SCOPE_STARTING_EVENTS:
+                event.parent_event_id = get_current_parent_id()
+                push_event_scope(event.event_id, event_type_name)
+            else:
+                event.parent_event_id = get_current_parent_id()
+
+        set_last_event_id(event.event_id)
         event_type = type(event)

         with self._rwlock.r_locked():
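The net effect on a simple run is a parent/child tree plus two linear chains. A sketch (event ids invented for illustration; names come from the SCOPE_* sets in event_context.py):

crew_kickoff_started    id=E1  parent=None   # starting event: pushes E1
  task_started          id=E2  parent=E1     # starting event: pushes E2
  task_completed        id=E3  parent=E1     # ending event: parent is the enclosing scope; pops E2
crew_kickoff_completed  id=E4  parent=None   # ending event: pops E1

previous_event_id links E1 -> E2 -> E3 -> E4 regardless of nesting, and emission_sequence numbers them 1 through 4.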
@@ -339,9 +398,11 @@ class CrewAIEventsBus:
         async_handlers = self._async_handlers.get(event_type, frozenset())

         if has_dependencies:
-            return asyncio.run_coroutine_threadsafe(
-                self._emit_with_dependencies(source, event),
-                self._loop,
+            return self._track_future(
+                asyncio.run_coroutine_threadsafe(
+                    self._emit_with_dependencies(source, event),
+                    self._loop,
+                )
             )

         if sync_handlers:
@@ -353,16 +414,53 @@ class CrewAIEventsBus:
                 ctx.run, self._call_handlers, source, event, sync_handlers
             )
             if not async_handlers:
-                return sync_future
+                return self._track_future(sync_future)

         if async_handlers:
-            return asyncio.run_coroutine_threadsafe(
-                self._acall_handlers(source, event, async_handlers),
-                self._loop,
+            return self._track_future(
+                asyncio.run_coroutine_threadsafe(
+                    self._acall_handlers(source, event, async_handlers),
+                    self._loop,
+                )
             )

         return None

+    def flush(self, timeout: float | None = 30.0) -> bool:
+        """Block until all pending event handlers complete.
+
+        This method waits for all futures from previously emitted events to
+        finish executing. Useful at the end of operations (like kickoff) to
+        ensure all event handlers have completed before returning.
+
+        Args:
+            timeout: Maximum time in seconds to wait for handlers to complete.
+                Defaults to 30 seconds. Pass None to wait indefinitely.
+
+        Returns:
+            True if all handlers completed, False if timeout occurred.
+        """
+        with self._futures_lock:
+            futures_to_wait = list(self._pending_futures)
+
+        if not futures_to_wait:
+            return True
+
+        from concurrent.futures import wait as wait_futures
+
+        done, not_done = wait_futures(futures_to_wait, timeout=timeout)
+
+        # Check for exceptions in completed futures
+        errors = [
+            future.exception() for future in done if future.exception() is not None
+        ]
+        for error in errors:
+            self._console.print(
+                f"[CrewAIEventsBus] Handler exception during flush: {error}"
+            )
+
+        return len(not_done) == 0
+
     async def aemit(self, source: Any, event: BaseEvent) -> None:
         """Asynchronously emit an event to registered async handlers.

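Typical use of the new flush(), for example in a test that asserts on handler side effects:

from crewai.events.event_bus import crewai_event_bus

# ... emit events / run a kickoff ...
if not crewai_event_bus.flush(timeout=5.0):
    raise TimeoutError("event handlers still running after 5s")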
@@ -464,6 +562,9 @@ class CrewAIEventsBus:
             wait: If True, wait for all pending tasks to complete before stopping.
                 If False, cancel all pending tasks immediately.
         """
+        if wait:
+            self.flush()
+
         with self._rwlock.w_locked():
             self._shutting_down = True
             loop = getattr(self, "_loop", None)

lib/crewai/src/crewai/events/event_context.py (new file, 334 lines)
@@ -0,0 +1,334 @@
+"""Event context management for parent-child relationship tracking."""
+
+from collections.abc import Generator
+from contextlib import contextmanager
+import contextvars
+from dataclasses import dataclass
+from enum import Enum
+
+from crewai.events.utils.console_formatter import ConsoleFormatter
+
+
+class MismatchBehavior(Enum):
+    """Behavior when event pairs don't match."""
+
+    WARN = "warn"
+    RAISE = "raise"
+    SILENT = "silent"
+
+
+@dataclass
+class EventContextConfig:
+    """Configuration for event context behavior."""
+
+    max_stack_depth: int = 100
+    mismatch_behavior: MismatchBehavior = MismatchBehavior.WARN
+    empty_pop_behavior: MismatchBehavior = MismatchBehavior.WARN
+
+
+class StackDepthExceededError(Exception):
+    """Raised when stack depth limit is exceeded."""
+
+
+class EventPairingError(Exception):
+    """Raised when event pairs don't match."""
+
+
+class EmptyStackError(Exception):
+    """Raised when popping from empty stack."""
+
+
+_event_id_stack: contextvars.ContextVar[tuple[tuple[str, str], ...]] = (
+    contextvars.ContextVar("_event_id_stack", default=())
+)
+
+_event_context_config: contextvars.ContextVar[EventContextConfig | None] = (
+    contextvars.ContextVar("_event_context_config", default=None)
+)
+
+_last_event_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
+    "_last_event_id", default=None
+)
+
+_triggering_event_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
+    "_triggering_event_id", default=None
+)
+
+_default_config = EventContextConfig()
+
+_console = ConsoleFormatter()
+
+
+def get_current_parent_id() -> str | None:
+    """Get the current parent event ID from the stack."""
+    stack = _event_id_stack.get()
+    return stack[-1][0] if stack else None
+
+
+def get_enclosing_parent_id() -> str | None:
+    """Get the parent of the current scope (stack[-2])."""
+    stack = _event_id_stack.get()
+    return stack[-2][0] if len(stack) >= 2 else None
+
+
+def get_last_event_id() -> str | None:
+    """Get the ID of the last emitted event for linear chain tracking.
+
+    Returns:
+        The event_id of the previously emitted event, or None if no event yet.
+    """
+    return _last_event_id.get()
+
+
+def reset_last_event_id() -> None:
+    """Reset the last event ID to None.
+
+    Should be called at the start of a new flow or when resetting event state.
+    """
+    _last_event_id.set(None)
+
+
+def set_last_event_id(event_id: str) -> None:
+    """Set the ID of the last emitted event.
+
+    Args:
+        event_id: The event_id to set as the last emitted event.
+    """
+    _last_event_id.set(event_id)
+
+
+def get_triggering_event_id() -> str | None:
+    """Get the ID of the event that triggered the current execution.
+
+    Returns:
+        The event_id of the triggering event, or None if not in a triggered context.
+    """
+    return _triggering_event_id.get()
+
+
+def set_triggering_event_id(event_id: str | None) -> None:
+    """Set the ID of the triggering event for causal chain tracking.
+
+    Args:
+        event_id: The event_id that triggered the current execution, or None.
+    """
+    _triggering_event_id.set(event_id)
+
+
+@contextmanager
+def triggered_by_scope(event_id: str) -> Generator[None, None, None]:
+    """Context manager to set the triggering event ID for causal chain tracking.
+
+    All events emitted within this context will have their triggered_by_event_id
+    set to the provided event_id.
+
+    Args:
+        event_id: The event_id that triggered the current execution.
+    """
+    previous = _triggering_event_id.get()
+    _triggering_event_id.set(event_id)
+    try:
+        yield
+    finally:
+        _triggering_event_id.set(previous)
+
+
+def push_event_scope(event_id: str, event_type: str = "") -> None:
+    """Push an event ID and type onto the scope stack."""
+    config = _event_context_config.get() or _default_config
+    stack = _event_id_stack.get()
+
+    if 0 < config.max_stack_depth <= len(stack):
+        raise StackDepthExceededError(
+            f"Event stack depth limit ({config.max_stack_depth}) exceeded. "
+            f"This usually indicates missing ending events."
+        )
+
+    _event_id_stack.set((*stack, (event_id, event_type)))
+
+
+def pop_event_scope() -> tuple[str, str] | None:
+    """Pop an event entry from the scope stack."""
+    stack = _event_id_stack.get()
+    if not stack:
+        return None
+    _event_id_stack.set(stack[:-1])
+    return stack[-1]
+
+
+def handle_empty_pop(event_type_name: str) -> None:
+    """Handle a pop attempt on an empty stack."""
+    config = _event_context_config.get() or _default_config
+    msg = (
+        f"Ending event '{event_type_name}' emitted with empty scope stack. "
+        "Missing starting event?"
+    )
+
+    if config.empty_pop_behavior == MismatchBehavior.RAISE:
+        raise EmptyStackError(msg)
+    if config.empty_pop_behavior == MismatchBehavior.WARN:
+        _console.print(f"[CrewAIEventsBus] Warning: {msg}")
+
+
+def handle_mismatch(
+    event_type_name: str,
+    popped_type: str,
+    expected_start: str,
+) -> None:
+    """Handle a mismatched event pair."""
+    config = _event_context_config.get() or _default_config
+    msg = (
+        f"Event pairing mismatch. '{event_type_name}' closed '{popped_type}' "
+        f"(expected '{expected_start}')"
+    )
+
+    if config.mismatch_behavior == MismatchBehavior.RAISE:
+        raise EventPairingError(msg)
+    if config.mismatch_behavior == MismatchBehavior.WARN:
+        _console.print(f"[CrewAIEventsBus] Warning: {msg}")
+
+
+@contextmanager
+def event_scope(event_id: str, event_type: str = "") -> Generator[None, None, None]:
+    """Context manager to establish a parent event scope."""
+    stack = _event_id_stack.get()
+    already_on_stack = any(entry[0] == event_id for entry in stack)
+    if not already_on_stack:
+        push_event_scope(event_id, event_type)
+    try:
+        yield
+    finally:
+        if not already_on_stack:
+            pop_event_scope()
+
+
+SCOPE_STARTING_EVENTS: frozenset[str] = frozenset(
+    {
+        "flow_started",
+        "method_execution_started",
+        "crew_kickoff_started",
+        "crew_train_started",
+        "crew_test_started",
+        "agent_execution_started",
+        "agent_evaluation_started",
+        "lite_agent_execution_started",
+        "task_started",
+        "llm_call_started",
+        "llm_guardrail_started",
+        "tool_usage_started",
+        "mcp_connection_started",
+        "mcp_tool_execution_started",
+        "memory_retrieval_started",
+        "memory_save_started",
+        "memory_query_started",
+        "knowledge_query_started",
+        "knowledge_search_query_started",
+        "a2a_delegation_started",
+        "a2a_conversation_started",
+        "a2a_server_task_started",
+        "a2a_parallel_delegation_started",
+        "agent_reasoning_started",
+    }
+)
+
+SCOPE_ENDING_EVENTS: frozenset[str] = frozenset(
+    {
+        "flow_finished",
+        "flow_paused",
+        "method_execution_finished",
+        "method_execution_failed",
+        "method_execution_paused",
+        "crew_kickoff_completed",
+        "crew_kickoff_failed",
+        "crew_train_completed",
+        "crew_train_failed",
+        "crew_test_completed",
+        "crew_test_failed",
+        "agent_execution_completed",
+        "agent_execution_error",
+        "agent_evaluation_completed",
+        "agent_evaluation_failed",
+        "lite_agent_execution_completed",
+        "lite_agent_execution_error",
+        "task_completed",
+        "task_failed",
+        "llm_call_completed",
+        "llm_call_failed",
+        "llm_guardrail_completed",
+        "llm_guardrail_failed",
+        "tool_usage_finished",
+        "tool_usage_error",
+        "mcp_connection_completed",
+        "mcp_connection_failed",
+        "mcp_tool_execution_completed",
+        "mcp_tool_execution_failed",
+        "memory_retrieval_completed",
+        "memory_retrieval_failed",
+        "memory_save_completed",
+        "memory_save_failed",
+        "memory_query_completed",
+        "memory_query_failed",
+        "knowledge_query_completed",
+        "knowledge_query_failed",
+        "knowledge_search_query_completed",
+        "knowledge_search_query_failed",
+        "a2a_delegation_completed",
+        "a2a_conversation_completed",
+        "a2a_server_task_completed",
+        "a2a_server_task_canceled",
+        "a2a_server_task_failed",
+        "a2a_parallel_delegation_completed",
+        "agent_reasoning_completed",
+        "agent_reasoning_failed",
+    }
+)
+
+VALID_EVENT_PAIRS: dict[str, str] = {
+    "flow_finished": "flow_started",
+    "flow_paused": "flow_started",
+    "method_execution_finished": "method_execution_started",
+    "method_execution_failed": "method_execution_started",
+    "method_execution_paused": "method_execution_started",
+    "crew_kickoff_completed": "crew_kickoff_started",
+    "crew_kickoff_failed": "crew_kickoff_started",
+    "crew_train_completed": "crew_train_started",
+    "crew_train_failed": "crew_train_started",
+    "crew_test_completed": "crew_test_started",
+    "crew_test_failed": "crew_test_started",
+    "agent_execution_completed": "agent_execution_started",
+    "agent_execution_error": "agent_execution_started",
+    "agent_evaluation_completed": "agent_evaluation_started",
+    "agent_evaluation_failed": "agent_evaluation_started",
+    "lite_agent_execution_completed": "lite_agent_execution_started",
+    "lite_agent_execution_error": "lite_agent_execution_started",
+    "task_completed": "task_started",
+    "task_failed": "task_started",
+    "llm_call_completed": "llm_call_started",
+    "llm_call_failed": "llm_call_started",
+    "llm_guardrail_completed": "llm_guardrail_started",
+    "llm_guardrail_failed": "llm_guardrail_started",
+    "tool_usage_finished": "tool_usage_started",
+    "tool_usage_error": "tool_usage_started",
+    "mcp_connection_completed": "mcp_connection_started",
+    "mcp_connection_failed": "mcp_connection_started",
+    "mcp_tool_execution_completed": "mcp_tool_execution_started",
+    "mcp_tool_execution_failed": "mcp_tool_execution_started",
+    "memory_retrieval_completed": "memory_retrieval_started",
+    "memory_retrieval_failed": "memory_retrieval_started",
+    "memory_save_completed": "memory_save_started",
+    "memory_save_failed": "memory_save_started",
+    "memory_query_completed": "memory_query_started",
+    "memory_query_failed": "memory_query_started",
+    "knowledge_query_completed": "knowledge_query_started",
+    "knowledge_query_failed": "knowledge_query_started",
+    "knowledge_search_query_completed": "knowledge_search_query_started",
+    "knowledge_search_query_failed": "knowledge_search_query_started",
+    "a2a_delegation_completed": "a2a_delegation_started",
+    "a2a_conversation_completed": "a2a_conversation_started",
+    "a2a_server_task_completed": "a2a_server_task_started",
+    "a2a_server_task_canceled": "a2a_server_task_started",
+    "a2a_server_task_failed": "a2a_server_task_started",
+    "a2a_parallel_delegation_completed": "a2a_parallel_delegation_started",
+    "agent_reasoning_completed": "agent_reasoning_started",
+    "agent_reasoning_failed": "agent_reasoning_started",
+}
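The two context managers compose directly; a minimal sketch, assuming no enclosing scope is active:

from crewai.events.event_context import (
    event_scope,
    get_current_parent_id,
    get_triggering_event_id,
    triggered_by_scope,
)

with event_scope("evt-123", "task_started"):
    assert get_current_parent_id() == "evt-123"
    with triggered_by_scope("evt-123"):
        # events emitted here would get triggered_by_event_id="evt-123"
        assert get_triggering_event_id() == "evt-123"
assert get_current_parent_id() is None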

(crewai.events types union module)
@@ -79,6 +79,7 @@ from crewai.events.types.memory_events import (
     MemoryQueryFailedEvent,
     MemoryQueryStartedEvent,
     MemoryRetrievalCompletedEvent,
+    MemoryRetrievalFailedEvent,
     MemoryRetrievalStartedEvent,
     MemorySaveCompletedEvent,
     MemorySaveFailedEvent,
@@ -173,6 +174,7 @@ EventTypes = (
     | MemoryQueryFailedEvent
     | MemoryRetrievalStartedEvent
     | MemoryRetrievalCompletedEvent
+    | MemoryRetrievalFailedEvent
     | MCPConnectionStartedEvent
     | MCPConnectionCompletedEvent
     | MCPConnectionFailedEvent

(tracing: TraceBatchManager)
@@ -267,9 +267,12 @@ class TraceBatchManager:

         sorted_events = sorted(
             self.event_buffer,
-            key=lambda e: e.timestamp
-            if hasattr(e, "timestamp") and e.timestamp
-            else "",
+            key=lambda e: (
+                e.emission_sequence
+                if e.emission_sequence is not None
+                else float("inf"),
+                e.timestamp if hasattr(e, "timestamp") and e.timestamp else "",
+            ),
         )

         self.current_batch.events = sorted_events
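The new key sorts primarily by emission_sequence, with events that lack one (older recordings) sinking to the end via float("inf") and ties broken by timestamp. A self-contained illustration:

events = [("b", None, "10:02"), ("a", 2, "10:01"), ("c", 1, "10:00")]
events.sort(key=lambda e: (e[1] if e[1] is not None else float("inf"), e[2]))
print([name for name, _, _ in events])  # ['c', 'a', 'b']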

(tracing: TraceCollectionListener)
@@ -9,6 +9,7 @@ from typing_extensions import Self
 from crewai.cli.authentication.token import AuthError, get_auth_token
 from crewai.cli.version import get_crewai_version
 from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.base_events import BaseEvent
 from crewai.events.event_bus import CrewAIEventsBus
 from crewai.events.listeners.tracing.first_time_trace_handler import (
     FirstTimeTraceHandler,
@@ -616,7 +617,7 @@ class TraceCollectionListener(BaseEventListener):
         if self.batch_manager.is_batch_initialized():
             self.batch_manager.finalize_batch()

-    def _initialize_crew_batch(self, source: Any, event: Any) -> None:
+    def _initialize_crew_batch(self, source: Any, event: BaseEvent) -> None:
         """Initialize trace batch.

         Args:
@@ -626,7 +627,7 @@ class TraceCollectionListener(BaseEventListener):
         user_context = self._get_user_context()
         execution_metadata = {
             "crew_name": getattr(event, "crew_name", "Unknown Crew"),
-            "execution_start": event.timestamp if hasattr(event, "timestamp") else None,
+            "execution_start": event.timestamp,
             "crewai_version": get_crewai_version(),
         }

@@ -635,7 +636,7 @@ class TraceCollectionListener(BaseEventListener):

         self._initialize_batch(user_context, execution_metadata)

-    def _initialize_flow_batch(self, source: Any, event: Any) -> None:
+    def _initialize_flow_batch(self, source: Any, event: BaseEvent) -> None:
         """Initialize trace batch for Flow execution.

         Args:
@@ -645,7 +646,7 @@ class TraceCollectionListener(BaseEventListener):
         user_context = self._get_user_context()
         execution_metadata = {
             "flow_name": getattr(event, "flow_name", "Unknown Flow"),
-            "execution_start": event.timestamp if hasattr(event, "timestamp") else None,
+            "execution_start": event.timestamp,
             "crewai_version": get_crewai_version(),
             "execution_type": "flow",
         }
@@ -714,18 +715,18 @@ class TraceCollectionListener(BaseEventListener):
         self.batch_manager.end_event_processing()

     def _create_trace_event(
-        self, event_type: str, source: Any, event: Any
+        self, event_type: str, source: Any, event: BaseEvent
     ) -> TraceEvent:
-        """Create a trace event"""
-        if hasattr(event, "timestamp") and event.timestamp:
-            trace_event = TraceEvent(
-                type=event_type,
-                timestamp=event.timestamp.isoformat(),
-            )
-        else:
-            trace_event = TraceEvent(
-                type=event_type,
-            )
+        """Create a trace event with ordering information."""
+        trace_event = TraceEvent(
+            type=event_type,
+            timestamp=event.timestamp.isoformat() if event.timestamp else "",
+            event_id=event.event_id,
+            emission_sequence=event.emission_sequence,
+            parent_event_id=event.parent_event_id,
+            previous_event_id=event.previous_event_id,
+            triggered_by_event_id=event.triggered_by_event_id,
+        )

         trace_event.event_data = self._build_event_data(event_type, event, source)
@@ -778,10 +779,8 @@ class TraceCollectionListener(BaseEventListener):
         }
         if event_type == "llm_call_started":
             event_data = safe_serialize_to_dict(event)
-            event_data["task_name"] = (
-                event.task_name or event.task_description
-                if hasattr(event, "task_name") and event.task_name
-                else None
-            )
+            event_data["task_name"] = event.task_name or getattr(
+                event, "task_description", None
+            )
             return event_data
         if event_type == "llm_call_completed":

(tracing: TraceEvent dataclass)
@@ -15,5 +15,10 @@ class TraceEvent:
     type: str = ""
     event_data: dict[str, Any] = field(default_factory=dict)
+
+    emission_sequence: int | None = None
+    parent_event_id: str | None = None
+    previous_event_id: str | None = None
+    triggered_by_event_id: str | None = None

     def to_dict(self) -> dict[str, Any]:
         return asdict(self)

crewai/events/types/llm_events.py
@@ -9,6 +9,7 @@ from crewai.events.base_events import BaseEvent
 class LLMEventBase(BaseEvent):
     from_task: Any | None = None
     from_agent: Any | None = None
+    model: str | None = None

     def __init__(self, **data: Any) -> None:
         if data.get("from_task"):
@@ -42,7 +43,6 @@ class LLMCallStartedEvent(LLMEventBase):
     """

     type: str = "llm_call_started"
-    model: str | None = None
     messages: str | list[dict[str, Any]] | None = None
     tools: list[dict[str, Any]] | None = None
     callbacks: list[Any] | None = None
@@ -56,7 +56,6 @@ class LLMCallCompletedEvent(LLMEventBase):
     messages: str | list[dict[str, Any]] | None = None
     response: Any
     call_type: LLMCallType
-    model: str | None = None


 class LLMCallFailedEvent(LLMEventBase):
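With model hoisted into LLMEventBase, every LLM event, including LLMCallFailedEvent, now carries the model name rather than only the started/completed variants.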

crewai/events/types/memory_events.py
@@ -14,7 +14,7 @@ class MemoryBaseEvent(BaseEvent):
     agent_role: str | None = None
     agent_id: str | None = None

-    def __init__(self, **data):
+    def __init__(self, **data: Any) -> None:
         super().__init__(**data)
         self._set_agent_params(data)
         self._set_task_params(data)
@@ -93,3 +93,11 @@ class MemoryRetrievalCompletedEvent(MemoryBaseEvent):
     task_id: str | None = None
     memory_content: str
     retrieval_time_ms: float
+
+
+class MemoryRetrievalFailedEvent(MemoryBaseEvent):
+    """Event emitted when memory retrieval for a task prompt fails."""
+
+    type: str = "memory_retrieval_failed"
+    task_id: str | None = None
+    error: str
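A sketch of consuming the new event, using the bus's on(...) decorator for handler registration:

from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import MemoryRetrievalFailedEvent


@crewai_event_bus.on(MemoryRetrievalFailedEvent)
def log_memory_failure(source, event):
    print(f"memory retrieval failed for task {event.task_id}: {event.error}")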

crewai/flow/flow.py
@@ -31,7 +31,13 @@ from pydantic import BaseModel, Field, ValidationError
 from rich.console import Console
 from rich.panel import Panel

+from crewai.events.base_events import reset_emission_counter
 from crewai.events.event_bus import crewai_event_bus
+from crewai.events.event_context import (
+    get_current_parent_id,
+    reset_last_event_id,
+    triggered_by_scope,
+)
 from crewai.events.listeners.tracing.trace_listener import (
     TraceCollectionListener,
 )
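flow.py pulls in triggered_by_scope alongside the reset helpers: the hunks below thread each finishing method's event_id into _execute_single_listener, presumably wrapping listener execution in triggered_by_scope so that events emitted by a listener carry triggered_by_event_id pointing at the event that fired it.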
@@ -753,6 +759,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
         racing_listeners: frozenset[FlowMethodName],
         other_listeners: list[FlowMethodName],
         result: Any,
+        triggering_event_id: str | None = None,
     ) -> None:
         """Execute racing listeners with first-wins semantics.

@@ -764,10 +771,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
             racing_listeners: Set of listener names that race for an OR condition.
             other_listeners: Other listeners to execute in parallel (not racing).
             result: The result from the triggering method.
+            triggering_event_id: The event_id of the event that triggered these listeners.
         """
         racing_tasks = [
             asyncio.create_task(
-                self._execute_single_listener(name, result),
+                self._execute_single_listener(name, result, triggering_event_id),
                 name=str(name),
             )
             for name in racing_listeners
@@ -775,7 +783,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

         other_tasks = [
             asyncio.create_task(
-                self._execute_single_listener(name, result),
+                self._execute_single_listener(name, result, triggering_event_id),
                 name=str(name),
             )
             for name in other_listeners
@@ -1557,6 +1565,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
         if filtered_inputs:
             self._initialize_state(filtered_inputs)

+        if get_current_parent_id() is None:
+            reset_emission_counter()
+            reset_last_event_id()
+
         # Emit FlowStartedEvent and log the start of the flow.
         if not self.suppress_flow_events:
             future = crewai_event_bus.emit(
@@ -1736,12 +1748,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
|||||||
method = self._methods[start_method_name]
|
method = self._methods[start_method_name]
|
||||||
enhanced_method = self._inject_trigger_payload_for_start_method(method)
|
enhanced_method = self._inject_trigger_payload_for_start_method(method)
|
||||||
|
|
||||||
result = await self._execute_method(start_method_name, enhanced_method)
|
result, finished_event_id = await self._execute_method(
|
||||||
|
start_method_name, enhanced_method
|
||||||
|
)
|
||||||
|
|
||||||
# If start method is a router, use its result as an additional trigger
|
# If start method is a router, use its result as an additional trigger
|
||||||
if start_method_name in self._routers and result is not None:
|
if start_method_name in self._routers and result is not None:
|
||||||
# Execute listeners for the start method name first
|
# Execute listeners for the start method name first
|
||||||
await self._execute_listeners(start_method_name, result)
|
await self._execute_listeners(start_method_name, result, finished_event_id)
|
||||||
# Then execute listeners for the router result (e.g., "approved")
|
# Then execute listeners for the router result (e.g., "approved")
|
||||||
router_result_trigger = FlowMethodName(str(result))
|
router_result_trigger = FlowMethodName(str(result))
|
||||||
listeners_for_result = self._find_triggered_methods(
|
listeners_for_result = self._find_triggered_methods(
|
@@ -1765,16 +1779,21 @@ class Flow(Generic[T], metaclass=FlowMeta):
                     if name not in racing_members
                 ]
                 await self._execute_racing_listeners(
-                    racing_members, other_listeners, listener_result
+                    racing_members,
+                    other_listeners,
+                    listener_result,
+                    finished_event_id,
                 )
             else:
                 tasks = [
-                    self._execute_single_listener(listener_name, listener_result)
+                    self._execute_single_listener(
+                        listener_name, listener_result, finished_event_id
+                    )
                     for listener_name in listeners_for_result
                 ]
                 await asyncio.gather(*tasks)
         else:
-            await self._execute_listeners(start_method_name, result)
+            await self._execute_listeners(start_method_name, result, finished_event_id)
 
     def _inject_trigger_payload_for_start_method(
         self, original_method: Callable[..., Any]
@@ -1818,7 +1837,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
         method: Callable[..., Any],
         *args: Any,
         **kwargs: Any,
-    ) -> Any:
+    ) -> tuple[Any, str | None]:
+        """Execute a method and emit events.
+
+        Returns:
+            A tuple of (result, finished_event_id) where finished_event_id is
+            the event_id of the MethodExecutionFinishedEvent, or None if events
+            are suppressed.
+        """
         try:
             dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | (
                 kwargs or {}
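A toy illustration of the new calling convention, where a method result travels together with the id of the finished event it produced (uuid4 stands in for the real event_id; the names are illustrative, not the Flow API):

import asyncio
import uuid
from typing import Any


async def execute_method(method) -> tuple[Any, str | None]:
    # Run the method, then mint an id for its "finished" event.
    result = await method()
    finished_event_id = str(uuid.uuid4())
    return result, finished_event_id


async def main():
    async def method():
        return 42

    result, finished_event_id = await execute_method(method)
    print(result, finished_event_id)


asyncio.run(main())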
@@ -1859,21 +1885,21 @@ class Flow(Generic[T], metaclass=FlowMeta):
 
             self._completed_methods.add(method_name)
 
+            finished_event_id: str | None = None
             if not self.suppress_flow_events:
-                future = crewai_event_bus.emit(
-                    self,
-                    MethodExecutionFinishedEvent(
-                        type="method_execution_finished",
-                        method_name=method_name,
-                        flow_name=self.name or self.__class__.__name__,
-                        state=self._copy_and_serialize_state(),
-                        result=result,
-                    ),
+                finished_event = MethodExecutionFinishedEvent(
+                    type="method_execution_finished",
+                    method_name=method_name,
+                    flow_name=self.name or self.__class__.__name__,
+                    state=self._copy_and_serialize_state(),
+                    result=result,
                 )
+                finished_event_id = finished_event.event_id
+                future = crewai_event_bus.emit(self, finished_event)
                 if future:
                     self._event_futures.append(future)
 
-            return result
+            return result, finished_event_id
         except Exception as e:
             # Check if this is a HumanFeedbackPending exception (paused, not failed)
             from crewai.flow.async_feedback.types import HumanFeedbackPending
@@ -1927,7 +1953,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
         return state_copy
 
     async def _execute_listeners(
-        self, trigger_method: FlowMethodName, result: Any
+        self,
+        trigger_method: FlowMethodName,
+        result: Any,
+        triggering_event_id: str | None = None,
     ) -> None:
         """Executes all listeners and routers triggered by a method completion.
 
@@ -1938,6 +1967,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
         Args:
             trigger_method: The name of the method that triggered these listeners.
             result: The result from the triggering method, passed to listeners that accept parameters.
+            triggering_event_id: The event_id of the MethodExecutionFinishedEvent that
+                triggered these listeners, used for causal chain tracking.
 
         Note:
             - Routers are executed sequentially to maintain flow control
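Causal chain tracking, as used in the docstring above, means each emitted event can name the event_id of the event that caused it. A hypothetical toy model of that linkage, not the crewai.events schema:

import uuid
from dataclasses import dataclass, field


@dataclass
class Event:
    name: str
    triggered_by: str | None = None  # event_id of the causing event, if any
    event_id: str = field(default_factory=lambda: str(uuid.uuid4()))


parent = Event("method_execution_finished")
child = Event("method_execution_started", triggered_by=parent.event_id)
assert child.triggered_by == parent.event_id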
@@ -1952,6 +1983,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
         ] = {}  # Map outcome -> HumanFeedbackResult
         current_trigger = trigger_method
         current_result = result  # Track the result to pass to each router
+        current_triggering_event_id = triggering_event_id
 
         while True:
             routers_triggered = self._find_triggered_methods(
@@ -1965,7 +1997,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
                 router_input = router_result_to_feedback.get(
                     str(current_trigger), current_result
                 )
-                await self._execute_single_listener(router_name, router_input)
+                current_triggering_event_id = await self._execute_single_listener(
+                    router_name, router_input, current_triggering_event_id
+                )
                 # After executing router, the router's result is the path
                 router_result = (
                     self._method_outputs[-1] if self._method_outputs else None
@@ -2008,12 +2042,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
                     if name not in racing_members
                 ]
                 await self._execute_racing_listeners(
-                    racing_members, other_listeners, listener_result
+                    racing_members,
+                    other_listeners,
+                    listener_result,
+                    triggering_event_id,
                 )
             else:
                 tasks = [
                     self._execute_single_listener(
-                        listener_name, listener_result
+                        listener_name, listener_result, triggering_event_id
                     )
                     for listener_name in listeners_triggered
                 ]
@@ -2192,8 +2229,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
         return triggered
 
     async def _execute_single_listener(
-        self, listener_name: FlowMethodName, result: Any
-    ) -> None:
+        self,
+        listener_name: FlowMethodName,
+        result: Any,
+        triggering_event_id: str | None = None,
+    ) -> str | None:
         """Executes a single listener method with proper event handling.
 
         This internal method manages the execution of an individual listener,
@@ -2202,6 +2242,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
         Args:
             listener_name: The name of the listener method to execute.
             result: The result from the triggering method, which may be passed to the listener if it accepts parameters.
+            triggering_event_id: The event_id of the event that triggered this listener,
+                used for causal chain tracking.
+
+        Returns:
+            The event_id of the MethodExecutionFinishedEvent emitted by this listener,
+            or None if events are suppressed.
 
         Note:
             - Inspects method signature to determine if it accepts the trigger result
@@ -2227,7 +2273,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
             ):
                 # This conditional start was executed, continue its chain
                 await self._execute_start_method(start_method_name)
-                return
+                return None
             # For cyclic flows, clear from completed to allow re-execution
             self._completed_methods.discard(listener_name)
             # Also clear from fired OR listeners for cyclic flows
@@ -2240,15 +2286,30 @@ class Flow(Generic[T], metaclass=FlowMeta):
             params = list(sig.parameters.values())
             method_params = [p for p in params if p.name != "self"]
 
-            if method_params:
-                listener_result = await self._execute_method(
-                    listener_name, method, result
-                )
+            if triggering_event_id:
+                with triggered_by_scope(triggering_event_id):
+                    if method_params:
+                        listener_result, finished_event_id = await self._execute_method(
+                            listener_name, method, result
+                        )
+                    else:
+                        listener_result, finished_event_id = await self._execute_method(
+                            listener_name, method
+                        )
             else:
-                listener_result = await self._execute_method(listener_name, method)
+                if method_params:
+                    listener_result, finished_event_id = await self._execute_method(
+                        listener_name, method, result
+                    )
+                else:
+                    listener_result, finished_event_id = await self._execute_method(
+                        listener_name, method
+                    )
 
             # Execute listeners (and possibly routers) of this listener
-            await self._execute_listeners(listener_name, listener_result)
+            await self._execute_listeners(
+                listener_name, listener_result, finished_event_id
+            )
 
             # If this listener is also a router (e.g., has @human_feedback with emit),
             # we need to trigger listeners for the router result as well
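The triggered_by_scope context manager used above makes the triggering event visible to anything emitted inside the block. A minimal contextvar-based sketch of such a scope, offered as an assumption for illustration rather than the crewai.events implementation:

import contextlib
import contextvars

_triggered_by: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "triggered_by", default=None
)


@contextlib.contextmanager
def triggered_by_scope(event_id: str):
    # Expose event_id as the causal parent for the duration of the scope.
    token = _triggered_by.set(event_id)
    try:
        yield
    finally:
        _triggered_by.reset(token)


def current_trigger() -> str | None:
    return _triggered_by.get()


with triggered_by_scope("evt-123"):
    assert current_trigger() == "evt-123"
assert current_trigger() is None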
@@ -2275,15 +2336,22 @@ class Flow(Generic[T], metaclass=FlowMeta):
                         if name not in racing_members
                     ]
                     await self._execute_racing_listeners(
-                        racing_members, other_listeners, feedback_result
+                        racing_members,
+                        other_listeners,
+                        feedback_result,
+                        finished_event_id,
                     )
                 else:
                     tasks = [
-                        self._execute_single_listener(name, feedback_result)
+                        self._execute_single_listener(
+                            name, feedback_result, finished_event_id
+                        )
                         for name in listeners_for_result
                     ]
                     await asyncio.gather(*tasks)
 
+            return finished_event_id
+
         except Exception as e:
             # Don't log HumanFeedbackPending as an error - it's expected control flow
             from crewai.flow.async_feedback.types import HumanFeedbackPending
@@ -375,6 +375,7 @@ class BaseLLM(ABC):
                     error=error,
                     from_task=from_task,
                     from_agent=from_agent,
+                    model=self.model,
                 ),
             )
 
@@ -241,6 +241,9 @@ class ToolUsage:
         if self.task:
             self.task.increment_tools_errors()
 
+        started_at = time.time()
+        started_event_emitted = False
+
         if self.agent:
             event_data = {
                 "agent_key": self.agent.key,
@@ -258,151 +261,162 @@ class ToolUsage:
                 event_data["task_name"] = self.task.name or self.task.description
                 event_data["task_id"] = str(self.task.id)
             crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
+            started_event_emitted = True
 
-        started_at = time.time()
         from_cache = False
         result = None  # type: ignore
+        should_retry = False
+        available_tool = None
 
-        if self.tools_handler and self.tools_handler.cache:
-            input_str = ""
-            if calling.arguments:
-                if isinstance(calling.arguments, dict):
-                    input_str = json.dumps(calling.arguments)
-                else:
-                    input_str = str(calling.arguments)
-
-            result = self.tools_handler.cache.read(
-                tool=calling.tool_name, input=input_str
-            )  # type: ignore
-            from_cache = result is not None
-
-        available_tool = next(
-            (
-                available_tool
-                for available_tool in self.tools
-                if available_tool.name == tool.name
-            ),
-            None,
-        )
-
-        usage_limit_error = self._check_usage_limit(available_tool, tool.name)
-        if usage_limit_error:
-            try:
-                result = usage_limit_error
-                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
-                return self._format_result(result=result)
-            except Exception:
-                if self.task:
-                    self.task.increment_tools_errors()
-
-        if result is None:
-            try:
-                if calling.tool_name in [
-                    "Delegate work to coworker",
-                    "Ask question to coworker",
-                ]:
-                    coworker = (
-                        calling.arguments.get("coworker") if calling.arguments else None
-                    )
-                    if self.task:
-                        self.task.increment_delegations(coworker)
-
-                if calling.arguments:
-                    try:
-                        acceptable_args = tool.args_schema.model_json_schema()[
-                            "properties"
-                        ].keys()
-                        arguments = {
-                            k: v
-                            for k, v in calling.arguments.items()
-                            if k in acceptable_args
-                        }
-                        arguments = self._add_fingerprint_metadata(arguments)
-                        result = await tool.ainvoke(input=arguments)
-                    except Exception:
-                        arguments = calling.arguments
-                        arguments = self._add_fingerprint_metadata(arguments)
-                        result = await tool.ainvoke(input=arguments)
-                else:
-                    arguments = self._add_fingerprint_metadata({})
-                    result = await tool.ainvoke(input=arguments)
-            except Exception as e:
-                self.on_tool_error(tool=tool, tool_calling=calling, e=e)
-                self._run_attempts += 1
-                if self._run_attempts > self._max_parsing_attempts:
-                    self._telemetry.tool_usage_error(llm=self.function_calling_llm)
-                    error_message = self._i18n.errors("tool_usage_exception").format(
-                        error=e, tool=tool.name, tool_inputs=tool.description
-                    )
-                    error = ToolUsageError(
-                        f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
-                    ).message
-                    if self.task:
-                        self.task.increment_tools_errors()
-                    if self.agent and self.agent.verbose:
-                        self._printer.print(
-                            content=f"\n\n{error_message}\n", color="red"
-                        )
-                    return error
-                if self.task:
-                    self.task.increment_tools_errors()
-                return await self.ause(calling=calling, tool_string=tool_string)
-
-        if self.tools_handler:
-            should_cache = True
-            if (
-                hasattr(available_tool, "cache_function")
-                and available_tool.cache_function
-            ):
-                should_cache = available_tool.cache_function(
-                    calling.arguments, result
-                )
-
-            self.tools_handler.on_tool_use(
-                calling=calling, output=result, should_cache=should_cache
-            )
-
-        self._telemetry.tool_usage(
-            llm=self.function_calling_llm,
-            tool_name=tool.name,
-            attempts=self._run_attempts,
-        )
-        result = self._format_result(result=result)
-        data = {
-            "result": result,
-            "tool_name": tool.name,
-            "tool_args": calling.arguments,
-        }
-
-        self.on_tool_use_finished(
-            tool=tool,
-            tool_calling=calling,
-            from_cache=from_cache,
-            started_at=started_at,
-            result=result,
-        )
-
-        if (
-            hasattr(available_tool, "result_as_answer")
-            and available_tool.result_as_answer  # type: ignore
-        ):
-            result_as_answer = available_tool.result_as_answer  # type: ignore
-            data["result_as_answer"] = result_as_answer  # type: ignore
-
-        if self.agent and hasattr(self.agent, "tools_results"):
-            self.agent.tools_results.append(data)
-
-        if available_tool and hasattr(available_tool, "current_usage_count"):
-            available_tool.current_usage_count += 1
-            if (
-                hasattr(available_tool, "max_usage_count")
-                and available_tool.max_usage_count is not None
-            ):
-                self._printer.print(
-                    content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
-                    color="blue",
-                )
+        try:
+            if self.tools_handler and self.tools_handler.cache:
+                input_str = ""
+                if calling.arguments:
+                    if isinstance(calling.arguments, dict):
+                        input_str = json.dumps(calling.arguments)
+                    else:
+                        input_str = str(calling.arguments)
+
+                result = self.tools_handler.cache.read(
+                    tool=calling.tool_name, input=input_str
+                )  # type: ignore
+                from_cache = result is not None
+
+            available_tool = next(
+                (
+                    available_tool
+                    for available_tool in self.tools
+                    if available_tool.name == tool.name
+                ),
+                None,
+            )
+
+            usage_limit_error = self._check_usage_limit(available_tool, tool.name)
+            if usage_limit_error:
+                result = usage_limit_error
+                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
+                result = self._format_result(result=result)
+                # Don't return early - fall through to finally block
+            elif result is None:
+                try:
+                    if calling.tool_name in [
+                        "Delegate work to coworker",
+                        "Ask question to coworker",
+                    ]:
+                        coworker = (
+                            calling.arguments.get("coworker")
+                            if calling.arguments
+                            else None
+                        )
+                        if self.task:
+                            self.task.increment_delegations(coworker)
+
+                    if calling.arguments:
+                        try:
+                            acceptable_args = tool.args_schema.model_json_schema()[
+                                "properties"
+                            ].keys()
+                            arguments = {
+                                k: v
+                                for k, v in calling.arguments.items()
+                                if k in acceptable_args
+                            }
+                            arguments = self._add_fingerprint_metadata(arguments)
+                            result = await tool.ainvoke(input=arguments)
+                        except Exception:
+                            arguments = calling.arguments
+                            arguments = self._add_fingerprint_metadata(arguments)
+                            result = await tool.ainvoke(input=arguments)
+                    else:
+                        arguments = self._add_fingerprint_metadata({})
+                        result = await tool.ainvoke(input=arguments)
+
+                    if self.tools_handler:
+                        should_cache = True
+                        if (
+                            hasattr(available_tool, "cache_function")
+                            and available_tool.cache_function
+                        ):
+                            should_cache = available_tool.cache_function(
+                                calling.arguments, result
+                            )
+
+                        self.tools_handler.on_tool_use(
+                            calling=calling, output=result, should_cache=should_cache
+                        )
+
+                    self._telemetry.tool_usage(
+                        llm=self.function_calling_llm,
+                        tool_name=tool.name,
+                        attempts=self._run_attempts,
+                    )
+                    result = self._format_result(result=result)
+                    data = {
+                        "result": result,
+                        "tool_name": tool.name,
+                        "tool_args": calling.arguments,
+                    }
+
+                    if (
+                        hasattr(available_tool, "result_as_answer")
+                        and available_tool.result_as_answer
+                    ):
+                        result_as_answer = available_tool.result_as_answer
+                        data["result_as_answer"] = result_as_answer
+
+                    if self.agent and hasattr(self.agent, "tools_results"):
+                        self.agent.tools_results.append(data)
+
+                    if available_tool and hasattr(
+                        available_tool, "current_usage_count"
+                    ):
+                        available_tool.current_usage_count += 1
+                        if (
+                            hasattr(available_tool, "max_usage_count")
+                            and available_tool.max_usage_count is not None
+                        ):
+                            self._printer.print(
+                                content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
+                                color="blue",
+                            )
+
+                except Exception as e:
+                    self.on_tool_error(tool=tool, tool_calling=calling, e=e)
+                    self._run_attempts += 1
+                    if self._run_attempts > self._max_parsing_attempts:
+                        self._telemetry.tool_usage_error(llm=self.function_calling_llm)
+                        error_message = self._i18n.errors(
+                            "tool_usage_exception"
+                        ).format(error=e, tool=tool.name, tool_inputs=tool.description)
+                        result = ToolUsageError(
+                            f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
+                        ).message
+                        if self.task:
+                            self.task.increment_tools_errors()
+                        if self.agent and self.agent.verbose:
+                            self._printer.print(
+                                content=f"\n\n{error_message}\n", color="red"
+                            )
+                    else:
+                        if self.task:
+                            self.task.increment_tools_errors()
+                        should_retry = True
+            else:
+                result = self._format_result(result=result)
+
+        finally:
+            if started_event_emitted:
+                self.on_tool_use_finished(
+                    tool=tool,
+                    tool_calling=calling,
+                    from_cache=from_cache,
+                    started_at=started_at,
+                    result=result,
+                )
+
+        # Handle retry after finally block ensures finished event was emitted
+        if should_retry:
+            return await self.ause(calling=calling, tool_string=tool_string)
 
         return result
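Stripped to its skeleton, the restructuring above pairs every started event with exactly one finished callback by funnelling the success, error, and retry exits through finally. A hedged sketch with hypothetical callbacks, not the ToolUsage API:

import time


def run_tool(invoke, on_started, on_finished, retries=1):
    started_at = time.time()
    on_started()
    result = None
    should_retry = False
    try:
        result = invoke()
    except Exception:
        # Real code distinguishes error types and counts parsing attempts.
        should_retry = retries > 0
    finally:
        # Fires on every path, so started/finished events stay balanced.
        on_finished(result=result, started_at=started_at)
    if should_retry:
        return run_tool(invoke, on_started, on_finished, retries - 1)
    return result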
@@ -412,6 +426,7 @@ class ToolUsage:
         tool: CrewStructuredTool,
         calling: ToolCalling | InstructorToolCalling,
     ) -> str:
+        # Repeated usage check happens before event emission - safe to return early
         if self._check_tool_repeated_usage(calling=calling):
             try:
                 result = self._i18n.errors("task_repeated_usage").format(
@@ -428,6 +443,9 @@
         if self.task:
             self.task.increment_tools_errors()
 
+        started_at = time.time()
+        started_event_emitted = False
+
         if self.agent:
             event_data = {
                 "agent_key": self.agent.key,
@@ -446,155 +464,162 @@
                 event_data["task_name"] = self.task.name or self.task.description
                 event_data["task_id"] = str(self.task.id)
             crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
+            started_event_emitted = True
 
-        started_at = time.time()
         from_cache = False
         result = None  # type: ignore
+        should_retry = False
+        available_tool = None
 
-        if self.tools_handler and self.tools_handler.cache:
-            input_str = ""
-            if calling.arguments:
-                if isinstance(calling.arguments, dict):
-                    import json
-
-                    input_str = json.dumps(calling.arguments)
-                else:
-                    input_str = str(calling.arguments)
-
-            result = self.tools_handler.cache.read(
-                tool=calling.tool_name, input=input_str
-            )  # type: ignore
-            from_cache = result is not None
-
-        available_tool = next(
-            (
-                available_tool
-                for available_tool in self.tools
-                if available_tool.name == tool.name
-            ),
-            None,
-        )
-
-        usage_limit_error = self._check_usage_limit(available_tool, tool.name)
-        if usage_limit_error:
-            try:
-                result = usage_limit_error
-                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
-                return self._format_result(result=result)
-            except Exception:
-                if self.task:
-                    self.task.increment_tools_errors()
-
-        if result is None:
-            try:
-                if calling.tool_name in [
-                    "Delegate work to coworker",
-                    "Ask question to coworker",
-                ]:
-                    coworker = (
-                        calling.arguments.get("coworker") if calling.arguments else None
-                    )
-                    if self.task:
-                        self.task.increment_delegations(coworker)
-
-                if calling.arguments:
-                    try:
-                        acceptable_args = tool.args_schema.model_json_schema()[
-                            "properties"
-                        ].keys()
-                        arguments = {
-                            k: v
-                            for k, v in calling.arguments.items()
-                            if k in acceptable_args
-                        }
-                        # Add fingerprint metadata if available
-                        arguments = self._add_fingerprint_metadata(arguments)
-                        result = tool.invoke(input=arguments)
-                    except Exception:
-                        arguments = calling.arguments
-                        # Add fingerprint metadata if available
-                        arguments = self._add_fingerprint_metadata(arguments)
-                        result = tool.invoke(input=arguments)
-                else:
-                    # Add fingerprint metadata even to empty arguments
-                    arguments = self._add_fingerprint_metadata({})
-                    result = tool.invoke(input=arguments)
-            except Exception as e:
-                self.on_tool_error(tool=tool, tool_calling=calling, e=e)
-                self._run_attempts += 1
-                if self._run_attempts > self._max_parsing_attempts:
-                    self._telemetry.tool_usage_error(llm=self.function_calling_llm)
-                    error_message = self._i18n.errors("tool_usage_exception").format(
-                        error=e, tool=tool.name, tool_inputs=tool.description
-                    )
-                    error = ToolUsageError(
-                        f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
-                    ).message
-                    if self.task:
-                        self.task.increment_tools_errors()
-                    if self.agent and self.agent.verbose:
-                        self._printer.print(
-                            content=f"\n\n{error_message}\n", color="red"
-                        )
-                    return error
-                if self.task:
-                    self.task.increment_tools_errors()
-                return self.use(calling=calling, tool_string=tool_string)
-
-        if self.tools_handler:
-            should_cache = True
-            if (
-                hasattr(available_tool, "cache_function")
-                and available_tool.cache_function
-            ):
-                should_cache = available_tool.cache_function(
-                    calling.arguments, result
-                )
-
-            self.tools_handler.on_tool_use(
-                calling=calling, output=result, should_cache=should_cache
-            )
-
-        self._telemetry.tool_usage(
-            llm=self.function_calling_llm,
-            tool_name=tool.name,
-            attempts=self._run_attempts,
-        )
-        result = self._format_result(result=result)
-        data = {
-            "result": result,
-            "tool_name": tool.name,
-            "tool_args": calling.arguments,
-        }
-
-        self.on_tool_use_finished(
-            tool=tool,
-            tool_calling=calling,
-            from_cache=from_cache,
-            started_at=started_at,
-            result=result,
-        )
-
-        if (
-            hasattr(available_tool, "result_as_answer")
-            and available_tool.result_as_answer  # type: ignore  # Item "None" of "Any | None" has no attribute "cache_function"
-        ):
-            result_as_answer = available_tool.result_as_answer  # type: ignore  # Item "None" of "Any | None" has no attribute "result_as_answer"
-            data["result_as_answer"] = result_as_answer  # type: ignore
-
-        if self.agent and hasattr(self.agent, "tools_results"):
-            self.agent.tools_results.append(data)
-
-        if available_tool and hasattr(available_tool, "current_usage_count"):
-            available_tool.current_usage_count += 1
-            if (
-                hasattr(available_tool, "max_usage_count")
-                and available_tool.max_usage_count is not None
-            ):
-                self._printer.print(
-                    content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
-                    color="blue",
-                )
+        try:
+            if self.tools_handler and self.tools_handler.cache:
+                input_str = ""
+                if calling.arguments:
+                    if isinstance(calling.arguments, dict):
+                        input_str = json.dumps(calling.arguments)
+                    else:
+                        input_str = str(calling.arguments)
+
+                result = self.tools_handler.cache.read(
+                    tool=calling.tool_name, input=input_str
+                )  # type: ignore
+                from_cache = result is not None
+
+            available_tool = next(
+                (
+                    available_tool
+                    for available_tool in self.tools
+                    if available_tool.name == tool.name
+                ),
+                None,
+            )
+
+            usage_limit_error = self._check_usage_limit(available_tool, tool.name)
+            if usage_limit_error:
+                result = usage_limit_error
+                self._telemetry.tool_usage_error(llm=self.function_calling_llm)
+                result = self._format_result(result=result)
+                # Don't return early - fall through to finally block
+            elif result is None:
+                try:
+                    if calling.tool_name in [
+                        "Delegate work to coworker",
+                        "Ask question to coworker",
+                    ]:
+                        coworker = (
+                            calling.arguments.get("coworker")
+                            if calling.arguments
+                            else None
+                        )
+                        if self.task:
+                            self.task.increment_delegations(coworker)
+
+                    if calling.arguments:
+                        try:
+                            acceptable_args = tool.args_schema.model_json_schema()[
+                                "properties"
+                            ].keys()
+                            arguments = {
+                                k: v
+                                for k, v in calling.arguments.items()
+                                if k in acceptable_args
+                            }
+                            arguments = self._add_fingerprint_metadata(arguments)
+                            result = tool.invoke(input=arguments)
+                        except Exception:
+                            arguments = calling.arguments
+                            arguments = self._add_fingerprint_metadata(arguments)
+                            result = tool.invoke(input=arguments)
+                    else:
+                        arguments = self._add_fingerprint_metadata({})
+                        result = tool.invoke(input=arguments)
+
+                    if self.tools_handler:
+                        should_cache = True
+                        if (
+                            hasattr(available_tool, "cache_function")
+                            and available_tool.cache_function
+                        ):
+                            should_cache = available_tool.cache_function(
+                                calling.arguments, result
+                            )
+
+                        self.tools_handler.on_tool_use(
+                            calling=calling, output=result, should_cache=should_cache
+                        )
+
+                    self._telemetry.tool_usage(
+                        llm=self.function_calling_llm,
+                        tool_name=tool.name,
+                        attempts=self._run_attempts,
+                    )
+                    result = self._format_result(result=result)
+                    data = {
+                        "result": result,
+                        "tool_name": tool.name,
+                        "tool_args": calling.arguments,
+                    }
+
+                    if (
+                        hasattr(available_tool, "result_as_answer")
+                        and available_tool.result_as_answer
+                    ):
+                        result_as_answer = available_tool.result_as_answer
+                        data["result_as_answer"] = result_as_answer
+
+                    if self.agent and hasattr(self.agent, "tools_results"):
+                        self.agent.tools_results.append(data)
+
+                    if available_tool and hasattr(
+                        available_tool, "current_usage_count"
+                    ):
+                        available_tool.current_usage_count += 1
+                        if (
+                            hasattr(available_tool, "max_usage_count")
+                            and available_tool.max_usage_count is not None
+                        ):
+                            self._printer.print(
+                                content=f"Tool '{available_tool.name}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
+                                color="blue",
+                            )
+
+                except Exception as e:
+                    self.on_tool_error(tool=tool, tool_calling=calling, e=e)
+                    self._run_attempts += 1
+                    if self._run_attempts > self._max_parsing_attempts:
+                        self._telemetry.tool_usage_error(llm=self.function_calling_llm)
+                        error_message = self._i18n.errors(
+                            "tool_usage_exception"
+                        ).format(error=e, tool=tool.name, tool_inputs=tool.description)
+                        result = ToolUsageError(
+                            f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
+                        ).message
+                        if self.task:
+                            self.task.increment_tools_errors()
+                        if self.agent and self.agent.verbose:
+                            self._printer.print(
+                                content=f"\n\n{error_message}\n", color="red"
+                            )
+                    else:
+                        if self.task:
+                            self.task.increment_tools_errors()
+                        should_retry = True
+            else:
+                result = self._format_result(result=result)
+
+        finally:
+            if started_event_emitted:
+                self.on_tool_use_finished(
+                    tool=tool,
+                    tool_calling=calling,
+                    from_cache=from_cache,
+                    started_at=started_at,
+                    result=result,
+                )
+
+        # Handle retry after finally block ensures finished event was emitted
+        if should_retry:
+            return self.use(calling=calling, tool_string=tool_string)
 
         return result
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
      personal goal is: Help with tasks\nTo give my best complete final answer to
      the task respond using the exact following format:\n\nThought: I now can give
      a great answer\nFinal Answer: Your final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''done''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word done.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '794'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n  \"id\": \"chatcmpl-D087GaV1OkB4Yos5MqLYqRSpLLZkV\",\n  \"object\":
        \"chat.completion\",\n  \"created\": 1768923570,\n  \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
        \"assistant\",\n        \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: done\",\n        \"refusal\": null,\n        \"annotations\": []\n
        \     },\n      \"logprobs\": null,\n      \"finish_reason\": \"stop\"\n    }\n
        \ ],\n  \"usage\": {\n    \"prompt_tokens\": 159,\n    \"completion_tokens\":
        14,\n    \"total_tokens\": 173,\n    \"prompt_tokens_details\": {\n      \"cached_tokens\":
        0,\n      \"audio_tokens\": 0\n    },\n    \"completion_tokens_details\":
        {\n      \"reasoning_tokens\": 0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\":
        0,\n      \"rejected_prediction_tokens\": 0\n    }\n  },\n  \"service_tier\":
        \"default\",\n  \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:30 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '446'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '472'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Helper. You help.\nYour
      personal goal is: Help with tasks\nTo give my best complete final answer to
      the task respond using the exact following format:\n\nThought: I now can give
      a great answer\nFinal Answer: Your final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''hi''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word hi.\nyou MUST return the actual complete content as the final answer, not
      a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '790'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n  \"id\": \"chatcmpl-D087HNU70QfEltqUwIaR3WflNQJMq\",\n  \"object\":
        \"chat.completion\",\n  \"created\": 1768923571,\n  \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
        \"assistant\",\n        \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: hi\",\n        \"refusal\": null,\n        \"annotations\": []\n      },\n
        \     \"logprobs\": null,\n      \"finish_reason\": \"stop\"\n    }\n  ],\n
        \ \"usage\": {\n    \"prompt_tokens\": 159,\n    \"completion_tokens\": 14,\n
        \ \"total_tokens\": 173,\n    \"prompt_tokens_details\": {\n      \"cached_tokens\":
        0,\n      \"audio_tokens\": 0\n    },\n    \"completion_tokens_details\":
        {\n      \"reasoning_tokens\": 0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\":
        0,\n      \"rejected_prediction_tokens\": 0\n    }\n  },\n  \"service_tier\":
        \"default\",\n  \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:31 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '401'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '429'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''yes'' and nothing else.\n\nThis is the expected criteria for your final answer:
      The word yes.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '809'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n  \"id\": \"chatcmpl-D0876LY6Tp1gWmwQ5f2A6EsqdbLOK\",\n  \"object\":
        \"chat.completion\",\n  \"created\": 1768923560,\n  \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
        \"assistant\",\n        \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: yes\",\n        \"refusal\": null,\n        \"annotations\": []\n
        \     },\n      \"logprobs\": null,\n      \"finish_reason\": \"stop\"\n    }\n
        \ ],\n  \"usage\": {\n    \"prompt_tokens\": 161,\n    \"completion_tokens\":
        14,\n    \"total_tokens\": 175,\n    \"prompt_tokens_details\": {\n      \"cached_tokens\":
        0,\n      \"audio_tokens\": 0\n    },\n    \"completion_tokens_details\":
        {\n      \"reasoning_tokens\": 0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\":
        0,\n      \"rejected_prediction_tokens\": 0\n    }\n  },\n  \"service_tier\":
        \"default\",\n  \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:21 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '519'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '758'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''hello'' and nothing else.\n\nThis is the expected criteria for your final
      answer: The word hello.\nyou MUST return the actual complete content as the
      final answer, not a summary.\n\nBegin! This is VERY important to you, use the
      tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '813'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n  \"id\": \"chatcmpl-D0874asFandwADBjb4DfArsTUyu8K\",\n  \"object\":
        \"chat.completion\",\n  \"created\": 1768923558,\n  \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
        \"assistant\",\n        \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: hello\",\n        \"refusal\": null,\n        \"annotations\": []\n
        \     },\n      \"logprobs\": null,\n      \"finish_reason\": \"stop\"\n    }\n
        \ ],\n  \"usage\": {\n    \"prompt_tokens\": 161,\n    \"completion_tokens\":
        14,\n    \"total_tokens\": 175,\n    \"prompt_tokens_details\": {\n      \"cached_tokens\":
        0,\n      \"audio_tokens\": 0\n    },\n    \"completion_tokens_details\":
        {\n      \"reasoning_tokens\": 0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\":
        0,\n      \"rejected_prediction_tokens\": 0\n    }\n  },\n  \"service_tier\":
        \"default\",\n  \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:18 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '478'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '497'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Responder. You give short
      answers.\nYour personal goal is: Respond briefly\nTo give my best complete final
      answer to the task respond using the exact following format:\n\nThought: I now
      can give a great answer\nFinal Answer: Your final answer must be the great and
      the most complete as possible, it must be outcome described.\n\nI MUST use these
      formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say
      ''ok'' and nothing else.\n\nThis is the expected criteria for your final answer:
      The word ok.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '807'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D0875A6FEJ2ZFKVHohoJdbBgKEMNx\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768923559,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: ok\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 161,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 15:39:19 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '406'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '439'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Worker. You work.\nYour
      personal goal is: Do work\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''go'' and nothing
      else.\n\nThis is the expected criteria for your final answer: The word go.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '782'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09c91Qh5LJ73NLwFrcRhThK7zNKS\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768929329,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: go\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:15:30 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '521'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '781'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Worker. You work.\nYour
      personal goal is: Do work\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''complete''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word complete.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '794'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09bYGfIe5pA04mBGuMO94KLyKhry\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768929292,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: complete\",\n \"refusal\": null,\n \"annotations\":
        []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
        \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\":
        14,\n \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:14:53 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '436'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '660'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,118 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Worker. You work.\nYour
      personal goal is: Do work\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''ready'' and
      nothing else.\n\nThis is the expected criteria for your final answer: The word
      ready.\nyou MUST return the actual complete content as the final answer, not
      a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '788'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09cBIfuX53tF9rWbEKXXIr20uzSv\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768929331,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: ready\",\n \"refusal\": null,\n \"annotations\": []\n
        \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
        \ ],\n \"usage\": {\n \"prompt_tokens\": 158,\n \"completion_tokens\":
        14,\n \"total_tokens\": 172,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:15:32 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '517'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '1841'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,234 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are First. You go first.\nYour
      personal goal is: Be first\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''a'' and nothing
      else.\n\nThis is the expected criteria for your final answer: The letter a.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '786'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09rkgXjWfa1XwICCnLAVV3LXFlUZ\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768930296,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: a\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:31:37 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '418'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '443'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Second. You go second.\nYour
      personal goal is: Be second\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''b'' and nothing
      else.\n\nThis is the expected criteria for your final answer: The letter b.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '789'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09rlVUPgS5haPGYgA4RmW9EfPArd\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768930297,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: b\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 159,\n \"completion_tokens\": 14,\n
        \ \"total_tokens\": 173,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:31:38 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '345'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '658'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,234 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Alpha. You are alpha.\nYour
      personal goal is: Do alpha work\nTo give my best complete final answer to the
      task respond using the exact following format:\n\nThought: I now can give a
      great answer\nFinal Answer: Your final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''alpha''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word alpha.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '798'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09ri7edf1TcYqD0vAkS3IjNkai3V\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768930294,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: alpha\",\n \"refusal\": null,\n \"annotations\": []\n
        \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
        \ ],\n \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\":
        14,\n \"total_tokens\": 174,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:31:34 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '491'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '513'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Beta. You are beta.\nYour
      personal goal is: Do beta work\nTo give my best complete final answer to the
      task respond using the exact following format:\n\nThought: I now can give a
      great answer\nFinal Answer: Your final answer must be the great and the most
      complete as possible, it must be outcome described.\n\nI MUST use these formats,
      my job depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''beta''
      and nothing else.\n\nThis is the expected criteria for your final answer: The
      word beta.\nyou MUST return the actual complete content as the final answer,
      not a summary.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '793'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09rj5wiKmsX5P72qH0GEKL5pQEq6\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768930295,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: beta\",\n \"refusal\": null,\n \"annotations\": []\n
        \ },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n
        \ ],\n \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\":
        14,\n \"total_tokens\": 174,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:31:35 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '506'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '741'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,234 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are First. You go first.\nYour
      personal goal is: Be first\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''1'' and nothing
      else.\n\nThis is the expected criteria for your final answer: The number 1.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '786'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09rmoYXlYGqmDh0Ca3r9xunjmE7k\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768930298,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: 1\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\": 15,\n
        \ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:31:38 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '387'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '403'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
- request:
    body: '{"messages":[{"role":"system","content":"You are Second. You go second.\nYour
      personal goal is: Be second\nTo give my best complete final answer to the task
      respond using the exact following format:\n\nThought: I now can give a great
      answer\nFinal Answer: Your final answer must be the great and the most complete
      as possible, it must be outcome described.\n\nI MUST use these formats, my job
      depends on it!"},{"role":"user","content":"\nCurrent Task: Say ''2'' and nothing
      else.\n\nThis is the expected criteria for your final answer: The number 2.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
      This is VERY important to you, use the tools available and give your best Final
      Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      authorization:
      - AUTHORIZATION-XXX
      connection:
      - keep-alive
      content-length:
      - '789'
      content-type:
      - application/json
      host:
      - api.openai.com
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - async:asyncio
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-D09rnDNZsxICQvSZrx5rlgMdc2Tbp\",\n \"object\":
        \"chat.completion\",\n \"created\": 1768930299,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"Thought: I now can give a great answer\\nFinal
        Answer: 2\",\n \"refusal\": null,\n \"annotations\": []\n },\n
        \ \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n
        \ \"usage\": {\n \"prompt_tokens\": 160,\n \"completion_tokens\": 15,\n
        \ \"total_tokens\": 175,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Tue, 20 Jan 2026 17:31:39 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - X-CONTENT-TYPE-XXX
      access-control-expose-headers:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '560'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '581'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
      - X-RATELIMIT-LIMIT-REQUESTS-XXX
      x-ratelimit-limit-tokens:
      - X-RATELIMIT-LIMIT-TOKENS-XXX
      x-ratelimit-remaining-requests:
      - X-RATELIMIT-REMAINING-REQUESTS-XXX
      x-ratelimit-remaining-tokens:
      - X-RATELIMIT-REMAINING-TOKENS-XXX
      x-ratelimit-reset-requests:
      - X-RATELIMIT-RESET-REQUESTS-XXX
      x-ratelimit-reset-tokens:
      - X-RATELIMIT-RESET-TOKENS-XXX
      x-request-id:
      - X-REQUEST-ID-XXX
    status:
      code: 200
      message: OK
version: 1
180
lib/crewai/tests/events/test_event_context.py
Normal file
@@ -0,0 +1,180 @@
"""Tests for event context management."""

import pytest

from crewai.events.event_context import (
    SCOPE_ENDING_EVENTS,
    SCOPE_STARTING_EVENTS,
    VALID_EVENT_PAIRS,
    EmptyStackError,
    EventPairingError,
    MismatchBehavior,
    StackDepthExceededError,
    _event_context_config,
    EventContextConfig,
    get_current_parent_id,
    get_enclosing_parent_id,
    get_last_event_id,
    get_triggering_event_id,
    handle_empty_pop,
    handle_mismatch,
    pop_event_scope,
    push_event_scope,
    reset_last_event_id,
    set_last_event_id,
    set_triggering_event_id,
    triggered_by_scope,
)


class TestStackOperations:
    """Tests for stack push/pop operations."""

    def test_empty_stack_returns_none(self) -> None:
        assert get_current_parent_id() is None
        assert get_enclosing_parent_id() is None

    def test_push_and_get_parent(self) -> None:
        push_event_scope("event-1", "task_started")
        assert get_current_parent_id() == "event-1"

    def test_nested_push(self) -> None:
        push_event_scope("event-1", "crew_kickoff_started")
        push_event_scope("event-2", "task_started")
        assert get_current_parent_id() == "event-2"
        assert get_enclosing_parent_id() == "event-1"

    def test_pop_restores_parent(self) -> None:
        push_event_scope("event-1", "crew_kickoff_started")
        push_event_scope("event-2", "task_started")
        popped = pop_event_scope()
        assert popped == ("event-2", "task_started")
        assert get_current_parent_id() == "event-1"

    def test_pop_empty_stack_returns_none(self) -> None:
        assert pop_event_scope() is None


class TestStackDepthLimit:
    """Tests for stack depth limit."""

    def test_depth_limit_exceeded_raises(self) -> None:
        _event_context_config.set(EventContextConfig(max_stack_depth=3))

        push_event_scope("event-1", "type-1")
        push_event_scope("event-2", "type-2")
        push_event_scope("event-3", "type-3")

        with pytest.raises(StackDepthExceededError):
            push_event_scope("event-4", "type-4")


class TestMismatchHandling:
    """Tests for mismatch behavior."""

    def test_handle_mismatch_raises_when_configured(self) -> None:
        _event_context_config.set(
            EventContextConfig(mismatch_behavior=MismatchBehavior.RAISE)
        )

        with pytest.raises(EventPairingError):
            handle_mismatch("task_completed", "llm_call_started", "task_started")

    def test_handle_empty_pop_raises_when_configured(self) -> None:
        _event_context_config.set(
            EventContextConfig(empty_pop_behavior=MismatchBehavior.RAISE)
        )

        with pytest.raises(EmptyStackError):
            handle_empty_pop("task_completed")


class TestEventTypeSets:
    """Tests for event type set completeness."""

    def test_all_ending_events_have_pairs(self) -> None:
        for ending_event in SCOPE_ENDING_EVENTS:
            assert ending_event in VALID_EVENT_PAIRS

    def test_all_pairs_reference_starting_events(self) -> None:
        for ending_event, starting_event in VALID_EVENT_PAIRS.items():
            assert starting_event in SCOPE_STARTING_EVENTS

    def test_starting_and_ending_are_disjoint(self) -> None:
        overlap = SCOPE_STARTING_EVENTS & SCOPE_ENDING_EVENTS
        assert not overlap


class TestLastEventIdTracking:
    """Tests for linear chain event ID tracking."""

    def test_initial_last_event_id_is_none(self) -> None:
        reset_last_event_id()
        assert get_last_event_id() is None

    def test_set_and_get_last_event_id(self) -> None:
        reset_last_event_id()
        set_last_event_id("event-123")
        assert get_last_event_id() == "event-123"

    def test_reset_clears_last_event_id(self) -> None:
        set_last_event_id("event-123")
        reset_last_event_id()
        assert get_last_event_id() is None

    def test_overwrite_last_event_id(self) -> None:
        reset_last_event_id()
        set_last_event_id("event-1")
        set_last_event_id("event-2")
        assert get_last_event_id() == "event-2"


class TestTriggeringEventIdTracking:
    """Tests for causal chain event ID tracking."""

    def test_initial_triggering_event_id_is_none(self) -> None:
        set_triggering_event_id(None)
        assert get_triggering_event_id() is None

    def test_set_and_get_triggering_event_id(self) -> None:
        set_triggering_event_id("trigger-123")
        assert get_triggering_event_id() == "trigger-123"
        set_triggering_event_id(None)

    def test_set_none_clears_triggering_event_id(self) -> None:
        set_triggering_event_id("trigger-123")
        set_triggering_event_id(None)
        assert get_triggering_event_id() is None


class TestTriggeredByScope:
    """Tests for triggered_by_scope context manager."""

    def test_scope_sets_triggering_id(self) -> None:
        set_triggering_event_id(None)
        with triggered_by_scope("trigger-456"):
            assert get_triggering_event_id() == "trigger-456"

    def test_scope_restores_previous_value(self) -> None:
        set_triggering_event_id(None)
        with triggered_by_scope("trigger-456"):
            pass
        assert get_triggering_event_id() is None

    def test_nested_scopes(self) -> None:
        set_triggering_event_id(None)
        with triggered_by_scope("outer"):
            assert get_triggering_event_id() == "outer"
            with triggered_by_scope("inner"):
                assert get_triggering_event_id() == "inner"
            assert get_triggering_event_id() == "outer"
        assert get_triggering_event_id() is None

    def test_scope_restores_on_exception(self) -> None:
        set_triggering_event_id(None)
        try:
            with triggered_by_scope("trigger-789"):
                raise ValueError("test error")
        except ValueError:
            pass
        assert get_triggering_event_id() is None
1649
lib/crewai/tests/events/test_event_ordering.py
Normal file
File diff suppressed because it is too large
@@ -304,6 +304,11 @@ def test_external_memory_search_events(
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "previous_event_id": ANY,
        "triggered_by_event_id": None,
        "emission_sequence": ANY,
        "query": "test value",
        "limit": 3,
        "score_threshold": 0.35,
@@ -321,6 +326,11 @@ def test_external_memory_search_events(
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": ANY,
        "previous_event_id": ANY,
        "triggered_by_event_id": None,
        "emission_sequence": ANY,
        "query": "test value",
        "results": [],
        "limit": 3,
@@ -376,6 +386,11 @@ def test_external_memory_save_events(
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "previous_event_id": ANY,
        "triggered_by_event_id": None,
        "emission_sequence": ANY,
        "value": "saving value",
        "metadata": {"task": "test_task"},
    }
@@ -392,6 +407,11 @@ def test_external_memory_save_events(
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": ANY,
        "previous_event_id": ANY,
        "triggered_by_event_id": None,
        "emission_sequence": ANY,
        "value": "saving value",
        "metadata": {"task": "test_task"},
        "save_time_ms": ANY,
@@ -70,6 +70,11 @@ def test_long_term_memory_save_events(long_term_memory):
        "from_agent": None,
        "agent_role": "test_agent",
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "previous_event_id": ANY,
        "triggered_by_event_id": None,
        "emission_sequence": ANY,
        "value": "test_task",
        "metadata": {"task": "test_task", "quality": 0.5},
    }
@@ -85,6 +90,11 @@ def test_long_term_memory_save_events(long_term_memory):
        "from_agent": None,
        "agent_role": "test_agent",
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "previous_event_id": ANY,
        "triggered_by_event_id": None,
        "emission_sequence": ANY,
        "value": "test_task",
        "metadata": {
            "task": "test_task",
@@ -139,6 +149,11 @@ def test_long_term_memory_search_events(long_term_memory):
        "from_agent": None,
        "agent_role": None,
        "agent_id": None,
        "event_id": ANY,
        "parent_event_id": None,
        "previous_event_id": ANY,
        "triggered_by_event_id": None,
        "emission_sequence": ANY,
        "query": "test query",
        "limit": 5,
        "score_threshold": None,
@@ -156,6 +171,11 @@ def test_long_term_memory_search_events(long_term_memory):
 "from_agent": None,
 "agent_role": None,
 "agent_id": None,
+"event_id": ANY,
+"parent_event_id": ANY,
+"previous_event_id": ANY,
+"triggered_by_event_id": None,
+"emission_sequence": ANY,
 "query": "test query",
 "results": None,
 "limit": 5,
@@ -81,6 +81,11 @@ def test_short_term_memory_search_events(short_term_memory):
 "from_agent": None,
 "agent_role": None,
 "agent_id": None,
+"event_id": ANY,
+"parent_event_id": None,
+"previous_event_id": ANY,
+"triggered_by_event_id": None,
+"emission_sequence": ANY,
 "query": "test value",
 "limit": 3,
 "score_threshold": 0.35,
@@ -98,6 +103,11 @@ def test_short_term_memory_search_events(short_term_memory):
 "from_agent": None,
 "agent_role": None,
 "agent_id": None,
+"event_id": ANY,
+"parent_event_id": None,
+"previous_event_id": ANY,
+"triggered_by_event_id": None,
+"emission_sequence": ANY,
 "query": "test value",
 "results": [],
 "limit": 3,
@@ -150,6 +160,11 @@ def test_short_term_memory_save_events(short_term_memory):
 "from_agent": None,
 "agent_role": None,
 "agent_id": None,
+"event_id": ANY,
+"parent_event_id": None,
+"previous_event_id": ANY,
+"triggered_by_event_id": None,
+"emission_sequence": ANY,
 "value": "test value",
 "metadata": {"task": "test_task"},
 }
@@ -166,6 +181,11 @@ def test_short_term_memory_save_events(short_term_memory):
 "from_agent": None,
 "agent_role": None,
 "agent_id": None,
+"event_id": ANY,
+"parent_event_id": None,
+"previous_event_id": ANY,
+"triggered_by_event_id": None,
+"emission_sequence": ANY,
 "value": "test value",
 "metadata": {"task": "test_task"},
 "save_time_ms": ANY,
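One detail worth noting across the memory hunks: parent_event_id is pinned to None for top-level emissions but relaxed to ANY where the completed event fires inside the scope of its started counterpart. A plausible mechanism for producing such lineage — purely a sketch under assumed semantics, not crewAI's actual implementation — is a context-local id stack plus a global emission counter:

    from contextvars import ContextVar
    from itertools import count
    from uuid import uuid4

    # Illustrative names; the real event-context machinery differs.
    _id_stack: ContextVar[tuple[str, ...]] = ContextVar("id_stack", default=())
    _sequence = count()
    _previous_id = None

    def emit(event_type: str) -> dict:
        """Build an event payload carrying the lineage fields asserted above."""
        global _previous_id
        stack = _id_stack.get()
        event = {
            "type": event_type,
            "event_id": uuid4().hex,
            "parent_event_id": stack[-1] if stack else None,  # None at top level
            "previous_event_id": _previous_id,                # linear ordering link
            "triggered_by_event_id": None,
            "emission_sequence": next(_sequence),             # global monotonic counter
        }
        _previous_id = event["event_id"]
        return event

    first = emit("memory_save_started")
    print(first["parent_event_id"])   # None: nothing on the stack yet
    token = _id_stack.set((first["event_id"],))
    nested = emit("memory_save_completed")
    print(nested["parent_event_id"])  # the started event's id
    _id_stack.reset(token)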
@@ -1204,7 +1204,8 @@ def test_complex_and_or_branching():
 
 
 # Final should be after both 2a and 2b
-assert execution_order[-1] == "final"
+# Note: we don't assert final is last because branch_1c has no downstream
+# dependencies and can complete after final due to parallel execution
 assert execution_order.index("final") > execution_order.index("branch_2a")
 assert execution_order.index("final") > execution_order.index("branch_2b")
 
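This hunk replaces an over-strict assertion with a comment explaining why: under parallel execution, branch_1c has no downstream dependents and may legitimately finish after final, so asserting that final is last is flaky. Relative ordering via list.index checks exactly the dependencies that actually exist. A toy illustration (node names reused from the test; the two interleavings are invented):

    # Two completion orders that are both valid under parallel execution.
    run_a = ["start", "branch_1c", "branch_2a", "branch_2b", "final"]
    run_b = ["start", "branch_2a", "branch_2b", "final", "branch_1c"]

    for execution_order in (run_a, run_b):
        # Too strict: fails on run_b even though the flow behaved correctly.
        final_is_last = execution_order[-1] == "final"
        # Robust: only the edges that exist in the flow graph are checked.
        after_deps = (
            execution_order.index("final") > execution_order.index("branch_2a")
            and execution_order.index("final") > execution_order.index("branch_2b")
        )
        print(final_is_last, after_deps)  # run_a: True True; run_b: False True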
uv.lock (generated, 19 changed lines)
@@ -1295,7 +1295,7 @@ requires-dist = [
 { name = "json5", specifier = "~=0.10.0" },
 { name = "jsonref", specifier = "~=1.1.0" },
 { name = "litellm", marker = "extra == 'litellm'", specifier = "~=1.74.9" },
-{ name = "mcp", specifier = "~=1.16.0" },
+{ name = "mcp", specifier = "~=1.23.1" },
 { name = "mem0ai", marker = "extra == 'mem0'", specifier = "~=0.1.94" },
 { name = "openai", specifier = "~=1.83.0" },
 { name = "openpyxl", specifier = "~=3.1.5" },
@@ -1308,7 +1308,7 @@ requires-dist = [
 { name = "portalocker", specifier = "~=2.7.0" },
 { name = "pydantic", specifier = "~=2.11.9" },
 { name = "pydantic-settings", specifier = "~=2.10.1" },
-{ name = "pyjwt", specifier = "~=2.9.0" },
+{ name = "pyjwt", specifier = ">=2.9.0,<3" },
 { name = "python-dotenv", specifier = "~=1.1.1" },
 { name = "qdrant-client", extras = ["fastembed"], marker = "extra == 'qdrant'", specifier = "~=1.14.3" },
 { name = "regex", specifier = "~=2024.9.11" },
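The pyjwt specifier is widened because ~=2.9.0 is a compatible-release pin at the patch level: per PEP 440 it expands to >=2.9.0, <2.10.0, which excludes the PyJWT 2.10.1 this lock now resolves, whereas >=2.9.0,<3 admits any 2.x release from 2.9.0 onward. A quick check with the packaging library (the same PEP 440 semantics resolvers use):

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    compatible = SpecifierSet("~=2.9.0")   # equivalent to >=2.9.0, <2.10.0
    widened = SpecifierSet(">=2.9.0,<3")

    v = Version("2.10.1")                  # the version this lock resolves
    print(v in compatible)  # False: ~=2.9.0 pins the minor version
    print(v in widened)     # True: any 2.x from 2.9.0 on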
@@ -3777,7 +3777,7 @@ wheels = [
 
 [[package]]
 name = "mcp"
-version = "1.16.0"
+version = "1.23.3"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
 { name = "anyio" },
@@ -3786,15 +3786,18 @@ dependencies = [
 { name = "jsonschema" },
 { name = "pydantic" },
 { name = "pydantic-settings" },
+{ name = "pyjwt", extra = ["crypto"] },
 { name = "python-multipart" },
 { name = "pywin32", marker = "sys_platform == 'win32'" },
 { name = "sse-starlette" },
 { name = "starlette" },
+{ name = "typing-extensions" },
+{ name = "typing-inspection" },
 { name = "uvicorn", marker = "sys_platform != 'emscripten'" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/3d/a1/b1f328da3b153683d2ec34f849b4b6eac2790fb240e3aef06ff2fab3df9d/mcp-1.16.0.tar.gz", hash = "sha256:39b8ca25460c578ee2cdad33feeea122694cfdf73eef58bee76c42f6ef0589df", size = 472918, upload-time = "2025-10-02T16:58:20.631Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/a7/a4/d06a303f45997e266f2c228081abe299bbcba216cb806128e2e49095d25f/mcp-1.23.3.tar.gz", hash = "sha256:b3b0da2cc949950ce1259c7bfc1b081905a51916fcd7c8182125b85e70825201", size = 600697, upload-time = "2025-12-09T16:04:37.351Z" }
 wheels = [
-{ url = "https://files.pythonhosted.org/packages/c9/0e/7cebc88e17daf94ebe28c95633af595ccb2864dc2ee7abd75542d98495cc/mcp-1.16.0-py3-none-any.whl", hash = "sha256:ec917be9a5d31b09ba331e1768aa576e0af45470d657a0319996a20a57d7d633", size = 167266, upload-time = "2025-10-02T16:58:19.039Z" },
+{ url = "https://files.pythonhosted.org/packages/32/c6/13c1a26b47b3f3a3b480783001ada4268917c9f42d78a079c336da2e75e5/mcp-1.23.3-py3-none-any.whl", hash = "sha256:32768af4b46a1b4f7df34e2bfdf5c6011e7b63d7f1b0e321d0fdef4cd6082031", size = 231570, upload-time = "2025-12-09T16:04:35.56Z" },
 ]
 
 [[package]]
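The notable addition in this hunk is { name = "pyjwt", extra = ["crypto"] }: mcp now depends on PyJWT with its crypto extra, which pulls in the cryptography package for asymmetric signature algorithms. In PEP 508 requirement syntax that is spelled pyjwt[crypto]; packaging parses the form as follows (the version bound below is illustrative — the lock entry records only the extra):

    from packaging.requirements import Requirement

    req = Requirement("pyjwt[crypto]>=2.9")
    print(req.name)    # pyjwt
    print(req.extras)  # {'crypto'}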
@@ -6030,11 +6033,11 @@ wheels = [
 
 [[package]]
 name = "pyjwt"
-version = "2.9.0"
+version = "2.10.1"
 source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/fb/68/ce067f09fca4abeca8771fe667d89cc347d1e99da3e093112ac329c6020e/pyjwt-2.9.0.tar.gz", hash = "sha256:7e1e5b56cc735432a7369cbfa0efe50fa113ebecdc04ae6922deba8b84582d0c", size = 78825, upload-time = "2024-08-01T15:01:08.445Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" }
 wheels = [
-{ url = "https://files.pythonhosted.org/packages/79/84/0fdf9b18ba31d69877bd39c9cd6052b47f3761e9910c15de788e519f079f/PyJWT-2.9.0-py3-none-any.whl", hash = "sha256:3b02fb0f44517787776cf48f2ae25d8e14f300e6d7545a4315cee571a415e850", size = 22344, upload-time = "2024-08-01T15:01:06.481Z" },
+{ url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" },
 ]
 
 [package.optional-dependencies]
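After re-locking, a quick sanity check that an installed environment actually picked up the bumped versions — assuming both distributions are present; on recent Pythons importlib.metadata normalizes distribution names, so "pyjwt" finds PyJWT:

    from importlib.metadata import version

    # Versions expected per this lockfile; raises PackageNotFoundError
    # if a distribution is missing from the active environment.
    print(version("mcp"))    # 1.23.3
    print(version("pyjwt"))  # 2.10.1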