Merge branch 'main' into alert-autofix-27

Greyson LaLonde authored on 2025-11-18 12:11:17 -05:00, committed by GitHub
118 changed files with 13574 additions and 1727 deletions

View File

@@ -12,7 +12,7 @@ dependencies = [
"pytube>=15.0.0",
"requests>=2.32.5",
"docker>=7.1.0",
"crewai==1.4.1",
"crewai==1.5.0",
"lancedb>=0.5.4",
"tiktoken>=0.8.0",
"beautifulsoup4>=4.13.4",

View File

@@ -287,4 +287,4 @@ __all__ = [
"ZapierActionTools",
]
__version__ = "1.4.1"
__version__ = "1.5.0"

View File

@@ -12,12 +12,16 @@ from pydantic.types import ImportString
class QdrantToolSchema(BaseModel):
query: str = Field(..., description="Query to search in Qdrant DB")
query: str = Field(
..., description="Query to search in Qdrant DB - always required."
)
filter_by: str | None = Field(
default=None, description="Parameter to filter the search by."
default=None,
description="Parameter to filter the search by. When filtering, needs to be used in conjunction with filter_value.",
)
filter_value: Any | None = Field(
default=None, description="Value to filter the search by."
default=None,
description="Value to filter the search by. When filtering, needs to be used in conjunction with filter_by.",
)
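For illustration, the paired-filter contract reads like this when the schema is instantiated directly. The class body below mirrors the diff, so only the sample values are invented:

```python
from typing import Any

from pydantic import BaseModel, Field


class QdrantToolSchema(BaseModel):  # mirrors the fields in the diff above
    query: str = Field(..., description="Query to search in Qdrant DB - always required.")
    filter_by: str | None = Field(default=None, description="Filter key; pair with filter_value.")
    filter_value: Any | None = Field(default=None, description="Filter value; pair with filter_by.")


# An unfiltered search needs only the query; a filtered one supplies both
# filter_by and filter_value together (sample values are made up).
unfiltered = QdrantToolSchema(query="vector database benchmarks")
filtered = QdrantToolSchema(
    query="vector database benchmarks", filter_by="source", filter_value="docs"
)
```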

View File

@@ -48,7 +48,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
"crewai-tools==1.4.1",
"crewai-tools==1.5.0",
]
embeddings = [
"tiktoken~=0.8.0"

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "1.4.1"
__version__ = "1.5.0"
_telemetry_submitted = False

View File

@@ -38,6 +38,7 @@ class A2AConfig(BaseModel):
max_turns: Maximum conversation turns with A2A agent (default: 10).
response_model: Optional Pydantic model for structured A2A agent responses.
fail_fast: If True, raise error when agent unreachable; if False, skip and continue (default: True).
trust_remote_completion_status: If True, return A2A agent's result directly when status is "completed"; if False, always ask server agent to respond (default: False).
"""
endpoint: Url = Field(description="A2A agent endpoint URL")
@@ -57,3 +58,7 @@ class A2AConfig(BaseModel):
default=True,
description="If True, raise an error immediately when the A2A agent is unreachable. If False, skip the A2A agent and continue execution.",
)
trust_remote_completion_status: bool = Field(
default=False,
description='If True, return the A2A agent\'s result directly when status is "completed" without asking the server agent to respond. If False, always ask the server agent to respond, allowing it to potentially delegate again.',
)

View File

@@ -52,7 +52,7 @@ def wrap_agent_with_a2a_instance(agent: Agent) -> None:
Args:
agent: The agent instance to wrap
"""
original_execute_task = agent.execute_task.__func__
original_execute_task = agent.execute_task.__func__ # type: ignore[attr-defined]
@wraps(original_execute_task)
def execute_task_with_a2a(
@@ -73,7 +73,7 @@ def wrap_agent_with_a2a_instance(agent: Agent) -> None:
Task execution result
"""
if not self.a2a:
return original_execute_task(self, task, context, tools)
return original_execute_task(self, task, context, tools) # type: ignore[no-any-return]
a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
@@ -498,6 +498,23 @@ def _delegate_to_a2a(
conversation_history = a2a_result.get("history", [])
if a2a_result["status"] in ["completed", "input_required"]:
if (
a2a_result["status"] == "completed"
and agent_config.trust_remote_completion_status
):
result_text = a2a_result.get("result", "")
final_turn_number = turn_num + 1
crewai_event_bus.emit(
None,
A2AConversationCompletedEvent(
status="completed",
final_result=result_text,
error=None,
total_turns=final_turn_number,
),
)
return result_text # type: ignore[no-any-return]
final_result, next_request = _handle_agent_response_and_continue(
self=self,
a2a_result=a2a_result,
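The control flow added here can be summarized as a small sketch; the function and parameter names below are illustrative, not the library's API:

```python
from collections.abc import Callable


def resolve_a2a_turn(
    status: str,
    remote_result: str,
    trust_remote_completion_status: bool,
    ask_server_agent: Callable[[str], str],
) -> str:
    # With the flag set, a remote "completed" status short-circuits the loop
    # and the remote agent's text is returned verbatim.
    if status == "completed" and trust_remote_completion_status:
        return remote_result
    # Otherwise the server agent always gets the last word and may delegate again.
    return ask_server_agent(remote_result)


assert resolve_a2a_turn("completed", "done", True, lambda r: "server: " + r) == "done"
assert resolve_a2a_turn("completed", "done", False, lambda r: "server: " + r) == "server: done"
```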

View File

@@ -119,6 +119,7 @@ class Agent(BaseAgent):
_times_executed: int = PrivateAttr(default=0)
_mcp_clients: list[Any] = PrivateAttr(default_factory=list)
_last_messages: list[LLMMessage] = PrivateAttr(default_factory=list)
max_execution_time: int | None = Field(
default=None,
description="Maximum execution time for an agent to execute a task",
@@ -538,6 +539,12 @@ class Agent(BaseAgent):
event=AgentExecutionCompletedEvent(agent=self, task=task, output=result),
)
self._last_messages = (
self.agent_executor.messages.copy()
if self.agent_executor and hasattr(self.agent_executor, "messages")
else []
)
self._cleanup_mcp_clients()
return result
@@ -1341,6 +1348,15 @@ class Agent(BaseAgent):
def set_fingerprint(self, fingerprint: Fingerprint) -> None:
self.security_config.fingerprint = fingerprint
@property
def last_messages(self) -> list[LLMMessage]:
"""Get messages from the last task execution.
Returns:
List of LLM messages from the most recent task execution.
"""
return self._last_messages
def _get_knowledge_search_query(self, task_prompt: str, task: Task) -> str | None:
"""Generate a search query for the knowledge base based on the task description."""
crewai_event_bus.emit(
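Before any task runs, the new property returns an empty list; after execute_task it holds a copy of the executor's message history. A minimal check, with placeholder agent fields:

```python
from crewai import Agent

agent = Agent(role="Researcher", goal="Summarize findings", backstory="Placeholder")
assert agent.last_messages == []  # populated only after a task execution
```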

View File

@@ -23,6 +23,10 @@ from crewai.events.types.logging_events import (
AgentLogsExecutionEvent,
AgentLogsStartedEvent,
)
from crewai.hooks.llm_hooks import (
get_after_llm_call_hooks,
get_before_llm_call_hooks,
)
from crewai.utilities.agent_utils import (
enforce_rpm_limit,
format_message_for_llm,
@@ -130,6 +134,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self.messages: list[LLMMessage] = []
self.iterations = 0
self.log_error_after = 3
self.before_llm_call_hooks: list[Callable] = []
self.after_llm_call_hooks: list[Callable] = []
self.before_llm_call_hooks.extend(get_before_llm_call_hooks())
self.after_llm_call_hooks.extend(get_after_llm_call_hooks())
if self.llm:
# This may be mutating the shared llm object and needs further evaluation
existing_stop = getattr(self.llm, "stop", [])
@@ -226,6 +234,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
executor_context=self,
)
formatted_answer = process_llm_response(answer, self.use_stop_words) # type: ignore[assignment]
@@ -254,6 +263,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
task=self.task,
agent=self.agent,
function_calling_llm=self.function_calling_llm,
crew=self.crew,
)
formatted_answer = self._handle_agent_action(
formatted_answer, tool_result
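Per the executor change above, globally registered hooks are copied into each executor at construction time. A sketch using the registration helpers exported by the new crewai.hooks package later in this PR; the context object's attributes are an assumption:

```python
from crewai.hooks import clear_before_llm_call_hooks, register_before_llm_call_hook


def log_llm_call(context):  # receives an LLMCallHookContext
    # Attribute access here is illustrative; inspect the context in your version.
    print("about to call the LLM with", len(getattr(context, "messages", [])), "messages")


register_before_llm_call_hook(log_llm_call)
# ... executors created after this point pick the hook up ...
clear_before_llm_call_hooks()
```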

View File

@@ -1,5 +1,5 @@
import time
from typing import Any
from typing import TYPE_CHECKING, Any, TypeVar, cast
import webbrowser
from pydantic import BaseModel, Field
@@ -13,6 +13,8 @@ from crewai.cli.shared.token_manager import TokenManager
console = Console()
TOauth2Settings = TypeVar("TOauth2Settings", bound="Oauth2Settings")
class Oauth2Settings(BaseModel):
provider: str = Field(
@@ -28,9 +30,15 @@ class Oauth2Settings(BaseModel):
description="OAuth2 audience value, typically used to identify the target API or resource.",
default=None,
)
extra: dict[str, Any] = Field(
description="Extra configuration for the OAuth2 provider.",
default={},
)
@classmethod
def from_settings(cls):
def from_settings(cls: type[TOauth2Settings]) -> TOauth2Settings:
"""Create an Oauth2Settings instance from the CLI settings."""
settings = Settings()
return cls(
@@ -38,12 +46,20 @@ class Oauth2Settings(BaseModel):
domain=settings.oauth2_domain,
client_id=settings.oauth2_client_id,
audience=settings.oauth2_audience,
extra=settings.oauth2_extra,
)
if TYPE_CHECKING:
from crewai.cli.authentication.providers.base_provider import BaseProvider
class ProviderFactory:
@classmethod
def from_settings(cls, settings: Oauth2Settings | None = None):
def from_settings(
cls: type["ProviderFactory"], # noqa: UP037
settings: Oauth2Settings | None = None,
) -> "BaseProvider": # noqa: UP037
settings = settings or Oauth2Settings.from_settings()
import importlib
@@ -53,11 +69,11 @@ class ProviderFactory:
)
provider = getattr(module, f"{settings.provider.capitalize()}Provider")
return provider(settings)
return cast("BaseProvider", provider(settings))
class AuthenticationCommand:
def __init__(self):
def __init__(self) -> None:
self.token_manager = TokenManager()
self.oauth2_provider = ProviderFactory.from_settings()
@@ -84,7 +100,7 @@ class AuthenticationCommand:
timeout=20,
)
response.raise_for_status()
return response.json()
return cast(dict[str, Any], response.json())
def _display_auth_instructions(self, device_code_data: dict[str, str]) -> None:
"""Display the authentication instructions to the user."""

View File

@@ -24,3 +24,7 @@ class BaseProvider(ABC):
@abstractmethod
def get_client_id(self) -> str: ...
def get_required_fields(self) -> list[str]:
"""Returns which provider-specific fields inside the "extra" dict will be required"""
return []

View File

@@ -3,16 +3,16 @@ from crewai.cli.authentication.providers.base_provider import BaseProvider
class OktaProvider(BaseProvider):
def get_authorize_url(self) -> str:
return f"https://{self.settings.domain}/oauth2/default/v1/device/authorize"
return f"{self._oauth2_base_url()}/v1/device/authorize"
def get_token_url(self) -> str:
return f"https://{self.settings.domain}/oauth2/default/v1/token"
return f"{self._oauth2_base_url()}/v1/token"
def get_jwks_url(self) -> str:
return f"https://{self.settings.domain}/oauth2/default/v1/keys"
return f"{self._oauth2_base_url()}/v1/keys"
def get_issuer(self) -> str:
return f"https://{self.settings.domain}/oauth2/default"
return self._oauth2_base_url().removesuffix("/oauth2")
def get_audience(self) -> str:
if self.settings.audience is None:
@@ -27,3 +27,16 @@ class OktaProvider(BaseProvider):
"Client ID is required. Please set it in the configuration."
)
return self.settings.client_id
def get_required_fields(self) -> list[str]:
return ["authorization_server_name", "using_org_auth_server"]
def _oauth2_base_url(self) -> str:
using_org_auth_server = self.settings.extra.get("using_org_auth_server", False)
if using_org_auth_server:
base_url = f"https://{self.settings.domain}/oauth2"
else:
base_url = f"https://{self.settings.domain}/oauth2/{self.settings.extra.get('authorization_server_name', 'default')}"
return f"{base_url}"

View File

@@ -493,5 +493,206 @@ def config_reset():
config_command.reset_all_settings()
@crewai.group()
def env():
"""Environment variable commands."""
@env.command("view")
def env_view():
"""View tracing-related environment variables."""
import os
from pathlib import Path
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
console = Console()
# Check for .env file
env_file = Path(".env")
env_file_exists = env_file.exists()
# Create table for environment variables
table = Table(show_header=True, header_style="bold cyan", expand=True)
table.add_column("Environment Variable", style="cyan", width=30)
table.add_column("Value", style="white", width=20)
table.add_column("Source", style="yellow", width=20)
# Check CREWAI_TRACING_ENABLED
crewai_tracing = os.getenv("CREWAI_TRACING_ENABLED", "")
if crewai_tracing:
table.add_row(
"CREWAI_TRACING_ENABLED",
crewai_tracing,
"Environment/Shell",
)
else:
table.add_row(
"CREWAI_TRACING_ENABLED",
"[dim]Not set[/dim]",
"[dim]—[/dim]",
)
# Check other related env vars
crewai_testing = os.getenv("CREWAI_TESTING", "")
if crewai_testing:
table.add_row("CREWAI_TESTING", crewai_testing, "Environment/Shell")
crewai_user_id = os.getenv("CREWAI_USER_ID", "")
if crewai_user_id:
table.add_row("CREWAI_USER_ID", crewai_user_id, "Environment/Shell")
crewai_org_id = os.getenv("CREWAI_ORG_ID", "")
if crewai_org_id:
table.add_row("CREWAI_ORG_ID", crewai_org_id, "Environment/Shell")
# Check if .env file exists
table.add_row(
".env file",
"✅ Found" if env_file_exists else "❌ Not found",
str(env_file.resolve()) if env_file_exists else "N/A",
)
panel = Panel(
table,
title="Tracing Environment Variables",
border_style="blue",
padding=(1, 2),
)
console.print("\n")
console.print(panel)
# Show helpful message
if env_file_exists:
console.print(
"\n[dim]💡 Tip: To enable tracing via .env, add: CREWAI_TRACING_ENABLED=true[/dim]"
)
else:
console.print(
"\n[dim]💡 Tip: Create a .env file in your project root and add: CREWAI_TRACING_ENABLED=true[/dim]"
)
console.print()
@crewai.group()
def traces():
"""Trace collection management commands."""
@traces.command("enable")
def traces_enable():
"""Enable trace collection for crew/flow executions."""
from rich.console import Console
from rich.panel import Panel
from crewai.events.listeners.tracing.utils import (
_load_user_data,
_save_user_data,
)
console = Console()
# Update user data to enable traces
user_data = _load_user_data()
user_data["trace_consent"] = True
user_data["first_execution_done"] = True
_save_user_data(user_data)
panel = Panel(
"✅ Trace collection has been enabled!\n\n"
"Your crew/flow executions will now send traces to CrewAI+.\n"
"Use 'crewai traces disable' to turn off trace collection.",
title="Traces Enabled",
border_style="green",
padding=(1, 2),
)
console.print(panel)
@traces.command("disable")
def traces_disable():
"""Disable trace collection for crew/flow executions."""
from rich.console import Console
from rich.panel import Panel
from crewai.events.listeners.tracing.utils import (
_load_user_data,
_save_user_data,
)
console = Console()
# Update user data to disable traces
user_data = _load_user_data()
user_data["trace_consent"] = False
user_data["first_execution_done"] = True
_save_user_data(user_data)
panel = Panel(
"❌ Trace collection has been disabled!\n\n"
"Your crew/flow executions will no longer send traces.\n"
"Use 'crewai traces enable' to turn trace collection back on.",
title="Traces Disabled",
border_style="red",
padding=(1, 2),
)
console.print(panel)
@traces.command("status")
def traces_status():
"""Show current trace collection status."""
import os
from rich.console import Console
from rich.panel import Panel
from rich.table import Table
from crewai.events.listeners.tracing.utils import (
_load_user_data,
is_tracing_enabled,
)
console = Console()
user_data = _load_user_data()
table = Table(show_header=False, box=None)
table.add_column("Setting", style="cyan")
table.add_column("Value", style="white")
# Check environment variable
env_enabled = os.getenv("CREWAI_TRACING_ENABLED", "false")
table.add_row("CREWAI_TRACING_ENABLED", env_enabled)
# Check user consent
trace_consent = user_data.get("trace_consent")
if trace_consent is True:
consent_status = "✅ Enabled (user consented)"
elif trace_consent is False:
consent_status = "❌ Disabled (user declined)"
else:
consent_status = "⚪ Not set (first-time user)"
table.add_row("User Consent", consent_status)
# Check overall status
if is_tracing_enabled():
overall_status = "✅ ENABLED"
border_style = "green"
else:
overall_status = "❌ DISABLED"
border_style = "red"
table.add_row("Overall Status", overall_status)
panel = Panel(
table,
title="Trace Collection Status",
border_style=border_style,
padding=(1, 2),
)
console.print(panel)
if __name__ == "__main__":
crewai()

View File

@@ -11,18 +11,18 @@ console = Console()
class BaseCommand:
def __init__(self):
def __init__(self) -> None:
self._telemetry = Telemetry()
self._telemetry.set_tracer()
class PlusAPIMixin:
def __init__(self, telemetry):
def __init__(self, telemetry: Telemetry) -> None:
try:
telemetry.set_tracer()
self.plus_api_client = PlusAPI(api_key=get_auth_token())
except Exception:
self._deploy_signup_error_span = telemetry.deploy_signup_error_span()
telemetry.deploy_signup_error_span()
console.print(
"Please sign up/login to CrewAI+ before using the CLI.",
style="bold red",

View File

@@ -2,6 +2,7 @@ import json
from logging import getLogger
from pathlib import Path
import tempfile
from typing import Any
from pydantic import BaseModel, Field
@@ -136,7 +137,12 @@ class Settings(BaseModel):
default=DEFAULT_CLI_SETTINGS["oauth2_domain"],
)
def __init__(self, config_path: Path | None = None, **data):
oauth2_extra: dict[str, Any] = Field(
description="Extra configuration for the OAuth2 provider.",
default={},
)
def __init__(self, config_path: Path | None = None, **data: dict[str, Any]) -> None:
"""Load Settings from config path with fallback support"""
if config_path is None:
config_path = get_writable_config_path()

View File

@@ -1,9 +1,10 @@
from typing import Any
from typing import Any, cast
import requests
from requests.exceptions import JSONDecodeError, RequestException
from rich.console import Console
from crewai.cli.authentication.main import Oauth2Settings, ProviderFactory
from crewai.cli.command import BaseCommand
from crewai.cli.settings.main import SettingsCommand
from crewai.cli.version import get_crewai_version
@@ -13,7 +14,7 @@ console = Console()
class EnterpriseConfigureCommand(BaseCommand):
def __init__(self):
def __init__(self) -> None:
super().__init__()
self.settings_command = SettingsCommand()
@@ -54,25 +55,12 @@ class EnterpriseConfigureCommand(BaseCommand):
except JSONDecodeError as e:
raise ValueError(f"Invalid JSON response from {oauth_endpoint}") from e
required_fields = [
"audience",
"domain",
"device_authorization_client_id",
"provider",
]
missing_fields = [
field for field in required_fields if field not in oauth_config
]
if missing_fields:
raise ValueError(
f"Missing required fields in OAuth2 configuration: {', '.join(missing_fields)}"
)
self._validate_oauth_config(oauth_config)
console.print(
"✅ Successfully retrieved OAuth2 configuration", style="green"
)
return oauth_config
return cast(dict[str, Any], oauth_config)
except RequestException as e:
raise ValueError(f"Failed to connect to enterprise URL: {e!s}") from e
@@ -89,6 +77,7 @@ class EnterpriseConfigureCommand(BaseCommand):
"oauth2_audience": oauth_config["audience"],
"oauth2_client_id": oauth_config["device_authorization_client_id"],
"oauth2_domain": oauth_config["domain"],
"oauth2_extra": oauth_config["extra"],
}
console.print("🔄 Updating local OAuth2 configuration...")
@@ -99,3 +88,38 @@ class EnterpriseConfigureCommand(BaseCommand):
except Exception as e:
raise ValueError(f"Failed to update OAuth2 settings: {e!s}") from e
def _validate_oauth_config(self, oauth_config: dict[str, Any]) -> None:
required_fields = [
"audience",
"domain",
"device_authorization_client_id",
"provider",
"extra",
]
missing_basic_fields = [
field for field in required_fields if field not in oauth_config
]
missing_provider_specific_fields = [
field
for field in self._get_provider_specific_fields(oauth_config["provider"])
if field not in oauth_config.get("extra", {})
]
if missing_basic_fields:
raise ValueError(
f"Missing required fields in OAuth2 configuration: [{', '.join(missing_basic_fields)}]"
)
if missing_provider_specific_fields:
raise ValueError(
f"Missing authentication provider required fields in OAuth2 configuration: [{', '.join(missing_provider_specific_fields)}] (Configured provider: '{oauth_config['provider']}')"
)
def _get_provider_specific_fields(self, provider_name: str) -> list[str]:
provider = ProviderFactory.from_settings(
Oauth2Settings(provider=provider_name, client_id="dummy", domain="dummy")
)
return provider.get_required_fields()
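A worked example of the two-stage validation above; the payload values are invented, and the extra-field list comes from OktaProvider.get_required_fields:

```python
required_fields = ["audience", "domain", "device_authorization_client_id", "provider", "extra"]
okta_extra_fields = ["authorization_server_name", "using_org_auth_server"]  # from OktaProvider

oauth_config = {
    "audience": "https://api.example.com",
    "domain": "acme.okta.com",
    "device_authorization_client_id": "abc123",
    "provider": "okta",
    "extra": {"using_org_auth_server": False},
}

missing_basic = [f for f in required_fields if f not in oauth_config]
missing_extra = [f for f in okta_extra_fields if f not in oauth_config.get("extra", {})]
assert missing_basic == []
assert missing_extra == ["authorization_server_name"]  # triggers the provider-specific error
```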

View File

@@ -3,7 +3,7 @@ import subprocess
class Repository:
def __init__(self, path="."):
def __init__(self, path: str = ".") -> None:
self.path = path
if not self.is_git_installed():

View File

@@ -1,3 +1,4 @@
from typing import Any
from urllib.parse import urljoin
import requests
@@ -36,19 +37,21 @@ class PlusAPI:
str(settings.enterprise_base_url) or DEFAULT_CREWAI_ENTERPRISE_URL
)
def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
def _make_request(
self, method: str, endpoint: str, **kwargs: Any
) -> requests.Response:
url = urljoin(self.base_url, endpoint)
session = requests.Session()
session.trust_env = False
return session.request(method, url, headers=self.headers, **kwargs)
def login_to_tool_repository(self):
def login_to_tool_repository(self) -> requests.Response:
return self._make_request("POST", f"{self.TOOLS_RESOURCE}/login")
def get_tool(self, handle: str):
def get_tool(self, handle: str) -> requests.Response:
return self._make_request("GET", f"{self.TOOLS_RESOURCE}/{handle}")
def get_agent(self, handle: str):
def get_agent(self, handle: str) -> requests.Response:
return self._make_request("GET", f"{self.AGENTS_RESOURCE}/{handle}")
def publish_tool(
@@ -58,8 +61,8 @@ class PlusAPI:
version: str,
description: str | None,
encoded_file: str,
available_exports: list[str] | None = None,
):
available_exports: list[dict[str, Any]] | None = None,
) -> requests.Response:
params = {
"handle": handle,
"public": is_public,
@@ -111,13 +114,13 @@ class PlusAPI:
def list_crews(self) -> requests.Response:
return self._make_request("GET", self.CREWS_RESOURCE)
def create_crew(self, payload) -> requests.Response:
def create_crew(self, payload: dict[str, Any]) -> requests.Response:
return self._make_request("POST", self.CREWS_RESOURCE, json=payload)
def get_organizations(self) -> requests.Response:
return self._make_request("GET", self.ORGANIZATIONS_RESOURCE)
def initialize_trace_batch(self, payload) -> requests.Response:
def initialize_trace_batch(self, payload: dict[str, Any]) -> requests.Response:
return self._make_request(
"POST",
f"{self.TRACING_RESOURCE}/batches",
@@ -125,14 +128,18 @@ class PlusAPI:
timeout=30,
)
def initialize_ephemeral_trace_batch(self, payload) -> requests.Response:
def initialize_ephemeral_trace_batch(
self, payload: dict[str, Any]
) -> requests.Response:
return self._make_request(
"POST",
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches",
json=payload,
)
def send_trace_events(self, trace_batch_id: str, payload) -> requests.Response:
def send_trace_events(
self, trace_batch_id: str, payload: dict[str, Any]
) -> requests.Response:
return self._make_request(
"POST",
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/events",
@@ -141,7 +148,7 @@ class PlusAPI:
)
def send_ephemeral_trace_events(
self, trace_batch_id: str, payload
self, trace_batch_id: str, payload: dict[str, Any]
) -> requests.Response:
return self._make_request(
"POST",
@@ -150,7 +157,9 @@ class PlusAPI:
timeout=30,
)
def finalize_trace_batch(self, trace_batch_id: str, payload) -> requests.Response:
def finalize_trace_batch(
self, trace_batch_id: str, payload: dict[str, Any]
) -> requests.Response:
return self._make_request(
"PATCH",
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
@@ -159,7 +168,7 @@ class PlusAPI:
)
def finalize_ephemeral_trace_batch(
self, trace_batch_id: str, payload
self, trace_batch_id: str, payload: dict[str, Any]
) -> requests.Response:
return self._make_request(
"PATCH",

View File

@@ -1,3 +1,5 @@
from datetime import datetime
import os
from typing import Any
from rich.console import Console
@@ -5,6 +7,7 @@ from rich.table import Table
from crewai.cli.command import BaseCommand
from crewai.cli.config import HIDDEN_SETTINGS_KEYS, READONLY_SETTINGS_KEYS, Settings
from crewai.events.listeners.tracing.utils import _load_user_data
console = Console()
@@ -34,11 +37,47 @@ class SettingsCommand(BaseCommand):
current_value = getattr(self.settings, field_name)
description = field_info.description or "No description available"
display_value = (
str(current_value) if current_value is not None else "Not set"
str(current_value) if current_value not in [None, {}] else "Not set"
)
table.add_row(field_name, display_value, description)
# Add trace-related settings from user data
user_data = _load_user_data()
# CREWAI_TRACING_ENABLED environment variable
env_tracing = os.getenv("CREWAI_TRACING_ENABLED", "")
env_tracing_display = env_tracing if env_tracing else "Not set"
table.add_row(
"CREWAI_TRACING_ENABLED",
env_tracing_display,
"Environment variable to enable/disable tracing",
)
# Trace consent status
trace_consent = user_data.get("trace_consent")
if trace_consent is True:
consent_display = "✅ Enabled"
elif trace_consent is False:
consent_display = "❌ Disabled"
else:
consent_display = "Not set"
table.add_row(
"trace_consent", consent_display, "Whether trace collection is enabled"
)
# First execution timestamp
if user_data.get("first_execution_at"):
timestamp = datetime.fromtimestamp(user_data["first_execution_at"])
first_exec_display = timestamp.strftime("%Y-%m-%d %H:%M:%S")
else:
first_exec_display = "Not set"
table.add_row(
"first_execution_at",
first_exec_display,
"Timestamp of first crew/flow execution",
)
console.print(table)
def set(self, key: str, value: str) -> None:

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.4.1"
"crewai[tools]==1.5.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.4.1"
"crewai[tools]==1.5.0"
]
[project.scripts]

View File

@@ -30,11 +30,11 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
A class to handle tool repository related operations for CrewAI projects.
"""
def __init__(self):
def __init__(self) -> None:
BaseCommand.__init__(self)
PlusAPIMixin.__init__(self, telemetry=self._telemetry)
def create(self, handle: str):
def create(self, handle: str) -> None:
self._ensure_not_in_project()
folder_name = handle.replace(" ", "_").replace("-", "_").lower()
@@ -64,7 +64,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
finally:
os.chdir(old_directory)
def publish(self, is_public: bool, force: bool = False):
def publish(self, is_public: bool, force: bool = False) -> None:
if not git.Repository().is_synced() and not force:
console.print(
"[bold red]Failed to publish tool.[/bold red]\n"
@@ -137,7 +137,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
style="bold green",
)
def install(self, handle: str):
def install(self, handle: str) -> None:
self._print_current_organization()
get_response = self.plus_api_client.get_tool(handle)
@@ -180,7 +180,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
settings.org_name = login_response_json["current_organization"]["name"]
settings.dump()
def _add_package(self, tool_details: dict[str, Any]):
def _add_package(self, tool_details: dict[str, Any]) -> None:
is_from_pypi = tool_details.get("source", None) == "pypi"
tool_handle = tool_details["handle"]
repository_handle = tool_details["repository"]["handle"]
@@ -209,7 +209,7 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
click.echo(add_package_result.stderr, err=True)
raise SystemExit
def _ensure_not_in_project(self):
def _ensure_not_in_project(self) -> None:
if os.path.isfile("./pyproject.toml"):
console.print(
"[bold red]Oops! It looks like you're inside a project.[/bold red]"

View File

@@ -5,7 +5,7 @@ import os
from pathlib import Path
import shutil
import sys
from typing import Any, get_type_hints
from typing import Any, cast, get_type_hints
import click
from rich.console import Console
@@ -23,7 +23,9 @@ if sys.version_info >= (3, 11):
console = Console()
def copy_template(src, dst, name, class_name, folder_name):
def copy_template(
src: Path, dst: Path, name: str, class_name: str, folder_name: str
) -> None:
"""Copy a file from src to dst."""
with open(src, "r") as file:
content = file.read()
@@ -40,13 +42,13 @@ def copy_template(src, dst, name, class_name, folder_name):
click.secho(f" - Created {dst}", fg="green")
def read_toml(file_path: str = "pyproject.toml"):
def read_toml(file_path: str = "pyproject.toml") -> dict[str, Any]:
"""Read the content of a TOML file and return it as a dictionary."""
with open(file_path, "rb") as f:
return tomli.load(f)
def parse_toml(content):
def parse_toml(content: str) -> dict[str, Any]:
if sys.version_info >= (3, 11):
return tomllib.loads(content)
return tomli.loads(content)
@@ -103,7 +105,7 @@ def _get_project_attribute(
)
except Exception as e:
# Handle TOML decode errors for Python 3.11+
if sys.version_info >= (3, 11) and isinstance(e, tomllib.TOMLDecodeError): # type: ignore
if sys.version_info >= (3, 11) and isinstance(e, tomllib.TOMLDecodeError):
console.print(
f"Error: {pyproject_path} is not a valid TOML file.", style="bold red"
)
@@ -126,7 +128,7 @@ def _get_nested_value(data: dict[str, Any], keys: list[str]) -> Any:
return reduce(dict.__getitem__, keys, data)
def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
def fetch_and_json_env_file(env_file_path: str = ".env") -> dict[str, Any]:
"""Fetch the environment variables from a .env file and return them as a dictionary."""
try:
# Read the .env file
@@ -150,7 +152,7 @@ def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
return {}
def tree_copy(source, destination):
def tree_copy(source: Path, destination: Path) -> None:
"""Copies the entire directory structure from the source to the destination."""
for item in os.listdir(source):
source_item = os.path.join(source, item)
@@ -161,7 +163,7 @@ def tree_copy(source, destination):
shutil.copy2(source_item, destination_item)
def tree_find_and_replace(directory, find, replace):
def tree_find_and_replace(directory: Path, find: str, replace: str) -> None:
"""Recursively searches through a directory, replacing a target string in
both file contents and filenames with a specified replacement string.
"""
@@ -187,7 +189,7 @@ def tree_find_and_replace(directory, find, replace):
os.rename(old_dirpath, new_dirpath)
def load_env_vars(folder_path):
def load_env_vars(folder_path: Path) -> dict[str, Any]:
"""
Loads environment variables from a .env file in the specified folder path.
@@ -208,7 +210,9 @@ def load_env_vars(folder_path):
return env_vars
def update_env_vars(env_vars, provider, model):
def update_env_vars(
env_vars: dict[str, Any], provider: str, model: str
) -> dict[str, Any] | None:
"""
Updates environment variables with the API key for the selected provider and model.
@@ -220,15 +224,20 @@ def update_env_vars(env_vars, provider, model):
Returns:
- None
"""
api_key_var = ENV_VARS.get(
provider,
[
click.prompt(
f"Enter the environment variable name for your {provider.capitalize()} API key",
type=str,
)
],
)[0]
provider_config = cast(
list[str],
ENV_VARS.get(
provider,
[
click.prompt(
f"Enter the environment variable name for your {provider.capitalize()} API key",
type=str,
)
],
),
)
api_key_var = provider_config[0]
if api_key_var not in env_vars:
try:
@@ -246,7 +255,7 @@ def update_env_vars(env_vars, provider, model):
return env_vars
def write_env_file(folder_path, env_vars):
def write_env_file(folder_path: Path, env_vars: dict[str, Any]) -> None:
"""
Writes environment variables to a .env file in the specified folder.
@@ -342,18 +351,18 @@ def get_crews(crew_path: str = "crew.py", require: bool = False) -> list[Crew]:
return crew_instances
def get_crew_instance(module_attr) -> Crew | None:
def get_crew_instance(module_attr: Any) -> Crew | None:
if (
callable(module_attr)
and hasattr(module_attr, "is_crew_class")
and module_attr.is_crew_class
):
return module_attr().crew()
return cast(Crew, module_attr().crew())
try:
if (ismethod(module_attr) or isfunction(module_attr)) and get_type_hints(
module_attr
).get("return") is Crew:
return module_attr()
return cast(Crew, module_attr())
except Exception:
return None
@@ -362,7 +371,7 @@ def get_crew_instance(module_attr) -> Crew | None:
return None
def fetch_crews(module_attr) -> list[Crew]:
def fetch_crews(module_attr: Any) -> list[Crew]:
crew_instances: list[Crew] = []
if crew_instance := get_crew_instance(module_attr):
@@ -377,7 +386,7 @@ def fetch_crews(module_attr) -> list[Crew]:
return crew_instances
def is_valid_tool(obj):
def is_valid_tool(obj: Any) -> bool:
from crewai.tools.base_tool import Tool
if isclass(obj):
@@ -389,7 +398,7 @@ def is_valid_tool(obj):
return isinstance(obj, Tool)
def extract_available_exports(dir_path: str = "src"):
def extract_available_exports(dir_path: str = "src") -> list[dict[str, Any]]:
"""
Extract available tool classes from the project's __init__.py files.
Only includes classes that inherit from BaseTool or functions decorated with @tool.
@@ -419,7 +428,9 @@ def extract_available_exports(dir_path: str = "src"):
raise SystemExit(1) from e
def build_env_with_tool_repository_credentials(repository_handle: str):
def build_env_with_tool_repository_credentials(
repository_handle: str,
) -> dict[str, Any]:
repository_handle = repository_handle.upper().replace("-", "_")
settings = Settings()
@@ -472,7 +483,7 @@ def _load_tools_from_init(init_file: Path) -> list[dict[str, Any]]:
sys.modules.pop("temp_module", None)
def _print_no_tools_warning():
def _print_no_tools_warning() -> None:
"""
Display warning and usage instructions if no tools were found.
"""

View File

@@ -27,6 +27,8 @@ from pydantic import (
model_validator,
)
from pydantic_core import PydanticCustomError
from rich.console import Console
from rich.panel import Panel
from typing_extensions import Self
from crewai.agent import Agent
@@ -39,8 +41,8 @@ from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.events.listeners.tracing.utils import (
is_tracing_enabled,
should_auto_collect_first_time_traces,
set_tracing_enabled,
should_enable_tracing,
)
from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
@@ -280,8 +282,8 @@ class Crew(FlowTrackable, BaseModel):
description="Metrics for the LLM usage during all tasks execution.",
)
tracing: bool | None = Field(
default=False,
description="Whether to enable tracing for the crew.",
default=None,
description="Whether to enable tracing for the crew. True=always enable, False=always disable, None=check environment/user settings.",
)
@field_validator("id", mode="before")
@@ -311,17 +313,16 @@ class Crew(FlowTrackable, BaseModel):
@model_validator(mode="after")
def set_private_attrs(self) -> Crew:
"""set private attributes."""
self._cache_handler = CacheHandler()
event_listener = EventListener() # type: ignore[no-untyped-call]
if (
is_tracing_enabled()
or self.tracing
or should_auto_collect_first_time_traces()
):
trace_listener = TraceCollectionListener()
trace_listener.setup_listeners(crewai_event_bus)
# Determine and set tracing state once for this execution
tracing_enabled = should_enable_tracing(override=self.tracing)
set_tracing_enabled(tracing_enabled)
# Always setup trace listener - actual execution control is via contextvar
trace_listener = TraceCollectionListener()
trace_listener.setup_listeners(crewai_event_bus)
event_listener.verbose = self.verbose
event_listener.formatter.verbose = self.verbose
self._logger = Logger(verbose=self.verbose)
@@ -809,6 +810,7 @@ class Crew(FlowTrackable, BaseModel):
"json_dict": output.json_dict,
"output_format": output.output_format,
"agent": output.agent,
"messages": output.messages,
},
"task_index": task_index,
"inputs": inputs,
@@ -1170,6 +1172,10 @@ class Crew(FlowTrackable, BaseModel):
total_tokens=self.token_usage.total_tokens,
),
)
# Finalization is handled by trace listener (always initialized)
# The batch manager checks contextvar to determine if tracing is enabled
return CrewOutput(
raw=final_task_output.raw,
pydantic=final_task_output.pydantic,
@@ -1236,6 +1242,7 @@ class Crew(FlowTrackable, BaseModel):
pydantic=stored_output["pydantic"],
json_dict=stored_output["json_dict"],
output_format=stored_output["output_format"],
messages=stored_output.get("messages", []),
)
self.tasks[i].output = task_output
@@ -1649,3 +1656,32 @@ class Crew(FlowTrackable, BaseModel):
and able_to_inject
):
self.tasks[0].allow_crewai_trigger_context = True
def _show_tracing_disabled_message(self) -> None:
"""Show a message when tracing is disabled."""
from crewai.events.listeners.tracing.utils import has_user_declined_tracing
console = Console()
if has_user_declined_tracing():
message = """Info: Tracing is disabled.
To enable tracing, do any one of these:
• Set tracing=True in your Crew code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
else:
message = """Info: Tracing is disabled.
To enable tracing, do any one of these:
• Set tracing=True in your Crew code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
panel = Panel(
message,
title="Tracing Status",
border_style="blue",
padding=(1, 2),
)
console.print(panel)

View File

@@ -10,6 +10,7 @@ import atexit
from collections.abc import Callable, Generator
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
import contextvars
import threading
from typing import Any, Final, ParamSpec, TypeVar
@@ -288,8 +289,9 @@ class CrewAIEventsBus:
if event_type is LLMStreamChunkEvent:
self._call_handlers(source, event, level_sync)
else:
ctx = contextvars.copy_context()
future = self._sync_executor.submit(
self._call_handlers, source, event, level_sync
ctx.run, self._call_handlers, source, event, level_sync
)
await asyncio.get_running_loop().run_in_executor(
None, future.result
@@ -346,8 +348,9 @@ class CrewAIEventsBus:
if event_type is LLMStreamChunkEvent:
self._call_handlers(source, event, sync_handlers)
else:
ctx = contextvars.copy_context()
sync_future = self._sync_executor.submit(
self._call_handlers, source, event, sync_handlers
ctx.run, self._call_handlers, source, event, sync_handlers
)
if not async_handlers:
return sync_future
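The copy_context change matters because worker threads do not inherit the caller's contextvars. A self-contained demonstration of the pattern:

```python
import contextvars
from concurrent.futures import ThreadPoolExecutor

flag: contextvars.ContextVar[str] = contextvars.ContextVar("flag", default="unset")


def read_flag() -> str:
    return flag.get()


flag.set("enabled")
ctx = contextvars.copy_context()
with ThreadPoolExecutor(max_workers=1) as pool:
    assert pool.submit(ctx.run, read_flag).result() == "enabled"  # propagated via ctx.run
    assert pool.submit(read_flag).result() == "unset"             # lost without it
```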

View File

@@ -1,5 +1,4 @@
import logging
from pathlib import Path
import uuid
import webbrowser
@@ -17,47 +16,6 @@ from crewai.events.listeners.tracing.utils import (
logger = logging.getLogger(__name__)
def _update_or_create_env_file():
"""Update or create .env file with CREWAI_TRACING_ENABLED=true."""
env_path = Path(".env")
env_content = ""
variable_name = "CREWAI_TRACING_ENABLED"
variable_value = "true"
# Read existing content if file exists
if env_path.exists():
with open(env_path, "r") as f:
env_content = f.read()
# Check if CREWAI_TRACING_ENABLED is already set
lines = env_content.splitlines()
variable_exists = False
updated_lines = []
for line in lines:
if line.strip().startswith(f"{variable_name}="):
# Update existing variable
updated_lines.append(f"{variable_name}={variable_value}")
variable_exists = True
else:
updated_lines.append(line)
# Add variable if it doesn't exist
if not variable_exists:
if updated_lines and not updated_lines[-1].strip():
# If last line is empty, replace it
updated_lines[-1] = f"{variable_name}={variable_value}"
else:
# Add new line and then the variable
updated_lines.append(f"{variable_name}={variable_value}")
# Write updated content
with open(env_path, "w") as f:
f.write("\n".join(updated_lines))
if updated_lines: # Add final newline if there's content
f.write("\n")
class FirstTimeTraceHandler:
"""Handles the first-time user trace collection and display flow."""
@@ -96,20 +54,16 @@ class FirstTimeTraceHandler:
if user_wants_traces:
self._initialize_backend_and_send_events()
# Enable tracing for future runs by updating .env file
try:
_update_or_create_env_file()
except Exception: # noqa: S110
pass
if self.ephemeral_url:
self._display_ephemeral_trace_link()
else:
self._show_tracing_declined_message()
mark_first_execution_completed()
mark_first_execution_completed(user_consented=user_wants_traces)
except Exception as e:
self._gracefully_fail(f"Error in trace handling: {e}")
mark_first_execution_completed()
mark_first_execution_completed(user_consented=False)
def _initialize_backend_and_send_events(self):
"""Initialize backend batch and send collected events."""
@@ -182,8 +136,13 @@ This trace shows:
• Tool usage and results
• LLM calls and responses
✅ Tracing has been enabled for future runs! (CREWAI_TRACING_ENABLED=true added to .env)
You can also add tracing=True to your Crew(tracing=True) / Flow(tracing=True) for more control.
✅ Tracing has been enabled for future runs!
Your preference has been saved. Future Crew/Flow executions will automatically collect traces.
To disable tracing later, do any one of these:
• Set tracing=False in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=false in your project's .env file
• Run: crewai traces disable
📝 Note: This link will expire in 24 hours.
""".strip()
@@ -199,6 +158,32 @@ You can also add tracing=True to your Crew(tracing=True) / Flow(tracing=True) fo
console.print(panel)
console.print()
def _show_tracing_declined_message(self):
"""Show message when user declines tracing."""
console = Console()
panel_content = """
Info: Tracing has been disabled.
Your preference has been saved. Future Crew/Flow executions will not collect traces.
To enable tracing later, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable
""".strip()
panel = Panel(
panel_content,
title="Tracing Preference Saved",
border_style="blue",
padding=(1, 2),
)
console.print("\n")
console.print(panel)
console.print()
def _gracefully_fail(self, error_message: str):
"""Handle errors gracefully without disrupting user experience."""
console = Console()
@@ -218,8 +203,14 @@ Unfortunately, we couldn't upload them to the server right now, but here's what
• Execution duration: {self.batch_manager.calculate_duration("execution")}ms
• Batch ID: {self.batch_manager.trace_batch_id}
Tracing has been enabled for future runs! (CREWAI_TRACING_ENABLED=true added to .env)
Tracing has been enabled for future runs!
Your preference has been saved. Future Crew/Flow executions will automatically collect traces.
The traces include agent decisions, task execution, and tool usage.
To disable tracing later, do any one of these:
• Set tracing=False in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=false in your project's .env file
• Run: crewai traces disable
""".strip()
panel = Panel(

View File

@@ -12,7 +12,10 @@ from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.plus_api import PlusAPI
from crewai.cli.version import get_crewai_version
from crewai.events.listeners.tracing.types import TraceEvent
from crewai.events.listeners.tracing.utils import should_auto_collect_first_time_traces
from crewai.events.listeners.tracing.utils import (
is_tracing_enabled_in_context,
should_auto_collect_first_time_traces,
)
from crewai.utilities.constants import CREWAI_BASE_URL
@@ -107,6 +110,9 @@ class TraceBatchManager:
):
"""Send batch initialization to backend"""
if not is_tracing_enabled_in_context():
return
if not self.plus_api or not self.current_batch:
return
@@ -243,7 +249,8 @@ class TraceBatchManager:
def finalize_batch(self) -> TraceBatch | None:
"""Finalize batch and return it for sending"""
if not self.current_batch:
if not self.current_batch or not is_tracing_enabled_in_context():
return None
all_handlers_completed = self.wait_for_pending_events()

View File

@@ -10,13 +10,14 @@ from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.version import get_crewai_version
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.event_bus import CrewAIEventsBus
from crewai.events.utils.console_formatter import ConsoleFormatter
from crewai.events.listeners.tracing.first_time_trace_handler import (
FirstTimeTraceHandler,
)
from crewai.events.listeners.tracing.trace_batch_manager import TraceBatchManager
from crewai.events.listeners.tracing.types import TraceEvent
from crewai.events.listeners.tracing.utils import safe_serialize_to_dict
from crewai.events.listeners.tracing.utils import (
safe_serialize_to_dict,
)
from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
@@ -80,6 +81,7 @@ from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageStartedEvent,
)
from crewai.events.utils.console_formatter import ConsoleFormatter
class TraceCollectionListener(BaseEventListener):
@@ -627,3 +629,35 @@ class TraceCollectionListener(BaseEventListener):
"event": safe_serialize_to_dict(event),
"source": source,
}
def _show_tracing_disabled_message(self) -> None:
"""Show a message when tracing is disabled."""
from rich.console import Console
from rich.panel import Panel
from crewai.events.listeners.tracing.utils import has_user_declined_tracing
console = Console()
if has_user_declined_tracing():
message = """Info: Tracing is disabled.
To enable tracing, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
else:
message = """Info: Tracing is disabled.
To enable tracing, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
panel = Panel(
message,
title="Tracing Status",
border_style="blue",
padding=(1, 2),
)
console.print(panel)

View File

@@ -1,3 +1,4 @@
from contextvars import ContextVar, Token
from datetime import datetime
import getpass
import hashlib
@@ -8,7 +9,7 @@ from pathlib import Path
import platform
import re
import subprocess
from typing import Any
from typing import Any, cast
import uuid
import click
@@ -23,7 +24,120 @@ from crewai.utilities.serialization import to_serializable
logger = logging.getLogger(__name__)
_tracing_enabled: ContextVar[bool | None] = ContextVar("_tracing_enabled", default=None)
def should_enable_tracing(*, override: bool | None = None) -> bool:
"""Determine if tracing should be enabled.
This is the single source of truth for tracing enablement.
Priority order:
1. Explicit override (e.g., Crew.tracing=True/False)
2. Environment variable CREWAI_TRACING_ENABLED
3. User consent from user_data
Args:
override: Explicit override for tracing (True=always enable, False=always disable, None=check other settings)
Returns:
True if tracing should be enabled, False otherwise.
"""
if override is True:
return True
if override is False:
return False
env_value = os.getenv("CREWAI_TRACING_ENABLED", "").lower()
if env_value in ("true", "1"):
return True
data = _load_user_data()
if data.get("trace_consent", False) is not False:
return True
return False
def set_tracing_enabled(enabled: bool) -> object:
"""Set tracing enabled state for current execution context.
Args:
enabled: Whether tracing should be enabled
Returns:
A token that can be used with reset_tracing_enabled to restore previous value.
"""
return _tracing_enabled.set(enabled)
def reset_tracing_enabled(token: Token[bool | None]) -> None:
"""Reset tracing enabled state to previous value.
Args:
token: Token returned from set_tracing_enabled
"""
_tracing_enabled.reset(token)
def is_tracing_enabled_in_context() -> bool:
"""Check if tracing is enabled in current execution context.
Returns:
True if tracing is enabled in context, False otherwise.
Returns False if context has not been set.
"""
enabled = _tracing_enabled.get()
return enabled if enabled is not None else False
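Taken together, the new helpers give a token-scoped on/off switch; all four functions below come from this file:

```python
from crewai.events.listeners.tracing.utils import (
    is_tracing_enabled_in_context,
    reset_tracing_enabled,
    set_tracing_enabled,
    should_enable_tracing,
)

# Explicit override wins over CREWAI_TRACING_ENABLED and saved user consent.
token = set_tracing_enabled(should_enable_tracing(override=True))
assert is_tracing_enabled_in_context() is True
reset_tracing_enabled(token)  # restore whatever the surrounding context had
```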
def _user_data_file() -> Path:
base = Path(db_storage_path())
base.mkdir(parents=True, exist_ok=True)
return base / ".crewai_user.json"
def _load_user_data() -> dict[str, Any]:
p = _user_data_file()
if p.exists():
try:
return cast(dict[str, Any], json.loads(p.read_text()))
except (json.JSONDecodeError, OSError, PermissionError) as e:
logger.warning(f"Failed to load user data: {e}")
return {}
def _save_user_data(data: dict[str, Any]) -> None:
try:
p = _user_data_file()
p.write_text(json.dumps(data, indent=2))
except (OSError, PermissionError) as e:
logger.warning(f"Failed to save user data: {e}")
def has_user_declined_tracing() -> bool:
"""Check if user has explicitly declined trace collection.
Returns:
True if user previously declined tracing, False otherwise.
"""
data = _load_user_data()
if data.get("first_execution_done", False):
return data.get("trace_consent", False) is False
return False
def is_tracing_enabled() -> bool:
"""Check if tracing should be enabled.
Returns:
True if tracing is enabled and not disabled, False otherwise.
"""
# If user has explicitly declined tracing, never enable it
if has_user_declined_tracing():
return False
return os.getenv("CREWAI_TRACING_ENABLED", "false").lower() == "true"
@@ -213,36 +327,12 @@ def _get_generic_system_id() -> str | None:
return None
def _user_data_file() -> Path:
base = Path(db_storage_path())
base.mkdir(parents=True, exist_ok=True)
return base / ".crewai_user.json"
def _load_user_data() -> dict:
p = _user_data_file()
if p.exists():
try:
return json.loads(p.read_text())
except (json.JSONDecodeError, OSError, PermissionError) as e:
logger.warning(f"Failed to load user data: {e}")
return {}
def _save_user_data(data: dict) -> None:
try:
p = _user_data_file()
p.write_text(json.dumps(data, indent=2))
except (OSError, PermissionError) as e:
logger.warning(f"Failed to save user data: {e}")
def get_user_id() -> str:
"""Stable, anonymized user identifier with caching."""
data = _load_user_data()
if "user_id" in data:
return data["user_id"]
return cast(str, data["user_id"])
try:
username = getpass.getuser()
@@ -263,8 +353,12 @@ def is_first_execution() -> bool:
return not data.get("first_execution_done", False)
def mark_first_execution_done() -> None:
"""Mark that the first execution has been completed."""
def mark_first_execution_done(user_consented: bool = False) -> None:
"""Mark that the first execution has been completed.
Args:
user_consented: Whether the user consented to trace collection.
"""
data = _load_user_data()
if data.get("first_execution_done", False):
return
@@ -275,12 +369,13 @@ def mark_first_execution_done() -> None:
"first_execution_at": datetime.now().timestamp(),
"user_id": get_user_id(),
"machine_id": _get_machine_id(),
"trace_consent": user_consented,
}
)
_save_user_data(data)
def safe_serialize_to_dict(obj, exclude: set[str] | None = None) -> dict[str, Any]:
def safe_serialize_to_dict(obj: Any, exclude: set[str] | None = None) -> dict[str, Any]:
"""Safely serialize an object to a dictionary for event data."""
try:
serialized = to_serializable(obj, exclude)
@@ -291,7 +386,9 @@ def safe_serialize_to_dict(obj, exclude: set[str] | None = None) -> dict[str, An
return {"serialization_error": str(e), "object_type": type(obj).__name__}
def truncate_messages(messages, max_content_length=500, max_messages=5):
def truncate_messages(
messages: list[dict[str, Any]], max_content_length: int = 500, max_messages: int = 5
) -> list[dict[str, Any]]:
"""Truncate message content and limit number of messages"""
if not messages or not isinstance(messages, list):
return messages
@@ -308,9 +405,22 @@ def truncate_messages(messages, max_content_length=500, max_messages=5):
def should_auto_collect_first_time_traces() -> bool:
"""True if we should auto-collect traces for first-time user."""
"""True if we should auto-collect traces for first-time user.
Returns:
True if first-time user AND telemetry not disabled AND tracing not explicitly enabled, False otherwise.
"""
if _is_test_environment():
return False
# If user has previously declined, never auto-collect
if has_user_declined_tracing():
return False
if is_tracing_enabled_in_context():
return False
return is_first_execution()
@@ -355,7 +465,7 @@ def prompt_user_for_trace_viewing(timeout_seconds: int = 20) -> bool:
result = [False]
def get_input():
def get_input() -> None:
try:
response = input().strip().lower()
result[0] = response in ["y", "yes"]
@@ -377,6 +487,10 @@ def prompt_user_for_trace_viewing(timeout_seconds: int = 20) -> bool:
return False
def mark_first_execution_completed() -> None:
"""Mark first execution as completed (called after trace prompt)."""
mark_first_execution_done()
def mark_first_execution_completed(user_consented: bool = False) -> None:
"""Mark first execution as completed (called after trace prompt).
Args:
user_consented: Whether the user consented to trace collection.
"""
mark_first_execution_done(user_consented=user_consented)

View File

@@ -1,3 +1,4 @@
import threading
from typing import Any, ClassVar
from rich.console import Console
@@ -27,6 +28,7 @@ class ConsoleFormatter:
_pending_a2a_turn_number: int | None = None
_a2a_turn_branches: ClassVar[dict[int, Tree]] = {}
_current_a2a_agent_name: str | None = None
crew_completion_printed: ClassVar[threading.Event] = threading.Event()
def __init__(self, verbose: bool = False):
self.console = Console(width=None)
@@ -47,13 +49,44 @@ class ConsoleFormatter:
padding=(1, 2),
)
def _show_tracing_disabled_message_if_needed(self) -> None:
"""Show tracing disabled message if tracing is not enabled."""
from crewai.events.listeners.tracing.utils import (
has_user_declined_tracing,
is_tracing_enabled_in_context,
)
if not is_tracing_enabled_in_context():
if has_user_declined_tracing():
message = """Info: Tracing is disabled.
To enable tracing, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
else:
message = """Info: Tracing is disabled.
To enable tracing, do any one of these:
• Set tracing=True in your Crew/Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
panel = Panel(
message,
title="Tracing Status",
border_style="blue",
padding=(1, 2),
)
self.console.print(panel)
def create_status_content(
self,
title: str,
name: str,
status_style: str = "blue",
tool_args: dict[str, Any] | str = "",
**fields,
**fields: Any,
) -> Text:
"""Create standardized status content with consistent formatting."""
content = Text()
@@ -92,7 +125,7 @@ class ConsoleFormatter:
"""Add a node to the tree with consistent styling."""
return parent.add(Text(text, style=style))
def print(self, *args, **kwargs) -> None:
def print(self, *args: Any, **kwargs: Any) -> None:
"""Custom print that replaces consecutive Tree renders.
* If the argument is a single ``Tree`` instance, we either start a
@@ -208,11 +241,20 @@ class ConsoleFormatter:
self.print_panel(content, title, style)
if status in ["completed", "failed"]:
self.crew_completion_printed.set()
# Show tracing disabled message after crew completion
self._show_tracing_disabled_message_if_needed()
def create_crew_tree(self, crew_name: str, source_id: str) -> Tree | None:
"""Create and initialize a new crew tree with initial status."""
if not self.verbose:
return None
# Reset the crew completion event for this new crew execution
ConsoleFormatter.crew_completion_printed.clear()
tree = Tree(
Text("🚀 Crew: ", style="cyan bold") + Text(crew_name, style="cyan")
)
@@ -497,7 +539,7 @@ class ConsoleFormatter:
return method_branch
def get_llm_tree(self, tool_name: str):
def get_llm_tree(self, tool_name: str) -> Tree:
text = Text()
text.append(f"🔧 Using {tool_name} from LLM available_function", style="yellow")
@@ -512,7 +554,7 @@ class ConsoleFormatter:
self,
tool_name: str,
tool_args: dict[str, Any] | str,
):
) -> None:
# Create status content for the tool usage
content = self.create_status_content(
"Tool Usage Started", tool_name, Status="In Progress", tool_args=tool_args
@@ -528,7 +570,7 @@ class ConsoleFormatter:
def handle_llm_tool_usage_finished(
self,
tool_name: str,
):
) -> None:
tree = self.get_llm_tree(tool_name)
self.add_tree_node(tree, "✅ Tool Usage Completed", "green")
self.print(tree)
@@ -538,7 +580,7 @@ class ConsoleFormatter:
self,
tool_name: str,
error: str,
):
) -> None:
tree = self.get_llm_tree(tool_name)
self.add_tree_node(tree, "❌ Tool Usage Failed", "red")
self.print(tree)
@@ -1558,7 +1600,7 @@ class ConsoleFormatter:
if branch_to_use is None and tree_to_use is not None:
branch_to_use = tree_to_use
def add_panel():
def add_panel() -> None:
memory_text = str(memory_content)
if len(memory_text) > 500:
memory_text = memory_text[:497] + "..."
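The crew_completion_printed ClassVar is a plain threading.Event used as a per-run latch: cleared in create_crew_tree, set once the completed/failed panel prints. The pattern in isolation:

```python
import threading

completion_printed = threading.Event()

completion_printed.clear()          # new crew run starts: reset the latch
assert not completion_printed.is_set()
completion_printed.set()            # final status panel was printed
assert completion_printed.is_set()
```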

View File

@@ -26,14 +26,17 @@ from uuid import uuid4
from opentelemetry import baggage
from opentelemetry.context import attach, detach
from pydantic import BaseModel, Field, ValidationError
from rich.console import Console
from rich.panel import Panel
from crewai.events.event_bus import crewai_event_bus
from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.events.listeners.tracing.utils import (
is_tracing_enabled,
should_auto_collect_first_time_traces,
has_user_declined_tracing,
set_tracing_enabled,
should_enable_tracing,
)
from crewai.events.types.flow_events import (
FlowCreatedEvent,
@@ -452,7 +455,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
_router_paths: ClassVar[dict[FlowMethodName, list[FlowMethodName]]] = {}
initial_state: type[T] | T | None = None
name: str | None = None
tracing: bool | None = False
tracing: bool | None = None
def __class_getitem__(cls: type[Flow[T]], item: type[T]) -> type[Flow[T]]:
class _FlowGeneric(cls): # type: ignore
@@ -464,13 +467,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
def __init__(
self,
persistence: FlowPersistence | None = None,
tracing: bool | None = False,
tracing: bool | None = None,
**kwargs: Any,
) -> None:
"""Initialize a new Flow instance.
Args:
persistence: Optional persistence backend for storing flow states
tracing: Whether to enable tracing. True=always enable, False=always disable, None=check environment/user settings
**kwargs: Additional state values to initialize or override
"""
# Initialize basic instance attributes
@@ -488,13 +492,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
# Initialize state with initial values
self._state = self._create_initial_state()
self.tracing = tracing
if (
is_tracing_enabled()
or self.tracing
or should_auto_collect_first_time_traces()
):
trace_listener = TraceCollectionListener()
trace_listener.setup_listeners(crewai_event_bus)
tracing_enabled = should_enable_tracing(override=self.tracing)
set_tracing_enabled(tracing_enabled)
trace_listener = TraceCollectionListener()
trace_listener.setup_listeners(crewai_event_bus)
# Apply any additional kwargs
if kwargs:
self._initialize_state(kwargs)
@@ -936,18 +938,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
)
self._event_futures.clear()
if (
is_tracing_enabled()
or self.tracing
or should_auto_collect_first_time_traces()
):
trace_listener = TraceCollectionListener()
if trace_listener.batch_manager.batch_owner_type == "flow":
if trace_listener.first_time_handler.is_first_time:
trace_listener.first_time_handler.mark_events_collected()
trace_listener.first_time_handler.handle_execution_completion()
else:
trace_listener.batch_manager.finalize_batch()
trace_listener = TraceCollectionListener()
if trace_listener.batch_manager.batch_owner_type == "flow":
if trace_listener.first_time_handler.is_first_time:
trace_listener.first_time_handler.mark_events_collected()
trace_listener.first_time_handler.handle_execution_completion()
else:
trace_listener.batch_manager.finalize_batch()
return final_output
finally:
@@ -1381,3 +1378,32 @@ class Flow(Generic[T], metaclass=FlowMeta):
)
structure = build_flow_structure(self)
return render_interactive(structure, filename=filename, show=show)
@staticmethod
def _show_tracing_disabled_message() -> None:
"""Show a message when tracing is disabled."""
console = Console()
# The guidance is identical whether or not the user previously declined
# tracing, so a single message is shown.
message = """Info: Tracing is disabled.
To enable tracing, do any one of these:
• Set tracing=True in your Flow code
• Set CREWAI_TRACING_ENABLED=true in your project's .env file
• Run: crewai traces enable"""
panel = Panel(
message,
title="Tracing Status",
border_style="blue",
padding=(1, 2),
)
console.print(panel)
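
A minimal sketch of the three-state tracing override described above; the flow class and step are illustrative.

from crewai.flow.flow import Flow, start

class GreetFlow(Flow):
    @start()
    def say_hello(self) -> str:
        return "hello"

GreetFlow()               # tracing=None (default): defer to CREWAI_TRACING_ENABLED / saved user settings
GreetFlow(tracing=True)   # always enable tracing
GreetFlow(tracing=False)  # always disable tracing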

View File

@@ -0,0 +1,108 @@
from __future__ import annotations
from crewai.hooks.decorators import (
after_llm_call,
after_tool_call,
before_llm_call,
before_tool_call,
)
from crewai.hooks.llm_hooks import (
LLMCallHookContext,
clear_after_llm_call_hooks,
clear_all_llm_call_hooks,
clear_before_llm_call_hooks,
get_after_llm_call_hooks,
get_before_llm_call_hooks,
register_after_llm_call_hook,
register_before_llm_call_hook,
unregister_after_llm_call_hook,
unregister_before_llm_call_hook,
)
from crewai.hooks.tool_hooks import (
ToolCallHookContext,
clear_after_tool_call_hooks,
clear_all_tool_call_hooks,
clear_before_tool_call_hooks,
get_after_tool_call_hooks,
get_before_tool_call_hooks,
register_after_tool_call_hook,
register_before_tool_call_hook,
unregister_after_tool_call_hook,
unregister_before_tool_call_hook,
)
def clear_all_global_hooks() -> dict[str, tuple[int, int]]:
"""Clear all global hooks across all hook types (LLM and Tool).
This is a convenience function that clears all registered hooks in one call.
Useful for testing, resetting state, or cleaning up between different
execution contexts.
Returns:
Dictionary with counts of cleared hooks:
{
"llm_hooks": (before_count, after_count),
"tool_hooks": (before_count, after_count),
"total": (total_before_count, total_after_count)
}
Example:
>>> # Register various hooks
>>> register_before_llm_call_hook(llm_hook1)
>>> register_after_llm_call_hook(llm_hook2)
>>> register_before_tool_call_hook(tool_hook1)
>>> register_after_tool_call_hook(tool_hook2)
>>>
>>> # Clear all hooks at once
>>> result = clear_all_global_hooks()
>>> print(result)
{
'llm_hooks': (1, 1),
'tool_hooks': (1, 1),
'total': (2, 2)
}
"""
llm_counts = clear_all_llm_call_hooks()
tool_counts = clear_all_tool_call_hooks()
return {
"llm_hooks": llm_counts,
"tool_hooks": tool_counts,
"total": (llm_counts[0] + tool_counts[0], llm_counts[1] + tool_counts[1]),
}
__all__ = [
# Context classes
"LLMCallHookContext",
"ToolCallHookContext",
# Decorators
"after_llm_call",
"after_tool_call",
"before_llm_call",
"before_tool_call",
"clear_after_llm_call_hooks",
"clear_after_tool_call_hooks",
"clear_all_global_hooks",
"clear_all_llm_call_hooks",
"clear_all_tool_call_hooks",
# Clear hooks
"clear_before_llm_call_hooks",
"clear_before_tool_call_hooks",
"get_after_llm_call_hooks",
"get_after_tool_call_hooks",
# Get hooks
"get_before_llm_call_hooks",
"get_before_tool_call_hooks",
"register_after_llm_call_hook",
"register_after_tool_call_hook",
# LLM Hook registration
"register_before_llm_call_hook",
# Tool Hook registration
"register_before_tool_call_hook",
"unregister_after_llm_call_hook",
"unregister_after_tool_call_hook",
"unregister_before_llm_call_hook",
"unregister_before_tool_call_hook",
]

View File

@@ -0,0 +1,300 @@
from __future__ import annotations
from collections.abc import Callable
from functools import wraps
import inspect
from typing import TYPE_CHECKING, Any, TypeVar, overload
if TYPE_CHECKING:
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
F = TypeVar("F", bound=Callable[..., Any])
def _create_hook_decorator(
hook_type: str,
register_function: Callable[..., Any],
marker_attribute: str,
) -> Callable[..., Any]:
"""Create a hook decorator with filtering support.
This factory function eliminates code duplication across the four hook decorators.
Args:
hook_type: Type of hook ("llm" or "tool")
register_function: Function to call for registration (e.g., register_before_llm_call_hook)
marker_attribute: Attribute name to mark functions (e.g., "is_before_llm_call_hook")
Returns:
A decorator function that supports filters and auto-registration
"""
def decorator_factory(
func: Callable[..., Any] | None = None,
*,
tools: list[str] | None = None,
agents: list[str] | None = None,
) -> Callable[..., Any]:
def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
setattr(f, marker_attribute, True)
sig = inspect.signature(f)
params = list(sig.parameters.keys())
is_method = len(params) >= 2 and params[0] == "self"
if tools:
f._filter_tools = tools # type: ignore[attr-defined]
if agents:
f._filter_agents = agents # type: ignore[attr-defined]
if tools or agents:
@wraps(f)
def filtered_hook(context: Any) -> Any:
if tools and hasattr(context, "tool_name"):
if context.tool_name not in tools:
return None
if agents and hasattr(context, "agent"):
if context.agent and context.agent.role not in agents:
return None
return f(context)
if not is_method:
register_function(filtered_hook)
return f
if not is_method:
register_function(f)
return f
if func is None:
return decorator
return decorator(func)
return decorator_factory
@overload
def before_llm_call(
func: Callable[[LLMCallHookContext], None],
) -> Callable[[LLMCallHookContext], None]: ...
@overload
def before_llm_call(
*,
agents: list[str] | None = None,
) -> Callable[
[Callable[[LLMCallHookContext], None]], Callable[[LLMCallHookContext], None]
]: ...
def before_llm_call(
func: Callable[[LLMCallHookContext], None] | None = None,
*,
agents: list[str] | None = None,
) -> (
Callable[[LLMCallHookContext], None]
| Callable[
[Callable[[LLMCallHookContext], None]], Callable[[LLMCallHookContext], None]
]
):
"""Decorator to register a function as a before_llm_call hook.
Example:
Simple usage::
@before_llm_call
def log_calls(context):
print(f"LLM call by {context.agent.role}")
With agent filter::
@before_llm_call(agents=["Researcher", "Analyst"])
def log_specific_agents(context):
print(f"Filtered LLM call: {context.agent.role}")
"""
from crewai.hooks.llm_hooks import register_before_llm_call_hook
return _create_hook_decorator( # type: ignore[return-value]
hook_type="llm",
register_function=register_before_llm_call_hook,
marker_attribute="is_before_llm_call_hook",
)(func=func, agents=agents)
@overload
def after_llm_call(
func: Callable[[LLMCallHookContext], str | None],
) -> Callable[[LLMCallHookContext], str | None]: ...
@overload
def after_llm_call(
*,
agents: list[str] | None = None,
) -> Callable[
[Callable[[LLMCallHookContext], str | None]],
Callable[[LLMCallHookContext], str | None],
]: ...
def after_llm_call(
func: Callable[[LLMCallHookContext], str | None] | None = None,
*,
agents: list[str] | None = None,
) -> (
Callable[[LLMCallHookContext], str | None]
| Callable[
[Callable[[LLMCallHookContext], str | None]],
Callable[[LLMCallHookContext], str | None],
]
):
"""Decorator to register a function as an after_llm_call hook.
Example:
Simple usage::
@after_llm_call
def sanitize(context):
if "SECRET" in context.response:
return context.response.replace("SECRET", "[REDACTED]")
return None
With agent filter::
@after_llm_call(agents=["Researcher"])
def log_researcher_responses(context):
print(f"Response length: {len(context.response)}")
return None
"""
from crewai.hooks.llm_hooks import register_after_llm_call_hook
return _create_hook_decorator( # type: ignore[return-value]
hook_type="llm",
register_function=register_after_llm_call_hook,
marker_attribute="is_after_llm_call_hook",
)(func=func, agents=agents)
@overload
def before_tool_call(
func: Callable[[ToolCallHookContext], bool | None],
) -> Callable[[ToolCallHookContext], bool | None]: ...
@overload
def before_tool_call(
*,
tools: list[str] | None = None,
agents: list[str] | None = None,
) -> Callable[
[Callable[[ToolCallHookContext], bool | None]],
Callable[[ToolCallHookContext], bool | None],
]: ...
def before_tool_call(
func: Callable[[ToolCallHookContext], bool | None] | None = None,
*,
tools: list[str] | None = None,
agents: list[str] | None = None,
) -> (
Callable[[ToolCallHookContext], bool | None]
| Callable[
[Callable[[ToolCallHookContext], bool | None]],
Callable[[ToolCallHookContext], bool | None],
]
):
"""Decorator to register a function as a before_tool_call hook.
Example:
Simple usage::
@before_tool_call
def log_all_tools(context):
print(f"Tool: {context.tool_name}")
return None
With tool filter::
@before_tool_call(tools=["delete_file", "execute_code"])
def approve_dangerous(context):
response = context.request_human_input(prompt="Approve?")
return None if response == "yes" else False
With combined filters::
@before_tool_call(tools=["write_file"], agents=["Developer"])
def approve_dev_writes(context):
return None # Only for Developer writing files
"""
from crewai.hooks.tool_hooks import register_before_tool_call_hook
return _create_hook_decorator( # type: ignore[return-value]
hook_type="tool",
register_function=register_before_tool_call_hook,
marker_attribute="is_before_tool_call_hook",
)(func=func, tools=tools, agents=agents)
@overload
def after_tool_call(
func: Callable[[ToolCallHookContext], str | None],
) -> Callable[[ToolCallHookContext], str | None]: ...
@overload
def after_tool_call(
*,
tools: list[str] | None = None,
agents: list[str] | None = None,
) -> Callable[
[Callable[[ToolCallHookContext], str | None]],
Callable[[ToolCallHookContext], str | None],
]: ...
def after_tool_call(
func: Callable[[ToolCallHookContext], str | None] | None = None,
*,
tools: list[str] | None = None,
agents: list[str] | None = None,
) -> (
Callable[[ToolCallHookContext], str | None]
| Callable[
[Callable[[ToolCallHookContext], str | None]],
Callable[[ToolCallHookContext], str | None],
]
):
"""Decorator to register a function as an after_tool_call hook.
Example:
Simple usage::
@after_tool_call
def log_results(context):
print(f"Result: {len(context.tool_result)} chars")
return None
With tool filter::
@after_tool_call(tools=["web_search", "ExaSearchTool"])
def sanitize_search_results(context):
if "SECRET" in context.tool_result:
return context.tool_result.replace("SECRET", "[REDACTED]")
return None
"""
from crewai.hooks.tool_hooks import register_after_tool_call_hook
return _create_hook_decorator( # type: ignore[return-value]
hook_type="tool",
register_function=register_after_tool_call_hook,
marker_attribute="is_after_tool_call_hook",
)(func=func, tools=tools, agents=agents)
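
A brief sketch of the two invocation forms the factory above supports; the hook bodies are illustrative.

from crewai.hooks import before_llm_call

@before_llm_call  # bare form: marked and registered immediately
def log_every_call(context) -> None:
    print(f"LLM call #{context.iterations} by {context.agent.role}")

@before_llm_call(agents=["Researcher"])  # parameterized form: wrapped with an agent filter
def log_researcher_calls(context) -> None:
    print("Researcher is about to call the LLM")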

View File

@@ -0,0 +1,290 @@
from __future__ import annotations
from typing import TYPE_CHECKING
from crewai.events.event_listener import event_listener
from crewai.hooks.types import AfterLLMCallHookType, BeforeLLMCallHookType
from crewai.utilities.printer import Printer
if TYPE_CHECKING:
from crewai.agents.crew_agent_executor import CrewAgentExecutor
class LLMCallHookContext:
"""Context object passed to LLM call hooks with full executor access.
Provides hooks with complete access to the executor state, allowing
modification of messages, responses, and executor attributes.
Attributes:
executor: Full reference to the CrewAgentExecutor instance
messages: Direct reference to executor.messages (mutable list).
Can be modified in both before_llm_call and after_llm_call hooks.
Modifications in after_llm_call hooks persist to the next iteration,
allowing hooks to modify conversation history for subsequent LLM calls.
IMPORTANT: Modify messages in-place (e.g., append, extend, remove items).
Do NOT replace the list (e.g., context.messages = []), as this will break
the executor. Use context.messages.append() or context.messages.extend()
instead of assignment.
agent: Reference to the agent executing the task
task: Reference to the task being executed
crew: Reference to the crew instance
llm: Reference to the LLM instance
iterations: Current iteration count
response: LLM response string (only set for after_llm_call hooks).
Can be modified by returning a new string from after_llm_call hook.
"""
def __init__(
self,
executor: CrewAgentExecutor,
response: str | None = None,
) -> None:
"""Initialize hook context with executor reference.
Args:
executor: The CrewAgentExecutor instance
response: Optional response string (for after_llm_call hooks)
"""
self.executor = executor
self.messages = executor.messages
self.agent = executor.agent
self.task = executor.task
self.crew = executor.crew
self.llm = executor.llm
self.iterations = executor.iterations
self.response = response
def request_human_input(
self,
prompt: str,
default_message: str = "Press Enter to continue, or provide feedback:",
) -> str:
"""Request human input during LLM hook execution.
This method pauses live console updates, displays a prompt to the user,
waits for their input, and then resumes live updates. This is useful for
approval gates, debugging, or getting human feedback during execution.
Args:
prompt: Custom message to display to the user
default_message: Message shown after the prompt
Returns:
User's input as a string (empty string if just Enter pressed)
Example:
>>> def approval_hook(context: LLMCallHookContext) -> None:
... if context.iterations > 5:
... response = context.request_human_input(
... prompt="Allow this LLM call?",
... default_message="Type 'no' to skip, or press Enter:",
... )
... if response.lower() == "no":
... print("LLM call skipped by user")
"""
printer = Printer()
event_listener.formatter.pause_live_updates()
try:
printer.print(content=f"\n{prompt}", color="bold_yellow")
printer.print(content=default_message, color="cyan")
response = input().strip()
if response:
printer.print(content="\nProcessing your input...", color="cyan")
return response
finally:
event_listener.formatter.resume_live_updates()
_before_llm_call_hooks: list[BeforeLLMCallHookType] = []
_after_llm_call_hooks: list[AfterLLMCallHookType] = []
def register_before_llm_call_hook(
hook: BeforeLLMCallHookType,
) -> None:
"""Register a global before_llm_call hook.
Global hooks are added to all executors automatically.
This is a convenience function for registering hooks that should
apply to all LLM calls across all executors.
Args:
hook: Function that receives LLMCallHookContext and can:
- Modify context.messages directly (in-place)
- Return False to block LLM execution
- Return True or None to allow execution
IMPORTANT: Modify messages in-place (append, extend, remove items).
Do NOT replace the list (context.messages = []), as this will break execution.
Example:
>>> def log_llm_calls(context: LLMCallHookContext) -> None:
... print(f"LLM call by {context.agent.role}")
... print(f"Messages: {len(context.messages)}")
... return None # Allow execution
>>>
>>> register_before_llm_call_hook(log_llm_calls)
>>>
>>> def block_excessive_iterations(context: LLMCallHookContext) -> bool | None:
... if context.iterations > 10:
... print("Blocked: Too many iterations")
... return False # Block execution
... return None # Allow execution
>>>
>>> register_before_llm_call_hook(block_excessive_iterations)
"""
_before_llm_call_hooks.append(hook)
def register_after_llm_call_hook(
hook: AfterLLMCallHookType,
) -> None:
"""Register a global after_llm_call hook.
Global hooks are added to all executors automatically.
This is a convenience function for registering hooks that should
apply to all LLM calls across all executors.
Args:
hook: Function that receives LLMCallHookContext and can modify:
- The response: Return modified response string or None to keep original
- The messages: Modify context.messages directly (mutable reference)
Both modifications are supported and can be used together.
IMPORTANT: Modify messages in-place (append, extend, remove items).
Do NOT replace the list (context.messages = []), as this will break execution.
Example:
>>> def sanitize_response(context: LLMCallHookContext) -> str | None:
... if context.response and "SECRET" in context.response:
... return context.response.replace("SECRET", "[REDACTED]")
... return None
>>>
>>> register_after_llm_call_hook(sanitize_response)
"""
_after_llm_call_hooks.append(hook)
def get_before_llm_call_hooks() -> list[BeforeLLMCallHookType]:
"""Get all registered global before_llm_call hooks.
Returns:
List of registered before hooks
"""
return _before_llm_call_hooks.copy()
def get_after_llm_call_hooks() -> list[AfterLLMCallHookType]:
"""Get all registered global after_llm_call hooks.
Returns:
List of registered after hooks
"""
return _after_llm_call_hooks.copy()
def unregister_before_llm_call_hook(
hook: BeforeLLMCallHookType,
) -> bool:
"""Unregister a specific global before_llm_call hook.
Args:
hook: The hook function to remove
Returns:
True if the hook was found and removed, False otherwise
Example:
>>> def my_hook(context: LLMCallHookContext) -> None:
... print("Before LLM call")
>>>
>>> register_before_llm_call_hook(my_hook)
>>> unregister_before_llm_call_hook(my_hook)
True
"""
try:
_before_llm_call_hooks.remove(hook)
return True
except ValueError:
return False
def unregister_after_llm_call_hook(
hook: AfterLLMCallHookType,
) -> bool:
"""Unregister a specific global after_llm_call hook.
Args:
hook: The hook function to remove
Returns:
True if the hook was found and removed, False otherwise
Example:
>>> def my_hook(context: LLMCallHookContext) -> str | None:
... return None
>>>
>>> register_after_llm_call_hook(my_hook)
>>> unregister_after_llm_call_hook(my_hook)
True
"""
try:
_after_llm_call_hooks.remove(hook)
return True
except ValueError:
return False
def clear_before_llm_call_hooks() -> int:
"""Clear all registered global before_llm_call hooks.
Returns:
Number of hooks that were cleared
Example:
>>> register_before_llm_call_hook(hook1)
>>> register_before_llm_call_hook(hook2)
>>> clear_before_llm_call_hooks()
2
"""
count = len(_before_llm_call_hooks)
_before_llm_call_hooks.clear()
return count
def clear_after_llm_call_hooks() -> int:
"""Clear all registered global after_llm_call hooks.
Returns:
Number of hooks that were cleared
Example:
>>> register_after_llm_call_hook(hook1)
>>> register_after_llm_call_hook(hook2)
>>> clear_after_llm_call_hooks()
2
"""
count = len(_after_llm_call_hooks)
_after_llm_call_hooks.clear()
return count
def clear_all_llm_call_hooks() -> tuple[int, int]:
"""Clear all registered global LLM call hooks (both before and after).
Returns:
Tuple of (before_hooks_cleared, after_hooks_cleared)
Example:
>>> register_before_llm_call_hook(before_hook)
>>> register_after_llm_call_hook(after_hook)
>>> clear_all_llm_call_hooks()
(1, 1)
"""
before_count = clear_before_llm_call_hooks()
after_count = clear_after_llm_call_hooks()
return (before_count, after_count)
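
Because these registries are module-level globals, tests that register hooks should reset them between cases; a minimal pytest sketch using the clear helper above:

import pytest

from crewai.hooks.llm_hooks import clear_all_llm_call_hooks

@pytest.fixture(autouse=True)
def _reset_llm_hooks():
    yield
    # Returns (before_cleared, after_cleared); discarded here.
    clear_all_llm_call_hooks()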

View File

@@ -0,0 +1,305 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.events.event_listener import event_listener
from crewai.hooks.types import AfterToolCallHookType, BeforeToolCallHookType
from crewai.utilities.printer import Printer
if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.crew import Crew
from crewai.task import Task
from crewai.tools.structured_tool import CrewStructuredTool
class ToolCallHookContext:
"""Context object passed to tool call hooks.
Provides hooks with access to the tool being called, its input,
the agent/task/crew context, and the result (for after hooks).
Attributes:
tool_name: Name of the tool being called
tool_input: Tool input parameters (mutable dict).
Can be modified in-place by before_tool_call hooks.
IMPORTANT: Modify in-place (e.g., context.tool_input['key'] = value).
Do NOT replace the dict (e.g., context.tool_input = {}), as this
will not affect the actual tool execution.
tool: Reference to the CrewStructuredTool instance
agent: Agent executing the tool (may be None)
task: Current task being executed (may be None)
crew: Crew instance (may be None)
tool_result: Tool execution result (only set for after_tool_call hooks).
Can be modified by returning a new string from after_tool_call hook.
"""
def __init__(
self,
tool_name: str,
tool_input: dict[str, Any],
tool: CrewStructuredTool,
agent: Agent | BaseAgent | None = None,
task: Task | None = None,
crew: Crew | None = None,
tool_result: str | None = None,
) -> None:
"""Initialize tool call hook context.
Args:
tool_name: Name of the tool being called
tool_input: Tool input parameters (mutable)
tool: Tool instance reference
agent: Optional agent executing the tool
task: Optional current task
crew: Optional crew instance
tool_result: Optional tool result (for after hooks)
"""
self.tool_name = tool_name
self.tool_input = tool_input
self.tool = tool
self.agent = agent
self.task = task
self.crew = crew
self.tool_result = tool_result
def request_human_input(
self,
prompt: str,
default_message: str = "Press Enter to continue, or provide feedback:",
) -> str:
"""Request human input during tool hook execution.
This method pauses live console updates, displays a prompt to the user,
waits for their input, and then resumes live updates. This is useful for
approval gates, reviewing tool results, or getting human feedback during execution.
Args:
prompt: Custom message to display to the user
default_message: Message shown after the prompt
Returns:
User's input as a string (empty string if just Enter pressed)
Example:
>>> def approval_hook(context: ToolCallHookContext) -> bool | None:
... if context.tool_name == "delete_file":
... response = context.request_human_input(
... prompt="Allow file deletion?",
... default_message="Type 'approve' to continue:",
... )
... if response.lower() != "approve":
... return False # Block execution
... return None # Allow execution
"""
printer = Printer()
event_listener.formatter.pause_live_updates()
try:
printer.print(content=f"\n{prompt}", color="bold_yellow")
printer.print(content=default_message, color="cyan")
response = input().strip()
if response:
printer.print(content="\nProcessing your input...", color="cyan")
return response
finally:
event_listener.formatter.resume_live_updates()
# Global hook registries
_before_tool_call_hooks: list[BeforeToolCallHookType] = []
_after_tool_call_hooks: list[AfterToolCallHookType] = []
def register_before_tool_call_hook(
hook: BeforeToolCallHookType,
) -> None:
"""Register a global before_tool_call hook.
Global hooks are added to all tool executions automatically.
This is a convenience function for registering hooks that should
apply to all tool calls across all agents and crews.
Args:
hook: Function that receives ToolCallHookContext and can:
- Modify tool_input in-place
- Return False to block tool execution
- Return True or None to allow execution
IMPORTANT: Modify tool_input in-place (e.g., context.tool_input['key'] = value).
Do NOT replace the dict (context.tool_input = {}), as this will not affect
the actual tool execution.
Example:
>>> def log_tool_usage(context: ToolCallHookContext) -> None:
... print(f"Executing tool: {context.tool_name}")
... print(f"Input: {context.tool_input}")
... return None # Allow execution
>>>
>>> register_before_tool_call_hook(log_tool_usage)
>>> def block_dangerous_tools(context: ToolCallHookContext) -> bool | None:
... if context.tool_name == "delete_database":
... print("Blocked dangerous tool execution!")
... return False # Block execution
... return None # Allow execution
>>>
>>> register_before_tool_call_hook(block_dangerous_tools)
"""
_before_tool_call_hooks.append(hook)
def register_after_tool_call_hook(
hook: AfterToolCallHookType,
) -> None:
"""Register a global after_tool_call hook.
Global hooks are added to all tool executions automatically.
This is a convenience function for registering hooks that should
apply to all tool calls across all agents and crews.
Args:
hook: Function that receives ToolCallHookContext and can modify
the tool result. Return modified result string or None to keep
the original result. The tool_result is available in context.tool_result.
Example:
>>> def sanitize_output(context: ToolCallHookContext) -> str | None:
... if context.tool_result and "SECRET_KEY" in context.tool_result:
... return context.tool_result.replace("SECRET_KEY=...", "[REDACTED]")
... return None # Keep original result
>>>
>>> register_after_tool_call_hook(sanitize_output)
>>> def log_tool_results(context: ToolCallHookContext) -> None:
... print(f"Tool {context.tool_name} returned: {context.tool_result[:100]}")
... return None # Keep original result
>>>
>>> register_after_tool_call_hook(log_tool_results)
"""
_after_tool_call_hooks.append(hook)
def get_before_tool_call_hooks() -> list[BeforeToolCallHookType]:
"""Get all registered global before_tool_call hooks.
Returns:
List of registered before hooks
"""
return _before_tool_call_hooks.copy()
def get_after_tool_call_hooks() -> list[AfterToolCallHookType]:
"""Get all registered global after_tool_call hooks.
Returns:
List of registered after hooks
"""
return _after_tool_call_hooks.copy()
def unregister_before_tool_call_hook(
hook: BeforeToolCallHookType,
) -> bool:
"""Unregister a specific global before_tool_call hook.
Args:
hook: The hook function to remove
Returns:
True if the hook was found and removed, False otherwise
Example:
>>> def my_hook(context: ToolCallHookContext) -> None:
... print("Before tool call")
>>>
>>> register_before_tool_call_hook(my_hook)
>>> unregister_before_tool_call_hook(my_hook)
True
"""
try:
_before_tool_call_hooks.remove(hook)
return True
except ValueError:
return False
def unregister_after_tool_call_hook(
hook: AfterToolCallHookType,
) -> bool:
"""Unregister a specific global after_tool_call hook.
Args:
hook: The hook function to remove
Returns:
True if the hook was found and removed, False otherwise
Example:
>>> def my_hook(context: ToolCallHookContext) -> str | None:
... return None
>>>
>>> register_after_tool_call_hook(my_hook)
>>> unregister_after_tool_call_hook(my_hook)
True
"""
try:
_after_tool_call_hooks.remove(hook)
return True
except ValueError:
return False
def clear_before_tool_call_hooks() -> int:
"""Clear all registered global before_tool_call hooks.
Returns:
Number of hooks that were cleared
Example:
>>> register_before_tool_call_hook(hook1)
>>> register_before_tool_call_hook(hook2)
>>> clear_before_tool_call_hooks()
2
"""
count = len(_before_tool_call_hooks)
_before_tool_call_hooks.clear()
return count
def clear_after_tool_call_hooks() -> int:
"""Clear all registered global after_tool_call hooks.
Returns:
Number of hooks that were cleared
Example:
>>> register_after_tool_call_hook(hook1)
>>> register_after_tool_call_hook(hook2)
>>> clear_after_tool_call_hooks()
2
"""
count = len(_after_tool_call_hooks)
_after_tool_call_hooks.clear()
return count
def clear_all_tool_call_hooks() -> tuple[int, int]:
"""Clear all registered global tool call hooks (both before and after).
Returns:
Tuple of (before_hooks_cleared, after_hooks_cleared)
Example:
>>> register_before_tool_call_hook(before_hook)
>>> register_after_tool_call_hook(after_hook)
>>> clear_all_tool_call_hooks()
(1, 1)
"""
before_count = clear_before_tool_call_hooks()
after_count = clear_after_tool_call_hooks()
return (before_count, after_count)
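
A hedged sketch of the in-place input mutation described above; the "web_search" tool and its "max_results" parameter are hypothetical.

from crewai.hooks.tool_hooks import ToolCallHookContext, register_before_tool_call_hook

def cap_search_results(context: ToolCallHookContext) -> None:
    if context.tool_name == "web_search":
        # Mutate the dict in place; never replace it.
        context.tool_input["max_results"] = min(context.tool_input.get("max_results", 10), 5)
    return None  # allow execution

register_before_tool_call_hook(cap_search_results)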

View File

@@ -0,0 +1,137 @@
from __future__ import annotations
from collections.abc import Callable
from typing import TYPE_CHECKING, Generic, Protocol, TypeVar, runtime_checkable
if TYPE_CHECKING:
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
ContextT = TypeVar("ContextT", contravariant=True)
ReturnT = TypeVar("ReturnT", covariant=True)
@runtime_checkable
class Hook(Protocol, Generic[ContextT, ReturnT]):
"""Generic protocol for hook functions.
This protocol defines the common interface for all hook types in CrewAI.
Hooks receive a context object and optionally return a modified result.
Type Parameters:
ContextT: The context type (LLMCallHookContext or ToolCallHookContext)
ReturnT: The return type (None, str | None, or bool | None)
Example:
>>> # Before LLM call hook: receives LLMCallHookContext, returns None
>>> hook: Hook[LLMCallHookContext, None] = lambda ctx: print(ctx.iterations)
>>>
>>> # After LLM call hook: receives LLMCallHookContext, returns str | None
>>> hook: Hook[LLMCallHookContext, str | None] = lambda ctx: ctx.response
"""
def __call__(self, context: ContextT) -> ReturnT:
"""Execute the hook with the given context.
Args:
context: Context object with relevant execution state
Returns:
Hook-specific return value (None, str | None, or bool | None)
"""
...
class BeforeLLMCallHook(Hook["LLMCallHookContext", bool | None], Protocol):
"""Protocol for before_llm_call hooks.
These hooks are called before an LLM is invoked and can modify the messages
that will be sent to the LLM or block the execution entirely.
"""
def __call__(self, context: LLMCallHookContext) -> bool | None:
"""Execute the before LLM call hook.
Args:
context: Context object with executor, messages, agent, task, etc.
Messages can be modified in-place.
Returns:
False to block LLM execution, True or None to allow execution
"""
...
class AfterLLMCallHook(Hook["LLMCallHookContext", str | None], Protocol):
"""Protocol for after_llm_call hooks.
These hooks are called after an LLM returns a response and can modify
the response or the message history.
"""
def __call__(self, context: LLMCallHookContext) -> str | None:
"""Execute the after LLM call hook.
Args:
context: Context object with executor, messages, agent, task, response, etc.
Messages can be modified in-place. Response is available in context.response.
Returns:
Modified response string, or None to keep the original response
"""
...
class BeforeToolCallHook(Hook["ToolCallHookContext", bool | None], Protocol):
"""Protocol for before_tool_call hooks.
These hooks are called before a tool is executed and can modify the tool
input or block the execution entirely.
"""
def __call__(self, context: ToolCallHookContext) -> bool | None:
"""Execute the before tool call hook.
Args:
context: Context object with tool_name, tool_input, tool, agent, task, etc.
Tool input can be modified in-place.
Returns:
False to block tool execution, True or None to allow execution
"""
...
class AfterToolCallHook(Hook["ToolCallHookContext", str | None], Protocol):
"""Protocol for after_tool_call hooks.
These hooks are called after a tool executes and can modify the result.
"""
def __call__(self, context: ToolCallHookContext) -> str | None:
"""Execute the after tool call hook.
Args:
context: Context object with tool_name, tool_input, tool_result, etc.
Tool result is available in context.tool_result.
Returns:
Modified tool result string, or None to keep the original result
"""
...
# Type aliases for hook functions; shared return conventions:
# - All before hooks: bool | None (False = block execution, True/None = allow)
# - All after hooks: str | None (str = modified result, None = keep original)
BeforeLLMCallHookType = Hook["LLMCallHookContext", bool | None]
AfterLLMCallHookType = Hook["LLMCallHookContext", str | None]
BeforeToolCallHookType = Hook["ToolCallHookContext", bool | None]
AfterToolCallHookType = Hook["ToolCallHookContext", str | None]
# Alternative Callable-based type aliases for compatibility
BeforeLLMCallHookCallable = Callable[["LLMCallHookContext"], bool | None]
AfterLLMCallHookCallable = Callable[["LLMCallHookContext"], str | None]
BeforeToolCallHookCallable = Callable[["ToolCallHookContext"], bool | None]
AfterToolCallHookCallable = Callable[["ToolCallHookContext"], str | None]
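
A minimal sketch of a plain function structurally satisfying these aliases; the "shell" tool name is illustrative.

from crewai.hooks.tool_hooks import ToolCallHookContext
from crewai.hooks.types import BeforeToolCallHookType

def block_shell(context: ToolCallHookContext) -> bool | None:
    # False blocks execution; None allows it.
    return False if context.tool_name == "shell" else None

hook: BeforeToolCallHookType = block_shell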

View File

@@ -0,0 +1,157 @@
from __future__ import annotations
from collections.abc import Callable
from typing import TYPE_CHECKING, Any, TypeVar
if TYPE_CHECKING:
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
P = TypeVar("P")
R = TypeVar("R")
def _copy_method_metadata(wrapper: Any, original: Callable[..., Any]) -> None:
"""Copy metadata from original function to wrapper.
Args:
wrapper: The wrapper object to copy metadata to
original: The original function to copy from
"""
wrapper.__name__ = original.__name__
wrapper.__doc__ = original.__doc__
wrapper.__module__ = original.__module__
wrapper.__qualname__ = original.__qualname__
wrapper.__annotations__ = original.__annotations__
class BeforeLLMCallHookMethod:
"""Wrapper for methods marked as before_llm_call hooks within @CrewBase classes.
This wrapper marks a method so it can be detected and registered as a
crew-scoped hook during crew initialization.
"""
is_before_llm_call_hook: bool = True
def __init__(
self,
meth: Callable[[Any, LLMCallHookContext], None],
agents: list[str] | None = None,
) -> None:
"""Initialize the hook method wrapper.
Args:
meth: The method to wrap
agents: Optional list of agent roles to filter
"""
self._meth = meth
self.agents = agents
_copy_method_metadata(self, meth)
def __call__(self, *args: Any, **kwargs: Any) -> None:
"""Call the wrapped method.
Args:
*args: Positional arguments
**kwargs: Keyword arguments
"""
return self._meth(*args, **kwargs)
def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
"""Support instance methods by implementing descriptor protocol.
Args:
obj: The instance that the method is accessed through
objtype: The type of the instance
Returns:
Self when accessed through class, bound method when accessed through instance
"""
if obj is None:
return self
# Return bound method
return lambda context: self._meth(obj, context)
class AfterLLMCallHookMethod:
"""Wrapper for methods marked as after_llm_call hooks within @CrewBase classes."""
is_after_llm_call_hook: bool = True
def __init__(
self,
meth: Callable[[Any, LLMCallHookContext], str | None],
agents: list[str] | None = None,
) -> None:
"""Initialize the hook method wrapper."""
self._meth = meth
self.agents = agents
_copy_method_metadata(self, meth)
def __call__(self, *args: Any, **kwargs: Any) -> str | None:
"""Call the wrapped method."""
return self._meth(*args, **kwargs)
def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
"""Support instance methods."""
if obj is None:
return self
return lambda context: self._meth(obj, context)
class BeforeToolCallHookMethod:
"""Wrapper for methods marked as before_tool_call hooks within @CrewBase classes."""
is_before_tool_call_hook: bool = True
def __init__(
self,
meth: Callable[[Any, ToolCallHookContext], bool | None],
tools: list[str] | None = None,
agents: list[str] | None = None,
) -> None:
"""Initialize the hook method wrapper."""
self._meth = meth
self.tools = tools
self.agents = agents
_copy_method_metadata(self, meth)
def __call__(self, *args: Any, **kwargs: Any) -> bool | None:
"""Call the wrapped method."""
return self._meth(*args, **kwargs)
def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
"""Support instance methods."""
if obj is None:
return self
return lambda context: self._meth(obj, context)
class AfterToolCallHookMethod:
"""Wrapper for methods marked as after_tool_call hooks within @CrewBase classes."""
is_after_tool_call_hook: bool = True
def __init__(
self,
meth: Callable[[Any, ToolCallHookContext], str | None],
tools: list[str] | None = None,
agents: list[str] | None = None,
) -> None:
"""Initialize the hook method wrapper."""
self._meth = meth
self.tools = tools
self.agents = agents
_copy_method_metadata(self, meth)
def __call__(self, *args: Any, **kwargs: Any) -> str | None:
"""Call the wrapped method."""
return self._meth(*args, **kwargs)
def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
"""Support instance methods."""
if obj is None:
return self
return lambda context: self._meth(obj, context)
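
A hedged sketch of crew-scoped hook methods: the decorators detect the `self` parameter and defer registration until crew initialization. The class, agent roles, and tool name are hypothetical.

from crewai.hooks import after_llm_call, before_tool_call
from crewai.project import CrewBase

@CrewBase
class ContentCrew:
    @before_tool_call(tools=["write_file"])
    def guard_writes(self, context) -> bool | None:
        return None  # return False here to block the write for this crew only

    @after_llm_call
    def log_response_size(self, context) -> str | None:
        print(f"{context.agent.role}: {len(context.response or '')} chars")
        return None  # keep the original response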

View File

@@ -358,6 +358,7 @@ class LiteAgent(FlowTrackable, BaseModel):
pydantic=formatted_result,
agent_role=self.role,
usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
messages=self._messages,
)
# Process guardrail if set
@@ -541,6 +542,7 @@ class LiteAgent(FlowTrackable, BaseModel):
agent_key=self.key,
agent_role=self.role,
agent=self.original_agent,
crew=None,
)
except Exception as e:
raise e

View File

@@ -6,6 +6,8 @@ from typing import Any
from pydantic import BaseModel, Field
from crewai.utilities.types import LLMMessage
class LiteAgentOutput(BaseModel):
"""Class that represents the result of a LiteAgent execution."""
@@ -20,6 +22,7 @@ class LiteAgentOutput(BaseModel):
usage_metrics: dict[str, Any] | None = Field(
description="Token usage metrics for this execution", default=None
)
messages: list[LLMMessage] = Field(description="LLM messages exchanged during the agent's execution", default=[])
def to_dict(self) -> dict[str, Any]:
"""Convert pydantic_output to a dictionary."""

View File

@@ -293,6 +293,8 @@ class CrewBaseMeta(type):
kickoff=_filter_methods(original_methods, "is_kickoff"),
)
_register_crew_hooks(instance, cls)
def close_mcp_server(
self: CrewInstance, _instance: CrewInstance, outputs: CrewOutput
@@ -438,6 +440,144 @@ def _filter_methods(
}
def _register_crew_hooks(instance: CrewInstance, cls: type) -> None:
"""Detect and register crew-scoped hook methods.
Args:
instance: Crew instance to register hooks for.
cls: Crew class type.
"""
hook_methods = {
name: method
for name, method in cls.__dict__.items()
if any(
hasattr(method, attr)
for attr in [
"is_before_llm_call_hook",
"is_after_llm_call_hook",
"is_before_tool_call_hook",
"is_after_tool_call_hook",
]
)
}
if not hook_methods:
return
from crewai.hooks import (
register_after_llm_call_hook,
register_after_tool_call_hook,
register_before_llm_call_hook,
register_before_tool_call_hook,
)
instance._registered_hook_functions = []
instance._hooks_being_registered = True
for hook_method in hook_methods.values():
bound_hook = hook_method.__get__(instance, cls)
has_tool_filter = hasattr(hook_method, "_filter_tools")
has_agent_filter = hasattr(hook_method, "_filter_agents")
if hasattr(hook_method, "is_before_llm_call_hook"):
if has_agent_filter:
agents_filter = hook_method._filter_agents
def make_filtered_before_llm(bound_fn, agents_list):
def filtered(context):
if context.agent and context.agent.role not in agents_list:
return None
return bound_fn(context)
return filtered
final_hook = make_filtered_before_llm(bound_hook, agents_filter)
else:
final_hook = bound_hook
register_before_llm_call_hook(final_hook)
instance._registered_hook_functions.append(("before_llm_call", final_hook))
if hasattr(hook_method, "is_after_llm_call_hook"):
if has_agent_filter:
agents_filter = hook_method._filter_agents
def make_filtered_after_llm(bound_fn, agents_list):
def filtered(context):
if context.agent and context.agent.role not in agents_list:
return None
return bound_fn(context)
return filtered
final_hook = make_filtered_after_llm(bound_hook, agents_filter)
else:
final_hook = bound_hook
register_after_llm_call_hook(final_hook)
instance._registered_hook_functions.append(("after_llm_call", final_hook))
if hasattr(hook_method, "is_before_tool_call_hook"):
if has_tool_filter or has_agent_filter:
tools_filter = getattr(hook_method, "_filter_tools", None)
agents_filter = getattr(hook_method, "_filter_agents", None)
def make_filtered_before_tool(bound_fn, tools_list, agents_list):
def filtered(context):
if tools_list and context.tool_name not in tools_list:
return None
if (
agents_list
and context.agent
and context.agent.role not in agents_list
):
return None
return bound_fn(context)
return filtered
final_hook = make_filtered_before_tool(
bound_hook, tools_filter, agents_filter
)
else:
final_hook = bound_hook
register_before_tool_call_hook(final_hook)
instance._registered_hook_functions.append(("before_tool_call", final_hook))
if hasattr(hook_method, "is_after_tool_call_hook"):
if has_tool_filter or has_agent_filter:
tools_filter = getattr(hook_method, "_filter_tools", None)
agents_filter = getattr(hook_method, "_filter_agents", None)
def make_filtered_after_tool(bound_fn, tools_list, agents_list):
def filtered(context):
if tools_list and context.tool_name not in tools_list:
return None
if (
agents_list
and context.agent
and context.agent.role not in agents_list
):
return None
return bound_fn(context)
return filtered
final_hook = make_filtered_after_tool(
bound_hook, tools_filter, agents_filter
)
else:
final_hook = bound_hook
register_after_tool_call_hook(final_hook)
instance._registered_hook_functions.append(("after_tool_call", final_hook))
instance._hooks_being_registered = False
def map_all_agent_variables(self: CrewInstance) -> None:
"""Map agent configuration variables to callable instances.

View File

@@ -539,6 +539,7 @@ class Task(BaseModel):
json_dict=json_output,
agent=agent.role,
output_format=self._get_output_format(),
messages=agent.last_messages,
)
if self._guardrails:
@@ -949,6 +950,7 @@ Follow these guidelines:
json_dict=json_output,
agent=agent.role,
output_format=self._get_output_format(),
messages=agent.last_messages,
)
return task_output

View File

@@ -6,6 +6,7 @@ from typing import Any
from pydantic import BaseModel, Field, model_validator
from crewai.tasks.output_format import OutputFormat
from crewai.utilities.types import LLMMessage
class TaskOutput(BaseModel):
@@ -40,6 +41,7 @@ class TaskOutput(BaseModel):
output_format: OutputFormat = Field(
description="Output format of the task", default=OutputFormat.RAW
)
messages: list[LLMMessage] = Field(description="LLM messages exchanged during task execution", default=[])
@model_validator(mode="after")
def set_summary(self):

View File

@@ -33,6 +33,7 @@ from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.lite_agent import LiteAgent
from crewai.llm import LLM
from crewai.task import Task
@@ -236,6 +237,7 @@ def get_llm_response(
from_task: Task | None = None,
from_agent: Agent | LiteAgent | None = None,
response_model: type[BaseModel] | None = None,
executor_context: CrewAgentExecutor | None = None,
) -> str:
"""Call the LLM and return the response, handling any invalid responses.
@@ -247,6 +249,7 @@ def get_llm_response(
from_task: Optional task context for the LLM call
from_agent: Optional agent context for the LLM call
response_model: Optional Pydantic model for structured outputs
executor_context: Optional executor context for hook invocation
Returns:
The response from the LLM as a string
@@ -255,6 +258,12 @@ def get_llm_response(
Exception: If an error occurs.
ValueError: If the response is None or empty.
"""
if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer):
raise ValueError("LLM call blocked by before_llm_call hook")
messages = executor_context.messages
try:
answer = llm.call(
messages,
@@ -272,7 +281,7 @@ def get_llm_response(
)
raise ValueError("Invalid response from LLM call - None or empty.")
return answer
return _setup_after_llm_call_hooks(executor_context, answer, printer)
def process_llm_response(
@@ -661,3 +670,103 @@ def load_agent_from_repository(from_repository: str) -> dict[str, Any]:
else:
attributes[key] = value
return attributes
def _setup_before_llm_call_hooks(
executor_context: CrewAgentExecutor | None, printer: Printer
) -> bool:
"""Setup and invoke before_llm_call hooks for the executor context.
Args:
executor_context: The executor context to setup the hooks for.
printer: Printer instance for error logging.
Returns:
True if LLM execution should proceed, False if blocked by a hook.
"""
if executor_context and executor_context.before_llm_call_hooks:
from crewai.hooks.llm_hooks import LLMCallHookContext
original_messages = executor_context.messages
hook_context = LLMCallHookContext(executor_context)
try:
for hook in executor_context.before_llm_call_hooks:
result = hook(hook_context)
if result is False:
printer.print(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)
if not isinstance(executor_context.messages, list):
printer.print(
content=(
"Warning: before_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
if isinstance(original_messages, list):
executor_context.messages = original_messages
else:
executor_context.messages = []
return True
def _setup_after_llm_call_hooks(
executor_context: CrewAgentExecutor | None,
answer: str,
printer: Printer,
) -> str:
"""Setup and invoke after_llm_call hooks for the executor context.
Args:
executor_context: The executor context to setup the hooks for.
answer: The LLM response string.
printer: Printer instance for error logging.
Returns:
The potentially modified response string.
"""
if executor_context and executor_context.after_llm_call_hooks:
from crewai.hooks.llm_hooks import LLMCallHookContext
original_messages = executor_context.messages
hook_context = LLMCallHookContext(executor_context, response=answer)
try:
for hook in executor_context.after_llm_call_hooks:
modified_response = hook(hook_context)
if modified_response is not None and isinstance(modified_response, str):
answer = modified_response
except Exception as e:
printer.print(
content=f"Error in after_llm_call hook: {e}",
color="yellow",
)
if not isinstance(executor_context.messages, list):
printer.print(
content=(
"Warning: after_llm_call hook replaced messages with non-list. "
"Restoring original messages list. Hooks should modify messages in-place, "
"not replace the list (e.g., use context.messages.append() not context.messages = [])."
),
color="yellow",
)
if isinstance(original_messages, list):
executor_context.messages = original_messages
else:
executor_context.messages = []
return answer
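
The restore logic above enforces an in-place mutation contract; a minimal compliant hook, with an illustrative system note:

from crewai.hooks.llm_hooks import LLMCallHookContext

def add_system_note(context: LLMCallHookContext) -> None:
    # OK: mutate the executor's list in place.
    context.messages.append({"role": "system", "content": "Be concise."})
    # NOT OK: context.messages = []  (replacement is detected and reverted)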

View File

@@ -4,16 +4,23 @@ from typing import TYPE_CHECKING
from crewai.agents.parser import AgentAction
from crewai.agents.tools_handler import ToolsHandler
from crewai.hooks.tool_hooks import (
ToolCallHookContext,
get_after_tool_call_hooks,
get_before_tool_call_hooks,
)
from crewai.security.fingerprint import Fingerprint
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.tools.tool_types import ToolResult
from crewai.tools.tool_usage import ToolUsage, ToolUsageError
from crewai.utilities.i18n import I18N
from crewai.utilities.logger import Logger
if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.crew import Crew
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.task import Task
@@ -30,9 +37,13 @@ def execute_tool_and_check_finality(
agent: Agent | BaseAgent | None = None,
function_calling_llm: BaseLLM | LLM | None = None,
fingerprint_context: dict[str, str] | None = None,
crew: Crew | None = None,
) -> ToolResult:
"""Execute a tool and check if the result should be treated as a final answer.
This function integrates tool hooks for before and after tool execution,
allowing programmatic interception and modification of tool calls.
Args:
agent_action: The action containing the tool to execute
tools: List of available tools
@@ -44,10 +55,12 @@ def execute_tool_and_check_finality(
agent: Optional agent instance for tool execution
function_calling_llm: Optional LLM for function calling
fingerprint_context: Optional context for fingerprinting
crew: Optional crew instance for hook context
Returns:
ToolResult containing the execution result and whether it should be treated as a final answer
"""
logger = Logger(verbose=crew.verbose if crew else False)
tool_name_to_tool_map = {tool.name: tool for tool in tools}
if agent_key and agent_role and agent:
@@ -83,10 +96,62 @@ def execute_tool_and_check_finality(
] or tool_calling.tool_name.casefold().replace("_", " ") in [
name.casefold().strip() for name in tool_name_to_tool_map
]:
tool_result = tool_usage.use(tool_calling, agent_action.text)
tool = tool_name_to_tool_map.get(tool_calling.tool_name)
if tool:
return ToolResult(tool_result, tool.result_as_answer)
if not tool:
tool_result = i18n.errors("wrong_tool_name").format(
tool=tool_calling.tool_name,
tools=", ".join([t.name.casefold() for t in tools]),
)
return ToolResult(result=tool_result, result_as_answer=False)
tool_input = tool_calling.arguments if tool_calling.arguments else {}
hook_context = ToolCallHookContext(
tool_name=tool_calling.tool_name,
tool_input=tool_input,
tool=tool,
agent=agent,
task=task,
crew=crew,
)
before_hooks = get_before_tool_call_hooks()
try:
for hook in before_hooks:
result = hook(hook_context)
if result is False:
blocked_message = (
f"Tool execution blocked by hook. "
f"Tool: {tool_calling.tool_name}"
)
return ToolResult(blocked_message, False)
except Exception as e:
logger.log("error", f"Error in before_tool_call hook: {e}")
tool_result = tool_usage.use(tool_calling, agent_action.text)
after_hook_context = ToolCallHookContext(
tool_name=tool_calling.tool_name,
tool_input=tool_input,
tool=tool,
agent=agent,
task=task,
crew=crew,
tool_result=tool_result,
)
# Execute after_tool_call hooks
after_hooks = get_after_tool_call_hooks()
modified_result = tool_result
try:
for hook in after_hooks:
hook_result = hook(after_hook_context)
if hook_result is not None:
modified_result = hook_result
after_hook_context.tool_result = modified_result
except Exception as e:
logger.log("error", f"Error in after_tool_call hook: {e}")
return ToolResult(modified_result, tool.result_as_answer)
# Handle invalid tool name
tool_result = i18n.errors("wrong_tool_name").format(

View File

@@ -1,6 +1,8 @@
"""Types for CrewAI utilities."""
from typing import Any, Literal, TypedDict
from typing import Any, Literal
from typing_extensions import TypedDict
class LLMMessage(TypedDict):

View File

@@ -0,0 +1,147 @@
"""Test trust_remote_completion_status flag in A2A wrapper."""
from unittest.mock import MagicMock, patch
import pytest
from crewai.a2a.config import A2AConfig
try:
from a2a.types import Message, Role
A2A_SDK_INSTALLED = True
except ImportError:
A2A_SDK_INSTALLED = False
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_true_returns_directly():
"""When trust_remote_completion_status=True and A2A returns completed, return result directly."""
from crewai.a2a.wrapper import _delegate_to_a2a
from crewai.a2a.types import AgentResponseProtocol
from crewai import Agent, Task
a2a_config = A2AConfig(
endpoint="http://test-endpoint.com",
trust_remote_completion_status=True,
)
agent = Agent(
role="test manager",
goal="coordinate",
backstory="test",
a2a=a2a_config,
)
task = Task(description="test", expected_output="test", agent=agent)
class MockResponse:
is_a2a = True
message = "Please help"
a2a_ids = ["http://test-endpoint.com/"]
with (
patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
):
mock_card = MagicMock()
mock_card.name = "Test"
mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})
# A2A returns completed
mock_execute.return_value = {
"status": "completed",
"result": "Done by remote",
"history": [],
}
# Should return the remote result directly, without calling the server agent's LLM
result = _delegate_to_a2a(
self=agent,
agent_response=MockResponse(),
task=task,
original_fn=lambda *args, **kwargs: "fallback",
context=None,
tools=None,
agent_cards={"http://test-endpoint.com/": mock_card},
original_task_description="test",
)
assert result == "Done by remote"
assert mock_execute.call_count == 1
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_false_continues_conversation():
"""When trust_remote_completion_status=False and A2A returns completed, ask server agent."""
from crewai.a2a.wrapper import _delegate_to_a2a
from crewai import Agent, Task
a2a_config = A2AConfig(
endpoint="http://test-endpoint.com",
trust_remote_completion_status=False,
)
agent = Agent(
role="test manager",
goal="coordinate",
backstory="test",
a2a=a2a_config,
)
task = Task(description="test", expected_output="test", agent=agent)
class MockResponse:
is_a2a = True
message = "Please help"
a2a_ids = ["http://test-endpoint.com/"]
call_count = 0
def mock_original_fn(self, task, context, tools):
nonlocal call_count
call_count += 1
if call_count == 1:
# Server decides to finish
return '{"is_a2a": false, "message": "Server final answer", "a2a_ids": []}'
return "unexpected"
with (
patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
):
mock_card = MagicMock()
mock_card.name = "Test"
mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})
# A2A returns completed
mock_execute.return_value = {
"status": "completed",
"result": "Done by remote",
"history": [],
}
result = _delegate_to_a2a(
self=agent,
agent_response=MockResponse(),
task=task,
original_fn=mock_original_fn,
context=None,
tools=None,
agent_cards={"http://test-endpoint.com/": mock_card},
original_task_description="test",
)
# Should call original_fn to get server response
assert call_count >= 1
assert result == "Server final answer"
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_default_trust_remote_completion_status_is_false():
"""Verify that default value of trust_remote_completion_status is False."""
a2a_config = A2AConfig(
endpoint="http://test-endpoint.com",
)
assert a2a_config.trust_remote_completion_status is False
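
For reference, a hedged configuration sketch of the flag these tests exercise; the endpoint is a placeholder.

from crewai import Agent
from crewai.a2a.config import A2AConfig

coordinator = Agent(
    role="coordinator",
    goal="delegate work to a remote specialist",
    backstory="routes tasks over A2A",
    a2a=A2AConfig(
        endpoint="http://remote-agent.example/",
        trust_remote_completion_status=True,  # return the remote "completed" result directly
    ),
)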

View File

@@ -2148,7 +2148,7 @@ def test_agent_with_knowledge_with_no_crewai_knowledge():
mock_knowledge.query.assert_called_once()
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_with_only_crewai_knowledge():
mock_knowledge = MagicMock(spec=Knowledge)

View File

@@ -238,6 +238,27 @@ def test_lite_agent_returns_usage_metrics():
assert result.usage_metrics["total_tokens"] > 0
@pytest.mark.vcr(filter_headers=["authorization"])
def test_lite_agent_output_includes_messages():
"""Test that LiteAgentOutput includes messages from agent execution."""
llm = LLM(model="gpt-4o-mini")
agent = Agent(
role="Research Assistant",
goal="Find information about the population of Tokyo",
backstory="You are a helpful research assistant who can search for information about the population of Tokyo.",
llm=llm,
tools=[WebSearchTool()],
verbose=True,
)
result = agent.kickoff("What is the population of Tokyo?")
assert isinstance(result, LiteAgentOutput)
assert hasattr(result, "messages")
assert isinstance(result.messages, list)
assert len(result.messages) > 0
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.asyncio
async def test_lite_agent_returns_usage_metrics_async():

View File

@@ -0,0 +1,125 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
is the expected criteria for your final answer: hello\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '768'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA4xSTWvcMBC9+1cMOq/LrvcT30pCQ2hPPZW2wYylsa1EloQkZ7eE/e9F8nbtNCn0
YvC8eU/vzcxLBsCkYCUw3mHgvVX5Db+TN40v1p+/HZ5Ot4/16VibL18Pt7v+O7JFZJj6kXj4w/rA
TW8VBWn0CHNHGCiqrva7dbHe7TfLBPRGkIq01oZ8Y/JeapkXy2KTL/f56nBhd0Zy8qyEHxkAwEv6
Rp9a0ImVkLRSpSfvsSVWXpsAmDMqVhh6L31AHdhiArnRgXSyfg/aHIGjhlY+EyC00Tag9kdyAD/1
J6lRwcf0X0JHSpm5lKNm8Bjj6EGpGYBam4BxHCnEwwU5X20r01pnav8XlTVSS99VjtAbHS36YCxL
6DkDeEjjGV4lZtaZ3oYqmCdKz62261GPTVuZocUFDCagmtV328U7epWggFL52YAZR96RmKjTNnAQ
0syAbJb6rZv3tMfkUrf/Iz8BnJMNJCrrSEj+OvHU5ige7b/arlNOhpkn9yw5VUGSi5sQ1OCgxlNi
/pcP1FeN1C056+R4T42ttitRHzbYYM2yc/YbAAD//wMA8psF7l0DAAA=
headers:
CF-RAY:
- 99f1539c6ee7300b-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 15 Nov 2025 19:59:01 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=iJ7DXHm9JEv8bD0KtW7kldOwGHzDHimj_krrUoVmeWE-1763236741-1.0.1.1-xHKDPJseB3CipXlmYujRzoXEH1migUJ0tnSBSv5GTUQTcz5bUrq4zOGEEP0EBmf.EovzlSffbmbTILOP0JSuiNfHJaGxv2e0zdL11mrf93s;
path=/; expires=Sat, 15-Nov-25 20:29:01 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=oxDuGA6GZmxAwFshfsuJX0CY15NqcsDWeNUCWzgKh8s-1763236741049-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '423'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '442'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999832'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_40cbf724f6154e619aa343371e48c2e0
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,125 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
is the expected criteria for your final answer: hello\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '768'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4VtyQ/oFgRtkUvRS3tpA4EmV9K2FEmQVGwj8L8X
pFxLSVMgFwHa2RnO7O5zBsBIsgqY6HgQvVX5vfhM98Vpf1x+LT82VGzoW3n+cj7J7+ZBsUVkmMMv
FOEv64MwvVUYyOgRFg55wKi62m2LdbHdlcsE9EaiirTWhrw0eU+a8vVyXebLXb7aX9mdIYGeVfAj
AwB4Tt/oU0s8sQqSVqr06D1vkVW3JgDmjIoVxr0nH7gObDGBwuiAOll/AG2OILiGlp4QOLTRNnDt
j+gAfupPpLmCu/RfQYdKmbmUw2bwPMbRg1IzgGttAo/jSCEer8jlZluZ1jpz8K+orCFNvqsdcm90
tOiDsSyhlwzgMY1neJGYWWd6G+pgfmN6brUpRj02bWWGrq9gMIGrWX27WbyhV0sMnJSfDZgJLjqU
E3XaBh8kmRmQzVL/6+Yt7TE56fY98hMgBNqAsrYOJYmXiac2h/Fo/9d2m3IyzDy6JxJYB0IXNyGx
4YMaT4n5sw/Y1w3pFp11NN5TY+vNSh72JW/4gWWX7A8AAAD//wMA4G7eUl0DAAA=
headers:
CF-RAY:
- 99f1539888ef2db2-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 15 Nov 2025 19:59:00 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=XfT4seD2vDCBhKUjM9OKFn5pKK0guvewRLCuULoZnBg-1763236740-1.0.1.1-zPAXYvNJ5nm4SdMpIaKFFAF1Uu_TTX1J6Pz3NhGjhY8GWCM13UtG2dg_4zqAf4ag.ZiOr0jBFi64qTdzWDsB8i4GpXeY0YJ_1WGwFIh21JY;
path=/; expires=Sat, 15-Nov-25 20:29:00 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ggMXMo_t19yDC2ZcfQNnNeE8_tibkraG0hezFWQf3Xk-1763236740469-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '466'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '485'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999832'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999832'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_d62131d777d34f568bd37dcf3ecc3749
status:
code: 200
message: OK
version: 1

View File

@@ -0,0 +1,823 @@
interactions:
- request:
body: '{"trace_id": "REDACTED_TRACE_ID", "execution_type": "crew", "user_identifier":
null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name":
null, "crewai_version": "1.4.1", "privacy_level": "standard"}, "execution_metadata":
{"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count":
0, "execution_started_at": "2025-11-15T19:58:54.275699+00:00"}, "ephemeral_trace_id":
"REDACTED_EPHEMERAL_ID"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '488'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"REDACTED_UUID","ephemeral_trace_id": "REDACTED_EPHEMERAL_ID","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.1","privacy_level":"standard"},"created_at":"2025-11-15T19:58:54.413Z","updated_at":"2025-11-15T19:58:54.413Z","access_code":
"REDACTED_ACCESS_CODE","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '515'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 19:58:54 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"f189110ff0b9b1a9a6de911c8373b6cf"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.050437'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
is the expected criteria for your final answer: hello\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '768'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJNj9MwEL3nV4x8blDTz1VuuyuoQHBYcUKwiqb2JDE4Hst2WtCq/x05
7TZZWCQukTJv3vN7M/OUAQitRAlCthhl50x+L3d64z887I6fLW/D22b+KRZ3t18ePu7sQcwSg/ff
ScZn1hvJnTMUNdszLD1hpKRabDfLxXKzXa4GoGNFJtEaF/MV5522Ol/MF6t8vs2Lmwu7ZS0piBK+
ZgAAT8M3+bSKfooS5rPnSkchYEOivDYBCM8mVQSGoENEG8VsBCXbSHaw/h4sH0GihUYfCBCaZBvQ
hiN5gG/2nbZo4Hb4L6ElY3gq5anuA6Y4tjdmAqC1HDGNYwjxeEFOV9uGG+d5H/6gilpbHdrKEwa2
yWKI7MSAnjKAx2E8/YvEwnnuXKwi/6DhuWK9POuJcSsTdHEBI0c0k/pmPXtFr1IUUZswGbCQKFtS
I3XcBvZK8wTIJqn/dvOa9jm5ts3/yI+AlOQiqcp5Ulq+TDy2eUpH+6+265QHwyKQP2hJVdTk0yYU
1dib8ymJ8CtE6qpa24a88/p8T7Wr1oXa36ywxr3ITtlvAAAA//8DADWEgGFdAwAA
headers:
CF-RAY:
- 99f15376386adf9a-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 15 Nov 2025 19:58:55 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=9N8QMgVR0T8m_LdeyT4oWCaQR47O2ACGkH9wXpfPKl8-1763236735-1.0.1.1-8xseH3YJzZo2ypKXBqE14SRYMqgQ1HSsW4ayyXXngCD66TFqO2xnfd9OqOA3mNh8hmoRXr9SGuLn84hiEL95_w_RQXvRFQ.JQb7mFThffN4;
path=/; expires=Sat, 15-Nov-25 20:28:55 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=U_X_uM8Tk1B.1aiCr807RSOANcHTrF7LPQW1aUwSUCI-1763236735590-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1083'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1098'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999832'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_51e6f28672744e42b0cf17b175e98cad
status:
code: 200
message: OK
- request:
body: '{"events": [{"event_id": "REDACTED_EVENT_ID", "timestamp":
"2025-11-15T19:58:54.274122+00:00", "type": "crew_kickoff_started", "event_data":
{"timestamp": "2025-11-15T19:58:54.274122+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
"crew", "crew": null, "inputs": null}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T19:58:54.276149+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello", "expected_output": "hello", "task_name": "Say
hello", "context": "", "agent_role": "Test Agent", "task_id": "REDACTED_TASK_ID"}},
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T19:58:54.277520+00:00",
"type": "agent_execution_started", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"6ab5ba71-81ef-4aea-800a-a4e332976b23", "timestamp": "2025-11-15T19:58:54.277708+00:00",
"type": "llm_call_started", "event_data": {"timestamp": "2025-11-15T19:58:54.277708+00:00",
"type": "llm_call_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": "REDACTED_TASK_ID",
"task_name": "Say hello", "agent_id": "REDACTED_AGENT_ID",
"agent_role": "Test Agent", "from_task": null, "from_agent": null, "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Say hello\n\nThis is the expected criteria for your final answer: hello\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks":
["<crewai.utilities.token_counter_callback.TokenCalcHandler object at 0x10e737920>"],
"available_functions": null}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T19:58:55.617486+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-11-15T19:58:55.617486+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "REDACTED_TASK_ID", "task_name": "Say hello",
"agent_id": "REDACTED_AGENT_ID", "agent_role": "Test Agent",
"from_task": null, "from_agent": null, "messages": [{"role": "system", "content":
"You are Test Agent. Test backstory\nYour personal goal is: Test goal\nTo give
my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Say hello\n\nThis is the expected criteria for your
final answer: hello\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"response": "I now can give a great answer \nFinal Answer: hello", "call_type":
"<LLMCallType.LLM_CALL: ''llm_call''>", "model": "gpt-4o-mini"}}, {"event_id":
"6da05ee3-40a0-44d3-9070-58f83e91fb02", "timestamp": "2025-11-15T19:58:55.617749+00:00",
"type": "agent_execution_completed", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"323a901f-c31a-4937-aa83-99f80a195ec9", "timestamp": "2025-11-15T19:58:55.617956+00:00",
"type": "task_completed", "event_data": {"task_description": "Say hello", "task_name":
"Say hello", "task_id": "REDACTED_TASK_ID", "output_raw":
"hello", "output_format": "OutputFormat.RAW", "agent_role": "Test Agent"}},
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T19:58:55.620199+00:00",
"type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-11-15T19:58:55.620199+00:00",
"type": "crew_kickoff_completed", "source_fingerprint": null, "source_type":
null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description":
"Say hello", "name": "Say hello", "expected_output": "hello", "summary": "Say
hello...", "raw": "hello", "pydantic": null, "json_dict": null, "agent": "Test
Agent", "output_format": "raw", "messages": [{"role": "''system''", "content":
"''You are Test Agent. Test backstory\\nYour personal goal is: Test goal\\nTo
give my best complete final answer to the task respond using the exact following
format:\\n\\nThought: I now can give a great answer\\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\\n\\nI MUST use these formats, my job depends on it!''"}, {"role":
"''user''", "content": "''\\nCurrent Task: Say hello\\n\\nThis is the expected
criteria for your final answer: hello\\nyou MUST return the actual complete
content as the final answer, not a summary.\\n\\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\\n\\nThought:''"}, {"role": "''assistant''", "content": "''I now can
give a great answer \\nFinal Answer: hello''"}]}, "total_tokens": 165}}], "batch_metadata":
{"events_count": 8, "batch_sequence": 1, "is_final_batch": false}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '6047'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_UUID/events
response:
body:
string: '{"events_created":8,"ephemeral_trace_batch_id": "REDACTED_BATCH_ID"}'
headers:
Connection:
- keep-alive
Content-Length:
- '86'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 19:58:55 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"5763c4d7ea0188702ab3c06667edacb2"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.085717'
x-xss-protection:
- 1; mode=block
status:
code: 200
message: OK
- request:
body: '{"status": "completed", "duration_ms": 1545, "final_event_count": 8}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '68'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_UUID/finalize
response:
body:
string: '{"id":"REDACTED_UUID","ephemeral_trace_id": "REDACTED_EPHEMERAL_ID","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"completed","duration_ms":1545,"crewai_version":"1.4.1","total_events":8,"execution_context":{"crew_name":"crew","flow_name":null,"privacy_level":"standard","crewai_version":"1.4.1","crew_fingerprint":null},"created_at":"2025-11-15T19:58:54.413Z","updated_at":"2025-11-15T19:58:55.963Z","access_code":
"REDACTED_ACCESS_CODE","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '517'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 19:58:55 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"87272a0b299949ee15066ac5b6c288c8"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.040548'
x-xss-protection:
- 1; mode=block
status:
code: 200
message: OK
- request:
body: !!binary |
Ct8QCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSthAKEgoQY3Jld2FpLnRl
bGVtZXRyeRKcCAoQnBgYneZ/2zN+PxfURVYEhxIIl8jmYkveFbEqDENyZXcgQ3JlYXRlZDABOSBG
V8F3RngYQbD+XsF3RngYShkKDmNyZXdhaV92ZXJzaW9uEgcKBTEuNC4xShsKDnB5dGhvbl92ZXJz
aW9uEgkKBzMuMTIuMTBKLgoIY3Jld19rZXkSIgogZTU5ZjRhOTQ1MDMyOTJhYjg2NTVhODc4Nzlk
ZjNkMGVKMQoHY3Jld19pZBImCiRmNTFiYWY5YS0wOTliLTQ2ZjYtYTQxZS0zYjVkNTNmN2U3NzJK
OgoQY3Jld19maW5nZXJwcmludBImCiRlYTU0MGVkMC1mMmQxLTQwNDQtOGI5Zi1hNjI0MmY1NGYx
MjRKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNy
ZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSjsKG2Ny
ZXdfZmluZ2VycHJpbnRfY3JlYXRlZF9hdBIcChoyMDI1LTExLTE1VDE0OjU4OjU0LjI3MjkyMUrR
AgoLY3Jld19hZ2VudHMSwQIKvgJbeyJrZXkiOiAiMGMzZDYzYTY5MGUxM2Y1MTBkZTNjZDZkZmQz
MTgxNmIiLCAiaWQiOiAiNTQ4YzlkOWMtN2M4OS00NTcwLTg2MzUtMTU3OTc0ZDc1M2JlIiwgInJv
bGUiOiAiVGVzdCBBZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyNSwgIm1h
eF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8t
bWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlv
bj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/wEK
CmNyZXdfdGFza3MS8AEK7QFbeyJrZXkiOiAiMTdjYzlhYjJiMmQwYmIwY2RkMzZkNTNlMDUyYmEz
YTEiLCAiaWQiOiAiMGFjODNjNzktYmZiNS00MTc5LTk0NzAtMmI0OWIxNmUxM2I0IiwgImFzeW5j
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6
ICJUZXN0IEFnZW50IiwgImFnZW50X2tleSI6ICIwYzNkNjNhNjkwZTEzZjUxMGRlM2NkNmRmZDMx
ODE2YiIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEpwEChA/Ny+I8Uec4bmw/hRH3QdM
Egj4Fl8kb84nDCoMVGFzayBDcmVhdGVkMAE5yF54wXdGeBhBwAZ5wXdGeBhKLgoIY3Jld19rZXkS
IgogZTU5ZjRhOTQ1MDMyOTJhYjg2NTVhODc4NzlkZjNkMGVKMQoHY3Jld19pZBImCiRmNTFiYWY5
YS0wOTliLTQ2ZjYtYTQxZS0zYjVkNTNmN2U3NzJKOgoQY3Jld19maW5nZXJwcmludBImCiRlYTU0
MGVkMC1mMmQxLTQwNDQtOGI5Zi1hNjI0MmY1NGYxMjRKLgoIdGFza19rZXkSIgogMTdjYzlhYjJi
MmQwYmIwY2RkMzZkNTNlMDUyYmEzYTFKMQoHdGFza19pZBImCiQwYWM4M2M3OS1iZmI1LTQxNzkt
OTQ3MC0yYjQ5YjE2ZTEzYjRKOgoQdGFza19maW5nZXJwcmludBImCiQ4NTBjZTAyMS1mYmMxLTRk
MzEtYTA3Ny0xZDVmNjMzOWMyY2VKOwobdGFza19maW5nZXJwcmludF9jcmVhdGVkX2F0EhwKGjIw
MjUtMTEtMTVUMTQ6NTg6NTQuMjcyODY4SjsKEWFnZW50X2ZpbmdlcnByaW50EiYKJDUzMWExMTg3
LTZmOWEtNGNmMi1hYzMwLWUzZTczMWE4MzY5Y0oaCgphZ2VudF9yb2xlEgwKClRlc3QgQWdlbnR6
AhgBhQEAAQAAEuEDChCrg6pKIgwTTkf7+bOsNaasEgjUfxiqLjY0BCoOVGFzayBFeGVjdXRpb24w
ATlwPXnBd0Z4GEHg9nIReEZ4GEouCghjcmV3X2tleRIiCiBlNTlmNGE5NDUwMzI5MmFiODY1NWE4
Nzg3OWRmM2QwZUoxCgdjcmV3X2lkEiYKJGY1MWJhZjlhLTA5OWItNDZmNi1hNDFlLTNiNWQ1M2Y3
ZTc3Mko6ChBjcmV3X2ZpbmdlcnByaW50EiYKJGVhNTQwZWQwLWYyZDEtNDA0NC04YjlmLWE2MjQy
ZjU0ZjEyNEouCgh0YXNrX2tleRIiCiAxN2NjOWFiMmIyZDBiYjBjZGQzNmQ1M2UwNTJiYTNhMUox
Cgd0YXNrX2lkEiYKJDBhYzgzYzc5LWJmYjUtNDE3OS05NDcwLTJiNDliMTZlMTNiNEo7ChFhZ2Vu
dF9maW5nZXJwcmludBImCiQ1MzFhMTE4Ny02ZjlhLTRjZjItYWMzMC1lM2U3MzFhODM2OWNKGgoK
YWdlbnRfcm9sZRIMCgpUZXN0IEFnZW50SjoKEHRhc2tfZmluZ2VycHJpbnQSJgokODUwY2UwMjEt
ZmJjMS00ZDMxLWEwNzctMWQ1ZjYzMzljMmNlegIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '2146'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.38.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sat, 15 Nov 2025 19:58:59 GMT
status:
code: 200
message: OK
- request:
body: '{"events": [{"event_id": "REDACTED_EVENT_ID", "timestamp":
"2025-11-15T20:12:50.759077+00:00", "type": "crew_kickoff_started", "event_data":
{"timestamp": "2025-11-15T20:12:50.759077+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
"crew", "crew": null, "inputs": null}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T20:12:50.761789+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello", "expected_output": "hello", "task_name": "Say
hello", "context": "", "agent_role": "Test Agent", "task_id": "REDACTED_TASK_ID"}},
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:12:50.762556+00:00",
"type": "agent_execution_started", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"112efd06-87b7-4600-892f-3c96672571c6", "timestamp": "2025-11-15T20:12:50.762726+00:00",
"type": "llm_call_started", "event_data": {"timestamp": "2025-11-15T20:12:50.762726+00:00",
"type": "llm_call_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": "REDACTED_TASK_ID",
"task_name": "Say hello", "agent_id": "REDACTED_AGENT_ID",
"agent_role": "Test Agent", "from_task": null, "from_agent": null, "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Say hello\n\nThis is the expected criteria for your final answer: hello\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks":
["<crewai.utilities.token_counter_callback.TokenCalcHandler object at 0x10e8b5b20>"],
"available_functions": null}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T20:12:50.877587+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-11-15T20:12:50.877587+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "REDACTED_TASK_ID", "task_name": "Say hello",
"agent_id": "REDACTED_AGENT_ID", "agent_role": "Test Agent",
"from_task": null, "from_agent": null, "messages": [{"role": "system", "content":
"You are Test Agent. Test backstory\nYour personal goal is: Test goal\nTo give
my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Say hello\n\nThis is the expected criteria for your
final answer: hello\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"response": "I now can give a great answer \nFinal Answer: hello", "call_type":
"<LLMCallType.LLM_CALL: ''llm_call''>", "model": "gpt-4o-mini"}}, {"event_id":
"430a26b3-c38b-4f75-8656-412124a6df95", "timestamp": "2025-11-15T20:12:50.877724+00:00",
"type": "agent_execution_completed", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"a76bbe00-1cc7-44a8-9ec3-c4ed8fca948d", "timestamp": "2025-11-15T20:12:50.877830+00:00",
"type": "task_completed", "event_data": {"task_description": "Say hello", "task_name":
"Say hello", "task_id": "REDACTED_TASK_ID", "output_raw":
"hello", "output_format": "OutputFormat.RAW", "agent_role": "Test Agent"}},
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:12:50.879327+00:00",
"type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-11-15T20:12:50.879327+00:00",
"type": "crew_kickoff_completed", "source_fingerprint": null, "source_type":
null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description":
"Say hello", "name": "Say hello", "expected_output": "hello", "summary": "Say
hello...", "raw": "hello", "pydantic": null, "json_dict": null, "agent": "Test
Agent", "output_format": "raw", "messages": [{"role": "''system''", "content":
"''You are Test Agent. Test backstory\\nYour personal goal is: Test goal\\nTo
give my best complete final answer to the task respond using the exact following
format:\\n\\nThought: I now can give a great answer\\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\\n\\nI MUST use these formats, my job depends on it!''"}, {"role":
"''user''", "content": "''\\nCurrent Task: Say hello\\n\\nThis is the expected
criteria for your final answer: hello\\nyou MUST return the actual complete
content as the final answer, not a summary.\\n\\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\\n\\nThought:''"}, {"role": "''assistant''", "content": "''I now can
give a great answer \\nFinal Answer: hello''"}]}, "total_tokens": 165}}], "batch_metadata":
{"events_count": 8, "batch_sequence": 1, "is_final_batch": false}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '6047'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_EPHEMERAL_ID/events
response:
body:
string: '{"error":"Couldn''t find EphemeralTraceBatch with [WHERE \"ephemeral_trace_batches\".\"ephemeral_trace_id\"
= $1]","message":"Trace batch not found"}'
headers:
Connection:
- keep-alive
Content-Length:
- '148'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:12:51 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 869cd156-577e-4f89-a822-0cd097bfb011
x-runtime:
- '0.038867'
x-xss-protection:
- 1; mode=block
status:
code: 404
message: Not Found
- request:
body: '{"status": "failed", "failure_reason": "Error sending events to backend"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '73'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches/REDACTED_EPHEMERAL_ID
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Connection:
- keep-alive
Content-Length:
- '55'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:12:51 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 1d74da02-f5f2-4bdc-8c9e-51bc9d3aff98
x-runtime:
- '0.046789'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
version: 1
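
Read end to end, this cassette records the ephemeral tracing batch lifecycle: create a batch, append events, then finalize (the tail of the recording also shows the 404/401 failure path, which PATCHes a "failed" status). Below is a rough sketch of that sequence, using only the endpoints and payload fields visible above; authentication details and identifier values are placeholders, not part of the recording:

import requests

BASE = "https://app.crewai.com/crewai_plus/api/v1/tracing"
# Headers are trimmed to those visible in the recording; real calls
# presumably need organization/auth headers as well.
HEADERS = {"Content-Type": "application/json", "X-Crewai-Version": "1.4.1"}

# 1. Create an ephemeral trace batch (the recording shows HTTP 201).
batch = requests.post(
    f"{BASE}/ephemeral/batches",
    json={"trace_id": "<trace-id>", "execution_type": "crew",
          "ephemeral_trace_id": "<ephemeral-id>"},
    headers=HEADERS,
).json()

# 2. Append recorded events to the batch.
requests.post(
    f"{BASE}/ephemeral/batches/{batch['id']}/events",
    json={"events": [],  # event dicts shaped as in the request bodies above
          "batch_metadata": {"events_count": 0, "batch_sequence": 1,
                             "is_final_batch": False}},
    headers=HEADERS,
)

# 3. Finalize with status, duration, and final event count.
requests.patch(
    f"{BASE}/ephemeral/batches/{batch['id']}/finalize",
    json={"status": "completed", "duration_ms": 1545, "final_event_count": 8},
    headers=HEADERS,
)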

View File

@@ -0,0 +1,817 @@
interactions:
- request:
body: '{"trace_id": "REDACTED_TRACE_ID", "execution_type": "crew", "user_identifier":
null, "execution_context": {"crew_fingerprint": null, "crew_name": "crew", "flow_name":
null, "crewai_version": "1.4.1", "privacy_level": "standard"}, "execution_metadata":
{"expected_duration_estimate": 300, "agent_count": 0, "task_count": 0, "flow_method_count":
0, "execution_started_at": "2025-11-15T20:00:40.213233+00:00"}, "ephemeral_trace_id":
"REDACTED_EPHEMERAL_ID"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '488'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"REDACTED_UUID","ephemeral_trace_id": "REDACTED_EPHEMERAL_ID","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.1","privacy_level":"standard"},"created_at":"2025-11-15T20:00:40.347Z","updated_at":"2025-11-15T20:00:40.347Z","access_code":
"REDACTED_ACCESS_CODE","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '515'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:00:40 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"1dad6ea33b1bd62ea816884d05ca0842"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.046518'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
- request:
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
is the expected criteria for your final answer: hello\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '768'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4XlV1zfggBtekt76yMQVtRKoktxCZJKWgT+94KU
YyltCuQiQDs7w5ndfcoAhKrFAYTsMMje6vxGfjzykXdfzZe7z7eh54Jub77dvZf9vqjEIjK4OpIM
z6x3knurKSg2IywdYaCoWlzt1qv1br9ZJqDnmnSktTbkG857ZVS+Wq42+fIqL/ZndsdKkhcH+J4B
ADylb/RpavolDpC0UqUn77Elcbg0AQjHOlYEeq98QBPEYgIlm0AmWf8Ehh9BooFWPRAgtNE2oPGP
5AB+mA/KoIbr9H+AjrTmuZSjZvAY45hB6xmAxnDAOI4U4v6MnC62NbfWceX/oopGGeW70hF6NtGi
D2xFQk8ZwH0az/AisbCOexvKwD8pPVds16OemLYyQ1dnMHBAPavvtotX9MqaAirtZwMWEmVH9USd
toFDrXgGZLPU/7p5TXtMrkz7FvkJkJJsoLq0jmolXyae2hzFo/1f22XKybDw5B6UpDIocnETNTU4
6PGUhP/tA/Vlo0xLzjo13lNjy21RV/sNNliJ7JT9AQAA//8DANqYTe5dAwAA
headers:
CF-RAY:
- 99f1560c3f5d4809-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 15 Nov 2025 20:00:41 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=h.tA2Rq1WhYqakfMp30WNbqx91S5jvXxlyjIW8bMhHY-1763236841-1.0.1.1-V.a.LzWhmsyvoXIFirG2pejIlbZ7BiLfwdlv6dDF.QddisjnkoYsgBPhVnxl.GwDFVDKymer1bQK_6vSoHBaQIcV4MJ7YayMl9lLs0.UcFM;
path=/; expires=Sat, 15-Nov-25 20:30:41 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=8Td_UnVGEcigZt.Nhy9rEFpaW9pgP0QJpdzFdEoktJk-1763236841097-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '563'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '666'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999832'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999832'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_8e8e5bfc663840d68daf4ac70308eece
status:
code: 200
message: OK
- request:
body: '{"events": [{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:00:40.210936+00:00",
"type": "crew_kickoff_started", "event_data": {"timestamp": "2025-11-15T20:00:40.210936+00:00",
"type": "crew_kickoff_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
null, "agent_role": null, "crew_name": "crew", "crew": null, "inputs": null}},
{"event_id": "REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:00:40.213519+00:00",
"type": "agent_execution_started", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"REDACTED_EVENT_ID", "timestamp": "2025-11-15T20:00:40.213671+00:00", "type":
"llm_call_started", "event_data": {"timestamp": "2025-11-15T20:00:40.213671+00:00",
"type": "llm_call_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": "REDACTED_TASK_ID", "task_name": "Say
hello", "agent_id": "REDACTED_AGENT_ID", "agent_role": "Test Agent", "from_task":
null, "from_agent": null, "model": "gpt-4o-mini", "messages": [{"role": "system",
"content": "You are Test Agent. Test backstory\nYour personal goal is: Test
goal\nTo give my best complete final answer to the task respond using the exact
following format:\n\nThought: I now can give a great answer\nFinal Answer: Your
final answer must be the great and the most complete as possible, it must be
outcome described.\n\nI MUST use these formats, my job depends on it!"}, {"role":
"user", "content": "\nCurrent Task: Say hello\n\nThis is the expected criteria
for your final answer: hello\nyou MUST return the actual complete content as
the final answer, not a summary.\n\nBegin! This is VERY important to you, use
the tools available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"tools": null, "callbacks": ["<crewai.utilities.token_counter_callback.TokenCalcHandler
object at 0x108cbb5f0>"], "available_functions": null}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T20:00:41.117164+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-11-15T20:00:41.117164+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "REDACTED_TASK_ID", "task_name": "Say hello", "agent_id": "REDACTED_AGENT_ID",
"agent_role": "Test Agent", "from_task": null, "from_agent": null, "messages":
[{"role": "system", "content": "You are Test Agent. Test backstory\nYour personal
goal is: Test goal\nTo give my best complete final answer to the task respond
using the exact following format:\n\nThought: I now can give a great answer\nFinal
Answer: Your final answer must be the great and the most complete as possible,
it must be outcome described.\n\nI MUST use these formats, my job depends on
it!"}, {"role": "user", "content": "\nCurrent Task: Say hello\n\nThis is the
expected criteria for your final answer: hello\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\n\nThought:"}], "response": "I now can give a great answer \nFinal Answer:
hello", "call_type": "<LLMCallType.LLM_CALL: ''llm_call''>", "model": "gpt-4o-mini"}},
{"event_id": "1d32853b-04dd-49f1-9b0b-fca92a82ea0f", "timestamp": "2025-11-15T20:00:41.117412+00:00",
"type": "agent_execution_completed", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"3af2dbb3-6117-4df1-9dc8-3b4cbc1bb689", "timestamp": "2025-11-15T20:00:41.117869+00:00",
"type": "task_completed", "event_data": {"task_description": "Say hello", "task_name":
"Say hello", "task_id": "REDACTED_TASK_ID", "output_raw": "hello", "output_format":
"OutputFormat.RAW", "agent_role": "Test Agent"}}, {"event_id": "REDACTED_EVENT_ID",
"timestamp": "2025-11-15T20:00:41.119050+00:00", "type": "crew_kickoff_completed",
"event_data": {"timestamp": "2025-11-15T20:00:41.119050+00:00", "type": "crew_kickoff_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
"crew", "crew": null, "output": {"description": "Say hello", "name": "Say hello",
"expected_output": "hello", "summary": "Say hello...", "raw": "hello", "pydantic":
null, "json_dict": null, "agent": "Test Agent", "output_format": "raw", "messages":
[{"role": "''system''", "content": "''You are Test Agent. Test backstory\\nYour
personal goal is: Test goal\\nTo give my best complete final answer to the task
respond using the exact following format:\\n\\nThought: I now can give a great
answer\\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\\n\\nI MUST use these formats, my
job depends on it!''"}, {"role": "''user''", "content": "''\\nCurrent Task:
Say hello\\n\\nThis is the expected criteria for your final answer: hello\\nyou
MUST return the actual complete content as the final answer, not a summary.\\n\\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\\n\\nThought:''"}, {"role": "''assistant''",
"content": "''I now can give a great answer \\nFinal Answer: hello''"}]}, "total_tokens":
165}}], "batch_metadata": {"events_count": 7, "batch_sequence": 1, "is_final_batch":
false}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '5723'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_UUID/events
response:
body:
string: '{"events_created":7,"ephemeral_trace_batch_id": "REDACTED_BATCH_ID"}'
headers:
Connection:
- keep-alive
Content-Length:
- '86'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:00:41 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"e539cd458f6386627ec23f6f6a46a996"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.062954'
x-xss-protection:
- 1; mode=block
status:
code: 200
message: OK
- request:
body: '{"status": "completed", "duration_ms": 1070, "final_event_count": 7}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '68'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- REDACTED_ORG_UUID
X-Crewai-Version:
- 1.4.1
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_UUID/finalize
response:
body:
string: '{"id":"REDACTED_UUID","ephemeral_trace_id": "REDACTED_EPHEMERAL_ID","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"completed","duration_ms":1070,"crewai_version":"1.4.1","total_events":7,"execution_context":{"crew_name":"crew","flow_name":null,"privacy_level":"standard","crewai_version":"1.4.1","crew_fingerprint":null},"created_at":"2025-11-15T20:00:40.347Z","updated_at":"2025-11-15T20:00:41.423Z","access_code":
"REDACTED_ACCESS_CODE","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '517'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:00:41 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"de9bcb107d0382f1b309276d8fc39196"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- REDACTED_ORG_UUID
x-runtime:
- '0.045900'
x-xss-protection:
- 1; mode=block
status:
code: 200
message: OK
- request:
body: !!binary |
Ct8QCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSthAKEgoQY3Jld2FpLnRl
bGVtZXRyeRKcCAoQvXQY4SQ+2Mlfdsll/QHJghII0Bd15ezW7r4qDENyZXcgQ3JlYXRlZDABOShe
q2uQRngYQZDhtWuQRngYShkKDmNyZXdhaV92ZXJzaW9uEgcKBTEuNC4xShsKDnB5dGhvbl92ZXJz
aW9uEgkKBzMuMTIuMTBKLgoIY3Jld19rZXkSIgogZTU5ZjRhOTQ1MDMyOTJhYjg2NTVhODc4Nzlk
ZjNkMGVKMQoHY3Jld19pZBImCiQ2NWVkNDMyNS02NTE4LTRiMzUtOGQ3OS02NzA2ZDc5OTY0YWVK
OgoQY3Jld19maW5nZXJwcmludBImCiQ1MmM5ODNiOC02OTcwLTQ2ZmMtYmQ1YS0wY2MwNzY1M2Rk
NDhKHAoMY3Jld19wcm9jZXNzEgwKCnNlcXVlbnRpYWxKEQoLY3Jld19tZW1vcnkSAhAAShoKFGNy
ZXdfbnVtYmVyX29mX3Rhc2tzEgIYAUobChVjcmV3X251bWJlcl9vZl9hZ2VudHMSAhgBSjsKG2Ny
ZXdfZmluZ2VycHJpbnRfY3JlYXRlZF9hdBIcChoyMDI1LTExLTE1VDE1OjAwOjQwLjIwOTg4NUrR
AgoLY3Jld19hZ2VudHMSwQIKvgJbeyJrZXkiOiAiMGMzZDYzYTY5MGUxM2Y1MTBkZTNjZDZkZmQz
MTgxNmIiLCAiaWQiOiAiYjE3OTNkNmYtN2Q4My00Y2YzLWE1NzQtNDE4ZGJkZWNmNzJmIiwgInJv
bGUiOiAiVGVzdCBBZ2VudCIsICJ2ZXJib3NlPyI6IGZhbHNlLCAibWF4X2l0ZXIiOiAyNSwgIm1h
eF9ycG0iOiBudWxsLCAiZnVuY3Rpb25fY2FsbGluZ19sbG0iOiAiIiwgImxsbSI6ICJncHQtNG8t
bWluaSIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2RlX2V4ZWN1dGlv
bj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6IFtdfV1K/wEK
CmNyZXdfdGFza3MS8AEK7QFbeyJrZXkiOiAiMTdjYzlhYjJiMmQwYmIwY2RkMzZkNTNlMDUyYmEz
YTEiLCAiaWQiOiAiOTUyY2ZmYzItNjVjNi00ZGMzLTk0MjItMjJiNjk0ZWJjNDU0IiwgImFzeW5j
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdlbnRfcm9sZSI6
ICJUZXN0IEFnZW50IiwgImFnZW50X2tleSI6ICIwYzNkNjNhNjkwZTEzZjUxMGRlM2NkNmRmZDMx
ODE2YiIsICJ0b29sc19uYW1lcyI6IFtdfV16AhgBhQEAAQAAEpwEChCNBcmqTbiktztgYNe6R2lF
EgiTrCx+R/HhAioMVGFzayBDcmVhdGVkMAE5uMi/a5BGeBhB+GTAa5BGeBhKLgoIY3Jld19rZXkS
IgogZTU5ZjRhOTQ1MDMyOTJhYjg2NTVhODc4NzlkZjNkMGVKMQoHY3Jld19pZBImCiQ2NWVkNDMy
NS02NTE4LTRiMzUtOGQ3OS02NzA2ZDc5OTY0YWVKOgoQY3Jld19maW5nZXJwcmludBImCiQ1MmM5
ODNiOC02OTcwLTQ2ZmMtYmQ1YS0wY2MwNzY1M2RkNDhKLgoIdGFza19rZXkSIgogMTdjYzlhYjJi
MmQwYmIwY2RkMzZkNTNlMDUyYmEzYTFKMQoHdGFza19pZBImCiQ5NTJjZmZjMi02NWM2LTRkYzMt
OTQyMi0yMmI2OTRlYmM0NTRKOgoQdGFza19maW5nZXJwcmludBImCiQyMTM3NzZkZC04MDMwLTQ1
ODYtYmI1MC02NjNiYjI0NjAwNWJKOwobdGFza19maW5nZXJwcmludF9jcmVhdGVkX2F0EhwKGjIw
MjUtMTEtMTVUMTU6MDA6NDAuMjA5ODQwSjsKEWFnZW50X2ZpbmdlcnByaW50EiYKJDVmMmJlOWQw
LTZiMjQtNDFiYy05YzQyLTI0ZjdlOTM3MjJjYkoaCgphZ2VudF9yb2xlEgwKClRlc3QgQWdlbnR6
AhgBhQEAAQAAEuEDChBC+bce4EVDxB/d79LFgX4NEghWvN23SKW/0SoOVGFzayBFeGVjdXRpb24w
ATnYk8BrkEZ4GEHI1LihkEZ4GEouCghjcmV3X2tleRIiCiBlNTlmNGE5NDUwMzI5MmFiODY1NWE4
Nzg3OWRmM2QwZUoxCgdjcmV3X2lkEiYKJDY1ZWQ0MzI1LTY1MTgtNGIzNS04ZDc5LTY3MDZkNzk5
NjRhZUo6ChBjcmV3X2ZpbmdlcnByaW50EiYKJDUyYzk4M2I4LTY5NzAtNDZmYy1iZDVhLTBjYzA3
NjUzZGQ0OEouCgh0YXNrX2tleRIiCiAxN2NjOWFiMmIyZDBiYjBjZGQzNmQ1M2UwNTJiYTNhMUox
Cgd0YXNrX2lkEiYKJDk1MmNmZmMyLTY1YzYtNGRjMy05NDIyLTIyYjY5NGViYzQ1NEo7ChFhZ2Vu
dF9maW5nZXJwcmludBImCiQ1ZjJiZTlkMC02YjI0LTQxYmMtOWM0Mi0yNGY3ZTkzNzIyY2JKGgoK
YWdlbnRfcm9sZRIMCgpUZXN0IEFnZW50SjoKEHRhc2tfZmluZ2VycHJpbnQSJgokMjEzNzc2ZGQt
ODAzMC00NTg2LWJiNTAtNjYzYmIyNDYwMDViegIYAYUBAAEAAA==
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '2146'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.38.0
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Sat, 15 Nov 2025 20:00:44 GMT
status:
code: 200
message: OK
- request:
body: '{"events": [{"event_id": "6a66ce15-fdb3-490b-a09b-7724817d0116", "timestamp":
"2025-11-15T20:15:51.057965+00:00", "type": "crew_kickoff_started", "event_data":
{"timestamp": "2025-11-15T20:15:51.057965+00:00", "type": "crew_kickoff_started",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": null, "task_name": null, "agent_id": null, "agent_role": null, "crew_name":
"crew", "crew": null, "inputs": null}}, {"event_id": "15f2b75b-c7bb-48d1-8f61-faec2736da5d",
"timestamp": "2025-11-15T20:15:51.059954+00:00", "type": "task_started", "event_data":
{"task_description": "Say hello", "expected_output": "hello", "task_name": "Say
hello", "context": "", "agent_role": "Test Agent", "task_id": "bbb08fd7-2580-43a8-bc71-5e0c08c7cc61"}},
{"event_id": "eb90a87c-523c-40d6-b996-01706cbf8844", "timestamp": "2025-11-15T20:15:51.061205+00:00",
"type": "agent_execution_started", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"862c2b07-d82a-4f02-9c99-519292679a87", "timestamp": "2025-11-15T20:15:51.061443+00:00",
"type": "llm_call_started", "event_data": {"timestamp": "2025-11-15T20:15:51.061443+00:00",
"type": "llm_call_started", "source_fingerprint": null, "source_type": null,
"fingerprint_metadata": null, "task_id": "bbb08fd7-2580-43a8-bc71-5e0c08c7cc61",
"task_name": "Say hello", "agent_id": "82ee52ae-9eba-4648-877b-8cf2fc1624ae",
"agent_role": "Test Agent", "from_task": null, "from_agent": null, "model":
"gpt-4o-mini", "messages": [{"role": "system", "content": "You are Test Agent.
Test backstory\nYour personal goal is: Test goal\nTo give my best complete final
answer to the task respond using the exact following format:\n\nThought: I now
can give a great answer\nFinal Answer: Your final answer must be the great and
the most complete as possible, it must be outcome described.\n\nI MUST use these
formats, my job depends on it!"}, {"role": "user", "content": "\nCurrent Task:
Say hello\n\nThis is the expected criteria for your final answer: hello\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "tools": null, "callbacks":
["<crewai.utilities.token_counter_callback.TokenCalcHandler object at 0x10d617f50>"],
"available_functions": null}}, {"event_id": "fff5720d-9167-48cf-9196-9ee96f765688",
"timestamp": "2025-11-15T20:15:51.175710+00:00", "type": "llm_call_completed",
"event_data": {"timestamp": "2025-11-15T20:15:51.175710+00:00", "type": "llm_call_completed",
"source_fingerprint": null, "source_type": null, "fingerprint_metadata": null,
"task_id": "bbb08fd7-2580-43a8-bc71-5e0c08c7cc61", "task_name": "Say hello",
"agent_id": "82ee52ae-9eba-4648-877b-8cf2fc1624ae", "agent_role": "Test Agent",
"from_task": null, "from_agent": null, "messages": [{"role": "system", "content":
"You are Test Agent. Test backstory\nYour personal goal is: Test goal\nTo give
my best complete final answer to the task respond using the exact following
format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
"content": "\nCurrent Task: Say hello\n\nThis is the expected criteria for your
final answer: hello\nyou MUST return the actual complete content as the final
answer, not a summary.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],
"response": "I now can give a great answer \nFinal Answer: hello", "call_type":
"<LLMCallType.LLM_CALL: ''llm_call''>", "model": "gpt-4o-mini"}}, {"event_id":
"1ce38e05-20f8-4f6b-b303-720dbcbb73b2", "timestamp": "2025-11-15T20:15:51.175899+00:00",
"type": "agent_execution_completed", "event_data": {"agent_role": "Test Agent",
"agent_goal": "Test goal", "agent_backstory": "Test backstory"}}, {"event_id":
"dca0b4dd-dcfe-4002-9251-56cde6855f33", "timestamp": "2025-11-15T20:15:51.176016+00:00",
"type": "task_completed", "event_data": {"task_description": "Say hello", "task_name":
"Say hello", "task_id": "bbb08fd7-2580-43a8-bc71-5e0c08c7cc61", "output_raw":
"hello", "output_format": "OutputFormat.RAW", "agent_role": "Test Agent"}},
{"event_id": "7e3993e7-e729-43a9-af63-b1429d0d2abc", "timestamp": "2025-11-15T20:15:51.177161+00:00",
"type": "crew_kickoff_completed", "event_data": {"timestamp": "2025-11-15T20:15:51.177161+00:00",
"type": "crew_kickoff_completed", "source_fingerprint": null, "source_type":
null, "fingerprint_metadata": null, "task_id": null, "task_name": null, "agent_id":
null, "agent_role": null, "crew_name": "crew", "crew": null, "output": {"description":
"Say hello", "name": "Say hello", "expected_output": "hello", "summary": "Say
hello...", "raw": "hello", "pydantic": null, "json_dict": null, "agent": "Test
Agent", "output_format": "raw", "messages": [{"role": "''system''", "content":
"''You are Test Agent. Test backstory\\nYour personal goal is: Test goal\\nTo
give my best complete final answer to the task respond using the exact following
format:\\n\\nThought: I now can give a great answer\\nFinal Answer: Your final
answer must be the great and the most complete as possible, it must be outcome
described.\\n\\nI MUST use these formats, my job depends on it!''"}, {"role":
"''user''", "content": "''\\nCurrent Task: Say hello\\n\\nThis is the expected
criteria for your final answer: hello\\nyou MUST return the actual complete
content as the final answer, not a summary.\\n\\nBegin! This is VERY important
to you, use the tools available and give your best Final Answer, your job depends
on it!\\n\\nThought:''"}, {"role": "''assistant''", "content": "''I now can
give a great answer \\nFinal Answer: hello''"}]}, "total_tokens": 165}}], "batch_metadata":
{"events_count": 8, "batch_sequence": 1, "is_final_batch": false}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '6047'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches/REDACTED_EPHEMERAL_ID/events
response:
body:
string: '{"error":"Couldn''t find EphemeralTraceBatch with [WHERE \"ephemeral_trace_batches\".\"ephemeral_trace_id\"
= $1]","message":"Trace batch not found"}'
headers:
Connection:
- keep-alive
Content-Length:
- '148'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:15:51 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 255abbea-b49c-4dcc-ade5-3e16fd59277d
x-runtime:
- '0.050642'
x-xss-protection:
- 1; mode=block
status:
code: 404
message: Not Found
- request:
body: '{"status": "failed", "failure_reason": "Error sending events to backend"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '73'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 1.4.1
method: PATCH
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches/REDACTED_EPHEMERAL_ID
response:
body:
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Connection:
- keep-alive
Content-Length:
- '55'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 20:15:51 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 7bbda7a6-5a8e-4dfc-bcef-fe9b8bff7532
x-runtime:
- '0.042800'
x-xss-protection:
- 1; mode=block
status:
code: 401
message: Unauthorized
version: 1
View File
@@ -1,30 +1,30 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Test task\n\nThis
depends on it!"},{"role":"user","content":"\nCurrent Task: Test task\n\nThis
is the expected criteria for your final answer: test output\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"]}'
your job depends on it!\n\nThought:"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- gzip, deflate
connection:
- keep-alive
content-length:
- '812'
- '774'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
@@ -34,33 +34,37 @@ interactions:
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
- 1.109.1
x-stainless-read-timeout:
- '600.0'
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFLLbtswELzrKxY8W4WV+JHoVgR95NJD4UvaBgJDrSS2FJclV3bSwP9e
kHYsuU2BXghwZ2c4s8vnDEDoWpQgVCdZ9c7kNx+4v7vb7jafnrrPX25/vtObX48f1Q31m+uFmEUG
PXxHxS+sN4p6Z5A12QOsPErGqFqsl1fF4nJdzBPQU40m0lrH+YLyXludX8wvFvl8nRdXR3ZHWmEQ
JXzNAACe0xl92hofRQlJK1V6DEG2KMpTE4DwZGJFyBB0YGlZzEZQkWW0yfotWNqBkhZavUWQ0Ebb
IG3YoQf4Zt9rKw28TfcSNhgYaGA3nAl6bIYgYyg7GDMBpLXEMg4lRbk/IvuTeUOt8/QQ/qCKRlsd
usqjDGSj0cDkREL3GcB9GtJwlls4T73jiukHpueK5eKgJ8bdTNDLI8jE0kzqq/XsFb2qRpbahMmY
hZKqw3qkjjuRQ61pAmST1H+7eU37kFzb9n/kR0ApdIx15TzWWp0nHts8xq/7r7bTlJNhEdBvtcKK
Nfq4iRobOZjD/kV4Cox91WjbondeH35V46rlai6bFS6X1yLbZ78BAAD//wMAZdfoWWMDAAA=
H4sIAAAAAAAAAwAAAP//jFTBjhs3DL37K4i59DI2bHe9m/rWBCmQFkWLdlEgbQODK3FmlNWQU5Hj
2A323wNpvGtvs4deBiM9PurxUdTnGUAVfLWFynVorh/i/I376X3bxHd//LJa/eZXt4F/bOjPn39d
/v72zb9VnRly95GcPbIWTvohkgXhCXaJ0ChnXd1cf7verDbr6wL04ilmWjvY/ErmfeAwXy/XV/Pl
zXz16sTuJDjSagt/zQAAPpdv1smeDtUWlvXjTk+q2FK1fQoCqJLEvFOhalBDtqo+g07YiIv0d8Dy
CRwytGFPgNBm2YCsnygB/M0/BMYI35f1Fm47AjoM5Iw8uBSMUkBoJIF1BE2JPXGDggkMSfbBE2R3
EnXEmo8J3EjqMZsFwoWrY7ETEsVsW+bmbSM1MNT7Bdx2QSGwi6On/DP3NFgHyBiPGrTOVNojG9AB
cy+0BiaX3UlH8GhYA7IHFwlTriIiFwkK1qGBQ6P0eG6x6GAgzSRBRhtGWxQDMPSn6oh1TDTRaU/p
CKjZnELL6lHvc6iTPaVcVCdJxraLx6xWx2iBWwiTA72oATUNOSutYH/2qayLrYOohrtIC3h9hEbc
qDnFZKJOPgsTm9Zft0Q7GaMHFgPheISeyCbzB3KhCZc9vRsNMKoAHRyRP3V98qsGT72wWsJSgIuY
gh1rGBK5oEH45PQ0EsSkJ4/R+0SqpE/2fKOQ6J8xJOqz6ucXJR4Xl/c2UTMq5tnhMcYLAJnlpC1P
zIcT8vA0I1HaIcmd/odaNYGDdrtEqMJ5HtRkqAr6MAP4UGZxfDZe1ZCkH2xnck/luNXmaspXnZ+A
C3T16oSaGMYzsL5Z1y8k3HkyDFEvxrly6DryZ+p59nH0QS6A2UXZX8t5KfdUeuD2/6Q/A87RYOR3
QyIf3POSz2GJPpan4uWwJ5uL4Eop7YOjnQVKuRWeGhzj9HBVelSjftcEbikNKUyvVzPsNtdLbK5p
s/mumj3MvgAAAP//AwAmD0HmywUAAA==
headers:
CF-RAY:
- 980b9e0c5fa516a0-SJC
- 99f2bc8f6f4dfab6-SJC
Connection:
- keep-alive
Content-Encoding:
@@ -68,14 +72,14 @@ interactions:
Content-Type:
- application/json
Date:
- Wed, 17 Sep 2025 21:15:11 GMT
- Sun, 16 Nov 2025 00:05:27 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=w6UZxbAZgYg9EFkKPfrSbMK97MB4jfs7YyvcEmgkvak-1758143711-1.0.1.1-j7YC1nvoMKxYK0T.5G2XDF6TXUCPu_HUs4YO9v65r3NHQFIcOaHbQXX4vqabSgynL2tZy23pbZgD8Cdmxhdw9dp4zkAXhU.imP43_pw4dSE;
path=/; expires=Wed, 17-Sep-25 21:45:11 GMT; domain=.api.openai.com; HttpOnly;
- __cf_bm=REDACTED;
path=/; expires=Sun, 16-Nov-25 00:35:27 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ij9Q8tB7sj2GczANlJ7gbXVjj6hMhz1iVb6oGHuRYu8-1758143711202-0.0.1.1-604800000;
- _cfuvid=REDACTED;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
@@ -90,15 +94,15 @@ interactions:
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
- test-org
openai-processing-ms:
- '462'
- '1493'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
- proj_test123
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '665'
- '1733'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
@@ -108,11 +112,11 @@ interactions:
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
- '149999832'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999830'
- '149999832'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
@@ -120,7 +124,7 @@ interactions:
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_04536db97c8c4768a200e38c1368c176
- req_test123
status:
code: 200
message: OK
View File
@@ -1,103 +1,4 @@
interactions:
- request:
body: '{"trace_id": "9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "1.3.0", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-06T15:58:15.778396+00:00"},
"ephemeral_trace_id": "9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '488'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.3.0
X-Crewai-Version:
- 1.3.0
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"f303021e-f1a0-4fd8-9c7d-8ba6779f8ad3","ephemeral_trace_id":"9d9d9d14-e5bc-44bc-8cfc-3df9ba4e6055","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.3.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.3.0","privacy_level":"standard"},"created_at":"2025-11-06T15:58:16.189Z","updated_at":"2025-11-06T15:58:16.189Z","access_code":"TRACE-c2990cd4d4","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '515'
Content-Type:
- application/json; charset=utf-8
Date:
- Thu, 06 Nov 2025 15:58:16 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"8df0b730688b8bc094b74c66a6293578"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 38352441-7508-4e1e-9bff-77d1689dffdf
x-runtime:
- '0.085540'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
- request:
body: '{"messages":[{"role":"system","content":"Your goal is to rewrite the user
query so that it is optimized for retrieval from a vector database. Consider
@@ -115,7 +16,7 @@ interactions:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- gzip, deflate
connection:
- keep-alive
content-length:
@@ -143,23 +44,23 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJBbtswELzrFQQvvViBLMuy42sObYEWKIoiQFMEAkOu5G0oLkGu0xaB
/15QciwlTYFceODsDGeG+5gJIdHInZB6r1j33uZX33+1H78y9tvVt+Lmpv68KrfXX96Xnz7cu61c
JAbd/QTNT6wLTb23wEhuhHUAxZBUl5u6rKqqvqwHoCcDNtE6z3lFeY8O87Ioq7zY5MuTuN4Taohy
J35kQgjxOJzJpzPwW+5EsXi66SFG1YHcnYeEkIFsupEqRoysHMvFBGpyDG6wfo0G+V0UrXqggAxC
k6VwMZ8O0B6iSo7dwdoZoJwjVinx4PP2hBzPzix1PtBdfEGVLTqM+yaAiuSSi8jk5YAeMyFuhwYO
z0JJH6j33DDdw/DccrMa9eRU/ISuTxgTKzsnbRevyDUGWKGNswqlVnoPZqJOfauDQZoB2Sz0v2Ze
0x6Do+veIj8BWoNnMI0PYFA/DzyNBUhr+b+xc8mDYRkhPKCGhhFC+ggDrTrYcVlk/BMZ+qZF10Hw
AceNaX2zrgvV1rBeX8rsmP0FAAD//wMA5SIzeT8DAAA=
H4sIAAAAAAAAAwAAAP//jFLBTtwwFLznKyxfetmg3YXspnutCmpVIS70UqHI2C/JK46fZb+sQGj/
HTlZNqGA1IsPnjfjmfF7zoSQaOROSN0q1p23+Te9rh8vr67tj+99Wdw8NDc/Xdy32KvbX71cJAbd
/wXNr6wzTZ23wEhuhHUAxZBUV9vN+apcb8tiADoyYBOt8ZxfUN6hw3y9XF/ky22+Ko/sllBDlDvx
JxNCiOfhTD6dgUe5E8vF600HMaoG5O40JIQMZNONVDFiZOVYLiZQk2Nwg/XfaJC/RFGrPQVkEJos
hbP5dIC6jyo5dr21M0A5R6xS4sHn3RE5nJxZanyg+/gPVdboMLZVABXJJReRycsBPWRC3A0N9G9C
SR+o81wxPcDw3Gp7PurJqfgJLY4YEys7J5WLD+QqA6zQxlmFUivdgpmoU9+qN0gzIJuFfm/mI+0x
OLrmf+QnQGvwDKbyAQzqt4GnsQBpLT8bO5U8GJYRwh41VIwQ0kcYqFVvx2WR8SkydFWNroHgA44b
U/uq2CxVvYGi+CqzQ/YCAAD//wMAZMa5Sz8DAAA=
headers:
CF-RAY:
- 99a5ca96bb1443e7-EWR
- 99ec2e536dcc3c7d-SJC
Connection:
- keep-alive
Content-Encoding:
@@ -167,12 +68,12 @@ interactions:
Content-Type:
- application/json
Date:
- Thu, 06 Nov 2025 15:58:16 GMT
- Sat, 15 Nov 2025 04:59:45 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=REDACTED;
path=/; expires=Thu, 06-Nov-25 16:28:16 GMT; domain=.api.openai.com; HttpOnly;
path=/; expires=Sat, 15-Nov-25 05:29:45 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=REDACTED;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
@@ -189,31 +90,37 @@ interactions:
cf-cache-status:
- DYNAMIC
openai-organization:
- user-REDACTED
- REDACTED_ORG
openai-processing-ms:
- '235'
- '418'
openai-project:
- proj_REDACTED
- REDACTED_PROJECT
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '420'
- '434'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '10000'
- '30000'
x-ratelimit-limit-tokens:
- '200000'
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999785'
x-ratelimit-remaining-requests:
- '9999'
- '29999'
x-ratelimit-remaining-tokens:
- '199785'
- '149999785'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 8.64s
- 2ms
x-ratelimit-reset-tokens:
- 64ms
- 0s
x-request-id:
- req_9810e9721aa9463c930414ab5174ab61
- REDACTED_REQUEST_ID
status:
code: 200
message: OK
@@ -233,7 +140,7 @@ interactions:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
- gzip, deflate
connection:
- keep-alive
content-length:
@@ -264,25 +171,26 @@ interactions:
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFPBahsxEL37KwZderGN7dqO41vaUghtT4FCacIiS7PrSbQaVZq1swT/
e9HayTptCr0ING/e6M2b0dMAQJFVa1Bmq8XUwY0+/tiXXy4/2BDN/p7izdY4/vrp29Wvmxbv1TAz
eHOPRp5ZY8N1cCjE/gibiFowV51eLGfz+Xx5ueqAmi26TKuCjOY8qsnTaDaZzUeTi9F0dWJvmQwm
tYafAwCAp+7MOr3FR7WGyfA5UmNKukK1fkkCUJFdjiidEiXRXtSwBw17Qd9JvwbPezDaQ0U7BA1V
lg3apz1GgFv/mbx2cNXd1/CdLMm7BKXecSRBMOw4AiXwLBCajSPjWrBsmhq9oAWOsCeLroUHz3s/
husSWm5gq3cIKaChkgx0ih4lZ1sUTS6B3nAjxweHcA21bmGDoDcOQRhC5B3ZLLjmiJApHNFCxBTY
Jxyf9xuxbJLOnvvGuTNAe8+i88w6p+9OyOHFW8dViLxJf1BVSZ7StoioE/vsYxIOqkMPA4C7bobN
q7GoELkOUgg/YPfcdLk61lP96vTofHEChUW7Pj6bvh++Ua842Xa2Bcpos0XbU/uV0Y0lPgMGZ13/
reat2sfOyVf/U74HjMEgaIsQ0ZJ53XGfFjH/rH+lvbjcCVYJ444MFkIY8yQslrpxx31XqU2CdVGS
rzCGSMelL0OxWE50ucTF4lINDoPfAAAA//8DAPFGfbMCBAAA
H4sIAAAAAAAAAwAAAP//jFNNbxNBDL3nV1hz4bKp8tGkITdEBVRC4oLgAFXkzHg3prP2aGY2aaj6
39Fu0mxaisRlpfXze7bHzw8DAMPOLMHYDWZbBz98byfl/bW9mcrH69GX37Kd8v6z/X63Ubz/aoqW
oetfZPMT68JqHTxlVjnANhJmalXHV/PpeDG5Wsw6oFZHvqVVIQ8vdViz8HAymlwOR1fD8eLI3ihb
SmYJPwYAAA/dt+1THN2bJYyKp0hNKWFFZnlKAjBRfRsxmBKnjJJN0YNWJZN0rd+A6A4sClS8JUCo
2rYBJe0oAvyUDyzo4V33v4Rv7Di/SVDiViNnAqteI3AC0QyhWXu2fg9ObVOTZHKACTh3BbYY97DG
RA5UIFBM2kqHSCVFEkvpAj7pjrYUC7Ba1yov6iTAWqUCFsdbdg36BFpmEmCxvnEEa99Q0c5AUgCK
g0iugHWTIStYlZJjfRoiBbJcsn1RpQAVgp023oEQuSM1NT4DQiTPuPYESZtoCTSC40g2+z1guoMN
1xfnbx2pbBK2+5bG+zMARTRj65duy7dH5PG0V69ViLpOL6imZOG0WUXCpNLuMGUNpkMfBwC3nX+a
Z5YwIWod8irrHXXlxvPFQc/0tu3R+fwIZs3o+/hkelm8ordylJF9OnOgsWg35Hpqb1dsHOsZMDib
+u9uXtM+TM5S/Y98D1hLIZNbhUiO7fOJ+7RI7VX/K+30yl3DJlHcsqVVZortJhyV2PjDrZm0T5nq
VclSUQyRDwdXhtVsPsJyTrPZWzN4HPwBAAD//wMAtb7X3X4EAAA=
headers:
CF-RAY:
- 99a5ca9c5ef543e7-EWR
- 99ec2e59baca3c7d-SJC
Connection:
- keep-alive
Content-Encoding:
@@ -290,7 +198,7 @@ interactions:
Content-Type:
- application/json
Date:
- Thu, 06 Nov 2025 15:58:19 GMT
- Sat, 15 Nov 2025 04:59:47 GMT
Server:
- cloudflare
Strict-Transport-Security:
@@ -306,31 +214,37 @@ interactions:
cf-cache-status:
- DYNAMIC
openai-organization:
- user-REDACTED
- REDACTED_ORG
openai-processing-ms:
- '1326'
- '1471'
openai-project:
- proj_REDACTED
- REDACTED_PROJECT
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1754'
- '1488'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '10000'
- '30000'
x-ratelimit-limit-tokens:
- '200000'
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999805'
x-ratelimit-remaining-requests:
- '9998'
- '29999'
x-ratelimit-remaining-tokens:
- '199803'
- '149999802'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 15.913s
- 2ms
x-ratelimit-reset-tokens:
- 59ms
- 0s
x-request-id:
- req_f975e16b666e498b8bcfdfab525f71b3
- REDACTED_REQUEST_ID
status:
code: 200
message: OK
View File
@@ -0,0 +1,261 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Research Assistant. You
are a helpful research assistant who can search for information about the population
of Tokyo.\nYour personal goal is: Find information about the population of Tokyo\n\nYou
ONLY have access to the following tools, and should NEVER make up tools that
are not listed here:\n\nTool Name: search_web\nTool Arguments: {''query'': {''description'':
None, ''type'': ''str''}}\nTool Description: Search the web for information
about a topic.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [search_web], just the name, exactly as it''s written.\nAction Input:
the input to the action, just a simple JSON object, enclosed in curly braces,
using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
all necessary information is gathered, return the following format:\n\n```\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n```"},{"role":"user","content":"What is the population of Tokyo?"}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1160'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jJM9b9swEIZ3/YoDZzvwd2xtRTI0HZolcIcqkGnqJLGheCx5Suoa/u+F
5A/JTQp00XDPva/ui/sIQOhMxCBUKVlVzgzv5Lf7fDtbmcfRV1M9/l5/Wa/N6/r+7fNydycGjYK2
P1DxWXWjqHIGWZM9YuVRMjau49vFZDleLVbzFlSUoWlkhePhjIaVtno4GU1mw9HtcLw8qUvSCoOI
4XsEALBvv02dNsNfIobR4BypMARZoIgvSQDCk2kiQoagA0vLYtBBRZbRtqVvNpvEPpVUFyXH8AAW
MQMmCCi9KiEnD1wiGMkYGLTNyVeyaRI8FtJn2hZtgiNXmyOgHJ7oZUc3if2kmkh8ckvfcHuOwYN1
NcewT8TPGv0uEXEiVO09Wv7IDCajyTQRh8RuNpt+Lx7zOshmnrY2pgektcStSTvF5xM5XOZmqHCe
tuEvqci11aFMPcpAtplRYHKipYcI4LndT301cuE8VY5TphdsfzeZnvYjurPo6HR5gkwsTU+1OIMr
vzRDltqE3oaFkqrErJN25yDrTFMPRL2u31fzkfexc22L/7HvgFLoGLPUecy0uu64S/PYvJp/pV2m
3BYsAvpXrTBljb7ZRIa5rM3xlkXYBcYqzbUt0Duvjwedu3S+GMl8gfP5SkSH6A8AAAD//wMAJGbR
+94DAAA=
headers:
CF-RAY:
- 99c98dd3ddb9ce6c-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 11 Nov 2025 00:08:16 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=6maCeRS26vR_uzqYdtL7RXY7kzGdvLhWcE2RP2PnZS0-1762819696-1.0.1.1-72zCZZVBiGDdwPDvETKS_fUA4DYCLVyVHDYW2qpSxxAUuWKNPLxQQ1PpeI7YuB9v.y1e3oapeuV5mBjcP4c9_ZbH.ZI14TUNOexPUB6yCaQ;
path=/; expires=Tue, 11-Nov-25 00:38:16 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=a.XOUFuP.5IthR7ITJrIWIZSWWAkmHU._pM9.qhCnhM-1762819696364-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1199'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1351'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999735'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999735'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_50a8251d98f748bb8e73304a2548b694
status:
code: 200
message: OK
- request:
body: '{"messages":[{"role":"system","content":"You are Research Assistant. You
are a helpful research assistant who can search for information about the population
of Tokyo.\nYour personal goal is: Find information about the population of Tokyo\n\nYou
ONLY have access to the following tools, and should NEVER make up tools that
are not listed here:\n\nTool Name: search_web\nTool Arguments: {''query'': {''description'':
None, ''type'': ''str''}}\nTool Description: Search the web for information
about a topic.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [search_web], just the name, exactly as it''s written.\nAction Input:
the input to the action, just a simple JSON object, enclosed in curly braces,
using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
all necessary information is gathered, return the following format:\n\n```\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n```"},{"role":"user","content":"What is the population of Tokyo?"},{"role":"assistant","content":"```\nThought:
I need to search for the latest information regarding the population of Tokyo.\nAction:
search_web\nAction Input: {\"query\":\"current population of Tokyo 2023\"}\n```\nObservation:
Tokyo''s population in 2023 was approximately 21 million people in the city
proper, and 37 million in the greater metropolitan area."}],"model":"gpt-4o-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '1521'
content-type:
- application/json
cookie:
- __cf_bm=6maCeRS26vR_uzqYdtL7RXY7kzGdvLhWcE2RP2PnZS0-1762819696-1.0.1.1-72zCZZVBiGDdwPDvETKS_fUA4DYCLVyVHDYW2qpSxxAUuWKNPLxQQ1PpeI7YuB9v.y1e3oapeuV5mBjcP4c9_ZbH.ZI14TUNOexPUB6yCaQ;
_cfuvid=a.XOUFuP.5IthR7ITJrIWIZSWWAkmHU._pM9.qhCnhM-1762819696364-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFPLbtswELz7KxY8W4Es+RHr1ifgQw8F3OZQBxJDrSTWFJcgqSRG4H8v
KD+kNCnQCwFyZpazs+TLBIDJkmXARMO9aI2KPvG7z81d8mWf4E/cxN9+PK5SvfkYf08Pe8+mQUEP
v1H4i+pGUGsUekn6BAuL3GOoOlstk9vZerle9UBLJaogq42P5hS1UssoiZN5FK+i2e1Z3ZAU6FgG
vyYAAC/9GnzqEp9ZBvH0ctKic7xGll1JAMySCieMOyed5/rk+QwK0h51b70oip3eNtTVjc9gA5qe
YB8W3yBUUnMFXLsntDv9td996HcZbBsEQ6ZTPLQMVMGW9gcCqSGJkxSkA26MpWfZco/qAMkMWqlU
IBskozBQwy1C+gMYSwYtcF1CuroSz4y6j9JCi96SISU918At8pudLopi3JrFqnM8xKs7pUYA15p8
77UP9f6MHK8xKqqNpQf3l5RVUkvX5Ba5Ix0ic54M69HjBOC+H1f3agLMWGqNzz3tsb8ujdNTPTa8
kgGdX0BPnquRar6cvlMvL9Fzqdxo4Exw0WA5SIfXwbtS0giYjLp+6+a92qfOpa7/p/wACIHGY5kb
i6UUrzseaBbDJ/oX7Zpyb5g5tI9SYO4l2jCJEiveqfN3dAfnsc0rqWu0xsrT+65MvljGvFriYrFm
k+PkDwAAAP//AwDgLjwY7QMAAA==
headers:
CF-RAY:
- 99c98dde7fc9ce6c-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 11 Nov 2025 00:08:18 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1339'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '1523'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999657'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999657'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_ade054352f8c4dfdba50683755eba41d
status:
code: 200
message: OK
version: 1
View File
@@ -1,104 +1,10 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Say ''Hello, World!''"}], "model":
"gpt-3.5-turbo"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '92'
content-type:
- application/json
cookie:
- __cf_bm=rb61BZH2ejzD5YPmLaEJqI7km71QqyNJGTVdNxBq6qk-1727213194-1.0.1.1-pJ49onmgX9IugEMuYQMralzD7oj_6W.CHbSu4Su1z3NyjTGYg.rhgJZWng8feFYah._oSnoYlkTjpK1Wd2C9FA;
_cfuvid=lbRdAddVWV6W3f5Dm9SaOPWDUOxqtZBSPr_fTW26nEA-1727213194587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.47.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.47.0
x-stainless-raw-response:
- 'true'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.11.7
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-AB7WOl4G3lFflxNyRE5fAnkueUNWp\",\n \"object\":
\"chat.completion\",\n \"created\": 1727213884,\n \"model\": \"gpt-3.5-turbo-0125\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello, World!\",\n \"refusal\":
null\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 13,\n \"completion_tokens\":
4,\n \"total_tokens\": 17,\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
0\n }\n },\n \"system_fingerprint\": null\n}\n"
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 8c85eb570b271cf3-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 24 Sep 2024 21:38:04 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '170'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '50000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '49999978'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_c504d56aee4210a9911e1b90551f1e46
http_version: HTTP/1.1
status_code: 200
- request:
body: '{"trace_id": "9d3dfee1-ebe8-4eb3-aa28-e77448706cb5", "execution_type":
body: '{"trace_id": "3fe0e5a3-1d9c-4604-b3a7-2cd3f16e95f9", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "0.193.2",
"privacy_level": "standard"}, "execution_metadata": {"expected_duration_estimate":
300, "agent_count": 0, "task_count": 0, "flow_method_count": 0, "execution_started_at":
"2025-09-24T05:36:10.874552+00:00"}}'
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-15T04:57:05.245294+00:00"}}'
headers:
Accept:
- '*/*'
@@ -107,54 +13,73 @@ interactions:
Connection:
- keep-alive
Content-Length:
- '436'
- '434'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/0.193.2
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- d3a3d10c-35db-423f-a7a4-c026030ba64d
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 0.193.2
- 1.4.1
method: POST
uri: http://localhost:3000/crewai_plus/api/v1/tracing/batches
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
response:
body:
string: '{"id":"bc65d267-2f55-4edd-9277-61486245c5f6","trace_id":"9d3dfee1-ebe8-4eb3-aa28-e77448706cb5","execution_type":"crew","crew_name":"Unknown
Crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"0.193.2","privacy_level":"standard","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"Unknown
Crew","flow_name":null,"crewai_version":"0.193.2","privacy_level":"standard"},"created_at":"2025-09-24T05:36:11.292Z","updated_at":"2025-09-24T05:36:11.292Z"}'
string: '{"error":"bad_credentials","message":"Bad credentials"}'
headers:
Connection:
- keep-alive
Content-Length:
- '496'
cache-control:
- max-age=0, private, must-revalidate
content-security-policy:
- 'default-src ''self'' *.crewai.com crewai.com; script-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts https://www.gstatic.com
https://run.pstmn.io https://share.descript.com/; style-src ''self'' ''unsafe-inline''
*.crewai.com crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self''
data: *.crewai.com crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net; font-src ''self'' data: *.crewai.com crewai.com;
connect-src ''self'' *.crewai.com crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ ws://localhost:3036
wss://localhost:3036; frame-src ''self'' *.crewai.com crewai.com https://connect.useparagon.com/
https://zeus.tools.crewai.com https://zeus.useparagon.com/* https://connect.tools.crewai.com/
https://www.youtube.com https://share.descript.com'
content-type:
- '55'
Content-Type:
- application/json; charset=utf-8
etag:
- W/"43353f343ab1e228123d1a9c9a4b6e7c"
Date:
- Sat, 15 Nov 2025 04:57:05 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
server-timing:
- cache_read.active_support;dur=0.09, cache_fetch_hit.active_support;dur=0.00,
cache_read_multi.active_support;dur=0.08, start_processing.action_controller;dur=0.00,
sql.active_record;dur=24.53, instantiation.active_record;dur=1.01, feature_operation.flipper;dur=0.07,
start_transaction.active_record;dur=0.02, transaction.active_record;dur=24.66,
process_action.action_controller;dur=399.97
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
@@ -164,12 +89,120 @@ interactions:
x-permitted-cross-domain-policies:
- none
x-request-id:
- 256ac03e-f7ae-4e03-b5e0-31bd179a7afc
- 98dde4ab-199c-4d1c-a059-3d8b9c0c93d3
x-runtime:
- '0.422765'
- '0.037564'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
code: 401
message: Unauthorized
- request:
body: '{"messages":[{"role":"user","content":"Say ''Hello, World!''"}],"model":"gpt-3.5-turbo"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '86'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jJJNaxsxEIbv+yvUOa+NP2q78TUQegihTQmGFrPI0nitVKtRpdm2Ifi/
F8kfu24SyEUHPfOO3nc0z4UQYDQsBaidZNV4O7hWE31n9Rdz9TCaXd9//dPcLlZhdf999OvmG5RJ
QZtHVHxSDRU13iIbcgesAkrG1HW8mE/HnybzySyDhjTaJKs9D6bD2YDbsKHBaDyZHZU7MgojLMWP
QgghnvOZPDqNf2EpRuXppsEYZY2wPBcJAYFsugEZo4ksHUPZQUWO0WXbn9FaKsWKgtUf+jUBt22U
yaNrre0B6RyxTBmzu/WR7M9+LNU+0Cb+J4WtcSbuqoAykktvRyYPme4LIdY5d3sRBXygxnPF9BPz
c+PpoR10k+7gxyNjYml7mkX5SrNKI0tjY29soKTaoe6U3Yxlqw31QNGL/NLLa70PsY2r39O+A0qh
Z9SVD6iNuszblQVMa/hW2XnE2TBEDL+NwooNhvQNGreytYcFgfgUGZtqa1yNwQeTtyR9Y7Ev/gEA
AP//AwAqA1omJAMAAA==
headers:
CF-RAY:
- 99ec2a70de42f9e4-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 15 Nov 2025 04:57:05 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=REDACTED;
path=/; expires=Sat, 15-Nov-25 05:27:05 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=REDACTED;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- REDACTED_ORG
openai-processing-ms:
- '162'
openai-project:
- REDACTED_PROJECT
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '183'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-requests:
- '10000'
x-ratelimit-limit-tokens:
- '50000000'
x-ratelimit-remaining-requests:
- '9999'
x-ratelimit-remaining-tokens:
- '49999993'
x-ratelimit-reset-requests:
- 6ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- REDACTED_REQUEST_ID
status:
code: 200
message: OK
version: 1
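Note on the recorded fixtures: the response bodies stored above as YAML !!binary scalars are gzip-compressed JSON (the recorded headers say Content-Encoding: gzip). A minimal sketch for inspecting one offline, assuming PyYAML is installed; the cassette path is illustrative:

import gzip
import json

import yaml

# yaml.safe_load decodes !!binary scalars to raw bytes.
with open("tests/cassettes/example_cassette.yaml") as f:
    cassette = yaml.safe_load(f)

raw = cassette["interactions"][-1]["response"]["body"]["string"]
# The recorded body is gzip-compressed JSON, per Content-Encoding: gzip.
body = json.loads(gzip.decompress(raw).decode("utf-8"))
print(body["choices"][0]["message"]["content"])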

View File

@@ -81,11 +81,9 @@ interactions:
Server:
- cloudflare
Set-Cookie:
- __cf_bm=REDACTED;
path=/; expires=Wed, 05-Nov-25 22:40:59 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=REDACTED;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
- __cf_bm=REDACTED; path=/; expires=Wed, 05-Nov-25 22:40:59 GMT; domain=.api.openai.com;
HttpOnly; Secure; SameSite=None
- _cfuvid=REDACTED; path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
@@ -127,4 +125,105 @@ interactions:
status:
code: 200
message: OK
- request:
body: '{"trace_id": "c682f49d-bb6b-49d9-84b7-06e1881d37cd", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-15T21:20:09.431751+00:00"},
"ephemeral_trace_id": "c682f49d-bb6b-49d9-84b7-06e1881d37cd"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '488'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.4.1
X-Crewai-Organization-Id:
- 73c2b193-f579-422c-84c7-76a39a1da77f
X-Crewai-Version:
- 1.4.1
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"25f0f0b3-90bb-4e2a-bde5-817920201bf1","ephemeral_trace_id":"c682f49d-bb6b-49d9-84b7-06e1881d37cd","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.4.1","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.4.1","privacy_level":"standard"},"created_at":"2025-11-15T21:20:09.594Z","updated_at":"2025-11-15T21:20:09.594Z","access_code":"TRACE-1fb0209738","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '515'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 15 Nov 2025 21:20:09 GMT
cache-control:
- no-store
content-security-policy:
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
https://drive.google.com https://slides.google.com https://accounts.google.com
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
https://www.youtube.com https://share.descript.com'
etag:
- W/"e8d1e903c8c6ec2f765163c0c03bed79"
expires:
- '0'
permissions-policy:
- camera=(), microphone=(self), geolocation=()
pragma:
- no-cache
referrer-policy:
- strict-origin-when-cross-origin
strict-transport-security:
- max-age=63072000; includeSubDomains
vary:
- Accept
x-content-type-options:
- nosniff
x-frame-options:
- SAMEORIGIN
x-permitted-cross-domain-policies:
- none
x-request-id:
- 5ea5f513-c359-4a92-a84a-08ad44d9857b
x-runtime:
- '0.044665'
x-xss-protection:
- 1; mode=block
status:
code: 201
message: Created
version: 1

View File

@@ -0,0 +1,423 @@
interactions:
- request:
body: '{"messages":[{"role":"system","content":"You are Researcher. You''re an
expert researcher, specialized in technology, software engineering, AI and startups.
You work as a freelancer and is now working on doing research and analysis for
a new customer.\nYour personal goal is: Make the best research and analysis
on content about AI and AI agents\nTo give my best complete final answer to
the task respond using the exact following format:\n\nThought: I now can give
a great answer\nFinal Answer: Your final answer must be the great and the most
complete as possible, it must be outcome described.\n\nI MUST use these formats,
my job depends on it!"},{"role":"user","content":"\nCurrent Task: Give me a
list of 3 interesting ideas about AI.\n\nThis is the expected criteria for your
final answer: Bullet point list of 3 ideas.\nyou MUST return the actual complete
content as the final answer, not a summary.\n\nYou MUST follow these instructions:
\n - Include specific examples and real-world case studies to enhance the credibility
and depth of the article ideas.\n - Incorporate mentions of notable companies,
projects, or tools relevant to each topic to provide concrete context.\n - Add
diverse viewpoints such as interviews with experts, users, or thought leaders
to enrich the narrative and lend authority.\n - Address ethical, social, and
emotional considerations explicitly to reflect a balanced and comprehensive
analysis.\n - Enhance the descriptions by including implications for future
developments and the potential impact on society.\n - Use more engaging and
vivid language that draws the reader into each topic''s nuances and importance.\n
- Include notes or summaries that contextualize each set of ideas in terms of
relevance and potential reader engagement.\n - In future tasks, focus on elaborating
initial outlines into more detailed and nuanced article proposals with richer
content and insights.\n\nBegin! This is VERY important to you, use the tools
available and give your best Final Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '2076'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1xXS44kxw3d6xREbwwMqgsaWWPJvSvNT21roIE0kAx5NswIViWnI4MpMiKra7TR
IbTxSbz3UXQSgxFZ1S1tGuiMH/n43iPrl08Arjhe3cBVGLGEaU7Xz/HHL3/YfTz+9GJ69sPXL8sr
e/bT9JMefv4Ql39dbfyEDB8olPOpbZBpTlRYcl8OSljIb336xd8++/Lp3//6xbO2MEmk5McOc7n+
fPv0euLM1599+tmz608/v376+Xp8FA5kVzfw708AAH5pfz3QHOn+6gY+3Zy/TGSGB7q6uWwCuFJJ
/uUKzdgK5nK1eVgMkgvlFvu7UephLDdwC1mOEDDDgRcChIMnAJjtSPo+v+KMCXbtv5v3+X2+hidP
drfXL5QXyvCW1CRj4o8U4WvCVMaASjfwHS2SqsPCHzkf4C0Wplzg21qCTGTwblSPAN4qRQ7F395l
TKfCwZ48eZ8B3o1swJEQ6H5OomQwyhF2t8AGRTHbXnTyy8fLwzCcgDIOqX3mw5hOwDnywrGuURbP
b/JY5oTZYECjCJJhXkOMWBAwR5gfQmvFsy28EgXODmygDXjtMTMZJL4juP3qDfyIxSSvUMCIC0Gi
hRQPFD30IoCe5keCyW/HBEpBNNoGDpRl4mCb9npInNt6UcZkfrIgJ1EvViCFMpLi7K/XzD9XSiew
ysVTFCAM4zmjLbwgmt9wjr//+h/zKOxkhSYY0cBGOWaYVSY2As6XrPMB7jhmcgA/VD0BoabTBgwX
X0u8kEFZqzirYEeKcyFdKHvpt3Db/mM6GhzZAXmo1KyyJzN2+ljHu4droLQQJhikjEC5jNUYbYK9
KOxuWw6zOJEZU4dKHJBsgIPUArPyguHUlloxjUJVLqdNpwQfxpYelbEBHDnRNKF59iPm2MhjlI3X
jJxn2BP6XgJjchyK09NG3hcIUlMEpVgDQWSbUbl4YfzCh4wxBDKDoRbAZALKdmdA9xhIB2whcaaf
najlBLzvfBFHRwlqpoVyOvkDRXmoheIWXk5SGoQe0gXAgTLtucBeZYJRZmpY8DSrLOTElkMW4x7L
5Hj0iOh+JmXKwQ/cM5UTyEIKWItMbmsQKbDX7HrCO86HLbyqpSr53YlDA8nTDqlGgt3t9SxHUoqw
sJaKaYUDLvbUK6+E6brw5ELLXEQdi0aYI6HikAgGFi+JqG1WtvoeBJtQC+kGJlECJZslm5ftEfAU
ZGV8GfFcLrovlCMk3lPLO7ioO2nOJZB9Xz4kGRzhbfekIjMHNyGlRAvm0g5RPuChxWTABYrUMLpl
1QkzHCml64HacgGEfc0R3YQwNX/oPOa8cCG/FyNpk3zwbGJXu2tLHWSCAZObAAxUjkQZCoUxS5JD
T6Axu9GvmQxn2l68uxbJMkltRrA7NL5whudui47bbY7VCUZ2Ay/vZ8zRo/5KPGL/6qjstGzgTTUO
3au+L6KnQsmV82fzjpSWJoUi/iL2F5thfjtTXgX9YvfNN//778um8YXNqYL+yD/qHUUKd+2ZR/v9
+yD3bf/kgTQzFpes5B5URtWe0oEyKRZRA6vB2eeRvKj5QL75D70knHGYVVyuZF51MnpQYxgxJcoH
8pORu/4gSye/7F0uo6iNPHd76lc6o4YTBEkJB9Eu+O6KjSOoha0YtBYTaaYcKZd0OoffousdfAvf
uWKOoilCQCOwUmN3nC69H1Ezaa/RKmun2+XJ3e117C28ozerxBo8gxays+11RY2MGeY6JLbRz+5u
r9do3EaEinpHmFHdDPrGAJEGLOSwtRkD0krnVnOnQEsUvqM933nTj+IebKHapcNTHp3f9lCOUZQ/
SrYNHEdOBHZHs88KcBTV0+r8HnbkPnd4ITqwdPZIB0J7z+kN59JFPsjQjDthoDYbOK/Wgmzh5aVR
tCBbmfcSqvnYEGQ+qVvoBuSYqZV9cwHRqeBvhNVP3BIb1BQ98jt73FIe5BFpkuA1/3hpG24GZzA6
D5t546lbd5Bpksj7k4fdKPso7+jFb2lzXqTpEZ0vDvFffABxLCX//utvR8nuNh7+Hi1wbpYOC6lV
O4PONnWbefSABcmZQrl0hKKUo9u7H5jdtEzmka00B2vD0IMUXJrNm6s/750hQqipVMXUSX9fHj/p
TeBcvHTy1kt7zs1etQAXo7TfPjKhgHPhBR+baxtUyDwUzq1W+2orb/5kp77YqWQzK3ul93vS3oCU
w/jIaxrGWRa8aIldxlxOFws+02l3C6+9vWan+g18VTk1r33nfjSjUi4b2IUgNZfWA79vTcw6OeGd
VitH0TKeVswfWW/vURijNg9rKVR1ckEmiu2KveJEjYRA2ap2gUP0fiRzkwEmPmR77FILpkq2AZrm
Ea0P9+UScDhtnK89Yk5t5upcYs1ktoXnf5yZX4scEl0G1Lb5DQcVk33p8zOZZ882dtKsvW0QbENz
dwPOXLgV4MHie7dowmj9+DIcOJQtzznJafqznVqgjMry4KXOPx+c1Fr784Fo4ParYY8u3TbBH3Jr
BOtobVB9fThBwiOQT5DdWDZA9+Sz0p77uru3rbLsDv8HfJ4nwjZAw+52A4p97DEJTD47YEKdVvOz
qgtx6oNBm33ZgvK0anjr3Zz03Hvf8ZS5wGsatLbd/3SJPlc87kXbiLiw5+6TrJN1JjUfkhrEkhsC
7ZwVraEL1X8ouL7degaKsUvr8nvDf9jERUJvZfvW50KqbVqLZHzI6zB4qOkccpfKZd7utJ5VhpXT
k2grwFpZPs9tzSrdVVbnquaaX8X8fwAAAP//jFjNbtwgEL7vUyBfcmkr7SZt95pjpLxCZBF7sFEw
EMCtcth3j74BL3jbSj2PjT3AfH+squh9ZfwrhN2Iok3jxr3cp0B3UQBLjLn5++k6xs1JjhrfBjXL
N5qd2SSdh72xgO4wafbOW7M7LZ+5NGHIBbg3b3sdtcQ3e7VFdXNvi056khv7KZKBRaospvDxSSw6
rpGgMW4p75t4do5pXM4kR+64zh6jgVMZNfOFyghWTsuFD8GwjamsGhToTSFpdfUGIKxXQgYgvP7l
kjRfduhTrEv2PHGWMA+vwcnRZCzOpgnZyQI7v5PkMQVnJ+YDpBJAe0auDfKLT6SxkQsYJffVO1Pu
uV68HFKm6p0oL/6WmW7FuWLYZ+kzC6hMer9xS1r6oIUdUBRB4gaB5GwmuUXbzR6AHNqcJpBao0RY
ZFdjmoK01qW8kUiIXkrlcs2EjJswHPHm1Q7kGOc+kIzOIv+JyfmOq5eDEC+cPa27OKmDy/KpT+6N
+HP355I9dTXzqtWfD/elmnCotXA8nrbKbsV+JOQZscmvukEOM4313Rp2Qa64pnBo+v7zf/62du5d
2+l/lq+FAdqIxn6LRdqe62OBEAr+67HrPvMPdxGRyEB90hRwFiMpuZqc1HUZKnuFiQ8+6BzXKd8/
DKfz96M6/zh1h8vhEwAA//8DAJPMJFq9FAAA
headers:
CF-RAY:
- 99c98602dfefcf4d-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 11 Nov 2025 00:03:08 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=ObqPLq12_9tJ06.V1RkHCM6FH_YGcLoC2ykIFBEawa8-1762819388-1.0.1.1-l7PJTVbZ1vCcKdeOe8GQVuFL59SCk0xhO_dMFY2wuH5Ybd1hhM_Xcv_QivXVhZlBGlRgRAgG631P99JOs_IYAYcNFJReE.3NpPl34VfPVeQ;
path=/; expires=Tue, 11-Nov-25 00:33:08 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=kdn.HizdlSPG7cBu_zv1ZPcu0jMwDQIA4H9YvMXu6a0-1762819388587-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '13504'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '13638'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999507'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999507'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_2de40e1beb5f42ea896664df36e8ce8f
status:
code: 200
message: OK
- request:
body: "{\"messages\":[{\"role\":\"system\",\"content\":\"You are Researcher. You're
an expert researcher, specialized in technology, software engineering, AI and
startups. You work as a freelancer and is now working on doing research and
analysis for a new customer.\\nYour personal goal is: Make the best research
and analysis on content about AI and AI agents\\nTo give my best complete final
answer to the task respond using the exact following format:\\n\\nThought: I
now can give a great answer\\nFinal Answer: Your final answer must be the great
and the most complete as possible, it must be outcome described.\\n\\nI MUST
use these formats, my job depends on it!\"},{\"role\":\"user\",\"content\":\"\\nCurrent
Task: Summarize the ideas from the previous task.\\n\\nThis is the expected
criteria for your final answer: A summary of the ideas.\\nyou MUST return the
actual complete content as the final answer, not a summary.\\n\\nThis is the
context you're working with:\\n- **AI-Driven Personalized Healthcare: Revolutionizing
Patient Outcomes Through Predictive Analytics**\\n This idea explores how AI
is transforming healthcare by enabling highly individualized treatment plans
based on patient data and predictive models. For instance, companies like IBM
Watson Health have leveraged AI to analyze medical records, genomics, and clinical
trials to tailor cancer therapies uniquely suited to each patient. DeepMind\u2019s
AI system has shown promise in predicting kidney injury early, saving lives
through proactive intervention. Interviews with healthcare professionals and
patients reveal both enthusiasm for AI\u2019s potential and concerns about privacy
and data security, highlighting ethical dilemmas in handling sensitive information.
Socially, this shift could reduce disparities in healthcare access but also
risks exacerbating inequality if AI tools are unevenly distributed. Emotionally,
patients benefit from hope and improved prognosis but might also experience
anxiety over automated decision-making. Future implications include AI-powered
virtual health assistants and real-time monitoring with wearable biosensors,
promising a smarter, more responsive healthcare ecosystem that could extend
life expectancy and quality of life globally. This topic is relevant and engaging
as it touches human well-being at a fundamental level and invites readers to
consider the intricate balance between technology and ethics in medicine.\\n\\n-
**Autonomous AI Agents in Creative Industries: Expanding Boundaries of Art,
Music, and Storytelling**\\n This idea delves into AI agents like OpenAI\u2019s
DALL\xB7E for visual art, Jukedeck and OpenAI\u2019s Jukebox for music composition,
and narrative generators such as AI Dungeon, transforming creative processes.
These AI tools challenge traditional notions of authorship and creativity by
collaborating with human artists or independently generating content. Real-world
case studies include Warner Music experimenting with AI-driven music production
and the Guardian publishing AI-generated poetry, sparking public debate. Thought
leaders like AI artist Refik Anadol discuss how AI enhances creative horizons,
while skeptics worry about the dilution of human emotional expression and potential
job displacement for artists. Ethical discussions focus on copyright, ownership,
and the authenticity of AI-produced works. Socially, AI agents democratize access
to creative tools but may also commodify art. The emotional dimension involves
audiences' reception\u2014wonder and fascination versus skepticism and emotional
disconnect. Future trends anticipate sophisticated AI collaborators that understand
cultural context and emotions, potentially redefining art itself. This idea
captivates readers interested in the fusion of technology and the human spirit,
offering a rich narrative on innovation and identity.\\n\\n- **Ethical AI Governance:
Building Transparent, Accountable Systems for a Trustworthy Future**\\n This
topic addresses the urgent need for frameworks ensuring AI development aligns
with human values, emphasizing transparency, accountability, and fairness. Companies
like Google DeepMind and Microsoft have established AI ethics boards, while
initiatives such as OpenAI commit to responsible AI deployment. Real-world scenarios
include controversies over biased facial recognition systems used by law enforcement,
exemplified by cases involving companies like Clearview AI, raising societal
alarm about surveillance and discrimination. Experts like Timnit Gebru and Kate
Crawford provide critical perspectives on bias and structural injustice embedded
in AI systems, advocating for inclusive design and regulation. Ethically, this
topic probes the moral responsibility of creators versus users and the consequences
of autonomous AI decisions. Socially, there's a call for inclusive governance
involving diverse stakeholders to prevent marginalization. Emotionally, public
trust hinges on transparent communication and mitigation of fears related to
AI misuse or job displacement. Looking ahead, the establishment of international
AI regulatory standards and ethical certifications may become pivotal, ensuring
AI benefits are shared broadly and risks minimized. This topic strongly resonates
with readers concerned about the socio-political impact of AI and invites active
discourse on shaping a future where technology empowers rather than undermines
humanity.\\n\\nYou MUST follow these instructions: \\n - Include specific examples
and real-world case studies to enhance the credibility and depth of the article
ideas.\\n - Incorporate mentions of notable companies, projects, or tools relevant
to each topic to provide concrete context.\\n - Add diverse viewpoints such
as interviews with experts, users, or thought leaders to enrich the narrative
and lend authority.\\n - Address ethical, social, and emotional considerations
explicitly to reflect a balanced and comprehensive analysis.\\n - Enhance the
descriptions by including implications for future developments and the potential
impact on society.\\n - Use more engaging and vivid language that draws the
reader into each topic's nuances and importance.\\n - Include notes or summaries
that contextualize each set of ideas in terms of relevance and potential reader
engagement.\\n - In future tasks, focus on elaborating initial outlines into
more detailed and nuanced article proposals with richer content and insights.\\n\\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\\n\\nThought:\"}],\"model\":\"gpt-4.1-mini\"}"
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '6552'
content-type:
- application/json
cookie:
- __cf_bm=ObqPLq12_9tJ06.V1RkHCM6FH_YGcLoC2ykIFBEawa8-1762819388-1.0.1.1-l7PJTVbZ1vCcKdeOe8GQVuFL59SCk0xhO_dMFY2wuH5Ybd1hhM_Xcv_QivXVhZlBGlRgRAgG631P99JOs_IYAYcNFJReE.3NpPl34VfPVeQ;
_cfuvid=kdn.HizdlSPG7cBu_zv1ZPcu0jMwDQIA4H9YvMXu6a0-1762819388587-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1xXzY4bxxG++ykKexRIQlKcRN7b2pIcBrItSwIUIL4Ue4ozJfZ0tau6yR35oofw
JU+Sex5FTxJUz3BJ6bLAcqa66+f7qfnjG4Ab7m5u4SYMWMKY4/oHfP/sw1v99WM3vvzX8x9//3mr
rx4f3te85WflZuURsvtAoZyjNkHGHKmwpPlxUMJCfuqTv//t6bMn3/3l2XftwSgdRQ/rc1l/u3my
Hjnx+unjp39dP/52/eTbJXwQDmQ3t/DvbwAA/mh/PdHU0f3NLTxenX8ZyQx7url9eAngRiX6Lzdo
xlYwzUkvD4OkQqnl/m6Q2g/lFraQ5AQBE/R8JEDovQDAZCdSgN/SS04Y4a79f+s//JYePbrbrp8r
HynBa1KThJE/Ugf/IIxlCKh0C2/oKLF6Y/gjpx5eY2FKBX6pJchIBu8G9RzgtVLHofjtdwnjVDjY
o0d+07uBDbgjhECpkBpIgjIQFMVke9ERW1gWz1X2cLcFTjA8ZLGC00BKkC9X4PkKoIS7SDBMmXSd
r8so3oLRk80Rk0FBjqL+QNr1nIpywELQkT8zv5tTx0fuKkbIS6kdFtzAa5ZEpN4DBwsmJoPIB4Lt
9z/Beywmaekc1MKeghdSBIz3BcrSpiNagdHrwAhKQbSzFfSUZPRq/C6jYivA1EGInNqLRRkj7D25
1NsKguK+eCo7siwH8skHUi9LMXtmZfDxR+6Tty2wUZzgxGUAPBf2+dN/DGri36u3VvYcaQNveeSI
GqcVPCfKP3Hq2nvYHf2KzmuyyQqNBgMevXejJCvqdIHIe1obHluXMOOOI5cJdhNgCNXfiRPsRSmg
tfwx1EJw4C7RBJw+VPXkcSDsfBo2jbnIaIA5E3rzV4AxysljsxKNucHh3FB2fB0pOV43sE3G/VD2
9fyA6WRzEy7ggqwYCnuEQ9PbvvTHYOB+iH4EIDRIJFRtYP386c/TwJFgxDQBjTvF4PNuvcoqIxt5
ocBjVjlSB3ImjN8wSKZVmxaB52DcJ95zwFQgiI8yGeBO6ow+yMpHDFOLpTK0Wq3QCbWzgXNrFSXj
r5qxcMt70Uh4Di3+riSoqSO1IOppwYgfRMEkMBWM0NEOC902qmRxxXEMzvQsAjvlrqfrRmIIZAY9
ZoMjqVVrscp2mOFYE/3uXezYivKuqcrDgRINRpzgJGqUgO55RkjHllG5MNkGXoziQRgdnw9jcoXs
wPEOQcVMBTv7/OnPHSXac2nH7FVG2FEppD6fPokts/hCM2a5mahAr5hz9NAGmJoIjUCOpIC1yNjw
flV95yRjSXZpVpzAanbxaSkMdcQEH2rXuyht4JXIoaUm6oNseABKx3YMdbCvparLVIi1I4O77bpJ
JHVwZC3eyfl+eLCJuaIToTZR3LE4LEQNOop8nOVLCeO68EgwSuIiM61cOWLXOAk2ohbSFYyi5IBM
FL4ql4LMMrCBtzUMwCnJsWHtgv8iQGlw2YA+yg5j0weg+0yhYFrw7JBwkZB9e7yCEVtbiiO2SObQ
2JuagilFOjpLGhMuaAAlH6I/MOACRWoYaAag49tPn/t/ohjXO2qFusRijNaYipDqLHE7jC3pHZUT
UYJQG4bW5IAvFIYkUfo5+zPZGrVss/hqLZJklOpDg7u+oZQT/OCO5BTdpq46B8hu4cV9xqbr8L3U
1GFTQGeFlhX8VI3DbAZvi+hUKDomr4y1yUUu3tXYiDzIqSH0kgDOCfjYlGzA3OT5nMueKboHhQFj
pNSfO4O14bSNQrHjudWQZB6y7M9H+PDmgDKIuh5t4F3jszky0OCXTGnRxud3r179778v3PHI/aCZ
6VwUHNkc1Khlpmuh+4Zyx1N2T1wk13vS4sRaVu7vxbVuMeR/1gN1FA4tq6u7/fed3F91J05zEQSi
3LcVyST1toGfz0oPrSVnr7/bwvOaepIEerUZEdjVdJrfnV2qmxKOPsNmQTivLzlKgY6OFCXPG8pC
7N0Ed9sNvHGKnkRjB02FwsIsjrE2r30giA2+XSwaAe9RE+mMmlayc03Zr8AI1eii31mlq4F87Fr8
rjJMS2uLYjjMUvJuIPixonaMqR24k9hBrrtzTvOB6/M4O8hCRacVuGo3HrvuxGmJcTkPUtXI7Xkf
62Irx7YvP/QYtbAVeEN7Pvg62UkEGvOA5r12hN9tvbbG4DOQB1H+KMlW8054kdz5NGt7n6+cgTxM
0rIoNF0a+QKw4HYTDALOFtXMSylyUwVJfrlbFal0dFGhRgA/MpwZ4d4VfTF4yNH3LDJr77ufLYY8
e61T1CUAguRJfe9YgZx8LRk4rx449nDFLIu8n5xjXwzhJHqwB0bTxU09JEYKZ15l0jLBXnGkFrOB
txJ49tcykNHFnH3RCw69j3TF/BVIptSALu4zrqQ7N2D3WW984DwDZVfLF9YYZByl4/3UBEcdxEVg
RLP1gs22NOVavjZ+rB1TcrwoenVNLGakUwrtuBP5PuDqn+RIcUnYKeQL+YGyJ2bj3NO2OzVqXM0y
MqVld3o5G/EVX50cS2nNoJXQOPVu+JIH73Voc7jbQpAYcSeKxdvTluJI85dGIc1KbS6hxlIVI7SP
u/uv/G1xptUX/bss0SfHJ/tnBc4FLpqGWpbFb7bRgL4tN6Ap+YRs/gbq66w83q59tYXVV0bXNiB/
OvPJMiuXlS/gtjgEwh4tcJqV6WFNPg+h1WoUzopx2RZW/mWYSkOS1zzn0Pot+4W5HHy6OvNmsdgX
Zfg/AAAA//+MWctu5DYQvPsriDnLRmxsAiM3Z5E1jE1O8XUx4FAtDdcUKfMxGwfwvwfVTUmciQ85
GQZHlNiP6qoid87Dk3oEKfMI0K/qt2KFxDyj0WcdyedOPRgTis8c+b+qeJGR/xxLyhX8JM3taGUc
AF8+0oRDnChlO3IA+VTTTPWc2C2GQ0m5aSZFPhWmXA9PZ2jP2ECzC2/yL8s0DjJzFYnySbtC2wzN
64EMQrWciAWWhG7QNnpK6Ub9QZqDgBQqju4qVh9DGB2t2o4f/NOCNochi6KbRelK+QqvUYegWagK
QIY4am//qR3F+8qYbRQF97fN0i05gHnMwSeLHOCHmADNmEPdQyjFZCl1daDxLLU6gQxrwBIr5tHL
1F9kqERSStjpH4qgewxJaEcgQmX6F7r9syPNmlA9PHU8WicUMHFueyBLZJqTSjyRdcIJ8YmRNHLi
+/oJdaxFy89zUY/anXS1TOrkq7oOHcmmjXK1B5cMP9vJ26we6RAL7/4VH/M56h9DiD3Q+mR7hhub
UHNcnq+okeAhQanvqVfajSHafMRXIXZrV6UcixGQgdBGW1GC+pkpF0YrJh8dpH4w0sisYJEKvLBT
9FqsdFFPkKy8d6SxOKDbm5rIHLW3aWpGm0HSe+4TFAuzJgCDTDrIEk/ysrVCqmlQ2TcwFHgWqjon
31+XtGj1C5mGt9FrkekAADkjwkvFVIVhp1kbtgdW8XYx/za60giFbazhzOOKPoq9wWq9WG9CnANT
3B7KKyED+oWOwWE2VsLDRIxARNSkIzPQ2lf1rAlIiM5eDYQb9QXzTvtmPkDDQlRxliIdFhRkhaKt
z9r6phQzUA99Q74XN25DS+7b4iu96xQNg2ysJsvgt4n2yaaSqBKTvmeA9qP6Hg4r86lw97clEfCL
5mWHpyrehJKy6ci/XQajNJIAfFNhLPUBRWdWiKGY2T6RGhOzKKnZngJ4bw5qLDpqn4kkO1UQVINA
pBGzFve2uRMk6Ih1eBhpC4V7U7B9J1gGZxO2J5pXMYoxIY7bylcqBmBnNnfqd6yeC/sRwdWxI/UJ
MDzZ6pYtikSPElrr1SLo9DI3xSxtxjdNLC+SDBb0VtTwnhCLagJNUh8283i9vr7Gn98Bc2zc2qQO
2rwIRuAQkTKJkVDhW3N946Cpg0ZklFgBtxN6lhXgdg7WLw4nVCvI7CVMgItJcjuODv6eU6Ieqqb2
LFQKJyAx3VqTVAmK0uoEU7dTU3HZDtoQm5Xa98nouYqixbobGJisiBMDWwue0pkd3dJfBqEVA5pk
NRQrqGjNcaNF3HMtBzqHPtm0ZQErB2XNIzaTCcXBJIqcSqokkyptDyU7lq3r61Ha7HOj+oBgjuXI
HJJm63sQdwglTEB99k6lzxZCb6dGiw6rWfh2015PRBpK0rgj8cW5ZkF71AU/jIuRb3Xlfb0KcWGc
Yziki0d3g/U2Hfcg2cHj2iPlMO949f1KqW985VLOblF24hnsc3ghft3t7e0n2XC33fU0yz+tyxmY
sa3c3d7ddx/sua+XBs3Fzc5oc6R+e3a75QEEhGbhqjn5fz/oo73l9NaP/2f7bcHAHKJ+v9ydtIfe
fhbpOzt8H/9sjTR/8C7BSTe0z5YistHToIur92oyYveDBX2ao5V7qmHefzJ39z/fDve/3O2u3q/+
BQAA//8DAPcawNa2GwAA
headers:
CF-RAY:
- 99c9865b6af3cf4d-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Tue, 11 Nov 2025 00:03:32 GMT
Server:
- cloudflare
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '22788'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '22942'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149998392'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149998392'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_48c359c72cdc47aeb89c6d6eeffdce7d
status:
code: 200
message: OK
version: 1
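In tests these cassettes are replayed rather than re-recorded: the HTTP layer is patched so a request matching a recorded interaction receives the stored response. With vcrpy the pattern is roughly the following sketch (the cassette path and test body are illustrative, not taken from this changeset):

import vcr

@vcr.use_cassette("tests/cassettes/test_hello_world.yaml")
def test_hello_world():
    # Calls to api.openai.com inside this test are matched against the
    # recorded interactions and answered from the cassette, so the test
    # runs offline and deterministically.
    ...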

View File

@@ -37,6 +37,36 @@ class TestOktaProvider:
provider = OktaProvider(settings)
expected_url = "https://my-company.okta.com/oauth2/default/v1/device/authorize"
assert provider.get_authorize_url() == expected_url
def test_get_authorize_url_with_custom_authorization_server_name(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": False,
"authorization_server_name": "my_auth_server_xxxAAA777"
}
)
provider = OktaProvider(settings)
expected_url = "https://test-domain.okta.com/oauth2/my_auth_server_xxxAAA777/v1/device/authorize"
assert provider.get_authorize_url() == expected_url
def test_get_authorize_url_when_using_org_auth_server(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": True,
"authorization_server_name": None
}
)
provider = OktaProvider(settings)
expected_url = "https://test-domain.okta.com/oauth2/v1/device/authorize"
assert provider.get_authorize_url() == expected_url
def test_get_token_url(self):
expected_url = "https://test-domain.okta.com/oauth2/default/v1/token"
@@ -53,6 +83,36 @@ class TestOktaProvider:
expected_url = "https://another-domain.okta.com/oauth2/default/v1/token"
assert provider.get_token_url() == expected_url
def test_get_token_url_with_custom_authorization_server_name(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": False,
"authorization_server_name": "my_auth_server_xxxAAA777"
}
)
provider = OktaProvider(settings)
expected_url = "https://test-domain.okta.com/oauth2/my_auth_server_xxxAAA777/v1/token"
assert provider.get_token_url() == expected_url
def test_get_token_url_when_using_org_auth_server(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": True,
"authorization_server_name": None
}
)
provider = OktaProvider(settings)
expected_url = "https://test-domain.okta.com/oauth2/v1/token"
assert provider.get_token_url() == expected_url
def test_get_jwks_url(self):
expected_url = "https://test-domain.okta.com/oauth2/default/v1/keys"
assert self.provider.get_jwks_url() == expected_url
@@ -68,6 +128,36 @@ class TestOktaProvider:
expected_url = "https://dev.okta.com/oauth2/default/v1/keys"
assert provider.get_jwks_url() == expected_url
def test_get_jwks_url_with_custom_authorization_server_name(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": False,
"authorization_server_name": "my_auth_server_xxxAAA777"
}
)
provider = OktaProvider(settings)
expected_url = "https://test-domain.okta.com/oauth2/my_auth_server_xxxAAA777/v1/keys"
assert provider.get_jwks_url() == expected_url
def test_get_jwks_url_when_using_org_auth_server(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": True,
"authorization_server_name": None
}
)
provider = OktaProvider(settings)
expected_url = "https://test-domain.okta.com/oauth2/v1/keys"
assert provider.get_jwks_url() == expected_url
def test_get_issuer(self):
expected_issuer = "https://test-domain.okta.com/oauth2/default"
assert self.provider.get_issuer() == expected_issuer
@@ -83,6 +173,36 @@ class TestOktaProvider:
expected_issuer = "https://prod.okta.com/oauth2/default"
assert provider.get_issuer() == expected_issuer
def test_get_issuer_with_custom_authorization_server_name(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": False,
"authorization_server_name": "my_auth_server_xxxAAA777"
}
)
provider = OktaProvider(settings)
expected_issuer = "https://test-domain.okta.com/oauth2/my_auth_server_xxxAAA777"
assert provider.get_issuer() == expected_issuer
def test_get_issuer_when_using_org_auth_server(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": True,
"authorization_server_name": None
}
)
provider = OktaProvider(settings)
expected_issuer = "https://test-domain.okta.com"
assert provider.get_issuer() == expected_issuer
def test_get_audience(self):
assert self.provider.get_audience() == "test-audience"
@@ -100,3 +220,38 @@ class TestOktaProvider:
def test_get_client_id(self):
assert self.provider.get_client_id() == "test-client-id"
def test_get_required_fields(self):
assert set(self.provider.get_required_fields()) == set(["authorization_server_name", "using_org_auth_server"])
def test_oauth2_base_url(self):
assert self.provider._oauth2_base_url() == "https://test-domain.okta.com/oauth2/default"
def test_oauth2_base_url_with_custom_authorization_server_name(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": False,
"authorization_server_name": "my_auth_server_xxxAAA777"
}
)
provider = OktaProvider(settings)
assert provider._oauth2_base_url() == "https://test-domain.okta.com/oauth2/my_auth_server_xxxAAA777"
def test_oauth2_base_url_when_using_org_auth_server(self):
settings = Oauth2Settings(
provider="okta",
domain="test-domain.okta.com",
client_id="test-client-id",
audience=None,
extra={
"using_org_auth_server": True,
"authorization_server_name": None
}
)
provider = OktaProvider(settings)
assert provider._oauth2_base_url() == "https://test-domain.okta.com/oauth2"
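Taken together, these assertions pin down the Okta URL scheme: a named authorization server (falling back to "default") appears as a path segment, unless using_org_auth_server is set, in which case the segment is dropped and the issuer becomes the bare domain. The following sketch is reconstructed from the asserted URLs, not copied from the provider implementation:

class OktaProviderSketch:
    """Inferred from the test assertions above; illustrative only."""

    def __init__(self, settings):
        self.settings = settings  # expects .domain and .extra

    def _oauth2_base_url(self) -> str:
        extra = self.settings.extra or {}
        if extra.get("using_org_auth_server"):
            # Org authorization server: no named server path segment.
            return f"https://{self.settings.domain}/oauth2"
        server = extra.get("authorization_server_name") or "default"
        return f"https://{self.settings.domain}/oauth2/{server}"

    def get_authorize_url(self) -> str:
        return f"{self._oauth2_base_url()}/v1/device/authorize"

    def get_token_url(self) -> str:
        return f"{self._oauth2_base_url()}/v1/token"

    def get_jwks_url(self) -> str:
        return f"{self._oauth2_base_url()}/v1/keys"

    def get_issuer(self) -> str:
        extra = self.settings.extra or {}
        if extra.get("using_org_auth_server"):
            # Per the tests, the org-server issuer is the bare domain.
            return f"https://{self.settings.domain}"
        return self._oauth2_base_url()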

View File

@@ -37,7 +37,8 @@ class TestEnterpriseConfigureCommand(unittest.TestCase):
'audience': 'test_audience',
'domain': 'test.domain.com',
'device_authorization_client_id': 'test_client_id',
'provider': 'workos'
'provider': 'workos',
'extra': {}
}
mock_requests_get.return_value = mock_response
@@ -60,11 +61,12 @@ class TestEnterpriseConfigureCommand(unittest.TestCase):
('oauth2_provider', 'workos'),
('oauth2_audience', 'test_audience'),
('oauth2_client_id', 'test_client_id'),
('oauth2_domain', 'test.domain.com')
('oauth2_domain', 'test.domain.com'),
('oauth2_extra', {})
]
actual_calls = self.mock_settings_command.set.call_args_list
self.assertEqual(len(actual_calls), 5)
self.assertEqual(len(actual_calls), 6)
for i, (key, value) in enumerate(expected_calls):
call_args = actual_calls[i][0]
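The added oauth2_extra entry means the configure flow now persists the provider's extra dict alongside the four existing OAuth2 keys (the sixth settings.set call asserted above falls outside this hunk). A sketch of the mapping the assertions imply; the helper name is hypothetical:

def persist_oauth2_settings(settings_command, config: dict) -> None:
    # Keys mirror expected_calls above; "extra" defaults to an empty dict.
    for key, value in [
        ("oauth2_provider", config["provider"]),
        ("oauth2_audience", config["audience"]),
        ("oauth2_client_id", config["device_authorization_client_id"]),
        ("oauth2_domain", config["domain"]),
        ("oauth2_extra", config.get("extra", {})),
    ]:
        settings_command.set(key, value)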

View File

@@ -0,0 +1,2 @@
"""Tests for CrewAI hooks functionality."""

View File

@@ -0,0 +1,619 @@
"""Tests for crew-scoped hooks within @CrewBase classes."""
from __future__ import annotations
from unittest.mock import Mock
import pytest
from crewai import Agent, Crew
from crewai.hooks import (
LLMCallHookContext,
ToolCallHookContext,
before_llm_call,
before_tool_call,
get_before_llm_call_hooks,
get_before_tool_call_hooks,
)
from crewai.project import CrewBase, agent, crew
@pytest.fixture(autouse=True)
def clear_hooks():
"""Clear global hooks before and after each test."""
from crewai.hooks import llm_hooks, tool_hooks
# Store original hooks
original_before_llm = llm_hooks._before_llm_call_hooks.copy()
original_before_tool = tool_hooks._before_tool_call_hooks.copy()
# Clear hooks
llm_hooks._before_llm_call_hooks.clear()
tool_hooks._before_tool_call_hooks.clear()
yield
# Restore original hooks
llm_hooks._before_llm_call_hooks.clear()
tool_hooks._before_tool_call_hooks.clear()
llm_hooks._before_llm_call_hooks.extend(original_before_llm)
tool_hooks._before_tool_call_hooks.extend(original_before_tool)
class TestCrewScopedHooks:
"""Test hooks defined as methods within @CrewBase classes."""
def test_crew_scoped_hook_is_registered_on_instance_creation(self):
"""Test that crew-scoped hooks are registered when crew instance is created."""
@CrewBase
class TestCrew:
@before_llm_call
def my_hook(self, context):
pass
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Check hooks before instance creation
hooks_before = get_before_llm_call_hooks()
initial_count = len(hooks_before)
# Create instance - should register the hook
crew_instance = TestCrew()
# Check hooks after instance creation
hooks_after = get_before_llm_call_hooks()
# Should have one more hook registered
assert len(hooks_after) == initial_count + 1
def test_crew_scoped_hook_has_access_to_self(self):
"""Test that crew-scoped hooks can access self and instance variables."""
execution_log = []
@CrewBase
class TestCrew:
def __init__(self):
self.crew_name = "TestCrew"
self.call_count = 0
@before_llm_call
def my_hook(self, context):
# Can access self
self.call_count += 1
execution_log.append(f"{self.crew_name}:{self.call_count}")
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Get the registered hook
hooks = get_before_llm_call_hooks()
crew_hook = hooks[-1] # Last registered hook
# Create mock context
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Test")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context = LLMCallHookContext(executor=mock_executor)
# Execute hook multiple times
crew_hook(context)
crew_hook(context)
# Verify hook accessed self and modified instance state
assert len(execution_log) == 2
assert execution_log[0] == "TestCrew:1"
assert execution_log[1] == "TestCrew:2"
assert crew_instance.call_count == 2
def test_multiple_crews_have_isolated_hooks(self):
"""Test that different crew instances have isolated hooks."""
crew1_executions = []
crew2_executions = []
@CrewBase
class Crew1:
@before_llm_call
def crew1_hook(self, context):
crew1_executions.append("crew1")
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
@CrewBase
class Crew2:
@before_llm_call
def crew2_hook(self, context):
crew2_executions.append("crew2")
@agent
def analyst(self):
return Agent(role="Analyst", goal="Analyze", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create both instances
instance1 = Crew1()
instance2 = Crew2()
# Both hooks should be registered
hooks = get_before_llm_call_hooks()
assert len(hooks) >= 2
# Create mock context
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Test")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context = LLMCallHookContext(executor=mock_executor)
# Execute all hooks
for hook in hooks:
hook(context)
# Both hooks should have executed
assert "crew1" in crew1_executions
assert "crew2" in crew2_executions
def test_crew_scoped_hook_with_filters(self):
"""Test that filtered crew-scoped hooks work correctly."""
execution_log = []
@CrewBase
class TestCrew:
@before_tool_call(tools=["delete_file"])
def filtered_hook(self, context):
execution_log.append(f"filtered:{context.tool_name}")
return None
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Get registered hooks
hooks = get_before_tool_call_hooks()
crew_hook = hooks[-1] # Last registered
# Test with matching tool
mock_tool = Mock()
context1 = ToolCallHookContext(
tool_name="delete_file", tool_input={}, tool=mock_tool
)
crew_hook(context1)
assert len(execution_log) == 1
assert execution_log[0] == "filtered:delete_file"
# Test with non-matching tool
context2 = ToolCallHookContext(
tool_name="read_file", tool_input={}, tool=mock_tool
)
crew_hook(context2)
# Should still be 1 (filtered hook didn't run)
assert len(execution_log) == 1
def test_crew_scoped_hook_no_double_registration(self):
"""Test that crew-scoped hooks are not registered twice."""
@CrewBase
class TestCrew:
@before_llm_call
def my_hook(self, context):
pass
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Get initial hook count
initial_hooks = len(get_before_llm_call_hooks())
# Create first instance
instance1 = TestCrew()
# Should add 1 hook
hooks_after_first = get_before_llm_call_hooks()
assert len(hooks_after_first) == initial_hooks + 1
# Create second instance
instance2 = TestCrew()
# Should add another hook (one per instance)
hooks_after_second = get_before_llm_call_hooks()
assert len(hooks_after_second) == initial_hooks + 2
def test_crew_scoped_hook_method_signature(self):
"""Test that crew-scoped hooks have correct signature (self + context)."""
@CrewBase
class TestCrew:
def __init__(self):
self.test_value = "test"
@before_llm_call
def my_hook(self, context):
# Should be able to access both self and context
return f"{self.test_value}:{context.iterations}"
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Verify the hook method has is_before_llm_call_hook marker
assert hasattr(crew_instance.my_hook, "__func__")
hook_func = crew_instance.my_hook.__func__
assert hasattr(hook_func, "is_before_llm_call_hook")
assert hook_func.is_before_llm_call_hook is True
def test_crew_scoped_with_agent_filter(self):
"""Test crew-scoped hooks with agent filters."""
execution_log = []
@CrewBase
class TestCrew:
@before_llm_call(agents=["Researcher"])
def filtered_hook(self, context):
execution_log.append(context.agent.role)
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Get hooks
hooks = get_before_llm_call_hooks()
crew_hook = hooks[-1]
# Test with matching agent
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Researcher")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context1 = LLMCallHookContext(executor=mock_executor)
crew_hook(context1)
assert len(execution_log) == 1
assert execution_log[0] == "Researcher"
# Test with non-matching agent
mock_executor.agent.role = "Analyst"
context2 = LLMCallHookContext(executor=mock_executor)
crew_hook(context2)
# Should still be 1 (filtered out)
assert len(execution_log) == 1
class TestCrewScopedHookAttributes:
"""Test that crew-scoped hooks have correct attributes set."""
def test_hook_marker_attribute_is_set(self):
"""Test that decorator sets marker attribute on method."""
@CrewBase
class TestCrew:
@before_llm_call
def my_hook(self, context):
pass
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Check the unbound method has the marker
assert hasattr(TestCrew.__dict__["my_hook"], "is_before_llm_call_hook")
assert TestCrew.__dict__["my_hook"].is_before_llm_call_hook is True
def test_filter_attributes_are_preserved(self):
"""Test that filter attributes are preserved on methods."""
@CrewBase
class TestCrew:
@before_tool_call(tools=["delete_file"], agents=["Dev"])
def filtered_hook(self, context):
return None
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Check filter attributes are set
hook_method = TestCrew.__dict__["filtered_hook"]
assert hasattr(hook_method, "is_before_tool_call_hook")
assert hasattr(hook_method, "_filter_tools")
assert hasattr(hook_method, "_filter_agents")
assert hook_method._filter_tools == ["delete_file"]
assert hook_method._filter_agents == ["Dev"]
def test_registered_hooks_tracked_on_instance(self):
"""Test that registered hooks are tracked on the crew instance."""
@CrewBase
class TestCrew:
@before_llm_call
def llm_hook(self, context):
pass
@before_tool_call
def tool_hook(self, context):
return None
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Check that hooks are tracked
assert hasattr(crew_instance, "_registered_hook_functions")
assert isinstance(crew_instance._registered_hook_functions, list)
assert len(crew_instance._registered_hook_functions) == 2
# Check hook types
hook_types = [ht for ht, _ in crew_instance._registered_hook_functions]
assert "before_llm_call" in hook_types
assert "before_tool_call" in hook_types
class TestCrewScopedHookExecution:
"""Test execution behavior of crew-scoped hooks."""
def test_crew_hook_executes_with_bound_self(self):
"""Test that crew-scoped hook executes with self properly bound."""
execution_log = []
@CrewBase
class TestCrew:
def __init__(self):
self.instance_id = id(self)
@before_llm_call
def my_hook(self, context):
# Should have access to self
execution_log.append(self.instance_id)
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
expected_id = crew_instance.instance_id
# Get and execute hook
hooks = get_before_llm_call_hooks()
crew_hook = hooks[-1]
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Test")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context = LLMCallHookContext(executor=mock_executor)
# Execute hook
crew_hook(context)
# Verify it had access to self
assert len(execution_log) == 1
assert execution_log[0] == expected_id
def test_crew_hook_can_modify_instance_state(self):
"""Test that crew-scoped hooks can modify instance variables."""
@CrewBase
class TestCrew:
def __init__(self):
self.counter = 0
@before_tool_call
def increment_counter(self, context):
self.counter += 1
return None
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
assert crew_instance.counter == 0
# Get and execute hook
hooks = get_before_tool_call_hooks()
crew_hook = hooks[-1]
mock_tool = Mock()
context = ToolCallHookContext(tool_name="test", tool_input={}, tool=mock_tool)
# Execute hook 3 times
crew_hook(context)
crew_hook(context)
crew_hook(context)
# Verify counter was incremented
assert crew_instance.counter == 3
def test_multiple_instances_maintain_separate_state(self):
"""Test that multiple instances of the same crew maintain separate state."""
@CrewBase
class TestCrew:
def __init__(self):
self.call_count = 0
@before_llm_call
def count_calls(self, context):
self.call_count += 1
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create two instances
instance1 = TestCrew()
instance2 = TestCrew()
# Get all hooks (should include hooks from both instances)
all_hooks = get_before_llm_call_hooks()
# Find hooks for each instance (last 2 registered)
hook1 = all_hooks[-2]
hook2 = all_hooks[-1]
# Create mock context
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Test")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context = LLMCallHookContext(executor=mock_executor)
# Execute first hook twice
hook1(context)
hook1(context)
# Execute second hook once
hook2(context)
# Each instance should have independent state
# Note: without more introspection we can't tell which registered hook
# belongs to which instance; the test only shows that hooks execute and
# can maintain per-instance state, which is consistent with isolation
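If a stronger assertion is ever wanted here, bound methods carry their owner on __self__, so, assuming the registered callables are plain bound methods rather than filter wrappers (an implementation detail this sketch does not verify), a test could attribute each hook to its crew instance:

hooks = get_before_llm_call_hooks()
instance1_hooks = [
    h for h in hooks if getattr(h, "__self__", None) is instance1
]
# Each crew instance would then own exactly one registered hook.
assert len(instance1_hooks) == 1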
class TestSignatureDetection:
"""Test that signature detection correctly identifies methods vs functions."""
def test_method_signature_detected(self):
"""Test that methods with 'self' parameter are detected."""
import inspect
@CrewBase
class TestCrew:
@before_llm_call
def method_hook(self, context):
pass
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Check that method has self parameter
method = TestCrew.__dict__["method_hook"]
sig = inspect.signature(method)
params = list(sig.parameters.keys())
assert params[0] == "self"
assert len(params) == 2 # self + context
def test_standalone_function_signature_detected(self):
"""Test that standalone functions without 'self' are detected."""
import inspect
@before_llm_call
def standalone_hook(context):
pass
# Should have only context parameter (no self)
sig = inspect.signature(standalone_hook)
params = list(sig.parameters.keys())
assert "self" not in params
assert len(params) == 1 # Just context
# Should be registered
hooks = get_before_llm_call_hooks()
assert len(hooks) >= 1
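Between them, these tests pin down the registration contract: the decorators stamp marker attributes (and filter attributes) on the function at class-definition time, and @CrewBase binds and registers marked methods per instance, recording them in _registered_hook_functions. A rough sketch of that instantiation step; the helper name is illustrative and filter wrapping is omitted for brevity:

import inspect

def _register_instance_hooks(instance) -> None:
    from crewai.hooks import llm_hooks, tool_hooks

    instance._registered_hook_functions = []
    for _, member in inspect.getmembers(type(instance), inspect.isfunction):
        params = list(inspect.signature(member).parameters)
        if not params or params[0] != "self":
            # Standalone functions register themselves at decoration time.
            continue
        bound = member.__get__(instance, type(instance))
        if getattr(member, "is_before_llm_call_hook", False):
            llm_hooks._before_llm_call_hooks.append(bound)
            instance._registered_hook_functions.append(("before_llm_call", bound))
        if getattr(member, "is_before_tool_call_hook", False):
            tool_hooks._before_tool_call_hooks.append(bound)
            instance._registered_hook_functions.append(("before_tool_call", bound))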

View File

@@ -0,0 +1,335 @@
"""Tests for decorator-based hook registration."""
from __future__ import annotations
from unittest.mock import Mock
import pytest
from crewai.hooks import (
after_llm_call,
after_tool_call,
before_llm_call,
before_tool_call,
get_after_llm_call_hooks,
get_after_tool_call_hooks,
get_before_llm_call_hooks,
get_before_tool_call_hooks,
)
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
@pytest.fixture(autouse=True)
def clear_hooks():
"""Clear global hooks before and after each test."""
from crewai.hooks import llm_hooks, tool_hooks
# Store original hooks
original_before_llm = llm_hooks._before_llm_call_hooks.copy()
original_after_llm = llm_hooks._after_llm_call_hooks.copy()
original_before_tool = tool_hooks._before_tool_call_hooks.copy()
original_after_tool = tool_hooks._after_tool_call_hooks.copy()
# Clear hooks
llm_hooks._before_llm_call_hooks.clear()
llm_hooks._after_llm_call_hooks.clear()
tool_hooks._before_tool_call_hooks.clear()
tool_hooks._after_tool_call_hooks.clear()
yield
# Restore original hooks
llm_hooks._before_llm_call_hooks.clear()
llm_hooks._after_llm_call_hooks.clear()
tool_hooks._before_tool_call_hooks.clear()
tool_hooks._after_tool_call_hooks.clear()
llm_hooks._before_llm_call_hooks.extend(original_before_llm)
llm_hooks._after_llm_call_hooks.extend(original_after_llm)
tool_hooks._before_tool_call_hooks.extend(original_before_tool)
tool_hooks._after_tool_call_hooks.extend(original_after_tool)
class TestLLMHookDecorators:
"""Test LLM hook decorators."""
def test_before_llm_call_decorator_registers_hook(self):
"""Test that @before_llm_call decorator registers the hook."""
@before_llm_call
def test_hook(context):
pass
hooks = get_before_llm_call_hooks()
assert len(hooks) == 1
def test_after_llm_call_decorator_registers_hook(self):
"""Test that @after_llm_call decorator registers the hook."""
@after_llm_call
def test_hook(context):
return None
hooks = get_after_llm_call_hooks()
assert len(hooks) == 1
def test_decorated_hook_executes_correctly(self):
"""Test that decorated hook executes and modifies behavior."""
execution_log = []
@before_llm_call
def test_hook(context):
execution_log.append("executed")
# Create mock context
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Test")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context = LLMCallHookContext(executor=mock_executor)
# Execute the hook
hooks = get_before_llm_call_hooks()
hooks[0](context)
assert len(execution_log) == 1
assert execution_log[0] == "executed"
def test_before_llm_call_with_agent_filter(self):
"""Test that agent filter works correctly."""
execution_log = []
@before_llm_call(agents=["Researcher"])
def filtered_hook(context):
execution_log.append(context.agent.role)
hooks = get_before_llm_call_hooks()
assert len(hooks) == 1
# Test with matching agent
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Researcher")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context = LLMCallHookContext(executor=mock_executor)
hooks[0](context)
assert len(execution_log) == 1
assert execution_log[0] == "Researcher"
# Test with non-matching agent
mock_executor.agent.role = "Analyst"
context2 = LLMCallHookContext(executor=mock_executor)
hooks[0](context2)
# Should still be 1 (hook didn't execute)
assert len(execution_log) == 1
class TestToolHookDecorators:
"""Test tool hook decorators."""
def test_before_tool_call_decorator_registers_hook(self):
"""Test that @before_tool_call decorator registers the hook."""
@before_tool_call
def test_hook(context):
return None
hooks = get_before_tool_call_hooks()
assert len(hooks) == 1
def test_after_tool_call_decorator_registers_hook(self):
"""Test that @after_tool_call decorator registers the hook."""
@after_tool_call
def test_hook(context):
return None
hooks = get_after_tool_call_hooks()
assert len(hooks) == 1
def test_before_tool_call_with_tool_filter(self):
"""Test that tool filter works correctly."""
execution_log = []
@before_tool_call(tools=["delete_file", "execute_code"])
def filtered_hook(context):
execution_log.append(context.tool_name)
return None
hooks = get_before_tool_call_hooks()
assert len(hooks) == 1
# Test with matching tool
mock_tool = Mock()
context = ToolCallHookContext(
tool_name="delete_file",
tool_input={},
tool=mock_tool,
)
hooks[0](context)
assert len(execution_log) == 1
assert execution_log[0] == "delete_file"
# Test with non-matching tool
context2 = ToolCallHookContext(
tool_name="read_file",
tool_input={},
tool=mock_tool,
)
hooks[0](context2)
# Should still be 1 (hook didn't execute for read_file)
assert len(execution_log) == 1
def test_before_tool_call_with_combined_filters(self):
"""Test that combined tool and agent filters work."""
execution_log = []
@before_tool_call(tools=["write_file"], agents=["Developer"])
def filtered_hook(context):
execution_log.append(f"{context.tool_name}-{context.agent.role}")
return None
hooks = get_before_tool_call_hooks()
mock_tool = Mock()
mock_agent = Mock(role="Developer")
# Test with both matching
context = ToolCallHookContext(
tool_name="write_file",
tool_input={},
tool=mock_tool,
agent=mock_agent,
)
hooks[0](context)
assert len(execution_log) == 1
assert execution_log[0] == "write_file-Developer"
# Test with tool matching but agent not
mock_agent.role = "Researcher"
context2 = ToolCallHookContext(
tool_name="write_file",
tool_input={},
tool=mock_tool,
agent=mock_agent,
)
hooks[0](context2)
# Should still be 1 (hook didn't execute)
assert len(execution_log) == 1
def test_after_tool_call_with_filter(self):
"""Test that after_tool_call decorator with filter works."""
@after_tool_call(tools=["web_search"])
def filtered_hook(context):
if context.tool_result:
return context.tool_result.upper()
return None
hooks = get_after_tool_call_hooks()
mock_tool = Mock()
# Test with matching tool
context = ToolCallHookContext(
tool_name="web_search",
tool_input={},
tool=mock_tool,
tool_result="result",
)
result = hooks[0](context)
assert result == "RESULT"
# Test with non-matching tool
context2 = ToolCallHookContext(
tool_name="other_tool",
tool_input={},
tool=mock_tool,
tool_result="result",
)
result2 = hooks[0](context2)
assert result2 is None # Hook didn't run, returns None
class TestDecoratorAttributes:
"""Test that decorators set proper attributes on functions."""
def test_before_llm_call_sets_attribute(self):
"""Test that decorator sets is_before_llm_call_hook attribute."""
@before_llm_call
def test_hook(context):
pass
assert hasattr(test_hook, "is_before_llm_call_hook")
assert test_hook.is_before_llm_call_hook is True
def test_before_tool_call_sets_attributes_with_filters(self):
"""Test that decorator with filters sets filter attributes."""
@before_tool_call(tools=["delete_file"], agents=["Dev"])
def test_hook(context):
return None
assert hasattr(test_hook, "is_before_tool_call_hook")
assert test_hook.is_before_tool_call_hook is True
assert hasattr(test_hook, "_filter_tools")
assert test_hook._filter_tools == ["delete_file"]
assert hasattr(test_hook, "_filter_agents")
assert test_hook._filter_agents == ["Dev"]
class TestMultipleDecorators:
"""Test using multiple decorators together."""
def test_multiple_decorators_all_register(self):
"""Test that multiple decorated functions all register."""
@before_llm_call
def hook1(context):
pass
@before_llm_call
def hook2(context):
pass
@after_llm_call
def hook3(context):
return None
before_hooks = get_before_llm_call_hooks()
after_hooks = get_after_llm_call_hooks()
assert len(before_hooks) == 2
assert len(after_hooks) == 1
def test_decorator_and_manual_registration_work_together(self):
"""Test that decorators and manual registration can be mixed."""
from crewai.hooks import register_before_tool_call_hook
@before_tool_call
def decorated_hook(context):
return None
def manual_hook(context):
return None
register_before_tool_call_hook(manual_hook)
hooks = get_before_tool_call_hooks()
assert len(hooks) == 2
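
# --- Illustrative sketch (assumed usage, not part of the test suite) ---
# Decorator-based and manual registration feed the same global registry, so
# the two styles can be mixed freely and unregistered the same way. The hook
# bodies are hypothetical.
from crewai.hooks import (
    register_before_tool_call_hook,
    unregister_before_tool_call_hook,
)


@before_tool_call(tools=["delete_file"])
def _guard_deletes(context):
    return None  # None allows the call; returning False would block it


def _audit_tools(context):
    print(f"tool={context.tool_name} input={context.tool_input}")
    return None


register_before_tool_call_hook(_audit_tools)    # manual registration...
unregister_before_tool_call_hook(_audit_tools)  # ...and manual removal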

View File

@@ -0,0 +1,395 @@
"""Tests for human approval functionality in hooks."""
from __future__ import annotations
from unittest.mock import Mock, patch
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
import pytest
@pytest.fixture
def mock_executor():
"""Create a mock executor for LLM hook context."""
executor = Mock()
executor.messages = [{"role": "system", "content": "Test message"}]
executor.agent = Mock(role="Test Agent")
executor.task = Mock(description="Test Task")
executor.crew = Mock()
executor.llm = Mock()
executor.iterations = 0
return executor
@pytest.fixture
def mock_tool():
"""Create a mock tool for tool hook context."""
tool = Mock()
tool.name = "test_tool"
tool.description = "Test tool description"
return tool
@pytest.fixture
def mock_agent():
"""Create a mock agent."""
agent = Mock()
agent.role = "Test Agent"
return agent
@pytest.fixture
def mock_task():
"""Create a mock task."""
task = Mock()
task.description = "Test task"
return task
class TestLLMHookHumanInput:
"""Test request_human_input() on LLMCallHookContext."""
@patch("builtins.input", return_value="test response")
@patch("crewai.hooks.llm_hooks.event_listener")
def test_request_human_input_returns_user_response(
self, mock_event_listener, mock_input, mock_executor
):
"""Test that request_human_input returns the user's input."""
# Setup mock formatter
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = LLMCallHookContext(executor=mock_executor)
response = context.request_human_input(
prompt="Test prompt", default_message="Test default message"
)
assert response == "test response"
mock_input.assert_called_once()
@patch("builtins.input", return_value="")
@patch("crewai.hooks.llm_hooks.event_listener")
def test_request_human_input_returns_empty_string_on_enter(
self, mock_event_listener, mock_input, mock_executor
):
"""Test that pressing Enter returns empty string."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = LLMCallHookContext(executor=mock_executor)
response = context.request_human_input(prompt="Test")
assert response == ""
mock_input.assert_called_once()
@patch("builtins.input", return_value="test")
@patch("crewai.hooks.llm_hooks.event_listener")
def test_request_human_input_pauses_and_resumes_live_updates(
self, mock_event_listener, mock_input, mock_executor
):
"""Test that live updates are paused and resumed."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = LLMCallHookContext(executor=mock_executor)
context.request_human_input(prompt="Test")
# Verify pause was called
mock_formatter.pause_live_updates.assert_called_once()
# Verify resume was called
mock_formatter.resume_live_updates.assert_called_once()
@patch("builtins.input", side_effect=Exception("Input error"))
@patch("crewai.hooks.llm_hooks.event_listener")
def test_request_human_input_resumes_on_exception(
self, mock_event_listener, mock_input, mock_executor
):
"""Test that live updates are resumed even if input raises exception."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = LLMCallHookContext(executor=mock_executor)
with pytest.raises(Exception, match="Input error"):
context.request_human_input(prompt="Test")
# Verify resume was still called (in finally block)
mock_formatter.resume_live_updates.assert_called_once()
@patch("builtins.input", return_value=" test response ")
@patch("crewai.hooks.llm_hooks.event_listener")
def test_request_human_input_strips_whitespace(
self, mock_event_listener, mock_input, mock_executor
):
"""Test that user input is stripped of leading/trailing whitespace."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = LLMCallHookContext(executor=mock_executor)
response = context.request_human_input(prompt="Test")
assert response == "test response" # Whitespace stripped
class TestToolHookHumanInput:
"""Test request_human_input() on ToolCallHookContext."""
@patch("builtins.input", return_value="approve")
@patch("crewai.hooks.tool_hooks.event_listener")
def test_request_human_input_returns_user_response(
self, mock_event_listener, mock_input, mock_tool, mock_agent, mock_task
):
"""Test that request_human_input returns the user's input."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={"arg": "value"},
tool=mock_tool,
agent=mock_agent,
task=mock_task,
)
response = context.request_human_input(
prompt="Approve this tool?", default_message="Type 'approve':"
)
assert response == "approve"
mock_input.assert_called_once()
@patch("builtins.input", return_value="")
@patch("crewai.hooks.tool_hooks.event_listener")
def test_request_human_input_handles_empty_input(
self, mock_event_listener, mock_input, mock_tool
):
"""Test that empty input (Enter key) is handled correctly."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={},
tool=mock_tool,
)
response = context.request_human_input(prompt="Test")
assert response == ""
@patch("builtins.input", return_value="test")
@patch("crewai.hooks.tool_hooks.event_listener")
def test_request_human_input_pauses_and_resumes(
self, mock_event_listener, mock_input, mock_tool
):
"""Test that live updates are properly paused and resumed."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={},
tool=mock_tool,
)
context.request_human_input(prompt="Test")
mock_formatter.pause_live_updates.assert_called_once()
mock_formatter.resume_live_updates.assert_called_once()
@patch("builtins.input", side_effect=KeyboardInterrupt)
@patch("crewai.hooks.tool_hooks.event_listener")
def test_request_human_input_resumes_on_keyboard_interrupt(
self, mock_event_listener, mock_input, mock_tool
):
"""Test that live updates are resumed even on keyboard interrupt."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={},
tool=mock_tool,
)
with pytest.raises(KeyboardInterrupt):
context.request_human_input(prompt="Test")
# Verify resume was still called (in finally block)
mock_formatter.resume_live_updates.assert_called_once()
class TestApprovalHookIntegration:
"""Test integration scenarios with approval hooks."""
@patch("builtins.input", return_value="approve")
@patch("crewai.hooks.tool_hooks.event_listener")
def test_approval_hook_allows_execution(
self, mock_event_listener, mock_input, mock_tool
):
"""Test that approval hook allows execution when approved."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
def approval_hook(context: ToolCallHookContext) -> bool | None:
response = context.request_human_input(
prompt="Approve?", default_message="Type 'approve':"
)
return None if response == "approve" else False
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={},
tool=mock_tool,
)
result = approval_hook(context)
assert result is None # Allowed
assert mock_input.called
@patch("builtins.input", return_value="deny")
@patch("crewai.hooks.tool_hooks.event_listener")
def test_approval_hook_blocks_execution(
self, mock_event_listener, mock_input, mock_tool
):
"""Test that approval hook blocks execution when denied."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
def approval_hook(context: ToolCallHookContext) -> bool | None:
response = context.request_human_input(
prompt="Approve?", default_message="Type 'approve':"
)
return None if response == "approve" else False
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={},
tool=mock_tool,
)
result = approval_hook(context)
assert result is False # Blocked
assert mock_input.called
@patch("builtins.input", return_value="modified result")
@patch("crewai.hooks.tool_hooks.event_listener")
def test_review_hook_modifies_result(
self, mock_event_listener, mock_input, mock_tool
):
"""Test that review hook can modify tool results."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
def review_hook(context: ToolCallHookContext) -> str | None:
response = context.request_human_input(
prompt="Review result",
default_message="Press Enter to keep, or provide modified version:",
)
return response if response else None
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={},
tool=mock_tool,
tool_result="original result",
)
modified_result = review_hook(context)
assert modified_result == "modified result"
assert mock_input.called
@patch("builtins.input", return_value="")
@patch("crewai.hooks.tool_hooks.event_listener")
def test_review_hook_keeps_original_on_enter(
self, mock_event_listener, mock_input, mock_tool
):
"""Test that pressing Enter keeps original result."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
def review_hook(context: ToolCallHookContext) -> str | None:
response = context.request_human_input(
prompt="Review result", default_message="Press Enter to keep:"
)
return response if response else None
context = ToolCallHookContext(
tool_name="test_tool",
tool_input={},
tool=mock_tool,
tool_result="original result",
)
modified_result = review_hook(context)
assert modified_result is None # Keep original
class TestCostControlApproval:
"""Test cost control approval hook scenarios."""
@patch("builtins.input", return_value="yes")
@patch("crewai.hooks.llm_hooks.event_listener")
def test_cost_control_allows_when_approved(
self, mock_event_listener, mock_input, mock_executor
):
"""Test that expensive calls are allowed when approved."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
# Set high iteration count
mock_executor.iterations = 10
def cost_control_hook(context: LLMCallHookContext) -> None:
if context.iterations > 5:
response = context.request_human_input(
prompt=f"Iteration {context.iterations} - expensive call",
default_message="Type 'yes' to continue:",
)
if response.lower() != "yes":
print("Call blocked")
context = LLMCallHookContext(executor=mock_executor)
# Should not raise exception and should call input
cost_control_hook(context)
assert mock_input.called
@patch("builtins.input", return_value="no")
@patch("crewai.hooks.llm_hooks.event_listener")
def test_cost_control_logs_when_denied(
self, mock_event_listener, mock_input, mock_executor
):
"""Test that denied calls are logged."""
mock_formatter = Mock()
mock_event_listener.formatter = mock_formatter
mock_executor.iterations = 10
messages_logged = []
def cost_control_hook(context: LLMCallHookContext) -> None:
if context.iterations > 5:
response = context.request_human_input(
prompt=f"Iteration {context.iterations}",
default_message="Type 'yes' to continue:",
)
if response.lower() != "yes":
messages_logged.append("blocked")
context = LLMCallHookContext(executor=mock_executor)
cost_control_hook(context)
assert len(messages_logged) == 1
assert messages_logged[0] == "blocked"
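
# --- Illustrative sketch (assumed usage, not part of the test suite) ---
# In application code the approval and review patterns above are wired up
# through the registration API rather than invoked directly; the tool names
# and prompts here are hypothetical.
from crewai.hooks import (
    register_after_tool_call_hook,
    register_before_tool_call_hook,
)


def _approve_risky_tools(context):
    if context.tool_name not in {"delete_file", "execute_code"}:
        return None  # only gate the risky tools
    answer = context.request_human_input(prompt=f"Run {context.tool_name}?")
    return None if answer == "approve" else False  # False blocks the call


def _review_results(context):
    edited = context.request_human_input(prompt="Edit result or press Enter:")
    return edited or None  # None keeps the original tool result


def _install_approval_hooks():
    register_before_tool_call_hook(_approve_risky_tools)
    register_after_tool_call_hook(_review_results)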

View File

@@ -0,0 +1,311 @@
"""Unit tests for LLM hooks functionality."""
from __future__ import annotations
from unittest.mock import Mock
from crewai.hooks import (
    clear_all_llm_call_hooks,
    unregister_after_llm_call_hook,
    unregister_before_llm_call_hook,
)
import pytest
from crewai.hooks.llm_hooks import (
LLMCallHookContext,
get_after_llm_call_hooks,
get_before_llm_call_hooks,
register_after_llm_call_hook,
register_before_llm_call_hook,
)
@pytest.fixture
def mock_executor():
"""Create a mock executor for testing."""
executor = Mock()
executor.messages = [{"role": "system", "content": "Test message"}]
executor.agent = Mock(role="Test Agent")
executor.task = Mock(description="Test Task")
executor.crew = Mock()
executor.llm = Mock()
executor.iterations = 0
return executor
@pytest.fixture(autouse=True)
def clear_hooks():
"""Clear global hooks before and after each test."""
# Import the private variables to clear them
from crewai.hooks import llm_hooks
# Store original hooks
original_before = llm_hooks._before_llm_call_hooks.copy()
original_after = llm_hooks._after_llm_call_hooks.copy()
# Clear hooks
llm_hooks._before_llm_call_hooks.clear()
llm_hooks._after_llm_call_hooks.clear()
yield
# Restore original hooks
llm_hooks._before_llm_call_hooks.clear()
llm_hooks._after_llm_call_hooks.clear()
llm_hooks._before_llm_call_hooks.extend(original_before)
llm_hooks._after_llm_call_hooks.extend(original_after)
class TestLLMCallHookContext:
"""Test LLMCallHookContext initialization and attributes."""
def test_context_initialization(self, mock_executor):
"""Test that context is initialized correctly with executor."""
context = LLMCallHookContext(executor=mock_executor)
assert context.executor == mock_executor
assert context.messages == mock_executor.messages
assert context.agent == mock_executor.agent
assert context.task == mock_executor.task
assert context.crew == mock_executor.crew
assert context.llm == mock_executor.llm
assert context.iterations == mock_executor.iterations
assert context.response is None
def test_context_with_response(self, mock_executor):
"""Test that context includes response when provided."""
test_response = "Test LLM response"
context = LLMCallHookContext(executor=mock_executor, response=test_response)
assert context.response == test_response
def test_messages_are_mutable_reference(self, mock_executor):
"""Test that modifying context.messages modifies executor.messages."""
context = LLMCallHookContext(executor=mock_executor)
# Add a message through context
new_message = {"role": "user", "content": "New message"}
context.messages.append(new_message)
# Check that executor.messages is also modified
assert new_message in mock_executor.messages
assert len(mock_executor.messages) == 2
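
# --- Illustrative sketch (assumed usage, not part of the test suite) ---
# Because context.messages aliases executor.messages, a before-hook edits the
# prompt by mutating the list in place; reassigning context.messages would
# break the link. The injected content is hypothetical.
def _inject_guidance(context):
    context.messages.append(
        {"role": "system", "content": "Answer in one short paragraph."}
    )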
class TestBeforeLLMCallHooks:
"""Test before_llm_call hook registration and execution."""
def test_register_before_hook(self):
"""Test that before hooks are registered correctly."""
def test_hook(context):
pass
register_before_llm_call_hook(test_hook)
hooks = get_before_llm_call_hooks()
assert len(hooks) == 1
assert hooks[0] == test_hook
def test_multiple_before_hooks(self):
"""Test that multiple before hooks can be registered."""
def hook1(context):
pass
def hook2(context):
pass
register_before_llm_call_hook(hook1)
register_before_llm_call_hook(hook2)
hooks = get_before_llm_call_hooks()
assert len(hooks) == 2
assert hook1 in hooks
assert hook2 in hooks
def test_before_hook_can_modify_messages(self, mock_executor):
"""Test that before hooks can modify messages in-place."""
def add_message_hook(context):
context.messages.append({"role": "system", "content": "Added by hook"})
context = LLMCallHookContext(executor=mock_executor)
add_message_hook(context)
assert len(context.messages) == 2
assert context.messages[1]["content"] == "Added by hook"
def test_get_before_hooks_returns_copy(self):
"""Test that get_before_llm_call_hooks returns a copy."""
def test_hook(context):
pass
register_before_llm_call_hook(test_hook)
hooks1 = get_before_llm_call_hooks()
hooks2 = get_before_llm_call_hooks()
# They should be equal but not the same object
assert hooks1 == hooks2
assert hooks1 is not hooks2
class TestAfterLLMCallHooks:
"""Test after_llm_call hook registration and execution."""
def test_register_after_hook(self):
"""Test that after hooks are registered correctly."""
def test_hook(context):
return None
register_after_llm_call_hook(test_hook)
hooks = get_after_llm_call_hooks()
assert len(hooks) == 1
assert hooks[0] == test_hook
def test_multiple_after_hooks(self):
"""Test that multiple after hooks can be registered."""
def hook1(context):
return None
def hook2(context):
return None
register_after_llm_call_hook(hook1)
register_after_llm_call_hook(hook2)
hooks = get_after_llm_call_hooks()
assert len(hooks) == 2
assert hook1 in hooks
assert hook2 in hooks
def test_after_hook_can_modify_response(self, mock_executor):
"""Test that after hooks can modify the response."""
original_response = "Original response"
def modify_response_hook(context):
if context.response:
return context.response.replace("Original", "Modified")
return None
context = LLMCallHookContext(executor=mock_executor, response=original_response)
modified = modify_response_hook(context)
assert modified == "Modified response"
def test_after_hook_returns_none_keeps_original(self, mock_executor):
"""Test that returning None keeps the original response."""
original_response = "Original response"
def no_change_hook(context):
return None
context = LLMCallHookContext(executor=mock_executor, response=original_response)
result = no_change_hook(context)
assert result is None
assert context.response == original_response
def test_get_after_hooks_returns_copy(self):
"""Test that get_after_llm_call_hooks returns a copy."""
def test_hook(context):
return None
register_after_llm_call_hook(test_hook)
hooks1 = get_after_llm_call_hooks()
hooks2 = get_after_llm_call_hooks()
# They should be equal but not the same object
assert hooks1 == hooks2
assert hooks1 is not hooks2
class TestLLMHooksIntegration:
"""Test integration scenarios with multiple hooks."""
def test_multiple_before_hooks_execute_in_order(self, mock_executor):
"""Test that multiple before hooks execute in registration order."""
execution_order = []
def hook1(context):
execution_order.append(1)
def hook2(context):
execution_order.append(2)
def hook3(context):
execution_order.append(3)
register_before_llm_call_hook(hook1)
register_before_llm_call_hook(hook2)
register_before_llm_call_hook(hook3)
context = LLMCallHookContext(executor=mock_executor)
hooks = get_before_llm_call_hooks()
for hook in hooks:
hook(context)
assert execution_order == [1, 2, 3]
def test_multiple_after_hooks_chain_modifications(self, mock_executor):
"""Test that multiple after hooks can chain modifications."""
def hook1(context):
if context.response:
return context.response + " [hook1]"
return None
def hook2(context):
if context.response:
return context.response + " [hook2]"
return None
register_after_llm_call_hook(hook1)
register_after_llm_call_hook(hook2)
context = LLMCallHookContext(executor=mock_executor, response="Original")
hooks = get_after_llm_call_hooks()
# Simulate chaining (how it would be used in practice)
result = context.response
for hook in hooks:
# Update context for next hook
context.response = result
modified = hook(context)
if modified is not None:
result = modified
assert result == "Original [hook1] [hook2]"
def test_unregister_before_hook(self):
"""Test that before hooks can be unregistered."""
def test_hook(context):
pass
register_before_llm_call_hook(test_hook)
unregister_before_llm_call_hook(test_hook)
hooks = get_before_llm_call_hooks()
assert len(hooks) == 0
def test_unregister_after_hook(self):
"""Test that after hooks can be unregistered."""
def test_hook(context):
return None
register_after_llm_call_hook(test_hook)
unregister_after_llm_call_hook(test_hook)
hooks = get_after_llm_call_hooks()
assert len(hooks) == 0
def test_clear_all_llm_call_hooks(self):
"""Test that all llm call hooks can be cleared."""
def test_hook(context):
pass
register_before_llm_call_hook(test_hook)
register_after_llm_call_hook(test_hook)
clear_all_llm_call_hooks()
hooks = get_before_llm_call_hooks()
assert len(hooks) == 0
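
# --- Illustrative sketch of the expected executor behavior (not a test) ---
# The chaining test above mirrors how after-hooks are meant to be applied:
# each hook sees the previous hook's output through context.response, and a
# None return leaves the response unchanged.
def _apply_after_hooks(context, response):
    result = response
    for hook in get_after_llm_call_hooks():
        context.response = result  # expose the current value to the hook
        modified = hook(context)
        if modified is not None:
            result = modified
    return result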

View File

@@ -0,0 +1,498 @@
from __future__ import annotations
from unittest.mock import Mock
from crewai.hooks import (
    clear_all_tool_call_hooks,
    unregister_after_tool_call_hook,
    unregister_before_tool_call_hook,
)
import pytest
from crewai.hooks.tool_hooks import (
ToolCallHookContext,
get_after_tool_call_hooks,
get_before_tool_call_hooks,
register_after_tool_call_hook,
register_before_tool_call_hook,
)
@pytest.fixture
def mock_tool():
"""Create a mock tool for testing."""
tool = Mock()
tool.name = "test_tool"
tool.description = "Test tool description"
return tool
@pytest.fixture
def mock_agent():
"""Create a mock agent for testing."""
agent = Mock()
agent.role = "Test Agent"
return agent
@pytest.fixture
def mock_task():
"""Create a mock task for testing."""
task = Mock()
task.description = "Test task"
return task
@pytest.fixture
def mock_crew():
"""Create a mock crew for testing."""
crew = Mock()
return crew
@pytest.fixture(autouse=True)
def clear_hooks():
"""Clear global hooks before and after each test."""
from crewai.hooks import tool_hooks
# Store original hooks
original_before = tool_hooks._before_tool_call_hooks.copy()
original_after = tool_hooks._after_tool_call_hooks.copy()
# Clear hooks
tool_hooks._before_tool_call_hooks.clear()
tool_hooks._after_tool_call_hooks.clear()
yield
# Restore original hooks
tool_hooks._before_tool_call_hooks.clear()
tool_hooks._after_tool_call_hooks.clear()
tool_hooks._before_tool_call_hooks.extend(original_before)
tool_hooks._after_tool_call_hooks.extend(original_after)
class TestToolCallHookContext:
"""Test ToolCallHookContext initialization and attributes."""
def test_context_initialization(self, mock_tool, mock_agent, mock_task, mock_crew):
"""Test that context is initialized correctly."""
tool_input = {"arg1": "value1", "arg2": "value2"}
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
agent=mock_agent,
task=mock_task,
crew=mock_crew,
)
assert context.tool_name == "test_tool"
assert context.tool_input == tool_input
assert context.tool == mock_tool
assert context.agent == mock_agent
assert context.task == mock_task
assert context.crew == mock_crew
assert context.tool_result is None
def test_context_with_result(self, mock_tool):
"""Test that context includes result when provided."""
tool_input = {"arg1": "value1"}
tool_result = "Test tool result"
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
tool_result=tool_result,
)
assert context.tool_result == tool_result
def test_tool_input_is_mutable_reference(self, mock_tool):
"""Test that modifying context.tool_input modifies the original dict."""
tool_input = {"arg1": "value1"}
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
)
# Modify through context
context.tool_input["arg2"] = "value2"
# Check that original dict is also modified
assert "arg2" in tool_input
assert tool_input["arg2"] == "value2"
class TestBeforeToolCallHooks:
"""Test before_tool_call hook registration and execution."""
def test_register_before_hook(self):
"""Test that before hooks are registered correctly."""
def test_hook(context):
return None
register_before_tool_call_hook(test_hook)
hooks = get_before_tool_call_hooks()
assert len(hooks) == 1
assert hooks[0] == test_hook
def test_multiple_before_hooks(self):
"""Test that multiple before hooks can be registered."""
def hook1(context):
return None
def hook2(context):
return None
register_before_tool_call_hook(hook1)
register_before_tool_call_hook(hook2)
hooks = get_before_tool_call_hooks()
assert len(hooks) == 2
assert hook1 in hooks
assert hook2 in hooks
def test_before_hook_can_block_execution(self, mock_tool):
"""Test that before hooks can block tool execution."""
def block_hook(context):
if context.tool_name == "dangerous_tool":
return False # Block execution
return None # Allow execution
tool_input = {}
context = ToolCallHookContext(
tool_name="dangerous_tool",
tool_input=tool_input,
tool=mock_tool,
)
result = block_hook(context)
assert result is False
def test_before_hook_can_allow_execution(self, mock_tool):
"""Test that before hooks can explicitly allow execution."""
def allow_hook(context):
return None # Allow execution
tool_input = {}
context = ToolCallHookContext(
tool_name="safe_tool",
tool_input=tool_input,
tool=mock_tool,
)
result = allow_hook(context)
assert result is None
def test_before_hook_can_modify_input(self, mock_tool):
"""Test that before hooks can modify tool input in-place."""
def modify_input_hook(context):
context.tool_input["modified_by_hook"] = True
return None
tool_input = {"arg1": "value1"}
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
)
modify_input_hook(context)
assert "modified_by_hook" in context.tool_input
assert context.tool_input["modified_by_hook"] is True
def test_get_before_hooks_returns_copy(self):
"""Test that get_before_tool_call_hooks returns a copy."""
def test_hook(context):
return None
register_before_tool_call_hook(test_hook)
hooks1 = get_before_tool_call_hooks()
hooks2 = get_before_tool_call_hooks()
# They should be equal but not the same object
assert hooks1 == hooks2
assert hooks1 is not hooks2
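
# --- Illustrative sketch (assumed usage, not part of the test suite) ---
# One before-hook can combine the three behaviors tested above: mutate
# tool_input in place, allow by returning None, or block by returning False.
def _example_policy(context):
    context.tool_input.setdefault("dry_run", True)  # in-place mutation
    if context.tool_name == "dangerous_tool":
        return False  # block execution
    return None  # allow execution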
class TestAfterToolCallHooks:
"""Test after_tool_call hook registration and execution."""
def test_register_after_hook(self):
"""Test that after hooks are registered correctly."""
def test_hook(context):
return None
register_after_tool_call_hook(test_hook)
hooks = get_after_tool_call_hooks()
assert len(hooks) == 1
assert hooks[0] == test_hook
def test_multiple_after_hooks(self):
"""Test that multiple after hooks can be registered."""
def hook1(context):
return None
def hook2(context):
return None
register_after_tool_call_hook(hook1)
register_after_tool_call_hook(hook2)
hooks = get_after_tool_call_hooks()
assert len(hooks) == 2
assert hook1 in hooks
assert hook2 in hooks
def test_after_hook_can_modify_result(self, mock_tool):
"""Test that after hooks can modify the tool result."""
original_result = "Original result"
def modify_result_hook(context):
if context.tool_result:
return context.tool_result.replace("Original", "Modified")
return None
tool_input = {}
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
tool_result=original_result,
)
modified = modify_result_hook(context)
assert modified == "Modified result"
def test_after_hook_returns_none_keeps_original(self, mock_tool):
"""Test that returning None keeps the original result."""
original_result = "Original result"
def no_change_hook(context):
return None
tool_input = {}
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
tool_result=original_result,
)
result = no_change_hook(context)
assert result is None
assert context.tool_result == original_result
def test_get_after_hooks_returns_copy(self):
"""Test that get_after_tool_call_hooks returns a copy."""
def test_hook(context):
return None
register_after_tool_call_hook(test_hook)
hooks1 = get_after_tool_call_hooks()
hooks2 = get_after_tool_call_hooks()
# They should be equal but not the same object
assert hooks1 == hooks2
assert hooks1 is not hooks2
class TestToolHooksIntegration:
"""Test integration scenarios with multiple hooks."""
def test_multiple_before_hooks_execute_in_order(self, mock_tool):
"""Test that multiple before hooks execute in registration order."""
execution_order = []
def hook1(context):
execution_order.append(1)
return None
def hook2(context):
execution_order.append(2)
return None
def hook3(context):
execution_order.append(3)
return None
register_before_tool_call_hook(hook1)
register_before_tool_call_hook(hook2)
register_before_tool_call_hook(hook3)
tool_input = {}
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
)
hooks = get_before_tool_call_hooks()
for hook in hooks:
hook(context)
assert execution_order == [1, 2, 3]
def test_first_blocking_hook_stops_execution(self, mock_tool):
"""Test that first hook returning False blocks execution."""
execution_order = []
def hook1(context):
execution_order.append(1)
return None # Allow
def hook2(context):
execution_order.append(2)
return False # Block
def hook3(context):
execution_order.append(3)
return None # This shouldn't run
register_before_tool_call_hook(hook1)
register_before_tool_call_hook(hook2)
register_before_tool_call_hook(hook3)
tool_input = {}
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
)
hooks = get_before_tool_call_hooks()
blocked = False
for hook in hooks:
result = hook(context)
if result is False:
blocked = True
break
assert blocked is True
assert execution_order == [1, 2] # hook3 didn't run
def test_multiple_after_hooks_chain_modifications(self, mock_tool):
"""Test that multiple after hooks can chain modifications."""
def hook1(context):
if context.tool_result:
return context.tool_result + " [hook1]"
return None
def hook2(context):
if context.tool_result:
return context.tool_result + " [hook2]"
return None
register_after_tool_call_hook(hook1)
register_after_tool_call_hook(hook2)
tool_input = {}
context = ToolCallHookContext(
tool_name="test_tool",
tool_input=tool_input,
tool=mock_tool,
tool_result="Original",
)
hooks = get_after_tool_call_hooks()
# Simulate chaining (how it would be used in practice)
result = context.tool_result
for hook in hooks:
# Update context for next hook
context.tool_result = result
modified = hook(context)
if modified is not None:
result = modified
assert result == "Original [hook1] [hook2]"
def test_hooks_with_validation_and_sanitization(self, mock_tool):
"""Test a realistic scenario with validation and sanitization hooks."""
# Validation hook (before)
def validate_file_path(context):
if context.tool_name == "write_file":
file_path = context.tool_input.get("file_path", "")
if ".env" in file_path:
return False # Block sensitive files
return None
# Sanitization hook (after)
def sanitize_secrets(context):
if context.tool_result and "SECRET_KEY" in context.tool_result:
return context.tool_result.replace("SECRET_KEY=abc123", "SECRET_KEY=[REDACTED]")
return None
register_before_tool_call_hook(validate_file_path)
register_after_tool_call_hook(sanitize_secrets)
# Test blocking
blocked_context = ToolCallHookContext(
tool_name="write_file",
tool_input={"file_path": ".env"},
tool=mock_tool,
)
before_hooks = get_before_tool_call_hooks()
blocked = False
for hook in before_hooks:
if hook(blocked_context) is False:
blocked = True
break
assert blocked is True
# Test sanitization
sanitize_context = ToolCallHookContext(
tool_name="read_file",
tool_input={"file_path": "config.txt"},
tool=mock_tool,
tool_result="Content: SECRET_KEY=abc123",
)
after_hooks = get_after_tool_call_hooks()
result = sanitize_context.tool_result
for hook in after_hooks:
sanitize_context.tool_result = result
modified = hook(sanitize_context)
if modified is not None:
result = modified
assert "SECRET_KEY=[REDACTED]" in result
assert "abc123" not in result
def test_unregister_before_hook(self):
"""Test that before hooks can be unregistered."""
def test_hook(context):
pass
register_before_tool_call_hook(test_hook)
unregister_before_tool_call_hook(test_hook)
hooks = get_before_tool_call_hooks()
assert len(hooks) == 0
def test_unregister_after_hook(self):
"""Test that after hooks can be unregistered."""
def test_hook(context):
return None
register_after_tool_call_hook(test_hook)
unregister_after_tool_call_hook(test_hook)
hooks = get_after_tool_call_hooks()
assert len(hooks) == 0
def test_clear_all_tool_call_hooks(self):
"""Test that all tool call hooks can be cleared."""
def test_hook(context):
pass
register_before_tool_call_hook(test_hook)
register_after_tool_call_hook(test_hook)
clear_all_tool_call_hooks()
hooks = get_before_tool_call_hooks()
assert len(hooks) == 0
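
# --- Illustrative sketch of the expected executor behavior (not a test) ---
# Mirroring the blocking test above, before-hooks run in registration order
# and the first one that returns False short-circuits the tool call:
def _run_before_hooks(context):
    for hook in get_before_tool_call_hooks():
        if hook(context) is False:
            return False  # blocked; later hooks never run
    return True  # all hooks allowed the call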

View File

@@ -340,7 +340,7 @@ def test_sync_task_execution(researcher, writer):
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
@@ -412,7 +412,7 @@ def test_manager_agent_delegating_to_assigned_task_agent(researcher, writer):
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
@@ -513,7 +513,7 @@ def test_manager_agent_delegates_with_varied_role_cases():
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
task.output = mock_task_output
@@ -611,7 +611,7 @@ def test_crew_with_delegating_agents_should_not_override_task_tools(ceo, writer)
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
@@ -669,7 +669,7 @@ def test_crew_with_delegating_agents_should_not_override_agent_tools(ceo, writer
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
@@ -788,7 +788,7 @@ def test_task_tools_override_agent_tools_with_allow_delegation(researcher, write
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# We mock execute_sync to verify which tools get used at runtime
@@ -1225,7 +1225,7 @@ async def test_async_task_execution_call_count(researcher, writer):
# Create a valid TaskOutput instance to mock the return value
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Create a MagicMock Future instance
@@ -1784,7 +1784,7 @@ def test_hierarchical_kickoff_usage_metrics_include_manager(researcher):
Task,
"execute_sync",
return_value=TaskOutput(
description="dummy", raw="Hello", agent=researcher.role
description="dummy", raw="Hello", agent=researcher.role, messages=[]
),
):
crew.kickoff()
@@ -1828,7 +1828,7 @@ def test_hierarchical_crew_creation_tasks_with_agents(researcher, writer):
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
@@ -1881,7 +1881,7 @@ def test_hierarchical_crew_creation_tasks_with_async_execution(researcher, write
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Create a mock Future that returns our TaskOutput
@@ -2246,11 +2246,13 @@ def test_conditional_task_uses_last_output(researcher, writer):
description="First task output",
raw="First success output", # Will be used by third task's condition
agent=researcher.role,
messages=[],
)
mock_third = TaskOutput(
description="Third task output",
raw="Third task executed", # Output when condition succeeds using first task output
agent=writer.role,
messages=[],
)
# Set up mocks for task execution and conditional logic
@@ -2318,11 +2320,13 @@ def test_conditional_tasks_result_collection(researcher, writer):
description="Success output",
raw="Success output", # Triggers third task's condition
agent=researcher.role,
messages=[],
)
mock_conditional = TaskOutput(
description="Conditional output",
raw="Conditional task executed",
agent=writer.role,
messages=[],
)
# Set up mocks for task execution and conditional logic
@@ -2399,6 +2403,7 @@ def test_multiple_conditional_tasks(researcher, writer):
description="Mock success",
raw="Success and proceed output",
agent=researcher.role,
messages=[],
)
# Set up mocks for task execution
@@ -2806,7 +2811,7 @@ def test_manager_agent(researcher, writer):
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Because we are mocking execute_sync, we never hit the underlying _execute_core
@@ -3001,6 +3006,7 @@ def test_replay_feature(researcher, writer):
output_format=OutputFormat.RAW,
pydantic=None,
summary="Mocked output for list of ideas",
messages=[],
)
crew.kickoff()
@@ -3052,6 +3058,7 @@ def test_crew_task_db_init():
output_format=OutputFormat.RAW,
pydantic=None,
summary="Write about AI in healthcare...",
messages=[],
)
crew.kickoff()
@@ -3114,6 +3121,7 @@ def test_replay_task_with_context():
output_format=OutputFormat.RAW,
pydantic=None,
summary="Detailed report on AI advancements...",
messages=[],
)
mock_task_output2 = TaskOutput(
description="Summarize the AI advancements report.",
@@ -3123,6 +3131,7 @@ def test_replay_task_with_context():
output_format=OutputFormat.RAW,
pydantic=None,
summary="Summary of the AI advancements report...",
messages=[],
)
mock_task_output3 = TaskOutput(
description="Write an article based on the AI advancements summary.",
@@ -3132,6 +3141,7 @@ def test_replay_task_with_context():
output_format=OutputFormat.RAW,
pydantic=None,
summary="Article on AI advancements...",
messages=[],
)
mock_task_output4 = TaskOutput(
description="Create a presentation based on the AI advancements article.",
@@ -3141,6 +3151,7 @@ def test_replay_task_with_context():
output_format=OutputFormat.RAW,
pydantic=None,
summary="Presentation on AI advancements...",
messages=[],
)
with patch.object(Task, "execute_sync") as mock_execute_task:
@@ -3164,6 +3175,70 @@ def test_replay_task_with_context():
db_handler.reset()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_replay_preserves_messages():
"""Test that replay preserves messages from stored task outputs."""
from crewai.utilities.types import LLMMessage
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
allow_delegation=False,
)
task = Task(
description="Say hello",
expected_output="A greeting",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], process=Process.sequential)
mock_messages: list[LLMMessage] = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Say hello"},
{"role": "assistant", "content": "Hello!"},
]
mock_task_output = TaskOutput(
description="Say hello",
raw="Hello!",
agent="Test Agent",
messages=mock_messages,
)
with patch.object(Task, "execute_sync", return_value=mock_task_output):
crew.kickoff()
# Verify the task output was stored with messages
db_handler = TaskOutputStorageHandler()
stored_outputs = db_handler.load()
assert stored_outputs is not None
assert len(stored_outputs) > 0
# Verify messages are in the stored output
stored_output = stored_outputs[0]["output"]
assert "messages" in stored_output
assert len(stored_output["messages"]) == 3
assert stored_output["messages"][0]["role"] == "system"
assert stored_output["messages"][1]["role"] == "user"
assert stored_output["messages"][2]["role"] == "assistant"
# Replay the task and verify messages are preserved
with patch.object(Task, "execute_sync", return_value=mock_task_output):
replayed_output = crew.replay(str(task.id))
# Verify the replayed task output has messages
assert len(replayed_output.tasks_output) > 0
replayed_task_output = replayed_output.tasks_output[0]
assert hasattr(replayed_task_output, "messages")
assert isinstance(replayed_task_output.messages, list)
assert len(replayed_task_output.messages) == 3
db_handler.reset()
@pytest.mark.vcr(filter_headers=["authorization"])
def test_replay_with_context():
agent = Agent(role="test_agent", backstory="Test Description", goal="Test Goal")
@@ -3181,6 +3256,7 @@ def test_replay_with_context():
pydantic=None,
json_dict={},
output_format=OutputFormat.RAW,
messages=[],
)
task1.output = context_output
@@ -3241,6 +3317,7 @@ def test_replay_with_context_set_to_nullable():
description="Test Task Output",
raw="test raw output",
agent="test_agent",
messages=[],
)
crew.kickoff()
@@ -3264,6 +3341,7 @@ def test_replay_with_invalid_task_id():
pydantic=None,
json_dict={},
output_format=OutputFormat.RAW,
messages=[],
)
task1.output = context_output
@@ -3328,6 +3406,7 @@ def test_replay_interpolates_inputs_properly(mock_interpolate_inputs):
pydantic=None,
json_dict={},
output_format=OutputFormat.RAW,
messages=[],
)
task1.output = context_output
@@ -3386,6 +3465,7 @@ def test_replay_setup_context():
pydantic=None,
json_dict={},
output_format=OutputFormat.RAW,
messages=[],
)
task1.output = context_output
crew = Crew(agents=[agent], tasks=[task1, task2], process=Process.sequential)
@@ -3619,6 +3699,7 @@ def test_conditional_should_skip(researcher, writer):
description="Task 1 description",
raw="Task 1 output",
agent="Researcher",
messages=[],
)
result = crew_met.kickoff()
@@ -3653,6 +3734,7 @@ def test_conditional_should_execute(researcher, writer):
description="Task 1 description",
raw="Task 1 output",
agent="Researcher",
messages=[],
)
crew_met.kickoff()
@@ -3824,7 +3906,7 @@ def test_task_tools_preserve_code_execution_tools():
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
with patch.object(
@@ -3878,7 +3960,7 @@ def test_multimodal_flag_adds_multimodal_tools():
crew = Crew(agents=[multimodal_agent], tasks=[task], process=Process.sequential)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
description="Mock description", raw="mocked output", agent="mocked agent", messages=[]
)
# Mock execute_sync to verify the tools passed at runtime
@@ -3942,6 +4024,7 @@ def test_multimodal_agent_image_tool_handling():
description="Mock description",
raw="A detailed analysis of the image",
agent="Image Analyst",
messages=[],
)
with patch.object(Task, "execute_sync") as mock_execute_sync:

View File

@@ -162,6 +162,7 @@ def test_task_callback_returns_task_output():
"name": task.name or task.description,
"expected_output": "Bullet point list of 5 interesting ideas.",
"output_format": OutputFormat.RAW,
"messages": [],
}
assert output_dict == expected_output
@@ -696,8 +697,13 @@ def test_save_task_json_output():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_save_task_pydantic_output():
import uuid
def test_save_task_pydantic_output(tmp_path, monkeypatch):
"""Test saving pydantic output to a file.
    Uses the tmp_path fixture and monkeypatch to change the working
    directory, avoiding file system race conditions on enterprise systems.
"""
from pathlib import Path
class ScoreOutput(BaseModel):
score: int
@@ -709,7 +715,9 @@ def test_save_task_pydantic_output():
allow_delegation=False,
)
output_file = f"score_{uuid.uuid4()}.json"
monkeypatch.chdir(tmp_path)
output_file = "score_output.json"
task = Task(
description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'",
expected_output="The score of the title.",
@@ -721,11 +729,9 @@ def test_save_task_pydantic_output():
crew = Crew(agents=[scorer], tasks=[task])
crew.kickoff()
output_file_exists = os.path.exists(output_file)
assert output_file_exists
assert {"score": 4} == json.loads(open(output_file).read())
if output_file_exists:
os.remove(output_file)
output_path = Path(output_file).resolve()
assert output_path.exists()
assert {"score": 4} == json.loads(output_path.read_text())
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1680,3 +1686,44 @@ def test_task_copy_with_list_context():
assert isinstance(copied_task2.context, list)
assert len(copied_task2.context) == 1
assert copied_task2.context[0] is task1
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_output_includes_messages():
"""Test that TaskOutput includes messages from agent execution."""
researcher = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
backstory="You're an expert researcher, specialized in technology, software engineering, AI and startups. You work as a freelancer and is now working on doing research and analysis for a new customer.",
allow_delegation=False,
)
task1 = Task(
description="Give me a list of 3 interesting ideas about AI.",
expected_output="Bullet point list of 3 ideas.",
agent=researcher,
)
task2 = Task(
description="Summarize the ideas from the previous task.",
expected_output="A summary of the ideas.",
agent=researcher,
)
crew = Crew(agents=[researcher], tasks=[task1, task2], process=Process.sequential)
result = crew.kickoff()
# Verify both tasks have messages
assert len(result.tasks_output) == 2
# Check first task output has messages
task1_output = result.tasks_output[0]
assert hasattr(task1_output, "messages")
assert isinstance(task1_output.messages, list)
assert len(task1_output.messages) > 0
# Check second task output has messages
task2_output = result.tasks_output[1]
assert hasattr(task2_output, "messages")
assert isinstance(task2_output.messages, list)
assert len(task2_output.messages) > 0
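
# --- Illustrative sketch (assumed usage, not part of the test suite) ---
# TaskOutput now records the conversation that produced it; messages uses the
# same role/content shape as LLM chat messages. TaskOutput is imported at the
# top of this module; the values below are hypothetical.
from crewai.utilities.types import LLMMessage

_example_messages: list[LLMMessage] = [
    {"role": "user", "content": "Say hello"},
    {"role": "assistant", "content": "Hello!"},
]
_example_output = TaskOutput(
    description="Say hello",
    raw="Hello!",
    agent="Greeter",
    messages=_example_messages,
)
assert _example_output.messages[-1]["content"] == "Hello!"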

View File

@@ -38,6 +38,7 @@ def test_task_without_guardrail():
agent.role = "test_agent"
agent.execute_task.return_value = "test result"
agent.crew = None
agent.last_messages = []
task = create_smart_task(description="Test task", expected_output="Output")
@@ -56,6 +57,7 @@ def test_task_with_successful_guardrail_func():
agent.role = "test_agent"
agent.execute_task.return_value = "test result"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test task", expected_output="Output", guardrail=guardrail
@@ -76,6 +78,7 @@ def test_task_with_failing_guardrail():
agent.role = "test_agent"
agent.execute_task.side_effect = ["bad result", "good result"]
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test task",
@@ -103,6 +106,7 @@ def test_task_with_guardrail_retries():
agent.role = "test_agent"
agent.execute_task.return_value = "bad result"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test task",
@@ -128,6 +132,7 @@ def test_guardrail_error_in_context():
agent = Mock()
agent.role = "test_agent"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test task",
@@ -295,6 +300,7 @@ def test_hallucination_guardrail_integration():
agent.role = "test_agent"
agent.execute_task.return_value = "test result"
agent.crew = None
agent.last_messages = []
mock_llm = Mock(spec=LLM)
guardrail = HallucinationGuardrail(
@@ -342,6 +348,7 @@ def test_multiple_guardrails_sequential_processing():
agent.role = "sequential_agent"
agent.execute_task.return_value = "original text"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test sequential guardrails",
@@ -391,6 +398,7 @@ def test_multiple_guardrails_with_validation_failure():
agent.role = "validation_agent"
agent.execute_task = mock_execute_task
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test guardrails with validation",
@@ -432,6 +440,7 @@ def test_multiple_guardrails_with_mixed_string_and_taskoutput():
agent.role = "mixed_agent"
agent.execute_task.return_value = "original"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test mixed return types",
@@ -469,6 +478,7 @@ def test_multiple_guardrails_with_retry_on_middle_guardrail():
agent.role = "retry_agent"
agent.execute_task.return_value = "base"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test retry in middle guardrail",
@@ -500,6 +510,7 @@ def test_multiple_guardrails_with_max_retries_exceeded():
agent.role = "failing_agent"
agent.execute_task.return_value = "test"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test max retries with multiple guardrails",
@@ -523,6 +534,7 @@ def test_multiple_guardrails_empty_list():
agent.role = "empty_agent"
agent.execute_task.return_value = "no guardrails"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test empty guardrails list",
@@ -582,6 +594,7 @@ def test_multiple_guardrails_processing_order():
agent.role = "order_agent"
agent.execute_task.return_value = "base"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test processing order",
@@ -625,6 +638,7 @@ def test_multiple_guardrails_with_pydantic_output():
agent.role = "pydantic_agent"
agent.execute_task.return_value = "test content"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test guardrails with Pydantic",
@@ -658,6 +672,7 @@ def test_guardrails_vs_single_guardrail_mutual_exclusion():
agent.role = "exclusion_agent"
agent.execute_task.return_value = "test"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test mutual exclusion",
@@ -700,6 +715,7 @@ def test_per_guardrail_independent_retry_tracking():
agent.role = "independent_retry_agent"
agent.execute_task.return_value = "base"
agent.crew = None
agent.last_messages = []
task = create_smart_task(
description="Test independent retry tracking",

View File

@@ -0,0 +1,112 @@
"""Tests to verify that traces are sent when enabled and not sent when disabled.
VCR will record HTTP interactions. Inspect cassettes to verify tracing behavior.
"""
import pytest
from crewai import Agent, Crew, Task
from tests.utils import wait_for_event_handlers
class TestTraceEnableDisable:
"""Test suite to verify trace sending behavior with VCR cassette recording."""
@pytest.mark.vcr(filter_headers=["authorization"])
def test_no_http_calls_when_disabled_via_env(self):
"""Test execution when tracing disabled via CREWAI_TRACING_ENABLED=false."""
with pytest.MonkeyPatch.context() as mp:
mp.setenv("CREWAI_TRACING_ENABLED", "false")
mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm="gpt-4o-mini",
)
task = Task(
description="Say hello",
expected_output="hello",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], verbose=False)
result = crew.kickoff()
wait_for_event_handlers()
assert result is not None
@pytest.mark.vcr(filter_headers=["authorization"])
def test_no_http_calls_when_disabled_via_tracing_false(self):
"""Test execution when tracing=False explicitly set."""
with pytest.MonkeyPatch.context() as mp:
mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm="gpt-4o-mini",
)
task = Task(
description="Say hello",
expected_output="hello",
agent=agent,
)
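# tracing=False on the Crew should take effect even without CREWAI_TRACING_ENABLED set (assumed kwarg-over-env precedence)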
crew = Crew(agents=[agent], tasks=[task], verbose=False, tracing=False)
result = crew.kickoff()
wait_for_event_handlers()
assert result is not None
@pytest.mark.vcr(filter_headers=["authorization"])
def test_trace_calls_when_enabled_via_env(self):
"""Test execution when tracing enabled via CREWAI_TRACING_ENABLED=true."""
with pytest.MonkeyPatch.context() as mp:
mp.setenv("CREWAI_TRACING_ENABLED", "true")
mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
mp.setenv("OTEL_SDK_DISABLED", "false")
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm="gpt-4o-mini",
)
task = Task(
description="Say hello",
expected_output="hello",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], verbose=False)
result = crew.kickoff()
wait_for_event_handlers()
assert result is not None
@pytest.mark.vcr(filter_headers=["authorization"])
def test_trace_calls_when_enabled_via_tracing_true(self):
"""Test execution when tracing=True explicitly set."""
with pytest.MonkeyPatch.context() as mp:
mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
mp.setenv("OTEL_SDK_DISABLED", "false")
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm="gpt-4o-mini",
)
task = Task(
description="Say hello",
expected_output="hello",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task], verbose=False, tracing=True)
result = crew.kickoff()
wait_for_event_handlers()
assert result is not None

View File

@@ -20,6 +20,21 @@ from tests.utils import wait_for_event_handlers
class TestTraceListenerSetup:
"""Test TraceListener is properly setup and collecting events"""
@pytest.fixture(autouse=True)
def mock_user_data_file_io(self):
"""Mock user data file I/O to prevent file system pollution between tests"""
with (
patch(
"crewai.events.listeners.tracing.utils._load_user_data",
return_value={},
),
patch(
"crewai.events.listeners.tracing.utils._save_user_data",
return_value=None,
),
):
yield
@pytest.fixture(autouse=True)
def mock_auth_token(self):
"""Mock authentication token for all tests in this class"""
@@ -45,6 +60,13 @@ class TestTraceListenerSetup:
"""Reset tracing singleton instances between tests"""
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_listener import EventListener
from crewai.events.listeners.tracing.utils import _tracing_enabled
# Reset the tracing enabled contextvar
try:
_tracing_enabled.set(None)
except (LookupError, AttributeError):
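# The contextvar may be missing or unset depending on crewai version/context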
pass
# Clear event bus handlers BEFORE creating any new singletons
with crewai_event_bus._rwlock.w_locked():
@@ -53,11 +75,19 @@ class TestTraceListenerSetup:
crewai_event_bus._handler_dependencies = {}
crewai_event_bus._execution_plan_cache = {}
# Reset TraceCollectionListener singleton
if hasattr(TraceCollectionListener, "_instance"):
TraceCollectionListener._instance = None
TraceCollectionListener._initialized = False
TraceCollectionListener._listeners_setup = False
# Reset TraceCollectionListener singleton - must reset instance attributes too
if TraceCollectionListener._instance is not None:
# Reset instance attributes that shadow class attributes (only if they exist as instance attrs)
instance_dict = TraceCollectionListener._instance.__dict__
if "_initialized" in instance_dict:
del TraceCollectionListener._instance._initialized
if "_listeners_setup" in instance_dict:
del TraceCollectionListener._instance._listeners_setup
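# Deleting the shadowing instance attributes restores fallback to the class-level defaults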
# Reset class attributes
TraceCollectionListener._instance = None
TraceCollectionListener._initialized = False
TraceCollectionListener._listeners_setup = False
# Reset EventListener singleton
if hasattr(EventListener, "_instance"):
@@ -72,10 +102,19 @@ class TestTraceListenerSetup:
crewai_event_bus._handler_dependencies = {}
crewai_event_bus._execution_plan_cache = {}
if hasattr(TraceCollectionListener, "_instance"):
TraceCollectionListener._instance = None
TraceCollectionListener._initialized = False
TraceCollectionListener._listeners_setup = False
# Reset TraceCollectionListener singleton - must reset instance attributes too
if TraceCollectionListener._instance is not None:
# Reset instance attributes that shadow class attributes (only if they exist as instance attrs)
instance_dict = TraceCollectionListener._instance.__dict__
if "_initialized" in instance_dict:
del TraceCollectionListener._instance._initialized
if "_listeners_setup" in instance_dict:
del TraceCollectionListener._instance._listeners_setup
# Reset class attributes
TraceCollectionListener._instance = None
TraceCollectionListener._initialized = False
TraceCollectionListener._listeners_setup = False
if hasattr(EventListener, "_instance"):
EventListener._instance = None
@@ -119,7 +158,15 @@ class TestTraceListenerSetup:
def test_trace_listener_collects_crew_events(self):
"""Test that trace listener properly collects events from crew execution"""
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
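# Extra flags below force telemetry/tracking/OTEL on, in case the test environment disables them globally (assumption)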
with patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "true",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
):
agent = Agent(
role="Test Agent",
goal="Test goal",
@@ -148,7 +195,15 @@ class TestTraceListenerSetup:
def test_batch_manager_finalizes_batch_clears_buffer(self):
"""Test that batch manager properly finalizes batch and clears buffer"""
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
with patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "true",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
):
agent = Agent(
role="Test Agent",
goal="Test goal",
@@ -206,7 +261,15 @@ class TestTraceListenerSetup:
def test_events_collection_batch_manager(self, mock_plus_api_calls):
"""Test that trace listener properly collects events from crew execution"""
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
with patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "true",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
):
agent = Agent(
role="Test Agent",
goal="Test goal",
@@ -300,7 +363,15 @@ class TestTraceListenerSetup:
def test_trace_listener_setup_correctly_for_crew(self):
"""Test that trace listener is set up correctly when enabled"""
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
with patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "true",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
):
agent = Agent(
role="Test Agent",
goal="Test goal",
@@ -318,11 +389,19 @@ class TestTraceListenerSetup:
Crew(agents=[agent], tasks=[task], verbose=True)
assert mock_listener_setup.call_count >= 1
@pytest.mark.vcr(filter_headers=["authorization"])
def test_trace_listener_setup_correctly_for_flow(self):
"""Test that trace listener is set up correctly when enabled"""
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
with patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "true",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
):
class FlowExample(Flow):
@start()
def start(self):
@@ -338,7 +417,15 @@ class TestTraceListenerSetup:
def test_trace_listener_ephemeral_batch(self):
"""Test that trace listener properly handles ephemeral batches"""
with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}),
patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "true",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
),
patch(
"crewai.events.listeners.tracing.trace_listener.TraceCollectionListener._check_authenticated",
return_value=False,
@@ -371,7 +458,15 @@ class TestTraceListenerSetup:
@pytest.mark.vcr(filter_headers=["authorization"])
def test_trace_listener_with_authenticated_user(self):
"""Test that trace listener properly handles authenticated batches"""
with patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "true"}):
with patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "true",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
):
agent = Agent(
role="Test Agent",
goal="Test goal",
@@ -433,7 +528,15 @@ class TestTraceListenerSetup:
"""Test first-time user trace collection logic with timeout behavior"""
with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "false"}),
patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "false",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
),
patch(
"crewai.events.listeners.tracing.utils._is_test_environment",
return_value=False,
@@ -472,6 +575,10 @@ class TestTraceListenerSetup:
trace_listener = TraceCollectionListener()
trace_listener.setup_listeners(crewai_event_bus)
trace_listener.first_time_handler = FirstTimeTraceHandler()
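# Wire the batch manager only when the first-time path applies, mirroring the production setup (inferred)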
if trace_listener.first_time_handler.initialize_for_first_time_user():
trace_listener.first_time_handler.set_batch_manager(trace_listener.batch_manager)
assert trace_listener.first_time_handler.is_first_time is True
assert trace_listener.first_time_handler.collected_events is False
@@ -494,7 +601,15 @@ class TestTraceListenerSetup:
"""Test first-time user trace collection when user accepts viewing traces"""
with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "false"}),
patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "false",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
),
patch(
"crewai.events.listeners.tracing.utils._is_test_environment",
return_value=False,
@@ -531,6 +646,12 @@ class TestTraceListenerSetup:
from crewai.events.event_bus import crewai_event_bus
trace_listener = TraceCollectionListener()
trace_listener.setup_listeners(crewai_event_bus)
# Re-initialize first-time handler after patches are applied to ensure clean state
trace_listener.first_time_handler = FirstTimeTraceHandler()
if trace_listener.first_time_handler.initialize_for_first_time_user():
trace_listener.first_time_handler.set_batch_manager(trace_listener.batch_manager)
trace_listener.batch_manager.ephemeral_trace_url = (
"https://crewai.com/trace/mock-id"
@@ -546,8 +667,6 @@ class TestTraceListenerSetup:
trace_listener.first_time_handler, "_display_ephemeral_trace_link"
) as mock_display_link,
):
trace_listener.setup_listeners(crewai_event_bus)
assert trace_listener.first_time_handler.is_first_time is True
trace_listener.first_time_handler.collected_events = True
@@ -567,7 +686,15 @@ class TestTraceListenerSetup:
def test_first_time_user_trace_consolidation_logic(self, mock_plus_api_calls):
"""Test the consolidation logic for first-time users vs regular tracing"""
with (
patch.dict(os.environ, {"CREWAI_TRACING_ENABLED": "false"}),
patch.dict(
os.environ,
{
"CREWAI_TRACING_ENABLED": "",
"CREWAI_DISABLE_TELEMETRY": "false",
"CREWAI_DISABLE_TRACKING": "false",
"OTEL_SDK_DISABLED": "false",
},
),
patch(
"crewai.events.listeners.tracing.utils._is_test_environment",
return_value=False,
@@ -588,6 +715,13 @@ class TestTraceListenerSetup:
crewai_event_bus._async_handlers = {}
trace_listener = TraceCollectionListener()
# Re-initialize first-time handler after patches are applied to ensure clean state
# This is necessary because the singleton may have been created before patches were active
trace_listener.first_time_handler = FirstTimeTraceHandler()
if trace_listener.first_time_handler.initialize_for_first_time_user():
trace_listener.first_time_handler.set_batch_manager(trace_listener.batch_manager)
trace_listener.setup_listeners(crewai_event_bus)
assert trace_listener.first_time_handler.is_first_time is True
@@ -668,40 +802,41 @@ class TestTraceListenerSetup:
def test_trace_batch_marked_as_failed_on_finalize_error(self):
"""Test that trace batch is marked as failed when finalization returns non-200 status"""
# Test the error handling logic directly in TraceBatchManager
batch_manager = TraceBatchManager()
with patch("crewai.events.listeners.tracing.trace_batch_manager.is_tracing_enabled_in_context", return_value=True):
batch_manager = TraceBatchManager()
# Initialize a batch
batch_manager.current_batch = batch_manager.initialize_batch(
user_context={"privacy_level": "standard"},
execution_metadata={
"execution_type": "crew",
"crew_name": "test_crew",
},
)
batch_manager.trace_batch_id = "test_batch_id_12345"
batch_manager.backend_initialized = True
# Mock the API responses
with (
patch.object(
batch_manager.plus_api,
"send_trace_events",
return_value=MagicMock(status_code=200),
),
patch.object(
batch_manager.plus_api,
"finalize_trace_batch",
return_value=MagicMock(status_code=500, text="Internal Server Error"),
),
patch.object(
batch_manager.plus_api,
"mark_trace_batch_as_failed",
) as mock_mark_failed,
):
# Call finalize_batch directly
batch_manager.finalize_batch()
# Verify that mark_trace_batch_as_failed was called with the error message
mock_mark_failed.assert_called_once_with(
"test_batch_id_12345", "Internal Server Error"
# Initialize a batch
batch_manager.current_batch = batch_manager.initialize_batch(
user_context={"privacy_level": "standard"},
execution_metadata={
"execution_type": "crew",
"crew_name": "test_crew",
},
)
batch_manager.trace_batch_id = "test_batch_id_12345"
batch_manager.backend_initialized = True
# Mock the API responses
with (
patch.object(
batch_manager.plus_api,
"send_trace_events",
return_value=MagicMock(status_code=200),
),
patch.object(
batch_manager.plus_api,
"finalize_trace_batch",
return_value=MagicMock(status_code=500, text="Internal Server Error"),
),
patch.object(
batch_manager.plus_api,
"mark_trace_batch_as_failed",
) as mock_mark_failed,
):
# Call finalize_batch directly
batch_manager.finalize_batch()
# Verify that mark_trace_batch_as_failed was called with the error message
mock_mark_failed.assert_called_once_with(
"test_batch_id_12345", "Internal Server Error"
)

View File

@@ -1,3 +1,3 @@
"""CrewAI development tools."""
__version__ = "1.4.1"
__version__ = "1.5.0"