Merge branch 'main' into gl/fix/ensure-complete-tool-signature

This commit is contained in:
Greyson Lalonde
2025-11-30 19:47:31 -05:00
216 changed files with 2786 additions and 1954 deletions

.env.test Normal file
View File

@@ -0,0 +1,161 @@
# =============================================================================
# Test Environment Variables
# =============================================================================
# This file contains all environment variables needed to run tests locally
# in a way that mimics the GitHub Actions CI environment.
# =============================================================================
# -----------------------------------------------------------------------------
# LLM Provider API Keys
# -----------------------------------------------------------------------------
OPENAI_API_KEY=fake-api-key
ANTHROPIC_API_KEY=fake-anthropic-key
GEMINI_API_KEY=fake-gemini-key
AZURE_API_KEY=fake-azure-key
OPENROUTER_API_KEY=fake-openrouter-key
# -----------------------------------------------------------------------------
# AWS Credentials
# -----------------------------------------------------------------------------
AWS_ACCESS_KEY_ID=fake-aws-access-key
AWS_SECRET_ACCESS_KEY=fake-aws-secret-key
AWS_DEFAULT_REGION=us-east-1
AWS_REGION_NAME=us-east-1
# -----------------------------------------------------------------------------
# Azure OpenAI Configuration
# -----------------------------------------------------------------------------
AZURE_ENDPOINT=https://fake-azure-endpoint.openai.azure.com
AZURE_OPENAI_ENDPOINT=https://fake-azure-endpoint.openai.azure.com
AZURE_OPENAI_API_KEY=fake-azure-openai-key
AZURE_API_VERSION=2024-02-15-preview
OPENAI_API_VERSION=2024-02-15-preview
# -----------------------------------------------------------------------------
# Google Cloud Configuration
# -----------------------------------------------------------------------------
#GOOGLE_CLOUD_PROJECT=fake-gcp-project
#GOOGLE_CLOUD_LOCATION=us-central1
# -----------------------------------------------------------------------------
# OpenAI Configuration
# -----------------------------------------------------------------------------
OPENAI_BASE_URL=https://api.openai.com/v1
OPENAI_API_BASE=https://api.openai.com/v1
# -----------------------------------------------------------------------------
# Search & Scraping Tool API Keys
# -----------------------------------------------------------------------------
SERPER_API_KEY=fake-serper-key
EXA_API_KEY=fake-exa-key
BRAVE_API_KEY=fake-brave-key
FIRECRAWL_API_KEY=fake-firecrawl-key
TAVILY_API_KEY=fake-tavily-key
SERPAPI_API_KEY=fake-serpapi-key
SERPLY_API_KEY=fake-serply-key
LINKUP_API_KEY=fake-linkup-key
PARALLEL_API_KEY=fake-parallel-key
# -----------------------------------------------------------------------------
# Exa Configuration
# -----------------------------------------------------------------------------
EXA_BASE_URL=https://api.exa.ai
# -----------------------------------------------------------------------------
# Web Scraping & Automation
# -----------------------------------------------------------------------------
BRIGHT_DATA_API_KEY=fake-brightdata-key
BRIGHT_DATA_ZONE=fake-zone
BRIGHTDATA_API_URL=https://api.brightdata.com
BRIGHTDATA_DEFAULT_TIMEOUT=600
BRIGHTDATA_DEFAULT_POLLING_INTERVAL=1
OXYLABS_USERNAME=fake-oxylabs-user
OXYLABS_PASSWORD=fake-oxylabs-pass
SCRAPFLY_API_KEY=fake-scrapfly-key
SCRAPEGRAPH_API_KEY=fake-scrapegraph-key
BROWSERBASE_API_KEY=fake-browserbase-key
BROWSERBASE_PROJECT_ID=fake-browserbase-project
HYPERBROWSER_API_KEY=fake-hyperbrowser-key
MULTION_API_KEY=fake-multion-key
APIFY_API_TOKEN=fake-apify-token
# -----------------------------------------------------------------------------
# Database & Vector Store Credentials
# -----------------------------------------------------------------------------
SINGLESTOREDB_URL=mysql://fake:fake@localhost:3306/fake
SINGLESTOREDB_HOST=localhost
SINGLESTOREDB_PORT=3306
SINGLESTOREDB_USER=fake-user
SINGLESTOREDB_PASSWORD=fake-password
SINGLESTOREDB_DATABASE=fake-database
SINGLESTOREDB_CONNECT_TIMEOUT=30
SNOWFLAKE_USER=fake-snowflake-user
SNOWFLAKE_PASSWORD=fake-snowflake-password
SNOWFLAKE_ACCOUNT=fake-snowflake-account
SNOWFLAKE_WAREHOUSE=fake-snowflake-warehouse
SNOWFLAKE_DATABASE=fake-snowflake-database
SNOWFLAKE_SCHEMA=fake-snowflake-schema
WEAVIATE_URL=http://localhost:8080
WEAVIATE_API_KEY=fake-weaviate-key
EMBEDCHAIN_DB_URI=sqlite:///test.db
# Databricks Credentials
DATABRICKS_HOST=https://fake-databricks.cloud.databricks.com
DATABRICKS_TOKEN=fake-databricks-token
DATABRICKS_CONFIG_PROFILE=fake-profile
# MongoDB Credentials
MONGODB_URI=mongodb://fake:fake@localhost:27017/fake
# -----------------------------------------------------------------------------
# CrewAI Platform & Enterprise
# -----------------------------------------------------------------------------
# setting CREWAI_PLATFORM_INTEGRATION_TOKEN causes these tests to fail:
#=========================== short test summary info ============================
#FAILED tests/test_context.py::TestPlatformIntegrationToken::test_platform_context_manager_basic_usage - AssertionError: assert 'fake-platform-token' is None
# + where 'fake-platform-token' = get_platform_integration_token()
#FAILED tests/test_context.py::TestPlatformIntegrationToken::test_context_var_isolation_between_tests - AssertionError: assert 'fake-platform-token' is None
# + where 'fake-platform-token' = get_platform_integration_token()
#FAILED tests/test_context.py::TestPlatformIntegrationToken::test_multiple_sequential_context_managers - AssertionError: assert 'fake-platform-token' is None
# + where 'fake-platform-token' = get_platform_integration_token()
#CREWAI_PLATFORM_INTEGRATION_TOKEN=fake-platform-token
CREWAI_PERSONAL_ACCESS_TOKEN=fake-personal-token
CREWAI_PLUS_URL=https://fake.crewai.com
# -----------------------------------------------------------------------------
# Other Service API Keys
# -----------------------------------------------------------------------------
ZAPIER_API_KEY=fake-zapier-key
PATRONUS_API_KEY=fake-patronus-key
MINDS_API_KEY=fake-minds-key
HF_TOKEN=fake-hf-token
# -----------------------------------------------------------------------------
# Feature Flags/Testing Modes
# -----------------------------------------------------------------------------
CREWAI_DISABLE_TELEMETRY=true
OTEL_SDK_DISABLED=true
CREWAI_TESTING=true
CREWAI_TRACING_ENABLED=false
# -----------------------------------------------------------------------------
# Testing/CI Configuration
# -----------------------------------------------------------------------------
# VCR recording mode: "none" (default), "new_episodes", "all", "once"
PYTEST_VCR_RECORD_MODE=none
# Set to "true" by GitHub when running in GitHub Actions
# GITHUB_ACTIONS=false
# -----------------------------------------------------------------------------
# Python Configuration
# -----------------------------------------------------------------------------
PYTHONUNBUFFERED=1

View File

@@ -5,18 +5,6 @@ on: [pull_request]
permissions:
contents: read
env:
OPENAI_API_KEY: fake-api-key
PYTHONUNBUFFERED: 1
BRAVE_API_KEY: fake-brave-key
SNOWFLAKE_USER: fake-snowflake-user
SNOWFLAKE_PASSWORD: fake-snowflake-password
SNOWFLAKE_ACCOUNT: fake-snowflake-account
SNOWFLAKE_WAREHOUSE: fake-snowflake-warehouse
SNOWFLAKE_DATABASE: fake-snowflake-database
SNOWFLAKE_SCHEMA: fake-snowflake-schema
EMBEDCHAIN_DB_URI: sqlite:///test.db
jobs:
tests:
name: tests (${{ matrix.python-version }})
@@ -84,26 +72,20 @@ jobs:
# fi
cd lib/crewai && uv run pytest \
--block-network \
--timeout=30 \
-vv \
--splits 8 \
--group ${{ matrix.group }} \
$DURATIONS_ARG \
--durations=10 \
-n auto \
--maxfail=3
- name: Run tool tests (group ${{ matrix.group }} of 8)
run: |
cd lib/crewai-tools && uv run pytest \
--block-network \
--timeout=30 \
-vv \
--splits 8 \
--group ${{ matrix.group }} \
--durations=10 \
-n auto \
--maxfail=3

conftest.py Normal file
View File

@@ -0,0 +1,166 @@
"""Pytest configuration for crewAI workspace."""
from collections.abc import Generator
import os
from pathlib import Path
import tempfile
from typing import Any
from dotenv import load_dotenv
import pytest
from vcr.request import Request # type: ignore[import-untyped]
env_test_path = Path(__file__).parent / ".env.test"
load_dotenv(env_test_path, override=True)
load_dotenv(override=True)
@pytest.fixture(autouse=True, scope="function")
def cleanup_event_handlers() -> Generator[None, Any, None]:
"""Clean up event bus handlers after each test to prevent test pollution."""
yield
try:
from crewai.events.event_bus import crewai_event_bus
with crewai_event_bus._rwlock.w_locked():
crewai_event_bus._sync_handlers.clear()
crewai_event_bus._async_handlers.clear()
except Exception: # noqa: S110
pass
@pytest.fixture(autouse=True, scope="function")
def setup_test_environment() -> Generator[None, Any, None]:
"""Setup test environment for crewAI workspace."""
with tempfile.TemporaryDirectory() as temp_dir:
storage_dir = Path(temp_dir) / "crewai_test_storage"
storage_dir.mkdir(parents=True, exist_ok=True)
if not storage_dir.exists() or not storage_dir.is_dir():
raise RuntimeError(
f"Failed to create test storage directory: {storage_dir}"
)
try:
test_file = storage_dir / ".permissions_test"
test_file.touch()
test_file.unlink()
except (OSError, IOError) as e:
raise RuntimeError(
f"Test storage directory {storage_dir} is not writable: {e}"
) from e
os.environ["CREWAI_STORAGE_DIR"] = str(storage_dir)
os.environ["CREWAI_TESTING"] = "true"
try:
yield
finally:
os.environ.pop("CREWAI_TESTING", "true")
os.environ.pop("CREWAI_STORAGE_DIR", None)
os.environ.pop("CREWAI_DISABLE_TELEMETRY", "true")
os.environ.pop("OTEL_SDK_DISABLED", "true")
os.environ.pop("OPENAI_BASE_URL", "https://api.openai.com/v1")
os.environ.pop("OPENAI_API_BASE", "https://api.openai.com/v1")
HEADERS_TO_FILTER = {
"authorization": "AUTHORIZATION-XXX",
"content-security-policy": "CSP-FILTERED",
"cookie": "COOKIE-XXX",
"set-cookie": "SET-COOKIE-XXX",
"permissions-policy": "PERMISSIONS-POLICY-XXX",
"referrer-policy": "REFERRER-POLICY-XXX",
"strict-transport-security": "STS-XXX",
"x-content-type-options": "X-CONTENT-TYPE-XXX",
"x-frame-options": "X-FRAME-OPTIONS-XXX",
"x-permitted-cross-domain-policies": "X-PERMITTED-XXX",
"x-request-id": "X-REQUEST-ID-XXX",
"x-runtime": "X-RUNTIME-XXX",
"x-xss-protection": "X-XSS-PROTECTION-XXX",
"x-stainless-arch": "X-STAINLESS-ARCH-XXX",
"x-stainless-os": "X-STAINLESS-OS-XXX",
"x-stainless-read-timeout": "X-STAINLESS-READ-TIMEOUT-XXX",
"cf-ray": "CF-RAY-XXX",
"etag": "ETAG-XXX",
"Strict-Transport-Security": "STS-XXX",
"access-control-expose-headers": "ACCESS-CONTROL-XXX",
"openai-organization": "OPENAI-ORG-XXX",
"openai-project": "OPENAI-PROJECT-XXX",
"x-ratelimit-limit-requests": "X-RATELIMIT-LIMIT-REQUESTS-XXX",
"x-ratelimit-limit-tokens": "X-RATELIMIT-LIMIT-TOKENS-XXX",
"x-ratelimit-remaining-requests": "X-RATELIMIT-REMAINING-REQUESTS-XXX",
"x-ratelimit-remaining-tokens": "X-RATELIMIT-REMAINING-TOKENS-XXX",
"x-ratelimit-reset-requests": "X-RATELIMIT-RESET-REQUESTS-XXX",
"x-ratelimit-reset-tokens": "X-RATELIMIT-RESET-TOKENS-XXX",
"x-goog-api-key": "X-GOOG-API-KEY-XXX",
}
def _filter_request_headers(request: Request) -> Request: # type: ignore[no-any-unimported]
"""Filter sensitive headers from request before recording."""
for header_name, replacement in HEADERS_TO_FILTER.items():
for variant in [header_name, header_name.upper(), header_name.title()]:
if variant in request.headers:
request.headers[variant] = [replacement]
return request
def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any]:
"""Filter sensitive headers from response before recording."""
for header_name, replacement in HEADERS_TO_FILTER.items():
for variant in [header_name, header_name.upper(), header_name.title()]:
if variant in response["headers"]:
response["headers"][variant] = [replacement]
return response
@pytest.fixture(scope="module")
def vcr_cassette_dir(request: Any) -> str:
"""Generate cassette directory path based on test module location.
Organizes cassettes to mirror test directory structure within each package:
lib/crewai/tests/llms/google/test_google.py -> lib/crewai/tests/cassettes/llms/google/
lib/crewai-tools/tests/tools/test_search.py -> lib/crewai-tools/tests/cassettes/tools/
"""
test_file = Path(request.fspath)
for parent in test_file.parents:
if parent.name in ("crewai", "crewai-tools") and parent.parent.name == "lib":
package_root = parent
break
else:
package_root = test_file.parent
tests_root = package_root / "tests"
test_dir = test_file.parent
if test_dir != tests_root:
relative_path = test_dir.relative_to(tests_root)
cassette_dir = tests_root / "cassettes" / relative_path
else:
cassette_dir = tests_root / "cassettes"
cassette_dir.mkdir(parents=True, exist_ok=True)
return str(cassette_dir)
@pytest.fixture(scope="module")
def vcr_config(vcr_cassette_dir: str) -> dict[str, Any]:
"""Configure VCR with organized cassette storage."""
config = {
"cassette_library_dir": vcr_cassette_dir,
"record_mode": os.getenv("PYTEST_VCR_RECORD_MODE", "once"),
"filter_headers": [(k, v) for k, v in HEADERS_TO_FILTER.items()],
"before_record_request": _filter_request_headers,
"before_record_response": _filter_response_headers,
"filter_query_parameters": ["key"],
}
if os.getenv("GITHUB_ACTIONS") == "true":
config["record_mode"] = "none"
return config
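
With cassette management and header filtering now centralized in this root conftest, individual tests can use a bare @pytest.mark.vcr() marker, which is exactly the change repeated across the test diffs below. A minimal sketch, modeled on test_llm_call later in this diff:

import pytest

from crewai.llm import LLM


@pytest.mark.vcr()  # cassette dir, record mode and header filtering come from the root conftest
def test_llm_says_hello():
    llm = LLM(model="gpt-3.5-turbo")
    response = llm.call([{"role": "user", "content": "Say 'Hello, World!'"}])
    assert "Hello" in response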

View File

@@ -218,7 +218,7 @@ Update the root `README.md` only if the tool introduces a new category or notabl
## Discovery and specs
Our internal tooling discovers classes whose names end with `Tool`. Keep your class exported from the module path under `crewai_tools/tools/...` to be picked up by scripts like `generate_tool_specs.py`.
Our internal tooling discovers classes whose names end with `Tool`. Keep your class exported from the module path under `crewai_tools/tools/...` to be picked up by scripts like `crewai_tools.generate_tool_specs.py`.
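For illustration, a minimal tool that this discovery would pick up; a sketch only, with a hypothetical module path and made-up fields:

from crewai.tools.base_tool import BaseTool


class WordCountTool(BaseTool):  # class name ends with "Tool", so discovery picks it up
    name: str = "Word Count"
    description: str = "Counts the words in a piece of text."

    def _run(self, text: str) -> int:
        return len(text.split())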
---

View File

@@ -12,7 +12,7 @@ dependencies = [
"pytube>=15.0.0",
"requests>=2.32.5",
"docker>=7.1.0",
"crewai==1.6.0",
"crewai==1.6.1",
"lancedb>=0.5.4",
"tiktoken>=0.8.0",
"beautifulsoup4>=4.13.4",

View File

@@ -291,4 +291,4 @@ __all__ = [
"ZapierActionTools",
]
__version__ = "1.6.0"
__version__ = "1.6.1"

View File

@@ -4,17 +4,20 @@ from collections.abc import Mapping
import inspect
import json
from pathlib import Path
from typing import Any, cast
from typing import Any
from crewai.tools.base_tool import BaseTool, EnvVar
from crewai_tools import tools
from pydantic import BaseModel
from pydantic.json_schema import GenerateJsonSchema
from pydantic_core import PydanticOmit
from crewai_tools import tools
class SchemaGenerator(GenerateJsonSchema):
def handle_invalid_for_json_schema(self, schema, error_info):
def handle_invalid_for_json_schema(
self, schema: Any, error_info: Any
) -> dict[str, Any]:
raise PydanticOmit
@@ -73,7 +76,7 @@ class ToolSpecExtractor:
@staticmethod
def _extract_field_default(
field: dict | None, fallback: str | list[Any] = ""
field: dict[str, Any] | None, fallback: str | list[Any] = ""
) -> str | list[Any] | int:
if not field:
return fallback
@@ -83,7 +86,7 @@ class ToolSpecExtractor:
return default if isinstance(default, (list, str, int)) else fallback
@staticmethod
def _extract_params(args_schema_field: dict | None) -> dict[str, Any]:
def _extract_params(args_schema_field: dict[str, Any] | None) -> dict[str, Any]:
if not args_schema_field:
return {}
@@ -94,15 +97,15 @@ class ToolSpecExtractor:
):
return {}
# Cast to type[BaseModel] after runtime check
schema_class = cast(type[BaseModel], args_schema_class)
try:
return schema_class.model_json_schema(schema_generator=SchemaGenerator)
return args_schema_class.model_json_schema(schema_generator=SchemaGenerator)
except Exception:
return {}
@staticmethod
def _extract_env_vars(env_vars_field: dict | None) -> list[dict[str, Any]]:
def _extract_env_vars(
env_vars_field: dict[str, Any] | None,
) -> list[dict[str, Any]]:
if not env_vars_field:
return []
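
The SchemaGenerator at the top of this file relies on PydanticOmit to silently drop fields that have no JSON representation instead of failing the whole schema export. A standalone sketch of the same behavior (the ToolArgs model is hypothetical):

from collections.abc import Callable
from typing import Any

from pydantic import BaseModel
from pydantic.json_schema import GenerateJsonSchema
from pydantic_core import PydanticOmit


class OmittingGenerator(GenerateJsonSchema):
    def handle_invalid_for_json_schema(self, schema: Any, error_info: Any) -> dict[str, Any]:
        raise PydanticOmit  # skip the offending field instead of raising


class ToolArgs(BaseModel):
    query: str
    callback: Callable[[str], None] | None = None  # not JSON-serializable


# "callback" is omitted from the emitted schema; "query" survives.
print(ToolArgs.model_json_schema(schema_generator=OmittingGenerator))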

View File

@@ -1,21 +0,0 @@
import pytest
def pytest_configure(config):
"""Register custom markers."""
config.addinivalue_line("markers", "integration: mark test as an integration test")
config.addinivalue_line("markers", "asyncio: mark test as an async test")
# Set the asyncio loop scope through ini configuration
config.inicfg["asyncio_mode"] = "auto"
@pytest.fixture(scope="function")
def event_loop():
"""Create an instance of the default event loop for each test case."""
import asyncio
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
yield loop
loop.close()

View File

@@ -2,7 +2,7 @@ import json
from unittest import mock
from crewai.tools.base_tool import BaseTool, EnvVar
from generate_tool_specs import ToolSpecExtractor
from crewai_tools.generate_tool_specs import ToolSpecExtractor
from pydantic import BaseModel, Field
import pytest
@@ -61,8 +61,8 @@ def test_unwrap_schema(extractor):
@pytest.fixture
def mock_tool_extractor(extractor):
with (
mock.patch("generate_tool_specs.dir", return_value=["MockTool"]),
mock.patch("generate_tool_specs.getattr", return_value=MockTool),
mock.patch("crewai_tools.generate_tool_specs.dir", return_value=["MockTool"]),
mock.patch("crewai_tools.generate_tool_specs.getattr", return_value=MockTool),
):
extractor.extract_all_tools()
assert len(extractor.tools_spec) == 1

View File

@@ -4,7 +4,7 @@ from crewai_tools.tools.firecrawl_crawl_website_tool.firecrawl_crawl_website_too
FirecrawlCrawlWebsiteTool,
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_firecrawl_crawl_tool_integration():
tool = FirecrawlCrawlWebsiteTool(config={
"limit": 2,

View File

@@ -4,7 +4,7 @@ from crewai_tools.tools.firecrawl_scrape_website_tool.firecrawl_scrape_website_t
FirecrawlScrapeWebsiteTool,
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_firecrawl_scrape_tool_integration():
tool = FirecrawlScrapeWebsiteTool()
result = tool.run(url="https://firecrawl.dev")

View File

@@ -3,7 +3,7 @@ import pytest
from crewai_tools.tools.firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_firecrawl_search_tool_integration():
tool = FirecrawlSearchTool()
result = tool.run(query="firecrawl")

View File

@@ -23,15 +23,13 @@ from crewai_tools.tools.rag.rag_tool import Adapter
import pytest
pytestmark = [pytest.mark.vcr(filter_headers=["authorization"])]
@pytest.fixture
def mock_adapter():
mock_adapter = MagicMock(spec=Adapter)
return mock_adapter
@pytest.mark.vcr()
def test_directory_search_tool():
with tempfile.TemporaryDirectory() as temp_dir:
test_file = Path(temp_dir) / "test.txt"
@@ -65,6 +63,7 @@ def test_pdf_search_tool(mock_adapter):
)
@pytest.mark.vcr()
def test_txt_search_tool():
with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as temp_file:
temp_file.write(b"This is a test file for txt search")
@@ -102,6 +101,7 @@ def test_docx_search_tool(mock_adapter):
)
@pytest.mark.vcr()
def test_json_search_tool():
with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as temp_file:
temp_file.write(b'{"test": "This is a test JSON file"}')
@@ -127,6 +127,7 @@ def test_xml_search_tool(mock_adapter):
)
@pytest.mark.vcr()
def test_csv_search_tool():
with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as temp_file:
temp_file.write(b"name,description\ntest,This is a test CSV file")
@@ -141,6 +142,7 @@ def test_csv_search_tool():
os.unlink(temp_file_path)
@pytest.mark.vcr()
def test_mdx_search_tool():
with tempfile.NamedTemporaryFile(suffix=".mdx", delete=False) as temp_file:
temp_file.write(b"# Test MDX\nThis is a test MDX file")

View File

@@ -48,7 +48,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
"crewai-tools==1.6.0",
"crewai-tools==1.6.1",
]
embeddings = [
"tiktoken~=0.8.0"

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "1.6.0"
__version__ = "1.6.1"
_telemetry_submitted = False

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.6.0"
"crewai[tools]==1.6.1"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.6.0"
"crewai[tools]==1.6.1"
]
[project.scripts]

View File

@@ -76,7 +76,7 @@ class TraceBatchManager:
use_ephemeral: bool = False,
) -> TraceBatch:
"""Initialize a new trace batch (thread-safe)"""
with self._init_lock:
with self._batch_ready_cv:
if self.current_batch is not None:
logger.debug(
"Batch already initialized, skipping duplicate initialization"
@@ -99,7 +99,6 @@ class TraceBatchManager:
self.backend_initialized = True
self._batch_ready_cv.notify_all()
return self.current_batch
def _initialize_backend_batch(
@@ -107,7 +106,7 @@ class TraceBatchManager:
user_context: dict[str, str],
execution_metadata: dict[str, Any],
use_ephemeral: bool = False,
):
) -> None:
"""Send batch initialization to backend"""
if not is_tracing_enabled_in_context():
@@ -204,7 +203,7 @@ class TraceBatchManager:
return False
return True
def add_event(self, trace_event: TraceEvent):
def add_event(self, trace_event: TraceEvent) -> None:
"""Add event to buffer"""
self.event_buffer.append(trace_event)
@@ -300,7 +299,7 @@ class TraceBatchManager:
return finalized_batch
def _finalize_backend_batch(self, events_count: int = 0):
def _finalize_backend_batch(self, events_count: int = 0) -> None:
"""Send batch finalization to backend
Args:
@@ -366,7 +365,7 @@ class TraceBatchManager:
logger.error(f"❌ Error finalizing trace batch: {e}")
self.plus_api.mark_trace_batch_as_failed(self.trace_batch_id, str(e))
def _cleanup_batch_data(self):
def _cleanup_batch_data(self) -> None:
"""Clean up batch data after successful finalization to free memory"""
try:
if hasattr(self, "event_buffer") and self.event_buffer:
@@ -411,7 +410,7 @@ class TraceBatchManager:
lambda: self.current_batch is not None, timeout=timeout
)
def record_start_time(self, key: str):
def record_start_time(self, key: str) -> None:
"""Record start time for duration calculation"""
self.execution_start_times[key] = datetime.now(timezone.utc)
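
The switch from a plain _init_lock to the _batch_ready_cv condition variable lets other threads block until the batch exists (see the wait_for call near the end of this hunk). The underlying pattern, isolated as a minimal sketch:

import threading


class BatchGate:
    def __init__(self) -> None:
        self._cv = threading.Condition()
        self.batch: object | None = None

    def initialize(self, batch: object) -> None:
        with self._cv:
            if self.batch is None:
                self.batch = batch
            self._cv.notify_all()  # wake threads blocked in wait_ready()

    def wait_ready(self, timeout: float | None = None) -> bool:
        with self._cv:
            # Releases the lock while waiting; rechecks the predicate on each notify.
            return self._cv.wait_for(lambda: self.batch is not None, timeout=timeout)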

View File

@@ -71,6 +71,7 @@ from crewai.events.types.reasoning_events import (
AgentReasoningFailedEvent,
AgentReasoningStartedEvent,
)
from crewai.events.types.system_events import SignalEvent, on_signal
from crewai.events.types.task_events import (
TaskCompletedEvent,
TaskFailedEvent,
@@ -159,6 +160,7 @@ class TraceCollectionListener(BaseEventListener):
self._register_flow_event_handlers(crewai_event_bus)
self._register_context_event_handlers(crewai_event_bus)
self._register_action_event_handlers(crewai_event_bus)
self._register_system_event_handlers(crewai_event_bus)
self._listeners_setup = True
@@ -458,6 +460,15 @@ class TraceCollectionListener(BaseEventListener):
) -> None:
self._handle_action_event("knowledge_query_failed", source, event)
def _register_system_event_handlers(self, event_bus: CrewAIEventsBus) -> None:
"""Register handlers for system signal events (SIGTERM, SIGINT, etc.)."""
@on_signal
def handle_signal(source: Any, event: SignalEvent) -> None:
"""Flush trace batch on system signals to prevent data loss."""
if self.batch_manager.is_batch_initialized():
self.batch_manager.finalize_batch()
def _initialize_crew_batch(self, source: Any, event: Any) -> None:
"""Initialize trace batch.

View File

@@ -0,0 +1,102 @@
"""System signal event types for CrewAI.
This module contains event types for system-level signals like SIGTERM,
allowing listeners to perform cleanup operations before process termination.
"""
from collections.abc import Callable
from enum import IntEnum
import signal
from typing import Annotated, Literal, TypeVar
from pydantic import Field, TypeAdapter
from crewai.events.base_events import BaseEvent
class SignalType(IntEnum):
"""Enumeration of supported system signals."""
SIGTERM = signal.SIGTERM
SIGINT = signal.SIGINT
SIGHUP = signal.SIGHUP
SIGTSTP = signal.SIGTSTP
SIGCONT = signal.SIGCONT
class SigTermEvent(BaseEvent):
"""Event emitted when SIGTERM is received."""
type: Literal["SIGTERM"] = "SIGTERM"
signal_number: SignalType = SignalType.SIGTERM
reason: str | None = None
class SigIntEvent(BaseEvent):
"""Event emitted when SIGINT is received."""
type: Literal["SIGINT"] = "SIGINT"
signal_number: SignalType = SignalType.SIGINT
reason: str | None = None
class SigHupEvent(BaseEvent):
"""Event emitted when SIGHUP is received."""
type: Literal["SIGHUP"] = "SIGHUP"
signal_number: SignalType = SignalType.SIGHUP
reason: str | None = None
class SigTStpEvent(BaseEvent):
"""Event emitted when SIGTSTP is received.
Note: SIGSTOP cannot be caught; it immediately suspends the process.
"""
type: Literal["SIGTSTP"] = "SIGTSTP"
signal_number: SignalType = SignalType.SIGTSTP
reason: str | None = None
class SigContEvent(BaseEvent):
"""Event emitted when SIGCONT is received."""
type: Literal["SIGCONT"] = "SIGCONT"
signal_number: SignalType = SignalType.SIGCONT
reason: str | None = None
SignalEvent = Annotated[
SigTermEvent | SigIntEvent | SigHupEvent | SigTStpEvent | SigContEvent,
Field(discriminator="type"),
]
signal_event_adapter: TypeAdapter[SignalEvent] = TypeAdapter(SignalEvent)
SIGNAL_EVENT_TYPES: tuple[type[BaseEvent], ...] = (
SigTermEvent,
SigIntEvent,
SigHupEvent,
SigTStpEvent,
SigContEvent,
)
T = TypeVar("T", bound=Callable[[object, SignalEvent], None])
def on_signal(func: T) -> T:
"""Decorator to register a handler for all signal events.
Args:
func: Handler function that receives (source, event) arguments.
Returns:
The original function, registered for all signal event types.
"""
from crewai.events.event_bus import crewai_event_bus
for event_type in SIGNAL_EVENT_TYPES:
crewai_event_bus.on(event_type)(func)
return func
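
Usage mirrors how TraceCollectionListener registers its flush handler earlier in this diff; a hedged sketch:

from typing import Any

from crewai.events.types.system_events import SignalEvent, on_signal


@on_signal
def log_signal(source: Any, event: SignalEvent) -> None:
    # Fires for SIGTERM, SIGINT, SIGHUP, SIGTSTP and SIGCONT alike;
    # event.type discriminates which one arrived.
    print(f"received {event.type} (signal {int(event.signal_number)})")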

View File

@@ -406,46 +406,100 @@ class LLM(BaseLLM):
instance.is_litellm = True
return instance
@classmethod
def _matches_provider_pattern(cls, model: str, provider: str) -> bool:
"""Check if a model name matches provider-specific patterns.
This allows supporting models that aren't in the hardcoded constants list,
including "latest" versions and new models that follow provider naming conventions.
Args:
model: The model name to check
provider: The provider to check against (canonical name)
Returns:
True if the model matches the provider's naming pattern, False otherwise
"""
model_lower = model.lower()
if provider == "openai":
return any(
model_lower.startswith(prefix)
for prefix in ["gpt-", "o1", "o3", "o4", "whisper-"]
)
if provider == "anthropic" or provider == "claude":
return any(
model_lower.startswith(prefix) for prefix in ["claude-", "anthropic."]
)
if provider == "gemini" or provider == "google":
return any(
model_lower.startswith(prefix)
for prefix in ["gemini-", "gemma-", "learnlm-"]
)
if provider == "bedrock":
return "." in model_lower
if provider == "azure":
return any(
model_lower.startswith(prefix)
for prefix in ["gpt-", "gpt-35-", "o1", "o3", "o4", "azure-"]
)
return False
@classmethod
def _validate_model_in_constants(cls, model: str, provider: str) -> bool:
"""Validate if a model name exists in the provider's constants.
"""Validate if a model name exists in the provider's constants or matches provider patterns.
This method first checks the hardcoded constants list for known models.
If not found, it falls back to pattern matching to support new models,
"latest" versions, and models that follow provider naming conventions.
Args:
model: The model name to validate
provider: The provider to check against (canonical name)
Returns:
True if the model exists in the provider's constants, False otherwise
True if the model exists in constants or matches provider patterns, False otherwise
"""
if provider == "openai":
return model in OPENAI_MODELS
if provider == "openai" and model in OPENAI_MODELS:
return True
if provider == "anthropic" or provider == "claude":
return model in ANTHROPIC_MODELS
if (
provider == "anthropic" or provider == "claude"
) and model in ANTHROPIC_MODELS:
return True
if provider == "gemini":
return model in GEMINI_MODELS
if (provider == "gemini" or provider == "google") and model in GEMINI_MODELS:
return True
if provider == "bedrock":
return model in BEDROCK_MODELS
if provider == "bedrock" and model in BEDROCK_MODELS:
return True
if provider == "azure":
# Azure does not provide a list of available models; TODO: determine a better way to handle this
return True
return False
# Fallback to pattern matching for models not in constants
return cls._matches_provider_pattern(model, provider)
@classmethod
def _infer_provider_from_model(cls, model: str) -> str:
"""Infer the provider from the model name.
This method first checks the hardcoded constants list for known models.
If not found, it uses pattern matching to infer the provider from model name patterns.
This allows supporting new models and "latest" versions without hardcoding.
Args:
model: The model name without provider prefix
Returns:
The inferred provider name, defaults to "openai"
"""
if model in OPENAI_MODELS:
return "openai"
@@ -1699,12 +1753,14 @@ class LLM(BaseLLM):
max_tokens=self.max_tokens,
presence_penalty=self.presence_penalty,
frequency_penalty=self.frequency_penalty,
logit_bias=copy.deepcopy(self.logit_bias, memo)
if self.logit_bias
else None,
response_format=copy.deepcopy(self.response_format, memo)
logit_bias=(
copy.deepcopy(self.logit_bias, memo) if self.logit_bias else None
),
response_format=(
copy.deepcopy(self.response_format, memo)
if self.response_format
else None,
else None
),
seed=self.seed,
logprobs=self.logprobs,
top_logprobs=self.top_logprobs,

View File

@@ -182,6 +182,8 @@ OPENAI_MODELS: list[OpenAIModels] = [
AnthropicModels: TypeAlias = Literal[
"claude-opus-4-5-20251101",
"claude-opus-4-5",
"claude-3-7-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-5-haiku-latest",
@@ -208,6 +210,8 @@ AnthropicModels: TypeAlias = Literal[
"claude-3-haiku-20240307",
]
ANTHROPIC_MODELS: list[AnthropicModels] = [
"claude-opus-4-5-20251101",
"claude-opus-4-5",
"claude-3-7-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-5-haiku-latest",
@@ -252,6 +256,7 @@ GeminiModels: TypeAlias = Literal[
"gemini-2.5-flash-preview-tts",
"gemini-2.5-pro-preview-tts",
"gemini-2.5-computer-use-preview-10-2025",
"gemini-2.5-pro-exp-03-25",
"gemini-2.0-flash",
"gemini-2.0-flash-001",
"gemini-2.0-flash-exp",
@@ -305,6 +310,7 @@ GEMINI_MODELS: list[GeminiModels] = [
"gemini-2.5-flash-preview-tts",
"gemini-2.5-pro-preview-tts",
"gemini-2.5-computer-use-preview-10-2025",
"gemini-2.5-pro-exp-03-25",
"gemini-2.0-flash",
"gemini-2.0-flash-001",
"gemini-2.0-flash-exp",
@@ -452,6 +458,7 @@ BedrockModels: TypeAlias = Literal[
"anthropic.claude-3-sonnet-20240229-v1:0:28k",
"anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-instant-v1:2:100k",
"anthropic.claude-opus-4-5-20251101-v1:0",
"anthropic.claude-opus-4-1-20250805-v1:0",
"anthropic.claude-opus-4-20250514-v1:0",
"anthropic.claude-sonnet-4-20250514-v1:0",
@@ -524,6 +531,7 @@ BEDROCK_MODELS: list[BedrockModels] = [
"anthropic.claude-3-sonnet-20240229-v1:0:28k",
"anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-instant-v1:2:100k",
"anthropic.claude-opus-4-5-20251101-v1:0",
"anthropic.claude-opus-4-1-20250805-v1:0",
"anthropic.claude-opus-4-20250514-v1:0",
"anthropic.claude-sonnet-4-20250514-v1:0",

View File

@@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
from crewai.utilities.agent_utils import is_context_length_exceeded
from crewai.utilities.converter import generate_model_description
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
@@ -26,6 +27,7 @@ try:
from azure.ai.inference.models import (
ChatCompletions,
ChatCompletionsToolCall,
JsonSchemaFormat,
StreamingChatCompletionsUpdate,
)
from azure.core.credentials import (
@@ -278,13 +280,16 @@ class AzureCompletion(BaseLLM):
}
if response_model and self.is_openai_model:
params["response_format"] = {
"type": "json_schema",
"json_schema": {
"name": response_model.__name__,
"schema": response_model.model_json_schema(),
},
}
model_description = generate_model_description(response_model)
json_schema_info = model_description["json_schema"]
json_schema_name = json_schema_info["name"]
params["response_format"] = JsonSchemaFormat(
name=json_schema_name,
schema=json_schema_info["schema"],
description=f"Schema for {json_schema_name}",
strict=json_schema_info["strict"],
)
# Only include model parameter for non-Azure OpenAI endpoints
# Azure OpenAI endpoints have the deployment name in the URL
@@ -311,8 +316,8 @@ class AzureCompletion(BaseLLM):
params["tool_choice"] = "auto"
additional_params = self.additional_params
additional_drop_params = additional_params.get('additional_drop_params')
drop_params = additional_params.get('drop_params')
additional_drop_params = additional_params.get("additional_drop_params")
drop_params = additional_params.get("drop_params")
if drop_params and isinstance(additional_drop_params, list):
for drop_param in additional_drop_params:
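
The response_format change above swaps the hand-built dict for azure-ai-inference's typed JsonSchemaFormat. Constructed standalone from a Pydantic model it looks roughly like this (Answer is a hypothetical response model; the real code derives the fields via generate_model_description):

from azure.ai.inference.models import JsonSchemaFormat
from pydantic import BaseModel


class Answer(BaseModel):
    text: str
    confidence: float


response_format = JsonSchemaFormat(
    name="Answer",
    schema=Answer.model_json_schema(),
    description="Schema for Answer",
    strict=True,
)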

View File

@@ -2,8 +2,10 @@
from __future__ import annotations
import asyncio
from collections.abc import Callable
from functools import wraps
import inspect
from typing import TYPE_CHECKING, Any, Concatenate, ParamSpec, TypeVar, overload
from crewai.project.utils import memoize
@@ -156,6 +158,23 @@ def cache_handler(meth: Callable[P, R]) -> CacheHandlerMethod[P, R]:
return CacheHandlerMethod(memoize(meth))
def _call_method(method: Callable[..., Any], *args: Any, **kwargs: Any) -> Any:
"""Call a method, awaiting it if async and running in an event loop."""
result = method(*args, **kwargs)
if inspect.iscoroutine(result):
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as pool:
return pool.submit(asyncio.run, result).result()
return asyncio.run(result)
return result
@overload
def crew(
meth: Callable[Concatenate[SelfT, P], Crew],
@@ -198,7 +217,7 @@ def crew(
# Instantiate tasks in order
for _, task_method in tasks:
task_instance = task_method(self)
task_instance = _call_method(task_method, self)
instantiated_tasks.append(task_instance)
agent_instance = getattr(task_instance, "agent", None)
if agent_instance and agent_instance.role not in agent_roles:
@@ -207,7 +226,7 @@ def crew(
# Instantiate agents not included by tasks
for _, agent_method in agents:
agent_instance = agent_method(self)
agent_instance = _call_method(agent_method, self)
if agent_instance.role not in agent_roles:
instantiated_agents.append(agent_instance)
agent_roles.add(agent_instance.role)
@@ -215,7 +234,7 @@ def crew(
self.agents = instantiated_agents
self.tasks = instantiated_tasks
crew_instance = meth(self, *args, **kwargs)
crew_instance: Crew = _call_method(meth, self, *args, **kwargs)
def callback_wrapper(
hook: Callable[Concatenate[CrewInstance, P2], R2], instance: CrewInstance

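_call_method above (and _resolve_result in the annotations module later in this diff) share one trick: running a coroutine to completion from synchronous code even when an event loop is already running. Isolated as a sketch:

import asyncio
import concurrent.futures
import inspect
from typing import Any


def resolve(result: Any) -> Any:
    if not inspect.iscoroutine(result):
        return result
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = None
    if loop and loop.is_running():
        # asyncio.run() raises inside a running loop, so hand the coroutine
        # to a fresh loop on a worker thread and block on the future.
        with concurrent.futures.ThreadPoolExecutor() as pool:
            return pool.submit(asyncio.run, result).result()
    return asyncio.run(result)


async def make_value() -> str:
    return "ok"


print(resolve(make_value()))  # "ok"
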
View File

@@ -1,7 +1,8 @@
"""Utility functions for the crewai project module."""
from collections.abc import Callable
from collections.abc import Callable, Coroutine
from functools import wraps
import inspect
from typing import Any, ParamSpec, TypeVar, cast
from pydantic import BaseModel
@@ -37,8 +38,8 @@ def _make_hashable(arg: Any) -> Any:
def memoize(meth: Callable[P, R]) -> Callable[P, R]:
"""Memoize a method by caching its results based on arguments.
Handles Pydantic BaseModel instances by converting them to JSON strings
before hashing for cache lookup.
Handles both sync and async methods. Pydantic BaseModel instances are
converted to JSON strings before hashing for cache lookup.
Args:
meth: The method to memoize.
@@ -46,18 +47,16 @@ def memoize(meth: Callable[P, R]) -> Callable[P, R]:
Returns:
A memoized version of the method that caches results.
"""
if inspect.iscoroutinefunction(meth):
return cast(Callable[P, R], _memoize_async(meth))
return _memoize_sync(meth)
def _memoize_sync(meth: Callable[P, R]) -> Callable[P, R]:
"""Memoize a synchronous method."""
@wraps(meth)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
"""Wrapper that converts arguments to hashable form before caching.
Args:
*args: Positional arguments to the memoized method.
**kwargs: Keyword arguments to the memoized method.
Returns:
The result of the memoized method call.
"""
hashable_args = tuple(_make_hashable(arg) for arg in args)
hashable_kwargs = tuple(
sorted((k, _make_hashable(v)) for k, v in kwargs.items())
@@ -73,3 +72,27 @@ def memoize(meth: Callable[P, R]) -> Callable[P, R]:
return result
return cast(Callable[P, R], wrapper)
def _memoize_async(
meth: Callable[P, Coroutine[Any, Any, R]],
) -> Callable[P, Coroutine[Any, Any, R]]:
"""Memoize an async method."""
@wraps(meth)
async def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
hashable_args = tuple(_make_hashable(arg) for arg in args)
hashable_kwargs = tuple(
sorted((k, _make_hashable(v)) for k, v in kwargs.items())
)
cache_key = str((hashable_args, hashable_kwargs))
cached_result: R | None = cache.read(tool=meth.__name__, input=cache_key)
if cached_result is not None:
return cached_result
result = await meth(*args, **kwargs)
cache.add(tool=meth.__name__, input=cache_key, output=result)
return result
return wrapper
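
A quick sketch of the new async path (the import path is taken from the annotations module above; the fetch_config coroutine is hypothetical):

import asyncio

from crewai.project.utils import memoize


@memoize
async def fetch_config(name: str) -> dict:
    print("computing...")  # printed only on the first call per cache key
    return {"name": name}


asyncio.run(fetch_config("a"))  # computes and caches
asyncio.run(fetch_config("a"))  # served from the cache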

View File

@@ -2,8 +2,10 @@
from __future__ import annotations
import asyncio
from collections.abc import Callable
from functools import partial
import inspect
from pathlib import Path
from typing import (
TYPE_CHECKING,
@@ -132,6 +134,22 @@ class CrewClass(Protocol):
crew: Callable[..., Crew]
def _resolve_result(result: Any) -> Any:
"""Resolve a potentially async result to its value."""
if inspect.iscoroutine(result):
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
if loop and loop.is_running():
import concurrent.futures
with concurrent.futures.ThreadPoolExecutor() as pool:
return pool.submit(asyncio.run, result).result()
return asyncio.run(result)
return result
class DecoratedMethod(Generic[P, R]):
"""Base wrapper for methods with decorator metadata.
@@ -162,7 +180,12 @@ class DecoratedMethod(Generic[P, R]):
"""
if obj is None:
return self
bound = partial(self._meth, obj)
inner = partial(self._meth, obj)
def _bound(*args: Any, **kwargs: Any) -> R:
result: R = _resolve_result(inner(*args, **kwargs)) # type: ignore[call-arg]
return result
for attr in (
"is_agent",
"is_llm",
@@ -174,8 +197,8 @@ class DecoratedMethod(Generic[P, R]):
"is_crew",
):
if hasattr(self, attr):
setattr(bound, attr, getattr(self, attr))
return bound
setattr(_bound, attr, getattr(self, attr))
return _bound
def __call__(self, *args: P.args, **kwargs: P.kwargs) -> R:
"""Call the wrapped method.
@@ -236,6 +259,7 @@ class BoundTaskMethod(Generic[TaskResultT]):
The task result with name ensured.
"""
result = self._task_method.unwrap()(self._obj, *args, **kwargs)
result = _resolve_result(result)
return self._task_method.ensure_task_name(result)
@@ -292,7 +316,9 @@ class TaskMethod(Generic[P, TaskResultT]):
Returns:
The task instance with name set if not already provided.
"""
return self.ensure_task_name(self._meth(*args, **kwargs))
result = self._meth(*args, **kwargs)
result = _resolve_result(result)
return self.ensure_task_name(result)
def unwrap(self) -> Callable[P, TaskResultT]:
"""Get the original unwrapped method.

View File

@@ -9,12 +9,14 @@ data is collected. Users can opt-in to share more complete data using the
from __future__ import annotations
import asyncio
import atexit
from collections.abc import Callable
from importlib.metadata import version
import json
import logging
import os
import platform
import signal
import threading
from typing import TYPE_CHECKING, Any
@@ -31,6 +33,14 @@ from opentelemetry.sdk.trace.export import (
from opentelemetry.trace import Span
from typing_extensions import Self
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.system_events import (
SigContEvent,
SigHupEvent,
SigIntEvent,
SigTStpEvent,
SigTermEvent,
)
from crewai.telemetry.constants import (
CREWAI_TELEMETRY_BASE_URL,
CREWAI_TELEMETRY_SERVICE_NAME,
@@ -121,6 +131,7 @@ class Telemetry:
)
self.provider.add_span_processor(processor)
self._register_shutdown_handlers()
self.ready = True
except Exception as e:
if isinstance(
@@ -155,6 +166,71 @@ class Telemetry:
self.ready = False
self.trace_set = False
def _register_shutdown_handlers(self) -> None:
"""Register handlers for graceful shutdown on process exit and signals."""
atexit.register(self._shutdown)
self._original_handlers: dict[int, Any] = {}
self._register_signal_handler(signal.SIGTERM, SigTermEvent, shutdown=True)
self._register_signal_handler(signal.SIGINT, SigIntEvent, shutdown=True)
self._register_signal_handler(signal.SIGHUP, SigHupEvent, shutdown=False)
self._register_signal_handler(signal.SIGTSTP, SigTStpEvent, shutdown=False)
self._register_signal_handler(signal.SIGCONT, SigContEvent, shutdown=False)
def _register_signal_handler(
self,
sig: signal.Signals,
event_class: type,
shutdown: bool = False,
) -> None:
"""Register a signal handler that emits an event.
Args:
sig: The signal to handle.
event_class: The event class to instantiate and emit.
shutdown: Whether to trigger shutdown on this signal.
"""
try:
original_handler = signal.getsignal(sig)
self._original_handlers[sig] = original_handler
def handler(signum: int, frame: Any) -> None:
crewai_event_bus.emit(self, event_class())
if shutdown:
self._shutdown()
if original_handler not in (signal.SIG_DFL, signal.SIG_IGN, None):
if callable(original_handler):
original_handler(signum, frame)
elif shutdown:
raise SystemExit(0)
signal.signal(sig, handler)
except ValueError as e:
logger.warning(
f"Cannot register {sig.name} handler: not running in main thread",
exc_info=e,
)
except OSError as e:
logger.warning(f"Cannot register {sig.name} handler: {e}", exc_info=e)
def _shutdown(self) -> None:
"""Flush and shutdown the telemetry provider on process exit.
Uses a short timeout to avoid blocking process shutdown.
"""
if not self.ready:
return
try:
self.provider.force_flush(timeout_millis=5000)
self.provider.shutdown()
self.ready = False
except Exception as e:
logger.debug(f"Telemetry shutdown failed: {e}")
def _safe_telemetry_operation(
self, operation: Callable[[], Span | None]
) -> Span | None:
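
Because _register_signal_handler saves and chains the previous handler, user-installed handlers keep working; a sketch of the resulting order (the handler name is hypothetical):

import signal


def user_cleanup(signum: int, frame: object) -> None:
    print("user cleanup runs last")


signal.signal(signal.SIGTERM, user_cleanup)

# After Telemetry._register_shutdown_handlers() runs, a SIGTERM now:
#   1. emits SigTermEvent on the crewai event bus,
#   2. flushes and shuts down the telemetry provider,
#   3. chains into user_cleanup(signum, frame).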

View File

@@ -147,7 +147,7 @@ def test_custom_llm():
assert agent.llm.model == "gpt-4"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execution():
agent = Agent(
role="test role",
@@ -166,7 +166,7 @@ def test_agent_execution():
assert output == "1 + 1 is 2"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execution_with_tools():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -211,7 +211,7 @@ def test_agent_execution_with_tools():
assert received_events[0].tool_args == {"first_number": 3, "second_number": 4}
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_logging_tool_usage():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -245,7 +245,7 @@ def test_logging_tool_usage():
assert agent.tools_handler.last_used_tool.arguments == tool_usage.arguments
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_cache_hitting():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -325,7 +325,7 @@ def test_cache_hitting():
assert received_events[0].output == "12"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_disabling_cache_for_agent():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -389,7 +389,7 @@ def test_disabling_cache_for_agent():
read.assert_not_called()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execution_with_specific_tools():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -412,7 +412,7 @@ def test_agent_execution_with_specific_tools():
assert output == "The result of the multiplication is 12."
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
@tool
def multiplier(first_number: int, second_number: int) -> float:
@@ -438,7 +438,7 @@ def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
assert output == "12"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_powered_by_new_o_model_family_that_uses_tool():
@tool
def comapny_customer_data() -> str:
@@ -464,7 +464,7 @@ def test_agent_powered_by_new_o_model_family_that_uses_tool():
assert output == "42"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_custom_max_iterations():
@tool
def get_final_answer() -> float:
@@ -509,7 +509,7 @@ def test_agent_custom_max_iterations():
assert call_count == 2
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
@pytest.mark.timeout(30)
def test_agent_max_iterations_stops_loop():
"""Test that agent execution terminates when max_iter is reached."""
@@ -546,7 +546,7 @@ def test_agent_max_iterations_stops_loop():
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_repeated_tool_usage(capsys):
"""Test that agents handle repeated tool usage appropriately.
@@ -595,7 +595,7 @@ def test_agent_repeated_tool_usage(capsys):
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
@tool
def get_final_answer(anything: str) -> float:
@@ -638,7 +638,7 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_moved_on_after_max_iterations():
@tool
def get_final_answer() -> float:
@@ -665,7 +665,7 @@ def test_agent_moved_on_after_max_iterations():
assert output == "42"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_respect_the_max_rpm_set(capsys):
@tool
def get_final_answer() -> float:
@@ -699,7 +699,7 @@ def test_agent_respect_the_max_rpm_set(capsys):
moveon.assert_called()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
from unittest.mock import patch
@@ -737,7 +737,7 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
moveon.assert_not_called()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_without_max_rpm_respects_crew_rpm(capsys):
from unittest.mock import patch
@@ -797,7 +797,7 @@ def test_agent_without_max_rpm_respects_crew_rpm(capsys):
moveon.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_error_on_parsing_tool(capsys):
from unittest.mock import patch
@@ -840,7 +840,7 @@ def test_agent_error_on_parsing_tool(capsys):
assert "Error on parsing tool." in captured.out
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_remembers_output_format_after_using_tools_too_many_times():
from unittest.mock import patch
@@ -875,7 +875,7 @@ def test_agent_remembers_output_format_after_using_tools_too_many_times():
remember_format.assert_called()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_use_specific_tasks_output_as_context(capsys):
agent1 = Agent(role="test role", goal="test goal", backstory="test backstory")
agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2")
@@ -902,7 +902,7 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
assert "hi" in result.raw.lower() or "hello" in result.raw.lower()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_step_callback():
class StepCallback:
def callback(self, step):
@@ -936,7 +936,7 @@ def test_agent_step_callback():
callback.assert_called()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_function_calling_llm():
from crewai.llm import LLM
llm = LLM(model="gpt-4o", is_litellm=True)
@@ -983,7 +983,7 @@ def test_agent_function_calling_llm():
mock_original_tool_calling.assert_called()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
from crewai.tools import BaseTool
@@ -1013,7 +1013,7 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
assert result.raw == "Howdy!"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_tool_usage_information_is_appended_to_agent():
from crewai.tools import BaseTool
@@ -1068,7 +1068,7 @@ def test_agent_definition_based_on_dict():
# test for human input
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_human_input():
# Agent configuration
config = {
@@ -1216,7 +1216,7 @@ Thought:<|eot_id|>
assert mock_format_prompt.return_value == expected_prompt
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_task_allow_crewai_trigger_context():
from crewai import Crew
@@ -1237,7 +1237,7 @@ def test_task_allow_crewai_trigger_context():
assert "Trigger Payload: Important context data" in prompt
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_task_without_allow_crewai_trigger_context():
from crewai import Crew
@@ -1260,7 +1260,7 @@ def test_task_without_allow_crewai_trigger_context():
assert "Important context data" not in prompt
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_task_allow_crewai_trigger_context_no_payload():
from crewai import Crew
@@ -1282,7 +1282,7 @@ def test_task_allow_crewai_trigger_context_no_payload():
assert "Trigger Payload:" not in prompt
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
from crewai import Crew
@@ -1311,7 +1311,7 @@ def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
assert "Trigger Payload: Initial context data" not in first_prompt
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_first_task_auto_inject_trigger():
from crewai import Crew
@@ -1344,7 +1344,7 @@ def test_first_task_auto_inject_trigger():
assert "Trigger Payload:" not in second_prompt
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject():
from crewai import Crew
@@ -1549,7 +1549,7 @@ def test_agent_with_additional_kwargs():
assert agent.llm.frequency_penalty == 0.1
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_llm_call():
llm = LLM(model="gpt-3.5-turbo")
messages = [{"role": "user", "content": "Say 'Hello, World!'"}]
@@ -1558,7 +1558,7 @@ def test_llm_call():
assert "Hello, World!" in response
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_llm_call_with_error():
llm = LLM(model="non-existent-model")
messages = [{"role": "user", "content": "This should fail"}]
@@ -1567,7 +1567,7 @@ def test_llm_call_with_error():
llm.call(messages)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_handle_context_length_exceeds_limit():
# Import necessary modules
from crewai.utilities.agent_utils import handle_context_length
@@ -1620,7 +1620,7 @@ def test_handle_context_length_exceeds_limit():
mock_summarize.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_handle_context_length_exceeds_limit_cli_no():
agent = Agent(
role="test role",
@@ -1695,7 +1695,7 @@ def test_agent_with_all_llm_attributes():
assert agent.llm.api_key == "sk-your-api-key-here"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_llm_call_with_all_attributes():
llm = LLM(
model="gpt-3.5-turbo",
@@ -1712,7 +1712,7 @@ def test_llm_call_with_all_attributes():
assert "STOP" not in response
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_ollama_llama3():
agent = Agent(
role="test role",
@@ -1733,7 +1733,7 @@ def test_agent_with_ollama_llama3():
assert "Llama3" in response or "AI" in response or "language model" in response
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_llm_call_with_ollama_llama3():
llm = LLM(
model="ollama/llama3.2:3b",
@@ -1752,7 +1752,7 @@ def test_llm_call_with_ollama_llama3():
assert "Llama3" in response or "AI" in response or "language model" in response
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_basic():
agent = Agent(
role="test role",
@@ -1771,7 +1771,7 @@ def test_agent_execute_task_basic():
assert "4" in result
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_with_context():
agent = Agent(
role="test role",
@@ -1793,7 +1793,7 @@ def test_agent_execute_task_with_context():
assert "fox" in result.lower() and "dog" in result.lower()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_with_tool():
@tool
def dummy_tool(query: str) -> str:
@@ -1818,7 +1818,7 @@ def test_agent_execute_task_with_tool():
assert "Dummy result for: test query" in result
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_with_custom_llm():
agent = Agent(
role="test role",
@@ -1839,7 +1839,7 @@ def test_agent_execute_task_with_custom_llm():
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_with_ollama():
agent = Agent(
role="test role",
@@ -1859,7 +1859,7 @@ def test_agent_execute_task_with_ollama():
assert "AI" in result or "artificial intelligence" in result.lower()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -1891,7 +1891,7 @@ def test_agent_with_knowledge_sources():
assert "red" in result.raw.lower()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -1939,7 +1939,7 @@ def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold():
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold_default():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -1988,7 +1988,7 @@ def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold_defau
)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_extensive_role():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -2024,7 +2024,7 @@ def test_agent_with_knowledge_sources_extensive_role():
assert "red" in result.raw.lower()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_works_with_copy():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -2063,7 +2063,7 @@ def test_agent_with_knowledge_sources_works_with_copy():
assert isinstance(agent_copy.llm, BaseLLM)
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_generate_search_query():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)
@@ -2116,7 +2116,7 @@ def test_agent_with_knowledge_sources_generate_search_query():
assert "red" in result.raw.lower()
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_with_no_crewai_knowledge():
mock_knowledge = MagicMock(spec=Knowledge)
@@ -2143,7 +2143,7 @@ def test_agent_with_knowledge_with_no_crewai_knowledge():
mock_knowledge.query.assert_called_once()
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_only_crewai_knowledge():
mock_knowledge = MagicMock(spec=Knowledge)
@@ -2168,7 +2168,7 @@ def test_agent_with_only_crewai_knowledge():
mock_knowledge.query.assert_called_once()
@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_knowledege_with_crewai_knowledge():
crew_knowledge = MagicMock(spec=Knowledge)
agent_knowledge = MagicMock(spec=Knowledge)
@@ -2197,7 +2197,7 @@ def test_agent_knowledege_with_crewai_knowledge():
crew_knowledge.query.assert_called_once()
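The three mock-based tests above share one pattern; a minimal sketch, assuming Knowledge is importable as in the project's tests:

from unittest.mock import MagicMock

from crewai.knowledge.knowledge import Knowledge

# spec=Knowledge makes attribute typos fail loudly while keeping query()
# assertable; no embedder or vector store is touched.
mock_knowledge = MagicMock(spec=Knowledge)
mock_knowledge.query.return_value = [{"context": "Brandon likes red."}]
# ... run the agent/crew with this knowledge attached ...
mock_knowledge.query.assert_called_once()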
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_litellm_auth_error_handling():
"""Test that LiteLLM authentication errors are handled correctly and not retried."""
from litellm import AuthenticationError as LiteLLMAuthenticationError
@@ -2326,7 +2326,7 @@ def test_litellm_anthropic_error_handling():
mock_llm_call.assert_called_once()
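A compressed sketch of the non-retry assertion style used in these error-handling tests. The agent fixture and patch target are illustrative, and whether the error surfaces raw or wrapped depends on the executor; litellm's AuthenticationError does take message, llm_provider, and model:

from unittest.mock import patch

import pytest
from litellm import AuthenticationError


def test_auth_error_is_not_retried(agent):
    with patch.object(agent.llm, "call") as mock_llm_call:
        mock_llm_call.side_effect = AuthenticationError(
            message="invalid api key", llm_provider="openai", model="gpt-4"
        )
        with pytest.raises(AuthenticationError):
            agent.kickoff(messages="hello")
        # Exactly one call: authentication failures must not be retried.
        mock_llm_call.assert_called_once()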
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_get_knowledge_search_query():
"""Test that _get_knowledge_search_query calls the LLM with the correct prompts."""
from crewai.utilities.i18n import I18N

View File

@@ -70,7 +70,7 @@ class ResearchResult(BaseModel):
sources: list[str] = Field(description="List of sources used")
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
@pytest.mark.parametrize("verbose", [True, False])
def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
"""Test that LiteAgent is created with the correct parameters when Agent.kickoff() is called."""
@@ -130,7 +130,7 @@ def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
assert created_lite_agent["response_format"] == TestResponse
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_with_tools():
"""Test that Agent can use tools."""
# Create a LiteAgent with tools
@@ -174,7 +174,7 @@ def test_lite_agent_with_tools():
assert event.tool_name == "search_web"
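The tool wiring behind search_web, sketched with crewai's tool decorator (the return value is invented for illustration):

from crewai import Agent
from crewai.tools import tool


@tool("search_web")
def search_web(query: str) -> str:
    """Search the web and return a short result for the query."""
    return f"Results for: {query}"


agent = Agent(
    role="Researcher",
    goal="Find facts",
    backstory="You search before answering.",
    tools=[search_web],
)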
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_structured_output():
"""Test that Agent can return a simple structured output."""
@@ -217,7 +217,7 @@ def test_lite_agent_structured_output():
return result
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_returns_usage_metrics():
"""Test that LiteAgent returns usage metrics."""
llm = LLM(model="gpt-4o-mini")
@@ -238,7 +238,7 @@ def test_lite_agent_returns_usage_metrics():
assert result.usage_metrics["total_tokens"] > 0
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_output_includes_messages():
"""Test that LiteAgentOutput includes messages from agent execution."""
llm = LLM(model="gpt-4o-mini")
@@ -259,7 +259,7 @@ def test_lite_agent_output_includes_messages():
assert len(result.messages) > 0
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_lite_agent_returns_usage_metrics_async():
"""Test that LiteAgent returns usage metrics when run asynchronously."""
@@ -354,9 +354,9 @@ def test_sets_parent_flow_when_inside_flow():
assert captured_agent.parent_flow is flow
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_guardrail_is_called_using_string():
guardrail_events = defaultdict(list)
guardrail_events: dict[str, list] = defaultdict(list)
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
@@ -369,35 +369,33 @@ def test_guardrail_is_called_using_string():
guardrail="""Only include Brazilian players, both women and men""",
)
all_events_received = threading.Event()
condition = threading.Condition()
@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
assert isinstance(source, LiteAgent)
assert source.original_agent == agent
with condition:
guardrail_events["started"].append(event)
if (
len(guardrail_events["started"]) == 2
and len(guardrail_events["completed"]) == 2
):
all_events_received.set()
condition.notify()
@crewai_event_bus.on(LLMGuardrailCompletedEvent)
def capture_guardrail_completed(source, event):
assert isinstance(source, LiteAgent)
assert source.original_agent == agent
with condition:
guardrail_events["completed"].append(event)
if (
len(guardrail_events["started"]) == 2
and len(guardrail_events["completed"]) == 2
):
all_events_received.set()
condition.notify()
result = agent.kickoff(messages="Top 10 best players in the world?")
assert all_events_received.wait(timeout=10), (
"Timeout waiting for all guardrail events"
with condition:
success = condition.wait_for(
lambda: len(guardrail_events["started"]) >= 2
and len(guardrail_events["completed"]) >= 2,
timeout=10,
)
assert success, "Timeout waiting for all guardrail events"
assert len(guardrail_events["started"]) == 2
assert len(guardrail_events["completed"]) == 2
assert not guardrail_events["completed"][0].success
@@ -408,33 +406,27 @@ def test_guardrail_is_called_using_string():
)
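This test, and the two below, all move from a threading.Event that was set only when the counts matched exactly, to a threading.Condition whose predicate is re-evaluated under the lock; with >= checks in wait_for, a wakeup can no longer be missed when counts pass the target between checks. Distilled, the new pattern looks like this:

import threading
from collections import defaultdict

guardrail_events: dict[str, list] = defaultdict(list)
condition = threading.Condition()


def on_guardrail_event(kind: str, event: object) -> None:
    # Handlers mutate shared state and notify under the same lock.
    with condition:
        guardrail_events[kind].append(event)
        condition.notify()


def wait_for_events(started: int, completed: int, timeout: float = 10.0) -> bool:
    # wait_for checks the predicate once before sleeping and again on each
    # notify, so events recorded before the wait began still count.
    with condition:
        return condition.wait_for(
            lambda: len(guardrail_events["started"]) >= started
            and len(guardrail_events["completed"]) >= completed,
            timeout=timeout,
        )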
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_guardrail_is_called_using_callable():
guardrail_events = defaultdict(list)
guardrail_events: dict[str, list] = defaultdict(list)
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
all_events_received = threading.Event()
condition = threading.Condition()
@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
with condition:
guardrail_events["started"].append(event)
if (
len(guardrail_events["started"]) == 1
and len(guardrail_events["completed"]) == 1
):
all_events_received.set()
condition.notify()
@crewai_event_bus.on(LLMGuardrailCompletedEvent)
def capture_guardrail_completed(source, event):
with condition:
guardrail_events["completed"].append(event)
if (
len(guardrail_events["started"]) == 1
and len(guardrail_events["completed"]) == 1
):
all_events_received.set()
condition.notify()
agent = Agent(
role="Sports Analyst",
@@ -445,42 +437,40 @@ def test_guardrail_is_called_using_callable():
result = agent.kickoff(messages="Top 1 best players in the world?")
assert all_events_received.wait(timeout=10), (
"Timeout waiting for all guardrail events"
with condition:
success = condition.wait_for(
lambda: len(guardrail_events["started"]) >= 1
and len(guardrail_events["completed"]) >= 1,
timeout=10,
)
assert success, "Timeout waiting for all guardrail events"
assert len(guardrail_events["started"]) == 1
assert len(guardrail_events["completed"]) == 1
assert guardrail_events["completed"][0].success
assert "Pelé - Santos, 1958" in result.raw
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_guardrail_reached_attempt_limit():
guardrail_events = defaultdict(list)
guardrail_events: dict[str, list] = defaultdict(list)
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
all_events_received = threading.Event()
condition = threading.Condition()
@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
with condition:
guardrail_events["started"].append(event)
if (
len(guardrail_events["started"]) == 3
and len(guardrail_events["completed"]) == 3
):
all_events_received.set()
condition.notify()
@crewai_event_bus.on(LLMGuardrailCompletedEvent)
def capture_guardrail_completed(source, event):
with condition:
guardrail_events["completed"].append(event)
if (
len(guardrail_events["started"]) == 3
and len(guardrail_events["completed"]) == 3
):
all_events_received.set()
condition.notify()
agent = Agent(
role="Sports Analyst",
@@ -498,9 +488,13 @@ def test_guardrail_reached_attempt_limit():
):
agent.kickoff(messages="Top 10 best players in the world?")
assert all_events_received.wait(timeout=10), (
"Timeout waiting for all guardrail events"
with condition:
success = condition.wait_for(
lambda: len(guardrail_events["started"]) >= 3
and len(guardrail_events["completed"]) >= 3,
timeout=10,
)
assert success, "Timeout waiting for all guardrail events"
assert len(guardrail_events["started"]) == 3 # 2 retries + 1 initial call
assert len(guardrail_events["completed"]) == 3 # 2 retries + 1 initial call
assert not guardrail_events["completed"][0].success
@@ -508,7 +502,7 @@ def test_guardrail_reached_attempt_limit():
assert not guardrail_events["completed"][2].success
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_output_when_guardrail_returns_base_model():
class Player(BaseModel):
name: str
@@ -599,7 +593,7 @@ def test_lite_agent_with_custom_llm_and_guardrails():
assert result2.raw == "Modified by guardrail"
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_with_invalid_llm():
"""Test that LiteAgent raises proper error when create_llm returns None."""
with patch("crewai.lite_agent.create_llm", return_value=None):
@@ -615,7 +609,7 @@ def test_lite_agent_with_invalid_llm():
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get")
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_kickoff_with_platform_tools(mock_get):
"""Test that Agent.kickoff() properly integrates platform tools with LiteAgent"""
mock_response = Mock()
@@ -657,7 +651,7 @@ def test_agent_kickoff_with_platform_tools(mock_get):
@patch.dict("os.environ", {"EXA_API_KEY": "test_exa_key"})
@patch("crewai.agent.Agent._get_external_mcp_tools")
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_kickoff_with_mcp_tools(mock_get_mcp_tools):
"""Test that Agent.kickoff() properly integrates MCP tools with LiteAgent"""
# Setup mock MCP tools - create a proper BaseTool instance

View File

@@ -1,126 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJdi9swEHz3r1j0HBc7ZydXvx1HC0evhT6UUtrDKNLa1lXWqpJ8aTjy
34vsXOz0A/pi8M7OaGZ3nxMApiSrgImOB9Fbnd4Why/v79HeePeD37/Zfdx8evf5+rY4fNjdPbJV
ZNDuEUV4Yb0S1FuNQZGZYOGQB4yq+bYsr7J1nhcj0JNEHWmtDWlBaa+MStfZukizbZpfn9gdKYGe
VfA1AQB4Hr/Rp5H4k1WQrV4qPXrPW2TVuQmAOdKxwrj3ygduAlvNoCAT0IzW78DQHgQ30KonBA5t
tA3c+D06gG/mrTJcw834X0GHWhPsyWm5FHTYDJ7HUGbQegFwYyjwOJQxysMJOZ7Na2qto53/jcoa
ZZTvaofck4lGfSDLRvSYADyMQxoucjPrqLehDvQdx+fycjvpsXk3C/TqBAYKXC/q29NoL/VqiYEr
7RdjZoKLDuVMnXfCB6loASSL1H+6+Zv2lFyZ9n/kZ0AItAFlbR1KJS4Tz20O4+n+q+085dEw8+ie
lMA6KHRxExIbPujpoJg/+IB93SjTorNOTVfV2LrcZLzZYFm+Zskx+QUAAP//AwB1vYZ+YwMAAA==
headers:
CF-RAY:
- 96fc9f29dea3cf1f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 15 Aug 2025 23:55:15 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=oA9oTa3cE0ZaEUDRf0hCpnarSAQKzrVUhl6qDS4j09w-1755302115-1.0.1.1-gUUDl4ZqvBQkg7244DTwOmSiDUT2z_AiQu0P1xUaABjaufSpZuIlI5G0H7OSnW.ldypvpxjj45NGWesJ62M_2U7r20tHz_gMmDFw6D5ZiNc;
path=/; expires=Sat, 16-Aug-25 00:25:15 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ICenEGMmOE5jaOjwD30bAOwrF8.XRbSIKTBl1EyWs0o-1755302115700-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '735'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '753'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_212fde9d945a462ba0d89ea856131dce
status:
code: 200
message: OK
version: 1
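Aside on reading these cassettes: the !!binary response body is base64 in YAML, and Content-Encoding: gzip means it decompresses to the JSON the client saw. A small helper (illustrative, not part of the repo):

import base64
import gzip
import json


def decode_cassette_body(b64_blob: str) -> dict:
    # YAML !!binary -> raw bytes -> gunzip -> JSON payload
    return json.loads(gzip.decompress(base64.b64decode(b64_blob)))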

View File

@@ -0,0 +1,211 @@
interactions:
- request:
body: '{"trace_id": "4d0d2b51-d83a-4054-b41e-8c2d17baa88f", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "1.6.0", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-29T02:50:39.376314+00:00"},
"ephemeral_trace_id": "4d0d2b51-d83a-4054-b41e-8c2d17baa88f"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '488'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.6.0
X-Crewai-Version:
- 1.6.0
authorization:
- AUTHORIZATION-XXX
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"71726285-2e63-4d2a-b4c4-4bbd0ff6a9f1","ephemeral_trace_id":"4d0d2b51-d83a-4054-b41e-8c2d17baa88f","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.6.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.6.0","privacy_level":"standard"},"created_at":"2025-11-29T02:50:39.931Z","updated_at":"2025-11-29T02:50:39.931Z","access_code":"TRACE-bf7f3f49b3","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '515'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 29 Nov 2025 02:50:39 GMT
cache-control:
- no-store
content-security-policy:
- CSP-FILTERED
etag:
- ETAG-XXX
expires:
- '0'
permissions-policy:
- PERMISSIONS-POLICY-XXX
pragma:
- no-cache
referrer-policy:
- REFERRER-POLICY-XXX
strict-transport-security:
- STS-XXX
vary:
- Accept
x-content-type-options:
- X-CONTENT-TYPE-XXX
x-frame-options:
- X-FRAME-OPTIONS-XXX
x-permitted-cross-domain-policies:
- X-PERMITTED-XXX
x-request-id:
- X-REQUEST-ID-XXX
x-runtime:
- X-RUNTIME-XXX
x-xss-protection:
- X-XSS-PROTECTION-XXX
status:
code: 201
message: Created
- request:
body: '{"messages":[{"role":"system","content":"You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Analyze the data\n\nThis
is the expected criteria for your final answer: Analysis report\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '785'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFbbbhtHDH33VxD71AKyECeynegtdW9uiiZI3AJpXRj0DHeXzSxnO5yV
ohT594Kzq5WcpEBfdBkOycPDy/CfE4CKfbWGyrWYXdeH06v2/EX7li7T5uenL364rH/6NUn+/Ze/
3nP99mW1MI14/xe5vNdautj1gTJHGcUuEWYyq2eXF6snT1cXT54VQRc9BVNr+ny6Wp6ddix8+vjR
4/PTR6vTs9Wk3kZ2pNUa/jgBAPinfBpQ8fS+WsOjxf6kI1VsqFrPlwCqFIOdVKjKmlFytTgIXZRM
UrDftHFo2ryGa5C4BYcCDW8IEBoLAFB0S+lWvmfBAM/LvzXcyq08Fww7ZYXX1MeU7ehsCdeSU/SD
MyJu5aYlyKjvINHfAydSQMhtTOYTcG8g1pBbKn4FPGZcwk2EPsUNe0Ni1CZqSdSQpeJuUVTsMrSo
cE8koDvN1GFmhyHsINGGaUt+AVvOLWC2mDkK5AieMnIAFA+JAm1QHNl5/gRwR5J1abE9XsK35u3l
hpLZvZU3bEoSQXtyXLMb4WxR99g9sBSTfYpdXzCzTgEAqg5dYaQhobRXV8p7SHmPyMCQZqhjmlkz
qgEF0OUBA6gjwcRxMVrpI0tW0MG1gAoydOYBA2wwDKQLcJipiYntd+aOQGn8ExPE3FKCDSbG+0AK
2zgEb867guYej5I2BlMYejIx9CpFR6osza2cjkdXgVBYmjV8JzokluaQPlaoExHUKXbA4qJYxZK4
AqfjYmnGbRmjlGKyrEzWX6YGhT+gJXcNV1NkH0zNrmtOg8uj1+LRaKS6JpdLpe8Jne39xjpgmA3+
WgC4FlPWYrBJ2LdqyWFvJVXvICcSP0p7K7QkCl9xDdj3gZ3R+HXhaLWEuW9uyLXCltnimdQl7guk
Nxkza2anFk5wQyhQjPOOUBbQkefyHT0twPrbY/LgacO4L/FBPKUiAkeSEwbIJJ7E7QpOz9pTUo5S
Ir+xCGZwa7geQ2M3ux76rTmJCXzcSvk9hR03lMYqsnrnjk7Hahqb2axfxZRoiuLg46ol987I3cu0
5d6aOW+tnz3XNSWSfKjFQuL5Er5n8SxNYe4F7eDVRPr6wOMIWrmREoXkQ2YsfJTYYTCQU4/OWK9F
uWmzcSCZUp8ozxyU+jlK9rbFbNo74K4Pu6L/KpZBgwGucFDSNfy4662nlBSijIkJO4u7RpdjMgh1
GKzkjxqjhHqxhKsoLgyWpxLtm6HrMO1KLSAL1BMTI/SulFuhspS5GauZQknb/aAspApl/r/PI+3k
92NmZuA1udh1JP7IUj2kMhbQjVwkYNmQZm7KpYL2cgk/c8cjXQXtc9mZN80Jy0AicXEwVsnPwymY
Cvlp/LnY02hdh7pmx5b/aVxz15v7PUnU59Z4OOrgW3m6hOd9T+Lt+Sw9+NlU/rpUZOnnBeRSV+Ng
Gd0YtH0DYoA45H6YHoFvyFlKzX0im1yfTf+Hk5/14eifn7zpDYhDDpaSEk9HuY0+htjsHswtOsz9
D/MMm+dX2C3hut4/A/uJihvkYJEtygjaTZwpgWbqFbYcAuxKYeAh7NIXJb+m+inawsB34o3y6eR4
qUhUD4q22cgQwpEAReJUErbO/DlJPs4LTIhNn+K9fqJa1Sys7V0i1Ci2rGiOfVWkH08A/iyL0vBg
96lGuu9yfEfF3dn5+WivOixoB+mTp88maY4Zw0FwvlotvmDwbqRKj3atyqFryR9UD4sZDp7jkeDk
KOzP4XzJ9hg6S/N/zB8EzlGfyd/1iTy7hyEfriWyBfa/rs00F8CV2tbj6C4zJUuFpxqHMG6V1bh4
3dUsjc1LHlfLur97dnlxQeerZ/ePq5OPJ/8CAAD//wMAwDPj9GkLAAA=
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 29 Nov 2025 02:50:45 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '5125'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '5227'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1
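This newer cassette's AUTHORIZATION-XXX / SET-COOKIE-XXX placeholders suggest headers are normalized at record time rather than deleted. One way to produce that with vcrpy, sketched under assumptions (the scrub list and fixture wiring are illustrative; vcrpy's filter_headers accepts (name, replacement) tuples, and before_record_response is its documented hook):

import pytest


def _scrub_response(response):
    # Replace volatile response headers with stable placeholders so
    # cassettes diff cleanly across re-records.
    headers = response.get("headers", {})
    for name in ("Set-Cookie", "CF-RAY", "x-request-id", "etag"):
        if name in headers:
            headers[name] = [f"{name.upper()}-XXX"]
    return response


@pytest.fixture(scope="module")
def vcr_config():
    return {
        "filter_headers": [("authorization", "AUTHORIZATION-XXX")],
        "before_record_response": _scrub_response,
    }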

Some files were not shown because too many files have changed in this diff.