Mirror of https://github.com/crewAIInc/crewAI.git
Synced 2026-01-19 12:58:14 +00:00

Compare commits: devin/1763 ... gl/chore/i (5 commits)

| Author | SHA1 | Date |
|---|---|---|
| | e4fbd9bf26 | |
| | 008f906f60 | |
| | 3ce019b07b | |
| | 2355ec0733 | |
| | c925d2d519 | |

161 .env.test (new file)
@@ -0,0 +1,161 @@
# =============================================================================
# Test Environment Variables
# =============================================================================
# This file contains all environment variables needed to run tests locally
# in a way that mimics the GitHub Actions CI environment.

# =============================================================================

# -----------------------------------------------------------------------------
# LLM Provider API Keys
# -----------------------------------------------------------------------------
OPENAI_API_KEY=fake-api-key
ANTHROPIC_API_KEY=fake-anthropic-key
GEMINI_API_KEY=fake-gemini-key
AZURE_API_KEY=fake-azure-key
OPENROUTER_API_KEY=fake-openrouter-key

# -----------------------------------------------------------------------------
# AWS Credentials
# -----------------------------------------------------------------------------
AWS_ACCESS_KEY_ID=fake-aws-access-key
AWS_SECRET_ACCESS_KEY=fake-aws-secret-key
AWS_DEFAULT_REGION=us-east-1
AWS_REGION_NAME=us-east-1

# -----------------------------------------------------------------------------
# Azure OpenAI Configuration
# -----------------------------------------------------------------------------
AZURE_ENDPOINT=https://fake-azure-endpoint.openai.azure.com
AZURE_OPENAI_ENDPOINT=https://fake-azure-endpoint.openai.azure.com
AZURE_OPENAI_API_KEY=fake-azure-openai-key
AZURE_API_VERSION=2024-02-15-preview
OPENAI_API_VERSION=2024-02-15-preview

# -----------------------------------------------------------------------------
# Google Cloud Configuration
# -----------------------------------------------------------------------------
#GOOGLE_CLOUD_PROJECT=fake-gcp-project
#GOOGLE_CLOUD_LOCATION=us-central1

# -----------------------------------------------------------------------------
# OpenAI Configuration
# -----------------------------------------------------------------------------
OPENAI_BASE_URL=https://api.openai.com/v1
OPENAI_API_BASE=https://api.openai.com/v1

# -----------------------------------------------------------------------------
# Search & Scraping Tool API Keys
# -----------------------------------------------------------------------------
SERPER_API_KEY=fake-serper-key
EXA_API_KEY=fake-exa-key
BRAVE_API_KEY=fake-brave-key
FIRECRAWL_API_KEY=fake-firecrawl-key
TAVILY_API_KEY=fake-tavily-key
SERPAPI_API_KEY=fake-serpapi-key
SERPLY_API_KEY=fake-serply-key
LINKUP_API_KEY=fake-linkup-key
PARALLEL_API_KEY=fake-parallel-key

# -----------------------------------------------------------------------------
# Exa Configuration
# -----------------------------------------------------------------------------
EXA_BASE_URL=https://api.exa.ai

# -----------------------------------------------------------------------------
# Web Scraping & Automation
# -----------------------------------------------------------------------------
BRIGHT_DATA_API_KEY=fake-brightdata-key
BRIGHT_DATA_ZONE=fake-zone
BRIGHTDATA_API_URL=https://api.brightdata.com
BRIGHTDATA_DEFAULT_TIMEOUT=600
BRIGHTDATA_DEFAULT_POLLING_INTERVAL=1

OXYLABS_USERNAME=fake-oxylabs-user
OXYLABS_PASSWORD=fake-oxylabs-pass

SCRAPFLY_API_KEY=fake-scrapfly-key
SCRAPEGRAPH_API_KEY=fake-scrapegraph-key

BROWSERBASE_API_KEY=fake-browserbase-key
BROWSERBASE_PROJECT_ID=fake-browserbase-project

HYPERBROWSER_API_KEY=fake-hyperbrowser-key
MULTION_API_KEY=fake-multion-key
APIFY_API_TOKEN=fake-apify-token

# -----------------------------------------------------------------------------
# Database & Vector Store Credentials
# -----------------------------------------------------------------------------
SINGLESTOREDB_URL=mysql://fake:fake@localhost:3306/fake
SINGLESTOREDB_HOST=localhost
SINGLESTOREDB_PORT=3306
SINGLESTOREDB_USER=fake-user
SINGLESTOREDB_PASSWORD=fake-password
SINGLESTOREDB_DATABASE=fake-database
SINGLESTOREDB_CONNECT_TIMEOUT=30

SNOWFLAKE_USER=fake-snowflake-user
SNOWFLAKE_PASSWORD=fake-snowflake-password
SNOWFLAKE_ACCOUNT=fake-snowflake-account
SNOWFLAKE_WAREHOUSE=fake-snowflake-warehouse
SNOWFLAKE_DATABASE=fake-snowflake-database
SNOWFLAKE_SCHEMA=fake-snowflake-schema

WEAVIATE_URL=http://localhost:8080
WEAVIATE_API_KEY=fake-weaviate-key

EMBEDCHAIN_DB_URI=sqlite:///test.db

# Databricks Credentials
DATABRICKS_HOST=https://fake-databricks.cloud.databricks.com
DATABRICKS_TOKEN=fake-databricks-token
DATABRICKS_CONFIG_PROFILE=fake-profile

# MongoDB Credentials
MONGODB_URI=mongodb://fake:fake@localhost:27017/fake

# -----------------------------------------------------------------------------
# CrewAI Platform & Enterprise
# -----------------------------------------------------------------------------
# setting CREWAI_PLATFORM_INTEGRATION_TOKEN causes these test to fail:
#=========================== short test summary info ============================
#FAILED tests/test_context.py::TestPlatformIntegrationToken::test_platform_context_manager_basic_usage - AssertionError: assert 'fake-platform-token' is None
# + where 'fake-platform-token' = get_platform_integration_token()
#FAILED tests/test_context.py::TestPlatformIntegrationToken::test_context_var_isolation_between_tests - AssertionError: assert 'fake-platform-token' is None
# + where 'fake-platform-token' = get_platform_integration_token()
#FAILED tests/test_context.py::TestPlatformIntegrationToken::test_multiple_sequential_context_managers - AssertionError: assert 'fake-platform-token' is None
# + where 'fake-platform-token' = get_platform_integration_token()
#CREWAI_PLATFORM_INTEGRATION_TOKEN=fake-platform-token
CREWAI_PERSONAL_ACCESS_TOKEN=fake-personal-token
CREWAI_PLUS_URL=https://fake.crewai.com

# -----------------------------------------------------------------------------
# Other Service API Keys
# -----------------------------------------------------------------------------
ZAPIER_API_KEY=fake-zapier-key
PATRONUS_API_KEY=fake-patronus-key
MINDS_API_KEY=fake-minds-key
HF_TOKEN=fake-hf-token

# -----------------------------------------------------------------------------
# Feature Flags/Testing Modes
# -----------------------------------------------------------------------------
CREWAI_DISABLE_TELEMETRY=true
OTEL_SDK_DISABLED=true
CREWAI_TESTING=true
CREWAI_TRACING_ENABLED=false

# -----------------------------------------------------------------------------
# Testing/CI Configuration
# -----------------------------------------------------------------------------
# VCR recording mode: "none" (default), "new_episodes", "all", "once"
PYTEST_VCR_RECORD_MODE=none

# Set to "true" by GitHub when running in GitHub Actions
# GITHUB_ACTIONS=false

# -----------------------------------------------------------------------------
# Python Configuration
# -----------------------------------------------------------------------------
PYTHONUNBUFFERED=1
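Every value above is a deliberate fake, so tests can assert against the placeholders without ever reaching a real provider; network traffic is blocked or replayed from cassettes regardless. A minimal illustrative check (the test itself is hypothetical, not part of the change):

```python
import os


def test_openai_key_is_the_fake_placeholder() -> None:
    # Loaded from .env.test by the workspace conftest.py shown below.
    assert os.environ["OPENAI_API_KEY"] == "fake-api-key"
```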
18 .github/workflows/tests.yml (vendored)
@@ -5,18 +5,6 @@ on: [pull_request]
permissions:
  contents: read

env:
  OPENAI_API_KEY: fake-api-key
  PYTHONUNBUFFERED: 1
  BRAVE_API_KEY: fake-brave-key
  SNOWFLAKE_USER: fake-snowflake-user
  SNOWFLAKE_PASSWORD: fake-snowflake-password
  SNOWFLAKE_ACCOUNT: fake-snowflake-account
  SNOWFLAKE_WAREHOUSE: fake-snowflake-warehouse
  SNOWFLAKE_DATABASE: fake-snowflake-database
  SNOWFLAKE_SCHEMA: fake-snowflake-schema
  EMBEDCHAIN_DB_URI: sqlite:///test.db

jobs:
  tests:
    name: tests (${{ matrix.python-version }})

@@ -84,26 +72,20 @@ jobs:
          # fi

          cd lib/crewai && uv run pytest \
            --block-network \
            --timeout=30 \
            -vv \
            --splits 8 \
            --group ${{ matrix.group }} \
            $DURATIONS_ARG \
            --durations=10 \
            -n auto \
            --maxfail=3

      - name: Run tool tests (group ${{ matrix.group }} of 8)
        run: |
          cd lib/crewai-tools && uv run pytest \
            --block-network \
            --timeout=30 \
            -vv \
            --splits 8 \
            --group ${{ matrix.group }} \
            --durations=10 \
            -n auto \
            --maxfail=3
167 conftest.py (new file)
@@ -0,0 +1,167 @@
"""Pytest configuration for crewAI workspace."""

from collections.abc import Generator
import os
from pathlib import Path
import tempfile
from typing import Any

from dotenv import load_dotenv
import pytest
from vcr.request import Request  # type: ignore[import-untyped]


env_test_path = Path(__file__).parent / ".env.test"
load_dotenv(env_test_path, override=True)
load_dotenv(override=True)


@pytest.fixture(autouse=True, scope="function")
def cleanup_event_handlers() -> Generator[None, Any, None]:
    """Clean up event bus handlers after each test to prevent test pollution."""
    yield

    try:
        from crewai.events.event_bus import crewai_event_bus

        with crewai_event_bus._rwlock.w_locked():
            crewai_event_bus._sync_handlers.clear()
            crewai_event_bus._async_handlers.clear()
    except Exception:  # noqa: S110
        pass


@pytest.fixture(autouse=True, scope="function")
def setup_test_environment() -> Generator[None, Any, None]:
    """Setup test environment for crewAI workspace."""
    with tempfile.TemporaryDirectory() as temp_dir:
        storage_dir = Path(temp_dir) / "crewai_test_storage"
        storage_dir.mkdir(parents=True, exist_ok=True)

        if not storage_dir.exists() or not storage_dir.is_dir():
            raise RuntimeError(
                f"Failed to create test storage directory: {storage_dir}"
            )

        try:
            test_file = storage_dir / ".permissions_test"
            test_file.touch()
            test_file.unlink()
        except (OSError, IOError) as e:
            raise RuntimeError(
                f"Test storage directory {storage_dir} is not writable: {e}"
            ) from e

        os.environ["CREWAI_STORAGE_DIR"] = str(storage_dir)
        os.environ["CREWAI_TESTING"] = "true"

        try:
            yield
        finally:
            os.environ.pop("CREWAI_TESTING", "true")
            os.environ.pop("CREWAI_STORAGE_DIR", None)
            os.environ.pop("CREWAI_DISABLE_TELEMETRY", "true")
            os.environ.pop("OTEL_SDK_DISABLED", "true")
            os.environ.pop("OPENAI_BASE_URL", "https://api.openai.com/v1")
            os.environ.pop("OPENAI_API_BASE", "https://api.openai.com/v1")


HEADERS_TO_FILTER = {
    "authorization": "AUTHORIZATION-XXX",
    "content-security-policy": "CSP-FILTERED",
    "cookie": "COOKIE-XXX",
    "set-cookie": "SET-COOKIE-XXX",
    "permissions-policy": "PERMISSIONS-POLICY-XXX",
    "referrer-policy": "REFERRER-POLICY-XXX",
    "strict-transport-security": "STS-XXX",
    "x-content-type-options": "X-CONTENT-TYPE-XXX",
    "x-frame-options": "X-FRAME-OPTIONS-XXX",
    "x-permitted-cross-domain-policies": "X-PERMITTED-XXX",
    "x-request-id": "X-REQUEST-ID-XXX",
    "x-runtime": "X-RUNTIME-XXX",
    "x-xss-protection": "X-XSS-PROTECTION-XXX",
    "x-stainless-arch": "X-STAINLESS-ARCH-XXX",
    "x-stainless-os": "X-STAINLESS-OS-XXX",
    "x-stainless-read-timeout": "X-STAINLESS-READ-TIMEOUT-XXX",
    "cf-ray": "CF-RAY-XXX",
    "etag": "ETAG-XXX",
    "Strict-Transport-Security": "STS-XXX",
    "access-control-expose-headers": "ACCESS-CONTROL-XXX",
    "openai-organization": "OPENAI-ORG-XXX",
    "openai-project": "OPENAI-PROJECT-XXX",
    "x-ratelimit-limit-requests": "X-RATELIMIT-LIMIT-REQUESTS-XXX",
    "x-ratelimit-limit-tokens": "X-RATELIMIT-LIMIT-TOKENS-XXX",
    "x-ratelimit-remaining-requests": "X-RATELIMIT-REMAINING-REQUESTS-XXX",
    "x-ratelimit-remaining-tokens": "X-RATELIMIT-REMAINING-TOKENS-XXX",
    "x-ratelimit-reset-requests": "X-RATELIMIT-RESET-REQUESTS-XXX",
    "x-ratelimit-reset-tokens": "X-RATELIMIT-RESET-TOKENS-XXX",
    "x-goog-api-key": "X-GOOG-API-KEY-XXX",
}


def _filter_request_headers(request: Request) -> Request:  # type: ignore[no-any-unimported]
    """Filter sensitive headers from request before recording."""
    for header_name, replacement in HEADERS_TO_FILTER.items():
        for variant in [header_name, header_name.upper(), header_name.title()]:
            if variant in request.headers:
                request.headers[variant] = [replacement]
    return request


def _filter_response_headers(response: dict[str, Any]) -> dict[str, Any]:
    """Filter sensitive headers from response before recording."""
    for header_name, replacement in HEADERS_TO_FILTER.items():
        for variant in [header_name, header_name.upper(), header_name.title()]:
            if variant in response["headers"]:
                response["headers"][variant] = [replacement]
    return response


@pytest.fixture(scope="module")
def vcr_cassette_dir(request: Any) -> str:
    """Generate cassette directory path based on test module location.

    Organizes cassettes to mirror test directory structure within each package:
    lib/crewai/tests/llms/google/test_google.py -> lib/crewai/tests/cassettes/llms/google/
    lib/crewai-tools/tests/tools/test_search.py -> lib/crewai-tools/tests/cassettes/tools/
    """
    test_file = Path(request.fspath)

    for parent in test_file.parents:
        if parent.name in ("crewai", "crewai-tools") and parent.parent.name == "lib":
            package_root = parent
            break
    else:
        package_root = test_file.parent

    tests_root = package_root / "tests"
    test_dir = test_file.parent

    if test_dir != tests_root:
        relative_path = test_dir.relative_to(tests_root)
        cassette_dir = tests_root / "cassettes" / relative_path
    else:
        cassette_dir = tests_root / "cassettes"

    cassette_dir.mkdir(parents=True, exist_ok=True)

    return str(cassette_dir)


@pytest.fixture(scope="module")
def vcr_config(vcr_cassette_dir: str) -> dict[str, Any]:
    """Configure VCR with organized cassette storage."""
    config = {
        "cassette_library_dir": vcr_cassette_dir,
        "record_mode": os.getenv("PYTEST_VCR_RECORD_MODE", "once"),
        "filter_headers": [(k, v) for k, v in HEADERS_TO_FILTER.items()],
        "before_record_request": _filter_request_headers,
        "before_record_response": _filter_response_headers,
        "filter_query_parameters": ["key"],
        "match_on": ["method", "scheme", "host", "port", "path"],
    }

    if os.getenv("GITHUB_ACTIONS") == "true":
        config["record_mode"] = "none"

    return config
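Because `vcr_cassette_dir` and `vcr_config` are module-scoped fixtures with the names the VCR pytest plugin looks for, individual tests only add the `@pytest.mark.vcr()` marker and inherit the cassette layout and header filtering above. A hedged sketch of a test relying on that wiring (assuming the pytest-recording plugin used by CI; the agent call and assertion are illustrative):

```python
import pytest

from crewai import Agent


@pytest.mark.vcr()  # cassette path resolved via the vcr_cassette_dir fixture above
def test_agent_replays_recorded_llm_call() -> None:
    agent = Agent(role="test role", goal="test goal", backstory="test backstory")
    result = agent.kickoff("What is 1 + 1?")
    assert "2" in result.raw
```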
@@ -63,10 +63,6 @@ crew = Crew(
| **Guardrails** _(optional)_ | `guardrails` | `Optional[List[Callable] | List[str]]` | List of guardrails to validate task output before proceeding to next task. |
| **Guardrail Max Retries** _(optional)_ | `guardrail_max_retries` | `Optional[int]` | Maximum number of retries when guardrail validation fails. Defaults to 3. |

<Note type="warning" title="Deprecated: max_retries">
The task attribute `max_retries` is deprecated and will be removed in v1.0.0.
Use `guardrail_max_retries` instead to control retry attempts when a guardrail fails.
</Note>
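For example, the retry budget now sits next to the guardrails themselves. A minimal sketch (the guardrail bodies and task values are illustrative):

```python
from crewai import Agent, Task
from crewai.tasks.task_output import TaskOutput


def no_placeholders(output: TaskOutput) -> tuple[bool, str]:
    # Programmatic guardrail: return (passed, result_or_feedback).
    if "TODO" in output.raw:
        return False, "Remove the TODO placeholders before finishing."
    return True, output.raw


summary_task = Task(
    description="Summarize the report",
    expected_output="A short summary with no placeholders",
    agent=Agent(role="Writer", goal="Write clean summaries", backstory="..."),
    guardrails=[no_placeholders, "The summary must be under 200 words"],
    guardrail_max_retries=3,  # replaces the deprecated max_retries
)
```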

## Creating Tasks

@@ -218,7 +218,7 @@ Update the root `README.md` only if the tool introduces a new category or notabl

## Discovery and specs

Our internal tooling discovers classes whose names end with `Tool`. Keep your class exported from the module path under `crewai_tools/tools/...` to be picked up by scripts like `generate_tool_specs.py`.
Our internal tooling discovers classes whose names end with `Tool`. Keep your class exported from the module path under `crewai_tools/tools/...` to be picked up by scripts like `crewai_tools.generate_tool_specs.py`.
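A minimal sketch of a tool the spec generator would discover; the module path and class are hypothetical, and only the `Tool` suffix plus the export from `crewai_tools` matter:

```python
# crewai_tools/tools/acme_search_tool/acme_search_tool.py (hypothetical path)
from crewai.tools.base_tool import BaseTool


class AcmeSearchTool(BaseTool):
    name: str = "Acme Search"
    description: str = "Searches the hypothetical Acme index."

    def _run(self, query: str) -> str:
        # A real tool would call the underlying service here.
        return f"results for {query}"
```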

---

@@ -8,17 +8,17 @@ authors = [
|
||||
]
|
||||
requires-python = ">=3.10, <3.14"
|
||||
dependencies = [
|
||||
"lancedb>=0.5.4",
|
||||
"pytube>=15.0.0",
|
||||
"requests>=2.32.5",
|
||||
"docker>=7.1.0",
|
||||
"lancedb~=0.5.4",
|
||||
"pytube~=15.0.0",
|
||||
"requests~=2.32.5",
|
||||
"docker~=7.1.0",
|
||||
"crewai==1.6.1",
|
||||
"lancedb>=0.5.4",
|
||||
"tiktoken>=0.8.0",
|
||||
"beautifulsoup4>=4.13.4",
|
||||
"python-docx>=1.2.0",
|
||||
"youtube-transcript-api>=1.2.2",
|
||||
"pymupdf>=1.26.6",
|
||||
"lancedb~=0.5.4",
|
||||
"tiktoken~=0.8.0",
|
||||
"beautifulsoup4~=4.13.4",
|
||||
"python-docx~=1.2.0",
|
||||
"youtube-transcript-api~=1.2.2",
|
||||
"pymupdf~=1.26.6",
|
||||
]
|
||||
|
||||
|
||||
|
||||
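The switch from `>=` to `~=` pins each dependency to a PEP 440 compatible-release range, so `~=0.5.4` accepts patch releases but not `0.6.0`. A quick way to sanity-check a specifier, assuming the `packaging` library is available:

```python
from packaging.specifiers import SpecifierSet

spec = SpecifierSet("~=0.5.4")  # equivalent to >=0.5.4, ==0.5.*
print("0.5.9" in spec)  # True: patch upgrades stay inside the range
print("0.6.0" in spec)  # False: minor bumps are excluded
```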
@@ -4,17 +4,20 @@ from collections.abc import Mapping
|
||||
import inspect
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Any, cast
|
||||
from typing import Any
|
||||
|
||||
from crewai.tools.base_tool import BaseTool, EnvVar
|
||||
from crewai_tools import tools
|
||||
from pydantic import BaseModel
|
||||
from pydantic.json_schema import GenerateJsonSchema
|
||||
from pydantic_core import PydanticOmit
|
||||
|
||||
from crewai_tools import tools
|
||||
|
||||
|
||||
class SchemaGenerator(GenerateJsonSchema):
|
||||
def handle_invalid_for_json_schema(self, schema, error_info):
|
||||
def handle_invalid_for_json_schema(
|
||||
self, schema: Any, error_info: Any
|
||||
) -> dict[str, Any]:
|
||||
raise PydanticOmit
|
||||
|
||||
|
||||
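Raising `PydanticOmit` from `handle_invalid_for_json_schema` tells pydantic to drop fields whose types have no JSON Schema representation instead of raising. A standalone sketch of the same pattern (the model is illustrative, not from the repo):

```python
from collections.abc import Callable
from typing import Any

from pydantic import BaseModel
from pydantic.json_schema import GenerateJsonSchema
from pydantic_core import PydanticOmit


class SkipInvalid(GenerateJsonSchema):
    def handle_invalid_for_json_schema(self, schema: Any, error_info: Any) -> dict[str, Any]:
        raise PydanticOmit  # omit the offending field instead of erroring


class ToolArgs(BaseModel):
    query: str
    callback: Callable[[str], None] = print  # not representable in JSON Schema


print(ToolArgs.model_json_schema(schema_generator=SkipInvalid))
# "query" appears in the schema; the "callback" field is silently omitted
```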
@@ -73,7 +76,7 @@ class ToolSpecExtractor:
|
||||
|
||||
@staticmethod
|
||||
def _extract_field_default(
|
||||
field: dict | None, fallback: str | list[Any] = ""
|
||||
field: dict[str, Any] | None, fallback: str | list[Any] = ""
|
||||
) -> str | list[Any] | int:
|
||||
if not field:
|
||||
return fallback
|
||||
@@ -83,7 +86,7 @@ class ToolSpecExtractor:
|
||||
return default if isinstance(default, (list, str, int)) else fallback
|
||||
|
||||
@staticmethod
|
||||
def _extract_params(args_schema_field: dict | None) -> dict[str, Any]:
|
||||
def _extract_params(args_schema_field: dict[str, Any] | None) -> dict[str, Any]:
|
||||
if not args_schema_field:
|
||||
return {}
|
||||
|
||||
@@ -94,15 +97,15 @@ class ToolSpecExtractor:
|
||||
):
|
||||
return {}
|
||||
|
||||
# Cast to type[BaseModel] after runtime check
|
||||
schema_class = cast(type[BaseModel], args_schema_class)
|
||||
try:
|
||||
return schema_class.model_json_schema(schema_generator=SchemaGenerator)
|
||||
return args_schema_class.model_json_schema(schema_generator=SchemaGenerator)
|
||||
except Exception:
|
||||
return {}
|
||||
|
||||
@staticmethod
|
||||
def _extract_env_vars(env_vars_field: dict | None) -> list[dict[str, Any]]:
|
||||
def _extract_env_vars(
|
||||
env_vars_field: dict[str, Any] | None,
|
||||
) -> list[dict[str, Any]]:
|
||||
if not env_vars_field:
|
||||
return []
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
import pytest
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
"""Register custom markers."""
|
||||
config.addinivalue_line("markers", "integration: mark test as an integration test")
|
||||
config.addinivalue_line("markers", "asyncio: mark test as an async test")
|
||||
|
||||
# Set the asyncio loop scope through ini configuration
|
||||
config.inicfg["asyncio_mode"] = "auto"
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def event_loop():
|
||||
"""Create an instance of the default event loop for each test case."""
|
||||
import asyncio
|
||||
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
yield loop
|
||||
loop.close()
|
||||
@@ -2,7 +2,7 @@ import json
|
||||
from unittest import mock
|
||||
|
||||
from crewai.tools.base_tool import BaseTool, EnvVar
|
||||
from generate_tool_specs import ToolSpecExtractor
|
||||
from crewai_tools.generate_tool_specs import ToolSpecExtractor
|
||||
from pydantic import BaseModel, Field
|
||||
import pytest
|
||||
|
||||
@@ -61,8 +61,8 @@ def test_unwrap_schema(extractor):
|
||||
@pytest.fixture
|
||||
def mock_tool_extractor(extractor):
|
||||
with (
|
||||
mock.patch("generate_tool_specs.dir", return_value=["MockTool"]),
|
||||
mock.patch("generate_tool_specs.getattr", return_value=MockTool),
|
||||
mock.patch("crewai_tools.generate_tool_specs.dir", return_value=["MockTool"]),
|
||||
mock.patch("crewai_tools.generate_tool_specs.getattr", return_value=MockTool),
|
||||
):
|
||||
extractor.extract_all_tools()
|
||||
assert len(extractor.tools_spec) == 1
|
||||
|
||||
@@ -4,7 +4,7 @@ from crewai_tools.tools.firecrawl_crawl_website_tool.firecrawl_crawl_website_too
|
||||
FirecrawlCrawlWebsiteTool,
|
||||
)
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_firecrawl_crawl_tool_integration():
|
||||
tool = FirecrawlCrawlWebsiteTool(config={
|
||||
"limit": 2,
|
||||
|
||||
@@ -4,7 +4,7 @@ from crewai_tools.tools.firecrawl_scrape_website_tool.firecrawl_scrape_website_t
|
||||
FirecrawlScrapeWebsiteTool,
|
||||
)
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_firecrawl_scrape_tool_integration():
|
||||
tool = FirecrawlScrapeWebsiteTool()
|
||||
result = tool.run(url="https://firecrawl.dev")
|
||||
|
||||
@@ -3,7 +3,7 @@ import pytest
|
||||
from crewai_tools.tools.firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_firecrawl_search_tool_integration():
|
||||
tool = FirecrawlSearchTool()
|
||||
result = tool.run(query="firecrawl")
|
||||
|
||||
@@ -23,15 +23,13 @@ from crewai_tools.tools.rag.rag_tool import Adapter
|
||||
import pytest
|
||||
|
||||
|
||||
pytestmark = [pytest.mark.vcr(filter_headers=["authorization"])]
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_adapter():
|
||||
mock_adapter = MagicMock(spec=Adapter)
|
||||
return mock_adapter
|
||||
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_directory_search_tool():
|
||||
with tempfile.TemporaryDirectory() as temp_dir:
|
||||
test_file = Path(temp_dir) / "test.txt"
|
||||
@@ -65,6 +63,7 @@ def test_pdf_search_tool(mock_adapter):
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_txt_search_tool():
|
||||
with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as temp_file:
|
||||
temp_file.write(b"This is a test file for txt search")
|
||||
@@ -102,6 +101,7 @@ def test_docx_search_tool(mock_adapter):
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_json_search_tool():
|
||||
with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as temp_file:
|
||||
temp_file.write(b'{"test": "This is a test JSON file"}')
|
||||
@@ -127,6 +127,7 @@ def test_xml_search_tool(mock_adapter):
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_csv_search_tool():
|
||||
with tempfile.NamedTemporaryFile(suffix=".csv", delete=False) as temp_file:
|
||||
temp_file.write(b"name,description\ntest,This is a test CSV file")
|
||||
@@ -141,6 +142,7 @@ def test_csv_search_tool():
|
||||
os.unlink(temp_file_path)
|
||||
|
||||
|
||||
@pytest.mark.vcr()
|
||||
def test_mdx_search_tool():
|
||||
with tempfile.NamedTemporaryFile(suffix=".mdx", delete=False) as temp_file:
|
||||
temp_file.write(b"# Test MDX\nThis is a test MDX file")
|
||||
|
||||
@@ -9,35 +9,35 @@ authors = [
|
||||
requires-python = ">=3.10, <3.14"
|
||||
dependencies = [
|
||||
# Core Dependencies
|
||||
"pydantic>=2.11.9",
|
||||
"openai>=1.13.3",
|
||||
"pydantic~=2.11.9",
|
||||
"openai~=1.83.0",
|
||||
"instructor>=1.3.3",
|
||||
# Text Processing
|
||||
"pdfplumber>=0.11.4",
|
||||
"regex>=2024.9.11",
|
||||
"pdfplumber~=0.11.4",
|
||||
"regex~=2024.9.11",
|
||||
# Telemetry and Monitoring
|
||||
"opentelemetry-api>=1.30.0",
|
||||
"opentelemetry-sdk>=1.30.0",
|
||||
"opentelemetry-exporter-otlp-proto-http>=1.30.0",
|
||||
"opentelemetry-api~=1.34.0",
|
||||
"opentelemetry-sdk~=1.34.0",
|
||||
"opentelemetry-exporter-otlp-proto-http~=1.34.0",
|
||||
# Data Handling
|
||||
"chromadb~=1.1.0",
|
||||
"tokenizers>=0.20.3",
|
||||
"openpyxl>=3.1.5",
|
||||
"tokenizers~=0.20.3",
|
||||
"openpyxl~=3.1.5",
|
||||
# Authentication and Security
|
||||
"python-dotenv>=1.1.1",
|
||||
"pyjwt>=2.9.0",
|
||||
"python-dotenv~=1.1.1",
|
||||
"pyjwt~=2.9.0",
|
||||
# Configuration and Utils
|
||||
"click>=8.1.7",
|
||||
"appdirs>=1.4.4",
|
||||
"jsonref>=1.1.0",
|
||||
"json-repair==0.25.2",
|
||||
"uv>=0.4.25",
|
||||
"tomli-w>=1.1.0",
|
||||
"tomli>=2.0.2",
|
||||
"json5>=0.10.0",
|
||||
"portalocker==2.7.0",
|
||||
"pydantic-settings>=2.10.1",
|
||||
"mcp>=1.16.0",
|
||||
"click~=8.1.7",
|
||||
"appdirs~=1.4.4",
|
||||
"jsonref~=1.1.0",
|
||||
"json-repair~=0.25.2",
|
||||
"tomli-w~=1.1.0",
|
||||
"tomli~=2.0.2",
|
||||
"json5~=0.10.0",
|
||||
"portalocker~=2.7.0",
|
||||
"pydantic-settings~=2.10.1",
|
||||
"mcp~=1.16.0",
|
||||
"uv~=0.9.13",
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
@@ -53,50 +53,47 @@ tools = [
|
||||
embeddings = [
|
||||
"tiktoken~=0.8.0"
|
||||
]
|
||||
pdfplumber = [
|
||||
"pdfplumber>=0.11.4",
|
||||
]
|
||||
pandas = [
|
||||
"pandas>=2.2.3",
|
||||
"pandas~=2.2.3",
|
||||
]
|
||||
openpyxl = [
|
||||
"openpyxl>=3.1.5",
|
||||
"openpyxl~=3.1.5",
|
||||
]
|
||||
mem0 = ["mem0ai>=0.1.94"]
|
||||
mem0 = ["mem0ai~=0.1.94"]
|
||||
docling = [
|
||||
"docling>=2.12.0",
|
||||
"docling~=2.63.0",
|
||||
]
|
||||
qdrant = [
|
||||
"qdrant-client[fastembed]>=1.14.3",
|
||||
"qdrant-client[fastembed]~=1.14.3",
|
||||
]
|
||||
aws = [
|
||||
"boto3>=1.40.38",
|
||||
"boto3~=1.40.38",
|
||||
]
|
||||
watson = [
|
||||
"ibm-watsonx-ai>=1.3.39",
|
||||
"ibm-watsonx-ai~=1.3.39",
|
||||
]
|
||||
voyageai = [
|
||||
"voyageai>=0.3.5",
|
||||
"voyageai~=0.3.5",
|
||||
]
|
||||
litellm = [
|
||||
"litellm>=1.74.9",
|
||||
"litellm~=1.74.9",
|
||||
]
|
||||
bedrock = [
|
||||
"boto3>=1.40.45",
|
||||
"boto3~=1.40.45",
|
||||
]
|
||||
google-genai = [
|
||||
"google-genai>=1.2.0",
|
||||
"google-genai~=1.2.0",
|
||||
]
|
||||
azure-ai-inference = [
|
||||
"azure-ai-inference>=1.0.0b9",
|
||||
"azure-ai-inference~=1.0.0b9",
|
||||
]
|
||||
anthropic = [
|
||||
"anthropic>=0.69.0",
|
||||
"anthropic~=0.71.0",
|
||||
]
|
||||
a2a = [
|
||||
a2a = [
|
||||
"a2a-sdk~=0.3.10",
|
||||
"httpx-auth>=0.23.1",
|
||||
"httpx-sse>=0.4.0",
|
||||
"httpx-auth~=0.23.1",
|
||||
"httpx-sse~=0.4.0",
|
||||
]
|
||||
|
||||
|
||||
|
||||
@@ -950,34 +950,15 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
def _handle_crew_planning(self) -> None:
|
||||
"""Handles the Crew planning."""
|
||||
import re
|
||||
|
||||
self._logger.log("info", "Planning the crew execution")
|
||||
result = CrewPlanner(
|
||||
tasks=self.tasks, planning_agent_llm=self.planning_llm
|
||||
)._handle_crew_planning()
|
||||
|
||||
plan_map: dict[int, str] = {}
|
||||
for step_plan in result.list_of_plans_per_task:
|
||||
match = re.search(r"Task Number (\d+)", step_plan.task, re.IGNORECASE)
|
||||
if match:
|
||||
task_number = int(match.group(1))
|
||||
plan_map[task_number] = step_plan.plan
|
||||
else:
|
||||
self._logger.log(
|
||||
"warning",
|
||||
f"Could not extract task number from plan task field: {step_plan.task}",
|
||||
)
|
||||
|
||||
for idx, task in enumerate(self.tasks):
|
||||
task_number = idx + 1 # Task numbers are 1-indexed
|
||||
if task_number in plan_map:
|
||||
task.description += plan_map[task_number]
|
||||
else:
|
||||
self._logger.log(
|
||||
"warning",
|
||||
f"No plan found for task {task_number}. Task description: {task.description}",
|
||||
)
|
||||
for task, step_plan in zip(
|
||||
self.tasks, result.list_of_plans_per_task, strict=False
|
||||
):
|
||||
task.description += step_plan.plan
|
||||
|
||||
def _store_execution_log(
|
||||
self,
|
||||
|
||||
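After this change the planner output is paired with tasks purely by position instead of by parsing "Task Number N" out of the plan text; `strict=False` simply stops at the shorter sequence. An illustrative sketch of that pairing behavior (the task and plan values are made up):

```python
tasks = ["research the topic", "write the summary", "review the draft"]
plans = ["1. Gather sources", "1. Outline, then draft"]

# zip(..., strict=False) ends at the shorter list, so the third task keeps
# its original description instead of raising a length-mismatch error.
for description, plan in zip(tasks, plans, strict=False):
    print(f"{description} -> {plan}")
```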
@@ -76,7 +76,7 @@ class TraceBatchManager:
|
||||
use_ephemeral: bool = False,
|
||||
) -> TraceBatch:
|
||||
"""Initialize a new trace batch (thread-safe)"""
|
||||
with self._init_lock:
|
||||
with self._batch_ready_cv:
|
||||
if self.current_batch is not None:
|
||||
logger.debug(
|
||||
"Batch already initialized, skipping duplicate initialization"
|
||||
@@ -99,7 +99,6 @@ class TraceBatchManager:
|
||||
self.backend_initialized = True
|
||||
|
||||
self._batch_ready_cv.notify_all()
|
||||
|
||||
return self.current_batch
|
||||
|
||||
def _initialize_backend_batch(
|
||||
@@ -107,7 +106,7 @@ class TraceBatchManager:
|
||||
user_context: dict[str, str],
|
||||
execution_metadata: dict[str, Any],
|
||||
use_ephemeral: bool = False,
|
||||
):
|
||||
) -> None:
|
||||
"""Send batch initialization to backend"""
|
||||
|
||||
if not is_tracing_enabled_in_context():
|
||||
@@ -204,7 +203,7 @@ class TraceBatchManager:
|
||||
return False
|
||||
return True
|
||||
|
||||
def add_event(self, trace_event: TraceEvent):
|
||||
def add_event(self, trace_event: TraceEvent) -> None:
|
||||
"""Add event to buffer"""
|
||||
self.event_buffer.append(trace_event)
|
||||
|
||||
@@ -300,7 +299,7 @@ class TraceBatchManager:
|
||||
|
||||
return finalized_batch
|
||||
|
||||
def _finalize_backend_batch(self, events_count: int = 0):
|
||||
def _finalize_backend_batch(self, events_count: int = 0) -> None:
|
||||
"""Send batch finalization to backend
|
||||
|
||||
Args:
|
||||
@@ -366,7 +365,7 @@ class TraceBatchManager:
|
||||
logger.error(f"❌ Error finalizing trace batch: {e}")
|
||||
self.plus_api.mark_trace_batch_as_failed(self.trace_batch_id, str(e))
|
||||
|
||||
def _cleanup_batch_data(self):
|
||||
def _cleanup_batch_data(self) -> None:
|
||||
"""Clean up batch data after successful finalization to free memory"""
|
||||
try:
|
||||
if hasattr(self, "event_buffer") and self.event_buffer:
|
||||
@@ -411,7 +410,7 @@ class TraceBatchManager:
|
||||
lambda: self.current_batch is not None, timeout=timeout
|
||||
)
|
||||
|
||||
def record_start_time(self, key: str):
|
||||
def record_start_time(self, key: str) -> None:
|
||||
"""Record start time for duration calculation"""
|
||||
self.execution_start_times[key] = datetime.now(timezone.utc)
|
||||
|
||||
|
||||
@@ -71,6 +71,7 @@ from crewai.events.types.reasoning_events import (
|
||||
AgentReasoningFailedEvent,
|
||||
AgentReasoningStartedEvent,
|
||||
)
|
||||
from crewai.events.types.system_events import SignalEvent, on_signal
|
||||
from crewai.events.types.task_events import (
|
||||
TaskCompletedEvent,
|
||||
TaskFailedEvent,
|
||||
@@ -159,6 +160,7 @@ class TraceCollectionListener(BaseEventListener):
|
||||
self._register_flow_event_handlers(crewai_event_bus)
|
||||
self._register_context_event_handlers(crewai_event_bus)
|
||||
self._register_action_event_handlers(crewai_event_bus)
|
||||
self._register_system_event_handlers(crewai_event_bus)
|
||||
|
||||
self._listeners_setup = True
|
||||
|
||||
@@ -458,6 +460,15 @@ class TraceCollectionListener(BaseEventListener):
|
||||
) -> None:
|
||||
self._handle_action_event("knowledge_query_failed", source, event)
|
||||
|
||||
def _register_system_event_handlers(self, event_bus: CrewAIEventsBus) -> None:
|
||||
"""Register handlers for system signal events (SIGTERM, SIGINT, etc.)."""
|
||||
|
||||
@on_signal
|
||||
def handle_signal(source: Any, event: SignalEvent) -> None:
|
||||
"""Flush trace batch on system signals to prevent data loss."""
|
||||
if self.batch_manager.is_batch_initialized():
|
||||
self.batch_manager.finalize_batch()
|
||||
|
||||
def _initialize_crew_batch(self, source: Any, event: Any) -> None:
|
||||
"""Initialize trace batch.
|
||||
|
||||
|
||||
102 lib/crewai/src/crewai/events/types/system_events.py (new file)
@@ -0,0 +1,102 @@
|
||||
"""System signal event types for CrewAI.
|
||||
|
||||
This module contains event types for system-level signals like SIGTERM,
|
||||
allowing listeners to perform cleanup operations before process termination.
|
||||
"""
|
||||
|
||||
from collections.abc import Callable
|
||||
from enum import IntEnum
|
||||
import signal
|
||||
from typing import Annotated, Literal, TypeVar
|
||||
|
||||
from pydantic import Field, TypeAdapter
|
||||
|
||||
from crewai.events.base_events import BaseEvent
|
||||
|
||||
|
||||
class SignalType(IntEnum):
|
||||
"""Enumeration of supported system signals."""
|
||||
|
||||
SIGTERM = signal.SIGTERM
|
||||
SIGINT = signal.SIGINT
|
||||
SIGHUP = signal.SIGHUP
|
||||
SIGTSTP = signal.SIGTSTP
|
||||
SIGCONT = signal.SIGCONT
|
||||
|
||||
|
||||
class SigTermEvent(BaseEvent):
|
||||
"""Event emitted when SIGTERM is received."""
|
||||
|
||||
type: Literal["SIGTERM"] = "SIGTERM"
|
||||
signal_number: SignalType = SignalType.SIGTERM
|
||||
reason: str | None = None
|
||||
|
||||
|
||||
class SigIntEvent(BaseEvent):
|
||||
"""Event emitted when SIGINT is received."""
|
||||
|
||||
type: Literal["SIGINT"] = "SIGINT"
|
||||
signal_number: SignalType = SignalType.SIGINT
|
||||
reason: str | None = None
|
||||
|
||||
|
||||
class SigHupEvent(BaseEvent):
|
||||
"""Event emitted when SIGHUP is received."""
|
||||
|
||||
type: Literal["SIGHUP"] = "SIGHUP"
|
||||
signal_number: SignalType = SignalType.SIGHUP
|
||||
reason: str | None = None
|
||||
|
||||
|
||||
class SigTStpEvent(BaseEvent):
|
||||
"""Event emitted when SIGTSTP is received.
|
||||
|
||||
Note: SIGSTOP cannot be caught - it immediately suspends the process.
|
||||
"""
|
||||
|
||||
type: Literal["SIGTSTP"] = "SIGTSTP"
|
||||
signal_number: SignalType = SignalType.SIGTSTP
|
||||
reason: str | None = None
|
||||
|
||||
|
||||
class SigContEvent(BaseEvent):
|
||||
"""Event emitted when SIGCONT is received."""
|
||||
|
||||
type: Literal["SIGCONT"] = "SIGCONT"
|
||||
signal_number: SignalType = SignalType.SIGCONT
|
||||
reason: str | None = None
|
||||
|
||||
|
||||
SignalEvent = Annotated[
|
||||
SigTermEvent | SigIntEvent | SigHupEvent | SigTStpEvent | SigContEvent,
|
||||
Field(discriminator="type"),
|
||||
]
|
||||
|
||||
signal_event_adapter: TypeAdapter[SignalEvent] = TypeAdapter(SignalEvent)
|
||||
|
||||
SIGNAL_EVENT_TYPES: tuple[type[BaseEvent], ...] = (
|
||||
SigTermEvent,
|
||||
SigIntEvent,
|
||||
SigHupEvent,
|
||||
SigTStpEvent,
|
||||
SigContEvent,
|
||||
)
|
||||
|
||||
|
||||
T = TypeVar("T", bound=Callable[[object, SignalEvent], None])
|
||||
|
||||
|
||||
def on_signal(func: T) -> T:
|
||||
"""Decorator to register a handler for all signal events.
|
||||
|
||||
Args:
|
||||
func: Handler function that receives (source, event) arguments.
|
||||
|
||||
Returns:
|
||||
The original function, registered for all signal event types.
|
||||
"""
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
|
||||
for event_type in SIGNAL_EVENT_TYPES:
|
||||
crewai_event_bus.on(event_type)(func)
|
||||
return func
|
||||
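A hedged usage sketch of the decorator: one function registered with `@on_signal` is subscribed to every event type in `SIGNAL_EVENT_TYPES`, and `event.type` tells the handler which signal fired (the handler body is illustrative):

```python
from typing import Any

from crewai.events.types.system_events import SignalEvent, on_signal


@on_signal
def flush_on_signal(source: Any, event: SignalEvent) -> None:
    # Invoked for SIGTERM, SIGINT, SIGHUP, SIGTSTP and SIGCONT events alike.
    print(f"received {event.type}, flushing buffers before shutdown")
```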
@@ -256,6 +256,7 @@ GeminiModels: TypeAlias = Literal[
|
||||
"gemini-2.5-flash-preview-tts",
|
||||
"gemini-2.5-pro-preview-tts",
|
||||
"gemini-2.5-computer-use-preview-10-2025",
|
||||
"gemini-2.5-pro-exp-03-25",
|
||||
"gemini-2.0-flash",
|
||||
"gemini-2.0-flash-001",
|
||||
"gemini-2.0-flash-exp",
|
||||
@@ -309,6 +310,7 @@ GEMINI_MODELS: list[GeminiModels] = [
|
||||
"gemini-2.5-flash-preview-tts",
|
||||
"gemini-2.5-pro-preview-tts",
|
||||
"gemini-2.5-computer-use-preview-10-2025",
|
||||
"gemini-2.5-pro-exp-03-25",
|
||||
"gemini-2.0-flash",
|
||||
"gemini-2.0-flash-001",
|
||||
"gemini-2.0-flash-exp",
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Callable
|
||||
from concurrent.futures import Future
|
||||
from copy import copy as shallow_copy
|
||||
import datetime
|
||||
@@ -10,6 +11,7 @@ import logging
|
||||
from pathlib import Path
|
||||
import threading
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
ClassVar,
|
||||
cast,
|
||||
@@ -17,11 +19,11 @@ from typing import (
|
||||
get_origin,
|
||||
)
|
||||
import uuid
|
||||
import warnings
|
||||
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
PrivateAttr,
|
||||
field_validator,
|
||||
@@ -37,6 +39,7 @@ from crewai.events.types.task_events import (
|
||||
TaskFailedEvent,
|
||||
TaskStartedEvent,
|
||||
)
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.security import Fingerprint, SecurityConfig
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
@@ -57,6 +60,9 @@ from crewai.utilities.printer import Printer
|
||||
from crewai.utilities.string_utils import interpolate_only
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent.core import Agent
|
||||
|
||||
_printer = Printer()
|
||||
|
||||
|
||||
@@ -101,17 +107,17 @@ class Task(BaseModel):
|
||||
description="Configuration for the agent",
|
||||
default=None,
|
||||
)
|
||||
callback: Any | None = Field(
|
||||
callback: Callable[[TaskOutput], None] | None = Field(
|
||||
description="Callback to be executed after the task is completed.", default=None
|
||||
)
|
||||
agent: BaseAgent | None = Field(
|
||||
agent: Agent | None = Field(
|
||||
description="Agent responsible for execution the task.", default=None
|
||||
)
|
||||
context: list[Task] | None | _NotSpecified = Field(
|
||||
description="Other tasks that will have their output used as context for this task.",
|
||||
default=NOT_SPECIFIED,
|
||||
)
|
||||
async_execution: bool | None = Field(
|
||||
async_execution: bool = Field(
|
||||
description="Whether the task should be executed asynchronously or not.",
|
||||
default=False,
|
||||
)
|
||||
@@ -151,11 +157,11 @@ class Task(BaseModel):
|
||||
frozen=True,
|
||||
description="Unique identifier for the object, not set by user.",
|
||||
)
|
||||
human_input: bool | None = Field(
|
||||
human_input: bool = Field(
|
||||
description="Whether the task should have a human review the final answer of the agent",
|
||||
default=False,
|
||||
)
|
||||
markdown: bool | None = Field(
|
||||
markdown: bool = Field(
|
||||
description="Whether the task should instruct the agent to return the final answer formatted in Markdown",
|
||||
default=False,
|
||||
)
|
||||
@@ -172,11 +178,6 @@ class Task(BaseModel):
|
||||
default=None,
|
||||
description="List of guardrails to validate task output before proceeding to next task. Also supports a single guardrail function or string description of a guardrail to validate task output before proceeding to next task",
|
||||
)
|
||||
|
||||
max_retries: int | None = Field(
|
||||
default=None,
|
||||
description="[DEPRECATED] Maximum number of retries when guardrail fails. Use guardrail_max_retries instead. Will be removed in v1.0.0",
|
||||
)
|
||||
guardrail_max_retries: int = Field(
|
||||
default=3, description="Maximum number of retries when guardrail fails"
|
||||
)
|
||||
@@ -187,8 +188,8 @@ class Task(BaseModel):
|
||||
end_time: datetime.datetime | None = Field(
|
||||
default=None, description="End time of the task execution"
|
||||
)
|
||||
allow_crewai_trigger_context: bool | None = Field(
|
||||
default=None,
|
||||
allow_crewai_trigger_context: bool = Field(
|
||||
default=False,
|
||||
description="Whether this task should append 'Trigger Payload: {crewai_trigger_payload}' to the task description when crewai_trigger_payload exists in crew inputs.",
|
||||
)
|
||||
_guardrail: GuardrailCallable | None = PrivateAttr(default=None)
|
||||
@@ -202,7 +203,9 @@ class Task(BaseModel):
|
||||
_original_expected_output: str | None = PrivateAttr(default=None)
|
||||
_original_output_file: str | None = PrivateAttr(default=None)
|
||||
_thread: threading.Thread | None = PrivateAttr(default=None)
|
||||
model_config = {"arbitrary_types_allowed": True}
|
||||
model_config = ConfigDict(
|
||||
arbitrary_types_allowed=True,
|
||||
)
|
||||
|
||||
@field_validator("guardrail")
|
||||
@classmethod
|
||||
@@ -288,15 +291,16 @@ class Task(BaseModel):
|
||||
if self.agent is None:
|
||||
raise ValueError("Agent is required to use LLMGuardrail")
|
||||
|
||||
self._guardrail = cast(
|
||||
GuardrailCallable,
|
||||
LLMGuardrail(description=self.guardrail, llm=self.agent.llm),
|
||||
self._guardrail = LLMGuardrail(
|
||||
description=self.guardrail, llm=cast(BaseLLM, self.agent.llm)
|
||||
)
|
||||
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def ensure_guardrails_is_list_of_callables(self) -> Task:
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
|
||||
guardrails = []
|
||||
if self.guardrails is not None:
|
||||
if isinstance(self.guardrails, (list, tuple)):
|
||||
@@ -309,14 +313,11 @@ class Task(BaseModel):
|
||||
raise ValueError(
|
||||
"Agent is required to use non-programmatic guardrails"
|
||||
)
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
|
||||
guardrails.append(
|
||||
cast(
|
||||
GuardrailCallable,
|
||||
LLMGuardrail(
|
||||
description=guardrail, llm=self.agent.llm
|
||||
),
|
||||
LLMGuardrail(
|
||||
description=guardrail,
|
||||
llm=cast(BaseLLM, self.agent.llm),
|
||||
)
|
||||
)
|
||||
else:
|
||||
@@ -329,14 +330,11 @@ class Task(BaseModel):
|
||||
raise ValueError(
|
||||
"Agent is required to use non-programmatic guardrails"
|
||||
)
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
|
||||
guardrails.append(
|
||||
cast(
|
||||
GuardrailCallable,
|
||||
LLMGuardrail(
|
||||
description=self.guardrails, llm=self.agent.llm
|
||||
),
|
||||
LLMGuardrail(
|
||||
description=self.guardrails,
|
||||
llm=cast(BaseLLM, self.agent.llm),
|
||||
)
|
||||
)
|
||||
else:
|
||||
@@ -436,21 +434,9 @@ class Task(BaseModel):
|
||||
)
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def handle_max_retries_deprecation(self) -> Self:
|
||||
if self.max_retries is not None:
|
||||
warnings.warn(
|
||||
"The 'max_retries' parameter is deprecated and will be removed in CrewAI v1.0.0. "
|
||||
"Please use 'guardrail_max_retries' instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
self.guardrail_max_retries = self.max_retries
|
||||
return self
|
||||
|
||||
def execute_sync(
|
||||
self,
|
||||
agent: BaseAgent | None = None,
|
||||
agent: Agent | None = None,
|
||||
context: str | None = None,
|
||||
tools: list[BaseTool] | None = None,
|
||||
) -> TaskOutput:
|
||||
@@ -488,9 +474,9 @@ class Task(BaseModel):
|
||||
|
||||
def _execute_task_async(
|
||||
self,
|
||||
agent: BaseAgent | None,
|
||||
agent: Agent | None,
|
||||
context: str | None,
|
||||
tools: list[Any] | None,
|
||||
tools: list[BaseTool] | None,
|
||||
future: Future[TaskOutput],
|
||||
) -> None:
|
||||
"""Execute the task asynchronously with context handling."""
|
||||
@@ -499,9 +485,9 @@ class Task(BaseModel):
|
||||
|
||||
def _execute_core(
|
||||
self,
|
||||
agent: BaseAgent | None,
|
||||
agent: Agent | None,
|
||||
context: str | None,
|
||||
tools: list[Any] | None,
|
||||
tools: list[BaseTool] | None,
|
||||
) -> TaskOutput:
|
||||
"""Run the core execution logic of the task."""
|
||||
try:
|
||||
@@ -611,8 +597,6 @@ class Task(BaseModel):
|
||||
if trigger_payload is not None:
|
||||
description += f"\n\nTrigger Payload: {trigger_payload}"
|
||||
|
||||
tasks_slices = [description]
|
||||
|
||||
output = self.i18n.slice("expected_output").format(
|
||||
expected_output=self.expected_output
|
||||
)
|
||||
@@ -715,7 +699,7 @@ Follow these guidelines:
|
||||
self.processed_by_agents.add(agent_name)
|
||||
self.delegations += 1
|
||||
|
||||
def copy( # type: ignore
|
||||
def copy( # type: ignore[override]
|
||||
self, agents: list[BaseAgent], task_mapping: dict[str, Task]
|
||||
) -> Task:
|
||||
"""Creates a deep copy of the Task while preserving its original class type.
|
||||
@@ -859,7 +843,7 @@ Follow these guidelines:
|
||||
def _invoke_guardrail_function(
|
||||
self,
|
||||
task_output: TaskOutput,
|
||||
agent: BaseAgent,
|
||||
agent: Agent,
|
||||
tools: list[BaseTool],
|
||||
guardrail: GuardrailCallable | None,
|
||||
guardrail_index: int | None = None,
|
||||
|
||||
@@ -2,7 +2,7 @@ from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.agent.core import Agent
|
||||
from crewai.lite_agent_output import LiteAgentOutput
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
@@ -38,7 +38,9 @@ class LLMGuardrail:
|
||||
|
||||
self.llm: BaseLLM = llm
|
||||
|
||||
def _validate_output(self, task_output: TaskOutput) -> LiteAgentOutput:
|
||||
def _validate_output(
|
||||
self, task_output: TaskOutput | LiteAgentOutput
|
||||
) -> LiteAgentOutput:
|
||||
agent = Agent(
|
||||
role="Guardrail Agent",
|
||||
goal="Validate the output of the task",
|
||||
@@ -64,18 +66,17 @@ class LLMGuardrail:
|
||||
|
||||
return agent.kickoff(query, response_format=LLMGuardrailResult)
|
||||
|
||||
def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]:
|
||||
def __call__(self, task_output: TaskOutput | LiteAgentOutput) -> tuple[bool, Any]:
|
||||
"""Validates the output of a task based on specified criteria.
|
||||
|
||||
Args:
|
||||
task_output (TaskOutput): The output to be validated.
|
||||
task_output: The output to be validated.
|
||||
|
||||
Returns:
|
||||
Tuple[bool, Any]: A tuple containing:
|
||||
A tuple containing:
|
||||
- bool: True if validation passed, False otherwise
|
||||
- Any: The validation result or error message
|
||||
"""
|
||||
|
||||
try:
|
||||
result = self._validate_output(task_output)
|
||||
if not isinstance(result.pydantic, LLMGuardrailResult):
|
||||
|
||||
@@ -9,12 +9,14 @@ data is collected. Users can opt-in to share more complete data using the
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import atexit
|
||||
from collections.abc import Callable
|
||||
from importlib.metadata import version
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import signal
|
||||
import threading
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
@@ -31,6 +33,14 @@ from opentelemetry.sdk.trace.export import (
|
||||
from opentelemetry.trace import Span
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.system_events import (
|
||||
SigContEvent,
|
||||
SigHupEvent,
|
||||
SigIntEvent,
|
||||
SigTStpEvent,
|
||||
SigTermEvent,
|
||||
)
|
||||
from crewai.telemetry.constants import (
|
||||
CREWAI_TELEMETRY_BASE_URL,
|
||||
CREWAI_TELEMETRY_SERVICE_NAME,
|
||||
@@ -121,6 +131,7 @@ class Telemetry:
|
||||
)
|
||||
|
||||
self.provider.add_span_processor(processor)
|
||||
self._register_shutdown_handlers()
|
||||
self.ready = True
|
||||
except Exception as e:
|
||||
if isinstance(
|
||||
@@ -155,6 +166,71 @@ class Telemetry:
|
||||
self.ready = False
|
||||
self.trace_set = False
|
||||
|
||||
def _register_shutdown_handlers(self) -> None:
|
||||
"""Register handlers for graceful shutdown on process exit and signals."""
|
||||
atexit.register(self._shutdown)
|
||||
|
||||
self._original_handlers: dict[int, Any] = {}
|
||||
|
||||
self._register_signal_handler(signal.SIGTERM, SigTermEvent, shutdown=True)
|
||||
self._register_signal_handler(signal.SIGINT, SigIntEvent, shutdown=True)
|
||||
self._register_signal_handler(signal.SIGHUP, SigHupEvent, shutdown=False)
|
||||
self._register_signal_handler(signal.SIGTSTP, SigTStpEvent, shutdown=False)
|
||||
self._register_signal_handler(signal.SIGCONT, SigContEvent, shutdown=False)
|
||||
|
||||
def _register_signal_handler(
|
||||
self,
|
||||
sig: signal.Signals,
|
||||
event_class: type,
|
||||
shutdown: bool = False,
|
||||
) -> None:
|
||||
"""Register a signal handler that emits an event.
|
||||
|
||||
Args:
|
||||
sig: The signal to handle.
|
||||
event_class: The event class to instantiate and emit.
|
||||
shutdown: Whether to trigger shutdown on this signal.
|
||||
"""
|
||||
try:
|
||||
original_handler = signal.getsignal(sig)
|
||||
self._original_handlers[sig] = original_handler
|
||||
|
||||
def handler(signum: int, frame: Any) -> None:
|
||||
crewai_event_bus.emit(self, event_class())
|
||||
|
||||
if shutdown:
|
||||
self._shutdown()
|
||||
|
||||
if original_handler not in (signal.SIG_DFL, signal.SIG_IGN, None):
|
||||
if callable(original_handler):
|
||||
original_handler(signum, frame)
|
||||
elif shutdown:
|
||||
raise SystemExit(0)
|
||||
|
||||
signal.signal(sig, handler)
|
||||
except ValueError as e:
|
||||
logger.warning(
|
||||
f"Cannot register {sig.name} handler: not running in main thread",
|
||||
exc_info=e,
|
||||
)
|
||||
except OSError as e:
|
||||
logger.warning(f"Cannot register {sig.name} handler: {e}", exc_info=e)
|
||||
|
||||
def _shutdown(self) -> None:
|
||||
"""Flush and shutdown the telemetry provider on process exit.
|
||||
|
||||
Uses a short timeout to avoid blocking process shutdown.
|
||||
"""
|
||||
if not self.ready:
|
||||
return
|
||||
|
||||
try:
|
||||
self.provider.force_flush(timeout_millis=5000)
|
||||
self.provider.shutdown()
|
||||
self.ready = False
|
||||
except Exception as e:
|
||||
logger.debug(f"Telemetry shutdown failed: {e}")
|
||||
|
||||
def _safe_telemetry_operation(
|
||||
self, operation: Callable[[], Span | None]
|
||||
) -> Span | None:
|
||||
|
||||
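`_register_signal_handler` keeps whatever handler was installed before CrewAI's, emitting the event first and then delegating, so cleanup registered by other libraries still runs. The chaining pattern on its own, as a hedged sketch under those assumptions:

```python
import signal
from typing import Any


def install_chained_handler(sig: signal.Signals) -> None:
    original = signal.getsignal(sig)

    def handler(signum: int, frame: Any) -> None:
        print(f"cleanup for signal {signum}")  # emit an event / flush spans here

        if callable(original) and original not in (signal.SIG_DFL, signal.SIG_IGN):
            original(signum, frame)  # preserve the previously installed handler
        else:
            raise SystemExit(0)  # approximate the default terminate behavior

    signal.signal(sig, handler)


install_chained_handler(signal.SIGTERM)
```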
@@ -147,7 +147,7 @@ def test_custom_llm():
|
||||
assert agent.llm.model == "gpt-4"
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_agent_execution():
|
||||
agent = Agent(
|
||||
role="test role",
|
||||
@@ -166,7 +166,7 @@ def test_agent_execution():
|
||||
assert output == "1 + 1 is 2"
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_agent_execution_with_tools():
|
||||
@tool
|
||||
def multiplier(first_number: int, second_number: int) -> float:
|
||||
@@ -211,7 +211,7 @@ def test_agent_execution_with_tools():
|
||||
assert received_events[0].tool_args == {"first_number": 3, "second_number": 4}
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_logging_tool_usage():
|
||||
@tool
|
||||
def multiplier(first_number: int, second_number: int) -> float:
|
||||
@@ -245,7 +245,7 @@ def test_logging_tool_usage():
|
||||
assert agent.tools_handler.last_used_tool.arguments == tool_usage.arguments
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_cache_hitting():
|
||||
@tool
|
||||
def multiplier(first_number: int, second_number: int) -> float:
|
||||
@@ -325,7 +325,7 @@ def test_cache_hitting():
|
||||
assert received_events[0].output == "12"
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_disabling_cache_for_agent():
|
||||
@tool
|
||||
def multiplier(first_number: int, second_number: int) -> float:
|
||||
@@ -389,7 +389,7 @@ def test_disabling_cache_for_agent():
|
||||
read.assert_not_called()
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_agent_execution_with_specific_tools():
|
||||
@tool
|
||||
def multiplier(first_number: int, second_number: int) -> float:
|
||||
@@ -412,7 +412,7 @@ def test_agent_execution_with_specific_tools():
|
||||
assert output == "The result of the multiplication is 12."
|
||||
|
||||
|
||||
@pytest.mark.vcr(filter_headers=["authorization"])
|
||||
@pytest.mark.vcr()
|
||||
def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
|
||||
@tool
|
||||
def multiplier(first_number: int, second_number: int) -> float:
|
||||
@@ -438,7 +438,7 @@ def test_agent_powered_by_new_o_model_family_that_allows_skipping_tool():
assert output == "12"

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_powered_by_new_o_model_family_that_uses_tool():
@tool
def comapny_customer_data() -> str:

@@ -464,7 +464,7 @@ def test_agent_powered_by_new_o_model_family_that_uses_tool():
assert output == "42"

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_custom_max_iterations():
@tool
def get_final_answer() -> float:

@@ -509,7 +509,7 @@ def test_agent_custom_max_iterations():
assert call_count == 2

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
@pytest.mark.timeout(30)
def test_agent_max_iterations_stops_loop():
"""Test that agent execution terminates when max_iter is reached."""

@@ -546,7 +546,7 @@ def test_agent_max_iterations_stops_loop():
)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_repeated_tool_usage(capsys):
"""Test that agents handle repeated tool usage appropriately.

@@ -595,7 +595,7 @@ def test_agent_repeated_tool_usage(capsys):
)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
@tool
def get_final_answer(anything: str) -> float:

@@ -638,7 +638,7 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_moved_on_after_max_iterations():
@tool
def get_final_answer() -> float:

@@ -665,7 +665,7 @@ def test_agent_moved_on_after_max_iterations():
assert output == "42"

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_respect_the_max_rpm_set(capsys):
@tool
def get_final_answer() -> float:

@@ -699,7 +699,7 @@ def test_agent_respect_the_max_rpm_set(capsys):
moveon.assert_called()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
from unittest.mock import patch

@@ -737,7 +737,7 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
moveon.assert_not_called()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_without_max_rpm_respects_crew_rpm(capsys):
from unittest.mock import patch

@@ -797,7 +797,7 @@ def test_agent_without_max_rpm_respects_crew_rpm(capsys):
moveon.assert_called_once()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_error_on_parsing_tool(capsys):
from unittest.mock import patch

@@ -840,7 +840,7 @@ def test_agent_error_on_parsing_tool(capsys):
assert "Error on parsing tool." in captured.out

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_remembers_output_format_after_using_tools_too_many_times():
from unittest.mock import patch

@@ -875,7 +875,7 @@ def test_agent_remembers_output_format_after_using_tools_too_many_times():
remember_format.assert_called()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_use_specific_tasks_output_as_context(capsys):
agent1 = Agent(role="test role", goal="test goal", backstory="test backstory")
agent2 = Agent(role="test role2", goal="test goal2", backstory="test backstory2")

@@ -902,7 +902,7 @@ def test_agent_use_specific_tasks_output_as_context(capsys):
assert "hi" in result.raw.lower() or "hello" in result.raw.lower()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_step_callback():
class StepCallback:
def callback(self, step):

@@ -936,7 +936,7 @@ def test_agent_step_callback():
callback.assert_called()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_function_calling_llm():
from crewai.llm import LLM
llm = LLM(model="gpt-4o", is_litellm=True)

@@ -983,7 +983,7 @@ def test_agent_function_calling_llm():
mock_original_tool_calling.assert_called()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
from crewai.tools import BaseTool

@@ -1013,7 +1013,7 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
assert result.raw == "Howdy!"

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_tool_usage_information_is_appended_to_agent():
from crewai.tools import BaseTool

@@ -1068,7 +1068,7 @@ def test_agent_definition_based_on_dict():

# test for human input
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_human_input():
# Agent configuration
config = {

@@ -1216,7 +1216,7 @@ Thought:<|eot_id|>
assert mock_format_prompt.return_value == expected_prompt

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_task_allow_crewai_trigger_context():
from crewai import Crew

@@ -1237,7 +1237,7 @@ def test_task_allow_crewai_trigger_context():
assert "Trigger Payload: Important context data" in prompt

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_task_without_allow_crewai_trigger_context():
from crewai import Crew

@@ -1260,7 +1260,7 @@ def test_task_without_allow_crewai_trigger_context():
assert "Important context data" not in prompt

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_task_allow_crewai_trigger_context_no_payload():
from crewai import Crew

@@ -1282,7 +1282,7 @@ def test_task_allow_crewai_trigger_context_no_payload():
assert "Trigger Payload:" not in prompt

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
from crewai import Crew

@@ -1311,7 +1311,7 @@ def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
assert "Trigger Payload: Initial context data" not in first_prompt

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_first_task_auto_inject_trigger():
from crewai import Crew

@@ -1344,7 +1344,7 @@ def test_first_task_auto_inject_trigger():
assert "Trigger Payload:" not in second_prompt

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject():
from crewai import Crew

@@ -1549,7 +1549,7 @@ def test_agent_with_additional_kwargs():
assert agent.llm.frequency_penalty == 0.1

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_llm_call():
llm = LLM(model="gpt-3.5-turbo")
messages = [{"role": "user", "content": "Say 'Hello, World!'"}]

@@ -1558,7 +1558,7 @@ def test_llm_call():
assert "Hello, World!" in response

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_llm_call_with_error():
llm = LLM(model="non-existent-model")
messages = [{"role": "user", "content": "This should fail"}]

@@ -1567,7 +1567,7 @@ def test_llm_call_with_error():
llm.call(messages)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_handle_context_length_exceeds_limit():
# Import necessary modules
from crewai.utilities.agent_utils import handle_context_length

@@ -1620,7 +1620,7 @@ def test_handle_context_length_exceeds_limit():
mock_summarize.assert_called_once()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_handle_context_length_exceeds_limit_cli_no():
agent = Agent(
role="test role",

@@ -1695,7 +1695,7 @@ def test_agent_with_all_llm_attributes():
assert agent.llm.api_key == "sk-your-api-key-here"

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_llm_call_with_all_attributes():
llm = LLM(
model="gpt-3.5-turbo",

@@ -1712,7 +1712,7 @@ def test_llm_call_with_all_attributes():
assert "STOP" not in response

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_ollama_llama3():
agent = Agent(
role="test role",

@@ -1733,7 +1733,7 @@ def test_agent_with_ollama_llama3():
assert "Llama3" in response or "AI" in response or "language model" in response

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_llm_call_with_ollama_llama3():
llm = LLM(
model="ollama/llama3.2:3b",

@@ -1752,7 +1752,7 @@ def test_llm_call_with_ollama_llama3():
assert "Llama3" in response or "AI" in response or "language model" in response

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_basic():
agent = Agent(
role="test role",

@@ -1771,7 +1771,7 @@ def test_agent_execute_task_basic():
assert "4" in result

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_with_context():
agent = Agent(
role="test role",

@@ -1793,7 +1793,7 @@ def test_agent_execute_task_with_context():
assert "fox" in result.lower() and "dog" in result.lower()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_with_tool():
@tool
def dummy_tool(query: str) -> str:

@@ -1818,7 +1818,7 @@ def test_agent_execute_task_with_tool():
assert "Dummy result for: test query" in result

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_with_custom_llm():
agent = Agent(
role="test role",

@@ -1839,7 +1839,7 @@ def test_agent_execute_task_with_custom_llm():
)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_execute_task_with_ollama():
agent = Agent(
role="test role",

@@ -1859,7 +1859,7 @@ def test_agent_execute_task_with_ollama():
assert "AI" in result or "artificial intelligence" in result.lower()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)

@@ -1891,7 +1891,7 @@ def test_agent_with_knowledge_sources():
assert "red" in result.raw.lower()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)

@@ -1939,7 +1939,7 @@ def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold():
)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold_default():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)

@@ -1988,7 +1988,7 @@ def test_agent_with_knowledge_sources_with_query_limit_and_score_threshold_defau
)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_extensive_role():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)

@@ -2024,7 +2024,7 @@ def test_agent_with_knowledge_sources_extensive_role():
assert "red" in result.raw.lower()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_works_with_copy():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)

@@ -2063,7 +2063,7 @@ def test_agent_with_knowledge_sources_works_with_copy():
assert isinstance(agent_copy.llm, BaseLLM)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_sources_generate_search_query():
content = "Brandon's favorite color is red and he likes Mexican food."
string_source = StringKnowledgeSource(content=content)

@@ -2116,7 +2116,7 @@ def test_agent_with_knowledge_sources_generate_search_query():
assert "red" in result.raw.lower()

@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_knowledge_with_no_crewai_knowledge():
mock_knowledge = MagicMock(spec=Knowledge)

@@ -2143,7 +2143,7 @@ def test_agent_with_knowledge_with_no_crewai_knowledge():
mock_knowledge.query.assert_called_once()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_with_only_crewai_knowledge():
mock_knowledge = MagicMock(spec=Knowledge)

@@ -2168,7 +2168,7 @@ def test_agent_with_only_crewai_knowledge():
mock_knowledge.query.assert_called_once()

@pytest.mark.vcr(record_mode="none", filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_knowledege_with_crewai_knowledge():
crew_knowledge = MagicMock(spec=Knowledge)
agent_knowledge = MagicMock(spec=Knowledge)

@@ -2197,7 +2197,7 @@ def test_agent_knowledege_with_crewai_knowledge():
crew_knowledge.query.assert_called_once()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_litellm_auth_error_handling():
"""Test that LiteLLM authentication errors are handled correctly and not retried."""
from litellm import AuthenticationError as LiteLLMAuthenticationError

@@ -2326,7 +2326,7 @@ def test_litellm_anthropic_error_handling():
mock_llm_call.assert_called_once()

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_get_knowledge_search_query():
"""Test that _get_knowledge_search_query calls the LLM with the correct prompts."""
from crewai.utilities.i18n import I18N
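
The hunks above repeatedly swap @pytest.mark.vcr(filter_headers=["authorization"]) for a bare @pytest.mark.vcr(). A minimal sketch of where that per-test option usually ends up, assuming the suite centralizes it in a pytest-recording vcr_config fixture (the conftest.py location and fixture scope are assumptions; this diff does not show where the option moved):

    # conftest.py (hypothetical) -- pytest-recording reads VCR options from this fixture,
    # so every @pytest.mark.vcr() test still records with the authorization header filtered.
    import pytest


    @pytest.fixture(scope="session")
    def vcr_config():
        # Same option the per-test decorators used to pass explicitly.
        return {"filter_headers": ["authorization"]}
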
@@ -70,7 +70,7 @@ class ResearchResult(BaseModel):
sources: list[str] = Field(description="List of sources used")

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
@pytest.mark.parametrize("verbose", [True, False])
def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
"""Test that LiteAgent is created with the correct parameters when Agent.kickoff() is called."""

@@ -130,7 +130,7 @@ def test_lite_agent_created_with_correct_parameters(monkeypatch, verbose):
assert created_lite_agent["response_format"] == TestResponse

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_with_tools():
"""Test that Agent can use tools."""
# Create a LiteAgent with tools

@@ -174,7 +174,7 @@ def test_lite_agent_with_tools():
assert event.tool_name == "search_web"

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_structured_output():
"""Test that Agent can return a simple structured output."""

@@ -217,7 +217,7 @@ def test_lite_agent_structured_output():
return result

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_returns_usage_metrics():
"""Test that LiteAgent returns usage metrics."""
llm = LLM(model="gpt-4o-mini")

@@ -238,7 +238,7 @@ def test_lite_agent_returns_usage_metrics():
assert result.usage_metrics["total_tokens"] > 0

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_output_includes_messages():
"""Test that LiteAgentOutput includes messages from agent execution."""
llm = LLM(model="gpt-4o-mini")

@@ -259,7 +259,7 @@ def test_lite_agent_output_includes_messages():
assert len(result.messages) > 0

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_lite_agent_returns_usage_metrics_async():
"""Test that LiteAgent returns usage metrics when run asynchronously."""

@@ -354,9 +354,9 @@ def test_sets_parent_flow_when_inside_flow():
assert captured_agent.parent_flow is flow

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_guardrail_is_called_using_string():
guardrail_events = defaultdict(list)
guardrail_events: dict[str, list] = defaultdict(list)
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,

@@ -369,35 +369,33 @@ def test_guardrail_is_called_using_string():
guardrail="""Only include Brazilian players, both women and men""",
)

all_events_received = threading.Event()
condition = threading.Condition()

@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
assert isinstance(source, LiteAgent)
assert source.original_agent == agent
guardrail_events["started"].append(event)
if (
len(guardrail_events["started"]) == 2
and len(guardrail_events["completed"]) == 2
):
all_events_received.set()
with condition:
guardrail_events["started"].append(event)
condition.notify()

@crewai_event_bus.on(LLMGuardrailCompletedEvent)
def capture_guardrail_completed(source, event):
assert isinstance(source, LiteAgent)
assert source.original_agent == agent
guardrail_events["completed"].append(event)
if (
len(guardrail_events["started"]) == 2
and len(guardrail_events["completed"]) == 2
):
all_events_received.set()
with condition:
guardrail_events["completed"].append(event)
condition.notify()

result = agent.kickoff(messages="Top 10 best players in the world?")

assert all_events_received.wait(timeout=10), (
"Timeout waiting for all guardrail events"
)
with condition:
success = condition.wait_for(
lambda: len(guardrail_events["started"]) >= 2
and len(guardrail_events["completed"]) >= 2,
timeout=10,
)
assert success, "Timeout waiting for all guardrail events"
assert len(guardrail_events["started"]) == 2
assert len(guardrail_events["completed"]) == 2
assert not guardrail_events["completed"][0].success

@@ -408,33 +406,27 @@ def test_guardrail_is_called_using_string():
)

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_guardrail_is_called_using_callable():
guardrail_events = defaultdict(list)
guardrail_events: dict[str, list] = defaultdict(list)
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)

all_events_received = threading.Event()
condition = threading.Condition()

@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
guardrail_events["started"].append(event)
if (
len(guardrail_events["started"]) == 1
and len(guardrail_events["completed"]) == 1
):
all_events_received.set()
with condition:
guardrail_events["started"].append(event)
condition.notify()

@crewai_event_bus.on(LLMGuardrailCompletedEvent)
def capture_guardrail_completed(source, event):
guardrail_events["completed"].append(event)
if (
len(guardrail_events["started"]) == 1
and len(guardrail_events["completed"]) == 1
):
all_events_received.set()
with condition:
guardrail_events["completed"].append(event)
condition.notify()

agent = Agent(
role="Sports Analyst",

@@ -445,42 +437,40 @@ def test_guardrail_is_called_using_callable():

result = agent.kickoff(messages="Top 1 best players in the world?")

assert all_events_received.wait(timeout=10), (
"Timeout waiting for all guardrail events"
)
with condition:
success = condition.wait_for(
lambda: len(guardrail_events["started"]) >= 1
and len(guardrail_events["completed"]) >= 1,
timeout=10,
)
assert success, "Timeout waiting for all guardrail events"
assert len(guardrail_events["started"]) == 1
assert len(guardrail_events["completed"]) == 1
assert guardrail_events["completed"][0].success
assert "Pelé - Santos, 1958" in result.raw

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_guardrail_reached_attempt_limit():
guardrail_events = defaultdict(list)
guardrail_events: dict[str, list] = defaultdict(list)
from crewai.events.event_types import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)

all_events_received = threading.Event()
condition = threading.Condition()

@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
guardrail_events["started"].append(event)
if (
len(guardrail_events["started"]) == 3
and len(guardrail_events["completed"]) == 3
):
all_events_received.set()
with condition:
guardrail_events["started"].append(event)
condition.notify()

@crewai_event_bus.on(LLMGuardrailCompletedEvent)
def capture_guardrail_completed(source, event):
guardrail_events["completed"].append(event)
if (
len(guardrail_events["started"]) == 3
and len(guardrail_events["completed"]) == 3
):
all_events_received.set()
with condition:
guardrail_events["completed"].append(event)
condition.notify()

agent = Agent(
role="Sports Analyst",

@@ -498,9 +488,13 @@ def test_guardrail_reached_attempt_limit():
):
agent.kickoff(messages="Top 10 best players in the world?")

assert all_events_received.wait(timeout=10), (
"Timeout waiting for all guardrail events"
)
with condition:
success = condition.wait_for(
lambda: len(guardrail_events["started"]) >= 3
and len(guardrail_events["completed"]) >= 3,
timeout=10,
)
assert success, "Timeout waiting for all guardrail events"
assert len(guardrail_events["started"]) == 3 # 2 retries + 1 initial call
assert len(guardrail_events["completed"]) == 3 # 2 retries + 1 initial call
assert not guardrail_events["completed"][0].success

@@ -508,7 +502,7 @@ def test_guardrail_reached_attempt_limit():
assert not guardrail_events["completed"][2].success

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_output_when_guardrail_returns_base_model():
class Player(BaseModel):
name: str

@@ -599,7 +593,7 @@ def test_lite_agent_with_custom_llm_and_guardrails():
assert result2.raw == "Modified by guardrail"

@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_lite_agent_with_invalid_llm():
"""Test that LiteAgent raises proper error when create_llm returns None."""
with patch("crewai.lite_agent.create_llm", return_value=None):

@@ -615,7 +609,7 @@ def test_lite_agent_with_invalid_llm():

@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"})
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get")
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_kickoff_with_platform_tools(mock_get):
"""Test that Agent.kickoff() properly integrates platform tools with LiteAgent"""
mock_response = Mock()

@@ -657,7 +651,7 @@ def test_agent_kickoff_with_platform_tools(mock_get):

@patch.dict("os.environ", {"EXA_API_KEY": "test_exa_key"})
@patch("crewai.agent.Agent._get_external_mcp_tools")
@pytest.mark.vcr(filter_headers=["authorization"])
@pytest.mark.vcr()
def test_agent_kickoff_with_mcp_tools(mock_get_mcp_tools):
"""Test that Agent.kickoff() properly integrates MCP tools with LiteAgent"""
# Setup mock MCP tools - create a proper BaseTool instance
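
The guardrail tests in the hunks above drop the threading.Event flag that the event handlers used to set and instead append to guardrail_events and call notify() under a shared threading.Condition, with the test body blocking on Condition.wait_for(). A minimal standalone sketch of that synchronization pattern (the names below are illustrative, not taken from the test file):

    # Sketch: wait until a background callback has fired N times, without a busy loop.
    import threading
    from collections import defaultdict

    events = defaultdict(list)
    condition = threading.Condition()


    def on_event(payload):
        # Called from another thread; record the event and wake any waiter.
        with condition:
            events["completed"].append(payload)
            condition.notify()


    def wait_for_events(n, timeout=10):
        # wait_for re-checks the predicate each time notify() wakes it up,
        # and returns False if the timeout expires first.
        with condition:
            return condition.wait_for(lambda: len(events["completed"]) >= n, timeout=timeout)

Unlike a one-shot Event, the condition keeps working when the expected count changes per test (1, 2, or 3 events above) and avoids the race where the count check runs before the last append.
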
@@ -1,126 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "system", "content": "You are Test Agent. Test backstory\nYour
personal goal is: Test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"}, {"role": "user", "content": "\nCurrent Task: Say hello to
the world\n\nThis is the expected criteria for your final answer: hello world\nyou
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
This is VERY important to you, use the tools available and give your best Final
Answer, your job depends on it!\n\nThought:"}], "model": "gpt-4o-mini", "stop":
["\nObservation:"]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '825'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.93.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.93.0
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFJdi9swEHz3r1j0HBc7ZydXvx1HC0evhT6UUtrDKNLa1lXWqpJ8aTjy
30vsXOz0A/pi8M7OaGZ3nxMApiSrgImOB9Fbnd4Why/v79HeePeD37/Zfdx8evf5+rY4fNjdPbJV
ZNDuEUV4Yb0S1FuNQZGZYOGQB4yq+bYsr7J1nhcj0JNEHWmtDWlBaa+MStfZukizbZpfn9gdKYGe
VfA1AQB4Hr/Rp5H4k1WQrV4qPXrPW2TVuQmAOdKxwrj3ygduAlvNoCAT0IzW78DQHgQ30KonBA5t
tA3c+D06gG/mrTJcw834X0GHWhPsyWm5FHTYDJ7HUGbQegFwYyjwOJQxysMJOZ7Na2qto53/jcoa
ZZTvaofck4lGfSDLRvSYADyMQxoucjPrqLehDvQdx+fycjvpsXk3C/TqBAYKXC/q29NoL/VqiYEr
7RdjZoKLDuVMnXfCB6loASSL1H+6+Zv2lFyZ9n/kZ0AItAFlbR1KJS4Tz20O4+n+q+085dEw8+ie
lMA6KHRxExIbPujpoJg/+IB93SjTorNOTVfV2LrcZLzZYFm+Zskx+QUAAP//AwB1vYZ+YwMAAA==
headers:
CF-RAY:
- 96fc9f29dea3cf1f-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 15 Aug 2025 23:55:15 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=oA9oTa3cE0ZaEUDRf0hCpnarSAQKzrVUhl6qDS4j09w-1755302115-1.0.1.1-gUUDl4ZqvBQkg7244DTwOmSiDUT2z_AiQu0P1xUaABjaufSpZuIlI5G0H7OSnW.ldypvpxjj45NGWesJ62M_2U7r20tHz_gMmDFw6D5ZiNc;
path=/; expires=Sat, 16-Aug-25 00:25:15 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=ICenEGMmOE5jaOjwD30bAOwrF8.XRbSIKTBl1EyWs0o-1755302115700-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Strict-Transport-Security:
- max-age=31536000; includeSubDomains; preload
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '735'
openai-project:
- proj_xitITlrFeen7zjNSzML82h9x
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '753'
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999827'
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_212fde9d945a462ba0d89ea856131dce
status:
code: 200
message: OK
version: 1
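
The string: !!binary blocks in these cassettes are YAML base64; given the recorded Content-Encoding: gzip header, each payload is a gzip-compressed JSON response body captured by VCR. A small sketch for inspecting one locally (the blob value is a placeholder for the full base64 text, and the choices[...] path assumes an OpenAI-style chat completion, as recorded above):

    # Decode a VCR cassette's !!binary response body: base64 -> gzip -> JSON.
    import base64
    import gzip
    import json

    blob = "H4sIAAAAAAAA..."  # placeholder: paste the complete !!binary payload here
    body = json.loads(gzip.decompress(base64.b64decode(blob)))
    print(body["choices"][0]["message"]["content"])
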
@@ -0,0 +1,211 @@
interactions:
- request:
body: '{"trace_id": "4d0d2b51-d83a-4054-b41e-8c2d17baa88f", "execution_type":
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
"crew_name": "crew", "flow_name": null, "crewai_version": "1.6.0", "privacy_level":
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-29T02:50:39.376314+00:00"},
"ephemeral_trace_id": "4d0d2b51-d83a-4054-b41e-8c2d17baa88f"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '488'
Content-Type:
- application/json
User-Agent:
- CrewAI-CLI/1.6.0
X-Crewai-Version:
- 1.6.0
authorization:
- AUTHORIZATION-XXX
method: POST
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/ephemeral/batches
response:
body:
string: '{"id":"71726285-2e63-4d2a-b4c4-4bbd0ff6a9f1","ephemeral_trace_id":"4d0d2b51-d83a-4054-b41e-8c2d17baa88f","execution_type":"crew","crew_name":"crew","flow_name":null,"status":"running","duration_ms":null,"crewai_version":"1.6.0","total_events":0,"execution_context":{"crew_fingerprint":null,"crew_name":"crew","flow_name":null,"crewai_version":"1.6.0","privacy_level":"standard"},"created_at":"2025-11-29T02:50:39.931Z","updated_at":"2025-11-29T02:50:39.931Z","access_code":"TRACE-bf7f3f49b3","user_identifier":null}'
headers:
Connection:
- keep-alive
Content-Length:
- '515'
Content-Type:
- application/json; charset=utf-8
Date:
- Sat, 29 Nov 2025 02:50:39 GMT
cache-control:
- no-store
content-security-policy:
- CSP-FILTERED
etag:
- ETAG-XXX
expires:
- '0'
permissions-policy:
- PERMISSIONS-POLICY-XXX
pragma:
- no-cache
referrer-policy:
- REFERRER-POLICY-XXX
strict-transport-security:
- STS-XXX
vary:
- Accept
x-content-type-options:
- X-CONTENT-TYPE-XXX
x-frame-options:
- X-FRAME-OPTIONS-XXX
x-permitted-cross-domain-policies:
- X-PERMITTED-XXX
x-request-id:
- X-REQUEST-ID-XXX
x-runtime:
- X-RUNTIME-XXX
x-xss-protection:
- X-XSS-PROTECTION-XXX
status:
code: 201
message: Created
- request:
body: '{"messages":[{"role":"system","content":"You are test role. test backstory\nYour
personal goal is: test goal\nTo give my best complete final answer to the task
respond using the exact following format:\n\nThought: I now can give a great
answer\nFinal Answer: Your final answer must be the great and the most complete
as possible, it must be outcome described.\n\nI MUST use these formats, my job
depends on it!"},{"role":"user","content":"\nCurrent Task: Analyze the data\n\nThis
is the expected criteria for your final answer: Analysis report\nyou MUST return
the actual complete content as the final answer, not a summary.\n\nBegin! This
is VERY important to you, use the tools available and give your best Final Answer,
your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
authorization:
- AUTHORIZATION-XXX
connection:
- keep-alive
content-length:
- '785'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.109.1
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 1.109.1
x-stainless-read-timeout:
- X-STAINLESS-READ-TIMEOUT-XXX
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.10
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAAwAAAP//jFbbbhtHDH33VxD71AKyECeynegtdW9uiiZI3AJpXRj0DHeXzSxnO5yV
ohT594Kzq5WcpEBfdBkOycPDy/CfE4CKfbWGyrWYXdeH06v2/EX7li7T5uenL364rH/6NUn+/Ze/
3nP99mW1MI14/xe5vNdautj1gTJHGcUuEWYyq2eXF6snT1cXT54VQRc9BVNr+ny6Wp6ddix8+vjR
4/PTR6vTs9Wk3kZ2pNUa/jgBAPinfBpQ8fS+WsOjxf6kI1VsqFrPlwCqFIOdVKjKmlFytTgIXZRM
UrDftHFo2ryGa5C4BYcCDW8IEBoLAFB0S+lWvmfBAM/LvzXcyq08Fww7ZYXX1MeU7ehsCdeSU/SD
MyJu5aYlyKjvINHfAydSQMhtTOYTcG8g1pBbKn4FPGZcwk2EPsUNe0Ni1CZqSdSQpeJuUVTsMrSo
cE8koDvN1GFmhyHsINGGaUt+AVvOLWC2mDkK5AieMnIAFA+JAm1QHNl5/gRwR5J1abE9XsK35u3l
hpLZvZU3bEoSQXtyXLMb4WxR99g9sBSTfYpdXzCzTgEAqg5dYaQhobRXV8p7SHmPyMCQZqhjmlkz
qgEF0OUBA6gjwcRxMVrpI0tW0MG1gAoydOYBA2wwDKQLcJipiYntd+aOQGn8ExPE3FKCDSbG+0AK
2zgEb867guYej5I2BlMYejIx9CpFR6osza2cjkdXgVBYmjV8JzokluaQPlaoExHUKXbA4qJYxZK4
AqfjYmnGbRmjlGKyrEzWX6YGhT+gJXcNV1NkH0zNrmtOg8uj1+LRaKS6JpdLpe8Jne39xjpgmA3+
WgC4FlPWYrBJ2LdqyWFvJVXvICcSP0p7K7QkCl9xDdj3gZ3R+HXhaLWEuW9uyLXCltnimdQl7guk
Nxkza2anFk5wQyhQjPOOUBbQkefyHT0twPrbY/LgacO4L/FBPKUiAkeSEwbIJJ7E7QpOz9pTUo5S
Ir+xCGZwa7geQ2M3ux76rTmJCXzcSvk9hR03lMYqsnrnjk7Hahqb2axfxZRoiuLg46ol987I3cu0
5d6aOW+tnz3XNSWSfKjFQuL5Er5n8SxNYe4F7eDVRPr6wOMIWrmREoXkQ2YsfJTYYTCQU4/OWK9F
uWmzcSCZUp8ozxyU+jlK9rbFbNo74K4Pu6L/KpZBgwGucFDSNfy4662nlBSijIkJO4u7RpdjMgh1
GKzkjxqjhHqxhKsoLgyWpxLtm6HrMO1KLSAL1BMTI/SulFuhspS5GauZQknb/aAspApl/r/PI+3k
92NmZuA1udh1JP7IUj2kMhbQjVwkYNmQZm7KpYL2cgk/c8cjXQXtc9mZN80Jy0AicXEwVsnPwymY
Cvlp/LnY02hdh7pmx5b/aVxz15v7PUnU59Z4OOrgW3m6hOd9T+Lt+Sw9+NlU/rpUZOnnBeRSV+Ng
Gd0YtH0DYoA45H6YHoFvyFlKzX0im1yfTf+Hk5/14eifn7zpDYhDDpaSEk9HuY0+htjsHswtOsz9
D/MMm+dX2C3hut4/A/uJihvkYJEtygjaTZwpgWbqFbYcAuxKYeAh7NIXJb+m+inawsB34o3y6eR4
qUhUD4q22cgQwpEAReJUErbO/DlJPs4LTIhNn+K9fqJa1Sys7V0i1Ci2rGiOfVWkH08A/iyL0vBg
96lGuu9yfEfF3dn5+WivOixoB+mTp88maY4Zw0FwvlotvmDwbqRKj3atyqFryR9UD4sZDp7jkeDk
KOzP4XzJ9hg6S/N/zB8EzlGfyd/1iTy7hyEfriWyBfa/rs00F8CV2tbj6C4zJUuFpxqHMG6V1bh4
3dUsjc1LHlfLur97dnlxQeerZ/ePq5OPJ/8CAAD//wMAwDPj9GkLAAA=
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sat, 29 Nov 2025 02:50:45 GMT
Server:
- cloudflare
Set-Cookie:
- SET-COOKIE-XXX
Strict-Transport-Security:
- STS-XXX
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- X-CONTENT-TYPE-XXX
access-control-expose-headers:
- ACCESS-CONTROL-XXX
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- OPENAI-ORG-XXX
openai-processing-ms:
- '5125'
openai-project:
- OPENAI-PROJECT-XXX
openai-version:
- '2020-10-01'
x-envoy-upstream-service-time:
- '5227'
x-openai-proxy-wasm:
- v0.1
x-ratelimit-limit-project-tokens:
- '150000000'
x-ratelimit-limit-requests:
- X-RATELIMIT-LIMIT-REQUESTS-XXX
x-ratelimit-limit-tokens:
- X-RATELIMIT-LIMIT-TOKENS-XXX
x-ratelimit-remaining-project-tokens:
- '149999830'
x-ratelimit-remaining-requests:
- X-RATELIMIT-REMAINING-REQUESTS-XXX
x-ratelimit-remaining-tokens:
- X-RATELIMIT-REMAINING-TOKENS-XXX
x-ratelimit-reset-project-tokens:
- 0s
x-ratelimit-reset-requests:
- X-RATELIMIT-RESET-REQUESTS-XXX
x-ratelimit-reset-tokens:
- X-RATELIMIT-RESET-TOKENS-XXX
x-request-id:
- X-REQUEST-ID-XXX
status:
code: 200
message: OK
version: 1