Compare commits
21 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 428810bd6f | |
| | 610bc4b3f5 | |
| | e73c5887d9 | |
| | c5ac5fa78a | |
| | 5456c80556 | |
| | df754dbcc8 | |
| | e8356b777c | |
| | ade425a543 | |
| | d7f6f07a5d | |
| | 9e1dae0746 | |
| | b5161c320d | |
| | c793c829ea | |
| | 0fe9352149 | |
| | 548170e989 | |
| | 417a4e3d91 | |
| | 68dce92003 | |
| | 289b90f00a | |
| | c591c1ac87 | |
| | 86f0dfc2d7 | |
| | 74b5c88834 | |
| | 13e5ec711d | |
BIN  .cache/plugin/social/0b649b356e60b558dfaafe8bb095862e.png  (Normal file) | After: Size 28 KiB |
BIN  .cache/plugin/social/0cce129b2747506603c430fd3fe2b3d6.png  (Normal file) | After: Size 36 KiB |
BIN  .cache/plugin/social/0f18d6e26b8551d3f42ef92b0f786024.png  (Normal file) | After: Size 37 KiB |
BIN  .cache/plugin/social/14c48b40955d6021b47ae973d9aef723.png  (Normal file) | After: Size 27 KiB |
BIN  .cache/plugin/social/17484ad7f45b09a1db146ba3ad3df79a.png  (Normal file) | After: Size 42 KiB |
BIN  .cache/plugin/social/1d935acb34360e4768e35ae13479bbf9.png  (Normal file) | After: Size 44 KiB |
BIN  .cache/plugin/social/216220c022e734cc7999210b48c9fb59.png  (Normal file) | After: Size 45 KiB |
BIN  .cache/plugin/social/246dcba6c47283feac354f5871842fe8.png  (Normal file) | After: Size 48 KiB |
BIN  .cache/plugin/social/259ba94ac7e93bd9f968c57ec4a15fe5.png  (Normal file) | After: Size 35 KiB |
BIN  .cache/plugin/social/288fd82ce2209be4864d19bd50b21474.png  (Normal file) | After: Size 23 KiB |
BIN  .cache/plugin/social/28a844df4871a1cdfcba05fdc87bb3e8.png  (Normal file) | After: Size 43 KiB |
BIN  .cache/plugin/social/40770a96ef2fb657a7aa16a9facf702f.png  (Normal file) | After: Size 39 KiB |
BIN  .cache/plugin/social/4747e68a5e5c0f0994cdc5b37682a37c.png  (Normal file) | After: Size 30 KiB |
BIN  .cache/plugin/social/4809f4ae19b6e78539b900da82d8a1f6.png  (Normal file) | After: Size 27 KiB |
BIN  .cache/plugin/social/481b171eb3fe3dec67ca86d2d923f598.png  (Normal file) | After: Size 24 KiB |
BIN  .cache/plugin/social/4ae47a8f7da894db700b2f29242cd0c5.png  (Normal file) | After: Size 44 KiB |
BIN  .cache/plugin/social/4c1fb3bfd02d6b1317779fe5101058a7.png  (Normal file) | After: Size 25 KiB |
BIN  .cache/plugin/social/56e240bc0124af182495bc59877d8d11.png  (Normal file) | After: Size 49 KiB |
BIN  .cache/plugin/social/5d2431971fcde0af2c84e4680a4227a7.png  (Normal file) | After: Size 18 KiB |
BIN  .cache/plugin/social/69bcd9a2304ea69e1244a7ac510dd98d.png  (Normal file) | After: Size 35 KiB |
BIN  .cache/plugin/social/6b49f5ef597c15cabc3df9bac4fbcf44.png  (Normal file) | After: Size 34 KiB |
BIN  .cache/plugin/social/7296e2d6c7b2c713ed7b2e4546e3acdb.png  (Normal file) | After: Size 42 KiB |
BIN  .cache/plugin/social/805d7c5662a45ca18b52554eecbc34af.png  (Normal file) | After: Size 30 KiB |
BIN  .cache/plugin/social/80f1492950494de7a34a1f20f6dd4368.png  (Normal file) | After: Size 30 KiB |
BIN  .cache/plugin/social/834ad7f8096fa4c92637b815777bf2bd.png  (Normal file) | After: Size 33 KiB |
BIN  .cache/plugin/social/8b089bdf12d22c016f481d654be39eb1.png  (Normal file) | After: Size 39 KiB |
BIN  .cache/plugin/social/96f1c198bf51f822eb04a25adf7ca20c.png  (Normal file) | After: Size 39 KiB |
BIN  .cache/plugin/social/9f88e9bd3010b149e527e0600c2e438c.png  (Normal file) | After: Size 45 KiB |
BIN  .cache/plugin/social/Roboto-Black.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-BlackItalic.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-Bold.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-BoldItalic.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-Italic.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-Light.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-LightItalic.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-Medium.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-MediumItalic.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-Regular.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-Thin.ttf  (Normal file)
BIN  .cache/plugin/social/Roboto-ThinItalic.ttf  (Normal file)
BIN  .cache/plugin/social/a0c21e9a7250afebc533da92c7050bed.png  (Normal file) | After: Size 34 KiB |
BIN  .cache/plugin/social/a19c79f0bc7a3e5ffc6b511a68273e5d.png  (Normal file) | After: Size 44 KiB |
BIN  .cache/plugin/social/a1d83c5e1feb928b579ad122a8d3786d.png  (Normal file) | After: Size 52 KiB |
BIN  .cache/plugin/social/a3d8476a7b5c6630a5f91aed8c210173.png  (Normal file) | After: Size 40 KiB |
BIN  .cache/plugin/social/ac9c4b6558565d4c349355101e95c74a.png  (Normal file) | After: Size 29 KiB |
BIN  .cache/plugin/social/b417e4353162a563e70f1350a2777e2c.png  (Normal file) | After: Size 40 KiB |
BIN  .cache/plugin/social/b84a1e5d0534be3c31f04a7d4a98b515.png  (Normal file) | After: Size 29 KiB |
BIN  .cache/plugin/social/bca675d7c3c82f52ebd329487fb9ade1.png  (Normal file) | After: Size 40 KiB |
BIN  .cache/plugin/social/bdf46ef3b5230ebb45ef648933f54fa2.png  (Normal file) | After: Size 47 KiB |
BIN  .cache/plugin/social/beacb748aad822c66a972b39186dbef1.png  (Normal file) | After: Size 17 KiB |
BIN  .cache/plugin/social/caa7abb72303dbe5a02ec11e6f1eba6b.png  (Normal file) | After: Size 18 KiB |
BIN  .cache/plugin/social/cff5eb5aae0959e143c12945428558bc.png  (Normal file) | After: Size 21 KiB |
BIN  .cache/plugin/social/d01b95e8266a0d2c5f825b88d98a97a1.png  (Normal file) | After: Size 55 KiB |
BIN  .cache/plugin/social/d7db21df76b132d3ca3ae4313e23f77d.png  (Normal file) | After: Size 29 KiB |
BIN  .cache/plugin/social/d87db72302152f8c0953d7105c28a206.png  (Normal file) | After: Size 36 KiB |
BIN  .cache/plugin/social/e580fe32a1d3f15fc89057d053ae3e52.png  (Normal file) | After: Size 39 KiB |
BIN  .cache/plugin/social/e9111c93e01f7c1dfec7bbab69843076.png  (Normal file) | After: Size 28 KiB |
BIN  .cache/plugin/social/ebf70df39c2bfd2c4a89d70846a516ff.png  (Normal file) | After: Size 44 KiB |
BIN  .cache/plugin/social/ed5690e7952bdee0372c8d3f1f5d98d7.png  (Normal file) | After: Size 39 KiB |
BIN  .cache/plugin/social/f6d08b81ae945faa6c4a436de48d2da6.png  (Normal file) | After: Size 28 KiB |
BIN  .cache/plugin/social/f875c8d6b0cd71d9ae38300c82361d77.png  (Normal file) | After: Size 37 KiB |
BIN  .cache/plugin/social/fc9a9f44881519178d4000f24000ef9d.png  (Normal file) | After: Size 33 KiB |
21  .github/codeql/codeql-config.yml  (vendored)
@@ -1,21 +0,0 @@
name: "CodeQL Config"

paths-ignore:
  # Ignore template files - these are boilerplate code that shouldn't be analyzed
  - "src/crewai/cli/templates/**"
  # Ignore test cassettes - these are test fixtures/recordings
  - "tests/cassettes/**"
  # Ignore cache and build artifacts
  - ".cache/**"
  # Ignore documentation build artifacts
  - "docs/.cache/**"

paths:
  # Include all Python source code
  - "src/**"
  # Include tests (but exclude cassettes)
  - "tests/**"

# Configure specific queries or packs if needed
# queries:
#   - uses: security-and-quality
2  .github/workflows/build-uv-cache.yml  (vendored)
@@ -7,8 +7,6 @@ on:
    paths:
      - "uv.lock"
      - "pyproject.toml"
  schedule:
    - cron: "0 0 */5 * *" # Run every 5 days at midnight UTC to prevent cache expiration
  workflow_dispatch:

permissions:
5  .github/workflows/codeql.yml  (vendored)
@@ -15,11 +15,11 @@ on:
  push:
    branches: [ "main" ]
    paths-ignore:
      - "src/crewai/cli/templates/**"
      - "lib/crewai/src/crewai/cli/templates/**"
  pull_request:
    branches: [ "main" ]
    paths-ignore:
      - "src/crewai/cli/templates/**"
      - "lib/crewai/src/crewai/cli/templates/**"

jobs:
  analyze:
@@ -73,7 +73,6 @@ jobs:
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
          config-file: ./.github/codeql/codeql-config.yml
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
8  .github/workflows/linter.yml  (vendored)
@@ -52,10 +52,10 @@ jobs:
      - name: Run Ruff on Changed Files
        if: ${{ steps.changed-files.outputs.files != '' }}
        run: |
          echo "${{ steps.changed-files.outputs.files }}" \
            | tr ' ' '\n' \
            | grep -v 'src/crewai/cli/templates/' \
            | xargs -I{} uv run ruff check "{}"
          echo "${{ steps.changed-files.outputs.files }}" \
            | tr ' ' '\n' \
            | grep -v 'src/crewai/cli/templates/' \
            | xargs -I{} uv run ruff check "{}"

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
71  .github/workflows/publish.yml  (vendored, Normal file)
@@ -0,0 +1,71 @@
name: Publish to PyPI

on:
  release:
    types: [ published ]
  workflow_dispatch:

jobs:
  build:
    if: github.event.release.prerelease == true
    name: Build packages
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"

      - name: Install uv
        uses: astral-sh/setup-uv@v4

      - name: Build packages
        run: |
          uv build --all-packages
          rm dist/.gitignore

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: dist/

  publish:
    if: github.event.release.prerelease == true
    name: Publish to PyPI
    needs: build
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/crewai
    permissions:
      id-token: write
      contents: read
    steps:
      - uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: "3.12"
          enable-cache: false

      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          name: dist
          path: dist

      - name: Publish to PyPI
        env:
          UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          for package in dist/*; do
            echo "Publishing $package"
            uv publish "$package"
          done
31  .github/workflows/tests.yml  (vendored)
@@ -8,6 +8,14 @@ permissions:
env:
  OPENAI_API_KEY: fake-api-key
  PYTHONUNBUFFERED: 1
  BRAVE_API_KEY: fake-brave-key
  SNOWFLAKE_USER: fake-snowflake-user
  SNOWFLAKE_PASSWORD: fake-snowflake-password
  SNOWFLAKE_ACCOUNT: fake-snowflake-account
  SNOWFLAKE_WAREHOUSE: fake-snowflake-warehouse
  SNOWFLAKE_DATABASE: fake-snowflake-database
  SNOWFLAKE_SCHEMA: fake-snowflake-schema
  EMBEDCHAIN_DB_URI: sqlite:///test.db

jobs:
  tests:
@@ -56,13 +64,13 @@ jobs:
      - name: Run tests (group ${{ matrix.group }} of 8)
        run: |
          PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
          DURATION_FILE=".test_durations_py${PYTHON_VERSION_SAFE}"
          DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}"

          # Temporarily always skip cached durations to fix test splitting
          # When durations don't match, pytest-split runs duplicate tests instead of splitting
          echo "Using even test splitting (duration cache disabled until fix merged)"
          DURATIONS_ARG=""

          # Original logic (disabled temporarily):
          # if [ ! -f "$DURATION_FILE" ]; then
          #   echo "No cached durations found, tests will be split evenly"
@@ -74,8 +82,8 @@ jobs:
          #   echo "No test changes detected, using cached test durations for optimal splitting"
          #   DURATIONS_ARG="--durations-path=${DURATION_FILE}"
          # fi

          uv run pytest \
          cd lib/crewai && uv run pytest \
            --block-network \
            --timeout=30 \
            -vv \
@@ -86,6 +94,19 @@ jobs:
            -n auto \
            --maxfail=3

      - name: Run tool tests (group ${{ matrix.group }} of 8)
        run: |
          cd lib/crewai-tools && uv run pytest \
            --block-network \
            --timeout=30 \
            -vv \
            --splits 8 \
            --group ${{ matrix.group }} \
            --durations=10 \
            -n auto \
            --maxfail=3

      - name: Save uv caches
        if: steps.cache-restore.outputs.cache-hit != 'true'
        uses: actions/cache/save@v4
1  .gitignore  (vendored)
@@ -2,7 +2,6 @@
.pytest_cache
__pycache__
dist/
lib/
.env
assets/*
.idea
@@ -6,14 +6,16 @@ repos:
        entry: uv run ruff check
        language: system
        types: [python]
        exclude: ^lib/crewai/
      - id: ruff-format
        name: ruff-format
        entry: uv run ruff format
        language: system
        types: [python]
        exclude: ^lib/crewai/
      - id: mypy
        name: mypy
        entry: uv run mypy
        language: system
        types: [python]
        exclude: ^tests/
        exclude: ^lib/crewai/
1737  crewAI.excalidraw  (Normal file)

@@ -397,7 +397,6 @@
        "en/enterprise/guides/kickoff-crew",
        "en/enterprise/guides/update-crew",
        "en/enterprise/guides/enable-crew-studio",
        "en/enterprise/guides/capture_telemetry_logs",
        "en/enterprise/guides/azure-openai-setup",
        "en/enterprise/guides/tool-repository",
        "en/enterprise/guides/react-component-export",
@@ -422,7 +421,6 @@
        "en/api-reference/introduction",
        "en/api-reference/inputs",
        "en/api-reference/kickoff",
        "en/api-reference/resume",
        "en/api-reference/status"
      ]
    }
@@ -829,7 +827,6 @@
        "pt-BR/api-reference/introduction",
        "pt-BR/api-reference/inputs",
        "pt-BR/api-reference/kickoff",
        "pt-BR/api-reference/resume",
        "pt-BR/api-reference/status"
      ]
    }
@@ -1242,7 +1239,6 @@
        "ko/api-reference/introduction",
        "ko/api-reference/inputs",
        "ko/api-reference/kickoff",
        "ko/api-reference/resume",
        "ko/api-reference/status"
      ]
    }
@@ -1,6 +0,0 @@
---
title: "POST /resume"
description: "Resume crew execution with human feedback"
openapi: "/enterprise-api.en.yaml POST /resume"
mode: "wide"
---
@@ -1,35 +0,0 @@
---
title: "Open Telemetry Logs"
description: "Understand how to capture telemetry logs from your CrewAI AMP deployments"
icon: "magnifying-glass-chart"
mode: "wide"
---

CrewAI AMP provides a powerful way to capture telemetry logs from your deployments. This allows you to monitor the performance of your agents and workflows, and to debug issues that may arise.

## Prerequisites

<CardGroup cols={2}>
  <Card title="ENTERPRISE OTEL SETUP enabled" icon="users">
    Your organization should have ENTERPRISE OTEL SETUP enabled
  </Card>
  <Card title="OTEL collector setup" icon="server">
    Your organization should have an OTEL collector set up, or a provider such as Datadog log intake configured
  </Card>
</CardGroup>

## How to capture telemetry logs

1. Go to the settings/organization tab
2. Configure your OTEL collector setup
3. Save

Example of setting up OTEL log collection to Datadog:

<Frame>
  
</Frame>
@@ -40,28 +40,6 @@ Human-In-The-Loop (HITL) is a powerful approach that combines artificial intelli
<Frame>
  <img src="/images/enterprise/crew-resume-endpoint.png" alt="Crew Resume Endpoint" />
</Frame>

<Warning>
**Critical: Webhook URLs Must Be Provided Again**:
You **must** provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`) in the resume call that you used in the kickoff call. Webhook configurations are **NOT** automatically carried over from kickoff - they must be explicitly included in the resume request to continue receiving notifications for task completion, agent steps, and crew completion.
</Warning>

Example resume call with webhooks:
```bash
curl -X POST {BASE_URL}/resume \
  -H "Authorization: Bearer YOUR_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "execution_id": "abcd1234-5678-90ef-ghij-klmnopqrstuv",
    "task_id": "research_task",
    "human_feedback": "Great work! Please add more details.",
    "is_approve": true,
    "taskWebhookUrl": "https://your-server.com/webhooks/task",
    "stepWebhookUrl": "https://your-server.com/webhooks/step",
    "crewWebhookUrl": "https://your-server.com/webhooks/crew"
  }'
```

<Warning>
**Feedback Impact on Task Execution**:
It's crucial to exercise care when providing feedback, as the entire feedback content will be incorporated as additional context for further task executions.
@@ -98,4 +76,4 @@ HITL workflows are particularly valuable for:
- Complex decision-making scenarios
- Sensitive or high-stakes operations
- Creative tasks requiring human judgment
- Compliance and regulatory reviews
- Compliance and regulatory reviews
@@ -151,3 +151,5 @@ You can check the security check status of a tool at:
<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
  Contact our support team for assistance with API integration or troubleshooting.
</Card>
@@ -79,28 +79,6 @@ Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelli
<Frame>
  <img src="/images/enterprise/crew-resume-endpoint.png" alt="Crew Resume Endpoint" />
</Frame>

<Warning>
**Critical: Webhook URLs Must Be Provided Again**:
You **must** provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`) in the resume call that you used in the kickoff call. Webhook configurations are **NOT** automatically carried over from kickoff - they must be explicitly included in the resume request to continue receiving notifications for task completion, agent steps, and crew completion.
</Warning>

Example resume call with webhooks:
```bash
curl -X POST {BASE_URL}/resume \
  -H "Authorization: Bearer YOUR_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "execution_id": "abcd1234-5678-90ef-ghij-klmnopqrstuv",
    "task_id": "research_task",
    "human_feedback": "Great work! Please add more details.",
    "is_approve": true,
    "taskWebhookUrl": "https://your-server.com/webhooks/task",
    "stepWebhookUrl": "https://your-server.com/webhooks/step",
    "crewWebhookUrl": "https://your-server.com/webhooks/crew"
  }'
```

<Warning>
**Feedback Impact on Task Execution**:
It's crucial to exercise care when providing feedback, as the entire feedback content will be incorporated as additional context for further task executions.
@@ -276,134 +276,6 @@ paths:
        '500':
          $ref: '#/components/responses/ServerError'

  /resume:
    post:
      summary: Resume Crew Execution with Human Feedback
      description: |
        **📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*

        Resume a paused crew execution with human feedback for Human-in-the-Loop (HITL) workflows.
        When a task with `human_input=True` completes, the crew execution pauses and waits for human feedback.

        **IMPORTANT**: You must provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`)
        that were used in the original kickoff call. Webhook configurations are NOT automatically carried over -
        they must be explicitly provided in the resume request to continue receiving notifications.
      operationId: resumeCrewExecution
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              required:
                - execution_id
                - task_id
                - human_feedback
                - is_approve
              properties:
                execution_id:
                  type: string
                  format: uuid
                  description: The unique identifier for the crew execution (from kickoff)
                  example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                task_id:
                  type: string
                  description: The ID of the task that requires human feedback
                  example: "research_task"
                human_feedback:
                  type: string
                  description: Your feedback on the task output. This will be incorporated as additional context for subsequent task executions.
                  example: "Great research! Please add more details about recent developments in the field."
                is_approve:
                  type: boolean
                  description: "Whether you approve the task output: true = positive feedback (continue), false = negative feedback (retry task)"
                  example: true
                taskWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed after each task completion. MUST be provided to continue receiving task notifications.
                  example: "https://your-server.com/webhooks/task"
                stepWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed after each agent thought/action. MUST be provided to continue receiving step notifications.
                  example: "https://your-server.com/webhooks/step"
                crewWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed when the crew execution completes. MUST be provided to receive completion notification.
                  example: "https://your-server.com/webhooks/crew"
            examples:
              approve_and_continue:
                summary: Approve task and continue execution
                value:
                  execution_id: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                  task_id: "research_task"
                  human_feedback: "Excellent research! Proceed to the next task."
                  is_approve: true
                  taskWebhookUrl: "https://api.example.com/webhooks/task"
                  stepWebhookUrl: "https://api.example.com/webhooks/step"
                  crewWebhookUrl: "https://api.example.com/webhooks/crew"
              request_revision:
                summary: Request task revision with feedback
                value:
                  execution_id: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                  task_id: "analysis_task"
                  human_feedback: "Please include more quantitative data and cite your sources."
                  is_approve: false
                  taskWebhookUrl: "https://api.example.com/webhooks/task"
                  crewWebhookUrl: "https://api.example.com/webhooks/crew"
      responses:
        '200':
          description: Execution resumed successfully
          content:
            application/json:
              schema:
                type: object
                properties:
                  status:
                    type: string
                    enum: ["resumed", "retrying", "completed"]
                    description: Status of the resumed execution
                    example: "resumed"
                  message:
                    type: string
                    description: Human-readable message about the resume operation
                    example: "Execution resumed successfully"
              examples:
                resumed:
                  summary: Execution resumed with positive feedback
                  value:
                    status: "resumed"
                    message: "Execution resumed successfully"
                retrying:
                  summary: Task will be retried with negative feedback
                  value:
                    status: "retrying"
                    message: "Task will be retried with your feedback"
        '400':
          description: Invalid request body or execution not in pending state
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
              example:
                error: "Invalid Request"
                message: "Execution is not in pending human input state"
        '401':
          $ref: '#/components/responses/UnauthorizedError'
        '404':
          description: Execution ID or Task ID not found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
              example:
                error: "Not Found"
                message: "Execution ID not found"
        '500':
          $ref: '#/components/responses/ServerError'

components:
  securitySchemes:
    BearerAuth:
@@ -120,134 +120,6 @@ paths:
        '500':
          $ref: '#/components/responses/ServerError'

  /resume:
    post:
      summary: Resume Crew Execution with Human Feedback
      description: |
        **📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*

        Resume a paused crew execution with human feedback for Human-in-the-Loop (HITL) workflows.
        When a task with `human_input=True` completes, the crew execution pauses and waits for human feedback.

        **IMPORTANT**: You must provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`)
        that were used in the original kickoff call. Webhook configurations are NOT automatically carried over -
        they must be explicitly provided in the resume request to continue receiving notifications.
      operationId: resumeCrewExecution
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              required:
                - execution_id
                - task_id
                - human_feedback
                - is_approve
              properties:
                execution_id:
                  type: string
                  format: uuid
                  description: The unique identifier for the crew execution (from kickoff)
                  example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                task_id:
                  type: string
                  description: The ID of the task that requires human feedback
                  example: "research_task"
                human_feedback:
                  type: string
                  description: Your feedback on the task output. This will be incorporated as additional context for subsequent task executions.
                  example: "Great research! Please add more details about recent developments in the field."
                is_approve:
                  type: boolean
                  description: "Whether you approve the task output: true = positive feedback (continue), false = negative feedback (retry task)"
                  example: true
                taskWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed after each task completion. MUST be provided to continue receiving task notifications.
                  example: "https://your-server.com/webhooks/task"
                stepWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed after each agent thought/action. MUST be provided to continue receiving step notifications.
                  example: "https://your-server.com/webhooks/step"
                crewWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed when the crew execution completes. MUST be provided to receive completion notification.
                  example: "https://your-server.com/webhooks/crew"
            examples:
              approve_and_continue:
                summary: Approve task and continue execution
                value:
                  execution_id: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                  task_id: "research_task"
                  human_feedback: "Excellent research! Proceed to the next task."
                  is_approve: true
                  taskWebhookUrl: "https://api.example.com/webhooks/task"
                  stepWebhookUrl: "https://api.example.com/webhooks/step"
                  crewWebhookUrl: "https://api.example.com/webhooks/crew"
              request_revision:
                summary: Request task revision with feedback
                value:
                  execution_id: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                  task_id: "analysis_task"
                  human_feedback: "Please include more quantitative data and cite your sources."
                  is_approve: false
                  taskWebhookUrl: "https://api.example.com/webhooks/task"
                  crewWebhookUrl: "https://api.example.com/webhooks/crew"
      responses:
        '200':
          description: Execution resumed successfully
          content:
            application/json:
              schema:
                type: object
                properties:
                  status:
                    type: string
                    enum: ["resumed", "retrying", "completed"]
                    description: Status of the resumed execution
                    example: "resumed"
                  message:
                    type: string
                    description: Human-readable message about the resume operation
                    example: "Execution resumed successfully"
              examples:
                resumed:
                  summary: Execution resumed with positive feedback
                  value:
                    status: "resumed"
                    message: "Execution resumed successfully"
                retrying:
                  summary: Task will be retried with negative feedback
                  value:
                    status: "retrying"
                    message: "Task will be retried with your feedback"
        '400':
          description: Invalid request body or execution not in pending state
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
              example:
                error: "Invalid Request"
                message: "Execution is not in pending human input state"
        '401':
          $ref: '#/components/responses/UnauthorizedError'
        '404':
          description: Execution ID or Task ID not found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
              example:
                error: "Not Found"
                message: "Execution ID not found"
        '500':
          $ref: '#/components/responses/ServerError'

components:
  securitySchemes:
    BearerAuth:
@@ -156,134 +156,6 @@ paths:
        '500':
          $ref: '#/components/responses/ServerError'

  /resume:
    post:
      summary: Resume Crew Execution with Human Feedback
      description: |
        **📋 Reference Example Only** - *This shows the request format. To test with your actual crew, copy the cURL example and replace the URL + token with your real values.*

        Resume a paused crew execution with human feedback for Human-in-the-Loop (HITL) workflows.
        When a task with `human_input=True` completes, the crew execution pauses and waits for human feedback.

        **IMPORTANT**: You must provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`)
        that were used in the original kickoff call. Webhook configurations are NOT automatically carried over -
        they must be explicitly provided in the resume request to continue receiving notifications.
      operationId: resumeCrewExecution
      requestBody:
        required: true
        content:
          application/json:
            schema:
              type: object
              required:
                - execution_id
                - task_id
                - human_feedback
                - is_approve
              properties:
                execution_id:
                  type: string
                  format: uuid
                  description: The unique identifier for the crew execution (from kickoff)
                  example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                task_id:
                  type: string
                  description: The ID of the task that requires human feedback
                  example: "research_task"
                human_feedback:
                  type: string
                  description: Your feedback on the task output. This will be incorporated as additional context for subsequent task executions.
                  example: "Great research! Please add more details about recent developments in the field."
                is_approve:
                  type: boolean
                  description: "Whether you approve the task output: true = positive feedback (continue), false = negative feedback (retry task)"
                  example: true
                taskWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed after each task completion. MUST be provided to continue receiving task notifications.
                  example: "https://your-server.com/webhooks/task"
                stepWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed after each agent thought/action. MUST be provided to continue receiving step notifications.
                  example: "https://your-server.com/webhooks/step"
                crewWebhookUrl:
                  type: string
                  format: uri
                  description: Callback URL executed when the crew execution completes. MUST be provided to receive completion notification.
                  example: "https://your-server.com/webhooks/crew"
            examples:
              approve_and_continue:
                summary: Approve task and continue execution
                value:
                  execution_id: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                  task_id: "research_task"
                  human_feedback: "Excellent research! Proceed to the next task."
                  is_approve: true
                  taskWebhookUrl: "https://api.example.com/webhooks/task"
                  stepWebhookUrl: "https://api.example.com/webhooks/step"
                  crewWebhookUrl: "https://api.example.com/webhooks/crew"
              request_revision:
                summary: Request task revision with feedback
                value:
                  execution_id: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
                  task_id: "analysis_task"
                  human_feedback: "Please include more quantitative data and cite your sources."
                  is_approve: false
                  taskWebhookUrl: "https://api.example.com/webhooks/task"
                  crewWebhookUrl: "https://api.example.com/webhooks/crew"
      responses:
        '200':
          description: Execution resumed successfully
          content:
            application/json:
              schema:
                type: object
                properties:
                  status:
                    type: string
                    enum: ["resumed", "retrying", "completed"]
                    description: Status of the resumed execution
                    example: "resumed"
                  message:
                    type: string
                    description: Human-readable message about the resume operation
                    example: "Execution resumed successfully"
              examples:
                resumed:
                  summary: Execution resumed with positive feedback
                  value:
                    status: "resumed"
                    message: "Execution resumed successfully"
                retrying:
                  summary: Task will be retried with negative feedback
                  value:
                    status: "retrying"
                    message: "Task will be retried with your feedback"
        '400':
          description: Invalid request body or execution not in pending state
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
              example:
                error: "Invalid Request"
                message: "Execution is not in pending human input state"
        '401':
          $ref: '#/components/responses/UnauthorizedError'
        '404':
          description: Execution ID or Task ID not found
          content:
            application/json:
              schema:
                $ref: '#/components/schemas/Error'
              example:
                error: "Not Found"
                message: "Execution ID not found"
        '500':
          $ref: '#/components/responses/ServerError'

components:
  securitySchemes:
    BearerAuth:
BIN  (image removed) | Before: Size 317 KiB |
@@ -1,6 +0,0 @@
---
title: "POST /resume"
description: "Resume crew execution with human feedback"
openapi: "/enterprise-api.ko.yaml POST /resume"
mode: "wide"
---
@@ -40,28 +40,6 @@ mode: "wide"
<Frame>
  <img src="/images/enterprise/crew-resume-endpoint.png" alt="Crew Resume Endpoint" />
</Frame>

<Warning>
**Important: Webhook URLs must be provided again**:
You **must** provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`) in the resume call that you used in the kickoff call. Webhook settings are **not** carried over automatically from kickoff, so they must be included explicitly in the resume request to keep receiving notifications for task completion, agent steps, and crew completion.
</Warning>

Example resume call including webhooks:
```bash
curl -X POST {BASE_URL}/resume \
  -H "Authorization: Bearer YOUR_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "execution_id": "abcd1234-5678-90ef-ghij-klmnopqrstuv",
    "task_id": "research_task",
    "human_feedback": "Great work! Please add more details.",
    "is_approve": true,
    "taskWebhookUrl": "https://your-server.com/webhooks/task",
    "stepWebhookUrl": "https://your-server.com/webhooks/step",
    "crewWebhookUrl": "https://your-server.com/webhooks/crew"
  }'
```

<Warning>
**Impact of feedback on task execution**:
Take great care when providing feedback, because the entire feedback content is incorporated as additional context for subsequent task executions.
@@ -98,4 +76,4 @@ HITL workflows are especially useful for:
- Complex decision-making scenarios
- Sensitive or high-risk operations
- Creative tasks requiring human judgment
- Compliance and regulatory reviews
- Compliance and regulatory reviews
@@ -40,28 +40,6 @@ mode: "wide"
<Frame>
  <img src="/images/enterprise/crew-resume-endpoint.png" alt="Crew Resume Endpoint" />
</Frame>

<Warning>
**Important: Webhook URLs must be provided again**:
You **must** provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`) in the resume call that you used in the kickoff call. Webhook settings are **not** carried over automatically from kickoff, so they must be included explicitly in the resume request to keep receiving notifications for task completion, agent steps, and crew completion.
</Warning>

Example resume call including webhooks:
```bash
curl -X POST {BASE_URL}/resume \
  -H "Authorization: Bearer YOUR_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "execution_id": "abcd1234-5678-90ef-ghij-klmnopqrstuv",
    "task_id": "research_task",
    "human_feedback": "Great work! Please add more details.",
    "is_approve": true,
    "taskWebhookUrl": "https://your-server.com/webhooks/task",
    "stepWebhookUrl": "https://your-server.com/webhooks/step",
    "crewWebhookUrl": "https://your-server.com/webhooks/crew"
  }'
```

<Warning>
**Impact of feedback on task execution**:
Because the full feedback content is incorporated into subsequent task executions as additional context, it is very important to be deliberate when providing feedback.
@@ -98,4 +76,4 @@ HITL workflows are particularly useful in cases such as:
- Complex decision-making scenarios
- Sensitive or high-risk tasks
- Creative tasks requiring human judgment
- Compliance and regulatory reviews
- Compliance and regulatory reviews
@@ -1,6 +0,0 @@
---
title: "POST /resume"
description: "Resume crew execution with human feedback"
openapi: "/enterprise-api.pt-BR.yaml POST /resume"
mode: "wide"
---
@@ -40,28 +40,6 @@ Human-In-The-Loop (HITL) is a powerful approach that combines artificial intelligence
<Frame>
  <img src="/images/enterprise/crew-resume-endpoint.png" alt="Crew Resume Endpoint" />
</Frame>

<Warning>
**Critical: Webhook URLs Must Be Provided Again**:
You **must** provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`) in the resume call that you used in the kickoff call. Webhook configurations are **NOT** automatically carried over from kickoff - they must be explicitly included in the resume request to continue receiving notifications for task completion, agent steps, and crew completion.
</Warning>

Example resume call with webhooks:
```bash
curl -X POST {BASE_URL}/resume \
  -H "Authorization: Bearer YOUR_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "execution_id": "abcd1234-5678-90ef-ghij-klmnopqrstuv",
    "task_id": "research_task",
    "human_feedback": "Great work! Please add more details.",
    "is_approve": true,
    "taskWebhookUrl": "https://your-server.com/webhooks/task",
    "stepWebhookUrl": "https://your-server.com/webhooks/step",
    "crewWebhookUrl": "https://your-server.com/webhooks/crew"
  }'
```

<Warning>
**Feedback Impact on Task Execution**:
It is crucial to take care when providing feedback, since the entire feedback content will be incorporated as additional context for subsequent task executions.
@@ -98,4 +76,4 @@ HITL workflows are particularly valuable for:
- Complex decision-making scenarios
- Sensitive or high-risk operations
- Creative tasks that require human judgment
- Compliance and regulatory reviews
- Compliance and regulatory reviews
@@ -40,28 +40,6 @@ Human-in-the-Loop (HITL) is a powerful approach that combines artificial intelligence
<Frame>
  <img src="/images/enterprise/crew-resume-endpoint.png" alt="Crew Resume Endpoint" />
</Frame>

<Warning>
**Critical: Webhook URLs Must Be Provided Again**:
You **must** provide the same webhook URLs (`taskWebhookUrl`, `stepWebhookUrl`, `crewWebhookUrl`) in the resume call that you used in the kickoff call. Webhook configurations are **NOT** automatically carried over from kickoff - they must be explicitly included in the resume request to continue receiving notifications for task completion, agent steps, and crew completion.
</Warning>

Example resume call with webhooks:
```bash
curl -X POST {BASE_URL}/resume \
  -H "Authorization: Bearer YOUR_API_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "execution_id": "abcd1234-5678-90ef-ghij-klmnopqrstuv",
    "task_id": "research_task",
    "human_feedback": "Great work! Please add more details.",
    "is_approve": true,
    "taskWebhookUrl": "https://your-server.com/webhooks/task",
    "stepWebhookUrl": "https://your-server.com/webhooks/step",
    "crewWebhookUrl": "https://your-server.com/webhooks/crew"
  }'
```

<Warning>
**Feedback Impact on Task Execution**:
It is essential to take care when providing feedback, since the entire feedback content will be incorporated as additional context for future executions of the task.
@@ -98,4 +76,4 @@ HITL workflows are particularly valuable for:
- Complex decision-making scenarios
- Sensitive or high-risk operations
- Creative tasks that require human judgment
- Compliance and regulatory reviews
- Compliance and regulatory reviews
335  lib/crewai-tools/BUILDING_TOOLS.md  (Normal file)
@@ -0,0 +1,335 @@
## Building CrewAI Tools

This guide shows you how to build high‑quality CrewAI tools that match the patterns in this repository and are ready to be merged. It focuses on architecture, conventions, environment variables, dependencies, testing, documentation, and a complete example.

### Who this is for
- Contributors creating new tools under `crewai_tools/tools/*`
- Maintainers reviewing PRs for consistency and DX

---

## Quick‑start checklist
1. Create a new folder under `crewai_tools/tools/<your_tool_name>/` with a `README.md` and a `<your_tool_name>.py`.
2. Implement a class that ends with `Tool` and subclasses `BaseTool` (or `RagTool` when appropriate).
3. Define a Pydantic `args_schema` with explicit field descriptions and validation.
4. Declare `env_vars` and `package_dependencies` in the class when needed.
5. Lazily initialize clients in `__init__` or `_run` and handle missing credentials with clear errors.
6. Implement `_run(...) -> str | dict` and, if needed, `_arun(...)`.
7. Add tests under `tests/tools/` (unit, no real network calls; mock or record safely).
8. Add a concise tool `README.md` with usage and required env vars.
9. If you add optional dependencies, register them in `pyproject.toml` under `[project.optional-dependencies]` and reference that extra in your tool docs.
10. Run `uv run pytest` and `pre-commit run -a` locally; ensure green.

---
## Tool anatomy and conventions

### BaseTool pattern
All tools follow this structure:

```python
from typing import Any, List, Optional, Type

import os
from pydantic import BaseModel, Field
from crewai.tools import BaseTool, EnvVar


class MyToolInput(BaseModel):
    """Input schema for MyTool."""
    query: str = Field(..., description="Your input description here")
    limit: int = Field(5, ge=1, le=50, description="Max items to return")


class MyTool(BaseTool):
    name: str = "My Tool"
    description: str = "Explain succinctly what this tool does and when to use it."
    args_schema: Type[BaseModel] = MyToolInput

    # Only include when applicable
    env_vars: List[EnvVar] = [
        EnvVar(name="MY_API_KEY", description="API key for My service", required=True),
    ]
    package_dependencies: List[str] = ["my-sdk"]

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Lazy import to keep base install light
        try:
            import my_sdk  # noqa: F401
        except Exception as exc:
            raise ImportError(
                "Missing optional dependency 'my-sdk'. Install with: \n"
                "  uv add crewai-tools --extra my-sdk\n"
                "or\n"
                "  pip install my-sdk\n"
            ) from exc

        if "MY_API_KEY" not in os.environ:
            raise ValueError("Environment variable MY_API_KEY is required for MyTool")

    def _run(self, query: str, limit: int = 5, **_: Any) -> str:
        """Synchronous execution. Return a concise string or JSON string."""
        # Implement your logic here; do not print. Return the content.
        # Handle errors gracefully, return clear messages.
        return f"Processed {query} with limit={limit}"

    async def _arun(self, *args: Any, **kwargs: Any) -> str:
        """Optional async counterpart if your client supports it."""
        # Prefer delegating to _run when the client is thread-safe
        return self._run(*args, **kwargs)
```

Key points:
- Class name must end with `Tool` to be auto‑discovered by our tooling.
- Use `args_schema` for inputs; always include `description` and validation.
- Validate env vars early and fail with actionable errors.
- Keep outputs deterministic and compact; favor `str` (possibly JSON‑encoded) or small dicts converted to strings.
- Avoid printing; return the final string.
### Error handling
- Wrap network and I/O with try/except and return a helpful message. See `BraveSearchTool` and others for patterns.
- Validate required inputs and environment configuration with clear messages.
- Keep exceptions user‑friendly; do not leak stack traces (a sketch follows this list).
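As a hedged illustration of these points, here is a minimal sketch; the `fetch_summary` helper and its URL parameter are hypothetical, not an API from this repository:

```python
import requests


def fetch_summary(url: str) -> str:
    """Fetch a URL and return a short, user-friendly result string."""
    try:
        resp = requests.get(url, timeout=10)
        resp.raise_for_status()
    except requests.Timeout:
        return f"Request to {url} timed out after 10s; please retry later."
    except requests.HTTPError as exc:
        # Surface the status code, not a stack trace
        return f"Upstream service returned HTTP {exc.response.status_code} for {url}."
    except requests.RequestException as exc:
        return f"Could not reach {url}: {exc.__class__.__name__}."
    return resp.text[:500]
```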
### Rate limiting and retries
- If the upstream API enforces request pacing, implement minimal rate limiting (see `BraveSearchTool`).
- Consider idempotency and backoff for transient errors where appropriate; a pacing sketch follows below.
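One possible shape for minimal client-side pacing, assuming a fixed requests-per-second cap (a sketch, not the exact implementation in `BraveSearchTool`):

```python
import time


class MinIntervalLimiter:
    """Ensure successive calls are at least `interval` seconds apart."""

    def __init__(self, interval: float = 1.0) -> None:
        self.interval = interval
        self._last_call = 0.0

    def wait(self) -> None:
        # Sleep just long enough to respect the pacing window
        elapsed = time.monotonic() - self._last_call
        if elapsed < self.interval:
            time.sleep(self.interval - elapsed)
        self._last_call = time.monotonic()


limiter = MinIntervalLimiter(interval=1.0)  # assumed cap of 1 request/second
for query in ("a", "b", "c"):
    limiter.wait()
    # ... call the upstream API here ...
```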
### Async support
- Implement `_arun` only if your library has a true async client or your sync calls are thread‑safe.
- Otherwise, delegate `_arun` to `_run` as in multiple existing tools (see the sketch below).
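When the underlying client is synchronous but thread-safe, one hedged option is to offload the call to a worker thread rather than writing a separate async path:

```python
import asyncio


class ExampleAsyncDelegation:
    """Illustrative only; mirrors the `_run`/`_arun` pair on a real tool."""

    def _run(self, query: str) -> str:
        # Imagine a blocking, thread-safe SDK call here
        return f"result for {query}"

    async def _arun(self, query: str) -> str:
        # Offload the blocking call so the event loop stays responsive
        return await asyncio.to_thread(self._run, query)
```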
### Returning values
- Return a string (or JSON string) that’s ready to display in an agent transcript.
- If returning structured data, keep it small and human‑readable. Use stable keys and ordering (sketch below).
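For structured results, a small helper along these lines (the field names are hypothetical) keeps the output compact and deterministic:

```python
import json


def format_results(items: list[dict]) -> str:
    # Stable keys plus sorted serialization keep agent transcripts deterministic
    slim = [{"title": i.get("title", ""), "url": i.get("url", "")} for i in items]
    return json.dumps(slim, sort_keys=True, ensure_ascii=False)
```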
---
## RAG tools and adapters

If your tool is a knowledge source, consider extending `RagTool` and/or creating an adapter.

- `RagTool` exposes `add(...)` and a `query(question: str) -> str` contract through an `Adapter`.
- See `crewai_tools/tools/rag/rag_tool.py` and adapters like `embedchain_adapter.py` and `lancedb_adapter.py`.

Minimal adapter example:

```python
from typing import Any
from pydantic import BaseModel
from crewai_tools.tools.rag.rag_tool import Adapter, RagTool


class MemoryAdapter(Adapter):
    store: list[str] = []

    def add(self, text: str, **_: Any) -> None:
        self.store.append(text)

    def query(self, question: str) -> str:
        # naive demo: return all text containing any word from the question
        tokens = set(question.lower().split())
        hits = [t for t in self.store if tokens & set(t.lower().split())]
        return "\n".join(hits) if hits else "No relevant content found."


class MemoryRagTool(RagTool):
    name: str = "In‑memory RAG"
    description: str = "Toy RAG that stores text in memory and returns matches."
    adapter: Adapter = MemoryAdapter()
```

When using external vector DBs (MongoDB, Qdrant, Weaviate), study the existing tools to follow indexing, embedding, and query configuration patterns closely.

---
## Toolkits (multiple related tools)

Some integrations expose a toolkit (a group of tools) rather than a single class. See Bedrock `browser_toolkit.py` and `code_interpreter_toolkit.py`.

Guidelines:
- Provide small, focused `BaseTool` classes for each operation (e.g., `navigate`, `click`, `extract_text`).
- Offer a helper `create_<name>_toolkit(...) -> Tuple[ToolkitClass, List[BaseTool]]` to create tools and manage resources (see the sketch after this list).
- If you open external resources (browsers, interpreters), support cleanup methods and optionally context manager usage.
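A skeleton of that shape might look as follows; the class and helper names are illustrative, and real toolkits such as the Bedrock ones manage an actual browser or interpreter session:

```python
from typing import List, Tuple

from crewai.tools import BaseTool


class NavigateTool(BaseTool):
    name: str = "Browser Navigate"
    description: str = "Open a URL in the shared browser session."

    def _run(self, url: str) -> str:
        # A real implementation would drive the shared browser resource
        return f"Navigated to {url}"


class BrowserToolkit:
    """Owns the shared resource and hands out the per-operation tools."""

    def __init__(self) -> None:
        self.tools: List[BaseTool] = [NavigateTool()]

    def cleanup(self) -> None:
        # Close browsers/interpreters and release resources here
        pass


def create_browser_toolkit() -> Tuple[BrowserToolkit, List[BaseTool]]:
    toolkit = BrowserToolkit()
    return toolkit, toolkit.tools
```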
---
## Environment variables and dependencies

### env_vars
- Declare as `env_vars: List[EnvVar]` with `name`, `description`, `required`, and optional `default` (an example declaration follows below).
- Validate presence in `__init__` or on first `_run` call.

### Dependencies
- List runtime packages in `package_dependencies` on the class.
- If they are genuinely optional, add an extra under `[project.optional-dependencies]` in `pyproject.toml` (e.g., `tavily-python`, `serpapi`, `scrapfly-sdk`).
- Use lazy imports to avoid hard deps for users who don’t need the tool.
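Putting the `EnvVar` fields listed above together, a declaration might look like this; the variable names and the `default` value are illustrative:

```python
from typing import List

from crewai.tools import EnvVar

env_vars: List[EnvVar] = [
    EnvVar(name="MY_API_KEY", description="API key for My service", required=True),
    # `default` is used when the variable is not set in the environment
    EnvVar(name="MY_REGION", description="Service region", required=False, default="us-east-1"),
]
```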
---
## Testing
|
||||
|
||||
Place tests under `tests/tools/` and follow these rules:
|
||||
- Do not hit real external services in CI. Use mocks, fakes, or recorded fixtures where allowed.
|
||||
- Validate input validation, env var handling, error messages, and happy path output formatting.
|
||||
- Keep tests fast and deterministic.
|
||||
|
||||
Example skeleton (`tests/tools/my_tool_test.py`):

```python
import pytest

from crewai_tools.tools.my_tool.my_tool import MyTool


def test_requires_env_var(monkeypatch):
    monkeypatch.delenv("MY_API_KEY", raising=False)
    with pytest.raises(ValueError):
        MyTool()


def test_happy_path(monkeypatch):
    monkeypatch.setenv("MY_API_KEY", "test")
    tool = MyTool()
    result = tool.run(query="hello", limit=2)
    assert "hello" in result
```

Run locally:

```bash
uv run pytest
pre-commit run -a
```

---

## Documentation

Each tool must include a `README.md` in its folder with:
- What it does and when to use it
- Required env vars and optional extras (with install snippet)
- Minimal usage example

Update the root `README.md` only if the tool introduces a new category or notable capability.

---

## Discovery and specs

Our internal tooling discovers classes whose names end with `Tool`. Keep your class exported from the module path under `crewai_tools/tools/...` so it is picked up by scripts like `generate_tool_specs.py`.
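
For instance, regenerating the specs locally might look like this (the exact invocation is an assumption):

```bash
uv run python generate_tool_specs.py  # writes tool.specs.json next to the script
```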

---

## Full example: “Weather Search Tool”

This example demonstrates: `args_schema`, `env_vars`, `package_dependencies`, lazy imports, validation, and robust error handling.

```python
# file: crewai_tools/tools/weather_tool/weather_tool.py
from typing import Any, List, Optional, Type
import os

from pydantic import BaseModel, Field

from crewai.tools import BaseTool, EnvVar


class WeatherToolInput(BaseModel):
    """Input schema for WeatherTool."""

    city: str = Field(..., description="City name, e.g., 'Berlin'")
    country: Optional[str] = Field(None, description="ISO country code, e.g., 'DE'")
    units: str = Field(
        default="metric",
        description="Units system: 'metric' or 'imperial'",
        pattern=r"^(metric|imperial)$",
    )


class WeatherTool(BaseTool):
    name: str = "Weather Search"
    description: str = (
        "Look up current weather for a city using a public weather API."
    )
    args_schema: Type[BaseModel] = WeatherToolInput

    env_vars: List[EnvVar] = [
        EnvVar(
            name="WEATHER_API_KEY",
            description="API key for the weather service",
            required=True,
        ),
    ]
    package_dependencies: List[str] = ["requests"]

    base_url: str = "https://api.openweathermap.org/data/2.5/weather"

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        if "WEATHER_API_KEY" not in os.environ:
            raise ValueError("WEATHER_API_KEY is required for WeatherTool")

    def _run(self, city: str, country: Optional[str] = None, units: str = "metric") -> str:
        import requests  # lazy import: only needed when the tool actually runs

        try:
            q = f"{city},{country}" if country else city
            params = {
                "q": q,
                "units": units,
                "appid": os.environ["WEATHER_API_KEY"],
            }
            resp = requests.get(self.base_url, params=params, timeout=10)
            resp.raise_for_status()
            data = resp.json()

            main = data.get("weather", [{}])[0].get("main", "Unknown")
            desc = data.get("weather", [{}])[0].get("description", "")
            temp = data.get("main", {}).get("temp")
            feels = data.get("main", {}).get("feels_like")
            city_name = data.get("name", city)

            return (
                f"Weather in {city_name}: {main} ({desc}). "
                f"Temperature: {temp}°, feels like {feels}°."
            )
        except requests.Timeout:
            return "Weather service timed out. Please try again later."
        except requests.HTTPError as e:
            return f"Weather service error: {e.response.status_code} {e.response.text[:120]}"
        except Exception as e:
            return f"Unexpected error fetching weather: {e}"
```
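
A quick local smoke test might look like this (assuming `WEATHER_API_KEY` is set):

```python
tool = WeatherTool()
print(tool.run(city="Berlin", country="DE"))
```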

Folder layout:

```
crewai_tools/tools/weather_tool/
├─ weather_tool.py
└─ README.md
```

And `README.md` should document env vars and usage.

---

## PR checklist

- [ ] Tool lives under `crewai_tools/tools/<name>/`
- [ ] Class ends with `Tool` and subclasses `BaseTool` (or `RagTool`)
- [ ] Precise `args_schema` with descriptions and validation
- [ ] `env_vars` declared (if any) and validated
- [ ] `package_dependencies` and optional extras added in `pyproject.toml` (if any)
- [ ] Clear error handling; no prints
- [ ] Unit tests added (`tests/tools/`), fast and deterministic
- [ ] Tool `README.md` with usage and env vars
- [ ] `pre-commit` and `pytest` pass locally

---

## Tips for great DX

- Keep responses short and useful—agents quote your tool output directly.
- Validate early; fail fast with actionable guidance.
- Prefer lazy imports; minimize default install surface.
- Mirror patterns from similar tools in this repo for a consistent developer experience.

Happy building!

229 lib/crewai-tools/README.md Normal file
@@ -0,0 +1,229 @@

<div align="center">



<div align="left">

# CrewAI Tools

Empower your CrewAI agents with powerful, customizable tools to elevate their capabilities and tackle sophisticated, real-world tasks.

CrewAI Tools provide the essential functionality to extend your agents, helping you rapidly enhance your automations with reliable, ready-to-use tools or custom-built solutions tailored precisely to your needs.

---

## Quick Links

[Homepage](https://www.crewai.com/) | [Documentation](https://docs.crewai.com/) | [Examples](https://github.com/crewAIInc/crewAI-examples) | [Community](https://community.crewai.com/)

---

## Available Tools

CrewAI provides an extensive collection of powerful tools ready to enhance your agents:

- **File Management**: `FileReadTool`, `FileWriterTool`
- **Web Scraping**: `ScrapeWebsiteTool`, `SeleniumScrapingTool`
- **Database Integrations**: `MySQLSearchTool`
- **Vector Database Integrations**: `MongoDBVectorSearchTool`, `QdrantVectorSearchTool`, `WeaviateVectorSearchTool`
- **API Integrations**: `SerperApiTool`, `EXASearchTool`
- **AI-powered Tools**: `DallETool`, `VisionTool`, `StagehandTool`

And many more robust tools to simplify your agent integrations.

---

## Creating Custom Tools

CrewAI offers two straightforward approaches to creating custom tools:

### Subclassing `BaseTool`

Define your tool by subclassing:

```python
from crewai.tools import BaseTool


class MyCustomTool(BaseTool):
    name: str = "Tool Name"
    description: str = "Detailed description here."

    def _run(self, *args, **kwargs):
        # Your tool logic here; return a string for the agent to use
        return "tool output"
```

### Using the `tool` Decorator

Quickly create lightweight tools using decorators:

```python
from crewai import tool


@tool("Tool Name")
def my_custom_function(input: str) -> str:
    # Tool logic here
    return f"Processed: {input}"
```

---

## CrewAI Tools and MCP

CrewAI Tools supports the Model Context Protocol (MCP), giving you access to thousands of tools from the hundreds of MCP servers built by the community.

Before you start using MCP with CrewAI Tools, you need to install the `mcp` extra dependencies:

```bash
pip install crewai-tools[mcp]
# or
uv add crewai-tools --extra mcp
```

To quickly get started with MCP in CrewAI you have two options:

### Option 1: Fully managed connection

In this scenario we use a context manager (`with` statement) to start and stop the connection with the MCP server.
This is done in the background, and you only interact with the CrewAI tools corresponding to the MCP server's tools.

For an STDIO-based MCP server:

```python
import os

from crewai import Agent, Crew, Task
from mcp import StdioServerParameters

from crewai_tools import MCPServerAdapter

serverparams = StdioServerParameters(
    command="uvx",
    args=["--quiet", "pubmedmcp@0.1.3"],
    env={"UV_PYTHON": "3.12", **os.environ},
)

with MCPServerAdapter(serverparams) as tools:
    # tools is now a list of CrewAI Tools matching 1:1 with the MCP server's tools
    agent = Agent(..., tools=tools)
    task = Task(...)
    crew = Crew(..., agents=[agent], tasks=[task])
    crew.kickoff(...)
```

For an SSE-based MCP server:

```python
from crewai import Agent, Crew, Task

from crewai_tools import MCPServerAdapter

serverparams = {"url": "http://localhost:8000/sse"}

with MCPServerAdapter(serverparams) as tools:
    # tools is now a list of CrewAI Tools matching 1:1 with the MCP server's tools
    agent = Agent(..., tools=tools)
    task = Task(...)
    crew = Crew(..., agents=[agent], tasks=[task])
    crew.kickoff(...)
```

### Option 2: More control over the MCP connection

If you need more control over the MCP connection, you can instantiate `MCPServerAdapter` into an `mcp_server_adapter` object, which can then be used to manage the connection with the MCP server and access the available tools.

**Important**: in this case you need to call `mcp_server_adapter.stop()` to make sure the connection is correctly stopped. We recommend a `try ... finally` block so that `.stop()` is called even in case of errors.

Here is the same example for an STDIO MCP server:

```python
import os

from crewai import Agent, Crew, Task
from mcp import StdioServerParameters

from crewai_tools import MCPServerAdapter

serverparams = StdioServerParameters(
    command="uvx",
    args=["--quiet", "pubmedmcp@0.1.3"],
    env={"UV_PYTHON": "3.12", **os.environ},
)

try:
    mcp_server_adapter = MCPServerAdapter(serverparams)
    tools = mcp_server_adapter.tools
    # tools is now a list of CrewAI Tools matching 1:1 with the MCP server's tools
    agent = Agent(..., tools=tools)
    task = Task(...)
    crew = Crew(..., agents=[agent], tasks=[task])
    crew.kickoff(...)

# ** important ** don't forget to stop the connection
finally:
    mcp_server_adapter.stop()
```

And finally, the same thing but for an SSE MCP server:

```python
from crewai import Agent, Crew, Task

from crewai_tools import MCPServerAdapter

serverparams = {"url": "http://localhost:8000/sse"}

try:
    mcp_server_adapter = MCPServerAdapter(serverparams)
    tools = mcp_server_adapter.tools
    # tools is now a list of CrewAI Tools matching 1:1 with the MCP server's tools
    agent = Agent(..., tools=tools)
    task = Task(...)
    crew = Crew(..., agents=[agent], tasks=[task])
    crew.kickoff(...)

# ** important ** don't forget to stop the connection
finally:
    mcp_server_adapter.stop()
```

### Considerations & Limitations

#### Staying Safe with MCP

Always make sure that you trust an MCP server before using it. An STDIO server executes code on your machine, and SSE is not a silver bullet either: a malicious MCP server can still attempt many kinds of injection against your application.

#### Limitations

* At this time we only support tools from MCP servers, not other types of primitives such as prompts and resources.
* We only return the first text output returned by the MCP server tool, using `.content[0].text`.

---

## Why Use CrewAI Tools?

- **Simplicity & Flexibility**: Easy to use yet powerful enough for complex workflows.
- **Rapid Integration**: Seamlessly incorporate external services, APIs, and databases.
- **Enterprise Ready**: Built for stability, performance, and consistent results.

---

## Contribution Guidelines

We welcome contributions from the community!

1. Fork and clone the repository.
2. Create a new branch (`git checkout -b feature/my-feature`).
3. Commit your changes (`git commit -m 'Add my feature'`).
4. Push your branch (`git push origin feature/my-feature`).
5. Open a pull request.

---

## Developer Quickstart

```shell
pip install crewai[tools]
```

### Development Setup

- Install dependencies: `uv sync`
- Run tests: `uv run pytest`
- Run static type checking: `uv run pyright`
- Set up pre-commit hooks: `pre-commit install`

---

## Support and Community

Join our rapidly growing community and receive real-time support:

- [Discourse](https://community.crewai.com/)
- [Open an Issue](https://github.com/crewAIInc/crewAI/issues)

Build smarter, faster, and more powerful AI solutions—powered by CrewAI Tools.

156 lib/crewai-tools/generate_tool_specs.py Normal file
@@ -0,0 +1,156 @@

#!/usr/bin/env python3

from collections.abc import Mapping
import inspect
import json
from pathlib import Path
from typing import Any, cast

from crewai.tools.base_tool import BaseTool, EnvVar
from crewai_tools import tools
from pydantic import BaseModel
from pydantic.json_schema import GenerateJsonSchema
from pydantic_core import PydanticOmit


class SchemaGenerator(GenerateJsonSchema):
    def handle_invalid_for_json_schema(self, schema, error_info):
        raise PydanticOmit


class ToolSpecExtractor:
    def __init__(self) -> None:
        self.tools_spec: list[dict[str, Any]] = []
        self.processed_tools: set[str] = set()

    def extract_all_tools(self) -> list[dict[str, Any]]:
        for name in dir(tools):
            if name.endswith("Tool") and name not in self.processed_tools:
                obj = getattr(tools, name, None)
                if inspect.isclass(obj) and issubclass(obj, BaseTool):
                    self.extract_tool_info(obj)
                    self.processed_tools.add(name)
        return self.tools_spec

    def extract_tool_info(self, tool_class: type[BaseTool]) -> None:
        try:
            core_schema = tool_class.__pydantic_core_schema__
            if not core_schema:
                return

            schema = self._unwrap_schema(core_schema)
            fields = schema.get("schema", {}).get("fields", {})

            tool_info = {
                "name": tool_class.__name__,
                "humanized_name": self._extract_field_default(
                    fields.get("name"), fallback=tool_class.__name__
                ),
                "description": str(
                    self._extract_field_default(fields.get("description"))
                ).strip(),
                "run_params_schema": self._extract_params(fields.get("args_schema")),
                "init_params_schema": self._extract_init_params(tool_class),
                "env_vars": self._extract_env_vars(fields.get("env_vars")),
                "package_dependencies": self._extract_field_default(
                    fields.get("package_dependencies"), fallback=[]
                ),
            }

            self.tools_spec.append(tool_info)

        except Exception:  # noqa: S110
            pass

    @staticmethod
    def _unwrap_schema(schema: Mapping[str, Any] | dict[str, Any]) -> dict[str, Any]:
        result: dict[str, Any] = dict(schema)
        while (
            result.get("type") in {"function-after", "default"} and "schema" in result
        ):
            result = dict(result["schema"])
        return result

    @staticmethod
    def _extract_field_default(
        field: dict | None, fallback: str | list[Any] = ""
    ) -> str | list[Any] | int:
        if not field:
            return fallback

        schema = field.get("schema", {})
        default = schema.get("default")
        return default if isinstance(default, (list, str, int)) else fallback

    @staticmethod
    def _extract_params(args_schema_field: dict | None) -> dict[str, Any]:
        if not args_schema_field:
            return {}

        args_schema_class = args_schema_field.get("schema", {}).get("default")
        if not (
            inspect.isclass(args_schema_class)
            and issubclass(args_schema_class, BaseModel)
        ):
            return {}

        # Cast to type[BaseModel] after runtime check
        schema_class = cast(type[BaseModel], args_schema_class)
        try:
            return schema_class.model_json_schema(schema_generator=SchemaGenerator)
        except Exception:
            return {}

    @staticmethod
    def _extract_env_vars(env_vars_field: dict | None) -> list[dict[str, Any]]:
        if not env_vars_field:
            return []

        return [
            {
                "name": env_var.name,
                "description": env_var.description,
                "required": env_var.required,
                "default": env_var.default,
            }
            for env_var in env_vars_field.get("schema", {}).get("default", [])
            if isinstance(env_var, EnvVar)
        ]

    @staticmethod
    def _extract_init_params(tool_class: type[BaseTool]) -> dict[str, Any]:
        ignored_init_params = [
            "name",
            "description",
            "env_vars",
            "args_schema",
            "description_updated",
            "cache_function",
            "result_as_answer",
            "max_usage_count",
            "current_usage_count",
            "package_dependencies",
        ]

        json_schema = tool_class.model_json_schema(
            schema_generator=SchemaGenerator, mode="serialization"
        )

        json_schema["properties"] = {
            key: value
            for key, value in json_schema["properties"].items()
            if key not in ignored_init_params
        }
        return json_schema

    def save_to_json(self, output_path: str) -> None:
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump({"tools": self.tools_spec}, f, indent=2, sort_keys=True)


if __name__ == "__main__":
    output_file = Path(__file__).parent / "tool.specs.json"
    extractor = ToolSpecExtractor()

    extractor.extract_all_tools()
    extractor.save_to_json(str(output_file))

153 lib/crewai-tools/pyproject.toml Normal file
@@ -0,0 +1,153 @@

[project]
name = "crewai-tools"
dynamic = ["version"]
description = "Set of tools for the crewAI framework"
readme = "README.md"
authors = [
    { name = "João Moura", email = "joaomdmoura@gmail.com" },
]
requires-python = ">=3.10, <3.14"
dependencies = [
    "lancedb>=0.5.4",
    "pytube>=15.0.0",
    "requests>=2.32.5",
    "docker>=7.1.0",
    "crewai==1.0.0a1",
    "tiktoken>=0.8.0",
    "stagehand>=0.4.1",
    "beautifulsoup4>=4.13.4",
    "pypdf>=5.9.0",
    "python-docx>=1.2.0",
    "youtube-transcript-api>=1.2.2",
]

[project.urls]
Homepage = "https://crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
Documentation = "https://docs.crewai.com"

[project.optional-dependencies]
scrapfly-sdk = ["scrapfly-sdk>=0.8.19"]
sqlalchemy = ["sqlalchemy>=2.0.35"]
multion = ["multion>=1.1.0"]
firecrawl-py = ["firecrawl-py>=1.8.0"]
composio-core = ["composio-core>=0.6.11.post1"]
browserbase = ["browserbase>=1.0.5"]
weaviate-client = ["weaviate-client>=4.10.2"]
patronus = ["patronus>=0.0.16"]
serpapi = ["serpapi>=0.1.5"]
beautifulsoup4 = ["beautifulsoup4>=4.12.3"]
selenium = ["selenium>=4.27.1"]
spider-client = ["spider-client>=0.1.25"]
scrapegraph-py = ["scrapegraph-py>=1.9.0"]
linkup-sdk = ["linkup-sdk>=0.2.2"]
tavily-python = ["tavily-python>=0.5.4"]
hyperbrowser = ["hyperbrowser>=0.18.0"]
snowflake = [
    "cryptography>=43.0.3",
    "snowflake-connector-python>=3.12.4",
    "snowflake-sqlalchemy>=1.7.3",
]
singlestore = [
    "singlestoredb>=1.12.4",
    "SQLAlchemy>=2.0.40",
]
exa-py = ["exa-py>=1.8.7"]
qdrant-client = ["qdrant-client>=1.12.1"]
apify = ["langchain-apify>=0.1.2,<1.0.0"]
databricks-sdk = ["databricks-sdk>=0.46.0"]
couchbase = ["couchbase>=4.3.5"]
mcp = [
    "mcp>=1.6.0",
    "mcpadapt>=0.1.9",
]
stagehand = ["stagehand>=0.4.1"]
github = [
    "gitpython==3.1.38",
    "PyGithub==1.59.1",
]
rag = [
    "python-docx>=1.1.0",
    "lxml>=5.3.0,<5.4.0", # Pin to avoid etree import issues in 5.4.0
]
xml = ["unstructured[local-inference, all-docs]>=0.17.2"]
oxylabs = ["oxylabs==2.0.0"]
mongodb = ["pymongo>=4.13"]
mysql = ["pymysql>=1.1.1"]
postgresql = ["psycopg2-binary>=2.9.10"]
bedrock = [
    "beautifulsoup4>=4.13.4",
    "bedrock-agentcore>=0.1.0",
    "playwright>=1.52.0",
    "nest-asyncio>=1.6.0",
]
contextual = [
    "contextual-client>=0.1.0",
    "nest-asyncio>=1.6.0",
]

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.version]
path = "src/crewai_tools/__init__.py"

294 lib/crewai-tools/src/crewai_tools/__init__.py Normal file
@@ -0,0 +1,294 @@

from crewai_tools.adapters.enterprise_adapter import EnterpriseActionTool
from crewai_tools.adapters.mcp_adapter import MCPServerAdapter
from crewai_tools.adapters.zapier_adapter import ZapierActionTool
from crewai_tools.aws.bedrock.agents.invoke_agent_tool import BedrockInvokeAgentTool
from crewai_tools.aws.bedrock.knowledge_base.retriever_tool import BedrockKBRetrieverTool
from crewai_tools.aws.s3.reader_tool import S3ReaderTool
from crewai_tools.aws.s3.writer_tool import S3WriterTool
from crewai_tools.tools.ai_mind_tool.ai_mind_tool import AIMindTool
from crewai_tools.tools.apify_actors_tool.apify_actors_tool import ApifyActorsTool
from crewai_tools.tools.arxiv_paper_tool.arxiv_paper_tool import ArxivPaperTool
from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool
from crewai_tools.tools.brightdata_tool.brightdata_dataset import BrightDataDatasetTool
from crewai_tools.tools.brightdata_tool.brightdata_serp import BrightDataSearchTool
from crewai_tools.tools.brightdata_tool.brightdata_unlocker import BrightDataWebUnlockerTool
from crewai_tools.tools.browserbase_load_tool.browserbase_load_tool import BrowserbaseLoadTool
from crewai_tools.tools.code_docs_search_tool.code_docs_search_tool import CodeDocsSearchTool
from crewai_tools.tools.code_interpreter_tool.code_interpreter_tool import CodeInterpreterTool
from crewai_tools.tools.composio_tool.composio_tool import ComposioTool
from crewai_tools.tools.contextualai_create_agent_tool.contextual_create_agent_tool import ContextualAICreateAgentTool
from crewai_tools.tools.contextualai_parse_tool.contextual_parse_tool import ContextualAIParseTool
from crewai_tools.tools.contextualai_query_tool.contextual_query_tool import ContextualAIQueryTool
from crewai_tools.tools.contextualai_rerank_tool.contextual_rerank_tool import ContextualAIRerankTool
from crewai_tools.tools.couchbase_tool.couchbase_tool import CouchbaseFTSVectorSearchTool
from crewai_tools.tools.crewai_enterprise_tools.crewai_enterprise_tools import CrewaiEnterpriseTools
from crewai_tools.tools.crewai_platform_tools.crewai_platform_tools import CrewaiPlatformTools
from crewai_tools.tools.csv_search_tool.csv_search_tool import CSVSearchTool
from crewai_tools.tools.dalle_tool.dalle_tool import DallETool
from crewai_tools.tools.databricks_query_tool.databricks_query_tool import DatabricksQueryTool
from crewai_tools.tools.directory_read_tool.directory_read_tool import DirectoryReadTool
from crewai_tools.tools.directory_search_tool.directory_search_tool import DirectorySearchTool
from crewai_tools.tools.docx_search_tool.docx_search_tool import DOCXSearchTool
from crewai_tools.tools.exa_tools.exa_search_tool import EXASearchTool
from crewai_tools.tools.file_read_tool.file_read_tool import FileReadTool
from crewai_tools.tools.file_writer_tool.file_writer_tool import FileWriterTool
from crewai_tools.tools.files_compressor_tool.files_compressor_tool import FileCompressorTool
from crewai_tools.tools.firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import FirecrawlCrawlWebsiteTool
from crewai_tools.tools.firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import FirecrawlScrapeWebsiteTool
from crewai_tools.tools.firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool
from crewai_tools.tools.generate_crewai_automation_tool.generate_crewai_automation_tool import GenerateCrewaiAutomationTool
from crewai_tools.tools.github_search_tool.github_search_tool import GithubSearchTool
from crewai_tools.tools.hyperbrowser_load_tool.hyperbrowser_load_tool import HyperbrowserLoadTool
from crewai_tools.tools.invoke_crewai_automation_tool.invoke_crewai_automation_tool import InvokeCrewAIAutomationTool
from crewai_tools.tools.jina_scrape_website_tool.jina_scrape_website_tool import JinaScrapeWebsiteTool
from crewai_tools.tools.json_search_tool.json_search_tool import JSONSearchTool
from crewai_tools.tools.linkup.linkup_search_tool import LinkupSearchTool
from crewai_tools.tools.llamaindex_tool.llamaindex_tool import LlamaIndexTool
from crewai_tools.tools.mdx_search_tool.mdx_search_tool import MDXSearchTool
from crewai_tools.tools.mongodb_vector_search_tool.vector_search import (
    MongoDBVectorSearchConfig,
    MongoDBVectorSearchTool,
)
from crewai_tools.tools.multion_tool.multion_tool import MultiOnTool
from crewai_tools.tools.mysql_search_tool.mysql_search_tool import MySQLSearchTool
from crewai_tools.tools.nl2sql.nl2sql_tool import NL2SQLTool
from crewai_tools.tools.ocr_tool.ocr_tool import OCRTool
from crewai_tools.tools.oxylabs_amazon_product_scraper_tool.oxylabs_amazon_product_scraper_tool import OxylabsAmazonProductScraperTool
from crewai_tools.tools.oxylabs_amazon_search_scraper_tool.oxylabs_amazon_search_scraper_tool import OxylabsAmazonSearchScraperTool
from crewai_tools.tools.oxylabs_google_search_scraper_tool.oxylabs_google_search_scraper_tool import OxylabsGoogleSearchScraperTool
from crewai_tools.tools.oxylabs_universal_scraper_tool.oxylabs_universal_scraper_tool import OxylabsUniversalScraperTool
from crewai_tools.tools.parallel_tools.parallel_search_tool import ParallelSearchTool
from crewai_tools.tools.patronus_eval_tool.patronus_eval_tool import PatronusEvalTool
from crewai_tools.tools.patronus_eval_tool.patronus_local_evaluator_tool import PatronusLocalEvaluatorTool
from crewai_tools.tools.patronus_eval_tool.patronus_predefined_criteria_eval_tool import PatronusPredefinedCriteriaEvalTool
from crewai_tools.tools.pdf_search_tool.pdf_search_tool import PDFSearchTool
from crewai_tools.tools.qdrant_vector_search_tool.qdrant_search_tool import QdrantVectorSearchTool
from crewai_tools.tools.rag.rag_tool import RagTool
from crewai_tools.tools.scrape_element_from_website.scrape_element_from_website import ScrapeElementFromWebsiteTool
from crewai_tools.tools.scrape_website_tool.scrape_website_tool import ScrapeWebsiteTool
from crewai_tools.tools.scrapegraph_scrape_tool.scrapegraph_scrape_tool import (
    ScrapegraphScrapeTool,
    ScrapegraphScrapeToolSchema,
)
from crewai_tools.tools.scrapfly_scrape_website_tool.scrapfly_scrape_website_tool import ScrapflyScrapeWebsiteTool
from crewai_tools.tools.selenium_scraping_tool.selenium_scraping_tool import SeleniumScrapingTool
from crewai_tools.tools.serpapi_tool.serpapi_google_search_tool import SerpApiGoogleSearchTool
from crewai_tools.tools.serpapi_tool.serpapi_google_shopping_tool import SerpApiGoogleShoppingTool
from crewai_tools.tools.serper_dev_tool.serper_dev_tool import SerperDevTool
from crewai_tools.tools.serper_scrape_website_tool.serper_scrape_website_tool import SerperScrapeWebsiteTool
from crewai_tools.tools.serply_api_tool.serply_job_search_tool import SerplyJobSearchTool
from crewai_tools.tools.serply_api_tool.serply_news_search_tool import SerplyNewsSearchTool
from crewai_tools.tools.serply_api_tool.serply_scholar_search_tool import SerplyScholarSearchTool
from crewai_tools.tools.serply_api_tool.serply_web_search_tool import SerplyWebSearchTool
from crewai_tools.tools.serply_api_tool.serply_webpage_to_markdown_tool import SerplyWebpageToMarkdownTool
from crewai_tools.tools.singlestore_search_tool.singlestore_search_tool import SingleStoreSearchTool
from crewai_tools.tools.snowflake_search_tool.snowflake_search_tool import (
    SnowflakeConfig,
    SnowflakeSearchTool,
)
from crewai_tools.tools.spider_tool.spider_tool import SpiderTool
from crewai_tools.tools.stagehand_tool.stagehand_tool import StagehandTool
from crewai_tools.tools.tavily_extractor_tool.tavily_extractor_tool import TavilyExtractorTool
from crewai_tools.tools.tavily_search_tool.tavily_search_tool import TavilySearchTool
from crewai_tools.tools.txt_search_tool.txt_search_tool import TXTSearchTool
from crewai_tools.tools.vision_tool.vision_tool import VisionTool
from crewai_tools.tools.weaviate_tool.vector_search import WeaviateVectorSearchTool
from crewai_tools.tools.website_search.website_search_tool import WebsiteSearchTool
from crewai_tools.tools.xml_search_tool.xml_search_tool import XMLSearchTool
from crewai_tools.tools.youtube_channel_search_tool.youtube_channel_search_tool import YoutubeChannelSearchTool
from crewai_tools.tools.youtube_video_search_tool.youtube_video_search_tool import YoutubeVideoSearchTool
from crewai_tools.tools.zapier_action_tool.zapier_action_tool import ZapierActionTools


__all__ = [
    "AIMindTool", "ApifyActorsTool", "ArxivPaperTool", "BedrockInvokeAgentTool",
    "BedrockKBRetrieverTool", "BraveSearchTool", "BrightDataDatasetTool",
    "BrightDataSearchTool", "BrightDataWebUnlockerTool", "BrowserbaseLoadTool",
    "CSVSearchTool", "CodeDocsSearchTool", "CodeInterpreterTool", "ComposioTool",
    "ContextualAICreateAgentTool", "ContextualAIParseTool", "ContextualAIQueryTool",
    "ContextualAIRerankTool", "CouchbaseFTSVectorSearchTool", "CrewaiEnterpriseTools",
    "CrewaiPlatformTools", "DOCXSearchTool", "DallETool", "DatabricksQueryTool",
    "DirectoryReadTool", "DirectorySearchTool", "EXASearchTool", "EnterpriseActionTool",
    "FileCompressorTool", "FileReadTool", "FileWriterTool", "FirecrawlCrawlWebsiteTool",
    "FirecrawlScrapeWebsiteTool", "FirecrawlSearchTool", "GenerateCrewaiAutomationTool",
    "GithubSearchTool", "HyperbrowserLoadTool", "InvokeCrewAIAutomationTool",
    "JSONSearchTool", "JinaScrapeWebsiteTool", "LinkupSearchTool", "LlamaIndexTool",
    "MCPServerAdapter", "MDXSearchTool", "MongoDBVectorSearchConfig",
    "MongoDBVectorSearchTool", "MultiOnTool", "MySQLSearchTool", "NL2SQLTool",
    "OCRTool", "OxylabsAmazonProductScraperTool", "OxylabsAmazonSearchScraperTool",
    "OxylabsGoogleSearchScraperTool", "OxylabsUniversalScraperTool", "PDFSearchTool",
    "ParallelSearchTool", "PatronusEvalTool", "PatronusLocalEvaluatorTool",
    "PatronusPredefinedCriteriaEvalTool", "QdrantVectorSearchTool", "RagTool",
    "S3ReaderTool", "S3WriterTool", "ScrapeElementFromWebsiteTool", "ScrapeWebsiteTool",
    "ScrapegraphScrapeTool", "ScrapegraphScrapeToolSchema", "ScrapflyScrapeWebsiteTool",
    "SeleniumScrapingTool", "SerpApiGoogleSearchTool", "SerpApiGoogleShoppingTool",
    "SerperDevTool", "SerperScrapeWebsiteTool", "SerplyJobSearchTool",
    "SerplyNewsSearchTool", "SerplyScholarSearchTool", "SerplyWebSearchTool",
    "SerplyWebpageToMarkdownTool", "SingleStoreSearchTool", "SnowflakeConfig",
    "SnowflakeSearchTool", "SpiderTool", "StagehandTool", "TXTSearchTool",
    "TavilyExtractorTool", "TavilySearchTool", "VisionTool", "WeaviateVectorSearchTool",
    "WebsiteSearchTool", "XMLSearchTool", "YoutubeChannelSearchTool",
    "YoutubeVideoSearchTool", "ZapierActionTool", "ZapierActionTools",
]

__version__ = "1.0.0a2"

269 lib/crewai-tools/src/crewai_tools/adapters/crewai_rag_adapter.py Normal file
@@ -0,0 +1,269 @@

"""Adapter for CrewAI's native RAG system."""

import hashlib
from pathlib import Path
from typing import Any, TypeAlias, TypedDict

from crewai.rag.config.types import RagConfigType
from crewai.rag.config.utils import get_rag_client
from crewai.rag.core.base_client import BaseClient
from crewai.rag.factory import create_client
from crewai.rag.types import BaseRecord, SearchResult
from pydantic import PrivateAttr
from typing_extensions import Unpack

from crewai_tools.rag.data_types import DataType
from crewai_tools.rag.misc import sanitize_metadata_for_chromadb
from crewai_tools.tools.rag.rag_tool import Adapter


ContentItem: TypeAlias = str | Path | dict[str, Any]


class AddDocumentParams(TypedDict, total=False):
    """Parameters for adding documents to the RAG system."""

    data_type: DataType
    metadata: dict[str, Any]
    website: str
    url: str
    file_path: str | Path
    github_url: str
    youtube_url: str
    directory_path: str | Path


class CrewAIRagAdapter(Adapter):
    """Adapter that uses CrewAI's native RAG system.

    Supports custom vector database configuration through the config parameter.
    """

    collection_name: str = "default"
    summarize: bool = False
    similarity_threshold: float = 0.6
    limit: int = 5
    config: RagConfigType | None = None
    _client: BaseClient | None = PrivateAttr(default=None)

    def model_post_init(self, __context: Any) -> None:
        """Initialize the CrewAI RAG client after model initialization."""
        if self.config is not None:
            self._client = create_client(self.config)
        else:
            self._client = get_rag_client()
        self._client.get_or_create_collection(collection_name=self.collection_name)

    def query(
        self,
        question: str,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Query the knowledge base with a question.

        Args:
            question: The question to ask
            similarity_threshold: Minimum similarity score for results (default: 0.6)
            limit: Maximum number of results to return (default: 5)

        Returns:
            Relevant content from the knowledge base
        """
        search_limit = limit if limit is not None else self.limit
        search_threshold = (
            similarity_threshold
            if similarity_threshold is not None
            else self.similarity_threshold
        )

        results: list[SearchResult] = self._client.search(
            collection_name=self.collection_name,
            query=question,
            limit=search_limit,
            score_threshold=search_threshold,
        )

        if not results:
            return "No relevant content found."

        contents: list[str] = []
        for result in results:
            content: str = result.get("content", "")
            if content:
                contents.append(content)

        return "\n\n".join(contents)

    def add(self, *args: ContentItem, **kwargs: Unpack[AddDocumentParams]) -> None:
        """Add content to the knowledge base.

        This method handles various input types and converts them to documents
        for the vector database. It supports the data_type parameter for
        compatibility with existing tools.

        Args:
            *args: Content items to add (strings, paths, or document dicts)
            **kwargs: Additional parameters including data_type, metadata, etc.
        """
        import os

        from crewai_tools.rag.base_loader import LoaderResult
        from crewai_tools.rag.data_types import DataType, DataTypes
        from crewai_tools.rag.source_content import SourceContent

        documents: list[BaseRecord] = []
        data_type: DataType | None = kwargs.get("data_type")
        base_metadata: dict[str, Any] = kwargs.get("metadata", {})

        for arg in args:
            source_ref: str
            if isinstance(arg, dict):
                source_ref = str(arg.get("source", arg.get("content", "")))
            else:
                source_ref = str(arg)

            if not data_type:
                data_type = DataTypes.from_content(source_ref)

            if data_type == DataType.DIRECTORY:
                if not os.path.isdir(source_ref):
                    raise ValueError(f"Directory does not exist: {source_ref}")

                # Define binary and non-text file extensions to skip
                binary_extensions = {
                    ".pyc", ".pyo", ".png", ".jpg", ".jpeg", ".gif", ".bmp",
                    ".ico", ".svg", ".webp", ".pdf", ".zip", ".tar", ".gz",
                    ".bz2", ".7z", ".rar", ".exe", ".dll", ".so", ".dylib",
                    ".bin", ".dat", ".db", ".sqlite", ".class", ".jar",
                    ".war", ".ear",
                }

                for root, dirs, files in os.walk(source_ref):
                    dirs[:] = [d for d in dirs if not d.startswith(".")]

                    for filename in files:
                        if filename.startswith("."):
                            continue

                        # Skip binary files based on extension
                        file_ext = os.path.splitext(filename)[1].lower()
                        if file_ext in binary_extensions:
                            continue

                        # Skip __pycache__ directories
                        if "__pycache__" in root:
                            continue

                        file_path: str = os.path.join(root, filename)
                        try:
                            file_data_type: DataType = DataTypes.from_content(file_path)
                            file_loader = file_data_type.get_loader()
                            file_chunker = file_data_type.get_chunker()

                            file_source = SourceContent(file_path)
                            file_result: LoaderResult = file_loader.load(file_source)

                            file_chunks = file_chunker.chunk(file_result.content)

                            for chunk_idx, file_chunk in enumerate(file_chunks):
                                file_metadata: dict[str, Any] = base_metadata.copy()
                                file_metadata.update(file_result.metadata)
                                file_metadata["data_type"] = str(file_data_type)
                                file_metadata["file_path"] = file_path
                                file_metadata["chunk_index"] = chunk_idx
                                file_metadata["total_chunks"] = len(file_chunks)

                                if isinstance(arg, dict):
                                    file_metadata.update(arg.get("metadata", {}))

                                chunk_id = hashlib.sha256(
                                    f"{file_result.doc_id}_{chunk_idx}_{file_chunk}".encode()
                                ).hexdigest()

                                documents.append(
                                    {
                                        "doc_id": chunk_id,
                                        "content": file_chunk,
                                        "metadata": sanitize_metadata_for_chromadb(
                                            file_metadata
                                        ),
                                    }
                                )
                        except Exception:  # noqa: S112
                            # Silently skip files that can't be processed
                            continue
            else:
                metadata: dict[str, Any] = base_metadata.copy()

                if data_type in [
                    DataType.PDF_FILE,
                    DataType.TEXT_FILE,
                    DataType.DOCX,
                    DataType.CSV,
                    DataType.JSON,
                    DataType.XML,
                    DataType.MDX,
                ]:
                    if not os.path.isfile(source_ref):
                        raise FileNotFoundError(f"File does not exist: {source_ref}")

                loader = data_type.get_loader()
                chunker = data_type.get_chunker()

                source_content = SourceContent(source_ref)
                loader_result: LoaderResult = loader.load(source_content)

                chunks = chunker.chunk(loader_result.content)

                for i, chunk in enumerate(chunks):
                    chunk_metadata: dict[str, Any] = metadata.copy()
                    chunk_metadata.update(loader_result.metadata)
                    chunk_metadata["data_type"] = str(data_type)
                    chunk_metadata["chunk_index"] = i
                    chunk_metadata["total_chunks"] = len(chunks)
                    chunk_metadata["source"] = source_ref

                    if isinstance(arg, dict):
                        chunk_metadata.update(arg.get("metadata", {}))

                    chunk_id = hashlib.sha256(
                        f"{loader_result.doc_id}_{i}_{chunk}".encode()
                    ).hexdigest()

                    documents.append(
                        {
                            "doc_id": chunk_id,
                            "content": chunk,
                            "metadata": sanitize_metadata_for_chromadb(chunk_metadata),
                        }
                    )

        if documents:
            self._client.add_documents(
                collection_name=self.collection_name, documents=documents
            )
428
lib/crewai-tools/src/crewai_tools/adapters/enterprise_adapter.py
Normal file
@@ -0,0 +1,428 @@
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
from typing import Any, Literal, Optional, Union, cast, get_origin
|
||||
import warnings
|
||||
|
||||
from crewai.tools import BaseTool
|
||||
from pydantic import Field, create_model
|
||||
import requests
|
||||
|
||||
|
||||
def get_enterprise_api_base_url() -> str:
|
||||
"""Get the enterprise API base URL from environment or use default."""
|
||||
base_url = os.getenv("CREWAI_PLUS_URL", "https://app.crewai.com")
|
||||
return f"{base_url}/crewai_plus/api/v1/integrations"
|
||||
|
||||
|
||||
ENTERPRISE_API_BASE_URL = get_enterprise_api_base_url()
|
||||
|
||||
|
||||
class EnterpriseActionTool(BaseTool):
|
||||
"""A tool that executes a specific enterprise action."""
|
||||
|
||||
enterprise_action_token: str = Field(
|
||||
default="", description="The enterprise action token"
|
||||
)
|
||||
action_name: str = Field(default="", description="The name of the action")
|
||||
action_schema: dict[str, Any] = Field(
|
||||
default={}, description="The schema of the action"
|
||||
)
|
||||
enterprise_api_base_url: str = Field(
|
||||
default=ENTERPRISE_API_BASE_URL, description="The base API URL"
|
||||
)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
description: str,
|
||||
enterprise_action_token: str,
|
||||
action_name: str,
|
||||
action_schema: dict[str, Any],
|
||||
enterprise_api_base_url: str | None = None,
|
||||
):
|
||||
self._model_registry = {}
|
||||
self._base_name = self._sanitize_name(name)
|
||||
|
||||
schema_props, required = self._extract_schema_info(action_schema)
|
||||
|
||||
# Define field definitions for the model
|
||||
field_definitions = {}
|
||||
for param_name, param_details in schema_props.items():
|
||||
param_desc = param_details.get("description", "")
|
||||
is_required = param_name in required
|
||||
|
||||
try:
|
||||
field_type = self._process_schema_type(
|
||||
param_details, self._sanitize_name(param_name).title()
|
||||
)
|
||||
except Exception:
|
||||
field_type = str
|
||||
|
||||
# Create field definition based on requirement
|
||||
field_definitions[param_name] = self._create_field_definition(
|
||||
field_type, is_required, param_desc
|
||||
)
|
||||
|
||||
# Create the model
|
||||
if field_definitions:
|
||||
try:
|
||||
args_schema = create_model(
|
||||
f"{self._base_name}Schema", **field_definitions
|
||||
)
|
||||
except Exception:
|
||||
args_schema = create_model(
|
||||
f"{self._base_name}Schema",
|
||||
input_text=(str, Field(description="Input for the action")),
|
||||
)
|
||||
else:
|
||||
# Fallback for empty schema
|
||||
args_schema = create_model(
|
||||
f"{self._base_name}Schema",
|
||||
input_text=(str, Field(description="Input for the action")),
|
||||
)
|
||||
|
||||
super().__init__(name=name, description=description, args_schema=args_schema)
|
||||
self.enterprise_action_token = enterprise_action_token
|
||||
self.action_name = action_name
|
||||
self.action_schema = action_schema
|
||||
self.enterprise_api_base_url = (
|
||||
enterprise_api_base_url or get_enterprise_api_base_url()
|
||||
)
|
||||
|
||||
def _sanitize_name(self, name: str) -> str:
|
||||
"""Sanitize names to create proper Python class names."""
|
||||
sanitized = re.sub(r"[^a-zA-Z0-9_]", "", name)
|
||||
parts = sanitized.split("_")
|
||||
return "".join(word.capitalize() for word in parts if word)
|
||||
|
||||
def _extract_schema_info(
|
||||
self, action_schema: dict[str, Any]
|
||||
) -> tuple[dict[str, Any], list[str]]:
|
||||
"""Extract schema properties and required fields from action schema."""
|
||||
schema_props = (
|
||||
action_schema.get("function", {})
|
||||
.get("parameters", {})
|
||||
.get("properties", {})
|
||||
)
|
||||
required = (
|
||||
action_schema.get("function", {}).get("parameters", {}).get("required", [])
|
||||
)
|
||||
return schema_props, required
|
||||
|
||||
def _process_schema_type(self, schema: dict[str, Any], type_name: str) -> type[Any]:
|
||||
"""Process a JSON schema and return appropriate Python type."""
|
||||
if "anyOf" in schema:
|
||||
any_of_types = schema["anyOf"]
|
||||
is_nullable = any(t.get("type") == "null" for t in any_of_types)
|
||||
non_null_types = [t for t in any_of_types if t.get("type") != "null"]
|
||||
|
||||
if non_null_types:
|
||||
base_type = self._process_schema_type(non_null_types[0], type_name)
|
||||
return Optional[base_type] if is_nullable else base_type # noqa: UP045
|
||||
return cast(type[Any], Optional[str]) # noqa: UP045
|
||||
|
||||
if "oneOf" in schema:
|
||||
return self._process_schema_type(schema["oneOf"][0], type_name)
|
||||
|
||||
if "allOf" in schema:
|
||||
return self._process_schema_type(schema["allOf"][0], type_name)
|
||||
|
||||
json_type = schema.get("type", "string")
|
||||
|
||||
if "enum" in schema:
|
||||
enum_values = schema["enum"]
|
||||
if not enum_values:
|
||||
return self._map_json_type_to_python(json_type)
|
||||
return Literal[tuple(enum_values)] # type: ignore[return-value]
|
||||
|
||||
if json_type == "array":
|
||||
items_schema = schema.get("items", {"type": "string"})
|
||||
item_type = self._process_schema_type(items_schema, f"{type_name}Item")
|
||||
return list[item_type]
|
||||
|
||||
if json_type == "object":
|
||||
return self._create_nested_model(schema, type_name)
|
||||
|
||||
return self._map_json_type_to_python(json_type)
|
||||
|
||||
def _create_nested_model(
|
||||
self, schema: dict[str, Any], model_name: str
|
||||
) -> type[Any]:
|
||||
"""Create a nested Pydantic model for complex objects."""
|
||||
full_model_name = f"{self._base_name}{model_name}"
|
||||
|
||||
if full_model_name in self._model_registry:
|
||||
return self._model_registry[full_model_name]
|
||||
|
||||
properties = schema.get("properties", {})
|
||||
required_fields = schema.get("required", [])
|
||||
|
||||
if not properties:
|
||||
return dict
|
||||
|
||||
field_definitions = {}
|
||||
for prop_name, prop_schema in properties.items():
|
||||
prop_desc = prop_schema.get("description", "")
|
||||
is_required = prop_name in required_fields
|
||||
|
||||
try:
|
||||
prop_type = self._process_schema_type(
|
||||
prop_schema, f"{model_name}{self._sanitize_name(prop_name).title()}"
|
||||
)
|
||||
except Exception:
|
||||
prop_type = str
|
||||
|
||||
field_definitions[prop_name] = self._create_field_definition(
|
||||
prop_type, is_required, prop_desc
|
||||
)
|
||||
|
||||
try:
|
||||
nested_model = create_model(full_model_name, **field_definitions)
|
||||
self._model_registry[full_model_name] = nested_model
|
||||
return nested_model
|
||||
except Exception:
|
||||
return dict
|
||||
|
||||
def _create_field_definition(
|
||||
self, field_type: type[Any], is_required: bool, description: str
|
||||
) -> tuple:
|
||||
"""Create Pydantic field definition based on type and requirement."""
|
||||
if is_required:
|
||||
return (field_type, Field(description=description))
|
||||
if get_origin(field_type) is Union:
|
||||
return (field_type, Field(default=None, description=description))
|
||||
return (
|
||||
Optional[field_type], # noqa: UP045
|
||||
Field(default=None, description=description),
|
||||
)
|
||||
|
||||
def _map_json_type_to_python(self, json_type: str) -> type[Any]:
|
||||
"""Map basic JSON schema types to Python types."""
|
||||
type_mapping = {
|
||||
"string": str,
|
||||
"integer": int,
|
||||
"number": float,
|
||||
"boolean": bool,
|
||||
"array": list,
|
||||
"object": dict,
|
||||
"null": type(None),
|
||||
}
|
||||
return type_mapping.get(json_type, str)
|
||||
|
||||
def _get_required_nullable_fields(self) -> list[str]:
|
||||
"""Get a list of required nullable fields from the action schema."""
|
||||
schema_props, required = self._extract_schema_info(self.action_schema)
|
||||
|
||||
required_nullable_fields = []
|
||||
for param_name in required:
|
||||
param_details = schema_props.get(param_name, {})
|
||||
if self._is_nullable_type(param_details):
|
||||
required_nullable_fields.append(param_name)
|
||||
|
||||
return required_nullable_fields
|
||||
|
||||
def _is_nullable_type(self, schema: dict[str, Any]) -> bool:
|
||||
"""Check if a schema represents a nullable type."""
|
||||
if "anyOf" in schema:
|
||||
return any(t.get("type") == "null" for t in schema["anyOf"])
|
||||
return schema.get("type") == "null"
|
||||
|
||||
def _run(self, **kwargs) -> str:
|
||||
"""Execute the specific enterprise action with validated parameters."""
|
||||
try:
|
||||
cleaned_kwargs = {}
|
||||
for key, value in kwargs.items():
|
||||
if value is not None:
|
||||
cleaned_kwargs[key] = value # noqa: PERF403
|
||||
|
||||
required_nullable_fields = self._get_required_nullable_fields()
|
||||
|
||||
for field_name in required_nullable_fields:
|
||||
if field_name not in cleaned_kwargs:
|
||||
cleaned_kwargs[field_name] = None
|
||||
|
||||
api_url = (
|
||||
f"{self.enterprise_api_base_url}/actions/{self.action_name}/execute"
|
||||
)
|
||||
headers = {
|
||||
"Authorization": f"Bearer {self.enterprise_action_token}",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
payload = cleaned_kwargs
|
||||
|
||||
response = requests.post(
|
||||
url=api_url, headers=headers, json=payload, timeout=60
|
||||
)
|
||||
|
||||
data = response.json()
|
||||
if not response.ok:
|
||||
error_message = data.get("error", {}).get("message", json.dumps(data))
|
||||
return f"API request failed: {error_message}"
|
||||
|
||||
return json.dumps(data, indent=2)
|
||||
|
||||
except Exception as e:
|
||||
return f"Error executing action {self.action_name}: {e!s}"
|
||||
|
||||
|
||||
class EnterpriseActionKitToolAdapter:
    """Adapter that creates BaseTool instances for enterprise actions."""

    def __init__(
        self,
        enterprise_action_token: str,
        enterprise_api_base_url: str | None = None,
    ):
        """Initialize the adapter with an enterprise action token."""
        self._set_enterprise_action_token(enterprise_action_token)
        self._actions_schema = {}
        self._tools = None
        self.enterprise_api_base_url = (
            enterprise_api_base_url or get_enterprise_api_base_url()
        )

    def tools(self) -> list[BaseTool]:
        """Get the list of tools created from enterprise actions."""
        if self._tools is None:
            self._fetch_actions()
            self._create_tools()
        return self._tools or []

    def _fetch_actions(self):
        """Fetch available actions from the API."""
        try:
            actions_url = f"{self.enterprise_api_base_url}/actions"
            headers = {"Authorization": f"Bearer {self.enterprise_action_token}"}

            response = requests.get(actions_url, headers=headers, timeout=30)
            response.raise_for_status()

            raw_data = response.json()
            if "actions" not in raw_data:
                return

            parsed_schema = {}
            action_categories = raw_data["actions"]

            for action_list in action_categories.values():
                if isinstance(action_list, list):
                    for action in action_list:
                        action_name = action.get("name")
                        if action_name:
                            action_schema = {
                                "function": {
                                    "name": action_name,
                                    "description": action.get(
                                        "description", f"Execute {action_name}"
                                    ),
                                    "parameters": action.get("parameters", {}),
                                }
                            }
                            parsed_schema[action_name] = action_schema

            self._actions_schema = parsed_schema

        except Exception:
            import traceback

            traceback.print_exc()

    def _generate_detailed_description(
        self, schema: dict[str, Any], indent: int = 0
    ) -> list[str]:
        """Generate detailed description for nested schema structures."""
        descriptions = []
        indent_str = " " * indent

        schema_type = schema.get("type", "string")

        if schema_type == "object":
            properties = schema.get("properties", {})
            required_fields = schema.get("required", [])

            if properties:
                descriptions.append(f"{indent_str}Object with properties:")
                for prop_name, prop_schema in properties.items():
                    prop_desc = prop_schema.get("description", "")
                    is_required = prop_name in required_fields
                    req_str = " (required)" if is_required else " (optional)"
                    descriptions.append(
                        f"{indent_str} - {prop_name}: {prop_desc}{req_str}"
                    )

                    if prop_schema.get("type") == "object":
                        descriptions.extend(
                            self._generate_detailed_description(prop_schema, indent + 2)
                        )
                    elif prop_schema.get("type") == "array":
                        items_schema = prop_schema.get("items", {})
                        if items_schema.get("type") == "object":
                            descriptions.append(f"{indent_str} Array of objects:")
                            descriptions.extend(
                                self._generate_detailed_description(
                                    items_schema, indent + 3
                                )
                            )
                        elif "enum" in items_schema:
                            descriptions.append(
                                f"{indent_str} Array of enum values: {items_schema['enum']}"
                            )
                    elif "enum" in prop_schema:
                        descriptions.append(
                            f"{indent_str} Enum values: {prop_schema['enum']}"
                        )

        return descriptions

    def _create_tools(self):
        """Create BaseTool instances for each action."""
        tools = []

        for action_name, action_schema in self._actions_schema.items():
            function_details = action_schema.get("function", {})
            description = function_details.get("description", f"Execute {action_name}")

            parameters = function_details.get("parameters", {})
            param_descriptions = []

            if parameters.get("properties"):
                param_descriptions.append("\nDetailed Parameter Structure:")
                param_descriptions.extend(
                    self._generate_detailed_description(parameters)
                )

            full_description = description + "\n".join(param_descriptions)

            tool = EnterpriseActionTool(
                name=action_name.lower().replace(" ", "_"),
                description=full_description,
                action_name=action_name,
                action_schema=action_schema,
                enterprise_action_token=self.enterprise_action_token,
                enterprise_api_base_url=self.enterprise_api_base_url,
            )

            tools.append(tool)

        self._tools = tools

    def _set_enterprise_action_token(self, enterprise_action_token: str | None):
        if enterprise_action_token and not enterprise_action_token.startswith("PK_"):
            warnings.warn(
                "Legacy token detected, please consider using the new Enterprise Action Auth token. Check out our docs for more information https://docs.crewai.com/en/enterprise/features/integrations.",
                DeprecationWarning,
                stacklevel=2,
            )

        token = enterprise_action_token or os.environ.get(
            "CREWAI_ENTERPRISE_TOOLS_TOKEN"
        )

        self.enterprise_action_token = token

    def __enter__(self):
        return self.tools()

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass
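For orientation, a minimal usage sketch of this adapter follows; the token value is a placeholder, and calling `tools()` performs a live request against the enterprise actions API:

# Minimal usage sketch; "PK_example_token" is a placeholder value.
adapter = EnterpriseActionKitToolAdapter(enterprise_action_token="PK_example_token")

# tools() lazily fetches the action schema and builds one BaseTool per action.
for tool in adapter.tools():
    print(tool.name)

# The class is also a context manager that yields the same tool list.
with EnterpriseActionKitToolAdapter(enterprise_action_token="PK_example_token") as tools:
    print(len(tools))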
@@ -0,0 +1,56 @@
from collections.abc import Callable
from pathlib import Path
from typing import Any

from lancedb import DBConnection as LanceDBConnection, connect as lancedb_connect
from lancedb.table import Table as LanceDBTable
from openai import Client as OpenAIClient
from pydantic import Field, PrivateAttr

from crewai_tools.tools.rag.rag_tool import Adapter


def _default_embedding_function():
    client = OpenAIClient()

    def _embedding_function(input):
        rs = client.embeddings.create(input=input, model="text-embedding-ada-002")
        return [record.embedding for record in rs.data]

    return _embedding_function


class LanceDBAdapter(Adapter):
    uri: str | Path
    table_name: str
    embedding_function: Callable = Field(default_factory=_default_embedding_function)
    top_k: int = 3
    vector_column_name: str = "vector"
    text_column_name: str = "text"

    _db: LanceDBConnection = PrivateAttr()
    _table: LanceDBTable = PrivateAttr()

    def model_post_init(self, __context: Any) -> None:
        self._db = lancedb_connect(self.uri)
        self._table = self._db.open_table(self.table_name)

        super().model_post_init(__context)

    def query(self, question: str) -> str:
        query = self.embedding_function([question])[0]
        results = (
            self._table.search(query, vector_column_name=self.vector_column_name)
            .limit(self.top_k)
            .select([self.text_column_name])
            .to_list()
        )
        values = [result[self.text_column_name] for result in results]
        return "\n".join(values)

    def add(
        self,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        self._table.add(*args, **kwargs)
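A minimal usage sketch, assuming the URI and table name below are placeholders, the table already exists with the default "vector" and "text" columns, and OPENAI_API_KEY is set for the default embedding function:

# Minimal usage sketch; "./lancedb" and "documents" are placeholder values.
adapter = LanceDBAdapter(uri="./lancedb", table_name="documents")

text = "CrewAI supports RAG tools."
# add() forwards to LanceDB's table.add, which accepts a list of row dicts.
adapter.add([{"text": text, "vector": adapter.embedding_function([text])[0]}])
print(adapter.query("What does CrewAI support?"))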
163
lib/crewai-tools/src/crewai_tools/adapters/mcp_adapter.py
Normal file
@@ -0,0 +1,163 @@
"""MCPServer for CrewAI."""

from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any

from crewai.tools import BaseTool

from crewai_tools.adapters.tool_collection import ToolCollection


logger = logging.getLogger(__name__)

if TYPE_CHECKING:
    from mcp import StdioServerParameters
    from mcpadapt.core import MCPAdapt
    from mcpadapt.crewai_adapter import CrewAIAdapter


try:
    from mcp import StdioServerParameters
    from mcpadapt.core import MCPAdapt
    from mcpadapt.crewai_adapter import CrewAIAdapter

    MCP_AVAILABLE = True
except ImportError:
    MCP_AVAILABLE = False


class MCPServerAdapter:
    """Manages the lifecycle of an MCP server and makes its tools available to CrewAI.

    Note: tools can only be accessed after the server has been started with the
    `start()` method.

    Attributes:
        tools: The CrewAI tools available from the MCP server.

    Usage:
        # context manager + stdio
        with MCPServerAdapter(...) as tools:
            # tools is now available

        # context manager + sse
        with MCPServerAdapter({"url": "http://localhost:8000/sse"}) as tools:
            # tools is now available

        # context manager with filtered tools
        with MCPServerAdapter(..., "tool1", "tool2") as filtered_tools:
            # only tool1 and tool2 are available

        # context manager with custom connect timeout (60 seconds)
        with MCPServerAdapter(..., connect_timeout=60) as tools:
            # tools is now available with longer timeout

        # manually stop mcp server
        try:
            mcp_server = MCPServerAdapter(...)
            tools = mcp_server.tools  # all tools

            # or with filtered tools and custom timeout
            mcp_server = MCPServerAdapter(..., "tool1", "tool2", connect_timeout=45)
            filtered_tools = mcp_server.tools  # only tool1 and tool2
            ...
        finally:
            mcp_server.stop()

        # Best practice is to ensure cleanup is done after use.
        mcp_server.stop()  # run after crew().kickoff()
    """

    def __init__(
        self,
        serverparams: StdioServerParameters | dict[str, Any],
        *tool_names: str,
        connect_timeout: int = 30,
    ) -> None:
        """Initialize the MCP Server.

        Args:
            serverparams: The parameters for the MCP server. Supports either a
                `StdioServerParameters` (STDIO) or a `dict` (SSE).
            *tool_names: Optional names of tools to filter. If provided, only tools with
                matching names will be available.
            connect_timeout: Connection timeout in seconds to the MCP server (default is 30s).

        """
        super().__init__()
        self._adapter = None
        self._tools = None
        self._tool_names = list(tool_names) if tool_names else None

        if not MCP_AVAILABLE:
            import click

            if click.confirm(
                "You are missing the 'mcp' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(["uv", "add", "mcp", "crewai-tools[mcp]"], check=True)  # noqa: S607

                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install mcp package") from e
            else:
                raise ImportError(
                    "`mcp` package not found, please run `uv add crewai-tools[mcp]`"
                )

        try:
            self._serverparams = serverparams
            self._adapter = MCPAdapt(
                self._serverparams, CrewAIAdapter(), connect_timeout
            )
            self.start()

        except Exception as e:
            if self._adapter is not None:
                try:
                    self.stop()
                except Exception as stop_e:
                    logger.error(f"Error during stop cleanup: {stop_e}")
            raise RuntimeError(f"Failed to initialize MCP Adapter: {e}") from e

    def start(self):
        """Start the MCP server and initialize the tools."""
        self._tools = self._adapter.__enter__()

    def stop(self):
        """Stop the MCP server."""
        self._adapter.__exit__(None, None, None)

    @property
    def tools(self) -> ToolCollection[BaseTool]:
        """The CrewAI tools available from the MCP server.

        Raises:
            ValueError: If the MCP server is not started.

        Returns:
            The CrewAI tools available from the MCP server.
        """
        if self._tools is None:
            raise ValueError(
                "MCP server not started, run `mcp_server.start()` first before accessing `tools`"
            )

        tools_collection = ToolCollection(self._tools)
        if self._tool_names:
            return tools_collection.filter_by_names(self._tool_names)
        return tools_collection

    def __enter__(self):
        """Enter the context manager. Note that `__init__()` already starts the
        MCP server, so tools should already be available.
        """
        return self.tools

    def __exit__(self, exc_type, exc_value, traceback):
        """Exit the context manager."""
        return self._adapter.__exit__(exc_type, exc_value, traceback)
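The `...` placeholders in the docstring can be filled in as below. This is a sketch only: "my-mcp-server" stands in for a real MCP server package, and it assumes the optional mcp extras are installed:

# Minimal usage sketch; "my-mcp-server" is a hypothetical server package.
from mcp import StdioServerParameters

serverparams = StdioServerParameters(
    command="uvx",
    args=["my-mcp-server"],
)

with MCPServerAdapter(serverparams, connect_timeout=60) as tools:
    print([tool.name for tool in tools])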
38
lib/crewai-tools/src/crewai_tools/adapters/rag_adapter.py
Normal file
@@ -0,0 +1,38 @@
from typing import Any

from crewai_tools.rag.core import RAG
from crewai_tools.tools.rag.rag_tool import Adapter


class RAGAdapter(Adapter):
    def __init__(
        self,
        collection_name: str = "crewai_knowledge_base",
        persist_directory: str | None = None,
        embedding_model: str = "text-embedding-3-small",
        top_k: int = 5,
        embedding_api_key: str | None = None,
        **embedding_kwargs,
    ):
        super().__init__()

        # Prepare embedding configuration
        embedding_config = {"api_key": embedding_api_key, **embedding_kwargs}

        self._adapter = RAG(
            collection_name=collection_name,
            persist_directory=persist_directory,
            embedding_model=embedding_model,
            top_k=top_k,
            embedding_config=embedding_config,
        )

    def query(self, question: str) -> str:
        return self._adapter.query(question)

    def add(
        self,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        self._adapter.add(*args, **kwargs)
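A minimal usage sketch; `add()` forwards its arguments verbatim to `crewai_tools.rag.core.RAG.add`, so the raw-text call below is an assumption about that signature, and an embedding API key is read from the environment:

# Minimal usage sketch; assumes RAG.add accepts raw text content.
import os

adapter = RAGAdapter(
    collection_name="docs",
    top_k=3,
    embedding_api_key=os.environ.get("OPENAI_API_KEY"),
)
adapter.add("CrewAI agents can use RAG-backed knowledge.")
print(adapter.query("What can CrewAI agents use?"))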
@@ -0,0 +1,77 @@
from collections.abc import Callable
from typing import Generic, TypeVar

from crewai.tools import BaseTool


T = TypeVar("T", bound=BaseTool)


class ToolCollection(list, Generic[T]):
    """A collection of tools that can be accessed by index or name.

    This class extends the built-in list to provide dictionary-like
    access to tools based on their name property.

    Usage:
        tools = ToolCollection(list_of_tools)

        # Access by index (regular list behavior)
        first_tool = tools[0]

        # Access by name (new functionality)
        search_tool = tools["search"]
    """

    def __init__(self, tools: list[T] | None = None):
        super().__init__(tools or [])
        self._name_cache: dict[str, T] = {}
        self._build_name_cache()

    def _build_name_cache(self) -> None:
        self._name_cache = {tool.name.lower(): tool for tool in self}

    def __getitem__(self, key: int | str) -> T:
        if isinstance(key, str):
            return self._name_cache[key.lower()]
        return super().__getitem__(key)

    def append(self, tool: T) -> None:
        super().append(tool)
        self._name_cache[tool.name.lower()] = tool

    def extend(self, tools: list[T]) -> None:
        super().extend(tools)
        self._build_name_cache()

    def insert(self, index: int, tool: T) -> None:
        super().insert(index, tool)
        self._name_cache[tool.name.lower()] = tool

    def remove(self, tool: T) -> None:
        super().remove(tool)
        if tool.name.lower() in self._name_cache:
            del self._name_cache[tool.name.lower()]

    def pop(self, index: int = -1) -> T:
        tool = super().pop(index)
        if tool.name.lower() in self._name_cache:
            del self._name_cache[tool.name.lower()]
        return tool

    def filter_by_names(self, names: list[str] | None = None) -> "ToolCollection[T]":
        if names is None:
            return self

        return ToolCollection(
            [
                tool
                for name in names
                if (tool := self._name_cache.get(name.lower())) is not None
            ]
        )

    def filter_where(self, func: Callable[[T], bool]) -> "ToolCollection[T]":
        return ToolCollection([tool for tool in self if func(tool)])

    def clear(self) -> None:
        super().clear()
        self._name_cache.clear()
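To make the name-based access concrete, a minimal self-contained sketch; `EchoTool` is a hypothetical BaseTool subclass introduced only for illustration:

# Minimal usage sketch; EchoTool is a placeholder tool.
from crewai.tools import BaseTool


class EchoTool(BaseTool):
    name: str = "Echo"
    description: str = "Returns its input unchanged."

    def _run(self, text: str) -> str:
        return text


tools = ToolCollection([EchoTool()])
print(tools[0].name)        # index access, regular list behavior
print(tools["echo"].name)   # name access is case-insensitive via the cache

subset = tools.filter_by_names(["echo"])                          # keep named tools
short = tools.filter_where(lambda t: "unchanged" in t.description)  # predicate filter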