mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-05-08 02:29:00 +00:00
Compare commits
52 Commits
refactor/e
...
docs/oss-u
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
910f051eb2 | ||
|
|
0844ed3c4e | ||
|
|
874c34f1da | ||
|
|
853b15fb3d | ||
|
|
c67f6f63dc | ||
|
|
964066e86b | ||
|
|
74a1ff8db5 | ||
|
|
d73924d23a | ||
|
|
d6f7e7d5f8 | ||
|
|
d165bcb65f | ||
|
|
fa6287327d | ||
|
|
e961a005cb | ||
|
|
93e786d263 | ||
|
|
ec8a522c2c | ||
|
|
e25f6538a8 | ||
|
|
470d4035db | ||
|
|
57d1b338f7 | ||
|
|
01df19b029 | ||
|
|
dca2c3160f | ||
|
|
6494d68ffc | ||
|
|
f579aa53ae | ||
|
|
a23e118b11 | ||
|
|
095f796922 | ||
|
|
bfbdba426f | ||
|
|
a058a3b15b | ||
|
|
184c228ae9 | ||
|
|
c9100cb51d | ||
|
|
17e82743f6 | ||
|
|
3403f3cba9 | ||
|
|
5db72250b2 | ||
|
|
a071838e92 | ||
|
|
cd2b9ee38a | ||
|
|
07c4a30f2e | ||
|
|
b30fdbaa0e | ||
|
|
898f860916 | ||
|
|
2c0323c3fe | ||
|
|
c580d428f0 | ||
|
|
70f391994e | ||
|
|
864f0a8a91 | ||
|
|
9f13235037 | ||
|
|
c7f01048b7 | ||
|
|
14c3963d2c | ||
|
|
feb2e715a3 | ||
|
|
e0b86750c2 | ||
|
|
2a40316521 | ||
|
|
e2deac5575 | ||
|
|
e1b53f684a | ||
|
|
4b49fc9ac6 | ||
|
|
07667829e9 | ||
|
|
0154d16fd8 | ||
|
|
4c74dc0f86 | ||
|
|
13e0e9be6b |
5
.github/security.md
vendored
5
.github/security.md
vendored
@@ -5,7 +5,10 @@ CrewAI ecosystem.
|
||||
|
||||
### How to Report
|
||||
|
||||
Please submit reports to **crewai-vdp-ess@submit.bugcrowd.com**
|
||||
Please submit reports through one of the following channels:
|
||||
|
||||
- **crewai-vdp-ess@submit.bugcrowd.com**
|
||||
- https://security.crewai.com
|
||||
|
||||
- **Please do not** disclose vulnerabilities via public GitHub issues, pull requests,
|
||||
or social media
|
||||
|
||||
55
.github/workflows/nightly.yml
vendored
55
.github/workflows/nightly.yml
vendored
@@ -5,6 +5,10 @@ on:
|
||||
- cron: '0 6 * * *' # daily at 6am UTC
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: nightly-publish
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
check:
|
||||
name: Check for new commits
|
||||
@@ -18,10 +22,11 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Check for commits in last 24h
|
||||
- name: Check for recent commits
|
||||
id: check
|
||||
run: |
|
||||
RECENT=$(git log --since="24 hours ago" --oneline | head -1)
|
||||
# 25h window absorbs cron-vs-commit timing skew at the boundary.
|
||||
RECENT=$(git log --since="25 hours ago" --oneline | head -1)
|
||||
if [ -n "$RECENT" ]; then
|
||||
echo "has_changes=true" >> "$GITHUB_OUTPUT"
|
||||
else
|
||||
@@ -38,34 +43,42 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v4
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "0.11.3"
|
||||
python-version: "3.12"
|
||||
enable-cache: false
|
||||
|
||||
- name: Stamp nightly versions
|
||||
run: |
|
||||
DATE=$(date +%Y%m%d)
|
||||
|
||||
# All workspace packages share the same base version and are released together.
|
||||
BASE=$(python -c "
|
||||
import re
|
||||
print(re.search(r'__version__\s*=\s*\"(.*?)\"', open('lib/crewai/src/crewai/__init__.py').read()).group(1))
|
||||
")
|
||||
NIGHTLY="${BASE}.dev${DATE}"
|
||||
echo "Nightly version: ${NIGHTLY}"
|
||||
|
||||
for init_file in \
|
||||
lib/crewai/src/crewai/__init__.py \
|
||||
lib/crewai-core/src/crewai_core/__init__.py \
|
||||
lib/crewai-tools/src/crewai_tools/__init__.py \
|
||||
lib/crewai-files/src/crewai_files/__init__.py; do
|
||||
CURRENT=$(python -c "
|
||||
import re
|
||||
text = open('$init_file').read()
|
||||
print(re.search(r'__version__\s*=\s*\"(.*?)\"\s*$', text, re.MULTILINE).group(1))
|
||||
")
|
||||
NIGHTLY="${CURRENT}.dev${DATE}"
|
||||
lib/crewai-files/src/crewai_files/__init__.py \
|
||||
lib/cli/src/crewai_cli/__init__.py; do
|
||||
sed -i "s/__version__ = .*/__version__ = \"${NIGHTLY}\"/" "$init_file"
|
||||
echo "$init_file: $CURRENT -> $NIGHTLY"
|
||||
echo "Stamped $init_file -> $NIGHTLY"
|
||||
done
|
||||
|
||||
# Update cross-package dependency pins to nightly versions
|
||||
sed -i "s/\"crewai-tools==[^\"]*\"/\"crewai-tools==${NIGHTLY}\"/" lib/crewai/pyproject.toml
|
||||
# Update all cross-package dependency pins to the nightly version.
|
||||
sed -i "s/\"crewai==[^\"]*\"/\"crewai==${NIGHTLY}\"/" lib/crewai-tools/pyproject.toml
|
||||
sed -i "s/\"crewai-core==[^\"]*\"/\"crewai-core==${NIGHTLY}\"/" lib/crewai/pyproject.toml
|
||||
sed -i "s/\"crewai-cli==[^\"]*\"/\"crewai-cli==${NIGHTLY}\"/" lib/crewai/pyproject.toml
|
||||
sed -i "s/\"crewai-tools==[^\"]*\"/\"crewai-tools==${NIGHTLY}\"/" lib/crewai/pyproject.toml
|
||||
sed -i "s/\"crewai-files==[^\"]*\"/\"crewai-files==${NIGHTLY}\"/" lib/crewai/pyproject.toml
|
||||
sed -i "s/\"crewai-core==[^\"]*\"/\"crewai-core==${NIGHTLY}\"/" lib/cli/pyproject.toml
|
||||
echo "Updated cross-package dependency pins to ${NIGHTLY}"
|
||||
|
||||
- name: Build packages
|
||||
@@ -85,13 +98,10 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: pypi
|
||||
url: https://pypi.org/p/crewai
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
@@ -116,7 +126,8 @@ jobs:
|
||||
continue
|
||||
fi
|
||||
echo "Publishing $package"
|
||||
if ! uv publish "$package"; then
|
||||
# --check-url skips files already on PyPI so manual re-runs on the same day are idempotent.
|
||||
if ! uv publish --check-url https://pypi.org/simple/ "$package"; then
|
||||
echo "Failed to publish $package"
|
||||
failed=1
|
||||
fi
|
||||
|
||||
@@ -19,7 +19,7 @@ repos:
|
||||
language: system
|
||||
pass_filenames: true
|
||||
types: [python]
|
||||
exclude: ^(lib/crewai/src/crewai/cli/templates/|lib/crewai/tests/|lib/crewai-tools/tests/|lib/crewai-files/tests/)
|
||||
exclude: ^(lib/crewai/src/crewai/cli/templates/|lib/cli/src/crewai_cli/templates/|lib/cli/tests/|lib/crewai/tests/|lib/crewai-tools/tests/|lib/crewai-files/tests/|lib/devtools/tests/)
|
||||
- repo: https://github.com/astral-sh/uv-pre-commit
|
||||
rev: 0.11.3
|
||||
hooks:
|
||||
|
||||
@@ -54,12 +54,13 @@ _original_from_serialized_response = getattr(
|
||||
)
|
||||
|
||||
if _original_from_serialized_response is not None:
|
||||
_from_serialized: Any = _original_from_serialized_response
|
||||
|
||||
def _patched_from_serialized_response(
|
||||
request: Any, serialized_response: Any, history: Any = None
|
||||
) -> Any:
|
||||
"""Patched version that ensures response._content is properly set."""
|
||||
response = _original_from_serialized_response(request, serialized_response, history)
|
||||
response = _from_serialized(request, serialized_response, history)
|
||||
# Explicitly set _content to avoid ResponseNotRead errors
|
||||
# The content was passed to the constructor but the mocked read() prevents
|
||||
# proper initialization of the internal state
|
||||
@@ -255,7 +256,8 @@ def vcr_cassette_dir(request: Any) -> str:
|
||||
|
||||
for parent in test_file.parents:
|
||||
if (
|
||||
parent.name in ("crewai", "crewai-tools", "crewai-files")
|
||||
parent.name
|
||||
in ("crewai", "crewai-tools", "crewai-files", "cli", "crewai-core")
|
||||
and parent.parent.name == "lib"
|
||||
):
|
||||
package_root = parent
|
||||
|
||||
@@ -26,7 +26,7 @@ mode: "wide"
|
||||
</Step>
|
||||
|
||||
<Step title="مراقبة التقدم">
|
||||
استخدم `GET /{kickoff_id}/status` للتحقق من حالة التنفيذ واسترجاع النتائج.
|
||||
استخدم `GET /status/{kickoff_id}` للتحقق من حالة التنفيذ واسترجاع النتائج.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
@@ -65,7 +65,7 @@ https://your-crew-name.crewai.com
|
||||
|
||||
1. **الاكتشاف**: استدعِ `GET /inputs` لفهم ما يحتاجه طاقمك
|
||||
2. **التنفيذ**: أرسل المدخلات عبر `POST /kickoff` لبدء المعالجة
|
||||
3. **المراقبة**: استعلم عن `GET /{kickoff_id}/status` حتى الاكتمال
|
||||
3. **المراقبة**: استعلم عن `GET /status/{kickoff_id}` حتى الاكتمال
|
||||
4. **النتائج**: استخرج المخرجات النهائية من الاستجابة المكتملة
|
||||
|
||||
## معالجة الأخطاء
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "GET /{kickoff_id}/status"
|
||||
title: "GET /status/{kickoff_id}"
|
||||
description: "الحصول على حالة التنفيذ"
|
||||
openapi: "/enterprise-api.en.yaml GET /{kickoff_id}/status"
|
||||
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
@@ -4,6 +4,152 @@ description: "تحديثات المنتج والتحسينات وإصلاحات
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="7 مايو 2026">
|
||||
## v1.14.5a3
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a3)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إصلاح مسار نقطة النهاية للحالة من /{kickoff_id}/status إلى /status/{kickoff_id}
|
||||
- تحديث تبعية gitpython إلى الإصدار >=3.1.47 للامتثال الأمني
|
||||
|
||||
### إعادة هيكلة
|
||||
- استخراج واجهة سطر الأوامر إلى حزمة crewai-cli المستقلة
|
||||
|
||||
### الوثائق
|
||||
- تحديث سجل التغييرات والإصدار للإصدار v1.14.5a2
|
||||
|
||||
## المساهمون
|
||||
|
||||
@greysonlalonde, @iris-clawd
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="4 مايو 2026">
|
||||
## v1.14.5a2
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a2)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إصلاح استعادة مخرجات المهام في كتلة finally
|
||||
- تضمين `thoughts_token_count` في رموز الإكمال
|
||||
- الحفاظ على مخرجات المهام عبر تفريغ دفعات غير متزامنة
|
||||
- تمرير kwargs إلى استدعاءات المحمل في `CrewAIRagAdapter`
|
||||
- منع `result_as_answer` من إرجاع رسالة كتلة الخطاف كإجابة نهائية
|
||||
- منع `result_as_answer` من إرجاع خطأ كإجابة نهائية
|
||||
- استخدام `acall` لتحويل المخرجات في المسارات غير المتزامنة
|
||||
- منع تغيير كلمات التوقف المشتركة في LLM عبر الوكلاء
|
||||
- التعامل مع مدخلات `BaseModel` في `convert_to_model`
|
||||
|
||||
### الوثائق
|
||||
- توثيق متغيرات البيئة الإضافية
|
||||
- تحديث سجل التغييرات والإصدار لـ v1.14.5a1
|
||||
|
||||
## المساهمون
|
||||
|
||||
@NIK-TIGER-BILL, @greysonlalonde, @lorenzejay, @minasami-pr, @theCyberTech, @wishhyt
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="1 مايو 2026">
|
||||
## v1.14.5a1
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a1)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### الميزات
|
||||
- إضافة معلمة بدء `restore_from_state_id`
|
||||
- إضافة تسليط الضوء على ExaSearchTool وإعادة تسميته من EXASearchTool
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إصلاح المواقع المفقودة لـ crewai في تدفق الإصدار
|
||||
- ضمان تحميل أحداث المهارات للآثار
|
||||
|
||||
### الوثائق
|
||||
- تحديث سجل التغييرات والإصدار لـ v1.14.4
|
||||
|
||||
## المساهمون
|
||||
|
||||
@akaKuruma, @github-actions[bot], @greysonlalonde, @lorenzejay, @theishangoswami
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="1 مايو 2026">
|
||||
## v1.14.4
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.4)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### الميزات
|
||||
- إضافة دعم لمفتاح الاستمرارية المخصص في @persist
|
||||
- إضافة دعم واجهة برمجة التطبيقات للردود لمزود Azure OpenAI
|
||||
- تمرير credential_scopes إلى عميل Azure AI Inference
|
||||
- إضافة دليل إعداد هوية عبء العمل لـ Vertex AI
|
||||
- إضافة Tavily Research والحصول على Research
|
||||
- إضافة أدوات MCP من You.com للبحث، البحث، واستخراج المحتوى
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إصلاح مشكلة السقوط عند عدم تطابق تعبير JSON regex مع JSON صالح
|
||||
- إصلاح للحفاظ على tool_calls عندما تحتوي الاستجابة أيضًا على نص
|
||||
- إصلاح لتمرير base_url و api_key إلى instructor.from_provider
|
||||
- إصلاح لتحذير وإرجاع فارغ عندما لا يُرجع خادم MCP الأصلي أي أدوات
|
||||
- إصلاح لاستخدام متغير الرسائل الموثقة في معالجات غير البث
|
||||
- إصلاح لحماية مساعدي وصف دردشة الطاقم ضد فشل LLM
|
||||
- إصلاح لإعادة تعيين الرسائل والتكرارات بين الاستدعاءات
|
||||
- إصلاح لتمرير ملف trained-agents من خلال replay و test
|
||||
- إصلاح لاحترام ملف trained-agents المخصص في الاستدلال
|
||||
- إصلاح لربط الوكلاء المخصصين بالمهام فقط بالطاقم لملفات الإدخال متعددة الأنماط
|
||||
- إصلاح لتسلسل callable الحواجز كـ null لتسجيل JSON
|
||||
- إصلاح إعادة تسمية force_final_answer لتجنب توجيه ذاتي
|
||||
- إصلاح زيادة litellm لإصلاح SSTI؛ تجاهل CVE غير القابل للإصلاح في pip
|
||||
|
||||
### الوثائق
|
||||
- تحديث سجل التغييرات والإصدار لـ v1.14.4a1
|
||||
- إضافة صفحة أدوات E2B Sandbox
|
||||
- إضافة وثائق أدوات صندوق Daytona
|
||||
|
||||
## المساهمون
|
||||
|
||||
@EdwardIrby, @dependabot[bot], @factory-droid-oss, @factory-droid[bot], @greysonlalonde, @kunalk16, @lorenzejay, @lucasgomide, @manisrinivasan2k1, @mattatcha, @vinibrsl
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="29 أبريل 2026">
|
||||
## v1.14.4a1
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.4a1)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إصلاح مساعدي وصف دردشة الطاقم ضد فشل LLM.
|
||||
- إعادة تعيين الرسائل والتكرارات بين الاستدعاءات في المنفذ.
|
||||
- تمرير ملف الوكلاء المدربين عبر إعادة التشغيل والاختبار في CLI.
|
||||
- احترام ملف الوكلاء المدربين المخصص أثناء الاستدلال في الوكيل.
|
||||
- ربط الوكلاء المخصصين بالمهام فقط بالطاقم لضمان وصول ملفات الإدخال متعددة الوسائط إلى LLM.
|
||||
- تسلسل استدعاءات الحواجز كـ null لتسجيل النقاط في JSON.
|
||||
- إعادة تسمية `force_final_answer` في agent_executor لتجنب جهاز التوجيه الذاتي الإشارة.
|
||||
- تحديث `litellm` لإصلاح SSTI وتجاهل CVE pip غير القابل للإصلاح.
|
||||
|
||||
### الوثائق
|
||||
- إضافة صفحة أدوات Sandbox E2B.
|
||||
- إضافة وثائق أدوات Sandbox Daytona.
|
||||
- إضافة دليل إعداد هوية عبء العمل لـ Vertex AI.
|
||||
- إضافة أدوات MCP من You.com للبحث، البحث، واستخراج المحتوى.
|
||||
- تحديث سجل التغييرات والإصدار لـ v1.14.3.
|
||||
|
||||
## المساهمون
|
||||
|
||||
@EdwardIrby, @dependabot[bot], @factory-droid-oss, @factory-droid[bot], @greysonlalonde, @lorenzejay, @manisrinivasan2k1, @mattatcha
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="25 أبريل 2026">
|
||||
## v1.14.3
|
||||
|
||||
|
||||
@@ -380,6 +380,42 @@ class AnotherFlow(Flow[dict]):
|
||||
print("Method-level persisted runs:", self.state["runs"])
|
||||
```
|
||||
|
||||
### تفرع الحالة المستمرة
|
||||
|
||||
يدعم `@persist` نمطين متميزين للترطيب في `kickoff` / `kickoff_async`:
|
||||
|
||||
- `kickoff(inputs={"id": <uuid>})` — **استئناف**: يحمّل أحدث لقطة لـ UUID المقدم ويستمر في الكتابة تحت نفس `flow_uuid`. يمتد التاريخ.
|
||||
- `kickoff(restore_from_state_id=<uuid>)` — **تفرع**: يحمّل أحدث لقطة لـ UUID المقدم، يرطّب حالة التشغيل الجديد منها، ثم يعيّن `state.id` جديدًا (مولّدًا تلقائيًا، أو `inputs["id"]` إذا تم تثبيته). تذهب كتابات `@persist` للتشغيل الجديد تحت `state.id` الجديد؛ يتم الحفاظ على تاريخ تدفق المصدر.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
id: str = ""
|
||||
counter: int = 0
|
||||
|
||||
@persist
|
||||
class CounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def step(self):
|
||||
self.state.counter += 1
|
||||
print(f"[id={self.state.id}] counter={self.state.counter}")
|
||||
|
||||
# التشغيل 1: حالة جديدة، العداد 0 -> 1، محفوظ تحت flow_1.state.id
|
||||
flow_1 = CounterFlow()
|
||||
flow_1.kickoff()
|
||||
|
||||
# التفرع: ترطيب من أحدث لقطة لـ flow_1، لكن باستخدام state.id جديد
|
||||
flow_2 = CounterFlow()
|
||||
flow_2.kickoff(restore_from_state_id=flow_1.state.id)
|
||||
# يبدأ flow_2.state.counter بـ 1 (مرطّب)، ثم تزيده step() إلى 2.
|
||||
# flow_2.state.id != flow_1.state.id؛ تاريخ flow_1 لم يتغيّر.
|
||||
```
|
||||
|
||||
إذا لم يطابق `restore_from_state_id` المقدم أي حالة مستمرة، يعود kickoff بصمت إلى السلوك الافتراضي — نفس سلوك `inputs["id"]` عند عدم العثور عليه. الجمع بين `restore_from_state_id` و `from_checkpoint` يطلق `ValueError`؛ اختر مصدر ترطيب واحدًا. تثبيت `inputs["id"]` أثناء التفرع يشارك مفتاح الاستمرارية مع تدفق آخر — عادةً ما تريد استخدام `restore_from_state_id` فقط.
|
||||
|
||||
### كيف تعمل
|
||||
|
||||
1. **تعريف الحالة الفريد**
|
||||
|
||||
@@ -146,6 +146,14 @@ class ProductionFlow(Flow[AppState]):
|
||||
# ...
|
||||
```
|
||||
|
||||
افتراضيًا، يستأنف `@persist` تدفقًا عند توفير `kickoff(inputs={"id": <uuid>})`، مما يمدّ نفس تاريخ `flow_uuid`. لـ **تفرع** تدفق مستمر إلى نسبٍ جديد — ترطيب الحالة من تشغيل سابق ولكن الكتابة تحت `state.id` جديد — مرّر `restore_from_state_id`:
|
||||
|
||||
```python
|
||||
flow.kickoff(restore_from_state_id="<previous-run-state-id>")
|
||||
```
|
||||
|
||||
يحصل التشغيل الجديد على `state.id` جديد (مولّد تلقائيًا، أو `inputs["id"]` إذا تم تثبيته) لذا لا تمتد كتابات `@persist` الخاصة به إلى تاريخ المصدر. الجمع مع `from_checkpoint` يطلق `ValueError`؛ اختر مصدر ترطيب واحدًا.
|
||||
|
||||
## الخلاصة
|
||||
|
||||
- **ابدأ بتدفق.**
|
||||
|
||||
@@ -133,7 +133,7 @@ crew.kickoff()
|
||||
| **DirectorySearchTool** | أداة RAG للبحث في المجلدات، مفيدة للتنقل في أنظمة الملفات. |
|
||||
| **DOCXSearchTool** | أداة RAG للبحث في مستندات DOCX، مثالية لمعالجة ملفات Word. |
|
||||
| **DirectoryReadTool** | تسهّل قراءة ومعالجة هياكل المجلدات ومحتوياتها. |
|
||||
| **EXASearchTool** | أداة مصممة لإجراء عمليات بحث شاملة عبر مصادر بيانات متنوعة. |
|
||||
| **ExaSearchTool** | أداة مصممة لإجراء عمليات بحث شاملة عبر مصادر بيانات متنوعة. |
|
||||
| **FileReadTool** | تُمكّن قراءة واستخراج البيانات من الملفات، مع دعم تنسيقات ملفات متنوعة. |
|
||||
| **FirecrawlSearchTool** | أداة للبحث في صفحات الويب باستخدام Firecrawl وإرجاع النتائج. |
|
||||
| **FirecrawlCrawlWebsiteTool** | أداة لزحف صفحات الويب باستخدام Firecrawl. |
|
||||
|
||||
@@ -116,6 +116,48 @@ class PersistentCounterFlow(Flow[CounterState]):
|
||||
return self.state.value
|
||||
```
|
||||
|
||||
#### تفرع الحالة المستمرة
|
||||
|
||||
يدعم `@persist` نمطين متميزين للترطيب في `kickoff` / `kickoff_async`. استخدم **استئناف** (`inputs["id"]`) لمواصلة نفس النسب؛ استخدم **تفرع** (`restore_from_state_id`) لبدء نسبٍ جديد من لقطة:
|
||||
|
||||
| | `state.id` بعد kickoff | كتابات `@persist` تذهب إلى |
|
||||
|---|---|---|
|
||||
| `inputs["id"]` (استئناف) | المعرّف المقدم | المعرّف المقدم (يمد التاريخ) |
|
||||
| `restore_from_state_id` (تفرع) | معرّف جديد، أو `inputs["id"]` إذا ثُبّت | المعرّف الجديد (المصدر محفوظ) |
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
id: str = ""
|
||||
counter: int = 0
|
||||
|
||||
@persist
|
||||
class CounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def step(self):
|
||||
self.state.counter += 1
|
||||
|
||||
# التشغيل 1: حالة جديدة، العداد 0 -> 1
|
||||
flow_1 = CounterFlow()
|
||||
flow_1.kickoff()
|
||||
|
||||
# التفرع: الترطيب من أحدث لقطة لـ flow_1، لكن الكتابة تحت state.id جديد
|
||||
flow_2 = CounterFlow()
|
||||
flow_2.kickoff(restore_from_state_id=flow_1.state.id)
|
||||
# يبدأ flow_2 بـ counter=1 (مرطّب)، ثم تزيده step() إلى 2.
|
||||
# تاريخ flow_uuid لـ flow_1 لم يتغيّر.
|
||||
```
|
||||
|
||||
ملاحظات السلوك:
|
||||
|
||||
- `restore_from_state_id` غير موجود في الاستمرارية → يعود kickoff بصمت إلى السلوك الافتراضي (يعكس سلوك `inputs["id"]` عند عدم العثور عليه). لا يُطلق أي استثناء.
|
||||
- الجمع بين `restore_from_state_id` و `from_checkpoint` يطلق `ValueError` — يستهدفان نظامي حالة مختلفين (`@persist` مقابل Checkpointing) ولا يمكن الجمع بينهما.
|
||||
- `restore_from_state_id=None` (افتراضي) متطابق بايت ببايت مع kickoff بدون المعامل.
|
||||
- تثبيت `inputs["id"]` أثناء التفرع يعني أن التشغيل الجديد يشارك مفتاح الاستمرارية مع تدفق آخر — عادةً ما تريد فقط `restore_from_state_id`.
|
||||
|
||||
## أنماط حالة متقدمة
|
||||
|
||||
### المنطق الشرطي المبني على الحالة
|
||||
|
||||
359
docs/ar/guides/migration/upgrading-crewai.mdx
Normal file
359
docs/ar/guides/migration/upgrading-crewai.mdx
Normal file
@@ -0,0 +1,359 @@
|
||||
---
|
||||
title: "ترقية وترحيل CrewAI"
|
||||
description: "كيفية ترقية CrewAI والتعامل مع التغييرات الجذرية وترحيل Crews إلى Flows."
|
||||
icon: "arrow-up-circle"
|
||||
---
|
||||
|
||||
## نظرة عامة
|
||||
|
||||
يتطور CrewAI بسرعة. الإصدارات الجديدة تقوم بانتظام بضبط مسارات الاستيراد، وتغيير الإعدادات الافتراضية لـ `Agent` و`Crew` و`Task`، وإدخال أساسيات تنسيق جديدة مثل `Flow` ونقاط الحفظ (checkpointing). يجمع هذا الدليل الخطوات العملية اللازمة من أجل:
|
||||
|
||||
- ترقية أداة سطر الأوامر العامة `crewai` والاعتمادية المثبّتة في مشروعك
|
||||
- التكيّف مع التغييرات الجذرية في الاستيرادات والمعاملات
|
||||
- ترحيل `Crew` مستقلة إلى `Flow` مكتوبة الأنواع
|
||||
- تجنّب الفخاخ التي تظهر في أول تشغيل لمشروع مُرقَّى
|
||||
|
||||
إذا كنت تبدأ من الصفر، راجع [التثبيت](/ar/installation). إذا كنت قادمًا من إطار عمل آخر، راجع [الترحيل من LangGraph](/ar/guides/migration/migrating-from-langgraph).
|
||||
|
||||
---
|
||||
|
||||
## الشيئان اللذان قد ترغب في ترقيتهما
|
||||
|
||||
يوجد CrewAI في مكانين على جهازك، ويتم ترقيتهما بشكل مستقل:
|
||||
|
||||
| ماذا | كيف يُثبَّت | كيف تتم الترقية |
|
||||
|---|---|---|
|
||||
| **أداة سطر الأوامر العامة `crewai`** | `uv tool install crewai` | `uv tool install crewai --upgrade` |
|
||||
| **بيئة venv للمشروع** (حيث يعمل الكود) | `crewai install` / `uv sync` | `uv add "crewai[...]>=X.Y.Z"` ثم `crewai install` |
|
||||
|
||||
يمكن لهما — وغالبًا ما يحدث — أن يخرجا عن التزامن. تشغيل `crewai --version` يُظهر إصدار سطر الأوامر. تشغيل `uv pip show crewai` داخل مشروعك يُظهر إصدار venv. إذا اختلفا، فهذا طبيعي؛ ما يهم بالنسبة للكود قيد التشغيل هو إصدار venv.
|
||||
|
||||
## لماذا لا يقوم `crewai install` وحده بالترقية
|
||||
|
||||
`crewai install` هو غلاف رفيع حول `uv sync`. يُثبّت بالضبط ما يقوله ملف `uv.lock` الحالي — وهو **لا** يرفع أي قيود إصدار.
|
||||
|
||||
إذا كان `pyproject.toml` يقول `crewai>=1.11.1` وقد قام ملف القفل بحلّه إلى `1.11.1`، فإن تشغيل `crewai install` سيُبقيك على `1.11.1` للأبد، حتى وإن كان الإصدار `1.14.4` متاحًا.
|
||||
|
||||
للترقية فعلًا، عليك:
|
||||
|
||||
1. تحديث قيد الإصدار في `pyproject.toml`
|
||||
2. إعادة حلّ ملف القفل
|
||||
3. مزامنة venv
|
||||
|
||||
`uv add` يقوم بالثلاثة في خطوة واحدة.
|
||||
|
||||
## كيفية ترقية مشروعك
|
||||
|
||||
```bash
|
||||
# يرفع القيد ويعيد القفل في أمر واحد
|
||||
uv add "crewai[tools]>=1.14.4"
|
||||
|
||||
# يزامن venv (crewai install يستدعي uv sync تحت الغطاء)
|
||||
crewai install
|
||||
|
||||
# تحقّق
|
||||
uv pip show crewai
|
||||
# → Version: 1.14.4
|
||||
```
|
||||
|
||||
استبدل `[tools]` بأي إضافات يستخدمها مشروعك (مثلًا `[tools,anthropic]`). تحقّق من قائمة `dependencies` في `pyproject.toml` إن لم تكن متأكدًا.
|
||||
|
||||
<Note>
|
||||
يحدّث `uv add` كلا من `pyproject.toml` **و**`uv.lock` بشكل ذرّي. إذا قمت بتحرير `pyproject.toml` يدويًا، فإنك لا تزال بحاجة إلى تشغيل `uv lock --upgrade-package crewai` لإعادة حلّ ملف القفل قبل أن يلتقط `crewai install` الإصدار الجديد.
|
||||
</Note>
|
||||
|
||||
## ترقية أداة سطر الأوامر العامة
|
||||
|
||||
أداة سطر الأوامر العامة منفصلة عن مشروعك. قم بترقيتها عبر:
|
||||
|
||||
```bash
|
||||
uv tool install crewai --upgrade
|
||||
```
|
||||
|
||||
إذا حذّرك الـ shell بشأن `PATH` بعد الترقية، قم بتحديثه:
|
||||
|
||||
```bash
|
||||
uv tool update-shell
|
||||
```
|
||||
|
||||
هذا **لا** يمسّ بيئة venv الخاصة بمشروعك — لا تزال بحاجة إلى `uv add` + `crewai install` داخل المشروع.
|
||||
|
||||
## التحقق من تزامن الاثنين
|
||||
|
||||
```bash
|
||||
# إصدار سطر الأوامر العام
|
||||
crewai --version
|
||||
|
||||
# إصدار venv للمشروع
|
||||
uv pip show crewai | grep Version
|
||||
```
|
||||
|
||||
ليس من الضروري أن يتطابقا — لكن إصدار venv للمشروع هو ما يهم لسلوك التشغيل.
|
||||
|
||||
<Note>
|
||||
يتطلب CrewAI `Python >=3.10, <3.14`. إذا كان `uv` مثبَّتًا مقابل مفسّر أقدم، فأعد إنشاء venv للمشروع باستخدام إصدار Python مدعوم قبل تشغيل `crewai install`.
|
||||
</Note>
|
||||
|
||||
---
|
||||
|
||||
## التغييرات الجذرية وملاحظات الترحيل
|
||||
|
||||
تتطلب معظم الترقيات تعديلات صغيرة فقط. المناطق أدناه هي تلك التي تنكسر بصمت أو بتتبعات مكدّس مربكة.
|
||||
|
||||
### مسارات الاستيراد: tools و`BaseTool`
|
||||
|
||||
الموقع الرسمي لاستيراد الـ tools هو `crewai.tools`. لا تزال المسارات القديمة تظهر في الدروس لكن يجب تحديثها.
|
||||
|
||||
```python
|
||||
# قبل
|
||||
from crewai_tools import BaseTool
|
||||
from crewai.agents.tools import tool
|
||||
|
||||
# بعد
|
||||
from crewai.tools import BaseTool, tool
|
||||
```
|
||||
|
||||
كلٌ من المُزخرف `@tool` والفئة الفرعية `BaseTool` يقعان في `crewai.tools`. `AgentFinish` والرموز الأخرى الداخلية للوكيل لم تعد جزءًا من السطح العام — إذا كنت تستوردها، فانتقل إلى event listeners أو callbacks الـ `Task` بدلًا منها.
|
||||
|
||||
### تغييرات معاملات `Agent`
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
|
||||
agent = Agent(
|
||||
role="Researcher",
|
||||
goal="Find authoritative sources on {topic}",
|
||||
backstory="You are a careful, source-driven researcher.",
|
||||
llm="gpt-4o-mini", # اسم نموذج كسلسلة نصية أو كائن LLM
|
||||
verbose=True, # bool وليس مستوى عددي صحيح
|
||||
max_iter=15, # تغيّر الافتراضي بين الإصدارات — حدّده بشكل صريح
|
||||
allow_delegation=False,
|
||||
)
|
||||
```
|
||||
|
||||
- يقبل `llm` إما اسم نموذج كسلسلة نصية (يُحلَّ عبر المزوّد المهيّأ) أو كائن `LLM` للتحكم الدقيق.
|
||||
- `verbose` هو `bool` بسيط. تمرير عدد صحيح لم يعد يبدّل مستويات السجل.
|
||||
- تغيّرت افتراضات `max_iter` بين الإصدارات. إذا توقف وكيلك بصمت عن التكرار بعد أول استدعاء tool، فحدّد `max_iter` صراحةً.
|
||||
|
||||
### معاملات `Crew`
|
||||
|
||||
```python
|
||||
from crewai import Crew, Process
|
||||
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
process=Process.sequential, # أو Process.hierarchical
|
||||
memory=True,
|
||||
cache=True,
|
||||
embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
|
||||
)
|
||||
```
|
||||
|
||||
- يتطلب `process=Process.hierarchical` إما `manager_llm=` أو `manager_agent=`. بدون أحدهما، يرفع kickoff خطأً عند التحقّق.
|
||||
- `memory=True` مع مزوّد embedding غير افتراضي يحتاج إلى قاموس `embedder` — راجع [إعداد الذاكرة وembedder](#memory-embedder-config) أدناه.
|
||||
|
||||
### الإخراج المُهيكل لـ `Task`
|
||||
|
||||
استخدم `output_pydantic` أو `output_json` أو `output_file` لإلزام نتيجة المهمة بشكل مكتوب الأنواع:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel
|
||||
from crewai import Task
|
||||
|
||||
class Article(BaseModel):
|
||||
title: str
|
||||
body: str
|
||||
|
||||
write = Task(
|
||||
description="Write an article about {topic}",
|
||||
expected_output="A short article with a title and body",
|
||||
agent=writer,
|
||||
output_pydantic=Article, # الفئة، وليس مثيلًا منها
|
||||
output_file="output/article.md",
|
||||
)
|
||||
```
|
||||
|
||||
`output_pydantic` يأخذ **الفئة** نفسها. تمرير `Article(title="", body="")` خطأ شائع ويفشل بخطأ تحقّق مربك.
|
||||
|
||||
### إعداد الذاكرة وembedder
|
||||
|
||||
إذا كان `memory=True` وأنت لا تستخدم embeddings الافتراضية الخاصة بـ OpenAI، فيجب أن تمرّر `embedder`:
|
||||
|
||||
```python
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
memory=True,
|
||||
embedder={
|
||||
"provider": "ollama",
|
||||
"config": {"model": "nomic-embed-text"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
ضع بيانات اعتماد المزوّد المعنيّة (`OPENAI_API_KEY`, `OLLAMA_HOST`, إلخ) في ملف `.env`. مسارات تخزين الذاكرة محلية بالنسبة للمشروع افتراضيًا — احذف مجلد ذاكرة المشروع إذا غيّرت embedders، لأن الأبعاد لا تختلط.
|
||||
|
||||
---
|
||||
|
||||
## ترحيل Crew إلى Flow
|
||||
|
||||
`Crew` هي الأساس الصحيح عندما يكون لديك فريق واحد من الوكلاء ينفّذ سير عمل واحدًا. عندما تحتاج إلى تفرّعات أو عدة crews أو حالة مستمرة عبر التشغيلات، انتقل إلى `Flow`.
|
||||
|
||||
### متى تستخدم Flows مقابل Crews مستقلة
|
||||
|
||||
| الحالة | استخدم |
|
||||
| --- | --- |
|
||||
| فريق واحد، سير عمل خطّي/هرمي واحد | `Crew` |
|
||||
| تفرّعات شرطية، إعادات محاولة، توجيه بناءً على النتائج | `Flow` |
|
||||
| عدة crews متخصصة مرتبطة معًا | `Flow` |
|
||||
| حالة يجب أن تستمر بين الخطوات أو التشغيلات | `Flow` (مع checkpointing) |
|
||||
| تريد حالة مكتوبة الأنواع وملائمة لـ IDE | `Flow[MyState]` مع نموذج Pydantic |
|
||||
|
||||
إذا احتجت لأي من: التفرّعات، تعدّد الـ crew، أو الحالة المستمرة — ابدأ بـ `Flow`. الكود المتكرر صغير ولن تضطر إلى إعادة الكتابة لاحقًا.
|
||||
|
||||
### الترحيل خطوة بخطوة
|
||||
|
||||
**قبل — crew مستقلة:**
|
||||
|
||||
```python
|
||||
from crewai import Crew
|
||||
|
||||
crew = Crew(agents=[researcher, writer], tasks=[research_task, write_task])
|
||||
result = crew.kickoff(inputs={"topic": "vector databases"})
|
||||
print(result)
|
||||
```
|
||||
|
||||
**بعد — crew داخل Flow مكتوب الأنواع:**
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MyState(BaseModel):
|
||||
input_data: str = ""
|
||||
result: str = ""
|
||||
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def run_crew(self):
|
||||
result = MyCrew().crew().kickoff(inputs={"topic": self.state.input_data})
|
||||
self.state.result = str(result)
|
||||
return self.state.result
|
||||
|
||||
flow = MyFlow()
|
||||
flow.kickoff(inputs={"input_data": "vector databases"})
|
||||
```
|
||||
|
||||
ما الذي تغيّر:
|
||||
|
||||
1. تُبنى الـ crew داخل دالة، وليس عند تحميل الموديول.
|
||||
2. تنساب المُدخلات عبر `self.state` بدلًا من تمريرها كـ kwargs.
|
||||
3. تُحدَّد نقطة الدخول بـ `@start()`. الخطوات اللاحقة تستخدم `@listen(run_crew)` للربط.
|
||||
|
||||
### إعداد الحالة المُهيكلة
|
||||
|
||||
فضّل الحالة المكتوبة الأنواع (`Flow[MyState]`) على المتغير القاموسي غير المكتوب. تحصل على إكمال تلقائي، تحقّق عند الحدود، وحالة قابلة للسلسلة من أجل checkpointing:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
class ResearchState(BaseModel):
|
||||
topic: str = ""
|
||||
sources: list[str] = Field(default_factory=list)
|
||||
draft: str = ""
|
||||
final: str = ""
|
||||
```
|
||||
|
||||
الحالة غير المكتوبة (`Flow()` بدون نوع عام) لا تزال تعمل، لكنك تخسر الفحوص الساكنة ودقّة الـ checkpointing.
|
||||
|
||||
### نمط Flow متعدد الـ crews
|
||||
|
||||
ربط اثنين من الـ crews — بحث ثم كتابة — هو السبب الكلاسيكي لاعتماد Flows:
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen, router
|
||||
from pydantic import BaseModel
|
||||
|
||||
class PipelineState(BaseModel):
|
||||
topic: str = ""
|
||||
research: str = ""
|
||||
article: str = ""
|
||||
|
||||
class ContentPipeline(Flow[PipelineState]):
|
||||
@start()
|
||||
def research(self):
|
||||
out = ResearchCrew().crew().kickoff(inputs={"topic": self.state.topic})
|
||||
self.state.research = str(out)
|
||||
return self.state.research
|
||||
|
||||
@router(research)
|
||||
def gate(self):
|
||||
return "write" if len(self.state.research) > 200 else "abort"
|
||||
|
||||
@listen("write")
|
||||
def write(self):
|
||||
out = WritingCrew().crew().kickoff(
|
||||
inputs={"topic": self.state.topic, "notes": self.state.research}
|
||||
)
|
||||
self.state.article = str(out)
|
||||
return self.state.article
|
||||
|
||||
@listen("abort")
|
||||
def bail(self):
|
||||
self.state.article = "Insufficient research."
|
||||
return self.state.article
|
||||
|
||||
ContentPipeline().kickoff(inputs={"topic": "vector databases"})
|
||||
```
|
||||
|
||||
`@start()` و`@listen()` و`@router()` هي المُزخرفات الثلاثة التي ستستخدمها 95% من الوقت. راجع [Flows](/ar/concepts/flows) للمرجع الكامل.
|
||||
|
||||
---
|
||||
|
||||
## الفخاخ الشائعة
|
||||
|
||||
1. **تشغيل `crewai install` وتوقّع ترقية.** يُزامن `crewai install` مع `uv.lock` الموجود. لرفع الإصدارات، شغّل `uv add "crewai[tools]>=X.Y.Z"` أولًا، ثم `crewai install`.
|
||||
2. **القيد هو حدّ أدنى وليس تثبيتًا.** `crewai>=1.11.1` يعني "أي إصدار يساوي 1.11.1 أو أعلى". لا يعيد `uv` الحل إلا عند تشغيل `uv add` أو `uv lock --upgrade-package crewai` صراحةً.
|
||||
3. **إسقاط الإضافات أثناء إعادة القفل.** إذا شغّلت `uv add "crewai>=1.14.4"` بدون إضافات، فقد يُسقط `uv` الـ `[tools]` من المجموعة المحلولة. ضمّن دائمًا الإضافات التي تحتاجها: `uv add "crewai[tools]>=1.14.4"`.
|
||||
4. **نسيان commit `uv.lock`.** بعد رفع الإصدار بـ `uv add`، قم بـ commit للـ `uv.lock` المُحدَّث حتى يحصل زملاؤك على نفس الإصدارات.
|
||||
5. **`pip install` بدلًا من `uv tool install`.** مزج `crewai` المُثبَّت بـ pip و`uv` يؤدي إلى ثنائيين في `PATH` وانحراف إصدارات مربك. اختر واحدًا — المدعوم هو `uv`.
|
||||
6. **تمرير مثيل Pydantic إلى `output_pydantic`.** يتوقع الفئة. `output_pydantic=Article` وليس `output_pydantic=Article(...)`.
|
||||
7. **العملية الهرمية بدون مدير.** يتطلب `process=Process.hierarchical` `manager_llm=` أو `manager_agent=`.
|
||||
8. **الذاكرة ممكّنة مع embedder خاطئ.** تبديل embedders دون تنظيف مجلد الذاكرة على القرص يسبب عدم تطابق في الأبعاد. احذف مخزن الذاكرة الخاص بالمشروع بعد تغيير المزوّدين.
|
||||
9. **حالة قاموس عندما كنت تريد حالة مكتوبة.** `Flow()` بدون نوع عام يعطيك قاموسًا. للفحص النوعي والـ checkpointing النظيف، استخدم `Flow[MyState]` مع `BaseModel`.
|
||||
10. **استيرادات tools قديمة.** `from crewai_tools import BaseTool` يعمل في بعض الإصدارات لكنه ليس المسار الرسمي. وحّد على `from crewai.tools import BaseTool, tool`.
|
||||
11. **انحراف إصدار Python.** يتطلب CrewAI `>=3.10, <3.14`. سيبني `uv` بيئة venv مقابل 3.14+ دون تردد إذا كان هو الافتراضي؛ ثبّت إصدار Python في `pyproject.toml`.
|
||||
12. **`verbose=2` وأعلام عدد صحيح مماثلة.** `verbose` هو `bool`. استخدم event listeners للسجلات الأكثر تفصيلًا.
|
||||
13. **استدعاء `crew.kickoff()` من داخل Flow بدون التغليف في `inputs={}`.** الـ Flows تمرر state وليس kwargs. لا تزال الـ crew تتوقع `inputs={...}`.
|
||||
|
||||
---
|
||||
|
||||
## Checkpointing
|
||||
|
||||
Checkpointing هو إضافة أحدث تُديم حالة الـ agent والـ crew والـ flow بين التشغيلات. يسمح لسير العمل طويل الأمد بالاستئناف بعد انهيار، أو إيقاف يدوي، أو نشر.
|
||||
|
||||
```python
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
checkpoint=True,
|
||||
)
|
||||
```
|
||||
|
||||
نفس العَلَم مدعوم على `Flow` و`Agent`. تُكتب الحالة في المخزن المحلي للمشروع ويُعاد تشغيلها في `kickoff()` التالي بنفس المعرّف.
|
||||
|
||||
<Note>
|
||||
Checkpointing في إصدار مبكر. قد تتغيّر واجهات APIs المتعلقة بدلالات الاستئناف، وخلفيات التخزين، والمعرّفات بين الإصدارات الثانوية — ثبّت إصدار `crewai` إذا كنت تعتمد عليه في الإنتاج.
|
||||
</Note>
|
||||
|
||||
راجع [Checkpointing](/ar/concepts/checkpointing) للمرجع الكامل للميزة.
|
||||
|
||||
---
|
||||
|
||||
## الحصول على المساعدة
|
||||
|
||||
- **سجل التغييرات** — كل تغيير جذري مُسجَّل في [ملاحظات الإصدار](/ar/changelog).
|
||||
- **GitHub Issues** — افتح واحدة في [github.com/crewAIInc/crewAI/issues](https://github.com/crewAIInc/crewAI/issues) مع إعادة إنتاج بسيطة ومخرجات `crewai --version`.
|
||||
- **Discord** — مجتمع CrewAI على Discord هو أسرع طريق للحصول على مساعدة في تصحيح الأخطاء: [community.crewai.com](https://community.crewai.com).
|
||||
- **أدلة الترحيل** — إذا كنت قادمًا من إطار آخر، ابدأ من [الترحيل من LangGraph](/ar/guides/migration/migrating-from-langgraph).
|
||||
180
docs/ar/tools/ai-ml/daytona.mdx
Normal file
180
docs/ar/tools/ai-ml/daytona.mdx
Normal file
@@ -0,0 +1,180 @@
|
||||
---
|
||||
title: Daytona Sandbox Tools
|
||||
description: Run shell commands, execute Python, and manage files inside isolated [Daytona](https://www.daytona.io/) sandboxes.
|
||||
icon: box
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
# Daytona Sandbox Tools
|
||||
|
||||
## Description
|
||||
|
||||
The Daytona sandbox tools give CrewAI agents access to isolated, ephemeral compute environments powered by [Daytona](https://www.daytona.io/). Three tools are available so you can give an agent exactly the capabilities it needs:
|
||||
|
||||
- **`DaytonaExecTool`** — run any shell command inside a sandbox.
|
||||
- **`DaytonaPythonTool`** — execute a block of Python source code inside a sandbox.
|
||||
- **`DaytonaFileTool`** — read, write, append, list, delete, create directories, and inspect files inside a sandbox.
|
||||
|
||||
All three tools share the same sandbox lifecycle controls, so you can mix and match them while keeping state in a single persistent sandbox.
|
||||
|
||||
## Installation
|
||||
|
||||
```shell
|
||||
uv add "crewai-tools[daytona]"
|
||||
# or
|
||||
pip install "crewai-tools[daytona]"
|
||||
```
|
||||
|
||||
Set your API key:
|
||||
|
||||
```shell
|
||||
export DAYTONA_API_KEY="your-api-key"
|
||||
```
|
||||
|
||||
`DAYTONA_API_URL` and `DAYTONA_TARGET` are also respected if set.
|
||||
|
||||
## Sandbox Lifecycle
|
||||
|
||||
All three tools inherit lifecycle controls from `DaytonaBaseTool`:
|
||||
|
||||
| Mode | How to enable | Sandbox created | Sandbox deleted |
|
||||
|------|--------------|-----------------|-----------------|
|
||||
| **Ephemeral** (default) | `persistent=False` (default) | On every `_run` call | At the end of that same call |
|
||||
| **Persistent** | `persistent=True` | Lazily on first use | At process exit (via `atexit`), or manually via `tool.close()` |
|
||||
| **Attach** | `sandbox_id="<id>"` | Never — attaches to an existing sandbox | Never — the tool will not delete a sandbox it did not create |
|
||||
|
||||
Ephemeral mode is the safe default: nothing leaks if the agent forgets to clean up. Use persistent mode when you want filesystem state or installed packages to carry across multiple tool calls — this is typical when pairing `DaytonaFileTool` with `DaytonaExecTool`.
|
||||
|
||||
## Examples
|
||||
|
||||
### One-shot Python execution (ephemeral)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaPythonTool
|
||||
|
||||
tool = DaytonaPythonTool()
|
||||
result = tool.run(code="print(sum(range(10)))")
|
||||
print(result)
|
||||
# {"exit_code": 0, "result": "45\n", "artifacts": None}
|
||||
```
|
||||
|
||||
### Multi-step shell session (persistent)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool, DaytonaFileTool
|
||||
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
# Install a package, then write and run a script — all in the same sandbox
|
||||
exec_tool.run(command="pip install httpx -q")
|
||||
file_tool.run(action="write", path="/workspace/fetch.py", content="import httpx; print(httpx.get('https://httpbin.org/get').status_code)")
|
||||
exec_tool.run(command="python /workspace/fetch.py")
|
||||
```
|
||||
|
||||
<Note>
|
||||
Each tool instance maintains its own persistent sandbox. To share **one** sandbox across two tools, create the first tool, grab its sandbox id via `tool._persistent_sandbox.id`, and pass it to the second tool via `sandbox_id=...`.
|
||||
</Note>
|
||||
|
||||
### Attach to an existing sandbox
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(sandbox_id="my-long-lived-sandbox")
|
||||
result = tool.run(command="ls /workspace")
|
||||
```
|
||||
|
||||
### Custom sandbox parameters
|
||||
|
||||
Pass Daytona's `CreateSandboxFromSnapshotParams` kwargs via `create_params`:
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(
|
||||
persistent=True,
|
||||
create_params={
|
||||
"language": "python",
|
||||
"env_vars": {"MY_FLAG": "1"},
|
||||
"labels": {"owner": "crewai-agent"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
### Agent integration
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import DaytonaExecTool, DaytonaPythonTool, DaytonaFileTool
|
||||
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
python_tool = DaytonaPythonTool(persistent=True)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
coder = Agent(
|
||||
role="Sandbox Engineer",
|
||||
goal="Write and run code in an isolated environment",
|
||||
backstory="An engineer who uses Daytona sandboxes to safely execute code and manage files.",
|
||||
tools=[exec_tool, python_tool, file_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Write a Python script that prints the first 10 Fibonacci numbers, save it to /workspace/fib.py, and run it.",
|
||||
expected_output="The first 10 Fibonacci numbers printed to stdout.",
|
||||
agent=coder,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[coder], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
### Shared (`DaytonaBaseTool`)
|
||||
|
||||
All three tools accept these parameters at initialization:
|
||||
|
||||
| Parameter | Type | Default | Description |
|
||||
|-----------|------|---------|-------------|
|
||||
| `api_key` | `str \| None` | `$DAYTONA_API_KEY` | Daytona API key. Falls back to the `DAYTONA_API_KEY` env var. |
|
||||
| `api_url` | `str \| None` | `$DAYTONA_API_URL` | Daytona API URL override. |
|
||||
| `target` | `str \| None` | `$DAYTONA_TARGET` | Daytona target region. |
|
||||
| `persistent` | `bool` | `False` | Reuse one sandbox across all calls and delete it at process exit. |
|
||||
| `sandbox_id` | `str \| None` | `None` | Attach to an existing sandbox by id or name. |
|
||||
| `create_params` | `dict \| None` | `None` | Extra kwargs forwarded to `CreateSandboxFromSnapshotParams` (e.g. `language`, `env_vars`, `labels`). |
|
||||
| `sandbox_timeout` | `float` | `60.0` | Timeout in seconds for sandbox create/delete operations. |
|
||||
|
||||
### `DaytonaExecTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `command` | `str` | ✓ | Shell command to execute. |
|
||||
| `cwd` | `str \| None` | | Working directory inside the sandbox. |
|
||||
| `env` | `dict[str, str] \| None` | | Extra environment variables for this command. |
|
||||
| `timeout` | `int \| None` | | Maximum seconds to wait for the command. |
|
||||
|
||||
### `DaytonaPythonTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `code` | `str` | ✓ | Python source code to execute. |
|
||||
| `argv` | `list[str] \| None` | | Argument vector forwarded via `CodeRunParams`. |
|
||||
| `env` | `dict[str, str] \| None` | | Environment variables forwarded via `CodeRunParams`. |
|
||||
| `timeout` | `int \| None` | | Maximum seconds to wait for execution. |
|
||||
|
||||
### `DaytonaFileTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`. |
|
||||
| `path` | `str` | ✓ | Absolute path inside the sandbox. |
|
||||
| `content` | `str \| None` | | Content to write or append. Required for `append`. |
|
||||
| `binary` | `bool` | | If `True`, `content` is base64 on write; returns base64 on read. |
|
||||
| `recursive` | `bool` | | For `delete`: remove directories recursively. |
|
||||
| `mode` | `str` | | For `mkdir`: octal permission string (default `"0755"`). |
|
||||
|
||||
<Tip>
|
||||
For files larger than a few KB, create the file first with `action="write"` and empty content, then send the body via multiple `action="append"` calls of ~4 KB each to stay within tool-call payload limits.
|
||||
</Tip>
|
||||
@@ -1,11 +1,11 @@
|
||||
---
|
||||
title: "أداة بحث Exa"
|
||||
description: "ابحث في الويب باستخدام Exa Search API للعثور على النتائج الأكثر صلة لأي استعلام، مع خيارات لمحتوى الصفحة الكامل والمقتطفات والملخصات."
|
||||
description: "ابحث في الويب باستخدام Exa Search API للعثور على النتائج الأكثر صلة لأي استعلام، مع خيارات لمحتوى الصفحة الكامل والمقتطفات."
|
||||
icon: "magnifying-glass"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
تتيح أداة `EXASearchTool` لوكلاء CrewAI البحث في الويب باستخدام [Exa](https://exa.ai/) search API. تُرجع النتائج الأكثر صلة لأي استعلام، مع خيارات لمحتوى الصفحة الكامل والملخصات المولّدة بالذكاء الاصطناعي.
|
||||
تتيح أداة `ExaSearchTool` لوكلاء CrewAI البحث في الويب باستخدام [Exa](https://exa.ai/) search API. تُرجع النتائج الأكثر صلة لأي استعلام، مع خيارات لمحتوى الصفحة الكامل والمقتطفات الموفرة للرموز.
|
||||
|
||||
## التثبيت
|
||||
|
||||
@@ -27,15 +27,15 @@ export EXA_API_KEY='your_exa_api_key'
|
||||
|
||||
## مثال على الاستخدام
|
||||
|
||||
إليك كيفية استخدام `EXASearchTool` مع وكيل CrewAI:
|
||||
إليك كيفية استخدام `ExaSearchTool` مع وكيل CrewAI:
|
||||
|
||||
```python
|
||||
import os
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import EXASearchTool
|
||||
from crewai_tools import ExaSearchTool
|
||||
|
||||
# Initialize the tool
|
||||
exa_tool = EXASearchTool()
|
||||
exa_tool = ExaSearchTool()
|
||||
|
||||
# Create an agent that uses the tool
|
||||
researcher = Agent(
|
||||
@@ -66,11 +66,11 @@ print(result)
|
||||
|
||||
## خيارات التكوين
|
||||
|
||||
تقبل أداة `EXASearchTool` المعاملات التالية أثناء التهيئة:
|
||||
تقبل أداة `ExaSearchTool` المعاملات التالية أثناء التهيئة:
|
||||
|
||||
- `type` (str، اختياري): نوع البحث المستخدم. الافتراضي هو `"auto"`. الخيارات: `"auto"`، `"instant"`، `"fast"`، `"deep"`.
|
||||
- `highlights` (bool أو dict، اختياري): إرجاع مقتطفات موفرة للرموز أكثر صلة بالاستعلام بدلاً من الصفحة الكاملة. الافتراضي هو `True`. مرر قاموسًا مثل `{"max_characters": 4000}` للتكوين، أو `False` للتعطيل.
|
||||
- `content` (bool، اختياري): ما إذا كان يجب تضمين محتوى الصفحة الكامل في النتائج. الافتراضي هو `False`.
|
||||
- `summary` (bool، اختياري): ما إذا كان يجب تضمين ملخصات مولّدة بالذكاء الاصطناعي لكل نتيجة. يتطلب `content=True`. الافتراضي هو `False`.
|
||||
- `api_key` (str، اختياري): مفتاح Exa API الخاص بك. يعود إلى متغير البيئة `EXA_API_KEY` إذا لم يتم تقديمه.
|
||||
- `base_url` (str، اختياري): عنوان URL مخصص لخادم API. يعود إلى متغير البيئة `EXA_BASE_URL` إذا لم يتم تقديمه.
|
||||
|
||||
@@ -86,25 +86,52 @@ print(result)
|
||||
يمكنك تكوين الأداة بمعاملات مخصصة للحصول على نتائج أغنى:
|
||||
|
||||
```python
|
||||
# Get full page content with AI summaries
|
||||
exa_tool = EXASearchTool(
|
||||
content=True,
|
||||
summary=True,
|
||||
# Use 'deep' for thorough, multi-step searches
|
||||
exa_tool = ExaSearchTool(
|
||||
highlights=True,
|
||||
type="deep"
|
||||
)
|
||||
|
||||
# Use it in an agent
|
||||
agent = Agent(
|
||||
role="Deep Researcher",
|
||||
goal="Conduct thorough research with full content and summaries",
|
||||
goal="Conduct thorough research",
|
||||
tools=[exa_tool]
|
||||
)
|
||||
```
|
||||
|
||||
## استخدام Exa عبر MCP
|
||||
|
||||
يمكنك أيضًا ربط وكيلك بخادم MCP المستضاف من Exa. مرّر مفتاح API الخاص بك عبر ترويسة `x-api-key`:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
from crewai.mcp import MCPServerHTTP
|
||||
|
||||
agent = Agent(
|
||||
role="Research Analyst",
|
||||
goal="Find and analyze information on the web",
|
||||
backstory="Expert researcher with access to Exa's tools",
|
||||
mcps=[
|
||||
MCPServerHTTP(
|
||||
url="https://mcp.exa.ai/mcp",
|
||||
headers={"x-api-key": "YOUR_EXA_API_KEY"},
|
||||
),
|
||||
],
|
||||
)
|
||||
```
|
||||
|
||||
احصل على مفتاح API من [لوحة تحكم Exa](https://dashboard.exa.ai/api-keys). لمزيد من المعلومات حول MCP في CrewAI، راجع [نظرة عامة على MCP](/ar/mcp/overview).
|
||||
|
||||
## الميزات
|
||||
|
||||
- **مقتطفات موفرة للرموز**: الحصول على المقتطفات الأكثر صلة من كل نتيجة، باستخدام رموز أقل بكثير من النص الكامل
|
||||
- **البحث الدلالي**: العثور على نتائج بناءً على المعنى، وليس الكلمات المفتاحية فقط
|
||||
- **استرجاع المحتوى الكامل**: الحصول على النص الكامل لصفحات الويب مع نتائج البحث
|
||||
- **ملخصات الذكاء الاصطناعي**: الحصول على ملخصات موجزة مولّدة بالذكاء الاصطناعي لكل نتيجة
|
||||
- **تصفية التاريخ**: تقييد النتائج لفترات زمنية محددة باستخدام فلاتر تاريخ النشر
|
||||
- **تصفية النطاقات**: تقييد عمليات البحث على نطاقات محددة
|
||||
- **تصفية النطاقات**: تقييد عمليات البحث على نطاقات محددة
|
||||
|
||||
## موارد
|
||||
|
||||
- [توثيق Exa](https://exa.ai/docs)
|
||||
- [لوحة تحكم Exa — إدارة مفاتيح API والاستخدام](https://dashboard.exa.ai)
|
||||
2069
docs/docs.json
2069
docs/docs.json
File diff suppressed because it is too large
Load Diff
@@ -26,7 +26,7 @@ Welcome to the CrewAI AMP API reference. This API allows you to programmatically
|
||||
</Step>
|
||||
|
||||
<Step title="Monitor Progress">
|
||||
Use `GET /{kickoff_id}/status` to check execution status and retrieve results.
|
||||
Use `GET /status/{kickoff_id}` to check execution status and retrieve results.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
@@ -65,7 +65,7 @@ Replace `your-crew-name` with your actual crew's URL from the dashboard.
|
||||
|
||||
1. **Discovery**: Call `GET /inputs` to understand what your crew needs
|
||||
2. **Execution**: Submit inputs via `POST /kickoff` to start processing
|
||||
3. **Monitoring**: Poll `GET /{kickoff_id}/status` until completion
|
||||
3. **Monitoring**: Poll `GET /status/{kickoff_id}` until completion
|
||||
4. **Results**: Extract the final output from the completed response
|
||||
|
||||
## Error Handling
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "GET /{kickoff_id}/status"
|
||||
title: "GET /status/{kickoff_id}"
|
||||
description: "Get execution status"
|
||||
openapi: "/enterprise-api.en.yaml GET /{kickoff_id}/status"
|
||||
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
@@ -4,6 +4,152 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="May 07, 2026">
|
||||
## v1.14.5a3
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a3)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Bug Fixes
|
||||
- Fix status endpoint path from /{kickoff_id}/status to /status/{kickoff_id}
|
||||
- Bump gitpython dependency to version >=3.1.47 for security compliance
|
||||
|
||||
### Refactoring
|
||||
- Extract CLI into standalone crewai-cli package
|
||||
|
||||
### Documentation
|
||||
- Update changelog and version for v1.14.5a2
|
||||
|
||||
## Contributors
|
||||
|
||||
@greysonlalonde, @iris-clawd
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="May 04, 2026">
|
||||
## v1.14.5a2
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a2)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Bug Fixes
|
||||
- Fix task output restoration in finally block
|
||||
- Include `thoughts_token_count` in completion tokens
|
||||
- Preserve task outputs across async batch flush
|
||||
- Forward kwargs to loader calls in `CrewAIRagAdapter`
|
||||
- Prevent `result_as_answer` from returning hook-block message as final answer
|
||||
- Prevent `result_as_answer` from returning error as final answer
|
||||
- Use `acall` for output conversion in async paths
|
||||
- Prevent shared LLM stop words mutation across agents
|
||||
- Handle `BaseModel` input in `convert_to_model`
|
||||
|
||||
### Documentation
|
||||
- Document additional environment variables
|
||||
- Update changelog and version for v1.14.5a1
|
||||
|
||||
## Contributors
|
||||
|
||||
@NIK-TIGER-BILL, @greysonlalonde, @lorenzejay, @minasami-pr, @theCyberTech, @wishhyt
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="May 01, 2026">
|
||||
## v1.14.5a1
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a1)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Features
|
||||
- Add `restore_from_state_id` kickoff parameter
|
||||
- Add highlights to ExaSearchTool and rename from EXASearchTool
|
||||
|
||||
### Bug Fixes
|
||||
- Fix missing crewai pin sites in release flow
|
||||
- Ensure skills loading events for traces
|
||||
|
||||
### Documentation
|
||||
- Update changelog and version for v1.14.4
|
||||
|
||||
## Contributors
|
||||
|
||||
@akaKuruma, @github-actions[bot], @greysonlalonde, @lorenzejay, @theishangoswami
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="May 01, 2026">
|
||||
## v1.14.4
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.4)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Features
|
||||
- Add support for custom persistence key in @persist
|
||||
- Add Responses API support for Azure OpenAI provider
|
||||
- Forward credential_scopes to Azure AI Inference client
|
||||
- Add Vertex AI workload identity setup guide
|
||||
- Add Tavily Research and get Research
|
||||
- Add You.com MCP tools for search, research, and content extraction
|
||||
|
||||
### Bug Fixes
|
||||
- Fix fall through when JSON regex match isn't valid JSON
|
||||
- Fix to preserve tool_calls when response also contains text
|
||||
- Fix to forward base_url and api_key to instructor.from_provider
|
||||
- Fix to warn and return empty when native MCP server returns no tools
|
||||
- Fix to use validated messages variable in non-streaming handlers
|
||||
- Fix to guard crew chat description helpers against LLM failures
|
||||
- Fix to reset messages and iterations between invocations
|
||||
- Fix to forward trained-agents file through replay and test
|
||||
- Fix to honor custom trained-agents file at inference
|
||||
- Fix to bind task-only agents to crew for multimodal input_files
|
||||
- Fix to serialize guardrail callables as null for JSON checkpointing
|
||||
- Fix renaming of force_final_answer to avoid self-referential router
|
||||
- Fix bump of litellm for SSTI fix; ignore unfixable pip CVE
|
||||
|
||||
### Documentation
|
||||
- Update changelog and version for v1.14.4a1
|
||||
- Add E2B Sandbox Tools page
|
||||
- Add Daytona sandbox tools documentation
|
||||
|
||||
## Contributors
|
||||
|
||||
@EdwardIrby, @dependabot[bot], @factory-droid-oss, @factory-droid[bot], @greysonlalonde, @kunalk16, @lorenzejay, @lucasgomide, @manisrinivasan2k1, @mattatcha, @vinibrsl
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 29, 2026">
|
||||
## v1.14.4a1
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.4a1)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Bug Fixes
|
||||
- Fix crew chat description helpers against LLM failures.
|
||||
- Reset messages and iterations between invocations in executor.
|
||||
- Forward trained-agents file through replay and test in CLI.
|
||||
- Honor custom trained-agents file at inference in agent.
|
||||
- Bind task-only agents to crew to ensure multimodal input_files reach the LLM.
|
||||
- Serialize guardrail callables as null for JSON checkpointing.
|
||||
- Rename `force_final_answer` in agent_executor to avoid self-referential router.
|
||||
- Bump `litellm` for SSTI fix and ignore unfixable pip CVE.
|
||||
|
||||
### Documentation
|
||||
- Add E2B Sandbox Tools page.
|
||||
- Add Daytona sandbox tools documentation.
|
||||
- Add Vertex AI workload identity setup guide.
|
||||
- Add You.com MCP tools for search, research, and content extraction.
|
||||
- Update changelog and version for v1.14.3.
|
||||
|
||||
## Contributors
|
||||
|
||||
@EdwardIrby, @dependabot[bot], @factory-droid-oss, @factory-droid[bot], @greysonlalonde, @lorenzejay, @manisrinivasan2k1, @mattatcha
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 25, 2026">
|
||||
## v1.14.3
|
||||
|
||||
|
||||
@@ -380,6 +380,42 @@ class AnotherFlow(Flow[dict]):
|
||||
print("Method-level persisted runs:", self.state["runs"])
|
||||
```
|
||||
|
||||
### Forking Persisted State
|
||||
|
||||
`@persist` supports two distinct hydration modes on `kickoff` / `kickoff_async`:
|
||||
|
||||
- `kickoff(inputs={"id": <uuid>})` — **resume**: load the latest snapshot for the supplied UUID and continue writing under the same `flow_uuid`. The history extends.
|
||||
- `kickoff(restore_from_state_id=<uuid>)` — **fork**: load the latest snapshot for the supplied UUID, hydrate the new run's state from it, and assign a fresh `state.id` (auto-generated, or `inputs["id"]` if pinned). The new run's `@persist` writes land under the new `state.id`; the source flow's history is preserved.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
id: str = ""
|
||||
counter: int = 0
|
||||
|
||||
@persist
|
||||
class CounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def step(self):
|
||||
self.state.counter += 1
|
||||
print(f"[id={self.state.id}] counter={self.state.counter}")
|
||||
|
||||
# Run 1: fresh state, counter 0 -> 1, persisted under flow_1.state.id
|
||||
flow_1 = CounterFlow()
|
||||
flow_1.kickoff()
|
||||
|
||||
# Fork: hydrate from flow_1's latest snapshot, but use a NEW state.id
|
||||
flow_2 = CounterFlow()
|
||||
flow_2.kickoff(restore_from_state_id=flow_1.state.id)
|
||||
# flow_2.state.counter starts at 1 (hydrated), then step() bumps it to 2.
|
||||
# flow_2.state.id != flow_1.state.id; flow_1's history is unchanged.
|
||||
```
|
||||
|
||||
If the supplied `restore_from_state_id` does not match any persisted state, the kickoff falls back silently — same as the existing `inputs["id"]` resume not-found behavior. Combining `restore_from_state_id` with `from_checkpoint` raises a `ValueError`; pick one hydration source. Pinning `inputs["id"]` while forking shares a persistence key with another flow — usually you want only `restore_from_state_id`.
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **Unique State Identification**
|
||||
|
||||
@@ -146,6 +146,14 @@ class ProductionFlow(Flow[AppState]):
|
||||
# ...
|
||||
```
|
||||
|
||||
By default, `@persist` resumes a flow when `kickoff(inputs={"id": <uuid>})` is supplied, extending the same `flow_uuid` history. To **fork** a persisted flow into a new lineage — hydrate state from a previous run but write under a fresh `state.id` — pass `restore_from_state_id`:
|
||||
|
||||
```python
|
||||
flow.kickoff(restore_from_state_id="<previous-run-state-id>")
|
||||
```
|
||||
|
||||
The new run gets a fresh `state.id` (auto-generated, or `inputs["id"]` if pinned) so its `@persist` writes don't extend the source's history. Combining with `from_checkpoint` raises a `ValueError`; pick one hydration source.
|
||||
|
||||
## Summary
|
||||
|
||||
- **Start with a Flow.**
|
||||
|
||||
@@ -133,7 +133,7 @@ Here is a list of the available tools and their descriptions:
|
||||
| **DirectorySearchTool** | A RAG tool for searching within directories, useful for navigating through file systems. |
|
||||
| **DOCXSearchTool** | A RAG tool aimed at searching within DOCX documents, ideal for processing Word files. |
|
||||
| **DirectoryReadTool** | Facilitates reading and processing of directory structures and their contents. |
|
||||
| **EXASearchTool** | A tool designed for performing exhaustive searches across various data sources. |
|
||||
| **ExaSearchTool** | Search the web with Exa, the fastest and most accurate web search API. Supports token-efficient highlights and full page content. |
|
||||
| **FileReadTool** | Enables reading and extracting data from files, supporting various file formats. |
|
||||
| **FirecrawlSearchTool** | A tool to search webpages using Firecrawl and return the results. |
|
||||
| **FirecrawlCrawlWebsiteTool** | A tool for crawling webpages using Firecrawl. |
|
||||
|
||||
@@ -346,6 +346,48 @@ class SelectivePersistFlow(Flow):
|
||||
return f"Complete with count {self.state['count']}"
|
||||
```
|
||||
|
||||
#### Forking Persisted State
|
||||
|
||||
`@persist` supports two distinct hydration modes on `kickoff` / `kickoff_async`. Use **resume** (`inputs["id"]`) to continue the same lineage; use **fork** (`restore_from_state_id`) to start a new lineage seeded from a snapshot:
|
||||
|
||||
| | `state.id` after kickoff | `@persist` writes land under |
|
||||
|---|---|---|
|
||||
| `inputs["id"]` (resume) | supplied id | supplied id (extends history) |
|
||||
| `restore_from_state_id` (fork) | fresh id, or `inputs["id"]` if pinned | new id (source preserved) |
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
id: str = ""
|
||||
counter: int = 0
|
||||
|
||||
@persist
|
||||
class CounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def step(self):
|
||||
self.state.counter += 1
|
||||
|
||||
# Run 1: fresh state, counter 0 -> 1
|
||||
flow_1 = CounterFlow()
|
||||
flow_1.kickoff()
|
||||
|
||||
# Fork: hydrate from flow_1's latest snapshot, but write under a NEW state.id
|
||||
flow_2 = CounterFlow()
|
||||
flow_2.kickoff(restore_from_state_id=flow_1.state.id)
|
||||
# flow_2 starts with counter=1 (hydrated), then step() bumps it to 2.
|
||||
# flow_1's flow_uuid history is unchanged.
|
||||
```
|
||||
|
||||
Behavior notes:
|
||||
|
||||
- `restore_from_state_id` not found in persistence → the kickoff falls back silently to default behavior (mirrors the existing `inputs["id"]` resume not-found behavior). No exception is raised.
|
||||
- Combining `restore_from_state_id` with `from_checkpoint` raises a `ValueError` — they target different state systems (`@persist` vs. Checkpointing) and cannot be combined.
|
||||
- `restore_from_state_id=None` (default) is byte-identical to a kickoff without the parameter.
|
||||
- Pinning `inputs["id"]` while forking means the new run shares a persistence key with another flow — usually you want only `restore_from_state_id`.
|
||||
|
||||
|
||||
## Advanced State Patterns
|
||||
|
||||
|
||||
359
docs/en/guides/migration/upgrading-crewai.mdx
Normal file
359
docs/en/guides/migration/upgrading-crewai.mdx
Normal file
@@ -0,0 +1,359 @@
|
||||
---
|
||||
title: "Upgrading & Migrating CrewAI"
|
||||
description: "How to upgrade CrewAI in your project, migrate around breaking changes, and move standalone Crews onto Flows."
|
||||
icon: "arrow-up-circle"
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
CrewAI moves quickly. New releases regularly tighten import paths, change defaults on `Agent`, `Crew`, and `Task`, and introduce new orchestration primitives like `Flow` and checkpointing. This guide collects the practical steps needed to:
|
||||
|
||||
- Upgrade the global `crewai` CLI and your project's pinned dependency
|
||||
- Adapt to breaking changes in imports and parameters
|
||||
- Migrate a standalone `Crew` to a typed `Flow`
|
||||
- Avoid the gotchas that show up the first time you re-run an upgraded project
|
||||
|
||||
If you're starting fresh, see [Installation](/en/installation). If you're coming from another framework, see [Migrating from LangGraph](/en/guides/migration/migrating-from-langgraph).
|
||||
|
||||
---
|
||||
|
||||
## The Two Things You Might Want to Upgrade
|
||||
|
||||
CrewAI lives in two places on your machine, and they upgrade independently:
|
||||
|
||||
| What | How it's installed | How to upgrade |
|---|---|---|
| The **global `crewai` CLI** | `uv tool install crewai` | `uv tool install crewai --upgrade` |
| The **project venv** (what your code runs) | `crewai install` / `uv sync` | `uv add "crewai[...]>=X.Y.Z"` then `crewai install` |
|
||||
|
||||
These can — and often do — get out of sync. Running `crewai --version` tells you the CLI version. Running `uv pip show crewai` inside your project tells you the venv version. If they differ, that's normal; what matters for your running code is the venv version.
|
||||
|
||||
## Why `crewai install` Alone Doesn't Upgrade
|
||||
|
||||
`crewai install` is a thin wrapper around `uv sync`. It installs exactly what the current `uv.lock` file says — it does **not** bump any version constraints.
|
||||
|
||||
If your `pyproject.toml` says `crewai>=1.11.1` and the lock file resolved to `1.11.1`, running `crewai install` will keep you on `1.11.1` forever, even if `1.14.4` is available.
|
||||
|
||||
To actually upgrade, you need to:
|
||||
|
||||
1. Update the version constraint in `pyproject.toml`
|
||||
2. Re-solve the lock file
|
||||
3. Sync the venv
|
||||
|
||||
`uv add` does all three in one shot.
|
||||
|
||||
## How to Upgrade Your Project
|
||||
|
||||
```bash
|
||||
# Bump the constraint and re-lock in one command
|
||||
uv add "crewai[tools]>=1.14.4"
|
||||
|
||||
# Sync the venv (crewai install calls uv sync under the hood)
|
||||
crewai install
|
||||
|
||||
# Verify
|
||||
uv pip show crewai
|
||||
# → Version: 1.14.4
|
||||
```
|
||||
|
||||
Replace `[tools]` with whatever extras your project uses (e.g. `[tools,anthropic]`). Check your `pyproject.toml` `dependencies` list if you're unsure.
|
||||
|
||||
<Note>
|
||||
`uv add` updates both `pyproject.toml` **and** `uv.lock` atomically. If you edit `pyproject.toml` manually, you still need to run `uv lock --upgrade-package crewai` to re-solve the lock file before `crewai install` will pick up the new version.
|
||||
</Note>
|
||||
|
||||
## Upgrading the Global CLI
|
||||
|
||||
The global CLI is separate from your project. Upgrade it with:
|
||||
|
||||
```bash
|
||||
uv tool install crewai --upgrade
|
||||
```
|
||||
|
||||
If your shell warns about `PATH` after the upgrade, refresh it:
|
||||
|
||||
```bash
|
||||
uv tool update-shell
|
||||
```
|
||||
|
||||
This does **not** touch your project's venv — you still need `uv add` + `crewai install` inside the project.
|
||||
|
||||
## Verify Both Are in Sync
|
||||
|
||||
```bash
|
||||
# Global CLI version
|
||||
crewai --version
|
||||
|
||||
# Project venv version
|
||||
uv pip show crewai | grep Version
|
||||
```
|
||||
|
||||
They don't need to match — but your project venv version is what matters for runtime behavior.
|
||||
|
||||
<Note>
|
||||
CrewAI requires `Python >=3.10, <3.14`. If `uv` was installed against an older interpreter, recreate the project venv with a supported Python before running `crewai install`.
|
||||
</Note>
|
||||
|
||||
---
|
||||
|
||||
## Breaking Changes & Migration Notes
|
||||
|
||||
Most upgrades only require small adjustments. The areas below are the ones that break silently or with confusing tracebacks.
|
||||
|
||||
### Import paths: tools and `BaseTool`
|
||||
|
||||
The canonical import location for tools is `crewai.tools`. Older paths still surface in tutorials but should be updated.
|
||||
|
||||
```python
|
||||
# Before
|
||||
from crewai_tools import BaseTool
|
||||
from crewai.agents.tools import tool
|
||||
|
||||
# After
|
||||
from crewai.tools import BaseTool, tool
|
||||
```
|
||||
|
||||
The `@tool` decorator and `BaseTool` subclass both live in `crewai.tools`. `AgentFinish` and other internal-agent symbols are no longer part of the public surface — if you were importing them, switch to event listeners or `Task` callbacks instead.
|
||||
|
||||
### `Agent` parameter changes
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
|
||||
agent = Agent(
|
||||
role="Researcher",
|
||||
goal="Find authoritative sources on {topic}",
|
||||
backstory="You are a careful, source-driven researcher.",
|
||||
llm="gpt-4o-mini", # string model name OR an LLM object
|
||||
verbose=True, # bool, not an int level
|
||||
max_iter=15, # default has changed across versions — set explicitly
|
||||
allow_delegation=False,
|
||||
)
|
||||
```
|
||||
|
||||
- `llm` accepts either a string model name (resolved via the configured provider) or an `LLM` object for fine-grained control.
|
||||
- `verbose` is a plain `bool`. Passing an integer no longer toggles log levels.
|
||||
- `max_iter` defaults have shifted between releases. If your agent silently stops looping after the first tool call, set `max_iter` explicitly.
|
||||
|
||||
### `Crew` parameters
|
||||
|
||||
```python
|
||||
from crewai import Crew, Process
|
||||
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
process=Process.sequential, # or Process.hierarchical
|
||||
memory=True,
|
||||
cache=True,
|
||||
embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
|
||||
)
|
||||
```
|
||||
|
||||
- `process=Process.hierarchical` requires either `manager_llm=` or `manager_agent=`. Without one, kickoff raises at validation time.
|
||||
- `memory=True` with a non-default embedding provider needs an `embedder` dict — see [Memory & embedder config](#memory-embedder-config) below.
|
||||
|
||||
### `Task` structured output
|
||||
|
||||
Use `output_pydantic`, `output_json`, or `output_file` to coerce a task's result into a typed shape:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel
|
||||
from crewai import Task
|
||||
|
||||
class Article(BaseModel):
|
||||
title: str
|
||||
body: str
|
||||
|
||||
write = Task(
|
||||
description="Write an article about {topic}",
|
||||
expected_output="A short article with a title and body",
|
||||
agent=writer,
|
||||
output_pydantic=Article, # the class, NOT an instance
|
||||
output_file="output/article.md",
|
||||
)
|
||||
```
|
||||
|
||||
`output_pydantic` takes the **class** itself. Passing `Article(title="", body="")` is a common mistake and fails with a confusing validation error.
|
||||
|
||||
### Memory & embedder config
|
||||
|
||||
If `memory=True` and you're not using the default OpenAI embeddings, you must pass an `embedder`:
|
||||
|
||||
```python
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
memory=True,
|
||||
embedder={
|
||||
"provider": "ollama",
|
||||
"config": {"model": "nomic-embed-text"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
Set the relevant provider credentials (`OPENAI_API_KEY`, `OLLAMA_HOST`, etc.) in your `.env` file. Memory storage paths are project-local by default — delete the project's memory directory if you change embedders, since dimensions don't mix.
|
||||
|
||||
---
|
||||
|
||||
## Migrating a Crew to a Flow
|
||||
|
||||
`Crew` is the right primitive when you have a single team of agents executing one workflow. Once you need branching, multiple crews, or persistent state across runs, reach for `Flow`.
|
||||
|
||||
### When to use Flows vs standalone Crews
|
||||
|
||||
| Situation | Use |
| --- | --- |
| Single team, single linear/hierarchical workflow | `Crew` |
| Conditional branches, retries, routing on results | `Flow` |
| Multiple specialized crews chained together | `Flow` |
| State that must persist between steps or runs | `Flow` (with checkpointing) |
| You want typed, IDE-friendly state | `Flow[MyState]` with a Pydantic model |
|
||||
|
||||
If you only need one of: branching, multi-crew, or persistent state — start with a `Flow`. The boilerplate is small and you won't have to rewrite later.
|
||||
|
||||
### Step-by-step migration
|
||||
|
||||
**Before — standalone crew:**
|
||||
|
||||
```python
|
||||
from crewai import Crew
|
||||
|
||||
crew = Crew(agents=[researcher, writer], tasks=[research_task, write_task])
|
||||
result = crew.kickoff(inputs={"topic": "vector databases"})
|
||||
print(result)
|
||||
```
|
||||
|
||||
**After — crew inside a typed Flow:**
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MyState(BaseModel):
|
||||
input_data: str = ""
|
||||
result: str = ""
|
||||
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def run_crew(self):
|
||||
result = MyCrew().crew().kickoff(inputs={"topic": self.state.input_data})
|
||||
self.state.result = str(result)
|
||||
return self.state.result
|
||||
|
||||
flow = MyFlow()
|
||||
flow.kickoff(inputs={"input_data": "vector databases"})
|
||||
```
|
||||
|
||||
What changed:
|
||||
|
||||
1. The crew is constructed inside a method, not at module load.
|
||||
2. Inputs flow through `self.state` instead of being threaded as kwargs.
|
||||
3. The entry point is marked with `@start()`. Subsequent steps use `@listen(run_crew)` to chain.
|
||||
|
||||
### Structured state setup
|
||||
|
||||
Prefer typed state (`Flow[MyState]`) over the untyped dict variant. You get autocompletion, validation at the boundary, and serializable state for checkpointing:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
class ResearchState(BaseModel):
|
||||
topic: str = ""
|
||||
sources: list[str] = Field(default_factory=list)
|
||||
draft: str = ""
|
||||
final: str = ""
|
||||
```
|
||||
|
||||
Untyped state (`Flow()` with no generic) still works, but you lose static checks and checkpointing fidelity.
|
||||
|
||||
### Multi-crew Flow pattern
|
||||
|
||||
Chaining two crews — research, then writing — is the canonical reason to adopt Flows:
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen, router
|
||||
from pydantic import BaseModel
|
||||
|
||||
class PipelineState(BaseModel):
|
||||
topic: str = ""
|
||||
research: str = ""
|
||||
article: str = ""
|
||||
|
||||
class ContentPipeline(Flow[PipelineState]):
|
||||
@start()
|
||||
def research(self):
|
||||
out = ResearchCrew().crew().kickoff(inputs={"topic": self.state.topic})
|
||||
self.state.research = str(out)
|
||||
return self.state.research
|
||||
|
||||
@router(research)
|
||||
def gate(self):
|
||||
return "write" if len(self.state.research) > 200 else "abort"
|
||||
|
||||
@listen("write")
|
||||
def write(self):
|
||||
out = WritingCrew().crew().kickoff(
|
||||
inputs={"topic": self.state.topic, "notes": self.state.research}
|
||||
)
|
||||
self.state.article = str(out)
|
||||
return self.state.article
|
||||
|
||||
@listen("abort")
|
||||
def bail(self):
|
||||
self.state.article = "Insufficient research."
|
||||
return self.state.article
|
||||
|
||||
ContentPipeline().kickoff(inputs={"topic": "vector databases"})
|
||||
```
|
||||
|
||||
`@start()`, `@listen()`, and `@router()` are the three decorators you'll use 95% of the time. See [Flows](/en/concepts/flows) for the full reference.
|
||||
|
||||
---
|
||||
|
||||
## Common Gotchas
|
||||
|
||||
1. **Running `crewai install` and expecting an upgrade.** `crewai install` syncs against the existing `uv.lock`. To bump versions, run `uv add "crewai[tools]>=X.Y.Z"` first, then `crewai install`.
|
||||
2. **The constraint is a floor, not a pin.** `crewai>=1.11.1` means "any version at or above 1.11.1." `uv` only re-resolves when you explicitly run `uv add` or `uv lock --upgrade-package crewai`.
|
||||
3. **Extras dropped during re-lock.** If you run `uv add "crewai>=1.14.4"` without extras, `uv` may drop `[tools]` from the resolved set. Always include the extras you need: `uv add "crewai[tools]>=1.14.4"`.
|
||||
4. **Forgetting to commit `uv.lock`.** After bumping with `uv add`, commit the updated `uv.lock` so teammates get the same versions.
|
||||
5. **`pip install` instead of `uv tool install`.** Mixing pip-installed and uv-installed `crewai` leads to two binaries on `PATH` and confusing version skew. Pick one — the supported one is `uv`.
|
||||
6. **Passing a Pydantic instance to `output_pydantic`.** It expects the class. `output_pydantic=Article`, not `output_pydantic=Article(...)`.
|
||||
7. **Hierarchical process with no manager.** `process=Process.hierarchical` requires `manager_llm=` or `manager_agent=`.
|
||||
8. **Memory enabled with the wrong embedder.** Switching embedders without clearing the on-disk memory directory causes dimension mismatches. Delete the project's memory store after changing providers.
|
||||
9. **Dict state when you wanted typed state.** `Flow()` with no generic gives you a dict. For type checking and clean checkpointing, use `Flow[MyState]` with a `BaseModel`.
|
||||
10. **Stale tool imports.** `from crewai_tools import BaseTool` works in some versions but is not the canonical path. Standardize on `from crewai.tools import BaseTool, tool`.
|
||||
11. **Python version drift.** CrewAI requires `>=3.10, <3.14`. `uv` will happily build a venv against 3.14+ if it's the default; pin the Python version in `pyproject.toml`.
|
||||
12. **`verbose=2` and similar integer flags.** `verbose` is a `bool`. Use event listeners for finer-grained logging.
|
||||
13. **Calling `crew.kickoff()` from inside a Flow without wrapping in `inputs={}`.** Flows pass state, not kwargs. The crew still expects `inputs={...}`.
|
||||
|
||||
---
|
||||
|
||||
## Checkpointing
|
||||
|
||||
Checkpointing is a newer addition that persists agent, crew, and flow state between runs. It lets long-running workflows resume after a crash, a manual stop, or a deploy.
|
||||
|
||||
```python
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
checkpoint=True,
|
||||
)
|
||||
```
|
||||
|
||||
The same flag is supported on `Flow` and `Agent`. State is written to the project's local store and replayed on the next `kickoff()` with the same identifier.
|
||||
|
||||
<Note>
|
||||
Checkpointing is in early release. APIs around resume semantics, storage backends, and identifiers may still shift between minor versions — pin your `crewai` version if you depend on it in production.
|
||||
</Note>
|
||||
|
||||
See [Checkpointing](/en/concepts/checkpointing) for the full feature reference.
|
||||
|
||||
---
|
||||
|
||||
## Getting Help
|
||||
|
||||
- **Changelog** — every breaking change is noted in the [release notes](/en/changelog).
|
||||
- **GitHub Issues** — open one at [github.com/crewAIInc/crewAI/issues](https://github.com/crewAIInc/crewAI/issues) with a minimal repro and your `crewai --version` output.
|
||||
- **Discord** — the CrewAI community Discord is the fastest path to debugging help: [community.crewai.com](https://community.crewai.com).
|
||||
- **Migration guides** — if you're moving from another framework, start at [Migrating from LangGraph](/en/guides/migration/migrating-from-langgraph).
|
||||
@@ -106,6 +106,9 @@ If you haven't installed `uv` yet, follow **step 1** to quickly get it set up on
|
||||
```shell
|
||||
uv tool install crewai --upgrade
|
||||
```
|
||||
<Note>
|
||||
This upgrades the **global `crewai` CLI tool** only. To upgrade the `crewai` version inside your project's virtual environment, see [Upgrading CrewAI in a project](/en/guides/migration/upgrading-crewai).
|
||||
</Note>
|
||||
<Check>Installation successful! You're ready to create your first crew! 🎉</Check>
|
||||
</Step>
|
||||
|
||||
|
||||
180
docs/en/tools/ai-ml/daytona.mdx
Normal file
180
docs/en/tools/ai-ml/daytona.mdx
Normal file
@@ -0,0 +1,180 @@
|
||||
---
|
||||
title: Daytona Sandbox Tools
|
||||
description: Run shell commands, execute Python, and manage files inside isolated [Daytona](https://www.daytona.io/) sandboxes.
|
||||
icon: box
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
# Daytona Sandbox Tools
|
||||
|
||||
## Description
|
||||
|
||||
The Daytona sandbox tools give CrewAI agents access to isolated, ephemeral compute environments powered by [Daytona](https://www.daytona.io/). Three tools are available so you can give an agent exactly the capabilities it needs:
|
||||
|
||||
- **`DaytonaExecTool`** — run any shell command inside a sandbox.
|
||||
- **`DaytonaPythonTool`** — execute a block of Python source code inside a sandbox.
|
||||
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox.
|
||||
|
||||
All three tools share the same sandbox lifecycle controls, so you can mix and match them while keeping state in a single persistent sandbox.
|
||||
|
||||
## Installation
|
||||
|
||||
```shell
|
||||
uv add "crewai-tools[daytona]"
|
||||
# or
|
||||
pip install "crewai-tools[daytona]"
|
||||
```
|
||||
|
||||
Set your API key:
|
||||
|
||||
```shell
|
||||
export DAYTONA_API_KEY="your-api-key"
|
||||
```
|
||||
|
||||
`DAYTONA_API_URL` and `DAYTONA_TARGET` are also respected if set.
|
||||
|
||||
## Sandbox Lifecycle
|
||||
|
||||
All three tools inherit lifecycle controls from `DaytonaBaseTool`:
|
||||
|
||||
| Mode | How to enable | Sandbox created | Sandbox deleted |
|------|--------------|-----------------|-----------------|
| **Ephemeral** (default) | `persistent=False` (default) | On every `_run` call | At the end of that same call |
| **Persistent** | `persistent=True` | Lazily on first use | At process exit (via `atexit`), or manually via `tool.close()` |
| **Attach** | `sandbox_id="<id>"` | Never — attaches to an existing sandbox | Never — the tool will not delete a sandbox it did not create |
|
||||
|
||||
Ephemeral mode is the safe default: nothing leaks if the agent forgets to clean up. Use persistent mode when you want filesystem state or installed packages to carry across multiple tool calls — this is typical when pairing `DaytonaFileTool` with `DaytonaExecTool`.
|
||||
|
||||
## Examples
|
||||
|
||||
### One-shot Python execution (ephemeral)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaPythonTool
|
||||
|
||||
tool = DaytonaPythonTool()
|
||||
result = tool.run(code="print(sum(range(10)))")
|
||||
print(result)
|
||||
# {"exit_code": 0, "result": "45\n", "artifacts": None}
|
||||
```
|
||||
|
||||
### Multi-step shell session (persistent)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool, DaytonaFileTool
|
||||
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
# Install a package, then write and run a script — all in the same sandbox
|
||||
exec_tool.run(command="pip install httpx -q")
|
||||
file_tool.run(action="write", path="/workspace/fetch.py", content="import httpx; print(httpx.get('https://httpbin.org/get').status_code)")
|
||||
exec_tool.run(command="python /workspace/fetch.py")
|
||||
```
|
||||
|
||||
<Note>
|
||||
Each tool instance maintains its own persistent sandbox. To share **one** sandbox across two tools, create the first tool, grab its sandbox id via `tool._persistent_sandbox.id`, and pass it to the second tool via `sandbox_id=...`.
|
||||
</Note>
|
||||
|
||||
### Attach to an existing sandbox
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(sandbox_id="my-long-lived-sandbox")
|
||||
result = tool.run(command="ls /workspace")
|
||||
```
|
||||
|
||||
### Custom sandbox parameters
|
||||
|
||||
Pass Daytona's `CreateSandboxFromSnapshotParams` kwargs via `create_params`:
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(
|
||||
persistent=True,
|
||||
create_params={
|
||||
"language": "python",
|
||||
"env_vars": {"MY_FLAG": "1"},
|
||||
"labels": {"owner": "crewai-agent"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
### Agent integration
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import DaytonaExecTool, DaytonaPythonTool, DaytonaFileTool
|
||||
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
python_tool = DaytonaPythonTool(persistent=True)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
coder = Agent(
|
||||
role="Sandbox Engineer",
|
||||
goal="Write and run code in an isolated environment",
|
||||
backstory="An engineer who uses Daytona sandboxes to safely execute code and manage files.",
|
||||
tools=[exec_tool, python_tool, file_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Write a Python script that prints the first 10 Fibonacci numbers, save it to /workspace/fib.py, and run it.",
|
||||
expected_output="The first 10 Fibonacci numbers printed to stdout.",
|
||||
agent=coder,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[coder], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
### Shared (`DaytonaBaseTool`)
|
||||
|
||||
All three tools accept these parameters at initialization:
|
||||
|
||||
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `api_key` | `str \| None` | `$DAYTONA_API_KEY` | Daytona API key. Falls back to the `DAYTONA_API_KEY` env var. |
| `api_url` | `str \| None` | `$DAYTONA_API_URL` | Daytona API URL override. |
| `target` | `str \| None` | `$DAYTONA_TARGET` | Daytona target region. |
| `persistent` | `bool` | `False` | Reuse one sandbox across all calls and delete it at process exit. |
| `sandbox_id` | `str \| None` | `None` | Attach to an existing sandbox by id or name. |
| `create_params` | `dict \| None` | `None` | Extra kwargs forwarded to `CreateSandboxFromSnapshotParams` (e.g. `language`, `env_vars`, `labels`). |
| `sandbox_timeout` | `float` | `60.0` | Timeout in seconds for sandbox create/delete operations. |
|
||||
|
||||
### `DaytonaExecTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `command` | `str` | ✓ | Shell command to execute. |
| `cwd` | `str \| None` | | Working directory inside the sandbox. |
| `env` | `dict[str, str] \| None` | | Extra environment variables for this command. |
| `timeout` | `int \| None` | | Maximum seconds to wait for the command. |
|
||||
|
||||
### `DaytonaPythonTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `code` | `str` | ✓ | Python source code to execute. |
| `argv` | `list[str] \| None` | | Argument vector forwarded via `CodeRunParams`. |
| `env` | `dict[str, str] \| None` | | Environment variables forwarded via `CodeRunParams`. |
| `timeout` | `int \| None` | | Maximum seconds to wait for execution. |
|
||||
|
||||
### `DaytonaFileTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`. |
| `path` | `str` | ✓ | Absolute path inside the sandbox. |
| `content` | `str \| None` | | Content to write or append. Required for `append`. |
| `binary` | `bool` | | If `True`, `content` is base64 on write; returns base64 on read. |
| `recursive` | `bool` | | For `delete`: remove directories recursively. |
| `mode` | `str` | | For `mkdir`: octal permission string (default `"0755"`). |
|
||||
|
||||
<Tip>
|
||||
For files larger than a few KB, create the file first with `action="write"` and empty content, then send the body via multiple `action="append"` calls of ~4 KB each to stay within tool-call payload limits.
|
||||
</Tip>
|
||||
196
docs/en/tools/ai-ml/e2bsandboxtools.mdx
Normal file
196
docs/en/tools/ai-ml/e2bsandboxtools.mdx
Normal file
@@ -0,0 +1,196 @@
|
||||
---
|
||||
title: E2B Sandbox Tools
|
||||
description: The `E2BExecTool`, `E2BPythonTool`, and `E2BFileTool` give CrewAI agents shell, Python, and filesystem access inside isolated, ephemeral E2B remote sandboxes.
|
||||
icon: box
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
# E2B Sandbox Tools
|
||||
|
||||
## Description
|
||||
|
||||
The E2B sandbox tools let CrewAI agents run code in isolated, ephemeral VMs hosted by [E2B](https://e2b.dev). Three tools share a common base class and connection model:
|
||||
|
||||
- `E2BExecTool` — execute shell commands.
|
||||
- `E2BPythonTool` — execute Python in a Jupyter-style code interpreter (returns stdout, stderr, and rich results such as charts, dataframes, HTML, SVG, and PNG).
|
||||
- `E2BFileTool` — perform filesystem operations (read, write, append, list, delete, mkdir, info, exists), including binary content via base64.
|
||||
|
||||
Use these tools when you want to give an agent the ability to run arbitrary code or perform file operations without exposing the host environment.
|
||||
|
||||
## Installation
|
||||
|
||||
Install the `e2b` extra for `crewai-tools` and set your E2B API key:
|
||||
|
||||
```shell
|
||||
uv add "crewai-tools[e2b]"
|
||||
```
|
||||
|
||||
```shell
|
||||
export E2B_API_KEY="e2b_..."
|
||||
```
|
||||
|
||||
## Tools
|
||||
|
||||
### `E2BExecTool`
|
||||
|
||||
Runs shell commands inside the sandbox via `sandbox.commands.run`.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `command: str` — Required. The shell command to execute.
|
||||
- `cwd: str | None` — Optional. Working directory for the command.
|
||||
- `envs: dict[str, str] | None` — Optional. Per-call environment variables.
|
||||
- `timeout: float | None` — Optional. Timeout in seconds.
|
||||
|
||||
**Returns**
|
||||
|
||||
```json
|
||||
{
|
||||
"exit_code": 0,
|
||||
"stdout": "...",
|
||||
"stderr": "...",
|
||||
"error": null
|
||||
}
|
||||
```
|
||||
|
||||
### `E2BPythonTool`
|
||||
|
||||
Runs Python code in a Jupyter-style code interpreter using the `e2b_code_interpreter` SDK.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `code: str` — Required. The code to execute.
|
||||
- `language: str | None` — Optional. Language identifier (defaults to Python).
|
||||
- `envs: dict[str, str] | None` — Optional. Per-call environment variables.
|
||||
- `timeout: float | None` — Optional. Timeout in seconds.
|
||||
|
||||
**Returns**
|
||||
|
||||
```json
|
||||
{
|
||||
"text": "...",
|
||||
"stdout": "...",
|
||||
"stderr": "...",
|
||||
"error": null,
|
||||
"results": [],
|
||||
"execution_count": 1
|
||||
}
|
||||
```
|
||||
|
||||
`results` can include charts, dataframes, HTML, SVG, and PNG output produced by the cell.
|
||||
|
||||
### `E2BFileTool`
|
||||
|
||||
Performs filesystem operations inside the sandbox. Auto-creates parent directories on write and handles binary content via base64.
|
||||
|
||||
**Arguments**
|
||||
|
||||
- `action: "read" | "write" | "append" | "list" | "delete" | "mkdir" | "info" | "exists"` — Required.
|
||||
- `path: str` — Required. Target path inside the sandbox.
|
||||
- `content: str | None` — Optional. Content for `write` / `append`. Base64-encoded when `binary=True`.
|
||||
- `binary: bool` — Optional. Treat `content` as binary (base64). Default `False`.
|
||||
- `depth: int` — Optional. Recursion depth for `list`.
|
||||
|
||||
## Shared parameters (`E2BBaseTool`)
|
||||
|
||||
All three tools accept the same connection / lifecycle parameters:
|
||||
|
||||
- `api_key: SecretStr | None` — Falls back to the `E2B_API_KEY` environment variable.
|
||||
- `domain: str | None` — Falls back to the `E2B_DOMAIN` environment variable.
|
||||
- `template: str | None` — Custom sandbox template or snapshot.
|
||||
- `persistent: bool` — Default `False`. See [Sandbox modes](#sandbox-modes).
|
||||
- `sandbox_id: str | None` — Attach to an existing sandbox.
|
||||
- `sandbox_timeout: int` — Idle timeout in seconds. Default `300`.
|
||||
- `envs: dict[str, str] | None` — Environment variables injected at sandbox creation.
|
||||
- `metadata: dict[str, str] | None` — Metadata attached at sandbox creation.
|
||||
|
||||
## Sandbox modes
|
||||
|
||||
| Mode | How to activate | Sandbox lifetime |
| --- | --- | --- |
| Ephemeral (default) | `persistent=False` | A new sandbox is created and killed for every `_run` call. |
| Persistent | `persistent=True` | A sandbox is lazily created on the first call and killed at process exit via `atexit`. |
| Attach | `sandbox_id="sbx_..."` | The tool attaches to an existing sandbox and never kills it. |
|
||||
|
||||
Use ephemeral mode for one-off tasks — it minimizes blast radius. Use persistent mode when an agent needs to keep state across multiple tool calls (e.g. a shell session plus filesystem ops on the same files). Use attach mode when an outside system manages the sandbox lifecycle.
|
||||
|
||||
## Examples
|
||||
|
||||
### One-shot Python (ephemeral)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import E2BPythonTool
|
||||
|
||||
tool = E2BPythonTool()
|
||||
result = tool.run(code="print(sum(range(10)))")
|
||||
```
|
||||
|
||||
### Persistent shell + filesystem session
|
||||
|
||||
```python Code
|
||||
from crewai_tools import E2BExecTool, E2BFileTool
|
||||
|
||||
exec_tool = E2BExecTool(persistent=True)
|
||||
file_tool = E2BFileTool(persistent=True)
|
||||
```
|
||||
|
||||
When the process exits, both tools clean up the sandbox via `atexit`.
|
||||
|
||||
### Attach to an existing sandbox
|
||||
|
||||
```python Code
|
||||
from crewai_tools import E2BExecTool
|
||||
|
||||
tool = E2BExecTool(sandbox_id="sbx_...")
|
||||
```
|
||||
|
||||
The tool will not kill a sandbox it attached to.
|
||||
|
||||
### Custom template, timeout, env vars, and metadata
|
||||
|
||||
```python Code
|
||||
from crewai_tools import E2BExecTool
|
||||
|
||||
tool = E2BExecTool(
|
||||
persistent=True,
|
||||
template="my-custom-template",
|
||||
sandbox_timeout=600,
|
||||
envs={"MY_FLAG": "1"},
|
||||
metadata={"owner": "crewai-agent"},
|
||||
)
|
||||
```
|
||||
|
||||
### Full agent example
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai_tools import E2BPythonTool
|
||||
|
||||
python_tool = E2BPythonTool()
|
||||
|
||||
analyst = Agent(
|
||||
role="Data Analyst",
|
||||
goal="Run Python in a sandbox to answer analytical questions",
|
||||
backstory="An analyst who delegates computation to an isolated E2B sandbox.",
|
||||
tools=[python_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Compute the mean of [1, 2, 3, 4, 5] and return the result.",
|
||||
expected_output="The numerical mean.",
|
||||
agent=analyst,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[analyst], tasks=[task], process=Process.sequential)
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
## Security considerations
|
||||
|
||||
These tools give agents arbitrary shell, Python, and filesystem access inside the sandbox. The sandbox isolates execution from your host, but you should still treat tool output as untrusted and design with prompt-injection in mind:
|
||||
|
||||
- Ephemeral mode is the primary blast-radius control — every `_run` call gets a fresh VM. Prefer it unless persistent state is required.
|
||||
- Persistent and attached sandboxes accumulate state across calls. Anything seeded into them (credentials, tokens, files) is reachable by every subsequent tool invocation, including ones whose inputs were influenced by untrusted content.
|
||||
- Avoid injecting secrets into long-lived sandboxes that an agent can read or exfiltrate. Use short-lived credentials and the smallest scope necessary.
|
||||
- `sandbox_timeout` bounds idle time but does not cap total execution. Set it to the smallest value that fits your workload.
|
||||
@@ -1,11 +1,11 @@
|
||||
---
|
||||
title: "Exa Search Tool"
|
||||
description: "Search the web using the Exa Search API to find the most relevant results for any query, with options for full page content, highlights, and summaries."
|
||||
description: "Search the web with Exa, the fastest and most accurate web search API. Get token-efficient highlights and full page content."
|
||||
icon: "magnifying-glass"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
The `EXASearchTool` lets CrewAI agents search the web using the [Exa](https://exa.ai/) search API. It returns the most relevant results for any query, with options for full page content and AI-generated summaries.
|
||||
The `ExaSearchTool` lets CrewAI agents search the web using [Exa](https://exa.ai/), the fastest and most accurate web search API. It returns the most relevant results for any query, with options for token-efficient highlights and full page content.
|
||||
|
||||
## Installation
|
||||
|
||||
@@ -27,15 +27,15 @@ Get an API key from the [Exa dashboard](https://dashboard.exa.ai/api-keys).
|
||||
|
||||
## Example Usage
|
||||
|
||||
Here's how to use the `EXASearchTool` within a CrewAI agent:
|
||||
Here's how to use the `ExaSearchTool` within a CrewAI agent:
|
||||
|
||||
```python
|
||||
import os
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import EXASearchTool
|
||||
from crewai_tools import ExaSearchTool
|
||||
|
||||
# Initialize the tool
|
||||
exa_tool = EXASearchTool()
|
||||
exa_tool = ExaSearchTool()
|
||||
|
||||
# Create an agent that uses the tool
|
||||
researcher = Agent(
|
||||
@@ -66,11 +66,11 @@ print(result)
|
||||
|
||||
## Configuration Options
|
||||
|
||||
The `EXASearchTool` accepts the following parameters during initialization:
|
||||
The `ExaSearchTool` accepts the following parameters during initialization:
|
||||
|
||||
- `type` (str, optional): The search type to use. Defaults to `"auto"`. Options: `"auto"`, `"instant"`, `"fast"`, `"deep"`.
|
||||
- `highlights` (bool or dict, optional): Return token-efficient excerpts most relevant to the query instead of the full page. Defaults to `True`. Pass a dict like `{"max_characters": 4000}` to configure, or `False` to disable.
|
||||
- `content` (bool, optional): Whether to include full page content in results. Defaults to `False`.
|
||||
- `summary` (bool, optional): Whether to include AI-generated summaries of each result. Requires `content=True`. Defaults to `False`.
|
||||
- `api_key` (str, optional): Your Exa API key. Falls back to the `EXA_API_KEY` environment variable if not provided.
|
||||
- `base_url` (str, optional): Custom API server URL. Falls back to the `EXA_BASE_URL` environment variable if not provided.
|
||||
|
||||
@@ -83,28 +83,70 @@ When calling the tool (or when an agent invokes it), the following search parame
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
You can configure the tool with custom parameters for richer results:
|
||||
For most agent workflows we recommend `highlights` — it returns the most relevant excerpts from each result and uses far fewer tokens than full page content:
|
||||
|
||||
```python
|
||||
# Get full page content with AI summaries
|
||||
exa_tool = EXASearchTool(
|
||||
content=True,
|
||||
summary=True,
|
||||
type="deep"
|
||||
# Get token-efficient excerpts most relevant to the query
|
||||
exa_tool = ExaSearchTool(
|
||||
highlights=True,
|
||||
type="auto",
|
||||
)
|
||||
|
||||
# Use it in an agent
|
||||
agent = Agent(
|
||||
role="Deep Researcher",
|
||||
goal="Conduct thorough research with full content and summaries",
|
||||
role="Researcher",
|
||||
goal="Answer questions with current web data",
|
||||
tools=[exa_tool]
|
||||
)
|
||||
```
|
||||
|
||||
For thorough, multi-step searches, use `type="deep"`:
|
||||
|
||||
```python
|
||||
exa_tool = ExaSearchTool(
|
||||
highlights=True,
|
||||
type="deep",
|
||||
)
|
||||
```
|
||||
|
||||
For more on choosing between highlights and full content, see the [Exa search best practices](https://exa.ai/docs/reference/search-best-practices).
|
||||
|
||||
## Using Exa via MCP
|
||||
|
||||
You can also connect your agent to Exa's hosted MCP server. Pass your API key with the `x-api-key` header:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
from crewai.mcp import MCPServerHTTP
|
||||
|
||||
agent = Agent(
|
||||
role="Research Analyst",
|
||||
goal="Find and analyze information on the web",
|
||||
backstory="Expert researcher with access to Exa's tools",
|
||||
mcps=[
|
||||
MCPServerHTTP(
|
||||
url="https://mcp.exa.ai/mcp",
|
||||
headers={"x-api-key": "YOUR_EXA_API_KEY"},
|
||||
),
|
||||
],
|
||||
)
|
||||
```
|
||||
|
||||
Get your API key from the [Exa dashboard](https://dashboard.exa.ai/api-keys). For more on MCP in CrewAI, see the [MCP overview](/en/mcp/overview).
|
||||
|
||||
## Features
|
||||
|
||||
- **Token-Efficient Highlights**: Get the most relevant excerpts from each result, ~10x fewer tokens than full text
|
||||
- **Semantic Search**: Find results based on meaning, not just keywords
|
||||
- **Full Content Retrieval**: Get the full text of web pages alongside search results
|
||||
- **AI Summaries**: Get concise, AI-generated summaries of each result
|
||||
- **Date Filtering**: Limit results to specific time periods with published date filters
|
||||
- **Domain Filtering**: Restrict searches to specific domains
|
||||
|
||||
<Note>
|
||||
`EXASearchTool` is a deprecated alias for `ExaSearchTool`. Existing imports continue to work but will emit a deprecation warning; please migrate to `ExaSearchTool`.
|
||||
</Note>
|
||||
|
||||
## Resources
|
||||
|
||||
- [Exa documentation](https://exa.ai/docs)
|
||||
- [Exa dashboard — manage API keys and usage](https://dashboard.exa.ai)
|
||||
|
||||
@@ -35,7 +35,7 @@ info:
|
||||
|
||||
1. **Discover inputs** using `GET /inputs`
|
||||
2. **Start execution** using `POST /kickoff`
|
||||
3. **Monitor progress** using `GET /{kickoff_id}/status`
|
||||
3. **Monitor progress** using `GET /status/{kickoff_id}`
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: CrewAI Support
|
||||
@@ -207,7 +207,7 @@ paths:
|
||||
"500":
|
||||
$ref: "#/components/responses/ServerError"
|
||||
|
||||
/{kickoff_id}/status:
|
||||
/status/{kickoff_id}:
|
||||
get:
|
||||
summary: Get Execution Status
|
||||
description: |
|
||||
|
||||
@@ -35,7 +35,7 @@ info:
|
||||
|
||||
1. **Discover inputs** using `GET /inputs`
|
||||
2. **Start execution** using `POST /kickoff`
|
||||
3. **Monitor progress** using `GET /{kickoff_id}/status`
|
||||
3. **Monitor progress** using `GET /status/{kickoff_id}`
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: CrewAI Support
|
||||
@@ -207,7 +207,7 @@ paths:
|
||||
"500":
|
||||
$ref: "#/components/responses/ServerError"
|
||||
|
||||
/{kickoff_id}/status:
|
||||
/status/{kickoff_id}:
|
||||
get:
|
||||
summary: Get Execution Status
|
||||
description: |
|
||||
|
||||
@@ -84,7 +84,7 @@ paths:
|
||||
'500':
|
||||
$ref: '#/components/responses/ServerError'
|
||||
|
||||
/{kickoff_id}/status:
|
||||
/status/{kickoff_id}:
|
||||
get:
|
||||
summary: 실행 상태 조회
|
||||
description: |
|
||||
|
||||
@@ -35,7 +35,7 @@ info:
|
||||
|
||||
1. **Descubra os inputs** usando `GET /inputs`
|
||||
2. **Inicie a execução** usando `POST /kickoff`
|
||||
3. **Monitore o progresso** usando `GET /{kickoff_id}/status`
|
||||
3. **Monitore o progresso** usando `GET /status/{kickoff_id}`
|
||||
version: 1.0.0
|
||||
contact:
|
||||
name: CrewAI Suporte
|
||||
@@ -120,7 +120,7 @@ paths:
|
||||
"500":
|
||||
$ref: "#/components/responses/ServerError"
|
||||
|
||||
/{kickoff_id}/status:
|
||||
/status/{kickoff_id}:
|
||||
get:
|
||||
summary: Obter Status da Execução
|
||||
description: |
|
||||
|
||||
@@ -26,7 +26,7 @@ CrewAI 엔터프라이즈 API 참고 자료에 오신 것을 환영합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="진행 상황 모니터링">
|
||||
`GET /{kickoff_id}/status`를 사용하여 실행 상태를 확인하고 결과를 조회하세요.
|
||||
`GET /status/{kickoff_id}`를 사용하여 실행 상태를 확인하고 결과를 조회하세요.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
@@ -65,7 +65,7 @@ https://your-crew-name.crewai.com
|
||||
|
||||
1. **탐색**: `GET /inputs`를 호출하여 crew가 필요한 것을 파악합니다.
|
||||
2. **실행**: `POST /kickoff`를 통해 입력값을 제출하여 처리를 시작합니다.
|
||||
3. **모니터링**: 완료될 때까지 `GET /{kickoff_id}/status`를 주기적으로 조회합니다.
|
||||
3. **모니터링**: 완료될 때까지 `GET /status/{kickoff_id}`를 주기적으로 조회합니다.
|
||||
4. **결과**: 완료된 응답에서 최종 출력을 추출합니다.
|
||||
|
||||
## 오류 처리
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "GET /{kickoff_id}/status"
|
||||
title: "GET /status/{kickoff_id}"
|
||||
description: "실행 상태 조회"
|
||||
openapi: "/enterprise-api.ko.yaml GET /{kickoff_id}/status"
|
||||
openapi: "/enterprise-api.ko.yaml GET /status/{kickoff_id}"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
@@ -4,6 +4,152 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="2026년 5월 7일">
|
||||
## v1.14.5a3
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a3)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 버그 수정
|
||||
- 상태 엔드포인트 경로를 /{kickoff_id}/status에서 /status/{kickoff_id}로 수정
|
||||
- 보안 준수를 위해 gitpython 의존성을 버전 >=3.1.47로 업데이트
|
||||
|
||||
### 리팩토링
|
||||
- CLI를 독립형 crewai-cli 패키지로 분리
|
||||
|
||||
### 문서
|
||||
- v1.14.5a2에 대한 변경 로그 및 버전 업데이트
|
||||
|
||||
## 기여자
|
||||
|
||||
@greysonlalonde, @iris-clawd
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 5월 4일">
|
||||
## v1.14.5a2
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a2)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 버그 수정
|
||||
- finally 블록에서 작업 출력 복원 수정
|
||||
- 완료 토큰에 `thoughts_token_count` 포함
|
||||
- 비동기 배치 플러시 간 작업 출력 보존
|
||||
- `CrewAIRagAdapter`의 로더 호출에 kwargs 전달
|
||||
- `result_as_answer`가 후크 차단 메시지를 최종 답변으로 반환하지 않도록 방지
|
||||
- `result_as_answer`가 오류를 최종 답변으로 반환하지 않도록 방지
|
||||
- 비동기 경로에서 출력 변환을 위해 `acall` 사용
|
||||
- 에이전트 간 공유 LLM 중지 단어 변형 방지
|
||||
- `convert_to_model`에서 `BaseModel` 입력 처리
|
||||
|
||||
### 문서화
|
||||
- 추가 환경 변수 문서화
|
||||
- v1.14.5a1에 대한 변경 로그 및 버전 업데이트
|
||||
|
||||
## 기여자
|
||||
|
||||
@NIK-TIGER-BILL, @greysonlalonde, @lorenzejay, @minasami-pr, @theCyberTech, @wishhyt
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 5월 1일">
|
||||
## v1.14.5a1
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a1)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 기능
|
||||
- `restore_from_state_id` 시작 매개변수 추가
|
||||
- ExaSearchTool에 하이라이트 추가 및 EXASearchTool에서 이름 변경
|
||||
|
||||
### 버그 수정
|
||||
- 릴리스 흐름에서 crewai 핀 사이트 누락 수정
|
||||
- 트레이스를 위한 기술 로딩 이벤트 보장
|
||||
|
||||
### 문서
|
||||
- v1.14.4에 대한 변경 로그 및 버전 업데이트
|
||||
|
||||
## 기여자
|
||||
|
||||
@akaKuruma, @github-actions[bot], @greysonlalonde, @lorenzejay, @theishangoswami
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 5월 1일">
|
||||
## v1.14.4
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.4)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 기능
|
||||
- @persist에서 사용자 정의 지속성 키 지원 추가
|
||||
- Azure OpenAI 공급자를 위한 응답 API 지원 추가
|
||||
- Azure AI 추론 클라이언트에 credential_scopes 전달
|
||||
- Vertex AI 작업 부하 신원 설정 가이드 추가
|
||||
- Tavily Research 및 Research 가져오기 추가
|
||||
- 검색, 연구 및 콘텐츠 추출을 위한 You.com MCP 도구 추가
|
||||
|
||||
### 버그 수정
|
||||
- JSON 정규 표현식이 유효한 JSON이 아닐 때의 fall through 수정
|
||||
- 응답에 텍스트가 포함될 때 tool_calls를 보존하도록 수정
|
||||
- instructor.from_provider에 base_url 및 api_key를 전달하도록 수정
|
||||
- 기본 MCP 서버가 도구를 반환하지 않을 때 경고하고 빈 값을 반환하도록 수정
|
||||
- 비스트리밍 핸들러에서 검증된 메시지 변수를 사용하도록 수정
|
||||
- LLM 실패에 대한 크루 채팅 설명 도우미를 보호하도록 수정
|
||||
- 호출 간 메시지 및 반복을 재설정하도록 수정
|
||||
- replay 및 test를 통해 훈련된 에이전트 파일을 전달하도록 수정
|
||||
- 추론 시 사용자 정의 훈련된 에이전트 파일을 존중하도록 수정
|
||||
- 다중 모드 input_files에 대해 작업 전용 에이전트를 크루에 바인딩하도록 수정
|
||||
- JSON 체크포인팅을 위해 가드레일 호출 가능 항목을 null로 직렬화하도록 수정
|
||||
- 자기 참조 라우터를 피하기 위해 force_final_answer의 이름 변경 수정
|
||||
- SSTI 수정을 위한 litellm 버전 증가; 수정할 수 없는 pip CVE 무시
|
||||
|
||||
### 문서
|
||||
- v1.14.4a1에 대한 변경 로그 및 버전 업데이트
|
||||
- E2B 샌드박스 도구 페이지 추가
|
||||
- Daytona 샌드박스 도구 문서 추가
|
||||
|
||||
## 기여자
|
||||
|
||||
@EdwardIrby, @dependabot[bot], @factory-droid-oss, @factory-droid[bot], @greysonlalonde, @kunalk16, @lorenzejay, @lucasgomide, @manisrinivasan2k1, @mattatcha, @vinibrsl
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 29일">
|
||||
## v1.14.4a1
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.4a1)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 버그 수정
|
||||
- LLM 실패에 대한 크루 채팅 설명 도우미 수정.
|
||||
- 실행기에서 호출 간 메시지 및 반복 초기화.
|
||||
- CLI에서 재생 및 테스트를 통해 훈련된 에이전트 파일 전달.
|
||||
- 에이전트에서 추론 시 사용자 정의 훈련된 에이전트 파일 존중.
|
||||
- 다중 모드 입력 파일이 LLM에 도달하도록 작업 전용 에이전트를 크루에 바인딩.
|
||||
- JSON 체크포인트를 위해 가드레일 호출 가능 항목을 null로 직렬화.
|
||||
- 자기 참조 라우터를 피하기 위해 agent_executor에서 `force_final_answer` 이름 변경.
|
||||
- SSTI 수정을 위한 `litellm` 버전 증가 및 수정 불가능한 pip CVE 무시.
|
||||
|
||||
### 문서
|
||||
- E2B 샌드박스 도구 페이지 추가.
|
||||
- Daytona 샌드박스 도구 문서 추가.
|
||||
- Vertex AI 작업 부하 신원 설정 가이드 추가.
|
||||
- 검색, 연구 및 콘텐츠 추출을 위한 You.com MCP 도구 추가.
|
||||
- v1.14.3에 대한 변경 로그 및 버전 업데이트.
|
||||
|
||||
## 기여자
|
||||
|
||||
@EdwardIrby, @dependabot[bot], @factory-droid-oss, @factory-droid[bot], @greysonlalonde, @lorenzejay, @manisrinivasan2k1, @mattatcha
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 25일">
|
||||
## v1.14.3
|
||||
|
||||
|
||||
@@ -373,6 +373,42 @@ class AnotherFlow(Flow[dict]):
|
||||
print("Method-level persisted runs:", self.state["runs"])
|
||||
```
|
||||
|
||||
### 영속 상태 포크하기
|
||||
|
||||
`@persist`는 `kickoff` / `kickoff_async`에서 두 가지 별개의 하이드레이션 모드를 지원합니다:
|
||||
|
||||
- `kickoff(inputs={"id": <uuid>})` — **재개(resume)**: 제공된 UUID에 대한 최신 스냅샷을 로드하고 동일한 `flow_uuid` 아래에서 계속 기록합니다. 기록이 확장됩니다.
|
||||
- `kickoff(restore_from_state_id=<uuid>)` — **포크(fork)**: 제공된 UUID에 대한 최신 스냅샷을 로드하고 새 실행의 상태를 하이드레이트한 후, 새로운 `state.id`(자동 생성, 또는 `inputs["id"]`가 고정된 경우 그 값)를 할당합니다. 새 실행의 `@persist` 기록은 새로운 `state.id` 아래에 저장되며, 원본 플로우의 기록은 보존됩니다.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
id: str = ""
|
||||
counter: int = 0
|
||||
|
||||
@persist
|
||||
class CounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def step(self):
|
||||
self.state.counter += 1
|
||||
print(f"[id={self.state.id}] counter={self.state.counter}")
|
||||
|
||||
# 실행 1: 새 상태, counter 0 -> 1, flow_1.state.id 아래에 저장됨
|
||||
flow_1 = CounterFlow()
|
||||
flow_1.kickoff()
|
||||
|
||||
# 포크: flow_1의 최신 스냅샷에서 하이드레이트하지만, 새 state.id를 사용
|
||||
flow_2 = CounterFlow()
|
||||
flow_2.kickoff(restore_from_state_id=flow_1.state.id)
|
||||
# flow_2.state.counter는 1(하이드레이트)로 시작하고, step()이 2로 증가시킵니다.
|
||||
# flow_2.state.id != flow_1.state.id; flow_1의 기록은 변경되지 않습니다.
|
||||
```
|
||||
|
||||
제공된 `restore_from_state_id`가 어떤 영속 상태와도 일치하지 않으면, kickoff는 조용히 기본 동작으로 폴백됩니다 — 기존 `inputs["id"]`의 미발견 동작과 동일합니다. `restore_from_state_id`를 `from_checkpoint`와 결합하면 `ValueError`가 발생합니다; 하나의 하이드레이션 소스를 선택하세요. 포크 중 `inputs["id"]`를 고정하면 다른 플로우와 영속 키를 공유하게 됩니다 — 일반적으로 `restore_from_state_id`만 사용하는 것이 좋습니다.
|
||||
|
||||
### 작동 방식
|
||||
|
||||
1. **고유 상태 식별**
|
||||
|
||||
@@ -146,6 +146,14 @@ class ProductionFlow(Flow[AppState]):
|
||||
# ...
|
||||
```
|
||||
|
||||
기본적으로, `@persist`는 `kickoff(inputs={"id": <uuid>})`가 제공될 때 플로우를 재개하여 동일한 `flow_uuid` 기록을 확장합니다. 영속된 플로우를 새 계보로 **포크**하려면 — 이전 실행에서 상태를 하이드레이트하지만 새로운 `state.id` 아래에 기록 — `restore_from_state_id`를 전달하세요:
|
||||
|
||||
```python
|
||||
flow.kickoff(restore_from_state_id="<previous-run-state-id>")
|
||||
```
|
||||
|
||||
새 실행은 새로운 `state.id`(자동 생성, 또는 `inputs["id"]`가 고정된 경우 그 값)를 받아 `@persist` 기록이 원본의 기록을 확장하지 않도록 합니다. `from_checkpoint`와 결합하면 `ValueError`가 발생합니다; 하나의 하이드레이션 소스를 선택하세요.
|
||||
|
||||
## 요약
|
||||
|
||||
- **Flow로 시작하세요.**
|
||||
|
||||
@@ -132,7 +132,7 @@ crew.kickoff()
|
||||
| **DirectorySearchTool** | 디렉터리 내에서 검색하는 RAG 도구로, 파일 시스템을 탐색할 때 유용합니다. |
|
||||
| **DOCXSearchTool** | DOCX 문서 내에서 검색하는 데 특화된 RAG 도구로, Word 파일을 처리할 때 이상적입니다. |
|
||||
| **DirectoryReadTool** | 디렉터리 구조와 그 내용을 읽고 처리하도록 지원하는 도구입니다. |
|
||||
| **EXASearchTool** | 다양한 데이터 소스를 폭넓게 검색하기 위해 설계된 도구입니다. |
|
||||
| **ExaSearchTool** | 다양한 데이터 소스를 폭넓게 검색하기 위해 설계된 도구입니다. |
|
||||
| **FileReadTool** | 다양한 파일 형식을 지원하며 파일에서 데이터를 읽고 추출할 수 있는 도구입니다. |
|
||||
| **FirecrawlSearchTool** | Firecrawl을 이용해 웹페이지를 검색하고 결과를 반환하는 도구입니다. |
|
||||
| **FirecrawlCrawlWebsiteTool** | Firecrawl을 사용해 웹페이지를 크롤링하는 도구입니다. |
|
||||
|
||||
@@ -346,6 +346,48 @@ class SelectivePersistFlow(Flow):
|
||||
return f"Complete with count {self.state['count']}"
|
||||
```
|
||||
|
||||
#### 영속 상태 포크하기
|
||||
|
||||
`@persist`는 `kickoff` / `kickoff_async`에서 두 가지 별개의 하이드레이션 모드를 지원합니다. 동일한 계보를 계속하려면 **재개**(`inputs["id"]`)를 사용하고, 스냅샷에서 시작하는 새 계보를 시작하려면 **포크**(`restore_from_state_id`)를 사용하세요:
|
||||
|
||||
| | kickoff 후 `state.id` | `@persist` 기록 위치 |
|
||||
|---|---|---|
|
||||
| `inputs["id"]` (재개) | 제공된 id | 제공된 id (기록 확장) |
|
||||
| `restore_from_state_id` (포크) | 새 id, 또는 고정 시 `inputs["id"]` | 새 id (원본 보존) |
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
id: str = ""
|
||||
counter: int = 0
|
||||
|
||||
@persist
|
||||
class CounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def step(self):
|
||||
self.state.counter += 1
|
||||
|
||||
# 실행 1: 새 상태, counter 0 -> 1
|
||||
flow_1 = CounterFlow()
|
||||
flow_1.kickoff()
|
||||
|
||||
# 포크: flow_1의 최신 스냅샷에서 하이드레이트, 단 새 state.id에 기록
|
||||
flow_2 = CounterFlow()
|
||||
flow_2.kickoff(restore_from_state_id=flow_1.state.id)
|
||||
# flow_2는 counter=1(하이드레이트)로 시작하고, step()이 2로 증가시킵니다.
|
||||
# flow_1의 flow_uuid 기록은 변경되지 않습니다.
|
||||
```
|
||||
|
||||
동작 노트:
|
||||
|
||||
- `restore_from_state_id`가 영속에서 발견되지 않음 → kickoff는 조용히 기본 동작으로 폴백됩니다 (기존 `inputs["id"]`의 미발견 동작 미러링). 예외는 발생하지 않습니다.
|
||||
- `restore_from_state_id`를 `from_checkpoint`와 결합하면 `ValueError`가 발생합니다 — 서로 다른 상태 시스템(`@persist` 대 Checkpointing)을 대상으로 하므로 결합할 수 없습니다.
|
||||
- `restore_from_state_id=None`(기본값)은 매개변수 없는 kickoff와 바이트 단위로 동일합니다.
|
||||
- 포크 중 `inputs["id"]`를 고정하면 새 실행이 다른 플로우와 영속 키를 공유함을 의미합니다 — 일반적으로 `restore_from_state_id`만 사용하는 것이 좋습니다.
|
||||
|
||||
## 고급 상태 패턴
|
||||
|
||||
### 상태 기반 조건부 로직
|
||||
|
||||
359
docs/ko/guides/migration/upgrading-crewai.mdx
Normal file
359
docs/ko/guides/migration/upgrading-crewai.mdx
Normal file
@@ -0,0 +1,359 @@
|
||||
---
|
||||
title: "CrewAI 업그레이드 및 마이그레이션"
|
||||
description: "CrewAI 업그레이드 방법, 브레이킹 체인지 처리, Crew에서 Flow로 마이그레이션하는 방법."
|
||||
icon: "arrow-up-circle"
|
||||
---
|
||||
|
||||
## 개요
|
||||
|
||||
CrewAI는 빠르게 발전합니다. 새로운 릴리스에서는 import 경로가 정비되고, `Agent`, `Crew`, `Task`의 기본값이 변경되며, `Flow`와 checkpointing 같은 새로운 오케스트레이션 프리미티브가 도입됩니다. 이 가이드는 다음에 필요한 실용적인 단계들을 모아둔 것입니다:
|
||||
|
||||
- 전역 `crewai` CLI와 프로젝트의 고정된 의존성 업그레이드
|
||||
- import와 파라미터의 브레이킹 체인지에 적응
|
||||
- 독립 실행형 `Crew`를 타입이 지정된 `Flow`로 마이그레이션
|
||||
- 업그레이드된 프로젝트를 처음 다시 실행할 때 나타나는 함정 피하기
|
||||
|
||||
새로 시작한다면 [설치](/ko/installation)를 참고하세요. 다른 프레임워크에서 옮겨오는 경우라면 [LangGraph에서 마이그레이션](/ko/guides/migration/migrating-from-langgraph)을 참고하세요.
|
||||
|
||||
---
|
||||
|
||||
## 업그레이드할 수 있는 두 가지
|
||||
|
||||
CrewAI는 사용자의 머신에 두 곳에 존재하며, 각각 독립적으로 업그레이드됩니다:
|
||||
|
||||
| 무엇 | 설치 방법 | 업그레이드 방법 |
|
||||
|---|---|---|
|
||||
| **전역 `crewai` CLI** | `uv tool install crewai` | `uv tool install crewai --upgrade` |
|
||||
| **프로젝트 venv** (코드가 실행되는 곳) | `crewai install` / `uv sync` | `uv add "crewai[...]>=X.Y.Z"` 후 `crewai install` |
|
||||
|
||||
이 둘은 — 그리고 자주 — 동기화가 어긋날 수 있습니다. `crewai --version`은 CLI 버전을 알려줍니다. 프로젝트 안에서 `uv pip show crewai`를 실행하면 venv 버전을 알려줍니다. 둘이 다른 것은 정상이며, 실행 중인 코드에 중요한 것은 venv 버전입니다.
|
||||
|
||||
## 왜 `crewai install`만으로는 업그레이드되지 않는가
|
||||
|
||||
`crewai install`은 `uv sync`를 감싼 얇은 래퍼입니다. 현재 `uv.lock` 파일이 지시하는 것 그대로를 설치할 뿐이며 — 어떤 버전 제약도 올리지 **않습니다**.
|
||||
|
||||
`pyproject.toml`이 `crewai>=1.11.1`이라 적혀 있고 lock 파일이 `1.11.1`로 해소되었다면, `crewai install`을 실행해도 `1.14.4`가 사용 가능하더라도 영원히 `1.11.1`에 머무릅니다.
|
||||
|
||||
실제로 업그레이드하려면 다음을 해야 합니다:
|
||||
|
||||
1. `pyproject.toml`의 버전 제약 업데이트
|
||||
2. lock 파일 재해소
|
||||
3. venv 동기화
|
||||
|
||||
`uv add`는 이 세 가지를 한 번에 처리합니다.
|
||||
|
||||
## 프로젝트 업그레이드 방법
|
||||
|
||||
```bash
|
||||
# 제약을 올리고 lock을 다시 만드는 한 번의 명령
|
||||
uv add "crewai[tools]>=1.14.4"
|
||||
|
||||
# venv 동기화 (crewai install은 내부적으로 uv sync를 호출)
|
||||
crewai install
|
||||
|
||||
# 확인
|
||||
uv pip show crewai
|
||||
# → Version: 1.14.4
|
||||
```
|
||||
|
||||
`[tools]`를 프로젝트에서 사용하는 extras로 바꾸세요 (예: `[tools,anthropic]`). 잘 모르겠다면 `pyproject.toml`의 `dependencies` 목록을 확인하세요.
|
||||
|
||||
<Note>
|
||||
`uv add`는 `pyproject.toml`과 `uv.lock`을 **둘 다** 원자적으로 업데이트합니다. `pyproject.toml`을 수동으로 편집하는 경우, `crewai install`이 새 버전을 가져가도록 하기 전에 `uv lock --upgrade-package crewai`를 실행해 lock 파일을 다시 해소해야 합니다.
|
||||
</Note>
|
||||
|
||||
## 전역 CLI 업그레이드
|
||||
|
||||
전역 CLI는 프로젝트와 분리되어 있습니다. 다음 명령으로 업그레이드하세요:
|
||||
|
||||
```bash
|
||||
uv tool install crewai --upgrade
|
||||
```
|
||||
|
||||
업그레이드 후 셸이 `PATH`에 대해 경고하면 새로고침하세요:
|
||||
|
||||
```bash
|
||||
uv tool update-shell
|
||||
```
|
||||
|
||||
이 명령은 프로젝트의 venv를 **건드리지 않습니다** — 프로젝트 내부에서는 여전히 `uv add` + `crewai install`이 필요합니다.
|
||||
|
||||
## 둘이 동기화되었는지 확인
|
||||
|
||||
```bash
|
||||
# 전역 CLI 버전
|
||||
crewai --version
|
||||
|
||||
# 프로젝트 venv 버전
|
||||
uv pip show crewai | grep Version
|
||||
```
|
||||
|
||||
둘이 일치할 필요는 없지만 — 런타임 동작에 중요한 것은 프로젝트 venv 버전입니다.
|
||||
|
||||
<Note>
|
||||
CrewAI는 `Python >=3.10, <3.14`를 요구합니다. `uv`가 더 오래된 인터프리터로 설치되어 있다면, `crewai install`을 실행하기 전에 지원되는 Python으로 프로젝트 venv를 다시 만드세요.
|
||||
</Note>
|
||||
|
||||
---
|
||||
|
||||
## 브레이킹 체인지 및 마이그레이션 노트
|
||||
|
||||
대부분의 업그레이드는 작은 조정만 필요합니다. 아래 항목들은 조용히 깨지거나 헷갈리는 트레이스백을 내는 영역들입니다.
|
||||
|
||||
### Import 경로: tools와 `BaseTool`
|
||||
|
||||
tools의 정식 import 위치는 `crewai.tools`입니다. 옛 경로들이 아직 튜토리얼에 등장하지만 업데이트해야 합니다.
|
||||
|
||||
```python
|
||||
# 이전
|
||||
from crewai_tools import BaseTool
|
||||
from crewai.agents.tools import tool
|
||||
|
||||
# 이후
|
||||
from crewai.tools import BaseTool, tool
|
||||
```
|
||||
|
||||
`@tool` 데코레이터와 `BaseTool` 서브클래스는 모두 `crewai.tools`에 있습니다. `AgentFinish` 등 내부 에이전트 심볼들은 더 이상 공개 표면이 아닙니다 — import 중이었다면 event listener나 `Task` 콜백으로 전환하세요.
|
||||
|
||||
### `Agent` 파라미터 변경
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
|
||||
agent = Agent(
|
||||
role="Researcher",
|
||||
goal="Find authoritative sources on {topic}",
|
||||
backstory="You are a careful, source-driven researcher.",
|
||||
llm="gpt-4o-mini", # 모델명 문자열 또는 LLM 객체
|
||||
verbose=True, # 정수 레벨이 아닌 bool
|
||||
max_iter=15, # 버전마다 기본값이 바뀌었음 — 명시적으로 지정
|
||||
allow_delegation=False,
|
||||
)
|
||||
```
|
||||
|
||||
- `llm`은 문자열 모델명(설정된 provider를 통해 해소)이나 세밀한 제어를 위한 `LLM` 객체를 받습니다.
|
||||
- `verbose`는 일반 `bool`입니다. 정수를 전달해도 더 이상 로그 레벨을 토글하지 않습니다.
|
||||
- `max_iter`의 기본값은 릴리스 사이에 변경되었습니다. 첫 tool 호출 후 에이전트가 조용히 반복을 멈춘다면 `max_iter`를 명시적으로 지정하세요.
|
||||
|
||||
### `Crew` 파라미터
|
||||
|
||||
```python
|
||||
from crewai import Crew, Process
|
||||
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
process=Process.sequential, # 또는 Process.hierarchical
|
||||
memory=True,
|
||||
cache=True,
|
||||
embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
|
||||
)
|
||||
```
|
||||
|
||||
- `process=Process.hierarchical`은 `manager_llm=` 또는 `manager_agent=` 중 하나가 필요합니다. 둘 다 없으면 kickoff 시 검증 단계에서 오류가 발생합니다.
|
||||
- 기본이 아닌 임베딩 provider와 함께 `memory=True`를 쓰려면 `embedder` dict가 필요합니다 — 아래의 [메모리와 embedder 설정](#memory-embedder-config)을 참고하세요.
|
||||
|
||||
### `Task` 구조화된 출력
|
||||
|
||||
`output_pydantic`, `output_json`, 또는 `output_file`을 사용해 task 결과를 타입이 지정된 형태로 강제할 수 있습니다:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel
|
||||
from crewai import Task
|
||||
|
||||
class Article(BaseModel):
|
||||
title: str
|
||||
body: str
|
||||
|
||||
write = Task(
|
||||
description="Write an article about {topic}",
|
||||
expected_output="A short article with a title and body",
|
||||
agent=writer,
|
||||
output_pydantic=Article, # 인스턴스가 아닌 클래스
|
||||
output_file="output/article.md",
|
||||
)
|
||||
```
|
||||
|
||||
`output_pydantic`은 **클래스** 자체를 받습니다. `Article(title="", body="")`을 전달하는 것은 흔한 실수이며 헷갈리는 검증 오류로 실패합니다.
|
||||
|
||||
### 메모리와 embedder 설정
|
||||
|
||||
`memory=True`이고 OpenAI의 기본 임베딩을 사용하지 않는다면, `embedder`를 반드시 전달해야 합니다:
|
||||
|
||||
```python
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
memory=True,
|
||||
embedder={
|
||||
"provider": "ollama",
|
||||
"config": {"model": "nomic-embed-text"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
해당 provider의 자격 증명(`OPENAI_API_KEY`, `OLLAMA_HOST` 등)을 `.env` 파일에 설정하세요. 메모리 저장 경로는 기본적으로 프로젝트-로컬입니다 — embedder를 바꾸면 차원이 호환되지 않으므로 프로젝트의 메모리 디렉터리를 삭제하세요.
|
||||
|
||||
---
|
||||
|
||||
## Crew를 Flow로 마이그레이션
|
||||
|
||||
`Crew`는 단일 에이전트 팀이 하나의 워크플로우를 실행할 때 적합한 프리미티브입니다. 분기, 여러 crew, 또는 실행 간 영속 상태가 필요해지면 `Flow`로 넘어가세요.
|
||||
|
||||
### Flow 대 독립 Crew, 언제 무엇을 쓰나
|
||||
|
||||
| 상황 | 사용 |
|
||||
| --- | --- |
|
||||
| 단일 팀, 단일 선형/계층적 워크플로우 | `Crew` |
|
||||
| 조건부 분기, 재시도, 결과 기반 라우팅 | `Flow` |
|
||||
| 여러 전문 crew를 체인으로 연결 | `Flow` |
|
||||
| 단계나 실행 사이에 유지되어야 하는 상태 | `Flow` (checkpointing 포함) |
|
||||
| 타입이 지정된 IDE-친화적 상태가 필요 | Pydantic 모델과 함께 `Flow[MyState]` |
|
||||
|
||||
분기, 멀티-crew, 영속 상태 중 단 하나라도 필요하다면 — `Flow`로 시작하세요. 보일러플레이트는 적고 나중에 다시 작성할 필요가 없습니다.
|
||||
|
||||
### 단계별 마이그레이션
|
||||
|
||||
**이전 — 독립 crew:**
|
||||
|
||||
```python
|
||||
from crewai import Crew
|
||||
|
||||
crew = Crew(agents=[researcher, writer], tasks=[research_task, write_task])
|
||||
result = crew.kickoff(inputs={"topic": "vector databases"})
|
||||
print(result)
|
||||
```
|
||||
|
||||
**이후 — 타입이 지정된 Flow 안의 crew:**
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MyState(BaseModel):
|
||||
input_data: str = ""
|
||||
result: str = ""
|
||||
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def run_crew(self):
|
||||
result = MyCrew().crew().kickoff(inputs={"topic": self.state.input_data})
|
||||
self.state.result = str(result)
|
||||
return self.state.result
|
||||
|
||||
flow = MyFlow()
|
||||
flow.kickoff(inputs={"input_data": "vector databases"})
|
||||
```
|
||||
|
||||
달라진 점:
|
||||
|
||||
1. crew는 모듈 로드 시점이 아니라 메서드 안에서 생성됩니다.
|
||||
2. 입력은 kwargs로 넘기는 대신 `self.state`를 통해 흐릅니다.
|
||||
3. 진입점은 `@start()`로 표시됩니다. 이후 단계는 `@listen(run_crew)`로 체인합니다.
|
||||
|
||||
### 구조화된 상태 설정
|
||||
|
||||
dict 무타입 변형보다 타입이 지정된 상태(`Flow[MyState]`)를 선호하세요. 자동완성, 경계에서의 검증, checkpointing을 위한 직렬화 가능한 상태를 얻을 수 있습니다:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
class ResearchState(BaseModel):
|
||||
topic: str = ""
|
||||
sources: list[str] = Field(default_factory=list)
|
||||
draft: str = ""
|
||||
final: str = ""
|
||||
```
|
||||
|
||||
타입이 지정되지 않은 상태(제네릭 없는 `Flow()`)도 여전히 동작하지만, 정적 검사와 checkpointing 충실도를 잃게 됩니다.
|
||||
|
||||
### 멀티-crew Flow 패턴
|
||||
|
||||
두 crew를 — 조사 후 작성으로 — 체인으로 연결하는 것이 Flow를 도입하는 가장 전형적인 이유입니다:
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen, router
|
||||
from pydantic import BaseModel
|
||||
|
||||
class PipelineState(BaseModel):
|
||||
topic: str = ""
|
||||
research: str = ""
|
||||
article: str = ""
|
||||
|
||||
class ContentPipeline(Flow[PipelineState]):
|
||||
@start()
|
||||
def research(self):
|
||||
out = ResearchCrew().crew().kickoff(inputs={"topic": self.state.topic})
|
||||
self.state.research = str(out)
|
||||
return self.state.research
|
||||
|
||||
@router(research)
|
||||
def gate(self):
|
||||
return "write" if len(self.state.research) > 200 else "abort"
|
||||
|
||||
@listen("write")
|
||||
def write(self):
|
||||
out = WritingCrew().crew().kickoff(
|
||||
inputs={"topic": self.state.topic, "notes": self.state.research}
|
||||
)
|
||||
self.state.article = str(out)
|
||||
return self.state.article
|
||||
|
||||
@listen("abort")
|
||||
def bail(self):
|
||||
self.state.article = "Insufficient research."
|
||||
return self.state.article
|
||||
|
||||
ContentPipeline().kickoff(inputs={"topic": "vector databases"})
|
||||
```
|
||||
|
||||
`@start()`, `@listen()`, `@router()`는 95%의 시간 동안 사용하게 될 세 가지 데코레이터입니다. 전체 레퍼런스는 [Flows](/ko/concepts/flows)를 참고하세요.
|
||||
|
||||
---
|
||||
|
||||
## 흔한 함정
|
||||
|
||||
1. **`crewai install`을 실행하고 업그레이드를 기대하기.** `crewai install`은 기존 `uv.lock`에 맞춰 동기화합니다. 버전을 올리려면 먼저 `uv add "crewai[tools]>=X.Y.Z"`를 실행하고 그다음 `crewai install`을 실행하세요.
|
||||
2. **제약은 하한이지 핀이 아닙니다.** `crewai>=1.11.1`은 "1.11.1 이상의 어떤 버전이든"을 의미합니다. `uv`는 `uv add` 또는 `uv lock --upgrade-package crewai`를 명시적으로 실행할 때만 재해소합니다.
|
||||
3. **재-락 중에 extras가 누락됨.** `uv add "crewai>=1.14.4"`를 extras 없이 실행하면 `uv`가 해소된 집합에서 `[tools]`를 떨어뜨릴 수 있습니다. 필요한 extras는 항상 포함하세요: `uv add "crewai[tools]>=1.14.4"`.
|
||||
4. **`uv.lock`을 commit하는 것을 잊기.** `uv add`로 버전을 올린 후, 팀원들이 같은 버전을 받을 수 있도록 업데이트된 `uv.lock`을 commit하세요.
|
||||
5. **`uv tool install` 대신 `pip install`.** pip로 설치한 `crewai`와 uv로 설치한 것을 섞으면 `PATH`에 두 개의 바이너리가 생기고 헷갈리는 버전 차이가 생깁니다. 하나를 고르세요 — 지원되는 것은 `uv`입니다.
|
||||
6. **`output_pydantic`에 Pydantic 인스턴스를 전달.** 클래스를 기대합니다. `output_pydantic=Article`이지 `output_pydantic=Article(...)`가 아닙니다.
|
||||
7. **manager 없이 hierarchical 프로세스.** `process=Process.hierarchical`은 `manager_llm=` 또는 `manager_agent=`를 요구합니다.
|
||||
8. **잘못된 embedder로 메모리 활성화.** 디스크의 메모리 디렉터리를 비우지 않고 embedder를 바꾸면 차원 불일치가 발생합니다. provider를 변경한 후 프로젝트의 메모리 저장소를 삭제하세요.
|
||||
9. **타입이 지정된 상태를 원했는데 dict 상태가 됨.** 제네릭이 없는 `Flow()`는 dict를 줍니다. 타입 검사와 깨끗한 checkpointing을 위해서는 `Flow[MyState]`와 `BaseModel`을 사용하세요.
|
||||
10. **오래된 tool import.** `from crewai_tools import BaseTool`은 일부 버전에서 동작하지만 정식 경로가 아닙니다. `from crewai.tools import BaseTool, tool`로 표준화하세요.
|
||||
11. **Python 버전 드리프트.** CrewAI는 `>=3.10, <3.14`를 요구합니다. `uv`는 기본이 3.14+라면 기쁘게 그것으로 venv를 빌드합니다; `pyproject.toml`에서 Python 버전을 핀하세요.
|
||||
12. **`verbose=2`와 같은 정수 플래그.** `verbose`는 `bool`입니다. 더 세밀한 로깅에는 event listener를 사용하세요.
|
||||
13. **Flow 안에서 `inputs={}` 없이 `crew.kickoff()` 호출.** Flow는 kwargs가 아닌 state를 전달합니다. crew는 여전히 `inputs={...}`를 기대합니다.
|
||||
|
||||
---
|
||||
|
||||
## Checkpointing
|
||||
|
||||
Checkpointing은 실행 사이에 agent, crew, flow 상태를 영속화하는 비교적 새로운 기능입니다. 장시간 실행되는 워크플로우가 크래시, 수동 중지, 또는 배포 이후에 재개될 수 있게 해줍니다.
|
||||
|
||||
```python
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
checkpoint=True,
|
||||
)
|
||||
```
|
||||
|
||||
같은 플래그가 `Flow`와 `Agent`에서도 지원됩니다. 상태는 프로젝트의 로컬 저장소에 기록되고 동일한 식별자로 다음 `kickoff()` 시 재생됩니다.
|
||||
|
||||
<Note>
|
||||
Checkpointing은 초기 릴리스 단계입니다. 재개 의미론, 저장 백엔드, 식별자에 관한 API는 마이너 버전 사이에서도 변경될 수 있습니다 — 프로덕션에서 의존한다면 `crewai` 버전을 핀하세요.
|
||||
</Note>
|
||||
|
||||
전체 기능 레퍼런스는 [Checkpointing](/ko/concepts/checkpointing)을 참고하세요.
|
||||
|
||||
---
|
||||
|
||||
## 도움 받기
|
||||
|
||||
- **체인지로그** — 모든 브레이킹 체인지는 [릴리스 노트](/ko/changelog)에 기록됩니다.
|
||||
- **GitHub Issues** — 최소 재현 코드와 `crewai --version` 출력과 함께 [github.com/crewAIInc/crewAI/issues](https://github.com/crewAIInc/crewAI/issues)에 이슈를 열어주세요.
|
||||
- **Discord** — CrewAI 커뮤니티 Discord는 디버깅 도움을 가장 빠르게 받을 수 있는 경로입니다: [community.crewai.com](https://community.crewai.com).
|
||||
- **마이그레이션 가이드** — 다른 프레임워크에서 옮겨오는 경우 [LangGraph에서 마이그레이션](/ko/guides/migration/migrating-from-langgraph)부터 시작하세요.
|
||||
180
docs/ko/tools/ai-ml/daytona.mdx
Normal file
180
docs/ko/tools/ai-ml/daytona.mdx
Normal file
@@ -0,0 +1,180 @@
|
||||
---
|
||||
title: Daytona Sandbox Tools
|
||||
description: Run shell commands, execute Python, and manage files inside isolated [Daytona](https://www.daytona.io/) sandboxes.
|
||||
icon: box
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
# Daytona Sandbox Tools
|
||||
|
||||
## Description
|
||||
|
||||
The Daytona sandbox tools give CrewAI agents access to isolated, ephemeral compute environments powered by [Daytona](https://www.daytona.io/). Three tools are available so you can give an agent exactly the capabilities it needs:
|
||||
|
||||
- **`DaytonaExecTool`** — run any shell command inside a sandbox.
|
||||
- **`DaytonaPythonTool`** — execute a block of Python source code inside a sandbox.
|
||||
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox.
|
||||
|
||||
All three tools share the same sandbox lifecycle controls, so you can mix and match them while keeping state in a single persistent sandbox.
|
||||
|
||||
## Installation
|
||||
|
||||
```shell
|
||||
uv add "crewai-tools[daytona]"
|
||||
# or
|
||||
pip install "crewai-tools[daytona]"
|
||||
```
|
||||
|
||||
Set your API key:
|
||||
|
||||
```shell
|
||||
export DAYTONA_API_KEY="your-api-key"
|
||||
```
|
||||
|
||||
`DAYTONA_API_URL` and `DAYTONA_TARGET` are also respected if set.
|
||||
|
||||
## Sandbox Lifecycle
|
||||
|
||||
All three tools inherit lifecycle controls from `DaytonaBaseTool`:
|
||||
|
||||
| Mode | How to enable | Sandbox created | Sandbox deleted |
|
||||
|------|--------------|-----------------|-----------------|
|
||||
| **Ephemeral** (default) | `persistent=False` (default) | On every `_run` call | At the end of that same call |
|
||||
| **Persistent** | `persistent=True` | Lazily on first use | At process exit (via `atexit`), or manually via `tool.close()` |
|
||||
| **Attach** | `sandbox_id="<id>"` | Never — attaches to an existing sandbox | Never — the tool will not delete a sandbox it did not create |
|
||||
|
||||
Ephemeral mode is the safe default: nothing leaks if the agent forgets to clean up. Use persistent mode when you want filesystem state or installed packages to carry across multiple tool calls — this is typical when pairing `DaytonaFileTool` with `DaytonaExecTool`.
|
||||
|
||||
## Examples
|
||||
|
||||
### One-shot Python execution (ephemeral)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaPythonTool
|
||||
|
||||
tool = DaytonaPythonTool()
|
||||
result = tool.run(code="print(sum(range(10)))")
|
||||
print(result)
|
||||
# {"exit_code": 0, "result": "45\n", "artifacts": None}
|
||||
```
|
||||
|
||||
### Multi-step shell session (persistent)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool, DaytonaFileTool
|
||||
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
# Install a package, then write and run a script — all in the same sandbox
|
||||
exec_tool.run(command="pip install httpx -q")
|
||||
file_tool.run(action="write", path="/workspace/fetch.py", content="import httpx; print(httpx.get('https://httpbin.org/get').status_code)")
|
||||
exec_tool.run(command="python /workspace/fetch.py")
|
||||
```
|
||||
|
||||
<Note>
|
||||
Each tool instance maintains its own persistent sandbox. To share **one** sandbox across two tools, create the first tool, grab its sandbox id via `tool._persistent_sandbox.id`, and pass it to the second tool via `sandbox_id=...`.
|
||||
</Note>
|
||||
|
||||
### Attach to an existing sandbox
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(sandbox_id="my-long-lived-sandbox")
|
||||
result = tool.run(command="ls /workspace")
|
||||
```
|
||||
|
||||
### Custom sandbox parameters
|
||||
|
||||
Pass Daytona's `CreateSandboxFromSnapshotParams` kwargs via `create_params`:
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(
|
||||
persistent=True,
|
||||
create_params={
|
||||
"language": "python",
|
||||
"env_vars": {"MY_FLAG": "1"},
|
||||
"labels": {"owner": "crewai-agent"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
### Agent integration
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import DaytonaExecTool, DaytonaPythonTool, DaytonaFileTool
|
||||
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
python_tool = DaytonaPythonTool(persistent=True)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
coder = Agent(
|
||||
role="Sandbox Engineer",
|
||||
goal="Write and run code in an isolated environment",
|
||||
backstory="An engineer who uses Daytona sandboxes to safely execute code and manage files.",
|
||||
tools=[exec_tool, python_tool, file_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Write a Python script that prints the first 10 Fibonacci numbers, save it to /workspace/fib.py, and run it.",
|
||||
expected_output="The first 10 Fibonacci numbers printed to stdout.",
|
||||
agent=coder,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[coder], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
### Shared (`DaytonaBaseTool`)
|
||||
|
||||
All three tools accept these parameters at initialization:
|
||||
|
||||
| Parameter | Type | Default | Description |
|
||||
|-----------|------|---------|-------------|
|
||||
| `api_key` | `str \| None` | `$DAYTONA_API_KEY` | Daytona API key. Falls back to the `DAYTONA_API_KEY` env var. |
|
||||
| `api_url` | `str \| None` | `$DAYTONA_API_URL` | Daytona API URL override. |
|
||||
| `target` | `str \| None` | `$DAYTONA_TARGET` | Daytona target region. |
|
||||
| `persistent` | `bool` | `False` | Reuse one sandbox across all calls and delete it at process exit. |
|
||||
| `sandbox_id` | `str \| None` | `None` | Attach to an existing sandbox by id or name. |
|
||||
| `create_params` | `dict \| None` | `None` | Extra kwargs forwarded to `CreateSandboxFromSnapshotParams` (e.g. `language`, `env_vars`, `labels`). |
|
||||
| `sandbox_timeout` | `float` | `60.0` | Timeout in seconds for sandbox create/delete operations. |
|
||||
|
||||
### `DaytonaExecTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `command` | `str` | ✓ | Shell command to execute. |
|
||||
| `cwd` | `str \| None` | | Working directory inside the sandbox. |
|
||||
| `env` | `dict[str, str] \| None` | | Extra environment variables for this command. |
|
||||
| `timeout` | `int \| None` | | Maximum seconds to wait for the command. |
|
||||
|
||||
### `DaytonaPythonTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `code` | `str` | ✓ | Python source code to execute. |
|
||||
| `argv` | `list[str] \| None` | | Argument vector forwarded via `CodeRunParams`. |
|
||||
| `env` | `dict[str, str] \| None` | | Environment variables forwarded via `CodeRunParams`. |
|
||||
| `timeout` | `int \| None` | | Maximum seconds to wait for execution. |
|
||||
|
||||
### `DaytonaFileTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`. |
|
||||
| `path` | `str` | ✓ | Absolute path inside the sandbox. |
|
||||
| `content` | `str \| None` | | Content to write or append. Required for `append`. |
|
||||
| `binary` | `bool` | | If `True`, `content` is base64 on write; returns base64 on read. |
|
||||
| `recursive` | `bool` | | For `delete`: remove directories recursively. |
|
||||
| `mode` | `str` | | For `mkdir`: octal permission string (default `"0755"`). |
|
||||
|
||||
<Tip>
|
||||
For files larger than a few KB, create the file first with `action="write"` and empty content, then send the body via multiple `action="append"` calls of ~4 KB each to stay within tool-call payload limits.
|
||||
</Tip>
|
||||
@@ -1,15 +1,15 @@
|
||||
---
|
||||
title: EXA 검색 웹 로더
|
||||
description: EXASearchTool은 인터넷 전반에 걸쳐 텍스트의 내용에서 지정된 쿼리에 대한 시맨틱 검색을 수행하도록 설계되었습니다.
|
||||
description: ExaSearchTool은 인터넷 전반에 걸쳐 텍스트의 내용에서 지정된 쿼리에 대한 시맨틱 검색을 수행하도록 설계되었습니다.
|
||||
icon: globe-pointer
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
# `EXASearchTool`
|
||||
# `ExaSearchTool`
|
||||
|
||||
## 설명
|
||||
|
||||
EXASearchTool은 텍스트의 내용을 기반으로 지정된 쿼리를 인터넷 전반에 걸쳐 의미론적으로 검색하도록 설계되었습니다.
|
||||
ExaSearchTool은 텍스트의 내용을 기반으로 지정된 쿼리를 인터넷 전반에 걸쳐 의미론적으로 검색하도록 설계되었습니다.
|
||||
사용자가 제공한 쿼리를 기반으로 가장 관련성 높은 검색 결과를 가져오고 표시하기 위해 [exa.ai](https://exa.ai/) API를 활용합니다.
|
||||
|
||||
## 설치
|
||||
@@ -25,15 +25,15 @@ pip install 'crewai[tools]'
|
||||
다음 예제는 도구를 초기화하고 주어진 쿼리로 검색을 실행하는 방법을 보여줍니다:
|
||||
|
||||
```python Code
|
||||
from crewai_tools import EXASearchTool
|
||||
from crewai_tools import ExaSearchTool
|
||||
|
||||
# Initialize the tool for internet searching capabilities
|
||||
tool = EXASearchTool()
|
||||
tool = ExaSearchTool()
|
||||
```
|
||||
|
||||
## 시작 단계
|
||||
|
||||
EXASearchTool을 효과적으로 사용하려면 다음 단계를 따르세요:
|
||||
ExaSearchTool을 효과적으로 사용하려면 다음 단계를 따르세요:
|
||||
|
||||
<Steps>
|
||||
<Step title="패키지 설치">
|
||||
@@ -47,7 +47,35 @@ EXASearchTool을 효과적으로 사용하려면 다음 단계를 따르세요:
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## MCP를 통한 Exa 사용
|
||||
|
||||
Exa가 호스팅하는 MCP 서버에 에이전트를 연결할 수도 있습니다. API 키는 `x-api-key` 헤더로 전달하세요:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
from crewai.mcp import MCPServerHTTP
|
||||
|
||||
agent = Agent(
|
||||
role="Research Analyst",
|
||||
goal="Find and analyze information on the web",
|
||||
backstory="Expert researcher with access to Exa's tools",
|
||||
mcps=[
|
||||
MCPServerHTTP(
|
||||
url="https://mcp.exa.ai/mcp",
|
||||
headers={"x-api-key": "YOUR_EXA_API_KEY"},
|
||||
),
|
||||
],
|
||||
)
|
||||
```
|
||||
|
||||
API 키는 [Exa 대시보드](https://dashboard.exa.ai/api-keys)에서 발급받을 수 있습니다. CrewAI에서의 MCP 사용에 대한 자세한 내용은 [MCP 개요](/ko/mcp/overview)를 참고하세요.
|
||||
|
||||
## 결론
|
||||
|
||||
`EXASearchTool`을 Python 프로젝트에 통합함으로써, 사용자는 애플리케이션 내에서 실시간으로 인터넷을 직접 검색할 수 있는 능력을 얻게 됩니다.
|
||||
`ExaSearchTool`을 Python 프로젝트에 통합함으로써, 사용자는 애플리케이션 내에서 실시간으로 인터넷을 직접 검색할 수 있는 능력을 얻게 됩니다.
|
||||
제공된 설정 및 사용 지침을 따르면, 이 도구를 프로젝트에 포함하는 과정이 간편하고 직관적입니다.
|
||||
|
||||
## 참고 자료
|
||||
|
||||
- [Exa 공식 문서](https://exa.ai/docs)
|
||||
- [Exa 대시보드 — API 키 및 사용량 관리](https://dashboard.exa.ai)
|
||||
|
||||
@@ -26,7 +26,7 @@ Bem-vindo à referência da API do CrewAI AMP. Esta API permite que você intera
|
||||
</Step>
|
||||
|
||||
<Step title="Monitore o Progresso">
|
||||
Use `GET /{kickoff_id}/status` para checar o status da execução e recuperar os resultados.
|
||||
Use `GET /status/{kickoff_id}` para checar o status da execução e recuperar os resultados.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
@@ -65,7 +65,7 @@ Substitua `your-crew-name` pela URL real do seu crew no painel.
|
||||
|
||||
1. **Descoberta**: Chame `GET /inputs` para entender o que seu crew precisa
|
||||
2. **Execução**: Envie os inputs via `POST /kickoff` para iniciar o processamento
|
||||
3. **Monitoramento**: Faça polling em `GET /{kickoff_id}/status` até a conclusão
|
||||
3. **Monitoramento**: Faça polling em `GET /status/{kickoff_id}` até a conclusão
|
||||
4. **Resultados**: Extraia o output final da resposta concluída
|
||||
|
||||
## Tratamento de Erros
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: "GET /{kickoff_id}/status"
|
||||
title: "GET /status/{kickoff_id}"
|
||||
description: "Obter o status da execução"
|
||||
openapi: "/enterprise-api.pt-BR.yaml GET /{kickoff_id}/status"
|
||||
openapi: "/enterprise-api.pt-BR.yaml GET /status/{kickoff_id}"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
@@ -4,6 +4,152 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="07 mai 2026">
|
||||
## v1.14.5a3
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a3)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir o caminho do endpoint de status de /{kickoff_id}/status para /status/{kickoff_id}
|
||||
- Atualizar a dependência gitpython para a versão >=3.1.47 para conformidade de segurança
|
||||
|
||||
### Refatoração
|
||||
- Extrair CLI para o pacote independente crewai-cli
|
||||
|
||||
### Documentação
|
||||
- Atualizar o changelog e a versão para v1.14.5a2
|
||||
|
||||
## Contributors
|
||||
|
||||
@greysonlalonde, @iris-clawd
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="04 mai 2026">
|
||||
## v1.14.5a2
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a2)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir a restauração da saída da tarefa no bloco finally
|
||||
- Incluir `thoughts_token_count` nos tokens de conclusão
|
||||
- Preservar as saídas das tarefas durante o descarregamento assíncrono em lote
|
||||
- Encaminhar kwargs para chamadas de carregador em `CrewAIRagAdapter`
|
||||
- Impedir que `result_as_answer` retorne mensagem de bloqueio de hook como resposta final
|
||||
- Impedir que `result_as_answer` retorne erro como resposta final
|
||||
- Usar `acall` para conversão de saída em caminhos assíncronos
|
||||
- Prevenir a mutação de palavras de parada compartilhadas do LLM entre agentes
|
||||
- Lidar com entrada `BaseModel` em `convert_to_model`
|
||||
|
||||
### Documentação
|
||||
- Documentar variáveis de ambiente adicionais
|
||||
- Atualizar changelog e versão para v1.14.5a1
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@NIK-TIGER-BILL, @greysonlalonde, @lorenzejay, @minasami-pr, @theCyberTech, @wishhyt
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="01 mai 2026">
|
||||
## v1.14.5a1
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a1)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Recursos
|
||||
- Adicionar parâmetro de início `restore_from_state_id`
|
||||
- Adicionar destaques ao ExaSearchTool e renomear de EXASearchTool
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir pins de versão do crewai ausentes no fluxo de lançamento
|
||||
- Garantir eventos de carregamento de habilidades para rastros
|
||||
|
||||
### Documentação
|
||||
- Atualizar changelog e versão para v1.14.4
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@akaKuruma, @github-actions[bot], @greysonlalonde, @lorenzejay, @theishangoswami
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="01 mai 2026">
|
||||
## v1.14.4
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.4)
|
||||
|
||||
## O que mudou
|
||||
|
||||
### Recursos
|
||||
- Adicionar suporte para chave de persistência personalizada em @persist
|
||||
- Adicionar suporte à API de Respostas para o provedor Azure OpenAI
|
||||
- Encaminhar credential_scopes para o cliente de Inferência da Azure AI
|
||||
- Adicionar guia de configuração de identidade de carga de trabalho do Vertex AI
|
||||
- Adicionar as ferramentas Tavily Research e Search
|
||||
- Adicionar ferramentas MCP do You.com para pesquisa, investigação e extração de conteúdo
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir falha quando a correspondência de regex JSON não é um JSON válido
|
||||
- Corrigir para preservar tool_calls quando a resposta também contém texto
|
||||
- Corrigir para encaminhar base_url e api_key para instructor.from_provider
|
||||
- Corrigir para avisar e retornar vazio quando o servidor MCP nativo não retorna ferramentas
|
||||
- Corrigir para usar a variável de mensagens validadas em manipuladores não-streaming
|
||||
- Corrigir para proteger os ajudantes de descrição do chat da equipe contra falhas do LLM
|
||||
- Corrigir para redefinir mensagens e iterações entre invocações
|
||||
- Corrigir para encaminhar o arquivo de agentes treinados através de replay e teste
|
||||
- Corrigir para honrar o arquivo de agentes treinados personalizados na inferência
|
||||
- Corrigir para vincular agentes apenas de tarefa à equipe para arquivos de entrada multimodal
|
||||
- Corrigir para serializar chamadas de guardrail como nulas para checkpointing JSON
|
||||
- Corrigir renomeação de force_final_answer para evitar roteador autorreferencial
|
||||
- Atualizar litellm para correção de SSTI; ignorar CVE pip não corrigível
|
||||
|
||||
### Documentação
|
||||
- Atualizar changelog e versão para v1.14.4a1
|
||||
- Adicionar página de Ferramentas do Sandbox E2B
|
||||
- Adicionar documentação de ferramentas do sandbox Daytona
|
||||
|
||||
## Contributors
|
||||
|
||||
@EdwardIrby, @dependabot[bot], @factory-droid-oss, @factory-droid[bot], @greysonlalonde, @kunalk16, @lorenzejay, @lucasgomide, @manisrinivasan2k1, @mattatcha, @vinibrsl
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="29 abr 2026">
|
||||
## v1.14.4a1
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.4a1)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir os ajudantes de descrição do chat da equipe contra falhas do LLM.
|
||||
- Redefinir mensagens e iterações entre invocações no executor.
|
||||
- Encaminhar arquivo de agentes treinados através de replay e teste no CLI.
|
||||
- Respeitar arquivo de agentes treinados personalizados na inferência no agente.
|
||||
- Vincular agentes apenas de tarefa à equipe para garantir que os input_files multimodais cheguem ao LLM.
|
||||
- Serializar chamadas de guardrail como nulas para checkpointing JSON.
|
||||
- Renomear `force_final_answer` no agent_executor para evitar roteador autorreferencial.
|
||||
- Atualizar `litellm` para correção de SSTI e ignorar CVE pip não corrigível.
|
||||
|
||||
### Documentação
|
||||
- Adicionar página de Ferramentas de Sandbox E2B.
|
||||
- Adicionar documentação de ferramentas de sandbox Daytona.
|
||||
- Adicionar guia de configuração de identidade de carga de trabalho do Vertex AI.
|
||||
- Adicionar ferramentas MCP do You.com para pesquisa, investigação e extração de conteúdo.
|
||||
- Atualizar changelog e versão para v1.14.3.
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@EdwardIrby, @dependabot[bot], @factory-droid-oss, @factory-droid[bot], @greysonlalonde, @lorenzejay, @manisrinivasan2k1, @mattatcha
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="25 abr 2026">
|
||||
## v1.14.3
|
||||
|
||||
|
||||
@@ -193,6 +193,42 @@ Para um controle mais granular, você pode aplicar @persist em métodos específ
|
||||
# (O código não é traduzido)
|
||||
```
|
||||
|
||||
### Forking de Estado Persistido
|
||||
|
||||
`@persist` suporta dois modos distintos de hidratação em `kickoff` / `kickoff_async`:
|
||||
|
||||
- `kickoff(inputs={"id": <uuid>})` — **resume**: carrega o snapshot mais recente do UUID informado e continua escrevendo sob o mesmo `flow_uuid`. O histórico se estende.
|
||||
- `kickoff(restore_from_state_id=<uuid>)` — **fork**: carrega o snapshot mais recente do UUID informado, hidrata o estado da nova execução a partir dele, e atribui um novo `state.id` (auto-gerado, ou `inputs["id"]` se fixado). As escritas do `@persist` da nova execução vão para o novo `state.id`; o histórico do flow de origem é preservado.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
id: str = ""
|
||||
counter: int = 0
|
||||
|
||||
@persist
|
||||
class CounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def step(self):
|
||||
self.state.counter += 1
|
||||
print(f"[id={self.state.id}] counter={self.state.counter}")
|
||||
|
||||
# Execução 1: estado novo, counter 0 -> 1, persistido sob flow_1.state.id
|
||||
flow_1 = CounterFlow()
|
||||
flow_1.kickoff()
|
||||
|
||||
# Fork: hidrata do snapshot mais recente de flow_1, mas usa um state.id NOVO
|
||||
flow_2 = CounterFlow()
|
||||
flow_2.kickoff(restore_from_state_id=flow_1.state.id)
|
||||
# flow_2.state.counter começa em 1 (hidratado), e step() incrementa para 2.
|
||||
# flow_2.state.id != flow_1.state.id; o histórico de flow_1 não é alterado.
|
||||
```
|
||||
|
||||
Se o `restore_from_state_id` informado não corresponder a nenhum estado persistido, o kickoff retorna silenciosamente ao comportamento padrão — o mesmo comportamento do `inputs["id"]` quando não encontrado. Combinar `restore_from_state_id` com `from_checkpoint` lança um `ValueError`; escolha uma única fonte de hidratação. Fixar `inputs["id"]` durante o fork compartilha uma chave de persistência com outro flow — geralmente você quer apenas `restore_from_state_id`.
|
||||
|
||||
### Como Funciona
|
||||
|
||||
1. **Identificação Única do Estado**
|
||||
|
||||
@@ -146,6 +146,14 @@ class ProductionFlow(Flow[AppState]):
|
||||
# ...
|
||||
```
|
||||
|
||||
Por padrão, `@persist` retoma um flow quando `kickoff(inputs={"id": <uuid>})` é informado, estendendo o mesmo histórico do `flow_uuid`. Para **forkar** um flow persistido em uma nova linhagem — hidratar o estado a partir de uma execução anterior mas escrever sob um novo `state.id` — passe `restore_from_state_id`:
|
||||
|
||||
```python
|
||||
flow.kickoff(restore_from_state_id="<previous-run-state-id>")
|
||||
```
|
||||
|
||||
A nova execução recebe um novo `state.id` (auto-gerado, ou `inputs["id"]` se fixado), então suas escritas do `@persist` não estendem o histórico da origem. Combinar com `from_checkpoint` lança um `ValueError`; escolha uma única fonte de hidratação.
|
||||
|
||||
## Resumo
|
||||
|
||||
- **Comece com um Flow.**
|
||||
|
||||
@@ -133,7 +133,7 @@ Aqui está uma lista das ferramentas disponíveis e suas descrições:
|
||||
| **DirectorySearchTool** | Ferramenta RAG para busca em diretórios, útil para navegação em sistemas de arquivos. |
|
||||
| **DOCXSearchTool** | Ferramenta RAG voltada para busca em documentos DOCX, ideal para processar arquivos Word. |
|
||||
| **DirectoryReadTool** | Facilita a leitura e processamento de estruturas de diretórios e seus conteúdos. |
|
||||
| **EXASearchTool** | Ferramenta projetada para buscas exaustivas em diversas fontes de dados. |
|
||||
| **ExaSearchTool** | Ferramenta projetada para buscas exaustivas em diversas fontes de dados. |
|
||||
| **FileReadTool** | Permite a leitura e extração de dados de arquivos, suportando diversos formatos. |
|
||||
| **FirecrawlSearchTool** | Ferramenta para buscar páginas web usando Firecrawl e retornar os resultados. |
|
||||
| **FirecrawlCrawlWebsiteTool** | Ferramenta para rastrear páginas web utilizando o Firecrawl. |
|
||||
|
||||
@@ -167,6 +167,48 @@ Para mais controle, você pode aplicar `@persist()` em métodos específicos:
|
||||
# código não traduzido
|
||||
```
|
||||
|
||||
#### Forking de Estado Persistido
|
||||
|
||||
`@persist` suporta dois modos distintos de hidratação em `kickoff` / `kickoff_async`. Use **resume** (`inputs["id"]`) para continuar a mesma linhagem; use **fork** (`restore_from_state_id`) para iniciar uma nova linhagem a partir de um snapshot:
|
||||
|
||||
| | `state.id` após o kickoff | Escritas do `@persist` vão para |
|
||||
|---|---|---|
|
||||
| `inputs["id"]` (resume) | id informado | id informado (estende o histórico) |
|
||||
| `restore_from_state_id` (fork) | id novo, ou `inputs["id"]` se fixado | id novo (origem preservada) |
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist
|
||||
from pydantic import BaseModel
|
||||
|
||||
class CounterState(BaseModel):
|
||||
id: str = ""
|
||||
counter: int = 0
|
||||
|
||||
@persist
|
||||
class CounterFlow(Flow[CounterState]):
|
||||
@start()
|
||||
def step(self):
|
||||
self.state.counter += 1
|
||||
|
||||
# Execução 1: estado novo, counter 0 -> 1
|
||||
flow_1 = CounterFlow()
|
||||
flow_1.kickoff()
|
||||
|
||||
# Fork: hidrata do snapshot mais recente de flow_1, mas escreve sob um state.id NOVO
|
||||
flow_2 = CounterFlow()
|
||||
flow_2.kickoff(restore_from_state_id=flow_1.state.id)
|
||||
# flow_2 começa com counter=1 (hidratado), e step() incrementa para 2.
|
||||
# O histórico do flow_uuid de flow_1 não é alterado.
|
||||
```
|
||||
|
||||
Notas sobre o comportamento:
|
||||
|
||||
- `restore_from_state_id` não encontrado na persistência → o kickoff retorna silenciosamente ao comportamento padrão (espelha o comportamento de `inputs["id"]` quando não encontrado). Nenhuma exceção é lançada.
|
||||
- Combinar `restore_from_state_id` com `from_checkpoint` lança um `ValueError` — eles miram sistemas de estado diferentes (`@persist` vs. Checkpointing) e não podem ser combinados.
|
||||
- `restore_from_state_id=None` (padrão) é byte-idêntico a um kickoff sem o parâmetro.
|
||||
- Fixar `inputs["id"]` durante o fork significa que a nova execução compartilha uma chave de persistência com outro flow — geralmente você quer apenas `restore_from_state_id`.
|
||||
|
||||
## Padrões Avançados de Estado
|
||||
|
||||
### Lógica Condicional Baseada no Estado
|
||||
|
||||
359
docs/pt-BR/guides/migration/upgrading-crewai.mdx
Normal file
359
docs/pt-BR/guides/migration/upgrading-crewai.mdx
Normal file
@@ -0,0 +1,359 @@
|
||||
---
|
||||
title: "Atualizando e Migrando o CrewAI"
|
||||
description: "Como atualizar o CrewAI, lidar com breaking changes e migrar Crews para Flows."
|
||||
icon: "arrow-up-circle"
|
||||
---
|
||||
|
||||
## Visão Geral
|
||||
|
||||
O CrewAI evolui rapidamente. Novas versões frequentemente ajustam caminhos de import, alteram defaults de `Agent`, `Crew` e `Task`, e introduzem novas primitivas de orquestração como `Flow` e checkpointing. Este guia reúne os passos práticos necessários para:
|
||||
|
||||
- Atualizar a CLI global `crewai` e a dependência fixada do seu projeto
|
||||
- Adaptar-se a breaking changes em imports e parâmetros
|
||||
- Migrar uma `Crew` independente para um `Flow` tipado
|
||||
- Evitar as armadilhas que aparecem na primeira execução de um projeto atualizado
|
||||
|
||||
Se você está começando do zero, veja [Instalação](/pt-BR/installation). Se está vindo de outro framework, veja [Migrando do LangGraph](/pt-BR/guides/migration/migrating-from-langgraph).
|
||||
|
||||
---
|
||||
|
||||
## As Duas Coisas Que Você Pode Querer Atualizar
|
||||
|
||||
O CrewAI vive em dois lugares na sua máquina, e cada um se atualiza de forma independente:
|
||||
|
||||
| O quê | Como é instalado | Como atualizar |
|
||||
|---|---|---|
|
||||
| A **CLI global `crewai`** | `uv tool install crewai` | `uv tool install crewai --upgrade` |
|
||||
| O **venv do projeto** (onde seu código roda) | `crewai install` / `uv sync` | `uv add "crewai[...]>=X.Y.Z"` e depois `crewai install` |
|
||||
|
||||
Esses dois podem — e frequentemente ficam — fora de sincronia. Rodar `crewai --version` mostra a versão da CLI. Rodar `uv pip show crewai` dentro do seu projeto mostra a versão do venv. Se forem diferentes, isso é normal; o que importa para o código em execução é a versão do venv.
|
||||
|
||||
## Por Que `crewai install` Sozinho Não Atualiza
|
||||
|
||||
`crewai install` é um wrapper fino em torno de `uv sync`. Ele instala exatamente o que o arquivo `uv.lock` atual diz — ele **não** muda nenhuma restrição de versão.
|
||||
|
||||
Se seu `pyproject.toml` diz `crewai>=1.11.1` e o lock file resolveu para `1.11.1`, executar `crewai install` vai te manter em `1.11.1` para sempre, mesmo que `1.14.4` esteja disponível.
|
||||
|
||||
Para realmente atualizar, você precisa:
|
||||
|
||||
1. Atualizar a restrição de versão em `pyproject.toml`
|
||||
2. Re-resolver o lock file
|
||||
3. Sincronizar o venv
|
||||
|
||||
`uv add` faz os três de uma vez só.
|
||||
|
||||
## Como Atualizar Seu Projeto
|
||||
|
||||
```bash
|
||||
# Aumenta a restrição e re-resolve o lock em um único comando
|
||||
uv add "crewai[tools]>=1.14.4"
|
||||
|
||||
# Sincroniza o venv (crewai install chama uv sync por baixo dos panos)
|
||||
crewai install
|
||||
|
||||
# Verifica
|
||||
uv pip show crewai
|
||||
# → Version: 1.14.4
|
||||
```
|
||||
|
||||
Substitua `[tools]` por quaisquer extras que seu projeto utilize (ex.: `[tools,anthropic]`). Verifique a lista de `dependencies` do seu `pyproject.toml` se estiver em dúvida.
|
||||
|
||||
<Note>
|
||||
`uv add` atualiza tanto `pyproject.toml` **quanto** `uv.lock` atomicamente. Se você editar `pyproject.toml` manualmente, ainda precisa rodar `uv lock --upgrade-package crewai` para re-resolver o lock file antes que `crewai install` pegue a nova versão.
|
||||
</Note>
|
||||
|
||||
## Atualizando a CLI Global
|
||||
|
||||
A CLI global é separada do seu projeto. Atualize com:
|
||||
|
||||
```bash
|
||||
uv tool install crewai --upgrade
|
||||
```
|
||||
|
||||
Se seu shell avisar sobre o `PATH` após a atualização, recarregue-o:
|
||||
|
||||
```bash
|
||||
uv tool update-shell
|
||||
```
|
||||
|
||||
Isso **não** mexe no venv do seu projeto — você ainda precisa de `uv add` + `crewai install` dentro do projeto.
|
||||
|
||||
## Verifique Se Ambos Estão em Sincronia
|
||||
|
||||
```bash
|
||||
# Versão da CLI global
|
||||
crewai --version
|
||||
|
||||
# Versão do venv do projeto
|
||||
uv pip show crewai | grep Version
|
||||
```
|
||||
|
||||
Eles não precisam coincidir — mas a versão do venv do projeto é o que importa para o comportamento em runtime.
|
||||
|
||||
<Note>
|
||||
CrewAI requer `Python >=3.10, <3.14`. Se o `uv` foi instalado contra um interpretador mais antigo, recrie o venv do projeto com uma versão suportada do Python antes de rodar `crewai install`.
|
||||
</Note>
|
||||
|
||||
---
|
||||
|
||||
## Breaking Changes e Notas de Migração
|
||||
|
||||
A maioria das atualizações requer apenas pequenos ajustes. As áreas abaixo são as que quebram silenciosamente ou com tracebacks confusos.
|
||||
|
||||
### Caminhos de import: tools e `BaseTool`
|
||||
|
||||
O caminho canônico para tools é `crewai.tools`. Caminhos antigos ainda aparecem em tutoriais, mas devem ser atualizados.
|
||||
|
||||
```python
|
||||
# Antes
|
||||
from crewai_tools import BaseTool
|
||||
from crewai.agents.tools import tool
|
||||
|
||||
# Depois
|
||||
from crewai.tools import BaseTool, tool
|
||||
```
|
||||
|
||||
O decorador `@tool` e a subclasse `BaseTool` ambos vivem em `crewai.tools`. `AgentFinish` e outros símbolos internos do agente não fazem mais parte da superfície pública — se você os estava importando, mude para event listeners ou callbacks de `Task`.
|
||||
|
||||
### Mudanças de parâmetros em `Agent`
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
|
||||
agent = Agent(
|
||||
role="Researcher",
|
||||
goal="Find authoritative sources on {topic}",
|
||||
backstory="You are a careful, source-driven researcher.",
|
||||
llm="gpt-4o-mini", # nome do modelo como string OU um objeto LLM
|
||||
verbose=True, # bool, não um nível inteiro
|
||||
max_iter=15, # default mudou entre versões — defina explicitamente
|
||||
allow_delegation=False,
|
||||
)
|
||||
```
|
||||
|
||||
- `llm` aceita tanto um nome de modelo como string (resolvido pelo provedor configurado) quanto um objeto `LLM` para controle granular.
|
||||
- `verbose` é um `bool` puro. Passar um inteiro não alterna mais níveis de log.
|
||||
- Os defaults de `max_iter` mudaram entre releases. Se seu agente para silenciosamente de iterar após a primeira chamada de tool, defina `max_iter` explicitamente.
|
||||
|
||||
### Parâmetros de `Crew`
|
||||
|
||||
```python
|
||||
from crewai import Crew, Process
|
||||
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
process=Process.sequential, # ou Process.hierarchical
|
||||
memory=True,
|
||||
cache=True,
|
||||
embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
|
||||
)
|
||||
```
|
||||
|
||||
- `process=Process.hierarchical` requer ou `manager_llm=` ou `manager_agent=`. Sem um deles, o kickoff lança erro na validação.
|
||||
- `memory=True` com um provedor de embedding não-default precisa de um dicionário `embedder` — veja [Configuração de memória e embedder](#memory-embedder-config) abaixo.
|
||||
|
||||
### Saída estruturada de `Task`
|
||||
|
||||
Use `output_pydantic`, `output_json` ou `output_file` para forçar o resultado de uma task em um formato tipado:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel
|
||||
from crewai import Task
|
||||
|
||||
class Article(BaseModel):
|
||||
title: str
|
||||
body: str
|
||||
|
||||
write = Task(
|
||||
description="Write an article about {topic}",
|
||||
expected_output="A short article with a title and body",
|
||||
agent=writer,
|
||||
output_pydantic=Article, # a classe, NÃO uma instância
|
||||
output_file="output/article.md",
|
||||
)
|
||||
```
|
||||
|
||||
`output_pydantic` recebe a **classe** em si. Passar `Article(title="", body="")` é um erro comum e falha com um erro de validação confuso.
|
||||
|
||||
### Configuração de memória e embedder
|
||||
|
||||
Se `memory=True` e você não está usando os embeddings padrão da OpenAI, é preciso passar um `embedder`:
|
||||
|
||||
```python
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
memory=True,
|
||||
embedder={
|
||||
"provider": "ollama",
|
||||
"config": {"model": "nomic-embed-text"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
Defina as credenciais do provedor relevante (`OPENAI_API_KEY`, `OLLAMA_HOST`, etc.) no seu arquivo `.env`. Os caminhos de armazenamento de memória são locais ao projeto por default — apague o diretório de memória do projeto se trocar de embedder, já que dimensões diferentes não se misturam.
|
||||
|
||||
---
|
||||
|
||||
## Migrando uma Crew para um Flow
|
||||
|
||||
`Crew` é a primitiva certa quando você tem um único time de agentes executando um workflow. Quando você precisa de branches, múltiplas crews ou estado persistente entre execuções, parta para `Flow`.
|
||||
|
||||
### Quando usar Flows vs Crews independentes
|
||||
|
||||
| Situação | Use |
|
||||
| --- | --- |
|
||||
| Time único, workflow linear/hierárquico | `Crew` |
|
||||
| Branches condicionais, retries, roteamento por resultado | `Flow` |
|
||||
| Múltiplas crews especializadas encadeadas | `Flow` |
|
||||
| Estado que precisa persistir entre etapas ou execuções | `Flow` (com checkpointing) |
|
||||
| Você quer estado tipado e amigável à IDE | `Flow[MyState]` com um modelo Pydantic |
|
||||
|
||||
Se você precisa de qualquer um destes: branches, multi-crew ou estado persistente — comece com um `Flow`. O boilerplate é pequeno e você não precisará reescrever depois.
|
||||
|
||||
### Migração passo a passo
|
||||
|
||||
**Antes — crew independente:**
|
||||
|
||||
```python
|
||||
from crewai import Crew
|
||||
|
||||
crew = Crew(agents=[researcher, writer], tasks=[research_task, write_task])
|
||||
result = crew.kickoff(inputs={"topic": "vector databases"})
|
||||
print(result)
|
||||
```
|
||||
|
||||
**Depois — crew dentro de um Flow tipado:**
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MyState(BaseModel):
|
||||
input_data: str = ""
|
||||
result: str = ""
|
||||
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def run_crew(self):
|
||||
result = MyCrew().crew().kickoff(inputs={"topic": self.state.input_data})
|
||||
self.state.result = str(result)
|
||||
return self.state.result
|
||||
|
||||
flow = MyFlow()
|
||||
flow.kickoff(inputs={"input_data": "vector databases"})
|
||||
```
|
||||
|
||||
O que mudou:
|
||||
|
||||
1. A crew é construída dentro de um método, não no carregamento do módulo.
|
||||
2. Inputs fluem por `self.state` em vez de serem passados como kwargs.
|
||||
3. O ponto de entrada é marcado com `@start()`. Etapas seguintes usam `@listen(run_crew)` para encadear.
|
||||
|
||||
### Configuração de estado estruturado
|
||||
|
||||
Prefira estado tipado (`Flow[MyState]`) em vez da variante de dict não tipado. Você ganha autocompletar, validação na fronteira e estado serializável para checkpointing:
|
||||
|
||||
```python
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
class ResearchState(BaseModel):
|
||||
topic: str = ""
|
||||
sources: list[str] = Field(default_factory=list)
|
||||
draft: str = ""
|
||||
final: str = ""
|
||||
```
|
||||
|
||||
Estado não tipado (`Flow()` sem genérico) ainda funciona, mas você perde checagens estáticas e fidelidade no checkpointing.
|
||||
|
||||
### Padrão de Flow multi-crew
|
||||
|
||||
Encadear duas crews — pesquisa, depois escrita — é o motivo canônico para adotar Flows:
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen, router
|
||||
from pydantic import BaseModel
|
||||
|
||||
class PipelineState(BaseModel):
|
||||
topic: str = ""
|
||||
research: str = ""
|
||||
article: str = ""
|
||||
|
||||
class ContentPipeline(Flow[PipelineState]):
|
||||
@start()
|
||||
def research(self):
|
||||
out = ResearchCrew().crew().kickoff(inputs={"topic": self.state.topic})
|
||||
self.state.research = str(out)
|
||||
return self.state.research
|
||||
|
||||
@router(research)
|
||||
def gate(self):
|
||||
return "write" if len(self.state.research) > 200 else "abort"
|
||||
|
||||
@listen("write")
|
||||
def write(self):
|
||||
out = WritingCrew().crew().kickoff(
|
||||
inputs={"topic": self.state.topic, "notes": self.state.research}
|
||||
)
|
||||
self.state.article = str(out)
|
||||
return self.state.article
|
||||
|
||||
@listen("abort")
|
||||
def bail(self):
|
||||
self.state.article = "Insufficient research."
|
||||
return self.state.article
|
||||
|
||||
ContentPipeline().kickoff(inputs={"topic": "vector databases"})
|
||||
```
|
||||
|
||||
`@start()`, `@listen()` e `@router()` são os três decoradores que você usará 95% do tempo. Veja [Flows](/pt-BR/concepts/flows) para a referência completa.
|
||||
|
||||
---
|
||||
|
||||
## Armadilhas Comuns
|
||||
|
||||
1. **Rodar `crewai install` esperando uma atualização.** `crewai install` sincroniza com base no `uv.lock` existente. Para subir versões, rode `uv add "crewai[tools]>=X.Y.Z"` primeiro e depois `crewai install`.
|
||||
2. **A restrição é um piso, não um pin.** `crewai>=1.11.1` significa "qualquer versão a partir de 1.11.1". O `uv` só re-resolve quando você executa explicitamente `uv add` ou `uv lock --upgrade-package crewai`.
|
||||
3. **Extras descartados durante o re-lock.** Se você rodar `uv add "crewai>=1.14.4"` sem extras, o `uv` pode descartar `[tools]` do conjunto resolvido. Sempre inclua os extras de que precisa: `uv add "crewai[tools]>=1.14.4"`.
|
||||
4. **Esquecer de commitar `uv.lock`.** Após subir a versão com `uv add`, commite o `uv.lock` atualizado para que seus colegas tenham as mesmas versões.
|
||||
5. **`pip install` em vez de `uv tool install`.** Misturar `crewai` instalado por pip e por uv leva a dois binários no `PATH` e divergência de versões confusa. Escolha um — o suportado é o `uv`.
|
||||
6. **Passar uma instância Pydantic para `output_pydantic`.** Ele espera a classe. `output_pydantic=Article`, não `output_pydantic=Article(...)`.
|
||||
7. **Processo hierárquico sem manager.** `process=Process.hierarchical` requer `manager_llm=` ou `manager_agent=`.
|
||||
8. **Memória ativada com o embedder errado.** Trocar de embedder sem limpar o diretório de memória em disco causa incompatibilidade de dimensões. Apague o store de memória do projeto após mudar de provedor.
|
||||
9. **Estado de dict quando você queria estado tipado.** `Flow()` sem genérico te dá um dict. Para checagem de tipos e checkpointing limpo, use `Flow[MyState]` com um `BaseModel`.
|
||||
10. **Imports antigos de tools.** `from crewai_tools import BaseTool` funciona em algumas versões, mas não é o caminho canônico. Padronize com `from crewai.tools import BaseTool, tool`.
|
||||
11. **Drift de versão do Python.** O CrewAI requer `>=3.10, <3.14`. O `uv` vai felizmente construir um venv contra 3.14+ se for o default; pin a versão do Python no `pyproject.toml`.
|
||||
12. **`verbose=2` e flags inteiras semelhantes.** `verbose` é um `bool`. Use event listeners para logging mais granular.
|
||||
13. **Chamar `crew.kickoff()` de dentro de um Flow sem encapsular em `inputs={}`.** Flows passam estado, não kwargs. A crew ainda espera `inputs={...}`.
|
||||
|
||||
---
|
||||
|
||||
## Checkpointing
|
||||
|
||||
Checkpointing é uma adição mais recente que persiste o estado de agent, crew e flow entre execuções. Permite que workflows de longa duração retomem após um crash, uma parada manual ou um deploy.
|
||||
|
||||
```python
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
checkpoint=True,
|
||||
)
|
||||
```
|
||||
|
||||
A mesma flag é suportada em `Flow` e `Agent`. O estado é gravado no store local do projeto e reproduzido na próxima `kickoff()` com o mesmo identificador.
|
||||
|
||||
<Note>
|
||||
Checkpointing está em release inicial. APIs em torno de semântica de retomada, backends de storage e identificadores ainda podem mudar entre versões menores — pin sua versão do `crewai` se você depende disso em produção.
|
||||
</Note>
|
||||
|
||||
Veja [Checkpointing](/pt-BR/concepts/checkpointing) para a referência completa do recurso.
|
||||
|
||||
---
|
||||
|
||||
## Obtendo Ajuda
|
||||
|
||||
- **Changelog** — toda breaking change é registrada nas [release notes](/pt-BR/changelog).
|
||||
- **GitHub Issues** — abra uma em [github.com/crewAIInc/crewAI/issues](https://github.com/crewAIInc/crewAI/issues) com um repro mínimo e a saída de `crewai --version`.
|
||||
- **Discord** — o Discord da comunidade CrewAI é o caminho mais rápido para ajuda em debugging: [community.crewai.com](https://community.crewai.com).
|
||||
- **Guias de migração** — se você está vindo de outro framework, comece em [Migrando do LangGraph](/pt-BR/guides/migration/migrating-from-langgraph).
|
||||
180
docs/pt-BR/tools/ai-ml/daytona.mdx
Normal file
180
docs/pt-BR/tools/ai-ml/daytona.mdx
Normal file
@@ -0,0 +1,180 @@
|
||||
---
|
||||
title: Daytona Sandbox Tools
|
||||
description: Run shell commands, execute Python, and manage files inside isolated [Daytona](https://www.daytona.io/) sandboxes.
|
||||
icon: box
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
# Daytona Sandbox Tools
|
||||
|
||||
## Description
|
||||
|
||||
The Daytona sandbox tools give CrewAI agents access to isolated, ephemeral compute environments powered by [Daytona](https://www.daytona.io/). Three tools are available so you can give an agent exactly the capabilities it needs:
|
||||
|
||||
- **`DaytonaExecTool`** — run any shell command inside a sandbox.
|
||||
- **`DaytonaPythonTool`** — execute a block of Python source code inside a sandbox.
|
||||
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox.
|
||||
|
||||
All three tools share the same sandbox lifecycle controls, so you can mix and match them while keeping state in a single persistent sandbox.
|
||||
|
||||
## Installation
|
||||
|
||||
```shell
|
||||
uv add "crewai-tools[daytona]"
|
||||
# or
|
||||
pip install "crewai-tools[daytona]"
|
||||
```
|
||||
|
||||
Set your API key:
|
||||
|
||||
```shell
|
||||
export DAYTONA_API_KEY="your-api-key"
|
||||
```
|
||||
|
||||
`DAYTONA_API_URL` and `DAYTONA_TARGET` are also respected if set.
|
||||
|
||||
## Sandbox Lifecycle
|
||||
|
||||
All three tools inherit lifecycle controls from `DaytonaBaseTool`:
|
||||
|
||||
| Mode | How to enable | Sandbox created | Sandbox deleted |
|
||||
|------|--------------|-----------------|-----------------|
|
||||
| **Ephemeral** (default) | `persistent=False` (default) | On every `_run` call | At the end of that same call |
|
||||
| **Persistent** | `persistent=True` | Lazily on first use | At process exit (via `atexit`), or manually via `tool.close()` |
|
||||
| **Attach** | `sandbox_id="<id>"` | Never — attaches to an existing sandbox | Never — the tool will not delete a sandbox it did not create |
|
||||
|
||||
Ephemeral mode is the safe default: nothing leaks if the agent forgets to clean up. Use persistent mode when you want filesystem state or installed packages to carry across multiple tool calls — this is typical when pairing `DaytonaFileTool` with `DaytonaExecTool`.
|
||||
|
||||
## Examples
|
||||
|
||||
### One-shot Python execution (ephemeral)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaPythonTool
|
||||
|
||||
tool = DaytonaPythonTool()
|
||||
result = tool.run(code="print(sum(range(10)))")
|
||||
print(result)
|
||||
# {"exit_code": 0, "result": "45\n", "artifacts": None}
|
||||
```
|
||||
|
||||
### Multi-step shell session (persistent)
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool, DaytonaFileTool
|
||||
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
# Install a package, then write and run a script — all in the same sandbox
|
||||
exec_tool.run(command="pip install httpx -q")
|
||||
file_tool.run(action="write", path="/workspace/fetch.py", content="import httpx; print(httpx.get('https://httpbin.org/get').status_code)")
|
||||
exec_tool.run(command="python /workspace/fetch.py")
|
||||
```
|
||||
|
||||
<Note>
|
||||
Each tool instance maintains its own persistent sandbox. To share **one** sandbox across two tools, create the first tool, grab its sandbox id via `tool._persistent_sandbox.id`, and pass it to the second tool via `sandbox_id=...`.
|
||||
</Note>
|
||||
|
||||
### Attach to an existing sandbox
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(sandbox_id="my-long-lived-sandbox")
|
||||
result = tool.run(command="ls /workspace")
|
||||
```
|
||||
|
||||
### Custom sandbox parameters
|
||||
|
||||
Pass Daytona's `CreateSandboxFromSnapshotParams` kwargs via `create_params`:
|
||||
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(
|
||||
persistent=True,
|
||||
create_params={
|
||||
"language": "python",
|
||||
"env_vars": {"MY_FLAG": "1"},
|
||||
"labels": {"owner": "crewai-agent"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
### Agent integration
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import DaytonaExecTool, DaytonaPythonTool, DaytonaFileTool
|
||||
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
python_tool = DaytonaPythonTool(persistent=True)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
coder = Agent(
|
||||
role="Sandbox Engineer",
|
||||
goal="Write and run code in an isolated environment",
|
||||
backstory="An engineer who uses Daytona sandboxes to safely execute code and manage files.",
|
||||
tools=[exec_tool, python_tool, file_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Write a Python script that prints the first 10 Fibonacci numbers, save it to /workspace/fib.py, and run it.",
|
||||
expected_output="The first 10 Fibonacci numbers printed to stdout.",
|
||||
agent=coder,
|
||||
)
|
||||
|
||||
crew = Crew(agents=[coder], tasks=[task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
### Shared (`DaytonaBaseTool`)
|
||||
|
||||
All three tools accept these parameters at initialization:
|
||||
|
||||
| Parameter | Type | Default | Description |
|
||||
|-----------|------|---------|-------------|
|
||||
| `api_key` | `str \| None` | `$DAYTONA_API_KEY` | Daytona API key. Falls back to the `DAYTONA_API_KEY` env var. |
|
||||
| `api_url` | `str \| None` | `$DAYTONA_API_URL` | Daytona API URL override. |
|
||||
| `target` | `str \| None` | `$DAYTONA_TARGET` | Daytona target region. |
|
||||
| `persistent` | `bool` | `False` | Reuse one sandbox across all calls and delete it at process exit. |
|
||||
| `sandbox_id` | `str \| None` | `None` | Attach to an existing sandbox by id or name. |
|
||||
| `create_params` | `dict \| None` | `None` | Extra kwargs forwarded to `CreateSandboxFromSnapshotParams` (e.g. `language`, `env_vars`, `labels`). |
|
||||
| `sandbox_timeout` | `float` | `60.0` | Timeout in seconds for sandbox create/delete operations. |
|
||||
|
||||
### `DaytonaExecTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `command` | `str` | ✓ | Shell command to execute. |
|
||||
| `cwd` | `str \| None` | | Working directory inside the sandbox. |
|
||||
| `env` | `dict[str, str] \| None` | | Extra environment variables for this command. |
|
||||
| `timeout` | `int \| None` | | Maximum seconds to wait for the command. |
|
||||
|
||||
### `DaytonaPythonTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `code` | `str` | ✓ | Python source code to execute. |
|
||||
| `argv` | `list[str] \| None` | | Argument vector forwarded via `CodeRunParams`. |
|
||||
| `env` | `dict[str, str] \| None` | | Environment variables forwarded via `CodeRunParams`. |
|
||||
| `timeout` | `int \| None` | | Maximum seconds to wait for execution. |
|
||||
|
||||
### `DaytonaFileTool`
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
|-----------|------|----------|-------------|
|
||||
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`. |
|
||||
| `path` | `str` | ✓ | Absolute path inside the sandbox. |
|
||||
| `content` | `str \| None` | | Content to write or append. Required for `append`. |
|
||||
| `binary` | `bool` | | If `True`, `content` is base64 on write; returns base64 on read. |
|
||||
| `recursive` | `bool` | | For `delete`: remove directories recursively. |
|
||||
| `mode` | `str` | | For `mkdir`: octal permission string (default `"0755"`). |
|
||||
|
||||
<Tip>
|
||||
For files larger than a few KB, create the file first with `action="write"` and empty content, then send the body via multiple `action="append"` calls of ~4 KB each to stay within tool-call payload limits.
|
||||
</Tip>
|
||||
@@ -1,15 +1,15 @@
|
||||
---
|
||||
title: Carregador Web EXA Search
|
||||
description: O `EXASearchTool` foi projetado para realizar uma busca semântica para uma consulta especificada a partir do conteúdo de um texto em toda a internet.
|
||||
description: O `ExaSearchTool` foi projetado para realizar uma busca semântica para uma consulta especificada a partir do conteúdo de um texto em toda a internet.
|
||||
icon: globe-pointer
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
# `EXASearchTool`
|
||||
# `ExaSearchTool`
|
||||
|
||||
## Descrição
|
||||
|
||||
O EXASearchTool foi projetado para realizar uma busca semântica para uma consulta especificada a partir do conteúdo de um texto em toda a internet.
|
||||
O ExaSearchTool foi projetado para realizar uma busca semântica para uma consulta especificada a partir do conteúdo de um texto em toda a internet.
|
||||
Ele utiliza a API da [exa.ai](https://exa.ai/) para buscar e exibir os resultados de pesquisa mais relevantes com base na consulta fornecida pelo usuário.
|
||||
|
||||
## Instalação
|
||||
@@ -25,15 +25,15 @@ pip install 'crewai[tools]'
|
||||
O exemplo a seguir demonstra como inicializar a ferramenta e executar uma busca com uma consulta determinada:
|
||||
|
||||
```python Code
|
||||
from crewai_tools import EXASearchTool
|
||||
from crewai_tools import ExaSearchTool
|
||||
|
||||
# Initialize the tool for internet searching capabilities
|
||||
tool = EXASearchTool()
|
||||
tool = ExaSearchTool()
|
||||
```
|
||||
|
||||
## Etapas para Começar
|
||||
|
||||
Para usar o EXASearchTool de forma eficaz, siga estas etapas:
|
||||
Para usar o ExaSearchTool de forma eficaz, siga estas etapas:
|
||||
|
||||
<Steps>
|
||||
<Step title="Instalação do Pacote">
|
||||
@@ -47,7 +47,35 @@ Para usar o EXASearchTool de forma eficaz, siga estas etapas:
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Usando o Exa via MCP
|
||||
|
||||
Você também pode conectar seu agente ao servidor MCP hospedado pelo Exa. Passe sua chave de API no cabeçalho `x-api-key`:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
from crewai.mcp import MCPServerHTTP
|
||||
|
||||
agent = Agent(
|
||||
role="Research Analyst",
|
||||
goal="Find and analyze information on the web",
|
||||
backstory="Expert researcher with access to Exa's tools",
|
||||
mcps=[
|
||||
MCPServerHTTP(
|
||||
url="https://mcp.exa.ai/mcp",
|
||||
headers={"x-api-key": "YOUR_EXA_API_KEY"},
|
||||
),
|
||||
],
|
||||
)
|
||||
```
|
||||
|
||||
Obtenha sua chave de API no [painel da Exa](https://dashboard.exa.ai/api-keys). Para mais informações sobre MCP no CrewAI, consulte a [visão geral do MCP](/pt-BR/mcp/overview).
|
||||
|
||||
## Conclusão
|
||||
|
||||
Ao integrar o `EXASearchTool` em projetos Python, os usuários ganham a capacidade de realizar buscas relevantes e em tempo real pela internet diretamente de suas aplicações.
|
||||
Seguindo as orientações de configuração e uso fornecidas, a incorporação desta ferramenta em projetos torna-se simples e direta.
|
||||
Ao integrar o `ExaSearchTool` em projetos Python, os usuários ganham a capacidade de realizar buscas relevantes e em tempo real pela internet diretamente de suas aplicações.
|
||||
Seguindo as orientações de configuração e uso fornecidas, a incorporação desta ferramenta em projetos torna-se simples e direta.
|
||||
|
||||
## Recursos
|
||||
|
||||
- [Documentação do Exa](https://exa.ai/docs)
|
||||
- [Painel do Exa — gerenciar chaves de API e uso](https://dashboard.exa.ai)
|
||||
26
lib/cli/README.md
Normal file
26
lib/cli/README.md
Normal file
@@ -0,0 +1,26 @@
|
||||
# crewai-cli
|
||||
|
||||
CLI for CrewAI — scaffold, run, deploy and manage AI agent crews without
|
||||
installing the full framework.
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
pip install crewai-cli
|
||||
```
|
||||
|
||||
This pulls in `crewai-core` (shared utilities) but not the `crewai` framework
|
||||
itself, so commands that don't need a crew loaded — `crewai version`,
|
||||
`crewai login`, `crewai org list`, `crewai config *`, `crewai traces *`,
|
||||
`crewai create`, `crewai template *` — work standalone.
|
||||
|
||||
Commands that load a user's crew or flow (`crewai run`, `crewai train`,
|
||||
`crewai test`, `crewai chat`, `crewai replay`, `crewai reset-memories`,
|
||||
`crewai deploy push`, `crewai tool publish`) require `crewai` to be installed
|
||||
in the project's environment. They print a clear error if it is missing.
|
||||
|
||||
To install both at once:
|
||||
|
||||
```bash
|
||||
pip install "crewai[cli]"
|
||||
```
|
||||
45
lib/cli/pyproject.toml
Normal file
45
lib/cli/pyproject.toml
Normal file
@@ -0,0 +1,45 @@
|
||||
[project]
|
||||
name = "crewai-cli"
|
||||
dynamic = ["version"]
|
||||
description = "CLI for CrewAI — scaffold, run, deploy and manage AI agent crews."
|
||||
readme = "README.md"
|
||||
authors = [
|
||||
{ name = "Joao Moura", email = "joao@crewai.com" }
|
||||
]
|
||||
requires-python = ">=3.10, <3.14"
|
||||
dependencies = [
|
||||
"crewai-core==1.14.5a3",
|
||||
"click~=8.1.7",
|
||||
"pydantic>=2.11.9,<2.13",
|
||||
"pydantic-settings~=2.10.1",
|
||||
"appdirs~=1.4.4",
|
||||
"cryptography>=42.0",
|
||||
"httpx~=0.28.1",
|
||||
"pyjwt>=2.9.0,<3",
|
||||
"rich>=13.7.1",
|
||||
"tomli~=2.0.2",
|
||||
"tomli-w~=1.1.0",
|
||||
"packaging>=23.0",
|
||||
"python-dotenv>=1.2.2,<2",
|
||||
"uv~=0.11.6",
|
||||
"textual>=7.5.0",
|
||||
"certifi",
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
Homepage = "https://crewai.com"
|
||||
Documentation = "https://docs.crewai.com"
|
||||
Repository = "https://github.com/crewAIInc/crewAI"
|
||||
|
||||
[project.scripts]
|
||||
crewai = "crewai_cli.cli:crewai"
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
[tool.hatch.version]
|
||||
path = "src/crewai_cli/__init__.py"
|
||||
|
||||
[tool.hatch.build.targets.wheel]
|
||||
packages = ["src/crewai_cli"]
|
||||
1
lib/cli/src/crewai_cli/__init__.py
Normal file
1
lib/cli/src/crewai_cli/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
__version__ = "1.14.5a3"
|
||||
@@ -1,9 +1,9 @@
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
from crewai_core.printer import PRINTER
|
||||
|
||||
from crewai.cli.utils import copy_template
|
||||
from crewai.utilities.printer import PRINTER
|
||||
from crewai_cli.utils import copy_template
|
||||
|
||||
|
||||
def add_crew_to_flow(crew_name: str) -> None:
|
||||
8
lib/cli/src/crewai_cli/authentication/__init__.py
Normal file
8
lib/cli/src/crewai_cli/authentication/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""CLI authentication entry point."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_cli.authentication.main import AuthenticationCommand
|
||||
|
||||
|
||||
__all__ = ["AuthenticationCommand"]
|
||||
8
lib/cli/src/crewai_cli/authentication/constants.py
Normal file
8
lib/cli/src/crewai_cli/authentication/constants.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""Re-export of authentication constants from ``crewai_core.auth.constants``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.constants import ALGORITHMS as ALGORITHMS
|
||||
|
||||
|
||||
__all__ = ["ALGORITHMS"]
|
||||
60
lib/cli/src/crewai_cli/authentication/main.py
Normal file
60
lib/cli/src/crewai_cli/authentication/main.py
Normal file
@@ -0,0 +1,60 @@
|
||||
"""CLI-side authentication wiring.
|
||||
|
||||
Re-exports the OAuth2 primitives from ``crewai_core.auth`` and overrides the
|
||||
``_post_login`` hook to also log into the tool repository.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.oauth2 import (
|
||||
AuthenticationCommand as _BaseAuthenticationCommand,
|
||||
Oauth2Settings as Oauth2Settings,
|
||||
ProviderFactory as ProviderFactory,
|
||||
console,
|
||||
)
|
||||
from crewai_core.settings import Settings
|
||||
|
||||
|
||||
__all__ = ["AuthenticationCommand", "Oauth2Settings", "ProviderFactory"]
|
||||
|
||||
|
||||
class AuthenticationCommand(_BaseAuthenticationCommand):
    """CLI-side login that also signs the user into the tool repository."""

    def _post_login(self) -> None:
        # Hook called by the base command after the primary OAuth2 login
        # succeeds; chains into the optional tool-repository sign-in.
        self._login_to_tool_repository()

    def _login_to_tool_repository(self) -> None:
        """Attempt a best-effort sign-in to the Tool Repository.

        Failures are reported as a warning instead of aborting, since the
        rest of the CLI works without tool-repository access.
        """
        # Imported locally rather than at module top — presumably to avoid
        # an import cycle with the tools command module; confirm before moving.
        from crewai_cli.tools.main import ToolCommand

        try:
            console.print(
                "Now logging you in to the Tool Repository... ",
                style="bold blue",
                end="",
            )
            ToolCommand().login()
            console.print("Success!\n", style="bold green")

            settings = Settings()
            # Prefer the human-readable org name; fall back to the UUID.
            org_label = settings.org_name or settings.org_uuid
            console.print(
                f"You are now authenticated to the tool repository for organization [bold cyan]'{org_label}'[/bold cyan]",
                style="green",
            )
        except (Exception, SystemExit):
            # SystemExit is caught too because ToolCommand().login() may exit
            # directly; degrade gracefully rather than killing the login flow.
            console.print(
                "\n[bold yellow]Warning:[/bold yellow] Authentication with the Tool Repository failed.",
                style="yellow",
            )
            console.print(
                "Other features will work normally, but you may experience limitations "
                "with downloading and publishing tools."
                "\nRun [bold]crewai login[/bold] to try logging in again.\n",
                style="yellow",
            )
|
||||
@@ -0,0 +1 @@
|
||||
"""OAuth2 authentication providers — re-exported from ``crewai_core.auth.providers``."""
|
||||
8
lib/cli/src/crewai_cli/authentication/providers/auth0.py
Normal file
8
lib/cli/src/crewai_cli/authentication/providers/auth0.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""Re-export of ``Auth0Provider`` from ``crewai_core.auth.providers.auth0``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.providers.auth0 import Auth0Provider as Auth0Provider
|
||||
|
||||
|
||||
__all__ = ["Auth0Provider"]
|
||||
@@ -0,0 +1,8 @@
|
||||
"""Re-export of ``BaseProvider`` from ``crewai_core.auth.providers.base_provider``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.providers.base_provider import BaseProvider as BaseProvider
|
||||
|
||||
|
||||
__all__ = ["BaseProvider"]
|
||||
@@ -0,0 +1,8 @@
|
||||
"""Re-export of ``EntraIdProvider`` from ``crewai_core.auth.providers.entra_id``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.providers.entra_id import EntraIdProvider as EntraIdProvider
|
||||
|
||||
|
||||
__all__ = ["EntraIdProvider"]
|
||||
@@ -0,0 +1,8 @@
|
||||
"""Re-export of ``KeycloakProvider`` from ``crewai_core.auth.providers.keycloak``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.providers.keycloak import KeycloakProvider as KeycloakProvider
|
||||
|
||||
|
||||
__all__ = ["KeycloakProvider"]
|
||||
8
lib/cli/src/crewai_cli/authentication/providers/okta.py
Normal file
8
lib/cli/src/crewai_cli/authentication/providers/okta.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""Re-export of ``OktaProvider`` from ``crewai_core.auth.providers.okta``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.providers.okta import OktaProvider as OktaProvider
|
||||
|
||||
|
||||
__all__ = ["OktaProvider"]
|
||||
@@ -0,0 +1,8 @@
|
||||
"""Re-export of ``WorkosProvider`` from ``crewai_core.auth.providers.workos``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.providers.workos import WorkosProvider as WorkosProvider
|
||||
|
||||
|
||||
__all__ = ["WorkosProvider"]
|
||||
11
lib/cli/src/crewai_cli/authentication/token.py
Normal file
11
lib/cli/src/crewai_cli/authentication/token.py
Normal file
@@ -0,0 +1,11 @@
|
||||
"""Re-exports of authentication token helpers from ``crewai_core.auth.token``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.token import (
|
||||
AuthError as AuthError,
|
||||
get_auth_token as get_auth_token,
|
||||
)
|
||||
|
||||
|
||||
__all__ = ["AuthError", "get_auth_token"]
|
||||
8
lib/cli/src/crewai_cli/authentication/utils.py
Normal file
8
lib/cli/src/crewai_cli/authentication/utils.py
Normal file
@@ -0,0 +1,8 @@
|
||||
"""Re-export of ``validate_jwt_token`` from ``crewai_core.auth.utils``."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.auth.utils import validate_jwt_token as validate_jwt_token
|
||||
|
||||
|
||||
__all__ = ["validate_jwt_token"]
|
||||
@@ -21,7 +21,7 @@ from textual.widgets import (
|
||||
Tree,
|
||||
)
|
||||
|
||||
from crewai.cli.checkpoint_cli import (
|
||||
from crewai_cli.checkpoint_cli import (
|
||||
_format_size,
|
||||
_is_sqlite,
|
||||
_list_json,
|
||||
@@ -1,50 +1,66 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from importlib.metadata import version as get_version
|
||||
import os
|
||||
import subprocess
|
||||
from typing import Any
|
||||
|
||||
import click
|
||||
from crewai_core.token_manager import TokenManager
|
||||
|
||||
from crewai.cli.add_crew_to_flow import add_crew_to_flow
|
||||
from crewai.cli.authentication.main import AuthenticationCommand
|
||||
from crewai.cli.config import Settings
|
||||
from crewai.cli.create_crew import create_crew
|
||||
from crewai.cli.create_flow import create_flow
|
||||
from crewai.cli.crew_chat import run_chat
|
||||
from crewai.cli.deploy.main import DeployCommand
|
||||
from crewai.cli.enterprise.main import EnterpriseConfigureCommand
|
||||
from crewai.cli.evaluate_crew import evaluate_crew
|
||||
from crewai.cli.install_crew import install_crew
|
||||
from crewai.cli.kickoff_flow import kickoff_flow
|
||||
from crewai.cli.organization.main import OrganizationCommand
|
||||
from crewai.cli.plot_flow import plot_flow
|
||||
from crewai.cli.remote_template.main import TemplateCommand
|
||||
from crewai.cli.replay_from_task import replay_task_command
|
||||
from crewai.cli.reset_memories_command import reset_memories_command
|
||||
from crewai.cli.run_crew import run_crew
|
||||
from crewai.cli.settings.main import SettingsCommand
|
||||
from crewai.cli.shared.token_manager import TokenManager
|
||||
from crewai.cli.tools.main import ToolCommand
|
||||
from crewai.cli.train_crew import train_crew
|
||||
from crewai.cli.triggers.main import TriggersCommand
|
||||
from crewai.cli.update_crew import update_crew
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials, read_toml
|
||||
from crewai.memory.storage.kickoff_task_outputs_storage import (
|
||||
KickoffTaskOutputsSQLiteStorage,
|
||||
from crewai_cli.add_crew_to_flow import add_crew_to_flow
|
||||
from crewai_cli.authentication.main import AuthenticationCommand
|
||||
from crewai_cli.config import Settings
|
||||
from crewai_cli.create_crew import create_crew
|
||||
from crewai_cli.create_flow import create_flow
|
||||
from crewai_cli.crew_chat import run_chat
|
||||
from crewai_cli.deploy.main import DeployCommand
|
||||
from crewai_cli.enterprise.main import EnterpriseConfigureCommand
|
||||
from crewai_cli.evaluate_crew import evaluate_crew
|
||||
from crewai_cli.install_crew import install_crew
|
||||
from crewai_cli.kickoff_flow import kickoff_flow
|
||||
from crewai_cli.organization.main import OrganizationCommand
|
||||
from crewai_cli.plot_flow import plot_flow
|
||||
from crewai_cli.remote_template.main import TemplateCommand
|
||||
from crewai_cli.replay_from_task import replay_task_command
|
||||
from crewai_cli.reset_memories_command import reset_memories_command
|
||||
from crewai_cli.run_crew import run_crew
|
||||
from crewai_cli.settings.main import SettingsCommand
|
||||
from crewai_cli.task_outputs import load_task_outputs
|
||||
from crewai_cli.tools.main import ToolCommand
|
||||
from crewai_cli.train_crew import train_crew
|
||||
from crewai_cli.triggers.main import TriggersCommand
|
||||
from crewai_cli.update_crew import update_crew
|
||||
from crewai_cli.user_data import (
|
||||
_load_user_data,
|
||||
is_tracing_enabled,
|
||||
update_user_data,
|
||||
)
|
||||
from crewai_cli.utils import build_env_with_all_tool_credentials, read_toml
|
||||
|
||||
|
||||
def _get_cli_version() -> str:
|
||||
"""Return the best available version string for the CLI."""
|
||||
# Prefer crewai version if installed (keeps existing UX)
|
||||
try:
|
||||
return get_version("crewai")
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
try:
|
||||
return get_version("crewai-cli")
|
||||
except Exception:
|
||||
return "unknown"
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.version_option(get_version("crewai"))
|
||||
@click.version_option(_get_cli_version())
|
||||
def crewai() -> None:
|
||||
"""Top-level command group for crewai."""
|
||||
|
||||
|
||||
@crewai.command(
|
||||
name="uv",
|
||||
context_settings=dict(
|
||||
ignore_unknown_options=True,
|
||||
),
|
||||
context_settings={"ignore_unknown_options": True},
|
||||
)
|
||||
@click.argument("uv_args", nargs=-1, type=click.UNPROCESSED)
|
||||
def uv(uv_args: tuple[str, ...]) -> None:
|
||||
@@ -105,7 +121,7 @@ def version(tools: bool) -> None:
|
||||
|
||||
if tools:
|
||||
try:
|
||||
tools_version = get_version("crewai")
|
||||
tools_version = get_version("crewai-tools")
|
||||
click.echo(f"crewai tools version: {tools_version}")
|
||||
except Exception:
|
||||
click.echo("crewai tools not installed")
|
||||
@@ -168,12 +184,9 @@ def replay(task_id: str, trained_agents_file: str | None) -> None:
|
||||
|
||||
@crewai.command()
|
||||
def log_tasks_outputs() -> None:
|
||||
"""
|
||||
Retrieve your latest crew.kickoff() task outputs.
|
||||
"""
|
||||
"""Retrieve your latest crew.kickoff() task outputs."""
|
||||
try:
|
||||
storage = KickoffTaskOutputsSQLiteStorage()
|
||||
tasks = storage.load()
|
||||
tasks = load_task_outputs()
|
||||
|
||||
if not tasks:
|
||||
click.echo(
|
||||
@@ -231,11 +244,8 @@ def reset_memories(
|
||||
agent_knowledge: bool,
|
||||
all: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Reset the crew memories (memory, knowledge, agent_knowledge, kickoff_outputs). This will delete all the data saved.
|
||||
"""
|
||||
"""Reset the crew memories (memory, knowledge, agent_knowledge, kickoff_outputs). This will delete all the data saved."""
|
||||
try:
|
||||
# Treat legacy flags as --memory with a deprecation warning
|
||||
if long or short or entities:
|
||||
legacy_used = [
|
||||
f
|
||||
@@ -302,7 +312,7 @@ def memory(
|
||||
) -> None:
|
||||
"""Open the Memory TUI to browse scopes and recall memories."""
|
||||
try:
|
||||
from crewai.cli.memory_tui import MemoryTUI
|
||||
from crewai_cli.memory_tui import MemoryTUI
|
||||
except ImportError as exc:
|
||||
click.echo(
|
||||
"Textual is required for the memory TUI but could not be imported. "
|
||||
@@ -365,10 +375,10 @@ def test(n_iterations: int, model: str, trained_agents_file: str | None) -> None
|
||||
|
||||
|
||||
@crewai.command(
|
||||
context_settings=dict(
|
||||
ignore_unknown_options=True,
|
||||
allow_extra_args=True,
|
||||
)
|
||||
context_settings={
|
||||
"ignore_unknown_options": True,
|
||||
"allow_extra_args": True,
|
||||
}
|
||||
)
|
||||
@click.pass_context
|
||||
def install(context: click.Context) -> None:
|
||||
@@ -471,7 +481,7 @@ def deploy_validate() -> None:
|
||||
`crewai deploy push` run automatically, without contacting the platform.
|
||||
Exits non-zero if any blocking issues are found.
|
||||
"""
|
||||
from crewai.cli.deploy.validate import run_validate_command
|
||||
from crewai_cli.deploy.validate import run_validate_command
|
||||
|
||||
run_validate_command()
|
||||
|
||||
@@ -612,14 +622,12 @@ def triggers_run(trigger_path: str) -> None:
|
||||
|
||||
@crewai.command()
|
||||
def chat() -> None:
|
||||
"""
|
||||
Start a conversation with the Crew, collecting user-supplied inputs,
|
||||
"""Start a conversation with the Crew, collecting user-supplied inputs,
|
||||
and using the Chat LLM to generate responses.
|
||||
"""
|
||||
click.secho(
|
||||
"\nStarting a conversation with the Crew\nType 'exit' or Ctrl+C to quit.\n",
|
||||
)
|
||||
|
||||
run_chat()
|
||||
|
||||
|
||||
@@ -784,16 +792,14 @@ def traces_enable() -> None:
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
|
||||
from crewai.events.listeners.tracing.utils import update_user_data
|
||||
|
||||
console = Console()
|
||||
|
||||
update_user_data({"trace_consent": True, "first_execution_done": True})
|
||||
|
||||
panel = Panel(
|
||||
"✅ Trace collection has been enabled!\n\n"
|
||||
"✅ Trace collection enabled.\n\n"
|
||||
"Your crew/flow executions will now send traces to CrewAI+.\n"
|
||||
"Use 'crewai traces disable' to turn off trace collection.",
|
||||
"Use 'crewai traces disable' to opt out.",
|
||||
title="Traces Enabled",
|
||||
border_style="green",
|
||||
padding=(1, 2),
|
||||
@@ -807,16 +813,16 @@ def traces_disable() -> None:
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
|
||||
from crewai.events.listeners.tracing.utils import update_user_data
|
||||
|
||||
console = Console()
|
||||
|
||||
update_user_data({"trace_consent": False, "first_execution_done": True})
|
||||
|
||||
panel = Panel(
|
||||
"❌ Trace collection has been disabled!\n\n"
|
||||
"Your crew/flow executions will no longer send traces.\n"
|
||||
"Use 'crewai traces enable' to turn trace collection back on.",
|
||||
"❌ Trace collection disabled.\n\n"
|
||||
"Your crew/flow executions will no longer send traces "
|
||||
"(unless [bold]CREWAI_TRACING_ENABLED=true[/bold] is set in the environment, "
|
||||
"which overrides the opt-out).\n"
|
||||
"Use 'crewai traces enable' to opt back in.",
|
||||
title="Traces Disabled",
|
||||
border_style="red",
|
||||
padding=(1, 2),
|
||||
@@ -832,11 +838,6 @@ def traces_status() -> None:
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
|
||||
from crewai.events.listeners.tracing.utils import (
|
||||
_load_user_data,
|
||||
is_tracing_enabled,
|
||||
)
|
||||
|
||||
console = Console()
|
||||
user_data = _load_user_data()
|
||||
|
||||
@@ -883,13 +884,13 @@ def traces_status() -> None:
|
||||
@click.pass_context
|
||||
def checkpoint(ctx: click.Context, location: str) -> None:
|
||||
"""Browse and inspect checkpoints. Launches a TUI when called without a subcommand."""
|
||||
from crewai.cli.checkpoint_cli import _detect_location
|
||||
from crewai_cli.checkpoint_cli import _detect_location
|
||||
|
||||
location = _detect_location(location)
|
||||
ctx.ensure_object(dict)
|
||||
ctx.obj["location"] = location
|
||||
if ctx.invoked_subcommand is None:
|
||||
from crewai.cli.checkpoint_tui import run_checkpoint_tui
|
||||
from crewai_cli.checkpoint_tui import run_checkpoint_tui
|
||||
|
||||
run_checkpoint_tui(location)
|
||||
|
||||
@@ -898,7 +899,7 @@ def checkpoint(ctx: click.Context, location: str) -> None:
|
||||
@click.argument("location", default="./.checkpoints")
|
||||
def checkpoint_list(location: str) -> None:
|
||||
"""List checkpoints in a directory."""
|
||||
from crewai.cli.checkpoint_cli import _detect_location, list_checkpoints
|
||||
from crewai_cli.checkpoint_cli import _detect_location, list_checkpoints
|
||||
|
||||
list_checkpoints(_detect_location(location))
|
||||
|
||||
@@ -907,7 +908,7 @@ def checkpoint_list(location: str) -> None:
|
||||
@click.argument("path", default="./.checkpoints")
|
||||
def checkpoint_info(path: str) -> None:
|
||||
"""Show details of a checkpoint. Pass a file or directory for latest."""
|
||||
from crewai.cli.checkpoint_cli import _detect_location, info_checkpoint
|
||||
from crewai_cli.checkpoint_cli import _detect_location, info_checkpoint
|
||||
|
||||
info_checkpoint(_detect_location(path))
|
||||
|
||||
@@ -917,7 +918,7 @@ def checkpoint_info(path: str) -> None:
|
||||
@click.pass_context
|
||||
def checkpoint_resume(ctx: click.Context, checkpoint_id: str | None) -> None:
|
||||
"""Resume from a checkpoint. Defaults to the most recent."""
|
||||
from crewai.cli.checkpoint_cli import resume_checkpoint
|
||||
from crewai_cli.checkpoint_cli import resume_checkpoint
|
||||
|
||||
resume_checkpoint(ctx.obj["location"], checkpoint_id)
|
||||
|
||||
@@ -928,7 +929,7 @@ def checkpoint_resume(ctx: click.Context, checkpoint_id: str | None) -> None:
|
||||
@click.pass_context
|
||||
def checkpoint_diff(ctx: click.Context, id1: str, id2: str) -> None:
|
||||
"""Compare two checkpoints side-by-side."""
|
||||
from crewai.cli.checkpoint_cli import diff_checkpoints
|
||||
from crewai_cli.checkpoint_cli import diff_checkpoints
|
||||
|
||||
diff_checkpoints(ctx.obj["location"], id1, id2)
|
||||
|
||||
@@ -950,7 +951,7 @@ def checkpoint_prune(
|
||||
ctx: click.Context, keep: int | None, older_than: str | None, dry_run: bool
|
||||
) -> None:
|
||||
"""Remove old checkpoints."""
|
||||
from crewai.cli.checkpoint_cli import prune_checkpoints
|
||||
from crewai_cli.checkpoint_cli import prune_checkpoints
|
||||
|
||||
prune_checkpoints(ctx.obj["location"], keep, older_than, dry_run)
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
|
||||
from crewai_core.telemetry import Telemetry
|
||||
import httpx
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.token import get_auth_token
|
||||
from crewai.cli.plus_api import PlusAPI
|
||||
from crewai.telemetry.telemetry import Telemetry
|
||||
from crewai_cli.authentication.token import get_auth_token
|
||||
from crewai_cli.plus_api import PlusAPI
|
||||
|
||||
|
||||
console = Console()
|
||||
@@ -32,11 +34,10 @@ class PlusAPIMixin:
|
||||
raise SystemExit from None
|
||||
|
||||
def _validate_response(self, response: httpx.Response) -> None:
|
||||
"""
|
||||
Handle and display error messages from API responses.
|
||||
"""Handle and display error messages from API responses.
|
||||
|
||||
Args:
|
||||
response (httpx.Response): The response from the Plus API
|
||||
response: The response from the Plus API.
|
||||
"""
|
||||
try:
|
||||
json_response = response.json()
|
||||
30
lib/cli/src/crewai_cli/config.py
Normal file
30
lib/cli/src/crewai_cli/config.py
Normal file
@@ -0,0 +1,30 @@
|
||||
"""Re-exports of shared settings from ``crewai_core.settings``.
|
||||
|
||||
Kept as a stable import path for the CLI; new code should import from
|
||||
``crewai_core.settings`` directly.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.settings import (
|
||||
CLI_SETTINGS_KEYS as CLI_SETTINGS_KEYS,
|
||||
DEFAULT_CLI_SETTINGS as DEFAULT_CLI_SETTINGS,
|
||||
DEFAULT_CONFIG_PATH as DEFAULT_CONFIG_PATH,
|
||||
HIDDEN_SETTINGS_KEYS as HIDDEN_SETTINGS_KEYS,
|
||||
READONLY_SETTINGS_KEYS as READONLY_SETTINGS_KEYS,
|
||||
USER_SETTINGS_KEYS as USER_SETTINGS_KEYS,
|
||||
Settings as Settings,
|
||||
get_writable_config_path as get_writable_config_path,
|
||||
)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"CLI_SETTINGS_KEYS",
|
||||
"DEFAULT_CLI_SETTINGS",
|
||||
"DEFAULT_CONFIG_PATH",
|
||||
"HIDDEN_SETTINGS_KEYS",
|
||||
"READONLY_SETTINGS_KEYS",
|
||||
"USER_SETTINGS_KEYS",
|
||||
"Settings",
|
||||
"get_writable_config_path",
|
||||
]
|
||||
@@ -132,19 +132,44 @@ PROVIDERS: list[str] = [
|
||||
|
||||
MODELS: dict[str, list[str]] = {
|
||||
"openai": [
|
||||
"gpt-4",
|
||||
"gpt-5.5",
|
||||
"gpt-5.5-pro",
|
||||
"gpt-5.4",
|
||||
"gpt-5.4-pro",
|
||||
"gpt-5.4-mini",
|
||||
"gpt-5.4-nano",
|
||||
"gpt-5.2",
|
||||
"gpt-5.2-pro",
|
||||
"gpt-5.1",
|
||||
"gpt-5",
|
||||
"gpt-5-pro",
|
||||
"gpt-5-mini",
|
||||
"gpt-5-nano",
|
||||
"gpt-4.1",
|
||||
"gpt-4.1-mini-2025-04-14",
|
||||
"gpt-4.1-nano-2025-04-14",
|
||||
"gpt-4.1-mini",
|
||||
"gpt-4.1-nano",
|
||||
"gpt-4o",
|
||||
"gpt-4o-mini",
|
||||
"o4-mini",
|
||||
"o3",
|
||||
"o3-mini",
|
||||
"o1",
|
||||
"o1-mini",
|
||||
"o1-preview",
|
||||
"gpt-4",
|
||||
"gpt-4.1-mini-2025-04-14",
|
||||
"gpt-4.1-nano-2025-04-14",
|
||||
],
|
||||
"anthropic": [
|
||||
"claude-opus-4-6",
|
||||
"claude-sonnet-4-6",
|
||||
"claude-haiku-4-5-20251001",
|
||||
"claude-3-7-sonnet-20250219",
|
||||
"claude-3-5-sonnet-20241022",
|
||||
"claude-3-5-haiku-20241022",
|
||||
"claude-3-5-sonnet-20240620",
|
||||
"claude-3-sonnet-20240229",
|
||||
"claude-3-opus-20240229",
|
||||
"claude-3-sonnet-20240229",
|
||||
"claude-3-haiku-20240307",
|
||||
],
|
||||
"gemini": [
|
||||
@@ -5,13 +5,13 @@ import sys
|
||||
import click
|
||||
import tomli
|
||||
|
||||
from crewai.cli.constants import ENV_VARS, MODELS
|
||||
from crewai.cli.provider import (
|
||||
from crewai_cli.constants import ENV_VARS, MODELS
|
||||
from crewai_cli.provider import (
|
||||
get_provider_data,
|
||||
select_model,
|
||||
select_provider,
|
||||
)
|
||||
from crewai.cli.utils import copy_template, load_env_vars, write_env_file
|
||||
from crewai_cli.utils import copy_template, load_env_vars, write_env_file
|
||||
|
||||
|
||||
def get_reserved_script_names() -> set[str]:
|
||||
@@ -2,8 +2,7 @@ from pathlib import Path
|
||||
import shutil
|
||||
|
||||
import click
|
||||
|
||||
from crewai.telemetry import Telemetry
|
||||
from crewai_core.telemetry import Telemetry
|
||||
|
||||
|
||||
def create_flow(name: str) -> None:
|
||||
@@ -18,7 +17,6 @@ def create_flow(name: str) -> None:
|
||||
click.secho(f"Error: Folder {folder_name} already exists.", fg="red")
|
||||
return
|
||||
|
||||
# Initialize telemetry
|
||||
telemetry = Telemetry()
|
||||
telemetry.flow_creation_span(class_name)
|
||||
|
||||
23
lib/cli/src/crewai_cli/crew_chat.py
Normal file
23
lib/cli/src/crewai_cli/crew_chat.py
Normal file
@@ -0,0 +1,23 @@
|
||||
"""Wrapper for the crew chat command.
|
||||
|
||||
Delegates to ``crewai.utilities.crew_chat.run_chat`` when the full crewai
|
||||
package is installed, otherwise prints a helpful error message.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import click
|
||||
|
||||
|
||||
def run_chat() -> None:
|
||||
try:
|
||||
from crewai.utilities.crew_chat import run_chat as _run_chat
|
||||
except ImportError:
|
||||
click.secho(
|
||||
"The 'chat' command requires the full crewai package.\n"
|
||||
"Install it with: pip install crewai",
|
||||
fg="red",
|
||||
)
|
||||
raise SystemExit(1) from None
|
||||
|
||||
_run_chat()
|
||||
@@ -2,10 +2,10 @@ from typing import Any
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli import git
|
||||
from crewai.cli.command import BaseCommand, PlusAPIMixin
|
||||
from crewai.cli.deploy.validate import validate_project
|
||||
from crewai.cli.utils import fetch_and_json_env_file, get_project_name
|
||||
from crewai_cli import git
|
||||
from crewai_cli.command import BaseCommand, PlusAPIMixin
|
||||
from crewai_cli.deploy.validate import validate_project
|
||||
from crewai_cli.utils import fetch_and_json_env_file, get_project_name
|
||||
|
||||
|
||||
console = Console()
|
||||
@@ -40,7 +40,7 @@ from typing import Any
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.utils import parse_toml
|
||||
from crewai_cli.utils import parse_toml
|
||||
|
||||
|
||||
console = Console()
|
||||
@@ -438,7 +438,7 @@ class DeployValidator:
|
||||
"import json, sys, traceback, os\n"
|
||||
"os.chdir(sys.argv[1])\n"
|
||||
"try:\n"
|
||||
" from crewai.cli.utils import get_crews, get_flows\n"
|
||||
" from crewai.utilities.project_utils import get_crews, get_flows\n"
|
||||
" is_flow = sys.argv[2] == 'flow'\n"
|
||||
" if is_flow:\n"
|
||||
" instances = get_flows()\n"
|
||||
@@ -4,10 +4,10 @@ from typing import Any, cast
|
||||
import httpx
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.main import Oauth2Settings, ProviderFactory
|
||||
from crewai.cli.command import BaseCommand
|
||||
from crewai.cli.settings.main import SettingsCommand
|
||||
from crewai.utilities.version import get_crewai_version
|
||||
from crewai_cli.authentication.main import Oauth2Settings, ProviderFactory
|
||||
from crewai_cli.command import BaseCommand
|
||||
from crewai_cli.settings.main import SettingsCommand
|
||||
from crewai_cli.version import get_crewai_version
|
||||
|
||||
|
||||
console = Console()
|
||||
@@ -1,9 +1,9 @@
|
||||
import subprocess
|
||||
|
||||
import click
|
||||
from crewai_core.constants import CREWAI_TRAINED_AGENTS_FILE_ENV
|
||||
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials
|
||||
from crewai.utilities.constants import CREWAI_TRAINED_AGENTS_FILE_ENV
|
||||
from crewai_cli.utils import build_env_with_all_tool_credentials
|
||||
|
||||
|
||||
def evaluate_crew(
|
||||
@@ -2,7 +2,7 @@ import subprocess
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials
|
||||
from crewai_cli.utils import build_env_with_all_tool_credentials
|
||||
|
||||
|
||||
# Be mindful about changing this.
|
||||
@@ -2,8 +2,8 @@ from httpx import HTTPStatusError
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
from crewai.cli.command import BaseCommand, PlusAPIMixin
|
||||
from crewai.cli.config import Settings
|
||||
from crewai_cli.command import BaseCommand, PlusAPIMixin
|
||||
from crewai_cli.config import Settings
|
||||
|
||||
|
||||
console = Console()
|
||||
12
lib/cli/src/crewai_cli/plus_api.py
Normal file
12
lib/cli/src/crewai_cli/plus_api.py
Normal file
@@ -0,0 +1,12 @@
|
||||
"""Re-export of ``crewai_core.plus_api.PlusAPI``.
|
||||
|
||||
Kept as a stable import path for the CLI; new code should import from
|
||||
``crewai_core.plus_api`` directly.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from crewai_core.plus_api import PlusAPI as PlusAPI
|
||||
|
||||
|
||||
__all__ = ["PlusAPI"]
|
||||
@@ -10,7 +10,7 @@ import certifi
|
||||
import click
|
||||
import httpx
|
||||
|
||||
from crewai.cli.constants import JSON_URL, MODELS, PROVIDERS
|
||||
from crewai_cli.constants import JSON_URL, MODELS, PROVIDERS
|
||||
|
||||
|
||||
def select_choice(prompt_message: str, choices: Sequence[str]) -> str | None:
|
||||
@@ -11,7 +11,7 @@ from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
|
||||
from crewai.cli.command import BaseCommand
|
||||
from crewai_cli.command import BaseCommand
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -1,9 +1,9 @@
|
||||
import subprocess
|
||||
|
||||
import click
|
||||
from crewai_core.constants import CREWAI_TRAINED_AGENTS_FILE_ENV
|
||||
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials
|
||||
from crewai.utilities.constants import CREWAI_TRAINED_AGENTS_FILE_ENV
|
||||
from crewai_cli.utils import build_env_with_all_tool_credentials
|
||||
|
||||
|
||||
def replay_task_command(task_id: str, trained_agents_file: str | None = None) -> None:
|
||||
31
lib/cli/src/crewai_cli/reset_memories_command.py
Normal file
31
lib/cli/src/crewai_cli/reset_memories_command.py
Normal file
@@ -0,0 +1,31 @@
|
||||
"""Wrapper for the reset-memories command.
|
||||
|
||||
Delegates to ``crewai.utilities.reset_memories`` when the full crewai
|
||||
package is installed, otherwise prints a helpful error message.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import click
|
||||
|
||||
|
||||
def reset_memories_command(
|
||||
memory: bool,
|
||||
knowledge: bool,
|
||||
agent_knowledge: bool,
|
||||
kickoff_outputs: bool,
|
||||
all: bool,
|
||||
) -> None:
|
||||
try:
|
||||
from crewai.utilities.reset_memories import (
|
||||
reset_memories_command as _reset,
|
||||
)
|
||||
except ImportError:
|
||||
click.secho(
|
||||
"The 'reset-memories' command requires the full crewai package.\n"
|
||||
"Install it with: pip install crewai",
|
||||
fg="red",
|
||||
)
|
||||
raise SystemExit(1) from None
|
||||
|
||||
_reset(memory, knowledge, agent_knowledge, kickoff_outputs, all)
|
||||
@@ -2,11 +2,11 @@ from enum import Enum
|
||||
import subprocess
|
||||
|
||||
import click
|
||||
from crewai_core.constants import CREWAI_TRAINED_AGENTS_FILE_ENV
|
||||
from packaging import version
|
||||
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials, read_toml
|
||||
from crewai.utilities.constants import CREWAI_TRAINED_AGENTS_FILE_ENV
|
||||
from crewai.utilities.version import get_crewai_version
|
||||
from crewai_cli.utils import build_env_with_all_tool_credentials, read_toml
|
||||
from crewai_cli.version import get_crewai_version
|
||||
|
||||
|
||||
class CrewType(Enum):
|
||||
@@ -5,9 +5,9 @@ from typing import Any
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
from crewai.cli.command import BaseCommand
|
||||
from crewai.cli.config import HIDDEN_SETTINGS_KEYS, READONLY_SETTINGS_KEYS, Settings
|
||||
from crewai.events.listeners.tracing.utils import _load_user_data
|
||||
from crewai_cli.command import BaseCommand
|
||||
from crewai_cli.config import HIDDEN_SETTINGS_KEYS, READONLY_SETTINGS_KEYS, Settings
|
||||
from crewai_cli.user_data import _load_user_data
|
||||
|
||||
|
||||
console = Console()
|
||||
@@ -91,7 +91,7 @@ class SettingsCommand(BaseCommand):
|
||||
style="bold red",
|
||||
)
|
||||
console.print("Available keys:", style="yellow")
|
||||
for field_name in Settings.model_fields.keys():
|
||||
for field_name in Settings.model_fields:
|
||||
if field_name not in readonly_settings:
|
||||
console.print(f" - {field_name}", style="yellow")
|
||||
raise SystemExit(1)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user