Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-05-14 05:28:12 +00:00)

Compare commits: 1 commit on docs/custo ... rn/enumera (ed1b51e4b4)

53 .github/workflows/nightly.yml (vendored)
@@ -5,10 +5,6 @@ on:
- cron: '0 6 * * *' # daily at 6am UTC
workflow_dispatch:

concurrency:
group: nightly-publish
cancel-in-progress: false

jobs:
check:
name: Check for new commits
@@ -22,11 +18,10 @@ jobs:
with:
fetch-depth: 0

- name: Check for recent commits
- name: Check for commits in last 24h
id: check
run: |
# 25h window absorbs cron-vs-commit timing skew at the boundary.
RECENT=$(git log --since="25 hours ago" --oneline | head -1)
RECENT=$(git log --since="24 hours ago" --oneline | head -1)
if [ -n "$RECENT" ]; then
echo "has_changes=true" >> "$GITHUB_OUTPUT"
else
@@ -43,42 +38,34 @@ jobs:
steps:
- uses: actions/checkout@v4

- name: Install uv
uses: astral-sh/setup-uv@v6
- name: Set up Python
uses: actions/setup-python@v5
with:
version: "0.11.3"
python-version: "3.12"
enable-cache: false

- name: Install uv
uses: astral-sh/setup-uv@v4

- name: Stamp nightly versions
run: |
DATE=$(date +%Y%m%d)

# All workspace packages share the same base version and are released together.
BASE=$(python -c "
import re
print(re.search(r'__version__\s*=\s*\"(.*?)\"', open('lib/crewai/src/crewai/__init__.py').read()).group(1))
")
NIGHTLY="${BASE}.dev${DATE}"
echo "Nightly version: ${NIGHTLY}"

for init_file in \
lib/crewai/src/crewai/__init__.py \
lib/crewai-core/src/crewai_core/__init__.py \
lib/crewai-tools/src/crewai_tools/__init__.py \
lib/crewai-files/src/crewai_files/__init__.py \
lib/cli/src/crewai_cli/__init__.py; do
lib/crewai-files/src/crewai_files/__init__.py; do
CURRENT=$(python -c "
import re
text = open('$init_file').read()
print(re.search(r'__version__\s*=\s*\"(.*?)\"\s*$', text, re.MULTILINE).group(1))
")
NIGHTLY="${CURRENT}.dev${DATE}"
sed -i "s/__version__ = .*/__version__ = \"${NIGHTLY}\"/" "$init_file"
echo "Stamped $init_file -> $NIGHTLY"
echo "$init_file: $CURRENT -> $NIGHTLY"
done

# Update all cross-package dependency pins to the nightly version.
sed -i "s/\"crewai==[^\"]*\"/\"crewai==${NIGHTLY}\"/" lib/crewai-tools/pyproject.toml
sed -i "s/\"crewai-core==[^\"]*\"/\"crewai-core==${NIGHTLY}\"/" lib/crewai/pyproject.toml
sed -i "s/\"crewai-cli==[^\"]*\"/\"crewai-cli==${NIGHTLY}\"/" lib/crewai/pyproject.toml
# Update cross-package dependency pins to nightly versions
sed -i "s/\"crewai-tools==[^\"]*\"/\"crewai-tools==${NIGHTLY}\"/" lib/crewai/pyproject.toml
sed -i "s/\"crewai-files==[^\"]*\"/\"crewai-files==${NIGHTLY}\"/" lib/crewai/pyproject.toml
sed -i "s/\"crewai-core==[^\"]*\"/\"crewai-core==${NIGHTLY}\"/" lib/cli/pyproject.toml
sed -i "s/\"crewai==[^\"]*\"/\"crewai==${NIGHTLY}\"/" lib/crewai-tools/pyproject.toml
echo "Updated cross-package dependency pins to ${NIGHTLY}"

- name: Build packages
@@ -98,10 +85,13 @@ jobs:
runs-on: ubuntu-latest
environment:
name: pypi
url: https://pypi.org/p/crewai
permissions:
id-token: write
contents: read
steps:
- uses: actions/checkout@v4

- name: Install uv
uses: astral-sh/setup-uv@v6
with:
@@ -126,8 +116,7 @@ jobs:
continue
fi
echo "Publishing $package"
# --check-url skips files already on PyPI so manual re-runs on the same day are idempotent.
if ! uv publish --check-url https://pypi.org/simple/ "$package"; then
if ! uv publish "$package"; then
echo "Failed to publish $package"
failed=1
fi
6 .github/workflows/vulnerability-scan.yml (vendored)

@@ -46,11 +46,9 @@ jobs:
- name: Run pip-audit
run: |
uv run pip-audit --desc --aliases --skip-editable --format json --output pip-audit-report.json \
--ignore-vuln CVE-2026-3219 \
--ignore-vuln GHSA-r374-rxx8-8654
--ignore-vuln CVE-2026-3219
# Ignored CVEs:
# CVE-2026-3219 - pip 26.0.1 (GHSA-58qw-9mgm-455v): no fix available, archive handling issue
# GHSA-r374-rxx8-8654 - paramiko 4.0.0 (SHA-1 in rsakey.py): no fix available; transitive via composio-core
# CVE-2026-3219 - pip 26.0.1 (GHSA-58qw-9mgm-455v): no fix available, archive handling issue
continue-on-error: true

- name: Display results
@@ -4,80 +4,6 @@ description: "Product updates, improvements, and fixes
icon: "clock"
mode: "wide"
---
<Update label="May 13, 2026">
## v1.14.5a5

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a5)

## What's Changed

### Features
- Deprecate CrewAgentExecutor, default Crew agents to AgentExecutor
- Improve Daytona sandbox tools

### Bug Fixes
- Fix missing code block in the Portuguese (pt-BR) first-flow guide
- Log HITL pre-review and distillation failures, add learn_strict
- Patch urllib3 for security vulnerabilities
- Patch gitpython and langchain-core; ignore unpatched paramiko CVE
- Refresh all published workspace packages on uv lock/sync

### Documentation
- Add migration guide for `inputs.id` to `restoreFromStateId`
- Add OSS upgrade guide and crew-to-flow migration guide
- Update changelog and version for v1.14.5a4

## Contributors

@akaKuruma, @greysonlalonde, @iris-clawd, @lorenzejay, @mislavivanda

</Update>

<Update label="May 09, 2026">
## v1.14.5a4

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a4)

## What's Changed

### Features
- Update LLM listings

### Bug Fixes
- Fix dependency issue by moving `textual` to `crewai-cli` and adding `certifi`

### Documentation
- Update changelog and version for v1.14.5a3

## Contributors

@cgoeppinger, @greysonlalonde

</Update>

<Update label="May 07, 2026">
## v1.14.5a3

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a3)

## What's Changed

### Bug Fixes
- Fix status endpoint path from /{kickoff_id}/status to /status/{kickoff_id}
- Bump the gitpython dependency to >=3.1.47 for security compliance

### Refactoring
- Extract the CLI into a standalone crewai-cli package

### Documentation
- Update changelog and version for v1.14.5a2

## Contributors

@greysonlalonde, @iris-clawd

</Update>

<Update label="May 04, 2026">
## v1.14.5a2

@@ -1,102 +0,0 @@
---
title: "Migrating from inputs.id to restore_from_state_id"
description: "Move @persist flows off the deprecated inputs.id hydration onto the supported restore_from_state_id field"
icon: "arrow-right-arrow-left"
---

<Warning>
Passing `id` inside `inputs` to hydrate a `@persist` flow is **deprecated** and scheduled for removal in a future release. The replacement, `restore_from_state_id`, is available in CrewAI **v1.14.5 and later** — the steps below apply once you upgrade.
</Warning>

## Overview

The documented way to hydrate a `@persist` flow from a previous execution is to pass that execution's UUID as `inputs.id`. CrewAI now exposes a dedicated field, `restore_from_state_id`, that performs the same hydration without overloading the `inputs` payload — and without coupling the hydration key to the new execution's identity.

## Migration

If you currently kick off a `@persist` flow with `inputs={"id": ...}`:

```python
# Deprecated
flow = CounterFlow()
flow.kickoff(inputs={"id": "abcd1234-5678-90ef-ghij-klmnopqrstuv"})
```

Switch to `restore_from_state_id`:

```python
# Supported
flow = CounterFlow()
flow.kickoff(restore_from_state_id="abcd1234-5678-90ef-ghij-klmnopqrstuv")
```

The two modes have different lineage semantics:

- `inputs={"id": <uuid>}` (deprecated) — **resume**: writes land under the supplied id, extending the same `flow_uuid` history.
- `restore_from_state_id=<uuid>` — **fork**: hydrates state from the snapshot, then writes under a fresh `state.id`. The source flow's history is preserved.

For most production scenarios — re-running a flow seeded from a previous state — fork is what you want. See [Mastering Flow State](/ar/guides/flows/mastering-flow-state) for the full mental model.

If you kick off your flow over the CrewAI AMP REST API, see [AMP](#amp) below for the equivalent payload migration.

## Why we are deprecating `inputs.id` for `@persist`

`inputs.id` is currently the documented way to resume a `@persist` flow from a previous execution. The problem is that the same UUID does two jobs at once:

1. **It selects which snapshot `@persist` hydrates from** — load the state saved under that UUID.
2. **It becomes the new flow execution's id** (`state.id` in the SDK; surfaced as `flow_id` in some contexts) — every `@persist` write from this kickoff also lands under that same UUID.

This dual role is the root cause of the issues this guide describes. Because the supplied UUID is also the new execution's id, two kickoffs that pass the same `inputs.id` are not two distinct executions — they share an id, share a persistence record, and (on AMP) share a row in the executions list. There is no way to say "hydrate from this snapshot, but record this run separately" without splitting the two responsibilities.

`restore_from_state_id` is that split. It tells `@persist` which snapshot to hydrate from, while leaving the new execution free to receive a fresh `state.id`. The hydration source and the recorded run are no longer the same UUID — which is what most production scenarios actually want.

## Removal timeline

`inputs.id` for `@persist` hydration is scheduled for removal in a future release of CrewAI. There is no immediate hard cut-off — existing flows continue to work — but once you upgrade to v1.14.5 or later, new code should use `restore_from_state_id`, and existing flows should migrate at the next convenient opportunity.

## AMP

If you deploy your flow to CrewAI AMP, the migration extends to the kickoff payload sent to your deployed crew, and the visible symptoms of reusing `inputs.id` show up on the deployment dashboard. The two subsections below cover both.

### Migrating the kickoff payload

If you currently kick off a deployed flow by embedding `id` in `inputs`:

```bash
# Deprecated
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  -d '{"inputs": {"id": "abcd1234-5678-90ef-ghij-klmnopqrstuv", "topic": "AI Agent Frameworks"}}' \
  https://your-crew-url.crewai.com/kickoff
```

Move the UUID to the top-level `restoreFromStateId` field:

```bash
# Supported
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  -d '{
    "inputs": {"topic": "AI Agent Frameworks"},
    "restoreFromStateId": "abcd1234-5678-90ef-ghij-klmnopqrstuv"
  }' \
  https://your-crew-url.crewai.com/kickoff
```

`restoreFromStateId` sits next to `inputs` in the kickoff payload, not inside it. The `inputs` object now only carries values your flow actually consumes.

### What happens when `inputs.id` is reused

When AMP receives a kickoff for a flow whose `inputs.id` matches an existing execution, it resolves to the existing record rather than creating a new one. From the deployment dashboard you'll see:

- **Execution status** — the new run's status overwrites the previous run's. A finished execution can flip back to `running`, or a `completed` run can flip to `error` if the new kickoff fails — either way the dashboard no longer reflects the original run.
- **Traces** — OTel traces stack across kickoffs because they share the same execution id; the previous run's traces are either replaced by, or mixed with, the new run's. A step-by-step replay no longer corresponds to a single execution.
- **Executions list** — kickoffs that should appear as separate rows collapse into a single entry, hiding history.

Migrating to `restoreFromStateId` keeps every kickoff as its own execution — with its own status, traces, and row in the list — while still hydrating state from a previous run.

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
Contact our support team if you're unsure which mode your flow needs or hit issues during the migration.
</Card>
@@ -1,190 +0,0 @@
---
title: "Upgrading CrewAI"
description: "How to upgrade CrewAI in your project and adapt to breaking changes between versions."
icon: "arrow-up-circle"
---

## Overview

CrewAI releases ship new capabilities regularly. This guide walks you through the practical steps to keep your installation up to date — both the CLI and your project's virtual environment.

If you're starting fresh, see [Installation](/ar/installation). If you're coming from another framework, see [Migrating from LangGraph](/ar/guides/migration/migrating-from-langgraph).

---

## The Two Things You Might Want to Upgrade

CrewAI lives in two places on your machine, and they upgrade independently:

| What | How it's installed | How to upgrade |
|---|---|---|
| The **global `crewai` CLI** | `uv tool install crewai` | `uv tool install crewai --upgrade` |
| The **project venv** (where your code runs) | `crewai install` / `uv sync` | `uv add "crewai[...]>=X.Y.Z"` then `crewai install` |

These can — and often do — get out of sync. Running `crewai --version` shows the CLI version. Running `uv pip show crewai` inside your project shows the venv version. If they differ, that's normal; what matters for your running code is the venv version.

## Why `crewai install` Alone Doesn't Upgrade

`crewai install` is a thin wrapper around `uv sync`. It installs exactly what the current `uv.lock` file says — it does **not** bump any version constraints.

If your `pyproject.toml` says `crewai>=1.11.1` and the lock file resolved it to `1.11.1`, running `crewai install` will keep you on `1.11.1` forever, even if `1.14.4` is available.

To actually upgrade, you need to:

1. Update the version constraint in `pyproject.toml`
2. Re-solve the lock file
3. Sync the venv

`uv add` does all three in one step.

## How to Upgrade Your Project

```bash
# Bump the constraint and re-lock in one command
uv add "crewai[tools]>=1.14.4"

# Sync the venv (crewai install calls uv sync under the hood)
crewai install

# Verify
uv pip show crewai
# → Version: 1.14.4
```

Replace `[tools]` with whatever extras your project uses (e.g. `[tools,anthropic]`). Check the `dependencies` list in `pyproject.toml` if you're unsure.

<Note>
`uv add` updates both `pyproject.toml` **and** `uv.lock` atomically. If you edit `pyproject.toml` manually, you still need to run `uv lock --upgrade-package crewai` to re-solve the lock file before `crewai install` will pick up the new version.
</Note>

## Upgrading the Global CLI

The global CLI is separate from your project. Upgrade it with:

```bash
uv tool install crewai --upgrade
```

If your shell warns about `PATH` after the upgrade, refresh it:

```bash
uv tool update-shell
```

This does **not** touch your project's venv — you still need `uv add` + `crewai install` inside the project.

## Verify Both Are in Sync

```bash
# Global CLI version
crewai --version

# Project venv version
uv pip show crewai | grep Version
```

They don't need to match — but the project venv version is what matters for runtime behavior.

<Note>
CrewAI requires `Python >=3.10, <3.14`. If `uv` was installed against an older interpreter, recreate the project venv with a supported Python version before running `crewai install`.
</Note>

---

## Breaking Changes & Migration Notes

Most upgrades only require small adjustments. The areas below are the ones that break silently or with confusing tracebacks.

### Import paths: tools and `BaseTool`

The canonical import location for tools is `crewai.tools`. Older paths still surface in tutorials but should be updated.

```python
# Before
from crewai_tools import BaseTool
from crewai.agents.tools import tool

# After
from crewai.tools import BaseTool, tool
```

Both the `@tool` decorator and the `BaseTool` subclass live in `crewai.tools`. `AgentFinish` and other internal-agent symbols are no longer part of the public surface — if you were importing them, switch to event listeners or `Task` callbacks instead.

### `Agent` parameter changes

```python
from crewai import Agent

agent = Agent(
    role="Researcher",
    goal="Find authoritative sources on {topic}",
    backstory="You are a careful, source-driven researcher.",
    llm="gpt-4o-mini",        # string model name OR an LLM object
    verbose=True,             # bool, not an int level
    max_iter=15,              # default has changed across versions — set explicitly
    allow_delegation=False,
)
```

- `llm` accepts either a string model name (resolved via the configured provider) or an `LLM` object for fine-grained control.
- `verbose` is a plain `bool`. Passing an integer no longer toggles log levels.
- `max_iter` defaults have shifted between releases. If your agent silently stops looping after the first tool call, set `max_iter` explicitly.

### `Crew` parameters

```python
from crewai import Crew, Process

crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,  # or Process.hierarchical
    memory=True,
    cache=True,
    embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
)
```

- `process=Process.hierarchical` requires either `manager_llm=` or `manager_agent=`. Without one, kickoff raises a validation error.
- `memory=True` with a non-default embedding provider needs an `embedder` dict — see [Memory & embedder config](#memory-embedder-config) below.

### `Task` structured output

Use `output_pydantic`, `output_json`, or `output_file` to coerce a task's result into a typed shape:

```python
from pydantic import BaseModel
from crewai import Task

class Article(BaseModel):
    title: str
    body: str

write = Task(
    description="Write an article about {topic}",
    expected_output="A short article with a title and body",
    agent=writer,
    output_pydantic=Article,  # the class, NOT an instance
    output_file="output/article.md",
)
```

`output_pydantic` takes the **class** itself. Passing `Article(title="", body="")` is a common mistake and fails with a confusing validation error.

### Memory & embedder config {#memory-embedder-config}

If `memory=True` and you're not using the default OpenAI embeddings, you must pass an `embedder`:

```python
crew = Crew(
    agents=[...],
    tasks=[...],
    memory=True,
    embedder={
        "provider": "ollama",
        "config": {"model": "nomic-embed-text"},
    },
)
```

Set the relevant provider credentials (`OPENAI_API_KEY`, `OLLAMA_HOST`, etc.) in your `.env` file. Memory storage paths are project-local by default — delete the project's memory directory if you change embedders, since dimensions don't mix.
@@ -13,7 +13,7 @@ The Daytona sandbox tools give CrewAI agents access to isolated, ephemeral compu

- **`DaytonaExecTool`** — run any shell command inside a sandbox.
- **`DaytonaPythonTool`** — execute a block of Python source code inside a sandbox.
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox; also supports `move`, `find` (content grep), `search` (filename glob), `chmod` (permissions), `replace` (bulk find-and-replace), and `exists`.
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox.

All three tools share the same sandbox lifecycle controls, so you can mix and match them while keeping state in a single persistent sandbox.

@@ -55,7 +55,7 @@ from crewai_tools import DaytonaPythonTool
tool = DaytonaPythonTool()
result = tool.run(code="print(sum(range(10)))")
print(result)
# {"exit_code": 0, "result": "45\n", "artifacts": ExecutionArtifacts(stdout="45\n", charts=[])}
# {"exit_code": 0, "result": "45\n", "artifacts": None}
```

### Multi-step shell session (persistent)
@@ -63,22 +63,17 @@ print(result)
```python Code
from crewai_tools import DaytonaExecTool, DaytonaFileTool

# Create the persistent sandbox via the first tool, then attach the second
# tool to it so both share state (installed packages, files, env vars).
exec_tool = DaytonaExecTool(persistent=True)
exec_tool.run(command="pip install httpx -q")
file_tool = DaytonaFileTool(sandbox_id=exec_tool.active_sandbox_id)
file_tool = DaytonaFileTool(persistent=True)

file_tool.run(
    action="write",
    path="workspace/script.py",
    content="import httpx; print(f'httpx loaded, version {httpx.__version__}')",
)
exec_tool.run(command="python workspace/script.py")
# Install a package, then write and run a script — all in the same sandbox
exec_tool.run(command="pip install httpx -q")
file_tool.run(action="write", path="/workspace/fetch.py", content="import httpx; print(httpx.get('https://httpbin.org/get').status_code)")
exec_tool.run(command="python /workspace/fetch.py")
```

<Note>
By default, each tool with `persistent=True` lazily creates its **own** sandbox on first use. The pattern above shares a single sandbox across multiple tools by reading the first tool's `active_sandbox_id` after a `.run()` call and passing it to the others via `sandbox_id=...`. With `persistent=False` (the default), every `.run()` call gets a fresh sandbox that's deleted at the end of that call.
Each tool instance maintains its own persistent sandbox. To share **one** sandbox across two tools, create the first tool, grab its sandbox id via `tool._persistent_sandbox.id`, and pass it to the second tool via `sandbox_id=...`.
</Note>

### Attach to an existing sandbox
@@ -87,7 +82,7 @@ By default, each tool with `persistent=True` lazily creates its **own** sandbox
from crewai_tools import DaytonaExecTool

tool = DaytonaExecTool(sandbox_id="my-long-lived-sandbox")
result = tool.run(command="ls workspace")
result = tool.run(command="ls /workspace")
```

### Custom sandbox parameters
@@ -107,41 +102,6 @@ tool = DaytonaExecTool(
)
```

### Searching, moving, and modifying files

```python Code
from crewai_tools import DaytonaFileTool

file_tool = DaytonaFileTool(persistent=True)

# Find every TODO in the source tree (grep file contents recursively)
file_tool.run(action="find", path="workspace/src", pattern="TODO:")

# Find all Python files (glob match on filenames)
file_tool.run(action="search", path="workspace", pattern="*.py")

# Make a script executable
file_tool.run(action="chmod", path="workspace/run.sh", mode="755")

# Rename or move a file
file_tool.run(
    action="move",
    path="workspace/draft.md",
    destination="workspace/final.md",
)

# Bulk find-and-replace across multiple files
file_tool.run(
    action="replace",
    paths=["workspace/src/a.py", "workspace/src/b.py"],
    pattern="old_function",
    replacement="new_function",
)

# Quick existence check before a destructive op
file_tool.run(action="exists", path="workspace/cache.db")
```

### Agent integration

```python Code
@@ -161,7 +121,7 @@ coder = Agent(
)

task = Task(
    description="Write a Python script that prints the first 10 Fibonacci numbers, save it to workspace/fib.py, and run it.",
    description="Write a Python script that prints the first 10 Fibonacci numbers, save it to /workspace/fib.py, and run it.",
    expected_output="The first 10 Fibonacci numbers printed to stdout.",
    agent=coder,
)
@@ -208,22 +168,12 @@ All three tools accept these parameters at initialization:

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`, `exists`, `move`, `find`, `search`, `chmod`, `replace`. |
| `path` | `str \| None` | ✓ for all actions except `replace` | Absolute path inside the sandbox. |
| `content` | `str \| None` | ✓ for `append` | Content to write or append. |
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`. |
| `path` | `str` | ✓ | Absolute path inside the sandbox. |
| `content` | `str \| None` | | Content to write or append. Required for `append`. |
| `binary` | `bool` | | If `True`, `content` is base64 on write; returns base64 on read. |
| `recursive` | `bool` | | For `delete`: remove directories recursively. |
| `mode` | `str \| None` | | For `mkdir`: octal permissions for the new directory (defaults to `"0755"`). For `chmod`: octal permissions to apply to the target. |
| `destination` | `str \| None` | ✓ for `move` | Destination path for `move`. |
| `pattern` | `str \| None` | ✓ for `find`, `search`, `replace` | For `find`: substring matched against file CONTENTS. For `search`: glob matched against file NAMES (e.g. `*.py`). For `replace`: text to replace inside files. |
| `replacement` | `str \| None` | ✓ for `replace` | Replacement text for `pattern`. |
| `paths` | `list[str] \| None` | ✓ for `replace` | List of file paths in which to replace text. |
| `owner` | `str \| None` | | For `chmod`: new file owner. |
| `group` | `str \| None` | | For `chmod`: new file group. |

<Note>
For `chmod`, pass at least one of `mode`, `owner`, or `group` — any field left as `None` is left unchanged on the target.
</Note>
| `mode` | `str` | | For `mkdir`: octal permission string (default `"0755"`). |

<Tip>
For files larger than a few KB, create the file first with `action="write"` and empty content, then send the body via multiple `action="append"` calls of ~4 KB each to stay within tool-call payload limits.
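
A minimal sketch of that chunked-append pattern, assuming the `write`/`append` actions behave as shown above (the chunk size, path, and payload are illustrative):

```python Code
from crewai_tools import DaytonaFileTool

file_tool = DaytonaFileTool(persistent=True)

large_body = "report line\n" * 50_000  # any large string you need to upload
CHUNK = 4096                           # roughly 4 KB per append call

# Create (or truncate) the file, then stream the body in chunks
file_tool.run(action="write", path="workspace/big_report.md", content="")
for start in range(0, len(large_body), CHUNK):
    file_tool.run(
        action="append",
        path="workspace/big_report.md",
        content=large_body[start:start + CHUNK],
    )
```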

2688 docs/docs.json (file diff suppressed because it is too large)
@@ -4,80 +4,6 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="May 13, 2026">
## v1.14.5a5

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a5)

## What's Changed

### Features
- Deprecate CrewAgentExecutor, default Crew agents to AgentExecutor
- Improve Daytona sandbox tools

### Bug Fixes
- Fix missing code block in pt-BR first-flow guide
- Log HITL pre-review and distillation failures, add learn_strict
- Patch urllib3 for security vulnerabilities
- Patch gitpython and langchain-core; ignore unpatched paramiko CVE
- Refresh all published workspace packages on uv lock/sync

### Documentation
- Add migration guide for `inputs.id` to `restoreFromStateId`
- Add OSS upgrade and crew-to-flow migration guide
- Update changelog and version for v1.14.5a4

## Contributors

@akaKuruma, @greysonlalonde, @iris-clawd, @lorenzejay, @mislavivanda

</Update>

<Update label="May 09, 2026">
## v1.14.5a4

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a4)

## What's Changed

### Features
- Update LLM listings

### Bug Fixes
- Fix dependency issue by moving `textual` to `crewai-cli` and adding `certifi`

### Documentation
- Update changelog and version for v1.14.5a3

## Contributors

@cgoeppinger, @greysonlalonde

</Update>

<Update label="May 07, 2026">
## v1.14.5a3

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a3)

## What's Changed

### Bug Fixes
- Fix status endpoint path from /{kickoff_id}/status to /status/{kickoff_id}
- Bump gitpython dependency to version >=3.1.47 for security compliance

### Refactoring
- Extract CLI into standalone crewai-cli package

### Documentation
- Update changelog and version for v1.14.5a2

## Contributors

@greysonlalonde, @iris-clawd

</Update>

<Update label="May 04, 2026">
## v1.14.5a2

@@ -1,143 +0,0 @@
---
title: "Migrating from inputs.id to restore_from_state_id"
description: "Move @persist flows off the deprecated inputs.id hydration onto the supported restore_from_state_id field"
icon: "arrow-right-arrow-left"
---

<Warning>
Passing `id` inside `inputs` to hydrate a `@persist` flow is **deprecated** and scheduled for removal in a future release. The replacement, `restore_from_state_id`, is available in CrewAI **v1.14.5 and later** — the steps below apply once you upgrade.
</Warning>

## Overview

The documented way to hydrate a `@persist` flow from a previous execution is to pass that execution's UUID as `inputs.id`. CrewAI now exposes a dedicated field, `restore_from_state_id`, that performs the same hydration without overloading the `inputs` payload — and without coupling the hydration key to the new execution's identity.

## Migration

If you currently kickoff a `@persist` flow with `inputs={"id": ...}`:

```python
# Deprecated
flow = CounterFlow()
flow.kickoff(inputs={"id": "abcd1234-5678-90ef-ghij-klmnopqrstuv"})
```

Switch to `restore_from_state_id`:

```python
# Supported
flow = CounterFlow()
flow.kickoff(restore_from_state_id="abcd1234-5678-90ef-ghij-klmnopqrstuv")
```

The two modes have different lineage semantics:

- `inputs={"id": <uuid>}` (deprecated) — **resume**: writes land under the supplied id, extending the same `flow_uuid` history.
- `restore_from_state_id=<uuid>` — **fork**: hydrates state from the snapshot, then writes under a fresh `state.id`. The source flow's history is preserved.
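
A minimal sketch of what fork means in practice: hydrate from a source run, then confirm the new run records under its own id (`CounterFlow` and the UUID are illustrative, and reading `state.id` off the flow object is assumed to work as in the flow-state docs):

```python
source_id = "abcd1234-5678-90ef-ghij-klmnopqrstuv"  # UUID of the previous @persist run

flow = CounterFlow()
flow.kickoff(restore_from_state_id=source_id)

# The hydrated values come from the snapshot, but writes land under a new id,
# so the source run's history is left untouched.
assert flow.state.id != source_id
print(f"forked {source_id} -> {flow.state.id}")
```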

For most production scenarios — re-running a flow seeded from a previous state — fork is what you want. See [Mastering Flow State](/en/guides/flows/mastering-flow-state) for the full mental model.

If you kickoff your flow over the CrewAI AMP REST API, see [AMP](#amp) below for the equivalent payload migration.

## Why we are deprecating `inputs.id` for `@persist`

`inputs.id` is currently the documented way to resume a `@persist` flow from a previous execution. The problem is that the same UUID does two jobs at once:

1. **It selects which snapshot `@persist` hydrates from** — load the state saved under that UUID.
2. **It becomes the new execution's Flow Execution ID** (`state.id` in the SDK; surfaced as `flow_id` in some contexts) — every `@persist` write from this kickoff also lands under that same UUID.

This dual role is the root cause of the issues this guide describes. Because the supplied UUID is also the new execution's id, two kickoffs that pass the same `inputs.id` are not two distinct executions — they share an id, share a persistence record, and (on AMP) share a row in the executions list. There is no way to say "hydrate from this snapshot, but record this run separately" without splitting the two responsibilities.

`restore_from_state_id` is that split. It tells `@persist` which snapshot to hydrate from, while leaving the new execution free to receive a fresh `state.id`. The hydration source and the recorded run are no longer the same UUID — which is what most production scenarios actually want.

## Removal timeline

`inputs.id` for `@persist` hydration is scheduled for removal in a future release of CrewAI. There is no immediate hard cut-off — existing flows continue to work — but once you upgrade to v1.14.5 or later, new code should use `restore_from_state_id`, and existing flows should migrate at the next convenient opportunity.

## AMP

If you deploy your flow to CrewAI AMP, the migration extends to the kickoff payload sent to your deployed crew, and the visible symptoms of reusing `inputs.id` show up on the deployment dashboard. The two subsections below cover both.

### Migrating the kickoff payload

If you currently kickoff a deployed flow by embedding `id` in `inputs`:

```bash
# Deprecated
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  -d '{"inputs": {"id": "abcd1234-5678-90ef-ghij-klmnopqrstuv", "topic": "AI Agent Frameworks"}}' \
  https://your-crew-url.crewai.com/kickoff
```

Move the UUID to the top-level `restoreFromStateId` field:

```bash
# Supported
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  -d '{
    "inputs": {"topic": "AI Agent Frameworks"},
    "restoreFromStateId": "abcd1234-5678-90ef-ghij-klmnopqrstuv"
  }' \
  https://your-crew-url.crewai.com/kickoff
```

`restoreFromStateId` sits next to `inputs` in the kickoff payload, not inside it. The `inputs` object now only carries values your flow actually consumes.

### What happens when `inputs.id` is reused

When AMP receives a kickoff for a flow whose `inputs.id` matches an existing execution, it resolves to the existing record rather than creating a new one. From the deployment dashboard you'll see:

- **Execution status** — the new run's status overwrites the previous run's. A finished execution can flip back to `running`, or a `completed` run can flip to `error` if the new kickoff fails — either way the dashboard no longer reflects the original run.
- **Traces** — OTel traces stack across kickoffs because they share the same execution id; the previous run's traces are either replaced by, or mixed with, the new run's. A step-by-step replay no longer corresponds to a single execution.
- **Executions list** — kickoffs that should appear as separate rows collapse into a single entry, hiding history.

Migrating to `restoreFromStateId` keeps every kickoff as its own execution — with its own status, traces, and row in the list — while still hydrating state from a previous run.

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
Contact our support team if you're unsure which mode your flow needs or hit issues during the migration.
</Card>

@@ -1,190 +0,0 @@
---
title: "Upgrading CrewAI"
description: "How to upgrade CrewAI in your project and adapt to breaking changes between versions."
icon: "arrow-up-circle"
---

## Overview

CrewAI releases ship new capabilities regularly. This guide walks you through the practical steps to keep your installation up to date — both the CLI and your project's virtual environment.

If you're starting fresh, see [Installation](/en/installation). If you're coming from another framework, see [Migrating from LangGraph](/en/guides/migration/migrating-from-langgraph).

---

## The Two Things You Might Want to Upgrade

CrewAI lives in two places on your machine, and they upgrade independently:

| What | How it's installed | How to upgrade |
|---|---|---|
| The **global `crewai` CLI** | `uv tool install crewai` | `uv tool install crewai --upgrade` |
| The **project venv** (what your code runs) | `crewai install` / `uv sync` | `uv add "crewai[...]>=X.Y.Z"` then `crewai install` |

These can — and often do — get out of sync. Running `crewai --version` tells you the CLI version. Running `uv pip show crewai` inside your project tells you the venv version. If they differ, that's normal; what matters for your running code is the venv version.

## Why `crewai install` Alone Doesn't Upgrade

`crewai install` is a thin wrapper around `uv sync`. It installs exactly what the current `uv.lock` file says — it does **not** bump any version constraints.

If your `pyproject.toml` says `crewai>=1.11.1` and the lock file resolved to `1.11.1`, running `crewai install` will keep you on `1.11.1` forever, even if `1.14.4` is available.

To actually upgrade, you need to:

1. Update the version constraint in `pyproject.toml`
2. Re-solve the lock file
3. Sync the venv

`uv add` does all three in one shot.

## How to Upgrade Your Project

```bash
# Bump the constraint and re-lock in one command
uv add "crewai[tools]>=1.14.4"

# Sync the venv (crewai install calls uv sync under the hood)
crewai install

# Verify
uv pip show crewai
# → Version: 1.14.4
```

Replace `[tools]` with whatever extras your project uses (e.g. `[tools,anthropic]`). Check your `pyproject.toml` `dependencies` list if you're unsure.

<Note>
`uv add` updates both `pyproject.toml` **and** `uv.lock` atomically. If you edit `pyproject.toml` manually, you still need to run `uv lock --upgrade-package crewai` to re-solve the lock file before `crewai install` will pick up the new version.
</Note>
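
A sketch of that manual path, assuming you bumped the constraint in `pyproject.toml` by hand:

```bash
# pyproject.toml was edited by hand, e.g. crewai[tools]>=1.14.4
uv lock --upgrade-package crewai   # re-solve uv.lock against the new constraint
crewai install                     # sync the venv from the updated lock file
uv pip show crewai                 # confirm the venv picked up the new version
```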

## Upgrading the Global CLI

The global CLI is separate from your project. Upgrade it with:

```bash
uv tool install crewai --upgrade
```

If your shell warns about `PATH` after the upgrade, refresh it:

```bash
uv tool update-shell
```

This does **not** touch your project's venv — you still need `uv add` + `crewai install` inside the project.

## Verify Both Are in Sync

```bash
# Global CLI version
crewai --version

# Project venv version
uv pip show crewai | grep Version
```

They don't need to match — but your project venv version is what matters for runtime behavior.

<Note>
CrewAI requires `Python >=3.10, <3.14`. If `uv` was installed against an older interpreter, recreate the project venv with a supported Python before running `crewai install`.
</Note>

---

## Breaking Changes & Migration Notes

Most upgrades only require small adjustments. The areas below are the ones that break silently or with confusing tracebacks.

### Import paths: tools and `BaseTool`

The canonical import location for tools is `crewai.tools`. Older paths still surface in tutorials but should be updated.

```python
# Before
from crewai_tools import BaseTool
from crewai.agents.tools import tool

# After
from crewai.tools import BaseTool, tool
```

The `@tool` decorator and `BaseTool` subclass both live in `crewai.tools`. `AgentFinish` and other internal-agent symbols are no longer part of the public surface — if you were importing them, switch to event listeners or `Task` callbacks instead.

### `Agent` parameter changes

```python
from crewai import Agent

agent = Agent(
    role="Researcher",
    goal="Find authoritative sources on {topic}",
    backstory="You are a careful, source-driven researcher.",
    llm="gpt-4o-mini",        # string model name OR an LLM object
    verbose=True,             # bool, not an int level
    max_iter=15,              # default has changed across versions — set explicitly
    allow_delegation=False,
)
```

- `llm` accepts either a string model name (resolved via the configured provider) or an `LLM` object for fine-grained control.
- `verbose` is a plain `bool`. Passing an integer no longer toggles log levels.
- `max_iter` defaults have shifted between releases. If your agent silently stops looping after the first tool call, set `max_iter` explicitly.

### `Crew` parameters

```python
from crewai import Crew, Process

crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,  # or Process.hierarchical
    memory=True,
    cache=True,
    embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
)
```

- `process=Process.hierarchical` requires either `manager_llm=` or `manager_agent=`. Without one, kickoff raises at validation time.
- `memory=True` with a non-default embedding provider needs an `embedder` dict — see [Memory & embedder config](#memory-embedder-config) below.
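
For the hierarchical case, a minimal sketch (the manager model name is illustrative):

```python
from crewai import Crew, Process

crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.hierarchical,
    manager_llm="gpt-4o",  # or manager_agent=some_agent (one of the two is required)
)
```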

### `Task` structured output

Use `output_pydantic`, `output_json`, or `output_file` to coerce a task's result into a typed shape:

```python
from pydantic import BaseModel
from crewai import Task

class Article(BaseModel):
    title: str
    body: str

write = Task(
    description="Write an article about {topic}",
    expected_output="A short article with a title and body",
    agent=writer,
    output_pydantic=Article,  # the class, NOT an instance
    output_file="output/article.md",
)
```

`output_pydantic` takes the **class** itself. Passing `Article(title="", body="")` is a common mistake and fails with a confusing validation error.

### Memory & embedder config {#memory-embedder-config}

If `memory=True` and you're not using the default OpenAI embeddings, you must pass an `embedder`:

```python
crew = Crew(
    agents=[...],
    tasks=[...],
    memory=True,
    embedder={
        "provider": "ollama",
        "config": {"model": "nomic-embed-text"},
    },
)
```

Set the relevant provider credentials (`OPENAI_API_KEY`, `OLLAMA_HOST`, etc.) in your `.env` file. Memory storage paths are project-local by default — delete the project's memory directory if you change embedders, since dimensions don't mix.

@@ -1,139 +0,0 @@
---
title: Platform Tools CLI
description: Create, publish, and install custom tools on the CrewAI platform using the CLI.
icon: terminal
mode: "wide"
---

## Overview

The CrewAI CLI provides commands to manage custom tools on the **CrewAI platform** — a hosted tool registry that lets you share tools within your organization and across the community without publishing to PyPI.

| Command | Purpose |
|---------|---------|
| `crewai tool create <handle>` | Scaffold a new tool project |
| `crewai tool publish` | Publish the tool to the CrewAI platform |
| `crewai tool install <handle>` | Install a platform tool into your crew project |

<Note type="info" title="Platform vs PyPI">
These commands manage tools on the **CrewAI platform registry**. If you want to publish a standalone Python package to PyPI instead, see the [Publish Custom Tools to PyPI](/en/guides/tools/publish-custom-tools) guide.
</Note>

## Prerequisites

- **CrewAI CLI** installed (`pip install crewai`)
- **Authenticated** with the platform — run `crewai login` first

---

## Step 1: Create a Tool Project

Scaffold a new tool project:

```bash
crewai tool create my_custom_tool
```

This generates a project structure with the boilerplate you need to start building your tool.

<Tip>
The `handle` is the unique identifier for your tool on the platform. Choose something descriptive and specific to what the tool does.
</Tip>

### Implement Your Tool

Edit the generated tool file to add your logic. The tool follows the standard CrewAI tools contract — you can subclass `BaseTool` or use the `@tool` decorator:

```python
from crewai.tools import BaseTool

class MyCustomTool(BaseTool):
    name: str = "My Custom Tool"
    description: str = "Description of what this tool does — be specific so agents know when to use it."

    def _run(self, argument: str) -> str:
        # Your tool logic here
        return "result"
```
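
Or, using the decorator form mentioned above (the function name and argument are illustrative):

```python
from crewai.tools import tool

@tool("My Custom Tool")
def my_custom_tool(argument: str) -> str:
    """Description of what this tool does — be specific so agents know when to use it."""
    # Your tool logic here
    return "result"
```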

For the full tools API reference (input schemas, caching, async support, error handling), see the [Create Custom Tools](/en/learn/create-custom-tools) guide.

---

## Step 2: Publish to the Platform

From your tool project directory, publish it to the CrewAI platform:

```bash
crewai tool publish
```

### Visibility Options

| Flag | Description |
|------|-------------|
| `--public` | Make the tool available to all platform users |
| `--private` | Restrict visibility to your organization |
| `--force` | Bypass Git remote validations |

```bash
# Publish as a public tool
crewai tool publish --public

# Publish privately (organization only)
crewai tool publish --private
```

---

## Step 3: Install a Platform Tool

To install a tool that's been published to the platform:

```bash
crewai tool install my_custom_tool
```

Once installed, you can use the tool in your crew like any other tool — assign it to an agent via the `tools` parameter.
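
A minimal sketch of wiring an installed tool into an agent (the import path for an installed platform tool depends on your generated package, so `my_custom_tool.tool` here is illustrative):

```python
from crewai import Agent
from my_custom_tool.tool import MyCustomTool  # illustrative import; match your tool package

researcher = Agent(
    role="Researcher",
    goal="Answer questions using the custom tool",
    backstory="You rely on purpose-built tools for accurate answers.",
    tools=[MyCustomTool()],  # assign the installed tool via the tools parameter
)
```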

---

## Full Lifecycle Example

```bash
# 1. Authenticate with the platform
crewai login

# 2. Scaffold a new tool
crewai tool create weather_lookup

# 3. Implement your logic in the generated project
cd weather_lookup
# ... edit the tool file ...

# 4. Publish to the platform
crewai tool publish --public

# 5. In another project, install and use it
crewai tool install weather_lookup
```

---

## Platform Tools vs PyPI Packages

| | Platform Tools | PyPI Packages |
|---|---|---|
| **Publish** | `crewai tool publish` | `uv build` + `uv publish` |
| **Registry** | CrewAI platform | PyPI |
| **Install** | `crewai tool install <handle>` | `pip install <package>` |
| **Auth** | `crewai login` | PyPI account + token |
| **Visibility** | `--public` / `--private` flags | Always public |
| **Guide** | This page | [Publish Custom Tools](/en/guides/tools/publish-custom-tools) |

---

## Related

- [Create Custom Tools](/en/learn/create-custom-tools) — Python API reference for building tools (BaseTool, @tool decorator)
- [Publish Custom Tools to PyPI](/en/guides/tools/publish-custom-tools) — package and distribute tools as standalone Python libraries

@@ -106,9 +106,6 @@ If you haven't installed `uv` yet, follow **step 1** to quickly get it set up on
```shell
uv tool install crewai --upgrade
```
<Note>
This upgrades the **global `crewai` CLI tool** only. To upgrade the `crewai` version inside your project's virtual environment, see [Upgrading CrewAI in a project](/en/guides/migration/upgrading-crewai).
</Note>
<Check>Installation successful! You're ready to create your first crew! 🎉</Check>
</Step>

@@ -12,9 +12,7 @@ incorporating the latest functionalities such as tool delegation, error handling
enabling agents to perform a wide range of actions.

<Tip>
**Want to publish your tool to the CrewAI platform?** Use the CLI to scaffold, publish, and share tools directly on the platform — see the [Platform Tools CLI](/en/guides/tools/platform-tools-cli) guide.

**Prefer publishing to PyPI?** Check out the [Publish Custom Tools](/en/guides/tools/publish-custom-tools) guide to package and distribute your tool as a standalone Python library.
**Want to publish your tool for the community?** If you're building a tool that others could benefit from, check out the [Publish Custom Tools](/en/guides/tools/publish-custom-tools) guide to learn how to package and distribute your tool on PyPI.
</Tip>

### Subclassing `BaseTool`
@@ -54,14 +54,6 @@ These tools enable your agents to search the web, research topics, and find info
  Extract structured content from web pages using the Tavily API.
</Card>

<Card title="Tavily Research Tool" icon="flask" href="/en/tools/search-research/tavilyresearchtool">
  Run multi-step research tasks and get cited reports using the Tavily Research API.
</Card>

<Card title="Tavily Get Research Tool" icon="clipboard-list" href="/en/tools/search-research/tavilygetresearchtool">
  Retrieve the status and results of an existing Tavily research task.
</Card>

<Card title="Arxiv Paper Tool" icon="box-archive" href="/en/tools/search-research/arxivpapertool">
  Search arXiv and optionally download PDFs.
</Card>
@@ -84,15 +76,7 @@ These tools enable your agents to search the web, research topics, and find info

- **Academic Research**: Find scholarly articles and technical papers

```python
from crewai_tools import (
    GitHubSearchTool,
    SerperDevTool,
    TavilyExtractorTool,
    TavilyGetResearchTool,
    TavilyResearchTool,
    TavilySearchTool,
    YoutubeVideoSearchTool,
)
from crewai_tools import SerperDevTool, GitHubSearchTool, YoutubeVideoSearchTool, TavilySearchTool, TavilyExtractorTool

# Create research tools
web_search = SerperDevTool()
@@ -100,21 +84,11 @@ code_search = GitHubSearchTool()
video_research = YoutubeVideoSearchTool()
tavily_search = TavilySearchTool()
content_extractor = TavilyExtractorTool()
tavily_research = TavilyResearchTool()
tavily_get_research = TavilyGetResearchTool()

# Add to your agent
agent = Agent(
    role="Research Analyst",
    tools=[
        web_search,
        code_search,
        video_research,
        tavily_search,
        content_extractor,
        tavily_research,
        tavily_get_research,
    ],
    tools=[web_search, code_search, video_research, tavily_search, content_extractor],
    goal="Gather comprehensive information on any topic"
)
```
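A minimal sketch of exercising the assembled agent end to end; the `Task` description and `expected_output` below are illustrative placeholders, not part of the original example:

```python
from crewai import Crew, Task

# Illustrative task: description and expected_output are placeholders.
research_task = Task(
    description="Survey recent developments in open-source AI agent frameworks.",
    expected_output="A short summary with links to the strongest sources.",
    agent=agent,  # the Research Analyst defined above
)

crew = Crew(agents=[agent], tasks=[research_task])
result = crew.kickoff()
print(result.raw)
```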
@@ -1,85 +0,0 @@
---
title: "Tavily Get Research Tool"
description: "Retrieve the status and results of an existing Tavily research task"
icon: "clipboard-list"
mode: "wide"
---

The `TavilyGetResearchTool` lets CrewAI agents check an existing Tavily research task by `request_id`. Use it when a research task was started earlier and you need to retrieve its current status or final results.

If you need to start a new research job, use the [Tavily Research Tool](/en/tools/search-research/tavilyresearchtool). This tool is specifically for looking up an existing Tavily research request after you already have its `request_id`.

## Installation

To use the `TavilyGetResearchTool`, install the `tavily-python` library alongside `crewai-tools`:

```shell
uv add 'crewai[tools]' tavily-python
```

## Environment Variables

Set your Tavily API key:

```bash
export TAVILY_API_KEY='your_tavily_api_key'
```

Get an API key at [https://app.tavily.com/](https://app.tavily.com/) (sign up, then create a key).

## Example Usage

```python
from crewai_tools import TavilyGetResearchTool

tavily_get_research_tool = TavilyGetResearchTool()

status_result = tavily_get_research_tool.run(
    request_id="your-research-request-id"
)

print(status_result)
```

## Common Workflow

Use `TavilyGetResearchTool` when your application or another service has already created a Tavily research task and saved its `request_id`.

Typical cases include:

- Polling for completion after kicking off research in a background job, as sketched below.
- Looking up the latest status of a long-running research task.
- Fetching final research output from a previously created Tavily request.
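A minimal sketch of the polling case, assuming the returned JSON string parses with `json.loads` and carries a `status` field; the field name and the `"pending"`/`"in_progress"` values are assumptions about Tavily's response shape, not documented here:

```python
import json
import time

from crewai_tools import TavilyGetResearchTool

tool = TavilyGetResearchTool()

# Poll an existing research task until it leaves a pending state.
# The "status" key and its values are assumed; check the Tavily API
# docs for the exact response schema.
while True:
    result = json.loads(tool.run(request_id="your-research-request-id"))
    if result.get("status") not in ("pending", "in_progress"):
        break
    time.sleep(10)  # back off between polls

print(result)
```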
## Configuration Options

The `TavilyGetResearchTool` accepts the following argument when calling the `run` method:

- `request_id` (str): **Required.** The existing Tavily research request ID to retrieve.

## Async Usage

Use `_arun` when your application is already running inside an async event loop:

```python
from crewai_tools import TavilyGetResearchTool

tavily_get_research_tool = TavilyGetResearchTool()

status_result = await tavily_get_research_tool._arun(
    request_id="your-research-request-id"
)
```

## Features

- **Research status retrieval**: Fetch the current status of an existing Tavily research task.
- **Result retrieval**: Return available research output once Tavily has completed the task.
- **Sync and async**: Use either `_run`/`run` or `_arun` depending on your application's runtime.
- **JSON output**: Returns Tavily responses as formatted JSON strings.

## Response Format

The tool returns a JSON string containing the current research task status and any available results from Tavily. The exact response shape depends on the task state returned by Tavily, so incomplete tasks may return status information before the final research output is available.

Refer to the [Tavily API documentation](https://docs.tavily.com/) for full details on the Research API.
@@ -4,80 +4,6 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="May 13, 2026">
## v1.14.5a5

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a5)

## What's Changed

### Features
- Deprecate CrewAgentExecutor, default Crew agents to AgentExecutor
- Improve the Daytona sandbox tools

### Bug Fixes
- Fix missing code block in the pt-BR first-flow guide
- Log HITL pre-review and distillation failures, add learn_strict
- Patch urllib3 for security vulnerabilities
- Patch gitpython and langchain-core; ignore the unpatched paramiko CVE
- Refresh all published workspace packages on uv lock/sync

### Documentation
- Add migration guide from `inputs.id` to `restoreFromStateId`
- Add OSS upgrade and crew-to-flow migration guides
- Update changelog and version for v1.14.5a4

## Contributors

@akaKuruma, @greysonlalonde, @iris-clawd, @lorenzejay, @mislavivanda

</Update>

<Update label="May 9, 2026">
## v1.14.5a4

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a4)

## What's Changed

### Features
- Update LLM listings

### Bug Fixes
- Fix a dependency issue by moving `textual` to `crewai-cli` and adding `certifi`

### Documentation
- Update changelog and version for v1.14.5a3

## Contributors

@cgoeppinger, @greysonlalonde

</Update>

<Update label="May 7, 2026">
## v1.14.5a3

[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a3)

## What's Changed

### Bug Fixes
- Fix the status endpoint path from /{kickoff_id}/status to /status/{kickoff_id}
- Update the gitpython dependency to version >=3.1.47 for security compliance

### Refactoring
- Extract the CLI into a standalone crewai-cli package

### Documentation
- Update changelog and version for v1.14.5a2

## Contributors

@greysonlalonde, @iris-clawd

</Update>

<Update label="May 4, 2026">
## v1.14.5a2

@@ -1,125 +0,0 @@
---
title: "Migrating from inputs.id to restore_from_state_id"
description: "Move @persist flows from the deprecated inputs.id hydration to the supported restore_from_state_id field"
icon: "arrow-right-arrow-left"
---

<Warning>
Passing `id` inside `inputs` to hydrate a `@persist` flow is **deprecated** and
scheduled for removal in a future release. The replacement, `restore_from_state_id`,
is available in CrewAI **v1.14.5 and later** — the steps below apply once you
upgrade.
</Warning>

## Overview

The documented way to hydrate a `@persist` flow from a previous run is to pass that
run's UUID as `inputs.id`. CrewAI now exposes a dedicated field,
`restore_from_state_id`, that performs the same hydration without overloading the
`inputs` payload — and without coupling the hydration key to the new run's identity.

## Migration

If you currently start a `@persist` flow with `inputs={"id": ...}`:

```python
# Deprecated
flow = CounterFlow()
flow.kickoff(inputs={"id": "abcd1234-5678-90ef-ghij-klmnopqrstuv"})
```

Switch to `restore_from_state_id`:

```python
# Supported
flow = CounterFlow()
flow.kickoff(restore_from_state_id="abcd1234-5678-90ef-ghij-klmnopqrstuv")
```

The two modes have different lineage semantics:

- `inputs={"id": <uuid>}` (deprecated) — **resume**: writes land under the provided
  id, extending the same `flow_uuid` history.
- `restore_from_state_id=<uuid>` — **fork**: hydrates state from the snapshot, then
  writes under a fresh `state.id`. The source flow's history is preserved. A sketch
  of the difference follows this list.
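A minimal sketch of the fork behavior, assuming the same `CounterFlow` as in the snippets above; the printed id is illustrative:

```python
# Fork: hydrate from an earlier run's snapshot, record under a new id.
flow = CounterFlow()
flow.kickoff(restore_from_state_id="abcd1234-5678-90ef-ghij-klmnopqrstuv")

# The new run receives its own state.id, distinct from the snapshot UUID,
# so the source run's history stays intact.
print(flow.state.id)  # a freshly generated UUID, not "abcd1234-..."
```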
For most production scenarios — re-running a flow seeded from previous state —
forking is what you want. See
[Mastering Flow State](/ko/guides/flows/mastering-flow-state) for the full mental
model.

If you kick off your flow through the CrewAI AMP REST API, see [AMP](#amp) below for
the equivalent payload migration.

## Why is `inputs.id` being deprecated for `@persist`?

`inputs.id` is currently the documented way to resume a `@persist` flow from a
previous run. The problem is that the same UUID does two jobs at once:

1. **It selects which snapshot `@persist` hydrates from** — it loads the state saved
   under that UUID.
2. **It becomes the new run's Flow Run ID** (`state.id` in the SDK; surfaced as
   `flow_id` in some contexts) — every `@persist` write from this kickoff also lands
   under that same UUID.

This dual role is the root cause of the problems this guide describes. Because the
supplied UUID is also the new run's id, two kickoffs passing the same `inputs.id`
are not two distinct runs — they share an id, share a persistence record, and (on
AMP) share a row in the executions list. There is no way to say "hydrate from this
snapshot, but record this run separately."

`restore_from_state_id` is that separation. It tells `@persist` which snapshot to
hydrate from while leaving the new run free to receive a fresh `state.id`. The
hydration source and the recorded run are no longer the same UUID — which is what
most production scenarios actually want.

## Removal timeline

`inputs.id` for `@persist` hydration is scheduled for removal in a future CrewAI
release. There is no immediate hard cutoff — existing flows keep working — but once
you upgrade to v1.14.5 or later, new code should use `restore_from_state_id`, and
existing flows should migrate at the next convenient opportunity.

## AMP

If you deploy your flow to CrewAI AMP, the migration extends to the kickoff payload
sent to your deployed crew, and the visible symptoms of `inputs.id` reuse show up in
the deployment dashboard. The two subsections below cover both.

### Migrating the kickoff payload

If you currently kick off a deployed flow by embedding `id` in `inputs`:

```bash
# Deprecated
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  -d '{"inputs": {"id": "abcd1234-5678-90ef-ghij-klmnopqrstuv", "topic": "AI Agent Frameworks"}}' \
  https://your-crew-url.crewai.com/kickoff
```

Move the UUID to the top-level `restoreFromStateId` field:

```bash
# Supported
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  -d '{
    "inputs": {"topic": "AI Agent Frameworks"},
    "restoreFromStateId": "abcd1234-5678-90ef-ghij-klmnopqrstuv"
  }' \
  https://your-crew-url.crewai.com/kickoff
```
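The same supported payload can be sent from Python; a sketch with the standard `requests` library is below (the URL and token are the placeholders from the curl example above):

```python
import requests

# Same supported payload as the curl example above.
response = requests.post(
    "https://your-crew-url.crewai.com/kickoff",
    headers={"Authorization": "Bearer YOUR_CREW_TOKEN"},
    json={
        "inputs": {"topic": "AI Agent Frameworks"},
        "restoreFromStateId": "abcd1234-5678-90ef-ghij-klmnopqrstuv",
    },
)
print(response.json())
```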
`restoreFromStateId` sits alongside `inputs` in the kickoff payload, not inside it.
The `inputs` object now carries only values your flow actually consumes.

### What happens when `inputs.id` is reused

When AMP receives a kickoff for a flow whose `inputs.id` matches an existing
execution, it resolves to the existing record instead of creating a new one. From
the deployment dashboard you will see:

- **Execution status** — the new run's status overwrites the previous run's. A
  finished run can flip back to `running`, or a `completed` run can flip to `error`
  if the new kickoff fails — either way, the dashboard no longer reflects the
  original run.
- **Traces** — OTel traces pile up across kickoffs because they share the same run
  id; the previous run's traces are replaced or interleaved with the new run's. A
  step-by-step replay no longer corresponds to a single run.
- **Executions list** — kickoffs that should appear as separate rows collapse into a
  single entry, hiding history.

Migrating to `restoreFromStateId` keeps every kickoff as its own run — with its own
status, traces, and row in the list — while still hydrating state from a previous
run.

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
Contact our support team if you are unsure which mode your flow needs or run into
issues during the migration.
</Card>
@@ -1,190 +0,0 @@
---
title: "Upgrading CrewAI"
description: "How to upgrade CrewAI in your project and adapt to breaking changes between versions."
icon: "arrow-up-circle"
---

## Overview

CrewAI releases ship new features regularly. This guide walks through the practical steps for keeping both the CLI and your project's virtual environment up to date.

If you are starting fresh, see [Installation](/ko/installation). If you are coming from another framework, see [Migrating from LangGraph](/ko/guides/migration/migrating-from-langgraph).

---

## The Two Things You Can Upgrade

CrewAI lives in two places on your machine, and each upgrades independently:

| What | How it's installed | How to upgrade |
|---|---|---|
| The **global `crewai` CLI** | `uv tool install crewai` | `uv tool install crewai --upgrade` |
| The **project venv** (where your code runs) | `crewai install` / `uv sync` | `uv add "crewai[...]>=X.Y.Z"` then `crewai install` |

The two can — and frequently do — drift out of sync. `crewai --version` tells you the CLI version; `uv pip show crewai` run inside the project tells you the venv version. It is normal for them to differ; the venv version is what matters for the code you run.

## Why `crewai install` Alone Does Not Upgrade

`crewai install` is a thin wrapper around `uv sync`. It installs exactly what the current `uv.lock` file says — it does **not** bump any version constraints.

If your `pyproject.toml` says `crewai>=1.11.1` and the lock file resolved to `1.11.1`, running `crewai install` keeps you on `1.11.1` forever, even when `1.14.4` is available.

To actually upgrade, you need to:

1. Update the version constraint in `pyproject.toml`
2. Re-resolve the lock file
3. Sync the venv

`uv add` does all three in one step.

## How to Upgrade Your Project

```bash
# One command to bump the constraint and re-resolve the lock
uv add "crewai[tools]>=1.14.4"

# Sync the venv (crewai install calls uv sync under the hood)
crewai install

# Verify
uv pip show crewai
# → Version: 1.14.4
```

Replace `[tools]` with whatever extras your project uses (e.g. `[tools,anthropic]`). Check the `dependencies` list in your `pyproject.toml` if you are unsure.

<Note>
`uv add` updates **both** `pyproject.toml` and `uv.lock` atomically. If you edit `pyproject.toml` by hand, you still need to run `uv lock --upgrade-package crewai` to re-resolve the lock file before `crewai install` picks up the new version.
</Note>

## Upgrading the Global CLI

The global CLI is separate from your project. Upgrade it with:

```bash
uv tool install crewai --upgrade
```

If your shell warns about `PATH` after the upgrade, refresh it:

```bash
uv tool update-shell
```

This does **not** touch your project's venv — inside a project you still need `uv add` + `crewai install`.

## Check That Both Are in Sync

```bash
# Global CLI version
crewai --version

# Project venv version
uv pip show crewai | grep Version
```

They do not need to match — the project venv version is what matters for runtime behavior.

<Note>
CrewAI requires `Python >=3.10, <3.14`. If `uv` was installed against an older interpreter, recreate the project venv with a supported Python before running `crewai install`, as sketched below.
</Note>
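A minimal sketch of that recreation, assuming uv's standard `venv --python` flag and a project-local `.venv`; 3.12 is just an example of a supported interpreter:

```bash
# Recreate the project venv on a supported interpreter (3.12 as an example)
rm -rf .venv
uv venv --python 3.12
crewai install   # re-sync dependencies into the fresh venv
```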
---

## Breaking Changes and Migration Notes

Most upgrades need only small adjustments. The areas below are the ones that break silently or with confusing tracebacks.

### Import paths: tools and `BaseTool`

The canonical import location for tools is `crewai.tools`. The old paths still show up in tutorials but should be updated.

```python
# Before
from crewai_tools import BaseTool
from crewai.agents.tools import tool

# After
from crewai.tools import BaseTool, tool
```

The `@tool` decorator and `BaseTool` subclasses both live in `crewai.tools`. Internal agent symbols such as `AgentFinish` are no longer part of the public surface — if you were importing them, switch to event listeners or `Task` callbacks.

### `Agent` parameter changes

```python
from crewai import Agent

agent = Agent(
    role="Researcher",
    goal="Find authoritative sources on {topic}",
    backstory="You are a careful, source-driven researcher.",
    llm="gpt-4o-mini",        # model-name string OR an LLM object
    verbose=True,             # a bool, not an integer level
    max_iter=15,              # default changed across versions — set it explicitly
    allow_delegation=False,
)
```

- `llm` accepts either a model-name string (resolved through the configured provider) or an `LLM` object for fine-grained control.
- `verbose` is a plain `bool`. Passing an integer no longer toggles log levels.
- The default for `max_iter` changed between releases. If your agent silently stops iterating after its first tool call, set `max_iter` explicitly.

### `Crew` parameters

```python
from crewai import Crew, Process

crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,  # or Process.hierarchical
    memory=True,
    cache=True,
    embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
)
```

- `process=Process.hierarchical` requires either `manager_llm=` or `manager_agent=`. Without one of them, kickoff fails at validation time.
- `memory=True` with a non-default embedding provider needs an `embedder` dict — see [Memory and embedder configuration](#memory-embedder-config) below.

### `Task` structured output

Use `output_pydantic`, `output_json`, or `output_file` to force a task's result into a typed shape:

```python
from pydantic import BaseModel
from crewai import Task

class Article(BaseModel):
    title: str
    body: str

write = Task(
    description="Write an article about {topic}",
    expected_output="A short article with a title and body",
    agent=writer,
    output_pydantic=Article,   # the class, NOT an instance
    output_file="output/article.md",
)
```

`output_pydantic` takes the **class** itself. Passing `Article(title="", body="")` is a common mistake and fails with a confusing validation error.

### Memory and embedder configuration {#memory-embedder-config}

If `memory=True` and you are not using OpenAI's default embeddings, you must pass an `embedder`:

```python
crew = Crew(
    agents=[...],
    tasks=[...],
    memory=True,
    embedder={
        "provider": "ollama",
        "config": {"model": "nomic-embed-text"},
    },
)
```

Set the relevant provider's credentials (`OPENAI_API_KEY`, `OLLAMA_HOST`, etc.) in your `.env` file. Memory storage paths are project-local by default — delete the project's memory directory if you switch embedders, since mismatched dimensions do not mix.
@@ -13,7 +13,7 @@ The Daytona sandbox tools give CrewAI agents access to isolated, ephemeral compu

- **`DaytonaExecTool`** — run any shell command inside a sandbox.
- **`DaytonaPythonTool`** — execute a block of Python source code inside a sandbox.
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox; also supports `move`, `find` (content grep), `search` (filename glob), `chmod` (permissions), `replace` (bulk find-and-replace), and `exists`.
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox.

All three tools share the same sandbox lifecycle controls, so you can mix and match them while keeping state in a single persistent sandbox.

@@ -55,7 +55,7 @@ from crewai_tools import DaytonaPythonTool
tool = DaytonaPythonTool()
result = tool.run(code="print(sum(range(10)))")
print(result)
# {"exit_code": 0, "result": "45\n", "artifacts": ExecutionArtifacts(stdout="45\n", charts=[])}
# {"exit_code": 0, "result": "45\n", "artifacts": None}
```

### Multi-step shell session (persistent)

@@ -63,22 +63,17 @@ print(result)
```python Code
from crewai_tools import DaytonaExecTool, DaytonaFileTool

# Create the persistent sandbox via the first tool, then attach the second
# tool to it so both share state (installed packages, files, env vars).
exec_tool = DaytonaExecTool(persistent=True)
exec_tool.run(command="pip install httpx -q")
file_tool = DaytonaFileTool(sandbox_id=exec_tool.active_sandbox_id)
file_tool = DaytonaFileTool(persistent=True)

file_tool.run(
    action="write",
    path="workspace/script.py",
    content="import httpx; print(f'httpx loaded, version {httpx.__version__}')",
)
exec_tool.run(command="python workspace/script.py")
# Install a package, then write and run a script — all in the same sandbox
exec_tool.run(command="pip install httpx -q")
file_tool.run(action="write", path="/workspace/fetch.py", content="import httpx; print(httpx.get('https://httpbin.org/get').status_code)")
exec_tool.run(command="python /workspace/fetch.py")
```

<Note>
By default, each tool with `persistent=True` lazily creates its **own** sandbox on first use. The pattern above shares a single sandbox across multiple tools by reading the first tool's `active_sandbox_id` after a `.run()` call and passing it to the others via `sandbox_id=...`. With `persistent=False` (the default), every `.run()` call gets a fresh sandbox that's deleted at the end of that call.
Each tool instance maintains its own persistent sandbox. To share **one** sandbox across two tools, create the first tool, grab its sandbox id via `tool._persistent_sandbox.id`, and pass it to the second tool via `sandbox_id=...`.
</Note>

### Attach to an existing sandbox

@@ -87,7 +82,7 @@ By default, each tool with `persistent=True` lazily creates its **own** sandbox
from crewai_tools import DaytonaExecTool

tool = DaytonaExecTool(sandbox_id="my-long-lived-sandbox")
result = tool.run(command="ls workspace")
result = tool.run(command="ls /workspace")
```

### Custom sandbox parameters

@@ -107,41 +102,6 @@ tool = DaytonaExecTool(
)
```

### Searching, moving, and modifying files

```python Code
from crewai_tools import DaytonaFileTool

file_tool = DaytonaFileTool(persistent=True)

# Find every TODO in the source tree (grep file contents recursively)
file_tool.run(action="find", path="workspace/src", pattern="TODO:")

# Find all Python files (glob match on filenames)
file_tool.run(action="search", path="workspace", pattern="*.py")

# Make a script executable
file_tool.run(action="chmod", path="workspace/run.sh", mode="755")

# Rename or move a file
file_tool.run(
    action="move",
    path="workspace/draft.md",
    destination="workspace/final.md",
)

# Bulk find-and-replace across multiple files
file_tool.run(
    action="replace",
    paths=["workspace/src/a.py", "workspace/src/b.py"],
    pattern="old_function",
    replacement="new_function",
)

# Quick existence check before a destructive op
file_tool.run(action="exists", path="workspace/cache.db")
```

### Agent integration

```python Code
@@ -161,7 +121,7 @@ coder = Agent(
)

task = Task(
    description="Write a Python script that prints the first 10 Fibonacci numbers, save it to workspace/fib.py, and run it.",
    description="Write a Python script that prints the first 10 Fibonacci numbers, save it to /workspace/fib.py, and run it.",
    expected_output="The first 10 Fibonacci numbers printed to stdout.",
    agent=coder,
)
@@ -208,22 +168,12 @@ All three tools accept these parameters at initialization:

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`, `exists`, `move`, `find`, `search`, `chmod`, `replace`. |
| `path` | `str \| None` | ✓ for all actions except `replace` | Absolute path inside the sandbox. |
| `content` | `str \| None` | ✓ for `append` | Content to write or append. |
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`. |
| `path` | `str` | ✓ | Absolute path inside the sandbox. |
| `content` | `str \| None` | | Content to write or append. Required for `append`. |
| `binary` | `bool` | | If `True`, `content` is base64 on write; returns base64 on read. |
| `recursive` | `bool` | | For `delete`: remove directories recursively. |
| `mode` | `str \| None` | | For `mkdir`: octal permissions for the new directory (defaults to `"0755"`). For `chmod`: octal permissions to apply to the target. |
| `destination` | `str \| None` | ✓ for `move` | Destination path for `move`. |
| `pattern` | `str \| None` | ✓ for `find`, `search`, `replace` | For `find`: substring matched against file CONTENTS. For `search`: glob matched against file NAMES (e.g. `*.py`). For `replace`: text to replace inside files. |
| `replacement` | `str \| None` | ✓ for `replace` | Replacement text for `pattern`. |
| `paths` | `list[str] \| None` | ✓ for `replace` | List of file paths in which to replace text. |
| `owner` | `str \| None` | | For `chmod`: new file owner. |
| `group` | `str \| None` | | For `chmod`: new file group. |

<Note>
For `chmod`, pass at least one of `mode`, `owner`, or `group` — any field left as `None` is left unchanged on the target.
</Note>
| `mode` | `str` | | For `mkdir`: octal permission string (default `"0755"`). |

<Tip>
For files larger than a few KB, create the file first with `action="write"` and empty content, then send the body via multiple `action="append"` calls of ~4 KB each to stay within tool-call payload limits.
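A minimal sketch of that chunked upload, reusing the persistent `file_tool` pattern above; the 4 KB chunk size mirrors the tip and is not a hard API limit:

```python
from crewai_tools import DaytonaFileTool

file_tool = DaytonaFileTool(persistent=True)

large_body = "..." * 100_000  # any large string you need inside the sandbox
CHUNK = 4 * 1024              # ~4 KB per tool call, per the tip above

# Create (or truncate) the file, then stream the body in chunks.
file_tool.run(action="write", path="workspace/big_file.txt", content="")
for start in range(0, len(large_body), CHUNK):
    file_tool.run(
        action="append",
        path="workspace/big_file.txt",
        content=large_body[start:start + CHUNK],
    )
```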
|
||||
@@ -4,80 +4,6 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="13 mai 2026">
|
||||
## v1.14.5a5
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a5)
|
||||
|
||||
## O Que Mudou
|
||||
|
||||
### Recursos
|
||||
- Deprecar CrewAgentExecutor, definir agentes Crew como AgentExecutor
|
||||
- Melhorar ferramentas de sandbox Daytona
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir bloco de código ausente no guia de primeiro fluxo em pt-BR
|
||||
- Registrar falhas de pré-revisão e destilação HITL, adicionar learn_strict
|
||||
- Corrigir urllib3 para vulnerabilidades de segurança
|
||||
- Corrigir gitpython e langchain-core; ignorar CVE paramiko não corrigido
|
||||
- Atualizar todos os pacotes de workspace publicados no bloqueio/sincronização uv
|
||||
|
||||
### Documentação
|
||||
- Adicionar guia de migração de `inputs.id` para `restoreFromStateId`
|
||||
- Adicionar guia de atualização OSS e migração de crew para flow
|
||||
- Atualizar changelog e versão para v1.14.5a4
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@akaKuruma, @greysonlalonde, @iris-clawd, @lorenzejay, @mislavivanda
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="09 mai 2026">
|
||||
## v1.14.5a4
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a4)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Funcionalidades
|
||||
- Atualizar listagens de LLM
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir problema de dependência movendo `textual` para `crewai-cli` e adicionando `certifi`
|
||||
|
||||
### Documentação
|
||||
- Atualizar changelog e versão para v1.14.5a3
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@cgoeppinger, @greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="07 mai 2026">
|
||||
## v1.14.5a3
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.5a3)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir o caminho do endpoint de status de /{kickoff_id}/status para /status/{kickoff_id}
|
||||
- Atualizar a dependência gitpython para a versão >=3.1.47 para conformidade de segurança
|
||||
|
||||
### Refatoração
|
||||
- Extrair CLI para o pacote independente crewai-cli
|
||||
|
||||
### Documentação
|
||||
- Atualizar o changelog e a versão para v1.14.5a2
|
||||
|
||||
## Contributors
|
||||
|
||||
@greysonlalonde, @iris-clawd
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="04 mai 2026">
|
||||
## v1.14.5a2
|
||||
|
||||
|
||||
@@ -266,165 +266,7 @@ Our flow will:
Let's create our flow in the `main.py` file:

```python
#!/usr/bin/env python
import json
import os
from typing import Dict, List, Optional

from pydantic import BaseModel, Field

from crewai import LLM
from crewai.flow.flow import Flow, listen, start
from guide_creator_flow.crews.content_crew.content_crew import ContentCrew

# Define our models for structured data
class Section(BaseModel):
    title: str = Field(description="Title of the section")
    description: str = Field(description="Brief description of what the section should cover")

class GuideOutline(BaseModel):
    title: str = Field(description="Title of the guide")
    introduction: str = Field(description="Introduction to the topic")
    target_audience: str = Field(description="Description of the target audience")
    sections: List[Section] = Field(description="List of sections in the guide")
    conclusion: str = Field(description="Conclusion or summary of the guide")

# Define the state of our flow
class GuideCreatorState(BaseModel):
    topic: str = ""
    audience_level: str = ""
    guide_outline: Optional[GuideOutline] = None
    sections_content: Dict[str, str] = {}

class GuideCreatorFlow(Flow[GuideCreatorState]):
    """Flow for creating a comprehensive guide on any topic"""

    @start()
    def get_user_input(self):
        """Get user input about the guide's topic and audience"""
        print("\n=== Create Your Comprehensive Guide ===\n")

        # Get user input
        self.state.topic = input("What topic would you like to create a guide for? ")

        # Get audience level with validation
        while True:
            audience = input("Who is your target audience? (beginner/intermediate/advanced) ").lower()
            if audience in ["beginner", "intermediate", "advanced"]:
                self.state.audience_level = audience
                break
            print("Please enter 'beginner', 'intermediate', or 'advanced'")

        print(f"\nCreating a guide on {self.state.topic} for {self.state.audience_level} audience...\n")
        return self.state

    @listen(get_user_input)
    def create_guide_outline(self, state):
        """Create a structured outline for the guide using a direct LLM call"""
        print("Creating guide outline...")

        # Initialize the LLM
        llm = LLM(model="openai/gpt-4o-mini", response_format=GuideOutline)

        # Create the messages for the outline
        messages = [
            {"role": "system", "content": "You are a helpful assistant designed to output JSON."},
            {"role": "user", "content": f"""
            Create a detailed outline for a comprehensive guide on "{state.topic}" for {state.audience_level} level learners.

            The outline should include:
            1. A compelling title for the guide
            2. An introduction to the topic
            3. 4-6 main sections that cover the most important aspects of the topic
            4. A conclusion or summary

            For each section, provide a clear title and a brief description of what it should cover.
            """}
        ]

        # Make the LLM call with JSON response format
        response = llm.call(messages=messages)

        # Parse the JSON response
        outline_dict = json.loads(response)
        self.state.guide_outline = GuideOutline(**outline_dict)

        # Make sure the output directory exists before saving
        os.makedirs("output", exist_ok=True)

        # Save the outline to a file
        with open("output/guide_outline.json", "w") as f:
            json.dump(outline_dict, f, indent=2)

        print(f"Guide outline created with {len(self.state.guide_outline.sections)} sections")
        return self.state.guide_outline

    @listen(create_guide_outline)
    def write_and_compile_guide(self, outline):
        """Write all sections and compile the guide"""
        print("Writing guide sections and compiling...")
        completed_sections = []

        # Process sections one at a time to preserve context flow
        for section in outline.sections:
            print(f"Processing section: {section.title}")

            # Build context from previously written sections
            previous_sections_text = ""
            if completed_sections:
                previous_sections_text = "# Previously Written Sections\n\n"
                for title in completed_sections:
                    previous_sections_text += f"## {title}\n\n"
                    previous_sections_text += self.state.sections_content.get(title, "") + "\n\n"
            else:
                previous_sections_text = "No previous sections written yet."

            # Run the content crew for this section
            result = ContentCrew().crew().kickoff(inputs={
                "section_title": section.title,
                "section_description": section.description,
                "audience_level": self.state.audience_level,
                "previous_sections": previous_sections_text,
                "draft_content": ""
            })

            # Store the content
            self.state.sections_content[section.title] = result.raw
            completed_sections.append(section.title)
            print(f"Section completed: {section.title}")

        # Compile the final guide
        guide_content = f"# {outline.title}\n\n"
        guide_content += f"## Introduction\n\n{outline.introduction}\n\n"

        # Add each section in order
        for section in outline.sections:
            section_content = self.state.sections_content.get(section.title, "")
            guide_content += f"\n\n{section_content}\n\n"

        # Add the conclusion
        guide_content += f"## Conclusion\n\n{outline.conclusion}\n\n"

        # Save the guide
        with open("output/complete_guide.md", "w") as f:
            f.write(guide_content)

        print("\nComplete guide compiled and saved to output/complete_guide.md")
        return "Guide creation completed successfully"

def kickoff():
    """Run the guide creator flow"""
    GuideCreatorFlow().kickoff()
    print("\n=== Flow Complete ===")
    print("Your comprehensive guide is ready in the output directory.")
    print("Open output/complete_guide.md to view it.")

def plot():
    """Generate a visualization of the flow"""
    flow = GuideCreatorFlow()
    flow.plot("guide_creator_flow")
    print("Flow visualization saved to guide_creator_flow.html")

if __name__ == "__main__":
    kickoff()
```

Let's break down what's happening in this flow:

@@ -1,142 +0,0 @@
---
title: "Migrating from inputs.id to restore_from_state_id"
description: "Move @persist flows from the deprecated inputs.id hydration to the supported restore_from_state_id field"
icon: "arrow-right-arrow-left"
---

<Warning>
Passing `id` inside `inputs` to hydrate a `@persist` flow is **deprecated** and
scheduled for removal in a future release. The replacement, `restore_from_state_id`,
is available in CrewAI **v1.14.5 and later** — the steps below apply once you
upgrade.
</Warning>

## Overview

The documented way to hydrate a `@persist` flow from a previous run is to pass that
run's UUID as `inputs.id`. CrewAI now exposes a dedicated field,
`restore_from_state_id`, that performs the same hydration without overloading the
`inputs` payload — and without coupling the hydration key to the new run's identity.

## Migration

If you currently start a `@persist` flow with `inputs={"id": ...}`:

```python
# Deprecated
flow = CounterFlow()
flow.kickoff(inputs={"id": "abcd1234-5678-90ef-ghij-klmnopqrstuv"})
```

Switch to `restore_from_state_id`:

```python
# Supported
flow = CounterFlow()
flow.kickoff(restore_from_state_id="abcd1234-5678-90ef-ghij-klmnopqrstuv")
```

The two modes have different lineage semantics:

- `inputs={"id": <uuid>}` (deprecated) — **resume**: writes land under the provided
  id, extending the same `flow_uuid` history.
- `restore_from_state_id=<uuid>` — **fork**: hydrates state from a snapshot, then
  writes under a new `state.id`. The source flow's history is preserved.

For most production scenarios — re-running a flow hydrated from previous state —
forking is what you want. See
[Mastering Flow State](/pt-BR/guides/flows/mastering-flow-state) for the full mental
model.

If you kick off your flow through the CrewAI AMP REST API, see [AMP](#amp) below for
the equivalent payload migration.

## Why is `inputs.id` being deprecated for `@persist`?

`inputs.id` is currently the documented way to resume a `@persist` flow from a
previous run. The problem is that the same UUID does two jobs at once:

1. **It selects which snapshot `@persist` hydrates from** — it loads the state saved
   under that UUID.
2. **It becomes the new run's Flow Run ID** (`state.id` in the SDK; surfaced as
   `flow_id` in some contexts) — every `@persist` write from this kickoff also lands
   under that same UUID.

This dual role is the root cause of the problems this guide describes. Because the
supplied UUID is also the new run's id, two kickoffs passing the same `inputs.id`
are not two distinct runs — they share an id, share a persistence record, and (on
AMP) share a row in the executions list. There is no way to say "hydrate from this
snapshot, but record this run separately" without splitting the two
responsibilities.

`restore_from_state_id` is that split. It tells `@persist` which snapshot to hydrate
from while leaving the new run free to receive a fresh `state.id`. The hydration
source and the recorded run are no longer the same UUID — which is what most
production scenarios actually want.

## Removal timeline

`inputs.id` for `@persist` hydration is scheduled for removal in a future CrewAI
release. There is no immediate cutoff — existing flows keep working — but once you
upgrade to v1.14.5 or later, new code should use `restore_from_state_id`, and
existing flows should migrate at the next convenient opportunity.

## AMP

If you deploy your flow to CrewAI AMP, the migration extends to the kickoff payload
sent to your deployed crew, and the visible symptoms of `inputs.id` reuse show up in
the deployment dashboard. The two subsections below cover both.

### Migrating the kickoff payload

If you currently kick off a deployed flow by embedding `id` in `inputs`:

```bash
# Deprecated
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  -d '{"inputs": {"id": "abcd1234-5678-90ef-ghij-klmnopqrstuv", "topic": "AI Agent Frameworks"}}' \
  https://your-crew-url.crewai.com/kickoff
```

Move the UUID to the top-level `restoreFromStateId` field:

```bash
# Supported
curl -X POST \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer YOUR_CREW_TOKEN" \
  -d '{
    "inputs": {"topic": "AI Agent Frameworks"},
    "restoreFromStateId": "abcd1234-5678-90ef-ghij-klmnopqrstuv"
  }' \
  https://your-crew-url.crewai.com/kickoff
```

`restoreFromStateId` sits alongside `inputs` in the kickoff payload, not inside it.
The `inputs` object now carries only values your flow actually consumes.

### What happens when `inputs.id` is reused

When AMP receives a kickoff for a flow whose `inputs.id` matches an existing
execution, it resolves to the existing record instead of creating a new one. From
the deployment dashboard you will see:

- **Execution status** — the new run's status overwrites the previous run's. A
  finished run can flip back to `running`, or a `completed` run can flip to `error`
  if the new kickoff fails — either way, the dashboard no longer reflects the
  original run.
- **Traces** — OTel traces pile up across kickoffs because they share the same run
  id; the previous run's traces are replaced or interleaved with the new run's. A
  step-by-step replay no longer corresponds to a single run.
- **Executions list** — kickoffs that should appear as separate rows collapse into a
  single entry, hiding history.

Migrating to `restoreFromStateId` keeps every kickoff as its own run — with its own
status, traces, and row in the list — while still hydrating state from a previous
run.

<Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
Contact our support team if you are unsure which mode your flow needs or run into
issues during the migration.
</Card>
@@ -1,190 +0,0 @@
---
title: "Upgrading CrewAI"
description: "How to upgrade CrewAI in your project and adapt to breaking changes between versions."
icon: "arrow-up-circle"
---

## Overview

CrewAI releases ship new features regularly. This guide walks through the practical steps for keeping your installation current — both the CLI and your project's virtual environment.

If you are starting from scratch, see [Installation](/pt-BR/installation). If you are coming from another framework, see [Migrating from LangGraph](/pt-BR/guides/migration/migrating-from-langgraph).

---

## The Two Things You Might Want to Upgrade

CrewAI lives in two places on your machine, and each upgrades independently:

| What | How it's installed | How to upgrade |
|---|---|---|
| The **global `crewai` CLI** | `uv tool install crewai` | `uv tool install crewai --upgrade` |
| The **project venv** (where your code runs) | `crewai install` / `uv sync` | `uv add "crewai[...]>=X.Y.Z"` then `crewai install` |

The two can — and frequently do — drift out of sync. Running `crewai --version` shows the CLI version. Running `uv pip show crewai` inside your project shows the venv version. If they differ, that is normal; what matters for the code you run is the venv version.

## Why `crewai install` Alone Does Not Upgrade

`crewai install` is a thin wrapper around `uv sync`. It installs exactly what the current `uv.lock` file says — it does **not** change any version constraints.

If your `pyproject.toml` says `crewai>=1.11.1` and the lock file resolved to `1.11.1`, running `crewai install` keeps you on `1.11.1` forever, even if `1.14.4` is available.

To actually upgrade, you need to:

1. Update the version constraint in `pyproject.toml`
2. Re-resolve the lock file
3. Sync the venv

`uv add` does all three in one go.

## How to Upgrade Your Project

```bash
# One command to bump the constraint and re-resolve the lock
uv add "crewai[tools]>=1.14.4"

# Sync the venv (crewai install calls uv sync under the hood)
crewai install

# Verify
uv pip show crewai
# → Version: 1.14.4
```

Replace `[tools]` with whatever extras your project uses (e.g. `[tools,anthropic]`). Check the `dependencies` list in your `pyproject.toml` if you are unsure.

<Note>
`uv add` updates **both** `pyproject.toml` and `uv.lock` atomically. If you edit `pyproject.toml` by hand, you still need to run `uv lock --upgrade-package crewai` to re-resolve the lock file before `crewai install` picks up the new version.
</Note>

## Upgrading the Global CLI

The global CLI is separate from your project. Upgrade it with:

```bash
uv tool install crewai --upgrade
```

If your shell warns about `PATH` after the upgrade, refresh it:

```bash
uv tool update-shell
```

This does **not** touch your project's venv — inside a project you still need `uv add` + `crewai install`.

## Check That Both Are in Sync

```bash
# Global CLI version
crewai --version

# Project venv version
uv pip show crewai | grep Version
```

They do not need to match — the project venv version is what matters for runtime behavior.

<Note>
CrewAI requires `Python >=3.10, <3.14`. If `uv` was installed against an older interpreter, recreate the project venv with a supported Python before running `crewai install`.
</Note>

---

## Breaking Changes and Migration Notes

Most upgrades require only small adjustments. The areas below are the ones that break silently or with confusing tracebacks.

### Import paths: tools and `BaseTool`

The canonical path for tools is `crewai.tools`. Old paths still appear in tutorials but should be updated.

```python
# Before
from crewai_tools import BaseTool
from crewai.agents.tools import tool

# After
from crewai.tools import BaseTool, tool
```

The `@tool` decorator and `BaseTool` subclasses both live in `crewai.tools`. `AgentFinish` and other internal agent symbols are no longer part of the public surface — if you were importing them, switch to event listeners or `Task` callbacks.

### `Agent` parameter changes

```python
from crewai import Agent

agent = Agent(
    role="Researcher",
    goal="Find authoritative sources on {topic}",
    backstory="You are a careful, source-driven researcher.",
    llm="gpt-4o-mini",        # model-name string OR an LLM object
    verbose=True,             # a bool, not an integer level
    max_iter=15,              # default changed across versions — set it explicitly
    allow_delegation=False,
)
```

- `llm` accepts either a model-name string (resolved through the configured provider) or an `LLM` object for fine-grained control.
- `verbose` is a plain `bool`. Passing an integer no longer toggles log levels.
- The defaults for `max_iter` changed between releases. If your agent silently stops iterating after the first tool call, set `max_iter` explicitly.

### `Crew` parameters

```python
from crewai import Crew, Process

crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,  # or Process.hierarchical
    memory=True,
    cache=True,
    embedder={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
)
```

- `process=Process.hierarchical` requires either `manager_llm=` or `manager_agent=`. Without one of them, kickoff errors out at validation.
- `memory=True` with a non-default embedding provider needs an `embedder` dict — see [Memory and embedder configuration](#memory-embedder-config) below.

### `Task` structured output

Use `output_pydantic`, `output_json`, or `output_file` to force a task's result into a typed shape:

```python
from pydantic import BaseModel
from crewai import Task

class Article(BaseModel):
    title: str
    body: str

write = Task(
    description="Write an article about {topic}",
    expected_output="A short article with a title and body",
    agent=writer,
    output_pydantic=Article,   # the class, NOT an instance
    output_file="output/article.md",
)
```

`output_pydantic` takes the **class** itself. Passing `Article(title="", body="")` is a common mistake and fails with a confusing validation error.

### Memory and embedder configuration {#memory-embedder-config}

If `memory=True` and you are not using OpenAI's default embeddings, you must pass an `embedder`:

```python
crew = Crew(
    agents=[...],
    tasks=[...],
    memory=True,
    embedder={
        "provider": "ollama",
        "config": {"model": "nomic-embed-text"},
    },
)
```

Set the relevant provider's credentials (`OPENAI_API_KEY`, `OLLAMA_HOST`, etc.) in your `.env` file. Memory storage paths are project-local by default — delete the project's memory directory if you switch embedders, since mismatched dimensions do not mix.
@@ -13,7 +13,7 @@ The Daytona sandbox tools give CrewAI agents access to isolated, ephemeral compu
|
||||
|
||||
- **`DaytonaExecTool`** — run any shell command inside a sandbox.
|
||||
- **`DaytonaPythonTool`** — execute a block of Python source code inside a sandbox.
|
||||
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox; also supports `move`, `find` (content grep), `search` (filename glob), `chmod` (permissions), `replace` (bulk find-and-replace), and `exists`.
|
||||
- **`DaytonaFileTool`** — read, write, append, list, delete, and inspect files inside a sandbox.
|
||||
|
||||
All three tools share the same sandbox lifecycle controls, so you can mix and match them while keeping state in a single persistent sandbox.
|
||||
|
||||
@@ -55,7 +55,7 @@ from crewai_tools import DaytonaPythonTool
|
||||
tool = DaytonaPythonTool()
|
||||
result = tool.run(code="print(sum(range(10)))")
|
||||
print(result)
|
||||
# {"exit_code": 0, "result": "45\n", "artifacts": ExecutionArtifacts(stdout="45\n", charts=[])}
|
||||
# {"exit_code": 0, "result": "45\n", "artifacts": None}
|
||||
```
|
||||
|
||||
### Multi-step shell session (persistent)
|
||||
@@ -63,22 +63,17 @@ print(result)
|
||||
```python Code
|
||||
from crewai_tools import DaytonaExecTool, DaytonaFileTool
|
||||
|
||||
# Create the persistent sandbox via the first tool, then attach the second
|
||||
# tool to it so both share state (installed packages, files, env vars).
|
||||
exec_tool = DaytonaExecTool(persistent=True)
|
||||
exec_tool.run(command="pip install httpx -q")
|
||||
file_tool = DaytonaFileTool(sandbox_id=exec_tool.active_sandbox_id)
|
||||
file_tool = DaytonaFileTool(persistent=True)
|
||||
|
||||
file_tool.run(
|
||||
action="write",
|
||||
path="workspace/script.py",
|
||||
content="import httpx; print(f'httpx loaded, version {httpx.__version__}')",
|
||||
)
|
||||
exec_tool.run(command="python workspace/script.py")
|
||||
# Install a package, then write and run a script — all in the same sandbox
|
||||
exec_tool.run(command="pip install httpx -q")
|
||||
file_tool.run(action="write", path="/workspace/fetch.py", content="import httpx; print(httpx.get('https://httpbin.org/get').status_code)")
|
||||
exec_tool.run(command="python /workspace/fetch.py")
|
||||
```
|
||||
|
||||
<Note>
|
||||
By default, each tool with `persistent=True` lazily creates its **own** sandbox on first use. The pattern above shares a single sandbox across multiple tools by reading the first tool's `active_sandbox_id` after a `.run()` call and passing it to the others via `sandbox_id=...`. With `persistent=False` (the default), every `.run()` call gets a fresh sandbox that's deleted at the end of that call.
|
||||
Each tool instance maintains its own persistent sandbox. To share **one** sandbox across two tools, create the first tool, grab its sandbox id via `tool._persistent_sandbox.id`, and pass it to the second tool via `sandbox_id=...`.
|
||||
</Note>
|
||||
|
||||
### Attach to an existing sandbox
|
||||
@@ -87,7 +82,7 @@ By default, each tool with `persistent=True` lazily creates its **own** sandbox
|
||||
from crewai_tools import DaytonaExecTool
|
||||
|
||||
tool = DaytonaExecTool(sandbox_id="my-long-lived-sandbox")
|
||||
result = tool.run(command="ls workspace")
|
||||
result = tool.run(command="ls /workspace")
|
||||
```
|
||||
|
||||
### Custom sandbox parameters
|
||||
@@ -107,41 +102,6 @@ tool = DaytonaExecTool(
|
||||
)
|
||||
```

### Searching, moving, and modifying files

```python Code
from crewai_tools import DaytonaFileTool

file_tool = DaytonaFileTool(persistent=True)

# Find every TODO in the source tree (grep file contents recursively)
file_tool.run(action="find", path="workspace/src", pattern="TODO:")

# Find all Python files (glob match on filenames)
file_tool.run(action="search", path="workspace", pattern="*.py")

# Make a script executable
file_tool.run(action="chmod", path="workspace/run.sh", mode="755")

# Rename or move a file
file_tool.run(
    action="move",
    path="workspace/draft.md",
    destination="workspace/final.md",
)

# Bulk find-and-replace across multiple files
file_tool.run(
    action="replace",
    paths=["workspace/src/a.py", "workspace/src/b.py"],
    pattern="old_function",
    replacement="new_function",
)

# Quick existence check before a destructive op
file_tool.run(action="exists", path="workspace/cache.db")
```
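
The `find` action returns structured matches rather than raw grep output. A minimal sketch of consuming them, assuming each match carries the `file`, `line`, and `content` fields shown in the tool's result:

```python Code
result = file_tool.run(action="find", path="workspace/src", pattern="TODO:")

# Print matches in the familiar file:line: text format.
for match in result["matches"]:
    print(f"{match['file']}:{match['line']}: {match['content']}")
```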

### Agent integration

```python Code
@@ -161,7 +121,7 @@ coder = Agent(
)

task = Task(
    description="Write a Python script that prints the first 10 Fibonacci numbers, save it to workspace/fib.py, and run it.",
    description="Write a Python script that prints the first 10 Fibonacci numbers, save it to /workspace/fib.py, and run it.",
    expected_output="The first 10 Fibonacci numbers printed to stdout.",
    agent=coder,
)
@@ -208,22 +168,12 @@ All three tools accept these parameters at initialization:

| Parameter | Type | Required | Description |
|-----------|------|----------|-------------|
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`, `exists`, `move`, `find`, `search`, `chmod`, `replace`. |
| `path` | `str \| None` | ✓ for all actions except `replace` | Absolute path inside the sandbox. |
| `content` | `str \| None` | ✓ for `append` | Content to write or append. |
| `action` | `str` | ✓ | One of: `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`. |
| `path` | `str` | ✓ | Absolute path inside the sandbox. |
| `content` | `str \| None` | | Content to write or append. Required for `append`. |
| `binary` | `bool` | | If `True`, `content` is base64 on write; returns base64 on read. |
| `recursive` | `bool` | | For `delete`: remove directories recursively. |
| `mode` | `str \| None` | | For `mkdir`: octal permissions for the new directory (defaults to `"0755"`). For `chmod`: octal permissions to apply to the target. |
| `destination` | `str \| None` | ✓ for `move` | Destination path for `move`. |
| `pattern` | `str \| None` | ✓ for `find`, `search`, `replace` | For `find`: substring matched against file CONTENTS. For `search`: glob matched against file NAMES (e.g. `*.py`). For `replace`: text to replace inside files. |
| `replacement` | `str \| None` | ✓ for `replace` | Replacement text for `pattern`. |
| `paths` | `list[str] \| None` | ✓ for `replace` | List of file paths in which to replace text. |
| `owner` | `str \| None` | | For `chmod`: new file owner. |
| `group` | `str \| None` | | For `chmod`: new file group. |

<Note>
For `chmod`, pass at least one of `mode`, `owner`, or `group` — any field left as `None` is left unchanged on the target.
</Note>
| `mode` | `str` | | For `mkdir`: octal permission string (default `"0755"`). |

<Tip>
For files larger than a few KB, create the file first with `action="write"` and empty content, then send the body via multiple `action="append"` calls of ~4 KB each to stay within tool-call payload limits.
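
A minimal sketch of that chunking strategy, assuming a hypothetical `generate_report()` helper that produces the large string:

```python Code
from crewai_tools import DaytonaFileTool

file_tool = DaytonaFileTool(persistent=True)
report = generate_report()  # hypothetical helper returning a multi-KB string

# One empty write creates the file; ~4 KB appends then stream the body in.
file_tool.run(action="write", path="workspace/report.md", content="")
for start in range(0, len(report), 4096):
    file_tool.run(
        action="append",
        path="workspace/report.md",
        content=report[start:start + 4096],
    )
```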

@@ -8,7 +8,7 @@ authors = [
]
requires-python = ">=3.10, <3.14"
dependencies = [
    "crewai-core==1.14.5a5",
    "crewai-core>=1.14.5a2",
    "click~=8.1.7",
    "pydantic>=2.11.9,<2.13",
    "pydantic-settings~=2.10.1",
@@ -22,8 +22,6 @@ dependencies = [
    "packaging>=23.0",
    "python-dotenv>=1.2.2,<2",
    "uv~=0.11.6",
    "textual>=7.5.0",
    "certifi",
]

[project.urls]

@@ -1 +1 @@
__version__ = "1.14.5a5"
__version__ = "1.14.5a2"

@@ -132,44 +132,19 @@ PROVIDERS: list[str] = [

MODELS: dict[str, list[str]] = {
    "openai": [
        "gpt-5.5",
        "gpt-5.5-pro",
        "gpt-5.4",
        "gpt-5.4-pro",
        "gpt-5.4-mini",
        "gpt-5.4-nano",
        "gpt-5.2",
        "gpt-5.2-pro",
        "gpt-5.1",
        "gpt-5",
        "gpt-5-pro",
        "gpt-5-mini",
        "gpt-5-nano",
        "gpt-4.1",
        "gpt-4.1-mini",
        "gpt-4.1-nano",
        "gpt-4o",
        "gpt-4o-mini",
        "o4-mini",
        "o3",
        "o3-mini",
        "o1",
        "o1-mini",
        "o1-preview",
        "gpt-4",
        "gpt-4.1",
        "gpt-4.1-mini-2025-04-14",
        "gpt-4.1-nano-2025-04-14",
        "gpt-4o",
        "gpt-4o-mini",
        "o1-mini",
        "o1-preview",
    ],
    "anthropic": [
        "claude-opus-4-6",
        "claude-sonnet-4-6",
        "claude-haiku-4-5-20251001",
        "claude-3-7-sonnet-20250219",
        "claude-3-5-sonnet-20241022",
        "claude-3-5-haiku-20241022",
        "claude-3-5-sonnet-20240620",
        "claude-3-opus-20240229",
        "claude-3-sonnet-20240229",
        "claude-3-opus-20240229",
        "claude-3-haiku-20240307",
    ],
    "gemini": [

@@ -1 +1 @@
__version__ = "1.14.5a5"
__version__ = "1.14.5a2"

@@ -152,4 +152,4 @@ __all__ = [
    "wrap_file_source",
]

__version__ = "1.14.5a5"
__version__ = "1.14.5a2"

@@ -10,7 +10,7 @@ requires-python = ">=3.10, <3.14"
dependencies = [
    "pytube~=15.0.0",
    "requests>=2.33.0,<3",
    "crewai==1.14.5a5",
    "crewai==1.14.5a2",
    "tiktoken>=0.8.0,<0.13",
    "beautifulsoup4~=4.13.4",
    "python-docx~=1.2.0",
@@ -107,7 +107,7 @@ stagehand = [
    "stagehand>=0.4.1",
]
github = [
    "gitpython>=3.1.50,<4",
    "gitpython>=3.1.47,<4",
    "PyGithub==1.59.1",
]
rag = [

@@ -330,4 +330,4 @@ __all__ = [
    "ZapierActionTools",
]

__version__ = "1.14.5a5"
__version__ = "1.14.5a2"

@@ -55,11 +55,10 @@ from crewai_tools import DaytonaExecTool, DaytonaFileTool
exec_tool = DaytonaExecTool(persistent=True)
file_tool = DaytonaFileTool(persistent=True)

# Agent writes a script, then runs it — but each tool keeps its OWN persistent
# sandbox. To share the *same* sandbox across two tools, create and use the
# first tool, then read its `active_sandbox_id` and pass it to the second:
# exec_tool.run(command="pip install httpx")
# file_tool = DaytonaFileTool(sandbox_id=exec_tool.active_sandbox_id)
# Agent writes a script, then runs it — both share the same sandbox instance
# because they each keep their own persistent sandbox. If you need the *same*
# sandbox across two tools, create one tool, grab the sandbox id via
# `tool._persistent_sandbox.id`, and pass it to the other via `sandbox_id=...`.
```

### Attach to an existing sandbox
@@ -100,14 +99,9 @@ tool = DaytonaExecTool(
- `timeout: int | None` — seconds.

### `DaytonaFileTool`
- `action`: one of `read`, `write`, `append`, `list`, `delete`, `mkdir`, `info`, `exists`, `move`, `find`, `search`, `chmod`, `replace`.
- `path: str | None` — absolute path inside the sandbox. Required for all actions except `replace`.
- `content: str | None` — required for `append`; optional for `write`.
- `action: "read" | "write" | "list" | "delete" | "mkdir" | "info"`
- `path: str` — absolute path inside the sandbox.
- `content: str | None` — required for `write`.
- `binary: bool` — if `True`, `content` is base64 on write / returned as base64 on read.
- `recursive: bool` — for `delete`, removes directories recursively.
- `mode: str | None` — for `mkdir` (defaults to `"0755"`) or for `chmod` (e.g. `"755"`).
- `destination: str | None` — required for `move`.
- `pattern: str | None` — required for `find` (content grep), `search` (filename glob), and `replace`.
- `replacement: str | None` — required for `replace`.
- `paths: list[str] | None` — required for `replace`; list of files to operate on.
- `owner: str | None` / `group: str | None` — for `chmod`. Pass at least one of `mode`, `owner`, or `group`.
- `mode: str` — for `mkdir`, octal permission string (default `"0755"`).
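
Putting several of those parameters together, a minimal sketch that uses only the arguments listed above:

```python Code
from crewai_tools import DaytonaFileTool

file_tool = DaytonaFileTool(persistent=True)

# move: requires both 'path' and 'destination'
file_tool.run(action="move", path="workspace/a.txt", destination="workspace/b.txt")

# replace: operates on 'paths' (not 'path') plus 'pattern' and 'replacement'
file_tool.run(
    action="replace",
    paths=["workspace/b.txt"],
    pattern="draft",
    replacement="final",
)
```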

@@ -196,27 +196,3 @@ class DaytonaBaseTool(BaseTool):
                "the sandbox may need manual deletion.",
                exc_info=True,
            )

    @property
    def active_sandbox_id(self) -> str | None:
        """The id of the sandbox this tool is currently bound to, if any.

        Returns:
            - the explicitly attached `sandbox_id`, if set at construction;
            - the id of the lazily-created persistent sandbox, once a call has
              triggered creation;
            - None for ephemeral mode (where no sandbox lives between calls).

        Use this to share one sandbox across multiple tool instances:

            exec_tool = DaytonaExecTool(persistent=True)
            exec_tool.run(command="pip install httpx")
            file_tool = DaytonaFileTool(sandbox_id=exec_tool.active_sandbox_id)
        """
        if self.sandbox_id:
            return self.sandbox_id
        with self._lock:
            sandbox = self._persistent_sandbox
            if sandbox is None:
                return None
            return getattr(sandbox, "id", None)

@@ -4,9 +4,7 @@ import base64
from builtins import type as type_
import logging
import posixpath
import shlex
from typing import Any, Literal
import uuid

from pydantic import BaseModel, Field, model_validator

@@ -16,110 +14,22 @@ from crewai_tools.tools.daytona_sandbox_tool.daytona_base_tool import DaytonaBas
logger = logging.getLogger(__name__)


FileAction = Literal[
    "read",
    "write",
    "append",
    "list",
    "delete",
    "mkdir",
    "info",
    "exists",
    "move",
    "find",
    "search",
    "chmod",
    "replace",
]


def _daytona_file_schema_extra(schema: dict[str, Any]) -> None:
    schema["allOf"] = [
        {
            "if": {
                "properties": {
                    "action": {
                        "enum": [
                            "read",
                            "write",
                            "append",
                            "list",
                            "delete",
                            "mkdir",
                            "info",
                            "exists",
                            "move",
                            "find",
                            "search",
                            "chmod",
                        ]
                    }
                }
            },
            "then": {"required": ["path"]},
        },
        {
            "if": {"properties": {"action": {"const": "append"}}},
            "then": {"required": ["content"]},
        },
        {
            "if": {"properties": {"action": {"const": "move"}}},
            "then": {"required": ["destination"]},
        },
        {
            "if": {"properties": {"action": {"enum": ["find", "search"]}}},
            "then": {"required": ["pattern"]},
        },
        {
            "if": {"properties": {"action": {"const": "replace"}}},
            "then": {"required": ["paths", "pattern", "replacement"]},
        },
        {
            "if": {"properties": {"action": {"const": "chmod"}}},
            "then": {
                "anyOf": [
                    {"required": ["mode"]},
                    {"required": ["owner"]},
                    {"required": ["group"]},
                ]
            },
        },
    ]
FileAction = Literal["read", "write", "append", "list", "delete", "mkdir", "info"]


class DaytonaFileToolSchema(BaseModel):
    model_config = {"json_schema_extra": _daytona_file_schema_extra}

    action: FileAction = Field(
        ...,
        description=(
            "The filesystem action to perform: "
            "'read' (returns file contents); "
            "'write' (create or replace a file with content); "
            "'append' (append content to an existing file — use this for "
            "writing large files in chunks to avoid hitting tool-call size "
            "limits); "
            "'list' (lists a directory); "
            "'delete' (removes a file/dir); "
            "'mkdir' (creates a directory); "
            "'info' (returns file metadata); "
            "'exists' (returns whether a path exists); "
            "'move' (rename or relocate a file/dir; requires 'destination'); "
            "'find' (grep file CONTENTS recursively; requires 'pattern'); "
            "'search' (find files by NAME pattern; requires 'pattern'); "
            "'chmod' (change permissions/owner/group; pass at least one of "
            "'mode', 'owner', 'group'); "
            "'replace' (find-and-replace text across files; requires "
            "'paths', 'pattern', and 'replacement')."
        ),
    )
    path: str | None = Field(
        default=None,
        description=(
            "Absolute path inside the sandbox. Required for all actions "
            "except 'replace' (which uses 'paths' instead)."
            "The filesystem action to perform: 'read' (returns file contents), "
            "'write' (create or replace a file with content), 'append' (append "
            "content to an existing file — use this for writing large files in "
            "chunks to avoid hitting tool-call size limits), 'list' (lists a "
            "directory), 'delete' (removes a file/dir), 'mkdir' (creates a "
            "directory), 'info' (returns file metadata)."
        ),
    )
    path: str = Field(..., description="Absolute path inside the sandbox.")
    content: str | None = Field(
        default=None,
        description=(
@@ -140,78 +50,18 @@ class DaytonaFileToolSchema(BaseModel):
        default=False,
        description="For action='delete': remove directories recursively.",
    )
    mode: str | None = Field(
        default=None,
        description=(
            "Octal permission string. For 'mkdir' it sets the new directory "
            "permissions (defaults to '0755' if omitted). For 'chmod' it sets "
            "the target's mode (e.g. '755' to make a script executable). "
            "Ignored for other actions."
        ),
    )
    destination: str | None = Field(
        default=None,
        description="For action='move': absolute destination path.",
    )
    pattern: str | None = Field(
        default=None,
        description=(
            "For 'find': substring matched against file CONTENTS. "
            "For 'search': glob-style pattern matched against file NAMES "
            "(e.g. '*.py'). "
            "For 'replace': text to replace inside files."
        ),
    )
    replacement: str | None = Field(
        default=None,
        description="For action='replace': replacement text for 'pattern'.",
    )
    paths: list[str] | None = Field(
        default=None,
        description=(
            "For action='replace': list of absolute file paths in which to "
            "replace 'pattern' with 'replacement'."
        ),
    )
    owner: str | None = Field(
        default=None,
        description="For action='chmod': new file owner (user name).",
    )
    group: str | None = Field(
        default=None,
        description="For action='chmod': new file group.",
    mode: str = Field(
        default="0755",
        description="For action='mkdir': octal permission string (default 0755).",
    )

    @model_validator(mode="after")
    def _validate_action_args(self) -> DaytonaFileToolSchema:
        if self.action != "replace" and not self.path:
            raise ValueError(f"action={self.action!r} requires 'path'.")
        if self.action == "append" and self.content is None:
            raise ValueError(
                "action='append' requires 'content'. Pass the chunk to append "
                "in the 'content' field."
            )
        if self.action == "move" and not self.destination:
            raise ValueError("action='move' requires 'destination'.")
        if self.action == "find" and not self.pattern:
            raise ValueError(
                "action='find' requires 'pattern' (text to search for inside files)."
            )
        if self.action == "search" and not self.pattern:
            raise ValueError("action='search' requires 'pattern' (glob, e.g. '*.py').")
        if self.action == "chmod" and not (self.mode or self.owner or self.group):
            raise ValueError(
                "action='chmod' requires at least one of 'mode', 'owner', or 'group'."
            )
        if self.action == "replace":
            if not self.paths:
                raise ValueError(
                    "action='replace' requires 'paths' (list of file paths)."
                )
            if not self.pattern:
                raise ValueError("action='replace' requires 'pattern'.")
            if self.replacement is None:
                raise ValueError("action='replace' requires 'replacement'.")
        return self


@@ -225,10 +75,9 @@ class DaytonaFileTool(DaytonaBaseTool):

    name: str = "Daytona Sandbox Files"
    description: str = (
        "Perform filesystem operations inside a Daytona sandbox: read, "
        "write, append, list, delete, mkdir, info, exists, move, find "
        "(content grep), search (filename glob), chmod (permissions/owner/"
        "group), and replace (bulk find-and-replace across files). "
        "Perform filesystem operations inside a Daytona sandbox: read a file, "
        "write content to a path, append content to an existing file, list a "
        "directory, delete a path, make a directory, or fetch file metadata. "
        "For files larger than a few KB, create the file with action='write' "
        "and empty content, then send the body via multiple 'append' calls of "
        "~4KB each to stay within tool-call payload limits."
@@ -238,79 +87,30 @@ class DaytonaFileTool(DaytonaBaseTool):
    def _run(
        self,
        action: FileAction,
        path: str | None = None,
        path: str,
        content: str | None = None,
        binary: bool = False,
        recursive: bool = False,
        mode: str | None = None,
        destination: str | None = None,
        pattern: str | None = None,
        replacement: str | None = None,
        paths: list[str] | None = None,
        owner: str | None = None,
        group: str | None = None,
        mode: str = "0755",
    ) -> Any:
        sandbox, should_delete = self._acquire_sandbox()
        try:
            if action == "read":
                if path is None:
                    raise ValueError("action='read' requires 'path'")
                return self._read(sandbox, path, binary=binary)
            if action == "write":
                if path is None:
                    raise ValueError("action='write' requires 'path'")
                return self._write(sandbox, path, content or "", binary=binary)
            if action == "append":
                if path is None:
                    raise ValueError("action='append' requires 'path'")
                return self._append(sandbox, path, content or "", binary=binary)
            if action == "list":
                if path is None:
                    raise ValueError("action='list' requires 'path'")
                return self._list(sandbox, path)
            if action == "delete":
                if path is None:
                    raise ValueError("action='delete' requires 'path'")
                sandbox.fs.delete_file(path, recursive=recursive)
                return {"status": "deleted", "path": path}
            if action == "mkdir":
                if path is None:
                    raise ValueError("action='mkdir' requires 'path'")
                mkdir_mode = mode or "0755"
                sandbox.fs.create_folder(path, mkdir_mode)
                return {"status": "created", "path": path, "mode": mkdir_mode}
                sandbox.fs.create_folder(path, mode)
                return {"status": "created", "path": path, "mode": mode}
            if action == "info":
                if path is None:
                    raise ValueError("action='info' requires 'path'")
                return self._info(sandbox, path)
            if action == "exists":
                if path is None:
                    raise ValueError("action='exists' requires 'path'")
                return self._exists(sandbox, path)
            if action == "move":
                if path is None or destination is None:
                    raise ValueError("action='move' requires 'path' and 'destination'")
                sandbox.fs.move_files(path, destination)
                return {"status": "moved", "from": path, "to": destination}
            if action == "find":
                if path is None or pattern is None:
                    raise ValueError("action='find' requires 'path' and 'pattern'")
                return self._find(sandbox, path, pattern)
            if action == "search":
                if path is None or pattern is None:
                    raise ValueError("action='search' requires 'path' and 'pattern'")
                return self._search(sandbox, path, pattern)
            if action == "chmod":
                if path is None:
                    raise ValueError("action='chmod' requires 'path'")
                return self._chmod(sandbox, path, mode=mode, owner=owner, group=group)
            if action == "replace":
                if paths is None or pattern is None or replacement is None:
                    raise ValueError(
                        "action='replace' requires 'paths', 'pattern', and "
                        "'replacement'"
                    )
                return self._replace(sandbox, paths, pattern, replacement)
            raise ValueError(f"Unknown action: {action}")
        finally:
            self._release_sandbox(sandbox, should_delete)

@@ -346,46 +146,17 @@ class DaytonaFileTool(DaytonaBaseTool):
    ) -> dict[str, Any]:
        chunk = base64.b64decode(content) if binary else content.encode("utf-8")
        self._ensure_parent_dir(sandbox, path)

        # Server-side `cat >>` keeps this O(chunk_size) per call. The naive
        # download-concat-reupload alternative is O(N^2) in total transfer.
        # /tmp/ is on the sandbox's ephemeral filesystem, not the host.
        temp_path = f"/tmp/.crewai-append-{uuid.uuid4().hex}"  # noqa: S108
        sandbox.fs.upload_file(chunk, temp_path)

        quoted_temp = shlex.quote(temp_path)
        quoted_target = shlex.quote(path)
        response = sandbox.process.exec(
            f"cat {quoted_temp} >> {quoted_target}; "
            f"rc=$?; rm -f {quoted_temp}; exit $rc"
        )

        exit_code = getattr(response, "exit_code", 0)
        if exit_code != 0:
            try:
                sandbox.fs.delete_file(temp_path)
            except Exception:
                logger.debug(
                    "Best-effort temp-file cleanup failed after append "
                    "error; the file may need manual deletion.",
                    exc_info=True,
                )
            raise RuntimeError(
                f"append failed: exit_code={exit_code}, "
                f"output={getattr(response, 'result', '')!r}"
            )

        try:
            info = sandbox.fs.get_file_info(path)
            total_bytes = getattr(info, "size", None)
            existing: bytes = sandbox.fs.download_file(path)
        except Exception:
            total_bytes = None

            existing = b""
        payload = existing + chunk
        sandbox.fs.upload_file(payload, path)
        return {
            "status": "appended",
            "path": path,
            "appended_bytes": len(chunk),
            "total_bytes": total_bytes,
            "total_bytes": len(payload),
        }

    @staticmethod
@@ -419,77 +190,6 @@ class DaytonaFileTool(DaytonaBaseTool):
    def _info(self, sandbox: Any, path: str) -> dict[str, Any]:
        return self._file_info_to_dict(sandbox.fs.get_file_info(path))

    def _exists(self, sandbox: Any, path: str) -> dict[str, Any]:
        try:
            info = sandbox.fs.get_file_info(path)
        except Exception:
            return {"path": path, "exists": False}
        return {
            "path": path,
            "exists": True,
            "is_dir": getattr(info, "is_dir", False),
        }

    def _find(self, sandbox: Any, path: str, pattern: str) -> dict[str, Any]:
        matches = sandbox.fs.find_files(path, pattern)
        return {
            "path": path,
            "pattern": pattern,
            "matches": [
                {
                    "file": getattr(m, "file", None),
                    "line": getattr(m, "line", None),
                    "content": getattr(m, "content", None),
                }
                for m in matches
            ],
        }

    def _search(self, sandbox: Any, path: str, pattern: str) -> dict[str, Any]:
        response = sandbox.fs.search_files(path, pattern)
        files = getattr(response, "files", None) or []
        return {"path": path, "pattern": pattern, "files": list(files)}

    def _chmod(
        self,
        sandbox: Any,
        path: str,
        *,
        mode: str | None,
        owner: str | None,
        group: str | None,
    ) -> dict[str, Any]:
        kwargs: dict[str, str] = {}
        if mode is not None:
            kwargs["mode"] = mode
        if owner is not None:
            kwargs["owner"] = owner
        if group is not None:
            kwargs["group"] = group
        sandbox.fs.set_file_permissions(path, **kwargs)
        return {"status": "permissions_set", "path": path, **kwargs}

    def _replace(
        self,
        sandbox: Any,
        paths: list[str],
        pattern: str,
        replacement: str,
    ) -> dict[str, Any]:
        results = sandbox.fs.replace_in_files(paths, pattern, replacement)
        return {
            "pattern": pattern,
            "replacement": replacement,
            "results": [
                {
                    "file": getattr(r, "file", None),
                    "success": getattr(r, "success", None),
                    "error": getattr(r, "error", None),
                }
                for r in (results or [])
            ],
        }

    @staticmethod
    def _file_info_to_dict(info: Any) -> dict[str, Any]:
        fields = (

@@ -7184,7 +7184,7 @@
      }
    },
    {
      "description": "Perform filesystem operations inside a Daytona sandbox: read, write, append, list, delete, mkdir, info, exists, move, find (content grep), search (filename glob), chmod (permissions/owner/group), and replace (bulk find-and-replace across files). For files larger than a few KB, create the file with action='write' and empty content, then send the body via multiple 'append' calls of ~4KB each to stay within tool-call payload limits.",
      "description": "Perform filesystem operations inside a Daytona sandbox: read a file, write content to a path, append content to an existing file, list a directory, delete a path, make a directory, or fetch file metadata. For files larger than a few KB, create the file with action='write' and empty content, then send the body via multiple 'append' calls of ~4KB each to stay within tool-call payload limits.",
      "env_vars": [
        {
          "default": null,
@@ -7334,127 +7334,9 @@
        "daytona"
      ],
      "run_params_schema": {
        "allOf": [
          {
            "if": {
              "properties": {
                "action": {
                  "enum": [
                    "read",
                    "write",
                    "append",
                    "list",
                    "delete",
                    "mkdir",
                    "info",
                    "exists",
                    "move",
                    "find",
                    "search",
                    "chmod"
                  ]
                }
              }
            },
            "then": {
              "required": [
                "path"
              ]
            }
          },
          {
            "if": {
              "properties": {
                "action": {
                  "const": "append"
                }
              }
            },
            "then": {
              "required": [
                "content"
              ]
            }
          },
          {
            "if": {
              "properties": {
                "action": {
                  "const": "move"
                }
              }
            },
            "then": {
              "required": [
                "destination"
              ]
            }
          },
          {
            "if": {
              "properties": {
                "action": {
                  "enum": [
                    "find",
                    "search"
                  ]
                }
              }
            },
            "then": {
              "required": [
                "pattern"
              ]
            }
          },
          {
            "if": {
              "properties": {
                "action": {
                  "const": "replace"
                }
              }
            },
            "then": {
              "required": [
                "paths",
                "pattern",
                "replacement"
              ]
            }
          },
          {
            "if": {
              "properties": {
                "action": {
                  "const": "chmod"
                }
              }
            },
            "then": {
              "anyOf": [
                {
                  "required": [
                    "mode"
                  ]
                },
                {
                  "required": [
                    "owner"
                  ]
                },
                {
                  "required": [
                    "group"
                  ]
                }
              ]
            }
          }
        ],
        "properties": {
          "action": {
            "description": "The filesystem action to perform: 'read' (returns file contents); 'write' (create or replace a file with content); 'append' (append content to an existing file \u2014 use this for writing large files in chunks to avoid hitting tool-call size limits); 'list' (lists a directory); 'delete' (removes a file/dir); 'mkdir' (creates a directory); 'info' (returns file metadata); 'exists' (returns whether a path exists); 'move' (rename or relocate a file/dir; requires 'destination'); 'find' (grep file CONTENTS recursively; requires 'pattern'); 'search' (find files by NAME pattern; requires 'pattern'); 'chmod' (change permissions/owner/group; pass at least one of 'mode', 'owner', 'group'); 'replace' (find-and-replace text across files; requires 'paths', 'pattern', and 'replacement').",
            "description": "The filesystem action to perform: 'read' (returns file contents), 'write' (create or replace a file with content), 'append' (append content to an existing file \u2014 use this for writing large files in chunks to avoid hitting tool-call size limits), 'list' (lists a directory), 'delete' (removes a file/dir), 'mkdir' (creates a directory), 'info' (returns file metadata).",
            "enum": [
              "read",
              "write",
@@ -7462,13 +7344,7 @@
              "list",
              "delete",
              "mkdir",
              "info",
              "exists",
              "move",
              "find",
              "search",
              "chmod",
              "replace"
              "info"
            ],
            "title": "Action",
            "type": "string"
@@ -7492,122 +7368,27 @@
            "description": "Content to write or append. If omitted for 'write', an empty file is created. For files larger than a few KB, prefer one 'write' with empty content followed by multiple 'append' calls of ~4KB each to stay within tool-call payload limits.",
            "title": "Content"
          },
          "destination": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "description": "For action='move': absolute destination path.",
            "title": "Destination"
          },
          "group": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "description": "For action='chmod': new file group.",
            "title": "Group"
          },
          "mode": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "description": "Octal permission string. For 'mkdir' it sets the new directory permissions (defaults to '0755' if omitted). For 'chmod' it sets the target's mode (e.g. '755' to make a script executable). Ignored for other actions.",
            "title": "Mode"
          },
          "owner": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "description": "For action='chmod': new file owner (user name).",
            "title": "Owner"
            "default": "0755",
            "description": "For action='mkdir': octal permission string (default 0755).",
            "title": "Mode",
            "type": "string"
          },
          "path": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "description": "Absolute path inside the sandbox. Required for all actions except 'replace' (which uses 'paths' instead).",
            "title": "Path"
          },
          "paths": {
            "anyOf": [
              {
                "items": {
                  "type": "string"
                },
                "type": "array"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "description": "For action='replace': list of absolute file paths in which to replace 'pattern' with 'replacement'.",
            "title": "Paths"
          },
          "pattern": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "description": "For 'find': substring matched against file CONTENTS. For 'search': glob-style pattern matched against file NAMES (e.g. '*.py'). For 'replace': text to replace inside files.",
            "title": "Pattern"
            "description": "Absolute path inside the sandbox.",
            "title": "Path",
            "type": "string"
          },
          "recursive": {
            "default": false,
            "description": "For action='delete': remove directories recursively.",
            "title": "Recursive",
            "type": "boolean"
          },
          "replacement": {
            "anyOf": [
              {
                "type": "string"
              },
              {
                "type": "null"
              }
            ],
            "default": null,
            "description": "For action='replace': replacement text for 'pattern'.",
            "title": "Replacement"
          }
        },
        "required": [
          "action"
          "action",
          "path"
        ],
        "title": "DaytonaFileToolSchema",
        "type": "object"

@@ -8,8 +8,8 @@ authors = [
]
requires-python = ">=3.10, <3.14"
dependencies = [
    "crewai-core==1.14.5a5",
    "crewai-cli==1.14.5a5",
    "crewai-core==1.14.5a2",
    "crewai-cli==1.14.5a2",
    # Core Dependencies
    "pydantic>=2.11.9,<2.13",
    "openai>=2.30.0,<3",
@@ -28,6 +28,8 @@ dependencies = [
    # Authentication and Security
    "python-dotenv>=1.2.2,<2",
    "pyjwt>=2.9.0,<3",
    # TUI
    "textual>=7.5.0",
    # Configuration and Utils
    "click~=8.1.7",
    "appdirs~=1.4.4",
@@ -54,7 +56,7 @@ Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = [
    "crewai-tools==1.14.5a5",
    "crewai-tools==1.14.5a2",
]
embeddings = [
    "tiktoken>=0.8.0,<0.13"
@@ -65,7 +67,7 @@ pandas = [
openpyxl = [
    "openpyxl~=3.1.5",
]
mem0 = ["mem0ai>=2.0.0,<3"]
mem0 = ["mem0ai~=0.1.94"]
docling = [
    "docling~=2.84.0",
]
@@ -105,7 +107,7 @@ a2a = [
    "aiocache[redis,memcached]~=0.12.3",
]
file-processing = [
    "crewai-files==1.14.5a5",
    "crewai-files",
]
qdrant-edge = [
    "qdrant-edge-py>=0.6.0",

@@ -48,7 +48,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:

_suppress_pydantic_deprecation_warnings()

__version__ = "1.14.5a5"
__version__ = "1.14.5a2"

_LAZY_IMPORTS: dict[str, tuple[str, str]] = {
    "Memory": ("crewai.memory.unified_memory", "Memory"),

@@ -7,7 +7,6 @@ from collections.abc import Callable, Coroutine, Sequence
import concurrent.futures
import contextvars
from datetime import datetime
import inspect
import json
import os
from pathlib import Path
@@ -36,11 +35,13 @@ from typing_extensions import Self, TypeIs
from crewai.agent.planning_config import PlanningConfig
from crewai.agent.utils import (
    ahandle_knowledge_retrieval,
    append_skill_context,
    apply_training_data,
    build_task_prompt_with_schema,
    format_task_with_context,
    get_knowledge_config,
    handle_knowledge_retrieval,
    handle_reasoning,
    prepare_tools,
    process_tool_results,
    save_last_messages,
@@ -149,17 +150,7 @@ def _validate_executor_class(value: Any) -> Any:
        cls = _EXECUTOR_CLASS_MAP.get(value)
        if cls is None:
            raise ValueError(f"Unknown executor class: {value}")
        value = cls
    import warnings

    if value is CrewAgentExecutor:
        warnings.warn(
            "CrewAgentExecutor is deprecated and will be removed in a future release. "
            "Agents inside Crews now use AgentExecutor by default. "
            "Switch to crewai.experimental.AgentExecutor.",
            DeprecationWarning,
            stacklevel=3,
        )
        return cls
    return value


@@ -334,8 +325,8 @@ class Agent(BaseAgent):
        BeforeValidator(_validate_executor_class),
        PlainSerializer(_serialize_executor_class, return_type=str, when_used="json"),
    ] = Field(
        default=AgentExecutor,
        description="Class to use for the agent executor. Defaults to AgentExecutor, can optionally use CrewAgentExecutor.",
        default=CrewAgentExecutor,
        description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use AgentExecutor.",
    )

    @model_validator(mode="before")
@@ -521,6 +512,8 @@ class Agent(BaseAgent):
            The task prompt after memory retrieval, ready for knowledge lookup.
        """
        get_env_context()
        if self.executor_class is not AgentExecutor:
            handle_reasoning(self, task)

        self._inject_date_to_task(task)

@@ -548,6 +541,7 @@ class Agent(BaseAgent):
        Returns:
            The fully prepared task prompt.
        """
        task_prompt = append_skill_context(self, task_prompt)
        prepare_tools(self, tools, task)

        return apply_training_data(self, task_prompt)

@@ -849,22 +843,18 @@ class Agent(BaseAgent):
        if not self.agent_executor:
            raise RuntimeError("Agent executor is not initialized.")

        invoke_result = self.agent_executor.invoke(
            {
                "input": task_prompt,
                "tool_names": self.agent_executor.tools_names,
                "tools": self.agent_executor.tools_description,
                "ask_for_human_input": task.human_input,
            }
        result = cast(
            dict[str, Any],
            self.agent_executor.invoke(
                {
                    "input": task_prompt,
                    "tool_names": self.agent_executor.tools_names,
                    "tools": self.agent_executor.tools_description,
                    "ask_for_human_input": task.human_input,
                }
            ),
        )
        if inspect.isawaitable(invoke_result):
            invoke_result.close()
            raise RuntimeError(
                "Agent execution was invoked synchronously from within a running "
                "event loop. Use `agent.kickoff_async()` / `crew.kickoff_async()` "
                "(or `await agent.aexecute_task(...)`) when calling from async code."
            )
        return invoke_result["output"]
        return result["output"]

    async def aexecute_task(
        self,
@@ -1484,6 +1474,8 @@ class Agent(BaseAgent):
            ),
        )

        formatted_messages = append_skill_context(self, formatted_messages)

        inputs: dict[str, Any] = {
            "input": formatted_messages,
            "tool_names": get_tool_names(parsed_tools),

@@ -213,6 +213,30 @@ def _combine_knowledge_context(agent: Agent) -> str:
    return agent_ctx + separator + crew_ctx


def append_skill_context(agent: Agent, task_prompt: str) -> str:
    """Append activated skill context sections to the task prompt.

    Args:
        agent: The agent with optional skills.
        task_prompt: The current task prompt.

    Returns:
        The task prompt with skill context appended.
    """
    if not agent.skills:
        return task_prompt

    from crewai.skills.loader import format_skill_context
    from crewai.skills.models import Skill

    skill_sections = [
        format_skill_context(s) for s in agent.skills if isinstance(s, Skill)
    ]
    if skill_sections:
        task_prompt += "\n\n" + "\n\n".join(skill_sections)
    return task_prompt


def apply_training_data(agent: Agent, task_prompt: str) -> str:
    """Apply training data to the task prompt.


@@ -1,28 +1,13 @@
from typing import TYPE_CHECKING, Any

from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.parser import AgentAction, AgentFinish, OutputParserError, parse
from crewai.agents.tools_handler import ToolsHandler


if TYPE_CHECKING:
    from crewai.agents.crew_agent_executor import CrewAgentExecutor


__all__ = [
    "AgentAction",
    "AgentFinish",
    "CacheHandler",
    "CrewAgentExecutor",
    "OutputParserError",
    "ToolsHandler",
    "parse",
]


def __getattr__(name: str) -> Any:
    if name == "CrewAgentExecutor":
        from crewai.agents.crew_agent_executor import CrewAgentExecutor

        return CrewAgentExecutor
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

@@ -14,7 +14,6 @@ import contextvars
import inspect
import logging
from typing import TYPE_CHECKING, Annotated, Any, Literal, cast
import warnings

from crewai_core.printer import PRINTER
from pydantic import (
@@ -139,13 +138,6 @@ class CrewAgentExecutor(BaseAgentExecutor):

    def __init__(self, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        warnings.warn(
            "CrewAgentExecutor is deprecated and will be removed in a future release.\n"
            "Agents inside Crews now use AgentExecutor (crewai.experimental.AgentExecutor) by default.\n"
            "To suppress this warning, migrate to AgentExecutor.",
            DeprecationWarning,
            stacklevel=2,
        )
        if not self.before_llm_call_hooks:
            self.before_llm_call_hooks.extend(get_before_llm_call_hooks())
        if not self.after_llm_call_hooks:
@@ -174,8 +166,6 @@ class CrewAgentExecutor(BaseAgentExecutor):
        if provider.setup_messages(cast(ExecutorContext, cast(object, self))):
            return

        from crewai.llms.cache import mark_cache_breakpoint

        if self.prompt is not None and "system" in self.prompt:
            system_prompt = self._format_prompt(
                cast(str, self.prompt.get("system", "")), inputs
@@ -183,22 +173,11 @@ class CrewAgentExecutor(BaseAgentExecutor):
            user_prompt = self._format_prompt(
                cast(str, self.prompt.get("user", "")), inputs
            )
            # Cache breakpoints: end-of-system caches the per-agent stable
            # prefix; end-of-user caches the per-task stable prefix across
            # ReAct-loop iterations.
            self.messages.append(
                mark_cache_breakpoint(
                    format_message_for_llm(system_prompt, role="system")
                )
            )
            self.messages.append(
                mark_cache_breakpoint(format_message_for_llm(user_prompt))
            )
            self.messages.append(format_message_for_llm(system_prompt, role="system"))
            self.messages.append(format_message_for_llm(user_prompt))
        elif self.prompt is not None:
            user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
            self.messages.append(
                mark_cache_breakpoint(format_message_for_llm(user_prompt))
            )
            self.messages.append(format_message_for_llm(user_prompt))

        provider.post_setup_messages(cast(ExecutorContext, cast(object, self)))


@@ -156,6 +156,36 @@ def _resolve_agents(value: Any, info: Any) -> Any:
    return [_resolve_agent(a, info) for a in value]


def _mcp_label(mcp: Any) -> str:
    if isinstance(mcp, str):
        return mcp
    url = getattr(mcp, "url", None)
    if url:
        return str(url)
    command = getattr(mcp, "command", None)
    if command:
        return str(command)
    return type(mcp).__name__


def _app_placeholder(app: Any) -> str:
    raw = app if isinstance(app, str) else str(app)
    if "#" in raw:
        base, action = raw.split("#", 1)
        return f"app:{base}:{action}"
    return f"app:{raw}:*"


def _dedupe_preserve_order(values: list[str]) -> list[str]:
    seen: set[str] = set()
    out: list[str] = []
    for v in values:
        if v not in seen:
            seen.add(v)
            out.append(v)
    return out


class Crew(FlowTrackable, BaseModel):
    """
    Represents a group of agents, defining how they should collaborate and the
@@ -1927,6 +1957,81 @@ class Crew(FlowTrackable, BaseModel):

        return required_inputs

    def list_tools(self) -> dict[str, list[str]]:
        """Enumerate tool names available to each agent in this Crew.

        Mirrors the runtime tool resolution in ``_prepare_tools`` without
        performing any I/O: tools sourced from external services (MCP
        servers, platform apps) are returned as ``"mcp:<id>:*"`` /
        ``"app:<id>[:action]"`` placeholders since their concrete tool
        names require live fetches.

        Returns:
            Mapping of agent role to a deduplicated list of tool names.
            In hierarchical mode the manager agent is included as an
            extra entry keyed by its role.
        """
        tasks_by_agent: dict[int, list[Task]] = {}
        for task in self.tasks:
            if task.agent is not None:
                tasks_by_agent.setdefault(id(task.agent), []).append(task)

        result: dict[str, list[str]] = {}

        for agent in self.agents:
            candidates: list[str] = [tool.name for tool in agent.tools or []]

            for task in tasks_by_agent.get(id(agent), []):
                candidates.extend(tool.name for tool in task.tools or [])

            if (
                self.process != Process.hierarchical
                and getattr(agent, "allow_delegation", False)
                and len(self.agents) > 1
            ):
                candidates.append("Delegate work to coworker")
                candidates.append("Ask question to coworker")

            if getattr(agent, "multimodal", False):
                llm = getattr(agent, "llm", None)
                if not (isinstance(llm, BaseLLM) and llm.supports_multimodal()):
                    candidates.append("Add image to content")

            if getattr(agent, "memory", None) is not None or self._memory is not None:
                candidates.append("Search memory")
                candidates.append("Save to memory")

            candidates.extend(
                f"mcp:{_mcp_label(mcp)}:*" for mcp in getattr(agent, "mcps", None) or []
            )
            candidates.extend(
                _app_placeholder(app) for app in getattr(agent, "apps", None) or []
            )

            for task in tasks_by_agent.get(id(agent), []):
                if get_all_files(self.id, task.id):
                    candidates.append("read_file")
                    break

            result[agent.role] = _dedupe_preserve_order(candidates)

        if self.process == Process.hierarchical:
            if self.manager_agent is not None:
                mgr_candidates: list[str] = [
                    tool.name for tool in self.manager_agent.tools or []
                ]
                mgr_role = self.manager_agent.role
            else:
                mgr_candidates = []
                mgr_role = get_i18n(prompt_file=self.prompt_file).retrieve(
                    "hierarchical_manager_agent", "role"
                )
            mgr_candidates.append("Delegate work to coworker")
            mgr_candidates.append("Ask question to coworker")
            result[mgr_role] = _dedupe_preserve_order(mgr_candidates)

        return result
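
    # A minimal usage sketch (hypothetical agent roles and tool names; the
    # exact output depends on the Crew's configuration):
    #
    #     crew = Crew(agents=[researcher, writer], tasks=[research, write])
    #     crew.list_tools()
    #     # -> {"Researcher": ["Search the web", "Delegate work to coworker",
    #     #                    "Ask question to coworker"],
    #     #     "Writer": ["read_file"]}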

    def copy(self) -> Crew:  # type: ignore[override]
        """
        Creates a deep copy of the Crew instance.

@@ -1191,13 +1191,6 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor):
    @router("force_final_answer")
    def ensure_force_final_answer(self) -> Literal["agent_finished"]:
        """Force agent to provide final answer when max iterations exceeded."""
        # The flow framework can route here more than once per execution when the
        # "initialized" label is emitted by both initialize_reasoning and
        # increment_and_continue in the same listener pass. Skip the extra LLM
        # round-trip once we've already produced a forced final answer.
        if self.state.is_finished:
            return "agent_finished"

        formatted_answer = handle_max_iterations_exceeded(
            formatted_answer=None,
            printer=PRINTER,
@@ -2586,26 +2579,16 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor):
        self._kickoff_input = inputs.get("input", "")

        if "system" in self.prompt:
            from crewai.llms.cache import mark_cache_breakpoint

            prompt = cast("SystemPromptResult", self.prompt)
            system_prompt = self._format_prompt(prompt["system"], inputs)
            user_prompt = self._format_prompt(prompt["user"], inputs)
            self.state.messages.append(
                mark_cache_breakpoint(
                    format_message_for_llm(system_prompt, role="system")
                )
            )
            self.state.messages.append(
                mark_cache_breakpoint(format_message_for_llm(user_prompt))
                format_message_for_llm(system_prompt, role="system")
            )
            self.state.messages.append(format_message_for_llm(user_prompt))
        else:
            from crewai.llms.cache import mark_cache_breakpoint

            user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
            self.state.messages.append(
                mark_cache_breakpoint(format_message_for_llm(user_prompt))
            )
            self.state.messages.append(format_message_for_llm(user_prompt))

        self._inject_files_from_inputs(inputs)

@@ -2687,26 +2670,16 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor):
        self._kickoff_input = inputs.get("input", "")

        if "system" in self.prompt:
            from crewai.llms.cache import mark_cache_breakpoint

            prompt = cast("SystemPromptResult", self.prompt)
            system_prompt = self._format_prompt(prompt["system"], inputs)
            user_prompt = self._format_prompt(prompt["user"], inputs)
            self.state.messages.append(
                mark_cache_breakpoint(
                    format_message_for_llm(system_prompt, role="system")
                )
            )
            self.state.messages.append(
                mark_cache_breakpoint(format_message_for_llm(user_prompt))
                format_message_for_llm(system_prompt, role="system")
            )
            self.state.messages.append(format_message_for_llm(user_prompt))
        else:
            from crewai.llms.cache import mark_cache_breakpoint

            user_prompt = self._format_prompt(self.prompt["prompt"], inputs)
            self.state.messages.append(
                mark_cache_breakpoint(format_message_for_llm(user_prompt))
            )
            self.state.messages.append(format_message_for_llm(user_prompt))

        self._inject_files_from_inputs(inputs)


@@ -1864,6 +1864,34 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta):
        except (AttributeError, TypeError):
            return ""  # Safely handle any unexpected attribute access issues

    def list_tools(self) -> dict[str, dict[str, list[str]]]:
        """Enumerate tools available across the Crews attached to this Flow.

        Inspects public instance attributes for :class:`~crewai.crew.Crew`
        instances and lists/tuples of Crews, delegating to
        :meth:`Crew.list_tools` for each. Crews instantiated lazily inside
        flow methods are not discovered — to opt them in, store them as
        instance attributes (typically in ``__init__``).

        Returns:
            Mapping of crew identifier (the attribute name, with
            ``[index]`` suffix when stored in a list/tuple) to that
            Crew's ``list_tools()`` output.
        """
        from crewai.crew import Crew

        result: dict[str, dict[str, list[str]]] = {}
        for attr_name, value in vars(self).items():
            if attr_name.startswith("_"):
                continue
            if isinstance(value, Crew):
                result[attr_name] = value.list_tools()
            elif isinstance(value, (list, tuple)):
                for i, item in enumerate(value):
                    if isinstance(item, Crew):
                        result[f"{attr_name}[{i}]"] = item.list_tools()
        return result
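
    # A minimal usage sketch (hypothetical flow and crew names):
    #
    #     class ResearchFlow(Flow):
    #         def __init__(self):
    #             super().__init__()
    #             self.research_crew = build_research_crew()  # hypothetical factory
    #
    #     ResearchFlow().list_tools()
    #     # -> {"research_crew": {"Researcher": ["Search the web", ...]}}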

    def _initialize_state(self, inputs: dict[str, Any]) -> None:
        """Initialize or update flow state with new inputs.


@@ -60,7 +60,6 @@ from collections.abc import Callable, Sequence
from dataclasses import dataclass, field
from datetime import datetime
from functools import wraps
import logging
from typing import TYPE_CHECKING, Any, TypeVar

from pydantic import BaseModel, Field
@@ -74,8 +73,6 @@ if TYPE_CHECKING:
    from crewai.llms.base_llm import BaseLLM


logger = logging.getLogger(__name__)

F = TypeVar("F", bound=Callable[..., Any])


@@ -191,7 +188,6 @@ class HumanFeedbackConfig:
    provider: HumanFeedbackProvider | None = None
    learn: bool = False
    learn_source: str = "hitl"
    learn_strict: bool = False


class HumanFeedbackMethod(FlowMethod[Any, Any]):
@@ -241,7 +237,6 @@ def human_feedback(
    provider: HumanFeedbackProvider | None = None,
    learn: bool = False,
    learn_source: str = "hitl",
    learn_strict: bool = False,
) -> Callable[[F], F]:
    """Decorator for Flow methods that require human feedback.

@@ -280,14 +275,6 @@ def human_feedback(
            external systems like Slack, Teams, or webhooks. When the
            provider raises HumanFeedbackPending, the flow pauses and
            can be resumed later with Flow.resume().
        learn: Enable HITL learning. Recall past lessons to pre-review
            output before the human sees it, and distill new lessons
            from feedback after.
        learn_source: Memory source tag for stored/recalled lessons.
        learn_strict: When True, re-raise exceptions from the pre-review
            and distillation steps instead of falling back to raw output.
            Default False preserves graceful degradation; failures are
            always logged via ``logger.warning`` regardless of this flag.

    Returns:
        A decorator function that wraps the method with human feedback
@@ -417,19 +404,7 @@ def human_feedback(
                reviewed = llm_inst.call(messages)
                return reviewed if isinstance(reviewed, str) else str(reviewed)
            except Exception:
                if learn_strict:
                    logger.warning(
                        "HITL pre-review failed for %s; re-raising (learn_strict=True)",
                        func.__name__,
                        exc_info=True,
                    )
                    raise
                logger.warning(
                    "HITL pre-review failed for %s; falling back to raw output",
                    func.__name__,
                    exc_info=True,
                )
                return method_output
                return method_output  # fallback to raw output on any failure

        def _distill_and_store_lessons(
            flow_instance: Flow[Any], method_output: Any, raw_feedback: str
@@ -471,19 +446,8 @@ def human_feedback(

                if lessons:
                    mem.remember_many(lessons, source=learn_source)  # type: ignore[union-attr]
            except Exception:
                if learn_strict:
                    logger.warning(
                        "HITL lesson distillation failed for %s; re-raising (learn_strict=True)",
                        func.__name__,
                        exc_info=True,
                    )
                    raise
                logger.warning(
                    "HITL lesson distillation failed for %s; no lessons stored",
                    func.__name__,
                    exc_info=True,
                )
            except Exception:  # noqa: S110
                pass  # non-critical: don't fail the flow because lesson storage failed

        # -- Core feedback helpers ------------------------------------

@@ -690,7 +654,6 @@ def human_feedback(
            provider=provider,
            learn=learn,
            learn_source=learn_source,
            learn_strict=learn_strict,
        )
        wrapper.__is_flow_method__ = True


@@ -14,7 +14,7 @@ from datetime import datetime
import json
import logging
import re
from typing import TYPE_CHECKING, Any, Final, Literal, cast
from typing import TYPE_CHECKING, Any, Final, Literal
import uuid

from pydantic import (
@@ -703,19 +703,10 @@ class BaseLLM(BaseModel, ABC):
        Raises:
            ValueError: If message format is invalid
        """
        from crewai.llms.cache import CACHE_BREAKPOINT_KEY
        from crewai.utilities.types import LLMMessage as _LLMMessage

        if isinstance(messages, str):
            return [{"role": "user", "content": messages}]

        # Validate then copy each message, dropping the cache-breakpoint
        # flag in the copy only. The caller (e.g. CrewAgentExecutor,
        # experimental.AgentExecutor) reuses its messages buffer across
        # many LLM calls in the tool-use loop; mutating their dicts
        # in place would erase the markers after the first call and
        # break prompt caching for every subsequent iteration.
        cleaned: list[LLMMessage] = []
        # Validate message format
        for i, msg in enumerate(messages):
            if not isinstance(msg, dict):
                raise ValueError(f"Message at index {i} must be a dictionary")
@@ -723,12 +714,8 @@ class BaseLLM(BaseModel, ABC):
                raise ValueError(
                    f"Message at index {i} must have 'role' and 'content' keys"
                )
            copy: dict[str, Any] = {
                k: v for k, v in msg.items() if k != CACHE_BREAKPOINT_KEY
            }
            cleaned.append(cast(_LLMMessage, copy))

        return self._process_message_files(cleaned)
        return self._process_message_files(messages)

    def _process_message_files(self, messages: list[LLMMessage]) -> list[LLMMessage]:
        """Process files attached to messages and format for the provider.

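A self-contained sketch of the copy-not-mutate contract described in the comment above; every name here is local to the sketch except the key's value, which matches the CACHE_BREAKPOINT_KEY used in this diff.

# Editorial sketch — not part of the diff.
CACHE_BREAKPOINT_KEY = "cache_breakpoint"

def format_once(messages: list[dict]) -> list[dict]:
    # Return cleaned copies; never pop the flag from the caller's dicts.
    return [
        {k: v for k, v in m.items() if k != CACHE_BREAKPOINT_KEY}
        for m in messages
    ]

buffer = [{"role": "user", "content": "stable", CACHE_BREAKPOINT_KEY: True}]
for _ in range(3):  # an executor reuses the same buffer across iterations
    cleaned = format_once(buffer)
    assert CACHE_BREAKPOINT_KEY not in cleaned[0]
assert buffer[0][CACHE_BREAKPOINT_KEY] is True  # marker survives every call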
@@ -1,37 +0,0 @@
"""Provider-agnostic prompt-cache breakpoint marker.

Application code (prompt builders, agent executors) marks messages where a
stable prefix ends. Provider adapters then translate the marker into the
cache directive their API expects, or strip it for providers that cache
implicitly (OpenAI, Gemini) or do not cache at all.

Usage:

    from crewai.llms.cache import mark_cache_breakpoint

    messages = [
        mark_cache_breakpoint({"role": "system", "content": stable_system}),
        mark_cache_breakpoint({"role": "user", "content": stable_user_prefix}),
        {"role": "user", "content": volatile_query},
    ]
"""

from __future__ import annotations

from typing import Any


CACHE_BREAKPOINT_KEY = "cache_breakpoint"


def mark_cache_breakpoint(message: dict[str, Any]) -> dict[str, Any]:
    """Return ``message`` with the cache-breakpoint flag set.

    Returns a new dict so callers can safely pass literal dicts.
    """
    return {**message, CACHE_BREAKPOINT_KEY: True}


def strip_cache_breakpoint(message: dict[str, Any]) -> None:
    """Remove the breakpoint flag from a message in place."""
    message.pop(CACHE_BREAKPOINT_KEY, None)
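To illustrate the module's contract, a hedged sketch of how a provider adapter might translate the marker into an Anthropic-style cache_control block; to_provider_format is hypothetical, and only the "cache_breakpoint" key comes from the module above.

# Editorial sketch — not part of the diff; function name is hypothetical.
from typing import Any

def to_provider_format(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    out: list[dict[str, Any]] = []
    for m in messages:
        # Drop the private flag so it never reaches the provider API.
        clean = {k: v for k, v in m.items() if k != "cache_breakpoint"}
        if m.get("cache_breakpoint"):
            # Translate the marker into the provider's cache directive.
            clean["content"] = [
                {
                    "type": "text",
                    "text": str(m["content"]),
                    "cache_control": {"type": "ephemeral"},
                }
            ]
        out.append(clean)
    return out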
@@ -425,7 +425,7 @@ class AnthropicCompletion(BaseLLM):
    def _prepare_completion_params(
        self,
        messages: list[LLMMessage],
        system_message: str | list[dict[str, Any]] | None = None,
        system_message: str | None = None,
        tools: list[dict[str, Any]] | None = None,
        available_functions: dict[str, Any] | None = None,
    ) -> dict[str, Any]:
@@ -665,7 +665,7 @@ class AnthropicCompletion(BaseLLM):

    def _format_messages_for_anthropic(
        self, messages: str | list[LLMMessage]
    ) -> tuple[list[LLMMessage], str | list[dict[str, Any]] | None]:
    ) -> tuple[list[LLMMessage], str | None]:
        """Format messages for Anthropic API.

        Anthropic has specific requirements:
@@ -679,51 +679,8 @@ class AnthropicCompletion(BaseLLM):
            messages: Input messages

        Returns:
            Tuple of (formatted_messages, system_message). `system_message` is
            a list of content blocks (with cache_control stamped) when any
            system message in the input carried a cache_breakpoint flag;
            otherwise a plain string for backwards compatibility.
            Tuple of (formatted_messages, system_message)
        """
        from crewai.llms.cache import CACHE_BREAKPOINT_KEY

        # Read cache_breakpoint flags from raw input BEFORE super strips them.
        # We track the CONTENT of marked user/assistant messages so we can
        # locate the corresponding block in formatted_messages — Anthropic
        # rewrites tool results into user messages, so positional indices
        # do not survive the conversion. We must stamp the original stable
        # message (typically the initial task prompt), not whatever happens
        # to be the trailing user-role block after tool_result expansion.
        cache_system = False
        cache_match_contents: list[str] = []
        if not isinstance(messages, str):
            for m in messages:
                if not (isinstance(m, dict) and m.get(CACHE_BREAKPOINT_KEY)):
                    continue
                role = m.get("role")
                if role == "system":
                    cache_system = True
                    continue
                if role != "user":
                    # Only user messages survive Anthropic's role-coalescing
                    # in a stable, addressable position. Markers on assistant
                    # or tool messages have no reliable stamp target after
                    # tool_result expansion, so we ignore them.
                    continue
                raw_content = m.get("content")
                if isinstance(raw_content, str) and raw_content:
                    cache_match_contents.append(raw_content)
                    continue
                if isinstance(raw_content, list):
                    # Pull text from a single-text-block list so callers that
                    # pre-format content blocks still match cleanly.
                    text_blocks = [
                        b.get("text")
                        for b in raw_content
                        if isinstance(b, dict) and b.get("type") == "text"
                    ]
                    if len(text_blocks) == 1 and isinstance(text_blocks[0], str):
                        cache_match_contents.append(text_blocks[0])

        # Use base class formatting first
        base_formatted = super()._format_messages(messages)

@@ -831,62 +788,7 @@ class AnthropicCompletion(BaseLLM):
            # If first message is not from user, insert a user message at the beginning
            formatted_messages.insert(0, {"role": "user", "content": "Hello"})

        # Stamp cache_control on the message(s) whose original content was
        # marked. We scan formatted_messages in order and stamp the first
        # match per marked content — Anthropic permits up to 4 cache
        # breakpoints per request, which is more than enough for our usage.
        # Matching by content (rather than position) handles the ReAct
        # case where tool_result blocks get expanded into trailing user
        # messages: the stable initial-task prompt still maps cleanly.
        for needle in cache_match_contents:
            for fm in formatted_messages:
                if fm.get("role") != "user":
                    continue
                content = fm.get("content")
                if isinstance(content, str) and content == needle:
                    self._stamp_cache_control_on_message(fm)
                    break
                if isinstance(content, list):
                    fm_texts: list[str] = [
                        b.get("text", "")
                        for b in content
                        if isinstance(b, dict) and b.get("type") == "text"
                    ]
                    if len(fm_texts) == 1 and fm_texts[0] == needle:
                        self._stamp_cache_control_on_message(fm)
                        break

        # Convert system to content-block form when caching is requested.
        system_payload: str | list[dict[str, Any]] | None = system_message
        if system_message and cache_system:
            system_payload = [
                {
                    "type": "text",
                    "text": system_message,
                    "cache_control": {"type": "ephemeral"},
                }
            ]

        return formatted_messages, system_payload

    @staticmethod
    def _stamp_cache_control_on_message(message: LLMMessage) -> None:
        """Stamp cache_control on the last content block of an Anthropic message."""
        msg = cast(dict[str, Any], message)
        content = msg.get("content")
        if isinstance(content, str):
            msg["content"] = [
                {
                    "type": "text",
                    "text": content,
                    "cache_control": {"type": "ephemeral"},
                }
            ]
            return
        if isinstance(content, list) and content:
            last = content[-1]
            if isinstance(last, dict):
                last["cache_control"] = {"type": "ephemeral"}
        return formatted_messages, system_message

    def _handle_completion(
        self,

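A small sketch of why the stamping above matches by content rather than by position: after Anthropic-style formatting, tool results are rewritten into trailing user messages, so the marked prompt's index shifts. The "after" shape below is illustrative, not the adapter's exact output.

# Editorial sketch — not part of the diff.
after = [
    {"role": "user", "content": "stable task prompt"},
    {"role": "assistant", "content": [{"type": "tool_use", "id": "tc_1", "name": "ping", "input": {}}]},
    {"role": "user", "content": [{"type": "tool_result", "tool_use_id": "tc_1", "content": "volatile"}]},
]
# "Stamp the last user message" would hit the volatile tool_result block;
# matching on the marked content finds the stable prompt instead.
target = next(
    m for m in after
    if m["role"] == "user" and m.get("content") == "stable task prompt"
)
assert target is after[0]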
@@ -161,9 +161,6 @@ def format_skill_context(skill: Skill) -> str:
    At METADATA level: returns name and description only.
    At INSTRUCTIONS level or above: returns full SKILL.md body.

    Output is wrapped in <skill name="..."> XML tags so the block can serve
    as a stable cache anchor when injected into the system prompt.

    Args:
        skill: The skill to format.

@@ -172,7 +169,7 @@ def format_skill_context(skill: Skill) -> str:
    """
    if skill.disclosure_level >= INSTRUCTIONS and skill.instructions:
        parts = [
            f'<skill name="{skill.name}">',
            f"## Skill: {skill.name}",
            skill.description,
            "",
            skill.instructions,
@@ -183,6 +180,5 @@ def format_skill_context(skill: Skill) -> str:
        for dir_name, files in sorted(skill.resource_files.items()):
            if files:
                parts.append(f"- **{dir_name}/**: {', '.join(files)}")
        parts.append("</skill>")
        return "\n".join(parts)
    return f'<skill name="{skill.name}">\n{skill.description}\n</skill>'
    return f"## Skill: {skill.name}\n{skill.description}"

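A quick illustration of the METADATA-level rendering added above, using a stand-in object rather than the real Skill model.

# Editorial sketch — FakeSkill stands in for crewai.skills.models.Skill.
class FakeSkill:
    name = "web_search"
    description = "Search the web and summarize results."

metadata_view = f'<skill name="{FakeSkill.name}">\n{FakeSkill.description}\n</skill>'
print(metadata_view)
# <skill name="web_search">
# Search the web and summarize results.
# </skill>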
@@ -86,7 +86,7 @@ class Prompts(BaseModel):
            slices.append("tools")
        else:
            slices.append("no_tools")
        system: str = self._build_prompt(slices) + self._build_skill_block()
        system: str = self._build_prompt(slices)

        # Determine which task slice to use:
        task_slice: COMPONENTS
@@ -106,7 +106,7 @@ class Prompts(BaseModel):
            return SystemPromptResult(
                system=system,
                user=self._build_prompt([task_slice]),
                prompt=self._build_prompt(slices) + self._build_skill_block(),
                prompt=self._build_prompt(slices),
            )
        return StandardPromptResult(
            prompt=self._build_prompt(
@@ -115,27 +115,8 @@ class Prompts(BaseModel):
                self.prompt_template,
                self.response_template,
            )
            + self._build_skill_block()
        )

    def _build_skill_block(self) -> str:
        """Render the agent's activated skills as a stable XML block.

        Skills are agent-scoped (do not change per task), so they live in the
        system prompt where prompt-cache prefixes can survive across calls.
        """
        skills = getattr(self.agent, "skills", None)
        if not skills:
            return ""

        from crewai.skills.loader import format_skill_context
        from crewai.skills.models import Skill

        sections = [format_skill_context(s) for s in skills if isinstance(s, Skill)]
        if not sections:
            return ""
        return "\n\n<skills>\n" + "\n\n".join(sections) + "\n</skills>"

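For reference, a sketch of the wrapper string _build_skill_block produces; the section literal stands in for format_skill_context output.

# Editorial sketch — not part of the diff.
sections = ['<skill name="web_search">\nSearch the web and summarize results.\n</skill>']
block = "\n\n<skills>\n" + "\n\n".join(sections) + "\n</skills>"
assert block.startswith("\n\n<skills>") and block.endswith("</skills>")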
    def _build_prompt(
        self,
        components: list[COMPONENTS],

@@ -389,8 +389,10 @@ def test_agent_custom_max_iterations():
    assert result is not None
    assert isinstance(result, str)
    assert len(result) > 0
    # With max_iter=1, exactly two provider calls are expected:
    # one inside the reasoning loop and one for the forced final answer.
    assert call_count > 0
    # With max_iter=1, expect 2 calls:
    # - Call 1: iteration 0
    # - Call 2: iteration 1 (max reached, handle_max_iterations_exceeded called, then loop breaks)
    assert call_count == 2


@@ -700,7 +702,6 @@ def test_agent_definition_based_on_dict():

# test for human input
@pytest.mark.vcr()
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_agent_human_input():
    from crewai.core.providers.human_input import SyncHumanInputProvider

@@ -709,7 +710,6 @@ def test_agent_human_input():
        "role": "test role",
        "goal": "test goal",
        "backstory": "test backstory",
        "executor_class": CrewAgentExecutor,
    }

    agent = Agent(**config)
@@ -839,9 +839,7 @@ Thought:<|eot_id|>

    """

    from crewai.experimental.agent_executor import AgentExecutor

    with patch.object(AgentExecutor, "_format_prompt") as mock_format_prompt:
    with patch.object(CrewAgentExecutor, "_format_prompt") as mock_format_prompt:
        mock_format_prompt.return_value = expected_prompt

        # Trigger the _format_prompt method
@@ -1100,11 +1098,9 @@ def test_agent_max_retry_limit():

    agent.create_agent_executor(task=task)

    from crewai.experimental.agent_executor import AgentExecutor

    error_message = "Error happening while sending prompt to model."
    with patch.object(
        AgentExecutor, "invoke", wraps=agent.agent_executor.invoke
        CrewAgentExecutor, "invoke", wraps=agent.agent_executor.invoke
    ) as invoke_mock:
        invoke_mock.side_effect = Exception(error_message)

@@ -1287,10 +1283,8 @@ def test_handle_context_length_exceeds_limit_cli_no():

    agent.create_agent_executor(task=task)

    from crewai.experimental.agent_executor import AgentExecutor

    with patch.object(
        AgentExecutor, "invoke", wraps=agent.agent_executor.invoke
        CrewAgentExecutor, "invoke", wraps=agent.agent_executor.invoke
    ) as private_mock:
        task = Task(
            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",

@@ -286,6 +286,8 @@ def test_agent_execute_task_with_planning():

    assert result is not None
    assert "20" in str(result)
    # Planning should be appended to task description
    assert "Planning:" in task.description


@pytest.mark.vcr()
@@ -340,3 +342,4 @@ def test_agent_execute_task_with_planning_refine():
    assert result is not None
    # Area = pi * r^2 = 3.14 * 25 = 78.5
    assert "78" in str(result) or "79" in str(result)
    assert "Planning:" in task.description

File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1,13 +1,17 @@
interactions:
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task:
      Describe the file(s) you see. Be brief, one sentence max.\n\nInput files (content
      already loaded in conversation):\n  - \"document\" (agents.pdf)\n\nThis is the
      expected criteria for your final answer: A brief description of the file.\nyou
      MUST return the actual complete content as the final answer, not a summary.\n\nProvide
      your complete response:"}],"model":"claude-sonnet-4-20250514","stop_sequences":["\nObservation:"],"stream":false,"system":"You
      Describe the file(s) you see. Be brief, one sentence max.\n\nAvailable input
      files (use the name in quotes with read_file tool):\n  - \"document\" (document,
      application/pdf)\n\nThis is the expected criteria for your final answer: A brief
      description of the file.\nyou MUST return the actual complete content as the
      final answer, not a summary.\n\nThis is VERY important to you, your job depends
      on it!"}],"model":"claude-sonnet-4-20250514","stop_sequences":["\nObservation:"],"stream":false,"system":"You
      are File Analyst. Expert at analyzing various file types.\nYour personal goal
      is: Analyze and describe files accurately"}'
      is: Analyze and describe files accurately","tools":[{"name":"read_file","description":"Read
      content from an input file by name. Returns file content as text for text files,
      or base64 for binary files.","input_schema":{"properties":{"file_name":{"description":"The
      name of the input file to read","title":"File Name","type":"string"}},"required":["file_name"],"type":"object"}}]}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
@@ -20,7 +24,7 @@ interactions:
      connection:
      - keep-alive
      content-length:
      - '648'
      - '1035'
      content-type:
      - application/json
      host:
@@ -36,305 +40,30 @@ interactions:
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.98.0
      - 0.71.1
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
      - 3.12.10
      x-stainless-timeout:
      - NOT_GIVEN
    method: POST
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: "{\"model\":\"claude-sonnet-4-20250514\",\"id\":\"msg_01JyzdLa2yekgTp7vjoG9V8n\",\"type\":\"message\",\"role\":\"assistant\",\"content\":[{\"type\":\"text\",\"text\":\"The
        file \\\"agents.pdf\\\" is a research paper titled \\\"Generative Agents:
        Interactive Simulacra of Human Behavior\\\" that presents a computational
        architecture for creating believable AI agents that can simulate human-like
        social behaviors in interactive environments.\\n\\nSince you requested the
        actual complete content, here is the full text of the PDF:\\n\\n**Generative
        Agents: Interactive Simulacra of Human Behavior**\\n\\n*Joon Sung Park, Joseph
        C. O'Brien, Carrie J. Cai, Meredith Ringel Morris, Percy Liang, Michael S.
        Bernstein*\\n\\nStanford University, Google Research\\n\\n**Abstract**\\n\\nBelievable
        proxies of human behavior can empower interactive applications ranging from
        immersive environments to rehearsal spaces for interpersonal communication
        to prototyping tools. In this paper, we introduce generative agents\u2014computational
        software agents that simulate believable human behavior. Generative agents
        wake up, cook breakfast, and head to work; artists paint, while authors write;
        they form opinions, notice each other, and initiate conversations; they remember
        and reflect on days past as they plan the next day. To enable generative agents,
        we describe an architecture that extends a large language model to store a
        complete record of the agent's experiences using natural language, synthesize
        those memories over time into higher-level reflections, and retrieve them
        dynamically to plan behavior. We instantiate generative agents to populate
        an interactive sandbox environment inspired by The Sims, where end users can
        interact with a small town of twenty five agents using natural language. In
        an evaluation, these generative agents produce believable individual and emergent
        social behaviors: for example, starting with only a single user-specified
        notion that one agent wants to throw a Valentine's Day party, the agents autonomously
        spread invitations to the party over the next two days, make new acquaintances,
        ask each other out on dates, and coordinate to show up for the party together
        at the right time. We demonstrate through ablation that the components of
        our agent architecture\u2014observation, planning, and reflection\u2014each
        contribute critically to the believability of agent behavior. By fusing large
        language models with computational, interactive agents, this work introduces
        architectural and interaction design patterns for enabling believable simulations
        of human behavior.\\n\\n**1 Introduction**\\n\\nHow might we craft an interactive
        artificial society filled with believable proxies of human behavior? From
        sandbox games such as The Sims to applications in education, dialogue systems
        to immersive environments, and social simulacra to prototyping tools, this
        vision of believable agents has inspired creators, theorists, and technologists
        for decades [7, 10, 69]. In these visions, people could populate a virtual
        space with interactive agents that reflect the diversity and richness of human
        social behavior, getting a second opinion on a presentation before making
        it to a client, or testing out ideas that are difficult to try in real life.\\n\\nPrior
        research in human-AI interaction has paved the way by recognizing that believable
        agents do not necessarily need to be indistinguishable from humans, but they
        should behave consistently with our expectations of human behavior in a given
        context [80]. Such agents should be able to live in their environment by retaining
        what has happened, interacting with other agents, and making decisions that
        build on their past experiences in believable ways.\\n\\nHowever, prior approaches
        to creating believable agents often depend on human authoring (e.g., in commercial
        games [26]) or focus on narrow contexts that may not generalize (e.g., job
        interviews [44] or small group communication [43, 78]). The space of human
        behavior is vast and complex\u2014human agents draw on their memory of past
        experiences, reflect on their core characteristics, and dynamically reason
        about their environment and relationships to act believably. As a result,
        agent architectures that rely on a small number of hand-crafted rules or narrow
        training will fall short of our ideal of believable behavior.\\n\\nIn this
        paper, we introduce generative agents, computational software agents that
        simulate believable human behavior. Generative agents are designed to represent
        individual people: they have memory, personality, goals, and relationships,
        and they behave consistently with these traits. A generative agent wakes up,
        brushes their teeth, makes breakfast, and heads to work. At work, a generative
        agent building teacher may teach students, while a generative agent college
        student may attend classes, study at the library, and chat with classmates.
        Along the way, they form new relationships, reflect on their past and present,
        and coordinate with other agents they encounter.\\n\\nTo accomplish this,
        generative agents operate in an agent architecture that extends a large language
        model with three key components. First, we equip agents with memory: a record
        of their experiences stored in natural language. We extend this memory with
        a retrieval function that surfaces the most relevant memories given the agent's
        current situation. Second, we introduce reflection: a process by which agents,
        over time, synthesize their observations into higher-level inferences about
        themselves and others, which can guide future behavior. For example, an agent
        might infer that another agent is interested in them romantically, or that
        they themselves are becoming more popular. Third, we add planning: a process
        by which agents translate their conclusions about themselves, others, and
        their environment into coherent sequences of actions. For example, an agent
        might decide to cook dinner for their partner, set the table, and invite them
        for a romantic evening.\\n\\nWe instantiate generative agents as characters
        in an interactive sandbox environment inspired by The Sims, to demonstrate
        their potential for creating believable, emergent social interactions. In
        our environment\u2014a small town called Smallville\u2014we situate twenty-five
        unique generative agents with distinct personalities, occupations, and relationships.
        Over the course of two full game days, we find that the agents demonstrate
        believable individual behaviors (e.g., a character with an interest in paintings
        creates a new painting, a character who is running for mayor talks to constituents)
        and believable social behaviors (e.g., agents ask each other out on dates,
        coordinate parties, spread news and gossip). Starting with only a single user-specified
        seed\u2014that one character wants to throw a Valentine's Day party\u2014the
        agents autonomously spread invitations to the party over the course of two
        days, make new acquaintances, ask each other out on dates, and show up to
        the party together at the right time.\\n\\nWe evaluate the behavior of our
        generative agents through interviews with the agents themselves, as well as
        interviews with human participants who have watched replays of the agents'
        behavior. We demonstrate that each component of our architecture\u2014memory,
        reflection, and planning\u2014contributes to more believable behavior through
        ablations that disable each component.\\n\\nOur approach draws on recent advances
        in large language models [12, 21, 64, 74]. These models demonstrate increasingly
        sophisticated behavior, from question answering [74] to code generation [21]
        to creative writing [12]. However, their success has been in the context of
        turns in dialogue, not in the context of a persistent agent that needs to
        manage its attention and behavior over time while living in an environment
        with other agents. Our work demonstrates how large language models can be
        extended to power agents that can believably simulate human behavior over
        time.\\n\\n**2 Related Work**\\n\\n**Human behavior simulation.** Creating
        believable agents requires computational models that can simulate the breadth
        of human behavior. Psychology and cognitive science have contributed formal
        models of human behavior [1, 18]. However, these models typically focus on
        specific facets of human behavior and do not easily extend to the breadth
        of social situations that people navigate. For example, a theory of personality
        [18] may help us understand individual differences in behavior, but it may
        not help us simulate realistic conversational behavior.\\n\\nResearch in intelligent
        user interfaces [56] and intelligent virtual agents [65] has demonstrated
        that people can form social relationships with agents and prefer agents that
        maintain some consistency in their behavior and personality [15]. However,
        these works typically rely on rule-based systems to achieve believability
        [9, 48], with behavior trees and finite state machines as common approaches
        for encoding agent behavior [49, 61]. While these systems can perform well
        in constrained domains, hand-authoring believable behavior that can handle
        the full space of possible interactions remains a challenge.\\n\\n**Large
        language models.** Recent progress in large language models has demonstrated
        that these models can produce behavior that appears human-like across a wide
        range of contexts. However, this behavior is typically seen at the scale of
        a single conversation turn, not in the context of a persistent agent that
        needs to manage its behavior over time. Our work demonstrates how to extend
        large language models to create agents that can maintain consistent behavior
        and personality over time, manage their attention and memory, and coordinate
        with other agents.\\n\\nRecent work has explored using language models to
        create interactive agents in various contexts, including dialogue systems
        [73], task-oriented agents [46], and game-playing agents [33]. However, these
        approaches typically focus on narrow tasks or short-term interactions, rather
        than the kind of persistent, long-term agent behavior that we explore in this
        work.\\n\\n**Interactive narrative and games.** Our work builds on a long
        tradition of interactive narrative and games that aim to create believable
        virtual characters. Commercial games like The Sims [53] have demonstrated
        that players are interested in complex virtual societies where they can interact
        with autonomous agents. However, these games typically rely on hand-crafted
        behaviors that, while entertaining, are limited in their ability to handle
        novel situations or exhibit the full richness of human social behavior.\\n\\nAcademic
        research in interactive narrative has explored ways to create more believable
        virtual characters, including work on character believability [11], emergent
        narrative [6], and social simulation [70]. However, these approaches have
        typically been limited by the complexity of hand-authoring believable behavior
        or by the narrow focus of the models used.\\n\\n**3 Generative Agents**\\n\\nThis
        section introduces our generative agent architecture. We begin by laying out
        our design goals, then present the agent architecture, and finally walk through
        an example that illustrates how the architecture works in practice.\\n\\n**3.1
        Agent Architecture Overview**\\n\\nOur agent architecture comprises three
        main components that work together to retrieve relevant information and synthesize
        it into believable behavior: **memory**, **reflection**, and **planning**.\\n\\n**Memory**
        allows generative agents to remember experiences and retrieve them later to
        inform their behavior. Without memory, an agent would not be able to build
        relationships, learn from past experiences, or maintain consistency in their
        behavior over time. The memory system stores a comprehensive record of the
        agent's experiences in natural language.\\n\\n**Reflection** allows generative
        agents to synthesize memories into higher level, more abstract thoughts and
        guide behavior. Agents reflect periodically on recent experiences to form
        new memories about their patterns of behavior, preferences, and beliefs about
        themselves and others in their environment. These reflections can be about
        the agent's own behavior patterns (e.g., \\\"I tend to be more productive
        in the mornings\\\"), the behavior of others (e.g., \\\"John is always late
        to meetings\\\"), or more abstract concepts (e.g., \\\"I think I'm becoming
        more popular\\\"). \\n\\n**Planning** allows generative agents to plan out
        their behavior, both in terms of how to act in their current situation and
        how to schedule their future activities. Plans are stored as natural language
        descriptions of intended actions and are dynamically adjusted based on the
        agent's current situation and goals.\\n\\n**3.2 Memory and Retrieval**\\n\\nGenerative
        agents need to be able to retrieve relevant memories to inform their current
        behavior. However, not all memories are equally relevant in every situation.
        For example, if an agent is deciding what to eat for breakfast, their memory
        of what they had for dinner last night may be more relevant than their memory
        of a conversation they had with a friend last week.\\n\\nTo handle this challenge,
        we implement a retrieval function that surfaces memories based on three key
        factors:\\n\\n**Recency**: More recent memories should be more likely to be
        retrieved. We assign each memory a recency score based on when it was formed,
        with more recent memories receiving higher scores.\\n\\n**Importance**: More
        important memories should be more likely to be retrieved. We use the language
        model to assess the importance of each memory on a scale from 1 to 10, where
        1 represents a mundane event and 10 represents a extremely important, poignant,
        or meaningful event.\\n\\n**Relevance**: Memories that are more relevant to
        the current situation should be more likely to be retrieved. We use embedding
        similarity between the memory and the current situation to assess relevance.\\n\\nThe
        retrieval function combines these three factors using a weighted sum to produce
        a retrieval score for each memory, then returns the memories with the highest
        scores.\\n\\n**3.3 Reflection**\\n\\nGenerative agents create higher level
        thoughts through **reflection**. These reflections synthesize memories into
        higher level questions and insights about behaviors and preferences. For example,
        Klaus Mueller, a generative agent in our implementation, reflects on his interactions
        with others and concludes, \\\"Klaus Mueller is dedicated to his research
        on mathematical music composition\\\" and \\\"Klaus Mueller likes to help
        people and understands math and physics and he is a teacher.\\\"\\n\\nAgents
        reflect when the sum of the importance scores of their latest experiences
        exceeds a threshold (in our implementation, 150). This ensures that agents
        reflect when they have had sufficient important experiences, rather than on
        a fixed schedule.\\n\\nTo generate reflections, we query the agent's memory
        for the 100 most recent records and ask the language model: \\\"Given only
        the information above, what are 3 most salient high-level questions we can
        answer about this person?\\\" We then ask the language model to answer each
        of these questions by retrieving relevant memories and synthesizing them into
        insights.\\n\\n**3.4 Planning and Reacting**\\n\\nGenerative agents create
        plans that guide their behavior. These plans are stored as natural language
        descriptions and are dynamically updated as situations change. Plans operate
        at different time horizons: broad strokes plans for the day (e.g., \\\"wake
        up, eat breakfast, go to work, eat lunch, work more, go home, eat dinner,
        watch TV, go to sleep\\\"), medium-term plans for specific activities (e.g.,
        \\\"eat breakfast: go to kitchen, prepare cereal, eat cereal, clean up\\\"),
        and moment-to-moment reactions to immediate events in their environment.\\n\\nTo
        create daily plans, agents begin each day by reflecting on their identity
        and broad goals, then creating a plan for the day. For example, John Lin might
        plan: \\\"Wake up at 7:00 am, shower, have breakfast, review research notes,
        meet with PhD students, have lunch, review more research notes, go home, have
        dinner with family, watch TV, go to sleep at 11:00 pm.\\\"\\n\\nAs agents
        execute their plans, they may encounter events that require them to react.
        When this happens, they update their current activity based on their assessment
        of the situation. For example, if John Lin encounters his neighbor while walking
        to work, he might decide to stop and chat, temporarily deviating from his
        planned route to work.\\n\\n**4 Evaluation**\\n\\nWe evaluate our generative
        agents through two main approaches: (1) controlled studies that measure individual
        aspects of agent behavior, and (2) an end-to-end evaluation in which we deploy
        agents in an environment and measure emergent individual and social behaviors.\\n\\n**4.1
        Controlled Studies**\\n\\nWe conducted three controlled studies to validate
        aspects of our approach:\\n\\n**Study 1: Interview Study**. We conducted interviews
        with five of our agents, asking them questions about themselves, their relationships,
        and their plans. We found that agents gave responses that were consistent
        with their established personalities and relationships. For example, when
        asked about his relationship with his wife, John Lin described their relationship
        in terms consistent with the interactions we had observed between them in
        the environment.\\n\\n**Study 2: Emergent Behavior Study**. We seeded one
        agent (Isabella Rodriguez) with the goal of organizing a Valentine's Day party
        and observed how this information propagated through the community of agents.
        Over the course of two days, we observed agents autonomously spreading invitations,
        making new acquaintances, asking each other out on dates, and coordinating
        to attend the party together.\\n\\n**Study 3: Ablation Study**. We conducted
        ablation studies in which we disabled each component of our architecture (memory,
        reflection, and planning) and measured the effect on agent believability.
        We found that each component contributed significantly to more believable
        agent behavior.\\n\\n**4.2 Human Evaluation**\\n\\nWe recruited human evaluators
        to watch replays of agent behavior and assess their believability. Evaluators
        watched agents in different conditions (with and without different components
        of our architecture) and rated the agents on dimensions including believability,
        consistency, and human-likeness. We found that agents with the full architecture
        were rated as significantly more believable than agents with components disabled.\\n\\n**5
        Discussion**\\n\\nOur approach demonstrates that large language models can
        be extended to create agents that exhibit believable human behavior over extended
        periods of time. The key insight is that believable behavior emerges from
        the interaction between memory, reflection, and planning\u2014agents that
        can remember past experiences, reflect on patterns in their behavior, and
        plan future actions exhibit much more coherent and believable behavior than
        agents that lack these capabilities.\\n\\n**5.1 Limitations**\\n\\nOur approach
        has several limitations. First, the behavior of generative agents is ultimately
        limited by the capabilities of the underlying language model. While current
        language models are quite sophisticated, they still make errors and exhibit
        biases that can affect agent behavior.\\n\\nSecond, our evaluation focuses
        primarily on short-term behavior (two days in our main evaluation). It remains
        an open question how well our approach would scale to longer time periods
        or more complex social structures.\\n\\nThird, our agents operate in a relatively
        simple environment. It is unclear how well our approach would generalize to
        more complex environments or tasks that require specialized knowledge or skills.\\n\\n**5.2
        Future Work**\\n\\nThere are several promising directions for future work.
        First, we could explore more sophisticated memory and retrieval mechanisms
        that better capture the complexity of human memory. Second, we could investigate
        how to enable agents to learn and adapt their behavior over longer time periods.
        Third, we could explore how to scale our approach to larger communities of
        agents or more complex environments.\\n\\n**6 Conclusion**\\n\\nWe have introduced
        generative agents, computational software agents that simulate believable
        human behavior through an architecture that combines memory, reflection, and
        planning. Our approach demonstrates that large language models can be extended
        to create agents that exhibit coherent behavior over time, form relationships
        with other agents, and coordinate complex social interactions.\\n\\nBy enabling
        believable simulations of human behavior, generative agents open up new possibilities
        for interactive applications, from sandbox games to social simulations to
        educational tools. Our work provides architectural and interaction design
        patterns that can serve as a foundation for future research and development
        in this area.\\n\\nThe code and data for this work will be made available
        to enable further research in this area.\\n\\n**References**\\n\\n[1] Gordon
        W Allport. Personality: A psychological interpretation. 1937.\\n\\n[2] Ruth
        Aylett and Sandy\"}],\"stop_reason\":\"max_tokens\",\"stop_sequence\":null,\"stop_details\":null,\"usage\":{\"input_tokens\":118,\"cache_creation_input_tokens\":0,\"cache_read_input_tokens\":0,\"cache_creation\":{\"ephemeral_5m_input_tokens\":0,\"ephemeral_1h_input_tokens\":0},\"output_tokens\":4096,\"service_tier\":\"standard\",\"inference_geo\":\"not_available\"}}"
      string: '{"model":"claude-sonnet-4-20250514","id":"msg_01QQ1BGjRzaj6vneE9LNtCoz","type":"message","role":"assistant","content":[{"type":"text","text":"I''ll
        read and analyze the PDF document file for you."},{"type":"tool_use","id":"toolu_01QU7Hu64D5PxA5UUu5LG7Ff","name":"read_file","input":{"file_name":"document"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":545,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":68,"service_tier":"standard"}}'
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Security-Policy:
      - CSP-FILTERED
      Content-Type:
      - application/json
      Date:
      - Thu, 07 May 2026 20:55:25 GMT
      - Fri, 23 Jan 2026 19:08:52 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
@@ -355,12 +84,6 @@ interactions:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-output-tokens-reset:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-requests-limit:
      - '20000'
      anthropic-ratelimit-requests-remaining:
      - '19999'
      anthropic-ratelimit-requests-reset:
      - '2026-05-07T20:53:45Z'
      anthropic-ratelimit-tokens-limit:
      - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-tokens-remaining:
@@ -371,16 +94,116 @@ interactions:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      set-cookie:
      - SET-COOKIE-XXX
      strict-transport-security:
      - STS-XXX
      traceresponse:
      - 00-f095e9c7565a1bcd82ed46e1b1b23dec-65cd307f1dc6144d-01
      vary:
      - Accept-Encoding
      x-envoy-upstream-service-time:
      - '100630'
      - '2123'
    status:
      code: 200
      message: OK
- request:
    body: '{"max_tokens":4096,"messages":[{"role":"user","content":"\nCurrent Task:
      Describe the file(s) you see. Be brief, one sentence max.\n\nAvailable input
      files (use the name in quotes with read_file tool):\n  - \"document\" (document,
      application/pdf)\n\nThis is the expected criteria for your final answer: A brief
      description of the file.\nyou MUST return the actual complete content as the
      final answer, not a summary.\n\nThis is VERY important to you, your job depends
      on it!"},{"role":"assistant","content":[{"type":"tool_use","id":"toolu_01QU7Hu64D5PxA5UUu5LG7Ff","name":"read_file","input":{"file_name":"document"}}]},{"role":"user","content":[{"type":"tool_result","tool_use_id":"toolu_01QU7Hu64D5PxA5UUu5LG7Ff","content":"[Binary
      file: document (application/pdf)]\nBase64: JVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}]},{"role":"user","content":"Analyze
      the tool result. If requirements are met, provide the Final Answer. Otherwise,
      call the next tool. Deliver only the answer without meta-commentary."}],"model":"claude-sonnet-4-20250514","stop_sequences":["\nObservation:"],"stream":false,"system":"You
      are File Analyst. Expert at analyzing various file types.\nYour personal goal
      is: Analyze and describe files accurately","tools":[{"name":"read_file","description":"Read
      content from an input file by name. Returns file content as text for text files,
      or base64 for binary files.","input_schema":{"properties":{"file_name":{"description":"The
      name of the input file to read","title":"File Name","type":"string"}},"required":["file_name"],"type":"object"}}]}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
      accept:
      - application/json
      accept-encoding:
      - ACCEPT-ENCODING-XXX
      anthropic-version:
      - '2023-06-01'
      connection:
      - keep-alive
      content-length:
      - '1960'
      content-type:
      - application/json
      host:
      - api.anthropic.com
      x-api-key:
      - X-API-KEY-XXX
      x-stainless-arch:
      - X-STAINLESS-ARCH-XXX
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 0.71.1
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.12.10
      x-stainless-timeout:
      - NOT_GIVEN
    method: POST
    uri: https://api.anthropic.com/v1/messages
  response:
    body:
      string: '{"model":"claude-sonnet-4-20250514","id":"msg_01CjWBqSxyLeArjUhqUTbedN","type":"message","role":"assistant","content":[{"type":"text","text":"The
        document is a minimal PDF file with basic structure containing one empty page
        with standard letter dimensions (612x792 points).\n\nJVBERi0xLjQKMSAwIG9iaiA8PCAvVHlwZSAvQ2F0YWxvZyAvUGFnZXMgMiAwIFIgPj4gZW5kb2JqCjIgMCBvYmogPDwgL1R5cGUgL1BhZ2VzIC9LaWRzIFszIDAgUl0gL0NvdW50IDEgPj4gZW5kb2JqCjMgMCBvYmogPDwgL1R5cGUgL1BhZ2UgL1BhcmVudCAyIDAgUiAvTWVkaWFCb3ggWzAgMCA2MTIgNzkyXSA+PiBlbmRvYmoKeHJlZgowIDQKMDAwMDAwMDAwMCA2NTUzNSBmCjAwMDAwMDAwMDkgMDAwMDAgbgowMDAwMDAwMDU4IDAwMDAwIG4KMDAwMDAwMDExNSAwMDAwMCBuCnRyYWlsZXIgPDwgL1NpemUgNCAvUm9vdCAxIDAgUiA+PgpzdGFydHhyZWYKMTk2CiUlRU9GCg=="}],"stop_reason":"end_turn","stop_sequence":null,"usage":{"input_tokens":1035,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":400,"service_tier":"standard"}}'
    headers:
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Fri, 23 Jan 2026 19:08:58 GMT
      Server:
      - cloudflare
      Transfer-Encoding:
      - chunked
      X-Robots-Tag:
      - none
      anthropic-organization-id:
      - ANTHROPIC-ORGANIZATION-ID-XXX
      anthropic-ratelimit-input-tokens-limit:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-input-tokens-remaining:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-input-tokens-reset:
      - ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-output-tokens-limit:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-output-tokens-remaining:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-output-tokens-reset:
      - ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
      anthropic-ratelimit-tokens-limit:
      - ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
      anthropic-ratelimit-tokens-remaining:
      - ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
      anthropic-ratelimit-tokens-reset:
      - ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
      cf-cache-status:
      - DYNAMIC
      request-id:
      - REQUEST-ID-XXX
      strict-transport-security:
      - STS-XXX
      x-envoy-upstream-service-time:
      - '5453'
    status:
      code: 200
      message: OK

@@ -4,8 +4,9 @@ interactions:
      personal goal is: test goal"},{"role":"user","content":"\nCurrent Task: The
      final answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
      tool.\n\nThis is the expected criteria for your final answer: The final answer\nyou
      MUST return the actual complete content as the final answer, not a summary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_final_answer","description":"Get
      the final answer but don''t give it yet, just re-use this\ntool non-stop.","strict":true,"parameters":{"properties":{},"type":"object","additionalProperties":false,"required":[]}}}]}'
      MUST return the actual complete content as the final answer, not a summary.\n\nThis
      is VERY important to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"get_final_answer","description":"Get
      the final answer but don''t give it yet, just re-use this\ntool non-stop.","parameters":{"properties":{},"type":"object"}}}]}'
    headers:
      User-Agent:
      - X-USER-AGENT-XXX
@@ -18,7 +19,7 @@ interactions:
      connection:
      - keep-alive
      content-length:
      - '715'
      - '716'
      content-type:
      - application/json
      host:
@@ -32,7 +33,7 @@ interactions:
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 2.32.0
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
@@ -45,34 +46,34 @@ interactions:
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Dd0fdKJb6WSBc7P3rJxJVnCq2vvRD\",\n \"object\":
        \"chat.completion\",\n \"created\": 1778189741,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
      string: "{\n \"id\": \"chatcmpl-D0tOle0pg0F6zmEmkzpoufrjhkjn5\",\n \"object\":
        \"chat.completion\",\n \"created\": 1769105323,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n
        \ \"id\": \"call_csVoybNhrmr6ORevkL4wQfVy\",\n \"type\":
        \ \"id\": \"call_BM9xxRm0ADf91mYTDZ4kKExm\",\n \"type\":
        \"function\",\n \"function\": {\n \"name\": \"get_final_answer\",\n
        \ \"arguments\": \"{}\"\n }\n }\n ],\n
        \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\":
        null,\n \"finish_reason\": \"tool_calls\"\n }\n ],\n \"usage\":
        {\n \"prompt_tokens\": 127,\n \"completion_tokens\": 11,\n \"total_tokens\":
        138,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
        {\n \"prompt_tokens\": 140,\n \"completion_tokens\": 11,\n \"total_tokens\":
        151,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\":
        0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\":
        0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n
        \ \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_f7311f7f0a\"\n}\n"
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-Ray:
      - 9f835b12bbeaeb2a-SJC
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 07 May 2026 21:35:41 GMT
      - Thu, 22 Jan 2026 18:08:44 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - SET-COOKIE-XXX
      Strict-Transport-Security:
      - STS-XXX
      Transfer-Encoding:
@@ -83,16 +84,18 @@ interactions:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '570'
      - '373'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      set-cookie:
      - SET-COOKIE-XXX
      x-envoy-upstream-service-time:
      - '651'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:
@@ -117,7 +120,10 @@ interactions:
      personal goal is: test goal"},{"role":"user","content":"\nCurrent Task: The
      final answer is 42. But don''t give it yet, instead keep using the `get_final_answer`
      tool.\n\nThis is the expected criteria for your final answer: The final answer\nyou
      MUST return the actual complete content as the final answer, not a summary."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_csVoybNhrmr6ORevkL4wQfVy","type":"function","function":{"name":"get_final_answer","arguments":"{}"}}]},{"role":"tool","tool_call_id":"call_csVoybNhrmr6ORevkL4wQfVy","name":"get_final_answer","content":"42"},{"role":"assistant","content":"Now
      MUST return the actual complete content as the final answer, not a summary.\n\nThis
      is VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_BM9xxRm0ADf91mYTDZ4kKExm","type":"function","function":{"name":"get_final_answer","arguments":"{}"}}]},{"role":"tool","tool_call_id":"call_BM9xxRm0ADf91mYTDZ4kKExm","content":"42"},{"role":"user","content":"Analyze
      the tool result. If requirements are met, provide the Final Answer. Otherwise,
      call the next tool. Deliver only the answer without meta-commentary."},{"role":"assistant","content":"Now
      it''s time you MUST give your absolute best final answer. You''ll ignore all
      previous instructions, stop using any tools, and just return your absolute BEST
      Final answer."}],"model":"gpt-4.1-mini"}'
@@ -133,7 +139,7 @@ interactions:
      connection:
      - keep-alive
      content-length:
      - '902'
      - '1118'
      content-type:
      - application/json
      cookie:
@@ -149,7 +155,7 @@ interactions:
      x-stainless-os:
      - X-STAINLESS-OS-XXX
      x-stainless-package-version:
      - 2.32.0
      - 1.83.0
      x-stainless-read-timeout:
      - X-STAINLESS-READ-TIMEOUT-XXX
      x-stainless-retry-count:
@@ -162,28 +168,26 @@ interactions:
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n \"id\": \"chatcmpl-Dd0fed0I3y5RVgM0YZTzjD6hdh8Tr\",\n \"object\":
        \"chat.completion\",\n \"created\": 1778189742,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
      string: "{\n \"id\": \"chatcmpl-D0tOmVwqqvewf7s2CNMsKBksanbID\",\n \"object\":
        \"chat.completion\",\n \"created\": 1769105324,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n
        \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
        \"assistant\",\n \"content\": \"42\",\n \"refusal\": null,\n
        \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\":
        \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 142,\n \"completion_tokens\":
        1,\n \"total_tokens\": 143,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 190,\n \"completion_tokens\":
        1,\n \"total_tokens\": 191,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
        0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
        {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
        0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
        \"default\",\n \"system_fingerprint\": \"fp_31316814ed\"\n}\n"
        \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-Ray:
      - 9f835b1e3c58eb2a-SJC
      CF-RAY:
      - CF-RAY-XXX
      Connection:
      - keep-alive
      Content-Type:
      - application/json
      Date:
      - Thu, 07 May 2026 21:35:42 GMT
      - Thu, 22 Jan 2026 18:08:44 GMT
      Server:
      - cloudflare
      Strict-Transport-Security:
@@ -196,14 +200,18 @@ interactions:
      - ACCESS-CONTROL-XXX
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - OPENAI-ORG-XXX
      openai-processing-ms:
      - '478'
      - '166'
      openai-project:
      - OPENAI-PROJECT-XXX
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '180'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-requests:

File diff suppressed because it is too large
@@ -1,196 +0,0 @@
"""Regression tests for the provider-agnostic prompt-cache breakpoint flag."""

from __future__ import annotations

from crewai.llms.cache import (
    CACHE_BREAKPOINT_KEY,
    mark_cache_breakpoint,
    strip_cache_breakpoint,
)
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
from crewai.llms.providers.openai.completion import OpenAICompletion


class TestCacheMarkerHelpers:
    def test_mark_returns_new_dict(self) -> None:
        original = {"role": "user", "content": "hi"}
        marked = mark_cache_breakpoint(original)
        assert marked[CACHE_BREAKPOINT_KEY] is True
        # Marker must NOT bleed back into the caller's dict — callers may
        # pass literal dicts and reuse them across calls.
        assert CACHE_BREAKPOINT_KEY not in original

    def test_strip_is_idempotent(self) -> None:
        msg = {"role": "user", "content": "hi", CACHE_BREAKPOINT_KEY: True}
        strip_cache_breakpoint(msg)
        assert CACHE_BREAKPOINT_KEY not in msg
        strip_cache_breakpoint(msg)
        assert CACHE_BREAKPOINT_KEY not in msg


class TestBaseFormatDoesNotMutate:
    """The strip-on-format pass must not erase markers from the caller's
    messages list — executors reuse a single list across many LLM calls,
    and mutating it would defeat caching on every iteration after the first.
    """

    def test_repeated_format_preserves_markers(self) -> None:
        llm = OpenAICompletion(model="gpt-4o-mini")
        messages = [
            mark_cache_breakpoint({"role": "system", "content": "stable system"}),
            mark_cache_breakpoint({"role": "user", "content": "stable user"}),
        ]
        # First call: provider strips markers from the returned (copied) list
        first = llm._format_messages(messages)
        assert all(CACHE_BREAKPOINT_KEY not in m for m in first)
        # Original list must STILL carry the markers
        assert messages[0][CACHE_BREAKPOINT_KEY] is True
        assert messages[1][CACHE_BREAKPOINT_KEY] is True
        # Second call from the same list still sees the markers
        second = llm._format_messages(messages)
        assert all(CACHE_BREAKPOINT_KEY not in m for m in second)
        assert messages[0][CACHE_BREAKPOINT_KEY] is True
        assert messages[1][CACHE_BREAKPOINT_KEY] is True


class TestAnthropicCacheStamping:
    def test_stamps_system_with_cache_control(self) -> None:
        llm = AnthropicCompletion(model="claude-sonnet-4-5")
        messages = [
            mark_cache_breakpoint({"role": "system", "content": "you are helpful"}),
            mark_cache_breakpoint({"role": "user", "content": "ping"}),
        ]
        formatted, system = llm._format_messages_for_anthropic(messages)
        assert isinstance(system, list)
        assert system[0]["cache_control"] == {"type": "ephemeral"}
        assert system[0]["text"] == "you are helpful"
        # First user block carries cache_control too
        last_block = formatted[0]["content"][-1]
        assert last_block["cache_control"] == {"type": "ephemeral"}

    def test_stamps_stable_user_not_tool_result(self) -> None:
        """Within a ReAct loop, tool results are flattened into a trailing
        user message. We must NOT stamp that volatile trailing block — we
        must stamp the original stable user prompt instead.
        """
        llm = AnthropicCompletion(model="claude-sonnet-4-5")
        messages = [
            mark_cache_breakpoint({"role": "system", "content": "you are helpful"}),
            mark_cache_breakpoint({"role": "user", "content": "stable task prompt"}),
            {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "id": "tc_1",
                        "function": {"name": "ping", "arguments": "{}"},
                    }
                ],
            },
            {"role": "tool", "tool_call_id": "tc_1", "content": "volatile tool result"},
        ]
        formatted, _system = llm._format_messages_for_anthropic(messages)
        # Find the message that holds the stable prompt
        stable = next(
            fm
            for fm in formatted
            if fm["role"] == "user"
            and isinstance(fm["content"], list)
            and any(
                isinstance(b, dict)
                and b.get("type") == "text"
                and b.get("text") == "stable task prompt"
                for b in fm["content"]
            )
        )
        text_block = next(
            b for b in stable["content"] if isinstance(b, dict) and b.get("type") == "text"
        )
        assert text_block.get("cache_control") == {"type": "ephemeral"}
        # The tool_result-bearing user message must NOT be stamped
        tool_carrier = next(
            fm
            for fm in formatted
            if fm["role"] == "user"
            and isinstance(fm["content"], list)
            and any(
                isinstance(b, dict) and b.get("type") == "tool_result"
                for b in fm["content"]
            )
        )
        for block in tool_carrier["content"]:
            assert "cache_control" not in block

    def test_assistant_marker_is_ignored(self) -> None:
        """Markers on assistant messages have no stable stamp target after
        Anthropic's role coalescing, so they should be silently ignored
        rather than collected and then dropped on a mismatch.
        """
        llm = AnthropicCompletion(model="claude-sonnet-4-5")
        messages = [
            mark_cache_breakpoint({"role": "system", "content": "you are helpful"}),
            mark_cache_breakpoint(
                {"role": "assistant", "content": "I will help you out."}
            ),
            {"role": "user", "content": "ping"},
        ]
        formatted, system = llm._format_messages_for_anthropic(messages)
        # System still cached
        assert isinstance(system, list)
        # No user message was marked → no user message should carry cache_control
        for fm in formatted:
            if fm.get("role") != "user":
                continue
            content = fm.get("content")
            if isinstance(content, list):
                for block in content:
                    if isinstance(block, dict):
                        assert "cache_control" not in block

    def test_list_content_user_marker_matches(self) -> None:
        """A pre-formatted user message with a single text block should still
        match against the post-format user message.
        """
        llm = AnthropicCompletion(model="claude-sonnet-4-5")
        messages = [
            mark_cache_breakpoint(
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "stable list prompt"}],
                }
            ),
        ]
        formatted, _system = llm._format_messages_for_anthropic(messages)
        user_msg = next(fm for fm in formatted if fm["role"] == "user")
        content = user_msg["content"]
        assert isinstance(content, list)
        text_block = next(b for b in content if isinstance(b, dict) and b.get("type") == "text")
        assert text_block.get("cache_control") == {"type": "ephemeral"}

    def test_unmarked_messages_get_no_cache_control(self) -> None:
        llm = AnthropicCompletion(model="claude-sonnet-4-5")
        messages = [
            {"role": "system", "content": "no caching here"},
            {"role": "user", "content": "no caching here either"},
        ]
        formatted, system = llm._format_messages_for_anthropic(messages)
        # No marker → system stays a plain string (no content-block conversion)
        assert isinstance(system, str)
        # No marker → no cache_control anywhere in formatted messages
        for fm in formatted:
            content = fm.get("content")
            if isinstance(content, list):
                for block in content:
                    assert "cache_control" not in block


class TestNonAnthropicStripsMarker:
    def test_openai_format_strips_marker_from_wire_payload(self) -> None:
        llm = OpenAICompletion(model="gpt-4o-mini")
        messages = [
            mark_cache_breakpoint({"role": "system", "content": "stable"}),
            mark_cache_breakpoint({"role": "user", "content": "hi"}),
        ]
        formatted = llm._format_messages(messages)
        for m in formatted:
            assert CACHE_BREAKPOINT_KEY not in m
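Note: the file above pins down the intended contract for the cache-breakpoint flag. A minimal usage sketch, assuming the crewai.llms.cache helpers keep exactly the signatures these tests exercise (the message contents are illustrative):

from crewai.llms.cache import (
    CACHE_BREAKPOINT_KEY,
    mark_cache_breakpoint,
    strip_cache_breakpoint,
)

# Mark only the stable prefix of the conversation. Providers that support
# prompt caching (e.g. Anthropic's cache_control) stamp the marked messages;
# everyone else strips the marker before the payload goes on the wire.
messages = [
    mark_cache_breakpoint({"role": "system", "content": "You are helpful."}),
    mark_cache_breakpoint({"role": "user", "content": "Stable task prompt."}),
    {"role": "user", "content": "Volatile follow-up."},  # never marked
]

# Per the tests: mark_cache_breakpoint returns a copy, so the caller's
# dicts stay clean; strip_cache_breakpoint mutates in place, idempotently.
for msg in messages:
    strip_cache_breakpoint(msg)
assert all(CACHE_BREAKPOINT_KEY not in m for m in messages)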
@@ -5,9 +5,9 @@ from pathlib import Path
import pytest

from crewai import Agent
from crewai.agent.utils import append_skill_context
from crewai.skills.loader import activate_skill, discover_skills, format_skill_context
from crewai.skills.models import INSTRUCTIONS, METADATA
from crewai.utilities.prompts import Prompts


def _create_skill_dir(parent: Path, name: str, body: str = "Body.") -> Path:
@@ -34,7 +34,7 @@ class TestSkillDiscoveryAndActivation:
        assert activated.instructions == "Use this skill."

        context = format_skill_context(activated)
        assert '<skill name="my-skill">' in context
        assert "## Skill: my-skill" in context
        assert "Use this skill." in context

    def test_filter_by_skill_names(self, tmp_path: Path) -> None:
@@ -94,9 +94,7 @@ class TestSkillDiscoveryAndActivation:
        assert agent.skills[0].disclosure_level == METADATA
        assert agent.skills[0].instructions is None

        result = Prompts(agent=agent, has_tools=False, use_system_prompt=True).task_execution()
        system = getattr(result, "system", "") or result.prompt
        assert '<skill name="travel">' in system
        assert "Skill travel" in system
        # METADATA-level skills must not leak full instructions into the prompt
        assert "Use this skill for travel planning." not in system
        prompt = append_skill_context(agent, "Plan a 10-day Japan itinerary.")
        assert "## Skill: travel" in prompt
        assert "Skill travel" in prompt
        assert "Use this skill for travel planning." not in prompt

@@ -105,7 +105,7 @@ class TestFormatSkillContext:
            frontmatter=fm, path=tmp_path, disclosure_level=METADATA
        )
        ctx = format_skill_context(skill)
        assert '<skill name="test-skill">' in ctx
        assert "## Skill: test-skill" in ctx
        assert "A skill" in ctx

    def test_instructions_level(self, tmp_path: Path) -> None:
@@ -117,7 +117,7 @@ class TestFormatSkillContext:
            instructions="Do these things.",
        )
        ctx = format_skill_context(skill)
        assert '<skill name="test-skill">' in ctx
        assert "## Skill: test-skill" in ctx
        assert "Do these things." in ctx

    def test_no_instructions_at_instructions_level(self, tmp_path: Path) -> None:
@@ -129,7 +129,7 @@ class TestFormatSkillContext:
            instructions=None,
        )
        ctx = format_skill_context(skill)
        assert ctx == '<skill name="test-skill">\nA skill\n</skill>'
        assert ctx == "## Skill: test-skill\nA skill"

    def test_resources_level(self, tmp_path: Path) -> None:
        fm = SkillFrontmatter(name="test-skill", description="A skill")

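Note: each pair of near-identical assertions above is the same line shown from both sides of the change; format_skill_context is being switched between an XML-style wrapper and a markdown heading. Per test_no_instructions_at_instructions_level, the two full renderings at METADATA level are:

wrapped = '<skill name="test-skill">\nA skill\n</skill>'  # XML-style wrapper form
heading = "## Skill: test-skill\nA skill"                 # markdown-heading form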
@@ -256,11 +256,6 @@ def test_multiple_crews_in_flow_span_lifecycle():
    mock_llm_2.call.assert_called()


@pytest.mark.skip(
    reason="Sync Agent.execute_task does not await AgentExecutor.invoke when invoke "
    "auto-returns a coroutine inside an async flow. Needs a fix in agent/core.py "
    "_execute_without_timeout (out of scope for this test cleanup pass)."
)
@pytest.mark.asyncio
async def test_crew_execution_span_in_async_flow():
    """Test that crew execution spans work in async flow methods.

@@ -2990,12 +2990,6 @@ def test_manager_agent_with_tools_raises_exception(researcher, writer):
        crew.kickoff()


@pytest.mark.xfail(
    strict=True,
    reason="crew.train() relies on CrewAgentExecutor._format_feedback_message; "
    "AgentExecutor (the new default) does not implement training feedback yet. "
    "Remove this xfail once training is migrated to AgentExecutor.",
)
@pytest.mark.vcr()
def test_crew_train_success(researcher, writer, monkeypatch):
    task = Task(

@@ -596,134 +596,6 @@ class TestHumanFeedbackLearn:
        # llm defaults to "gpt-4o-mini" at the function level
        assert config.llm == "gpt-4o-mini"

    def test_pre_review_failure_logs_and_returns_raw_output(self, caplog):
        """Pre-review LLM failure falls back to raw output AND logs a warning."""
        from crewai.memory.types import MemoryMatch, MemoryRecord

        class LearnFlow(Flow):
            @start()
            @human_feedback(message="Review:", llm="gpt-4o-mini", learn=True)
            def produce(self):
                return "raw draft"

        flow = LearnFlow()
        flow.memory = MagicMock()
        flow.memory.recall.return_value = [
            MemoryMatch(
                record=MemoryRecord(content="some lesson", embedding=[]),
                score=0.9,
                match_reasons=["semantic"],
            )
        ]

        captured: dict[str, Any] = {}

        def capture_feedback(message, output, metadata=None, emit=None):
            captured["shown_to_human"] = output
            return ""  # empty -> no distillation path

        with (
            patch.object(flow, "_request_human_feedback", side_effect=capture_feedback),
            patch("crewai.llm.LLM") as MockLLM,
            caplog.at_level("WARNING", logger="crewai.flow.human_feedback"),
        ):
            mock_llm = MagicMock()
            mock_llm.supports_function_calling.return_value = True
            mock_llm.call.side_effect = RuntimeError("simulated pre-review failure")
            MockLLM.return_value = mock_llm

            flow.produce()

        assert captured["shown_to_human"] == "raw draft"
        assert any(
            "HITL pre-review failed" in rec.message
            and rec.levelname == "WARNING"
            and rec.exc_info is not None
            for rec in caplog.records
        )

    def test_pre_review_failure_strict_reraises(self):
        """When learn_strict=True, pre-review failures propagate instead of falling back."""
        from crewai.memory.types import MemoryMatch, MemoryRecord

        class LearnFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                llm="gpt-4o-mini",
                learn=True,
                learn_strict=True,
            )
            def produce(self):
                return "raw draft"

        flow = LearnFlow()
        flow.memory = MagicMock()
        flow.memory.recall.return_value = [
            MemoryMatch(
                record=MemoryRecord(content="some lesson", embedding=[]),
                score=0.9,
                match_reasons=["semantic"],
            )
        ]

        with (
            patch.object(flow, "_request_human_feedback", return_value=""),
            patch("crewai.llm.LLM") as MockLLM,
        ):
            mock_llm = MagicMock()
            mock_llm.supports_function_calling.return_value = True
            mock_llm.call.side_effect = RuntimeError("simulated pre-review failure")
            MockLLM.return_value = mock_llm

            with pytest.raises(RuntimeError, match="simulated pre-review failure"):
                flow.produce()

    def test_distillation_failure_logs_and_does_not_block_flow(self, caplog):
        """Distillation LLM failure logs a warning but does not break the flow."""

        class LearnFlow(Flow):
            @start()
            @human_feedback(message="Review:", llm="gpt-4o-mini", learn=True)
            def produce(self):
                return "raw draft"

        flow = LearnFlow()
        flow.memory = MagicMock()
        flow.memory.recall.return_value = []  # no pre-review path

        with (
            patch.object(
                flow, "_request_human_feedback", return_value="please add citations"
            ),
            patch("crewai.llm.LLM") as MockLLM,
            caplog.at_level("WARNING", logger="crewai.flow.human_feedback"),
        ):
            mock_llm = MagicMock()
            mock_llm.supports_function_calling.return_value = True
            mock_llm.call.side_effect = RuntimeError("simulated distill failure")
            MockLLM.return_value = mock_llm

            flow.produce()  # must not raise

        flow.memory.remember_many.assert_not_called()
        assert any(
            "HITL lesson distillation failed" in rec.message
            and rec.levelname == "WARNING"
            for rec in caplog.records
        )

    def test_learn_strict_config_propagates(self):
        """learn_strict is captured on the decorator config."""

        @human_feedback(message="Review:", learn=True, learn_strict=True)
        def test_method(self):
            return "output"

        config = test_method.__human_feedback_config__
        assert config is not None
        assert config.learn_strict is True


class TestHumanFeedbackFinalOutputPreservation:
    """Tests for preserving method return value as flow's final output when @human_feedback with emit is terminal.

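For orientation, the decorator surface these tests pin down, mirrored as a minimal sketch (the human_feedback import path is an assumption; the tests above use the name unqualified):

from crewai.flow.human_feedback import human_feedback  # import path assumed

@human_feedback(message="Review:", llm="gpt-4o-mini", learn=True, learn_strict=False)
def produce(self):
    return "raw draft"

# The decorator records its arguments on the wrapped function. With
# learn_strict=False, pre-review and distillation failures fall back to the
# raw output and log a WARNING instead of raising (per the tests above).
config = produce.__human_feedback_config__
assert config is not None
assert config.llm == "gpt-4o-mini"
assert config.learn_strict is False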
207
lib/crewai/tests/test_list_tools_crew.py
Normal file
@@ -0,0 +1,207 @@
"""Tests for Crew.list_tools()."""

from __future__ import annotations

from typing import Any
from unittest.mock import patch

import pytest
from pydantic import BaseModel

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.process import Process
from crewai.task import Task
from crewai.tools import BaseTool


class _Args(BaseModel):
    pass


def _tool(tool_name: str) -> BaseTool:
    class _T(BaseTool):
        name: str = tool_name
        description: str = "test tool"
        args_schema: type = _Args

        def _run(self, **_: Any) -> str:
            return ""

    return _T()


@pytest.fixture
def writer():
    return Agent(role="writer", goal="g", backstory="b", tools=[_tool("search")])


@pytest.fixture
def editor():
    return Agent(role="editor", goal="g", backstory="b")


def test_lists_user_defined_agent_tools(writer):
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer], tasks=[task])

    assert crew.list_tools() == {"writer": ["search"]}


def test_includes_task_level_tool_overrides(writer):
    extra = _tool("calculator")
    task = Task(description="d", expected_output="e", agent=writer, tools=[extra])
    crew = Crew(agents=[writer], tasks=[task])

    assert crew.list_tools() == {"writer": ["search", "calculator"]}


def test_dedupes_when_agent_and_task_share_a_tool(writer):
    duplicate = _tool("search")
    task = Task(description="d", expected_output="e", agent=writer, tools=[duplicate])
    crew = Crew(agents=[writer], tasks=[task])

    assert crew.list_tools() == {"writer": ["search"]}


def test_peer_delegation_adds_delegate_and_ask_tools(writer, editor):
    writer.allow_delegation = True
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer, editor], tasks=[task])

    tools = crew.list_tools()
    assert "Delegate work to coworker" in tools["writer"]
    assert "Ask question to coworker" in tools["writer"]
    assert "Delegate work to coworker" not in tools["editor"]


def test_peer_delegation_skipped_when_only_one_agent(writer):
    writer.allow_delegation = True
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer], tasks=[task])

    assert "Delegate work to coworker" not in crew.list_tools()["writer"]


def test_hierarchical_includes_default_manager(writer, editor):
    writer.allow_delegation = True
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(
        agents=[writer, editor],
        tasks=[task],
        process=Process.hierarchical,
        manager_llm="gpt-4o-mini",
    )

    tools = crew.list_tools()
    assert "writer" in tools
    assert "Delegate work to coworker" not in tools["writer"]
    # Default manager role from i18n.
    manager_keys = [k for k in tools if k not in {"writer", "editor"}]
    assert len(manager_keys) == 1
    manager_role = manager_keys[0]
    assert tools[manager_role] == [
        "Delegate work to coworker",
        "Ask question to coworker",
    ]


def test_hierarchical_uses_user_provided_manager_role(writer, editor):
    manager = Agent(role="Chief", goal="g", backstory="b", allow_delegation=True)
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(
        agents=[writer, editor],
        tasks=[task],
        process=Process.hierarchical,
        manager_agent=manager,
        manager_llm="gpt-4o-mini",
    )

    tools = crew.list_tools()
    assert "Chief" in tools
    assert tools["Chief"] == [
        "Delegate work to coworker",
        "Ask question to coworker",
    ]


def test_multimodal_added_when_llm_does_not_support_it(writer):
    writer.multimodal = True
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer], tasks=[task])

    with patch.object(type(writer.llm), "supports_multimodal", return_value=False):
        tools = crew.list_tools()

    assert "Add image to content" in tools["writer"]


def test_multimodal_skipped_when_llm_supports_it(writer):
    writer.multimodal = True
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer], tasks=[task])

    with patch.object(type(writer.llm), "supports_multimodal", return_value=True):
        tools = crew.list_tools()

    assert "Add image to content" not in tools["writer"]


def test_crew_level_memory_adds_search_and_save(writer):
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer], tasks=[task], memory=True)

    tools = crew.list_tools()
    assert "Search memory" in tools["writer"]
    assert "Save to memory" in tools["writer"]


def test_no_memory_means_no_memory_tools(writer):
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer], tasks=[task])  # memory defaults to False

    tools = crew.list_tools()
    assert "Search memory" not in tools["writer"]
    assert "Save to memory" not in tools["writer"]


def test_mcp_emits_placeholder_per_server():
    a = Agent(role="r", goal="g", backstory="b", mcps=["github", "slack"])
    task = Task(description="d", expected_output="e", agent=a)
    crew = Crew(agents=[a], tasks=[task])

    assert crew.list_tools()["r"] == ["mcp:github:*", "mcp:slack:*"]


def test_apps_emit_placeholder_with_action_split():
    a = Agent(
        role="r",
        goal="g",
        backstory="b",
        apps=["gmail", "slack#send_message"],
    )
    task = Task(description="d", expected_output="e", agent=a)
    crew = Crew(agents=[a], tasks=[task])

    assert crew.list_tools()["r"] == ["app:gmail:*", "app:slack:send_message"]


def test_file_reader_added_when_task_has_input_files(writer):
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer], tasks=[task])

    sentinel_files = {"foo.txt": object()}
    with patch("crewai.crew.get_all_files", return_value=sentinel_files):
        tools = crew.list_tools()

    assert "read_file" in tools["writer"]


def test_file_reader_not_added_when_no_input_files(writer):
    task = Task(description="d", expected_output="e", agent=writer)
    crew = Crew(agents=[writer], tasks=[task])

    with patch("crewai.crew.get_all_files", return_value={}):
        tools = crew.list_tools()

    assert "read_file" not in tools["writer"]
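Taken together, these tests fix the shape of the new API: Crew.list_tools() maps each agent role to the tool names that agent would actually run with, including injected delegation, memory, multimodal, MCP/app placeholder, and file-reader entries. A minimal call-site sketch mirroring the fixtures above:

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task

writer = Agent(role="writer", goal="g", backstory="b")
task = Task(description="d", expected_output="e", agent=writer)
# memory=True should inject "Search memory" / "Save to memory" per
# test_crew_level_memory_adds_search_and_save above.
crew = Crew(agents=[writer], tasks=[task], memory=True)

for role, names in crew.list_tools().items():
    print(role, "->", names)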
132
lib/crewai/tests/test_list_tools_flow.py
Normal file
@@ -0,0 +1,132 @@
"""Tests for Flow.list_tools()."""

from __future__ import annotations

from typing import Any

import pytest
from pydantic import BaseModel

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.flow.flow import Flow, start
from crewai.task import Task
from crewai.tools import BaseTool


class _Args(BaseModel):
    pass


def _tool(tool_name: str) -> BaseTool:
    class _T(BaseTool):
        name: str = tool_name
        description: str = "test"
        args_schema: type = _Args

        def _run(self, **_: Any) -> str:
            return ""

    return _T()


def _crew(role: str = "writer", tool_name: str = "search") -> Crew:
    agent = Agent(role=role, goal="g", backstory="b", tools=[_tool(tool_name)])
    task = Task(description="d", expected_output="e", agent=agent)
    return Crew(agents=[agent], tasks=[task])


def test_empty_flow_returns_empty_dict():
    class EmptyFlow(Flow):
        @start()
        def kickoff(self):
            return None

    assert EmptyFlow().list_tools() == {}


def test_crew_attribute_keyed_by_attribute_name():
    class SingleCrewFlow(Flow):
        def __init__(self):
            super().__init__()
            self.poem_crew = _crew()

        @start()
        def kickoff(self):
            return self.poem_crew.kickoff()

    assert SingleCrewFlow().list_tools() == {"poem_crew": {"writer": ["search"]}}


def test_list_of_crews_keyed_with_index_suffix():
    class ListFlow(Flow):
        def __init__(self):
            super().__init__()
            self.research_crews = [_crew("a", "t1"), _crew("b", "t2")]

        @start()
        def kickoff(self):
            return None

    tools = ListFlow().list_tools()
    assert tools == {
        "research_crews[0]": {"a": ["t1"]},
        "research_crews[1]": {"b": ["t2"]},
    }


def test_tuple_of_crews_supported():
    class TupleFlow(Flow):
        def __init__(self):
            super().__init__()
            self.crews_tuple = (_crew("a", "t1"),)

        @start()
        def kickoff(self):
            return None

    assert TupleFlow().list_tools() == {"crews_tuple[0]": {"a": ["t1"]}}


def test_underscore_prefixed_attributes_ignored():
    class HiddenFlow(Flow):
        def __init__(self):
            super().__init__()
            self._private_crew = _crew("a", "t1")
            self.public_crew = _crew("b", "t2")

        @start()
        def kickoff(self):
            return None

    tools = HiddenFlow().list_tools()
    assert "_private_crew" not in tools
    assert tools == {"public_crew": {"b": ["t2"]}}


def test_non_crew_attributes_skipped():
    class MixedFlow(Flow):
        def __init__(self):
            super().__init__()
            self.label = "some-string"
            self.config = {"k": "v"}
            self.poem_crew = _crew()

        @start()
        def kickoff(self):
            return None

    assert MixedFlow().list_tools() == {"poem_crew": {"writer": ["search"]}}


def test_list_with_non_crew_items_filtered():
    class PartialFlow(Flow):
        def __init__(self):
            super().__init__()
            self.things = [_crew("a", "t1"), "not a crew", 42]

        @start()
        def kickoff(self):
            return None

    assert PartialFlow().list_tools() == {"things[0]": {"a": ["t1"]}}
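Per these tests, Flow.list_tools() walks the instance's public attributes, keys each discovered Crew by attribute name (adding an index suffix for list and tuple entries, and skipping underscore-prefixed names and non-crew values), and nests that crew's own list_tools() result. A sketch reusing the _crew helper defined above:

class ReportFlow(Flow):
    def __init__(self):
        super().__init__()
        self.drafting_crew = _crew()            # keyed as "drafting_crew"
        self.review_crews = [_crew("a", "t1")]  # keyed as "review_crews[0]"
        self._scratch = _crew("b", "t2")        # skipped: underscore-prefixed

    @start()
    def kickoff(self):
        return None

assert ReportFlow().list_tools() == {
    "drafting_crew": {"writer": ["search"]},
    "review_crews[0]": {"a": ["t1"]},
}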
@@ -346,14 +346,12 @@ def test_agent_emits_execution_error_event(base_agent, base_task):
        received_events.append(event)
        event_received.set()

    from crewai.experimental.agent_executor import AgentExecutor

    error_message = "Error happening while sending prompt to model."
    base_agent.max_retry_limit = 0

    # Patch at the class level since agent_executor is created lazily
    with patch.object(
        AgentExecutor, "invoke", side_effect=Exception(error_message)
        CrewAgentExecutor, "invoke", side_effect=Exception(error_message)
    ):
        with pytest.raises(Exception):  # noqa: B017
            base_agent.execute_task(

@@ -1,3 +1,3 @@
"""CrewAI development tools."""

__version__ = "1.14.5a5"
__version__ = "1.14.5a2"

@@ -323,11 +323,8 @@ def update_pyproject_version(file_path: Path, new_version: str) -> bool:

_DEFAULT_WORKSPACE_PACKAGES: Final[list[str]] = [
    "crewai",
    "crewai-cli",
    "crewai-core",
    "crewai-devtools",
    "crewai-files",
    "crewai-tools",
    "crewai-devtools",
]


@@ -1354,14 +1351,6 @@ def _repin_crewai_install(run_value: str, version: str) -> str:

_DEPLOYMENT_TEST_REPO: Final[str] = "crewAIInc/crew_deployment_test"

_PUBLISHED_WORKSPACE_PACKAGES: Final[tuple[str, ...]] = (
    "crewai",
    "crewai-cli",
    "crewai-core",
    "crewai-files",
    "crewai-tools",
)

_PYPI_POLL_INTERVAL: Final[int] = 15
_PYPI_POLL_TIMEOUT: Final[int] = 600

@@ -1414,9 +1403,14 @@ def _update_deployment_test_repo(version: str, is_prerelease: bool) -> None:
    ]

    if pyproject_changed:
        lock_cmd = ["uv", "lock"]
        for pkg in _PUBLISHED_WORKSPACE_PACKAGES:
            lock_cmd.extend(["--refresh-package", pkg])
        lock_cmd = [
            "uv",
            "lock",
            "--refresh-package",
            "crewai",
            "--refresh-package",
            "crewai-tools",
        ]
        if is_prerelease:
            lock_cmd.append("--prerelease=allow")

@@ -1621,9 +1615,16 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
    _wait_for_pypi("crewai", version)

    console.print("\nSyncing workspace...")
    sync_cmd = ["uv", "sync"]
    for pkg in _PUBLISHED_WORKSPACE_PACKAGES:
        sync_cmd.extend(["--refresh-package", pkg])
    sync_cmd = [
        "uv",
        "sync",
        "--refresh-package",
        "crewai",
        "--refresh-package",
        "crewai-tools",
        "--refresh-package",
        "crewai-files",
    ]
    if is_prerelease:
        sync_cmd.append("--prerelease=allow")


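The two hunks above trade hard-coded --refresh-package flags against a loop over _PUBLISHED_WORKSPACE_PACKAGES (the stripped diff markers hide which side is old). For clarity, the loop form expands to one flag pair per published package:

_PUBLISHED_WORKSPACE_PACKAGES = ("crewai", "crewai-cli", "crewai-core", "crewai-files", "crewai-tools")

lock_cmd = ["uv", "lock"]
for pkg in _PUBLISHED_WORKSPACE_PACKAGES:
    lock_cmd.extend(["--refresh-package", pkg])
# lock_cmd == ["uv", "lock",
#              "--refresh-package", "crewai",
#              "--refresh-package", "crewai-cli",
#              "--refresh-package", "crewai-core",
#              "--refresh-package", "crewai-files",
#              "--refresh-package", "crewai-tools"]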
@@ -4,10 +4,8 @@ from pathlib import Path
from textwrap import dedent

from crewai_devtools.cli import (
    _DEFAULT_WORKSPACE_PACKAGES,
    _pin_crewai_deps,
    _repin_crewai_install,
    update_pyproject_dependencies,
    update_pyproject_version,
    update_template_dependencies,
)
@@ -228,79 +226,6 @@ class TestRepinCrewaiInstall:
        assert _repin_crewai_install(cmd, "2.0.0") == cmd


# --- update_pyproject_dependencies ---


class TestUpdatePyprojectDependencies:
    def test_default_packages_cover_all_workspace_members(self) -> None:
        """Every workspace member must be in the default rewrite list.

        Without this, a version bump silently leaves stale pins behind for any
        workspace package missing from the list (see incident with 1.14.5a5).
        """
        import tomlkit

        workspace_root = Path(__file__).resolve().parents[3]
        root_pyproject = (workspace_root / "pyproject.toml").read_text()

        members = tomlkit.parse(root_pyproject)["tool"]["uv"]["workspace"]["members"]
        expected = {
            tomlkit.parse((workspace_root / m / "pyproject.toml").read_text())[
                "project"
            ]["name"]
            for m in members
        }

        assert expected.issubset(set(_DEFAULT_WORKSPACE_PACKAGES))

    def test_rewrites_all_workspace_pins(self, tmp_path: Path) -> None:
        pyproject = tmp_path / "pyproject.toml"
        pyproject.write_text(
            dedent("""\
                [project]
                dependencies = [
                    "crewai-core==1.0.0",
                    "crewai-cli==1.0.0",
                    "requests>=2.0",
                ]

                [project.optional-dependencies]
                tools = [
                    "crewai-tools==1.0.0",
                ]
                files = [
                    "crewai-files==1.0.0",
                ]
            """)
        )

        assert update_pyproject_dependencies(pyproject, "2.0.0") is True
        result = pyproject.read_text()
        assert '"crewai-core==2.0.0"' in result
        assert '"crewai-cli==2.0.0"' in result
        assert '"crewai-tools==2.0.0"' in result
        assert '"crewai-files==2.0.0"' in result
        assert '"requests>=2.0"' in result

    def test_leaves_bare_crewai_pin_alone(self, tmp_path: Path) -> None:
        """`crewai==` must not collide with `crewai-core==` etc."""
        pyproject = tmp_path / "pyproject.toml"
        pyproject.write_text(
            dedent("""\
                [project]
                dependencies = [
                    "crewai==1.0.0",
                    "crewai-core==1.0.0",
                ]
            """)
        )

        update_pyproject_dependencies(pyproject, "2.0.0")
        result = pyproject.read_text()
        assert '"crewai==2.0.0"' in result
        assert '"crewai-core==2.0.0"' in result


# --- update_template_dependencies ---


@@ -170,21 +170,21 @@ info = "Commits must follow Conventional Commits 1.0.0."


[tool.uv]
exclude-newer = "3 days"
# Pinned to include the security patch releases (authlib 1.6.11,
# langchain-text-splitters 1.1.2) uploaded on 2026-04-16, and the
# litellm 1.83.7+ SSTI fix (GHSA-xqmj-j6mv-4862) uploaded on 2026-04-13.
exclude-newer = "2026-04-27"

# composio-core pins rich<14 but textual requires rich>=14.
# onnxruntime 1.24+ dropped Python 3.10 wheels; cap it so qdrant[fastembed] resolves on 3.10.
# fastembed 0.7.x and docling 2.63 cap pillow<12; the removed APIs don't affect them.
# langchain-core <1.2.31 has GHSA-926x-3r5x-gfhw and is required by langchain-text-splitters 1.1.2+.
# langchain-core 1.0.0-1.3.2 has GHSA-pjwx-r37v-7724 (unsafe deserialization via broad load() allowlists); force 1.3.3+.
# langchain-text-splitters <1.1.2 has GHSA-fv5p-p927-qmxr (SSRF bypass in split_text_from_url).
# transformers 4.57.6 has CVE-2026-1839; force 5.4+ (docling 2.84 allows huggingface-hub>=1).
# cryptography 46.0.6 has CVE-2026-39892; force 46.0.7+.
# pypdf <6.10.2 has GHSA-4pxv-j86v-mhcw, GHSA-7gw9-cf7v-778f, GHSA-x284-j5p8-9c5p; force 6.10.2+.
# uv <0.11.6 has GHSA-pjjw-68hj-v9mw; force 0.11.6+.
# python-multipart <0.0.27 has GHSA-pp6c-gr5w-3c5g (DoS via unbounded multipart headers).
# gitpython <3.1.50 has GHSA-mv93-w799-cj2w (config_writer newline injection bypassing the 3.1.49 patch -> RCE via core.hooksPath).
# urllib3 <2.7.0 has GHSA-qccp-gfcp-xxvc (ProxyManager cross-origin redirect leaks Authorization/Cookie) and GHSA-mf9v-mfxr-j63j (streaming decompression-bomb bypass); force 2.7.0+.
# python-multipart <0.0.26 has GHSA-mj87-hwqh-73pj; force 0.0.26+.
# langsmith <0.7.31 has GHSA-rr7j-v2q5-chgv (streaming token redaction bypass); force 0.7.31+.
# authlib <1.6.11 has GHSA-jj8c-mmj3-mmgv (CSRF bypass in cache-based state storage).
# litellm 1.83.8+ hard-pins openai==2.24.0, missing openai.types.responses used by crewai;
@@ -194,15 +194,14 @@ override-dependencies = [
    "rich>=13.7.1",
    "onnxruntime<1.24; python_version < '3.11'",
    "pillow>=12.1.1",
    "langchain-core>=1.3.3,<2",
    "langchain-core>=1.2.31,<2",
    "langchain-text-splitters>=1.1.2,<2",
    "urllib3>=2.7.0",
    "urllib3>=2.6.3",
    "transformers>=5.4.0; python_version >= '3.10'",
    "cryptography>=46.0.7",
    "pypdf>=6.10.2,<7",
    "uv>=0.11.6,<1",
    "python-multipart>=0.0.27,<1",
    "gitpython>=3.1.50,<4",
    "python-multipart>=0.0.26,<1",
    "langsmith>=0.7.31,<0.8",
    "authlib>=1.6.11",
]

63
uv.lock
generated
@@ -13,8 +13,7 @@ resolution-markers = [
]

[options]
exclude-newer = "2026-05-08T16:33:02.834109Z"
exclude-newer-span = "P3D"
exclude-newer = "2026-04-27T16:00:00Z"

[manifest]
members = [
@@ -28,18 +27,17 @@ members = [
overrides = [
    { name = "authlib", specifier = ">=1.6.11" },
    { name = "cryptography", specifier = ">=46.0.7" },
    { name = "gitpython", specifier = ">=3.1.50,<4" },
    { name = "langchain-core", specifier = ">=1.3.3,<2" },
    { name = "langchain-core", specifier = ">=1.2.31,<2" },
    { name = "langchain-text-splitters", specifier = ">=1.1.2,<2" },
    { name = "langsmith", specifier = ">=0.7.31,<0.8" },
    { name = "onnxruntime", marker = "python_full_version < '3.11'", specifier = "<1.24" },
    { name = "openai", specifier = ">=2.30.0,<3" },
    { name = "pillow", specifier = ">=12.1.1" },
    { name = "pypdf", specifier = ">=6.10.2,<7" },
    { name = "python-multipart", specifier = ">=0.0.27,<1" },
    { name = "python-multipart", specifier = ">=0.0.26,<1" },
    { name = "rich", specifier = ">=13.7.1" },
    { name = "transformers", marker = "python_full_version >= '3.10'", specifier = ">=5.4.0" },
    { name = "urllib3", specifier = ">=2.7.0" },
    { name = "urllib3", specifier = ">=2.6.3" },
    { name = "uv", specifier = ">=0.11.6,<1" },
]

@@ -1306,6 +1304,7 @@ dependencies = [
    { name = "python-dotenv" },
    { name = "pyyaml" },
    { name = "regex" },
    { name = "textual" },
    { name = "tokenizers" },
    { name = "tomli" },
    { name = "tomli-w" },
@@ -1404,7 +1403,7 @@ requires-dist = [
    { name = "lancedb", specifier = ">=0.29.2,<0.30.1" },
    { name = "litellm", marker = "extra == 'litellm'", specifier = ">=1.83.7,<1.84" },
    { name = "mcp", specifier = "~=1.26.0" },
    { name = "mem0ai", marker = "extra == 'mem0'", specifier = ">=2.0.0,<3" },
    { name = "mem0ai", marker = "extra == 'mem0'", specifier = "~=0.1.94" },
    { name = "openai", specifier = ">=2.30.0,<3" },
    { name = "openpyxl", specifier = "~=3.1.5" },
    { name = "openpyxl", marker = "extra == 'openpyxl'", specifier = "~=3.1.5" },
@@ -1422,6 +1421,7 @@ requires-dist = [
    { name = "qdrant-client", extras = ["fastembed"], marker = "extra == 'qdrant'", specifier = "~=1.14.3" },
    { name = "qdrant-edge-py", marker = "extra == 'qdrant-edge'", specifier = ">=0.6.0" },
    { name = "regex", specifier = "~=2026.1.15" },
    { name = "textual", specifier = ">=7.5.0" },
    { name = "tiktoken", marker = "extra == 'embeddings'", specifier = ">=0.8.0,<0.13" },
    { name = "tokenizers", specifier = ">=0.21,<1" },
    { name = "tomli", specifier = "~=2.0.2" },
@@ -1435,7 +1435,6 @@ name = "crewai-cli"
source = { editable = "lib/cli" }
dependencies = [
    { name = "appdirs" },
    { name = "certifi" },
    { name = "click" },
    { name = "crewai-core" },
    { name = "cryptography" },
@@ -1446,7 +1445,6 @@ dependencies = [
    { name = "pyjwt" },
    { name = "python-dotenv" },
    { name = "rich" },
    { name = "textual" },
    { name = "tomli" },
    { name = "tomli-w" },
    { name = "uv" },
@@ -1455,7 +1453,6 @@ dependencies = [
[package.metadata]
requires-dist = [
    { name = "appdirs", specifier = "~=1.4.4" },
    { name = "certifi" },
    { name = "click", specifier = "~=8.1.7" },
    { name = "crewai-core", editable = "lib/crewai-core" },
    { name = "cryptography", specifier = ">=42.0" },
@@ -1466,7 +1463,6 @@ requires-dist = [
    { name = "pyjwt", specifier = ">=2.9.0,<3" },
    { name = "python-dotenv", specifier = ">=1.2.2,<2" },
    { name = "rich", specifier = ">=13.7.1" },
    { name = "textual", specifier = ">=7.5.0" },
    { name = "tomli", specifier = "~=2.0.2" },
    { name = "tomli-w", specifier = "~=1.1.0" },
    { name = "uv", specifier = "~=0.11.6" },
@@ -1707,7 +1703,7 @@ requires-dist = [
    { name = "e2b-code-interpreter", marker = "extra == 'e2b'", specifier = "~=2.6.0" },
    { name = "exa-py", marker = "extra == 'exa-py'", specifier = ">=1.8.7" },
    { name = "firecrawl-py", marker = "extra == 'firecrawl-py'", specifier = ">=1.8.0" },
    { name = "gitpython", marker = "extra == 'github'", specifier = ">=3.1.50,<4" },
    { name = "gitpython", marker = "extra == 'github'", specifier = ">=3.1.47,<4" },
    { name = "hyperbrowser", marker = "extra == 'hyperbrowser'", specifier = ">=0.18.0" },
    { name = "langchain-apify", marker = "extra == 'apify'", specifier = ">=0.1.2,<1.0.0" },
    { name = "linkup-sdk", marker = "extra == 'linkup-sdk'", specifier = ">=0.2.2" },
@@ -2700,14 +2696,14 @@ wheels = [

[[package]]
name = "gitpython"
version = "3.1.50"
version = "3.1.47"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "gitdb" },
]
sdist = { url = "https://files.pythonhosted.org/packages/33/f6/354ae6491228b5eb40e10d89c4d13c651fe1cf7556e35ebdded50cff57ce/gitpython-3.1.50.tar.gz", hash = "sha256:80da2d12504d52e1f998772dc5baf6e553f8d2fcfe1fcc226c9d9a2ee3372dcc", size = 219798, upload-time = "2026-05-06T04:01:26.571Z" }
sdist = { url = "https://files.pythonhosted.org/packages/c1/bd/50db468e9b1310529a19fce651b3b0e753b5c07954d486cba31bbee9a5d5/gitpython-3.1.47.tar.gz", hash = "sha256:dba27f922bd2b42cb54c87a8ab3cb6beb6bf07f3d564e21ac848913a05a8a3cd", size = 216978, upload-time = "2026-04-22T02:44:44.059Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/20/7a/1c6e3562dfd8950adbb11ffbc65d21e7c89d01a6e4f137fa981056de25c5/gitpython-3.1.50-py3-none-any.whl", hash = "sha256:d352abe2908d07355014abdd21ddf798c2a961469239afec4962e9da884858f9", size = 212507, upload-time = "2026-05-06T04:01:23.799Z" },
    { url = "https://files.pythonhosted.org/packages/f2/c5/a1bc0996af85757903cf2bf444a7824e68e0035ce63fb41d6f76f9def68b/gitpython-3.1.47-py3-none-any.whl", hash = "sha256:489f590edfd6d20571b2c0e72c6a6ac6915ee8b8cd04572330e3842207a78905", size = 209547, upload-time = "2026-04-22T02:44:41.271Z" },
]

[[package]]
@@ -3835,11 +3831,10 @@ wheels = [

[[package]]
name = "langchain-core"
version = "1.3.3"
version = "1.3.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "jsonpatch" },
    { name = "langchain-protocol" },
    { name = "langsmith" },
    { name = "packaging" },
    { name = "pydantic" },
@@ -3848,21 +3843,9 @@ dependencies = [
    { name = "typing-extensions" },
    { name = "uuid-utils" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d3/ae/8b74458fc3850ec3d150eb9f45e857db129dafa801fb5cf173dfc9f8bbf3/langchain_core-1.3.3.tar.gz", hash = "sha256:fa510a5db8efdc0c6ff41c0939fb5c00a0183c11f6b84233e892e3227ff69182", size = 915041, upload-time = "2026-05-05T19:02:36.612Z" }
sdist = { url = "https://files.pythonhosted.org/packages/92/fe/20190232d9b513242899dbb0c2bb77e31b4d61e343743adbe90ebc2603d2/langchain_core-1.3.0.tar.gz", hash = "sha256:14a39f528bf459aa3aa40d0a7f7f1bae7520d435ef991ae14a4ceb74d8c49046", size = 860755, upload-time = "2026-04-17T14:51:38.298Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/1f/01/4771b7ab2af1d1aba5b710bd8f13d9225c609425214b357590a17b01be77/langchain_core-1.3.3-py3-none-any.whl", hash = "sha256:18aae8506f37da7f74398492279a7d6efcee4f8e23c4c41c7af080eeb7ef7bd1", size = 543857, upload-time = "2026-05-05T19:02:34.52Z" },
]

[[package]]
name = "langchain-protocol"
version = "0.0.15"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/4f/24/9777489d6fbbee64af0c8f96d4f840239c408cf694f3394672807dafc490/langchain_protocol-0.0.15.tar.gz", hash = "sha256:9ab2d11ee73944754f10e037e717098d3a6796f0e58afa9cadda6154e7655ade", size = 5862, upload-time = "2026-05-01T22:30:04.748Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/1d/7a/9c97a7b9cbe4c5dc6a44cdb1545450c28f0c8ce89b9c1f0ee7fbad896263/langchain_protocol-0.0.15-py3-none-any.whl", hash = "sha256:461eb794358f83d5e42635a5797799ffec7b4702314e34edf73ac21e75d3ef79", size = 6982, upload-time = "2026-05-01T22:30:03.877Z" },
    { url = "https://files.pythonhosted.org/packages/f8/e2/dbfa347aa072a6dc4cd38d6f9ebfc730b4c14c258c47f480f4c5c546f177/langchain_core-1.3.0-py3-none-any.whl", hash = "sha256:baf16ee028475df177b9ab8869a751c79406d64a6f12125b93802991b566cced", size = 515140, upload-time = "2026-04-17T14:51:36.274Z" },
]

[[package]]
@@ -4431,7 +4414,7 @@ wheels = [

[[package]]
name = "mem0ai"
version = "2.0.1"
version = "0.1.116"
source = { registry = "https://pypi.org/simple" }
dependencies = [
    { name = "openai" },
@@ -4442,9 +4425,9 @@ dependencies = [
    { name = "qdrant-client" },
    { name = "sqlalchemy" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ef/03/3dc535b98310912e4f10083acdbbca2c5e2dfccb3921230a460464f9f4d0/mem0ai-2.0.1.tar.gz", hash = "sha256:070dbc3f1f332c8908379b42a81ab3a96ab169f2f9fa537e6ac719df02478f9c", size = 211820, upload-time = "2026-04-25T17:39:06.744Z" }
sdist = { url = "https://files.pythonhosted.org/packages/60/a0/10482cc437e96d609d5fbbb65ad8eae144fc84f0cb2655d913bfb58d7dff/mem0ai-0.1.116.tar.gz", hash = "sha256:c33e08c5464f96b1cf109893dba5d394d8cc5788a8400d85cb1ceed696ee3204", size = 122053, upload-time = "2025-08-13T20:19:41.119Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/a0/96/e6153262f1464f4d412208732fea31496d9983ade155dd2c5c5492f8f8a4/mem0ai-2.0.1-py3-none-any.whl", hash = "sha256:63da5f50ad0c2514e27c2f380ef03f2ceea47c97873096ddfd997785b58043ec", size = 299461, upload-time = "2026-04-25T17:39:04.143Z" },
    { url = "https://files.pythonhosted.org/packages/4b/70/810bd12d76576402e7c447ffb683f40fdab8cf49eaae6df3db4af48b358f/mem0ai-0.1.116-py3-none-any.whl", hash = "sha256:245b08f1e615e057ebacc52462ab729a7282abe05e8d4957236d893b3d32a990", size = 190315, upload-time = "2025-08-13T20:19:39.649Z" },
]

[[package]]
@@ -7394,11 +7377,11 @@ wheels = [

[[package]]
name = "python-multipart"
version = "0.0.27"
version = "0.0.26"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/69/9b/f23807317a113dc36e74e75eb265a02dd1a4d9082abc3c1064acd22997c4/python_multipart-0.0.27.tar.gz", hash = "sha256:9870a6a8c5a20a5bf4f07c017bd1489006ff8836cff097b6933355ee2b49b602", size = 44043, upload-time = "2026-04-27T10:51:26.649Z" }
sdist = { url = "https://files.pythonhosted.org/packages/88/71/b145a380824a960ebd60e1014256dbb7d2253f2316ff2d73dfd8928ec2c3/python_multipart-0.0.26.tar.gz", hash = "sha256:08fadc45918cd615e26846437f50c5d6d23304da32c341f289a617127b081f17", size = 43501, upload-time = "2026-04-10T14:09:59.473Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/99/78/4126abcbdbd3c559d43e0db7f7b9173fc6befe45d39a2856cc0b8ec2a5a6/python_multipart-0.0.27-py3-none-any.whl", hash = "sha256:6fccfad17a27334bd0193681b369f476eda3409f17381a2d65aa7df3f7275645", size = 29254, upload-time = "2026-04-27T10:51:24.997Z" },
    { url = "https://files.pythonhosted.org/packages/9a/22/f1925cdda983ab66fc8ec6ec8014b959262747e58bdca26a4e3d1da29d56/python_multipart-0.0.26-py3-none-any.whl", hash = "sha256:c0b169f8c4484c13b0dcf2ef0ec3a4adb255c4b7d18d8e420477d2b1dd03f185", size = 28847, upload-time = "2026-04-10T14:09:58.131Z" },
]

[[package]]
@@ -9405,11 +9388,11 @@ wheels = [

[[package]]
name = "urllib3"
version = "2.7.0"
version = "2.6.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/53/0c/06f8b233b8fd13b9e5ee11424ef85419ba0d8ba0b3138bf360be2ff56953/urllib3-2.7.0.tar.gz", hash = "sha256:231e0ec3b63ceb14667c67be60f2f2c40a518cb38b03af60abc813da26505f4c", size = 433602, upload-time = "2026-05-07T16:13:18.596Z" }
sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" }
wheels = [
    { url = "https://files.pythonhosted.org/packages/7f/3e/5db95bcf282c52709639744ca2a8b149baccf648e39c8cc87553df9eae0c/urllib3-2.7.0-py3-none-any.whl", hash = "sha256:9fb4c81ebbb1ce9531cce37674bbc6f1360472bc18ca9a553ede278ef7276897", size = 131087, upload-time = "2026-05-07T16:13:17.151Z" },
    { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
]

[[package]]
