mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-04-10 21:12:37 +00:00
Compare commits
18 Commits
fix/oss-9-
...
1.14.0a2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
baf15a409b | ||
|
|
c907ce473b | ||
|
|
e46402d10d | ||
|
|
bce10f5978 | ||
|
|
d2e57e375b | ||
|
|
d039a075aa | ||
|
|
ce99312db1 | ||
|
|
c571620f8c | ||
|
|
931f3556cf | ||
|
|
914776b7ed | ||
|
|
6ef6fada4d | ||
|
|
1b7be63b60 | ||
|
|
59aa5b2243 | ||
|
|
2e2fae02d2 | ||
|
|
804c26bd01 | ||
|
|
4e46913045 | ||
|
|
335130cb15 | ||
|
|
186ea77c63 |
105
.github/workflows/vulnerability-scan.yml
vendored
Normal file
105
.github/workflows/vulnerability-scan.yml
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
name: Vulnerability Scan
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: [main]
|
||||
schedule:
|
||||
# Run weekly on Monday at 9:00 UTC
|
||||
- cron: '0 9 * * 1'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
pip-audit:
|
||||
name: pip-audit
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Restore global uv cache
|
||||
id: cache-restore
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
|
||||
restore-keys: |
|
||||
uv-main-py3.11-
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "0.8.4"
|
||||
python-version: "3.11"
|
||||
enable-cache: false
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync --all-groups --all-extras --no-install-project
|
||||
|
||||
- name: Install pip-audit
|
||||
run: uv pip install pip-audit
|
||||
|
||||
- name: Run pip-audit
|
||||
run: |
|
||||
uv run pip-audit --desc --aliases --skip-editable --format json --output pip-audit-report.json \
|
||||
--ignore-vuln CVE-2025-69872 \
|
||||
--ignore-vuln CVE-2026-25645 \
|
||||
--ignore-vuln CVE-2026-27448 \
|
||||
--ignore-vuln CVE-2026-27459 \
|
||||
--ignore-vuln PYSEC-2023-235
|
||||
# Ignored CVEs:
|
||||
# CVE-2025-69872 - diskcache 5.6.3: no fix available (latest version)
|
||||
# CVE-2026-25645 - requests 2.32.5: fix requires 2.33.0, blocked by crewai-tools ~=2.32.5 pin
|
||||
# CVE-2026-27448 - pyopenssl 25.3.0: fix requires 26.0.0, blocked by snowflake-connector-python <26.0.0 pin
|
||||
# CVE-2026-27459 - pyopenssl 25.3.0: same as above
|
||||
# PYSEC-2023-235 - couchbase: fixed in 4.6.0 (already upgraded), advisory not yet updated
|
||||
continue-on-error: true
|
||||
|
||||
- name: Display results
|
||||
if: always()
|
||||
run: |
|
||||
if [ -f pip-audit-report.json ]; then
|
||||
echo "## pip-audit Results" >> $GITHUB_STEP_SUMMARY
|
||||
echo '```json' >> $GITHUB_STEP_SUMMARY
|
||||
cat pip-audit-report.json | python3 -m json.tool >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
# Fail if vulnerabilities found
|
||||
python3 -c "
|
||||
import json, sys
|
||||
with open('pip-audit-report.json') as f:
|
||||
data = json.load(f)
|
||||
vulns = [d for d in data.get('dependencies', []) if d.get('vulns')]
|
||||
if vulns:
|
||||
print(f'::error::Found vulnerabilities in {len(vulns)} package(s)')
|
||||
for v in vulns:
|
||||
for vuln in v['vulns']:
|
||||
print(f' - {v[\"name\"]}=={v[\"version\"]}: {vuln[\"id\"]}')
|
||||
sys.exit(1)
|
||||
print('No known vulnerabilities found')
|
||||
"
|
||||
else
|
||||
echo "::error::pip-audit failed to produce a report. Check the pip-audit step logs."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Upload pip-audit report
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: pip-audit-report
|
||||
path: pip-audit-report.json
|
||||
|
||||
- name: Save uv caches
|
||||
if: steps.cache-restore.outputs.cache-hit != 'true'
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
|
||||
|
||||
@@ -4,6 +4,83 @@ description: "تحديثات المنتج والتحسينات وإصلاحات
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="6 أبريل 2026">
|
||||
## v1.14.0a2
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0a2)
|
||||
|
||||
# ملاحظات الإصدار 1.14.0a2
|
||||
|
||||
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2 أبريل 2026">
|
||||
## v1.13.0
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### الميزات
|
||||
- إضافة نموذج RuntimeState RootModel لتوحيد تسلسل الحالة
|
||||
- تعزيز مستمع الأحداث مع نطاقات جديدة للقياس عن أحداث المهارة والذاكرة
|
||||
- إضافة امتداد A2UI مع دعم v0.8/v0.9، والمخططات، والوثائق
|
||||
- إصدار بيانات استخدام الرموز في حدث LLMCallCompletedEvent
|
||||
- تحديث تلقائي لمستودع اختبار النشر أثناء الإصدار
|
||||
- تحسين مرونة الإصدار المؤسسي وتجربة المستخدم
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إضافة بيانات اعتماد مستودع الأدوات إلى تثبيت crewai
|
||||
- إضافة بيانات اعتماد مستودع الأدوات إلى بناء uv في نشر الأدوات
|
||||
- تمرير بيانات التعريف عبر الإعدادات بدلاً من معلمات الأدوات
|
||||
- معالجة نماذج GPT-5.x التي لا تدعم معلمة API `stop`
|
||||
- إضافة GPT-5 وسلسلة o إلى بادئات الرؤية متعددة الوسائط
|
||||
- مسح ذاكرة التخزين المؤقت uv للحزم التي تم نشرها حديثًا في الإصدار المؤسسي
|
||||
- تحديد lancedb أقل من 0.30.1 لضمان التوافق مع Windows
|
||||
- إصلاح مستويات أذونات RBAC لتتناسب مع خيارات واجهة المستخدم الفعلية
|
||||
- إصلاح عدم الدقة في قدرات الوكيل عبر جميع اللغات
|
||||
|
||||
### الوثائق
|
||||
- إضافة فيديو توضيحي لمهارات وكيل البرمجة إلى صفحات البدء
|
||||
- إضافة دليل شامل لتكوين SSO
|
||||
- إضافة مصفوفة شاملة لأذونات RBAC ودليل النشر
|
||||
- تحديث سجل التغييرات والإصدار إلى v1.13.0
|
||||
|
||||
### الأداء
|
||||
- تقليل الحمل الزائد للإطار باستخدام حافلة الأحداث الكسولة، وتخطي التتبع عند تعطيله
|
||||
|
||||
### إعادة الهيكلة
|
||||
- تحويل Flow إلى Pydantic BaseModel
|
||||
- تحويل فئات LLM إلى Pydantic BaseModel
|
||||
- استبدال InstanceOf[T] بتعليقات نوع عادية
|
||||
- إزالة دليل LLM الخاص بالطرف الثالث غير المستخدم
|
||||
|
||||
## المساهمون
|
||||
|
||||
@alex-clawd, @dependabot[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide, @thiagomoretto
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2 أبريل 2026">
|
||||
## v1.13.0a7
|
||||
|
||||
|
||||
132
docs/ar/enterprise/guides/training-crews.mdx
Normal file
132
docs/ar/enterprise/guides/training-crews.mdx
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
title: "تدريب الطواقم"
|
||||
description: "قم بتدريب طواقمك المنشورة مباشرة من منصة CrewAI AMP لتحسين أداء الوكلاء بمرور الوقت"
|
||||
icon: "dumbbell"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
يتيح لك التدريب تحسين أداء الطاقم من خلال تشغيل جلسات تدريب تكرارية مباشرة من علامة تبويب **Training** في CrewAI AMP. تستخدم المنصة **وضع التدريب التلقائي** — حيث تتولى العملية التكرارية تلقائياً، على عكس تدريب CLI الذي يتطلب ملاحظات بشرية تفاعلية لكل تكرار.
|
||||
|
||||
بعد اكتمال التدريب، يقوم CrewAI بتقييم مخرجات الوكلاء ودمج الملاحظات في اقتراحات قابلة للتنفيذ لكل وكيل. يتم بعد ذلك تطبيق هذه الاقتراحات على تشغيلات الطاقم المستقبلية لتحسين جودة المخرجات.
|
||||
|
||||
<Tip>
|
||||
للحصول على تفاصيل حول كيفية عمل تدريب CrewAI، راجع صفحة [مفاهيم التدريب](/ar/concepts/training).
|
||||
</Tip>
|
||||
|
||||
## المتطلبات الأساسية
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="نشر نشط" icon="rocket">
|
||||
تحتاج إلى حساب CrewAI AMP مع نشر نشط في حالة **Ready** (نوع Crew).
|
||||
</Card>
|
||||
<Card title="صلاحية التشغيل" icon="key">
|
||||
يجب أن يكون لحسابك صلاحية تشغيل للنشر الذي تريد تدريبه.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## كيفية تدريب طاقم
|
||||
|
||||
<Steps>
|
||||
<Step title="افتح علامة تبويب Training">
|
||||
انتقل إلى **Deployments**، انقر على نشرك، ثم اختر علامة تبويب **Training**.
|
||||
</Step>
|
||||
|
||||
<Step title="أدخل اسم التدريب">
|
||||
قدم **Training Name** — سيصبح هذا اسم ملف `.pkl` المستخدم لتخزين نتائج التدريب. على سبيل المثال، "Expert Mode Training" ينتج `expert_mode_training.pkl`.
|
||||
</Step>
|
||||
|
||||
<Step title="املأ مدخلات الطاقم">
|
||||
أدخل حقول إدخال الطاقم. هذه هي نفس المدخلات التي ستقدمها للتشغيل العادي — يتم تحميلها ديناميكياً بناءً على تكوين طاقمك.
|
||||
</Step>
|
||||
|
||||
<Step title="ابدأ التدريب">
|
||||
انقر على **Train Crew**. يتغير الزر إلى "Training..." مع مؤشر دوران أثناء تشغيل العملية.
|
||||
|
||||
خلف الكواليس:
|
||||
- يتم إنشاء سجل تدريب للنشر الخاص بك
|
||||
- تستدعي المنصة نقطة نهاية التدريب التلقائي للنشر
|
||||
- يقوم الطاقم بتشغيل تكراراته تلقائياً — لا حاجة لملاحظات يدوية
|
||||
</Step>
|
||||
|
||||
<Step title="راقب التقدم">
|
||||
تعرض لوحة **Current Training Status**:
|
||||
- **Status** — الحالة الحالية لجلسة التدريب
|
||||
- **Nº Iterations** — عدد تكرارات التدريب المُهيأة
|
||||
- **Filename** — ملف `.pkl` الذي يتم إنشاؤه
|
||||
- **Started At** — وقت بدء التدريب
|
||||
- **Training Inputs** — المدخلات التي قدمتها
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## فهم نتائج التدريب
|
||||
|
||||
بمجرد اكتمال التدريب، سترى بطاقات نتائج لكل وكيل تحتوي على المعلومات التالية:
|
||||
|
||||
- **Agent Role** — اسم/دور الوكيل في طاقمك
|
||||
- **Final Quality** — درجة من 0 إلى 10 تقيّم جودة مخرجات الوكيل
|
||||
- **Final Summary** — ملخص لأداء الوكيل أثناء التدريب
|
||||
- **Suggestions** — توصيات قابلة للتنفيذ لتحسين سلوك الوكيل
|
||||
|
||||
### تحرير الاقتراحات
|
||||
|
||||
يمكنك تحسين الاقتراحات لأي وكيل:
|
||||
|
||||
<Steps>
|
||||
<Step title="انقر على Edit">
|
||||
في بطاقة نتائج أي وكيل، انقر على زر **Edit** بجوار الاقتراحات.
|
||||
</Step>
|
||||
|
||||
<Step title="عدّل الاقتراحات">
|
||||
حدّث نص الاقتراحات ليعكس التحسينات التي تريدها بشكل أفضل.
|
||||
</Step>
|
||||
|
||||
<Step title="احفظ التغييرات">
|
||||
انقر على **Save**. تتم مزامنة الاقتراحات المُعدّلة مع النشر وتُستخدم في جميع التشغيلات المستقبلية.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## استخدام بيانات التدريب
|
||||
|
||||
لتطبيق نتائج التدريب على طاقمك:
|
||||
|
||||
1. لاحظ **Training Filename** (ملف `.pkl`) من جلسة التدريب المكتملة.
|
||||
2. حدد اسم الملف هذا في تكوين kickoff أو التشغيل الخاص بنشرك.
|
||||
3. يقوم الطاقم تلقائياً بتحميل ملف التدريب وتطبيق الاقتراحات المخزنة على كل وكيل.
|
||||
|
||||
هذا يعني أن الوكلاء يستفيدون من الملاحظات المُنشأة أثناء التدريب في كل تشغيل لاحق.
|
||||
|
||||
## التدريبات السابقة
|
||||
|
||||
يعرض الجزء السفلي من علامة تبويب Training **سجل جميع جلسات التدريب السابقة** للنشر. استخدم هذا لمراجعة التدريبات السابقة، ومقارنة النتائج، أو اختيار ملف تدريب مختلف للاستخدام.
|
||||
|
||||
## معالجة الأخطاء
|
||||
|
||||
إذا فشل تشغيل التدريب، تعرض لوحة الحالة حالة خطأ مع رسالة تصف ما حدث خطأ.
|
||||
|
||||
الأسباب الشائعة لفشل التدريب:
|
||||
- **لم يتم تحديث وقت تشغيل النشر** — تأكد من أن نشرك يعمل بأحدث إصدار
|
||||
- **أخطاء تنفيذ الطاقم** — مشاكل في منطق مهام الطاقم أو تكوين الوكيل
|
||||
- **مشاكل الشبكة** — مشاكل الاتصال بين المنصة والنشر
|
||||
|
||||
## القيود
|
||||
|
||||
<Info>
|
||||
ضع هذه القيود في الاعتبار عند التخطيط لسير عمل التدريب الخاص بك:
|
||||
- **تدريب نشط واحد في كل مرة** لكل نشر — انتظر حتى ينتهي التشغيل الحالي قبل بدء آخر
|
||||
- **وضع التدريب التلقائي فقط** — لا تدعم المنصة الملاحظات التفاعلية لكل تكرار مثل CLI
|
||||
- **بيانات التدريب خاصة بالنشر** — ترتبط نتائج التدريب بمثيل وإصدار النشر المحدد
|
||||
</Info>
|
||||
|
||||
## الموارد ذات الصلة
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="مفاهيم التدريب" icon="book" href="/ar/concepts/training">
|
||||
تعلم كيف يعمل تدريب CrewAI.
|
||||
</Card>
|
||||
<Card title="تشغيل الطاقم" icon="play" href="/ar/enterprise/guides/kickoff-crew">
|
||||
قم بتشغيل طاقمك المنشور من منصة AMP.
|
||||
</Card>
|
||||
<Card title="النشر على AMP" icon="cloud-arrow-up" href="/ar/enterprise/guides/deploy-to-amp">
|
||||
انشر طاقمك واجعله جاهزاً للتدريب.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -5,6 +5,14 @@ icon: wrench
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### شاهد: بناء Agents و Flows في CrewAI باستخدام Coding Agent Skills
|
||||
|
||||
قم بتثبيت مهارات وكيل البرمجة الخاصة بنا (Claude Code، Codex، ...) لتشغيل وكلاء البرمجة بسرعة مع CrewAI.
|
||||
|
||||
يمكنك تثبيتها باستخدام `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## فيديو تعليمي
|
||||
|
||||
شاهد هذا الفيديو التعليمي لعرض تفصيلي لعملية التثبيت:
|
||||
|
||||
@@ -16,6 +16,14 @@ mode: "wide"
|
||||
|
||||
مع أكثر من 100,000 مطور معتمد عبر دوراتنا المجتمعية، يُعد CrewAI المعيار لأتمتة الذكاء الاصطناعي الجاهزة للمؤسسات.
|
||||
|
||||
### شاهد: بناء Agents و Flows في CrewAI باستخدام Coding Agent Skills
|
||||
|
||||
قم بتثبيت مهارات وكيل البرمجة الخاصة بنا (Claude Code، Codex، ...) لتشغيل وكلاء البرمجة بسرعة مع CrewAI.
|
||||
|
||||
يمكنك تثبيتها باستخدام `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## بنية CrewAI المعمارية
|
||||
|
||||
صُممت بنية CrewAI لتحقيق التوازن بين الاستقلالية والتحكم.
|
||||
|
||||
@@ -5,6 +5,14 @@ icon: rocket
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### شاهد: بناء Agents و Flows في CrewAI باستخدام Coding Agent Skills
|
||||
|
||||
قم بتثبيت مهارات وكيل البرمجة الخاصة بنا (Claude Code، Codex، ...) لتشغيل وكلاء البرمجة بسرعة مع CrewAI.
|
||||
|
||||
يمكنك تثبيتها باستخدام `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## ابنِ أول وكيل CrewAI
|
||||
|
||||
لننشئ طاقماً بسيطاً يساعدنا في `البحث` و`إعداد التقارير` عن `أحدث تطورات الذكاء الاصطناعي` لموضوع أو مجال معين.
|
||||
|
||||
3199
docs/docs.json
3199
docs/docs.json
File diff suppressed because it is too large
Load Diff
@@ -4,6 +4,62 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="Apr 06, 2026">
|
||||
## v1.14.0a2
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0a2)
|
||||
|
||||
Release 1.14.0a2
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 02, 2026">
|
||||
## v1.13.0
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Features
|
||||
- Add RuntimeState RootModel for unified state serialization
|
||||
- Enhance event listener with new telemetry spans for skill and memory events
|
||||
- Add A2UI extension with v0.8/v0.9 support, schemas, and docs
|
||||
- Emit token usage data in LLMCallCompletedEvent
|
||||
- Auto-update deployment test repo during release
|
||||
- Improve enterprise release resilience and UX
|
||||
|
||||
### Bug Fixes
|
||||
- Add tool repository credentials to crewai install
|
||||
- Add tool repository credentials to uv build in tool publish
|
||||
- Pass fingerprint metadata via config instead of tool args
|
||||
- Handle GPT-5.x models not supporting the `stop` API parameter
|
||||
- Add GPT-5 and o-series to multimodal vision prefixes
|
||||
- Bust uv cache for freshly published packages in enterprise release
|
||||
- Cap lancedb below 0.30.1 for Windows compatibility
|
||||
- Fix RBAC permission levels to match actual UI options
|
||||
- Fix inaccuracies in agent-capabilities across all languages
|
||||
|
||||
### Documentation
|
||||
- Add coding agent skills demo video to getting started pages
|
||||
- Add comprehensive SSO configuration guide
|
||||
- Add comprehensive RBAC permissions matrix and deployment guide
|
||||
- Update changelog and version for v1.13.0
|
||||
|
||||
### Performance
|
||||
- Reduce framework overhead with lazy event bus, skip tracing when disabled
|
||||
|
||||
### Refactoring
|
||||
- Convert Flow to Pydantic BaseModel
|
||||
- Convert LLM classes to Pydantic BaseModel
|
||||
- Replace InstanceOf[T] with plain type annotations
|
||||
- Remove unused third_party LLM directory
|
||||
|
||||
## Contributors
|
||||
|
||||
@alex-clawd, @dependabot[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide, @thiagomoretto
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 02, 2026">
|
||||
## v1.13.0a7
|
||||
|
||||
|
||||
132
docs/en/enterprise/guides/training-crews.mdx
Normal file
132
docs/en/enterprise/guides/training-crews.mdx
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
title: "Training Crews"
|
||||
description: "Train your deployed crews directly from the CrewAI AMP platform to improve agent performance over time"
|
||||
icon: "dumbbell"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
Training lets you improve crew performance by running iterative training sessions directly from the **Training** tab in CrewAI AMP. The platform uses **auto-train mode** — it handles the iterative process automatically, unlike CLI training which requires interactive human feedback per iteration.
|
||||
|
||||
After training completes, CrewAI evaluates agent outputs and consolidates feedback into actionable suggestions for each agent. These suggestions are then applied to future crew runs to improve output quality.
|
||||
|
||||
<Tip>
|
||||
For details on how CrewAI training works under the hood, see the [Training Concepts](/en/concepts/training) page.
|
||||
</Tip>
|
||||
|
||||
## Prerequisites
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Active deployment" icon="rocket">
|
||||
You need a CrewAI AMP account with an active deployment in **Ready** status (Crew type).
|
||||
</Card>
|
||||
<Card title="Run permission" icon="key">
|
||||
Your account must have run permission for the deployment you want to train.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## How to train a crew
|
||||
|
||||
<Steps>
|
||||
<Step title="Open the Training tab">
|
||||
Navigate to **Deployments**, click your deployment, then select the **Training** tab.
|
||||
</Step>
|
||||
|
||||
<Step title="Enter a training name">
|
||||
Provide a **Training Name** — this becomes the `.pkl` filename used to store training results. For example, "Expert Mode Training" produces `expert_mode_training.pkl`.
|
||||
</Step>
|
||||
|
||||
<Step title="Fill in the crew inputs">
|
||||
Enter the crew's input fields. These are the same inputs you'd provide for a normal kickoff — they're dynamically loaded based on your crew's configuration.
|
||||
</Step>
|
||||
|
||||
<Step title="Start training">
|
||||
Click **Train Crew**. The button changes to "Training..." with a spinner while the process runs.
|
||||
|
||||
Behind the scenes:
|
||||
- A training record is created for your deployment
|
||||
- The platform calls the deployment's auto-train endpoint
|
||||
- The crew runs its iterations automatically — no manual feedback required
|
||||
</Step>
|
||||
|
||||
<Step title="Monitor progress">
|
||||
The **Current Training Status** panel displays:
|
||||
- **Status** — Current state of the training run
|
||||
- **Nº Iterations** — Number of training iterations configured
|
||||
- **Filename** — The `.pkl` file being generated
|
||||
- **Started At** — When training began
|
||||
- **Training Inputs** — The inputs you provided
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Understanding training results
|
||||
|
||||
Once training completes, you'll see per-agent result cards with the following information:
|
||||
|
||||
- **Agent Role** — The name/role of the agent in your crew
|
||||
- **Final Quality** — A score from 0 to 10 evaluating the agent's output quality
|
||||
- **Final Summary** — A summary of the agent's performance during training
|
||||
- **Suggestions** — Actionable recommendations for improving the agent's behavior
|
||||
|
||||
### Editing suggestions
|
||||
|
||||
You can refine the suggestions for any agent:
|
||||
|
||||
<Steps>
|
||||
<Step title="Click Edit">
|
||||
On any agent's result card, click the **Edit** button next to the suggestions.
|
||||
</Step>
|
||||
|
||||
<Step title="Modify suggestions">
|
||||
Update the suggestions text to better reflect the improvements you want.
|
||||
</Step>
|
||||
|
||||
<Step title="Save changes">
|
||||
Click **Save**. The edited suggestions sync back to the deployment and are used in all future runs.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Using trained data
|
||||
|
||||
To apply training results to your crew:
|
||||
|
||||
1. Note the **Training Filename** (the `.pkl` file) from your completed training session.
|
||||
2. Specify this filename in your deployment's kickoff or run configuration.
|
||||
3. The crew automatically loads the training file and applies the stored suggestions to each agent.
|
||||
|
||||
This means agents benefit from the feedback generated during training on every subsequent run.
|
||||
|
||||
## Previous trainings
|
||||
|
||||
The bottom of the Training tab displays a **history of all past training sessions** for the deployment. Use this to review previous training runs, compare results, or select a different training file to use.
|
||||
|
||||
## Error handling
|
||||
|
||||
If a training run fails, the status panel shows an error state along with a message describing what went wrong.
|
||||
|
||||
Common causes of training failures:
|
||||
- **Deployment runtime not updated** — Ensure your deployment is running the latest version
|
||||
- **Crew execution errors** — Issues within the crew's task logic or agent configuration
|
||||
- **Network issues** — Connectivity problems between the platform and the deployment
|
||||
|
||||
## Limitations
|
||||
|
||||
<Info>
|
||||
Keep these constraints in mind when planning your training workflow:
|
||||
- **One active training at a time** per deployment — wait for the current run to finish before starting another
|
||||
- **Auto-train mode only** — the platform does not support interactive per-iteration feedback like the CLI does
|
||||
- **Training data is deployment-specific** — training results are tied to the specific deployment instance and version
|
||||
</Info>
|
||||
|
||||
## Related resources
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="Training Concepts" icon="book" href="/en/concepts/training">
|
||||
Learn how CrewAI training works under the hood.
|
||||
</Card>
|
||||
<Card title="Kickoff Crew" icon="play" href="/en/enterprise/guides/kickoff-crew">
|
||||
Run your deployed crew from the AMP platform.
|
||||
</Card>
|
||||
<Card title="Deploy to AMP" icon="cloud-arrow-up" href="/en/enterprise/guides/deploy-to-amp">
|
||||
Get your crew deployed and ready for training.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -5,6 +5,14 @@ icon: wrench
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### Watch: Building CrewAI Agents & Flows with Coding Agent Skills
|
||||
|
||||
Install our coding agent skills (Claude Code, Codex, ...) to quickly get your coding agents up and running with CrewAI.
|
||||
|
||||
You can install it with `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## Video Tutorial
|
||||
|
||||
Watch this video tutorial for a step-by-step demonstration of the installation process:
|
||||
@@ -163,6 +171,9 @@ We recommend using the `YAML` template scaffolding for a structured approach to
|
||||
```shell
|
||||
uv add <package-name>
|
||||
```
|
||||
<Note>
|
||||
As a supply-chain security measure, CrewAI's internal packages use `exclude-newer = "3 days"` in their `pyproject.toml` files. This means transitive dependencies pulled in by CrewAI won't resolve packages released less than 3 days ago. Your own direct dependencies are not affected by this policy. If you notice a transitive dependency is behind, you can pin the version you want explicitly in your project's dependencies.
|
||||
</Note>
|
||||
- To run your crew, execute the following command in the root of your project:
|
||||
```bash
|
||||
crewai run
|
||||
|
||||
@@ -16,6 +16,14 @@ It empowers developers to build production-ready multi-agent systems by combinin
|
||||
|
||||
With over 100,000 developers certified through our community courses, CrewAI is the standard for enterprise-ready AI automation.
|
||||
|
||||
### Watch: Building CrewAI Agents & Flows with Coding Agent Skills
|
||||
|
||||
Install our coding agent skills (Claude Code, Codex, ...) to quickly get your coding agents up and running with CrewAI.
|
||||
|
||||
You can install it with `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## The CrewAI Architecture
|
||||
|
||||
CrewAI's architecture is designed to balance autonomy with control.
|
||||
|
||||
@@ -5,6 +5,14 @@ icon: rocket
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### Watch: Building CrewAI Agents & Flows with Coding Agent Skills
|
||||
|
||||
Install our coding agent skills (Claude Code, Codex, ...) to quickly get your coding agents up and running with CrewAI.
|
||||
|
||||
You can install it with `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## Build your first CrewAI Agent
|
||||
|
||||
Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject.
|
||||
|
||||
@@ -4,6 +4,70 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="2026년 4월 6일">
|
||||
## v1.14.0a2
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0a2)
|
||||
|
||||
## 릴리스 1.14.0a2
|
||||
|
||||
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 2일">
|
||||
## v1.13.0
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 기능
|
||||
- 통합 상태 직렬화를 위한 RuntimeState RootModel 추가
|
||||
- 기술 및 메모리 이벤트에 대한 새로운 텔레메트리 스팬으로 이벤트 리스너 강화
|
||||
- v0.8/v0.9 지원, 스키마 및 문서가 포함된 A2UI 확장 추가
|
||||
- LLMCallCompletedEvent에서 토큰 사용 데이터 방출
|
||||
- 릴리스 중 배포 테스트 리포 자동 업데이트
|
||||
- 기업 릴리스의 복원력 및 사용자 경험 개선
|
||||
|
||||
### 버그 수정
|
||||
- crewai 설치에 도구 리포지토리 자격 증명 추가
|
||||
- 도구 게시의 uv 빌드에 도구 리포지토리 자격 증명 추가
|
||||
- 도구 인수 대신 구성으로 지문 메타데이터 전달
|
||||
- `stop` API 매개변수를 지원하지 않는 GPT-5.x 모델 처리
|
||||
- 멀티모달 비전 접두사에 GPT-5 및 o-series 추가
|
||||
- 기업 릴리스에서 새로 게시된 패키지에 대한 uv 캐시 무효화
|
||||
- Windows 호환성을 위해 lancedb를 0.30.1 이하로 제한
|
||||
- 실제 UI 옵션과 일치하도록 RBAC 권한 수준 수정
|
||||
- 모든 언어에서 에이전트 기능의 부정확성 수정
|
||||
|
||||
### 문서
|
||||
- 시작하기 페이지에 코딩 에이전트 기술 데모 비디오 추가
|
||||
- 포괄적인 SSO 구성 가이드 추가
|
||||
- 포괄적인 RBAC 권한 매트릭스 및 배포 가이드 추가
|
||||
- v1.13.0에 대한 변경 로그 및 버전 업데이트
|
||||
|
||||
### 성능
|
||||
- 비활성화 시 추적 건너뛰기와 함께 지연 이벤트 버스를 사용하여 프레임워크 오버헤드 감소
|
||||
|
||||
### 리팩토링
|
||||
- Flow를 Pydantic BaseModel로 변환
|
||||
- LLM 클래스를 Pydantic BaseModel로 변환
|
||||
- InstanceOf[T]를 일반 타입 주석으로 교체
|
||||
- 사용되지 않는 third_party LLM 디렉토리 제거
|
||||
|
||||
## 기여자
|
||||
|
||||
@alex-clawd, @dependabot[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide, @thiagomoretto
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 2일">
|
||||
## v1.13.0a7
|
||||
|
||||
|
||||
132
docs/ko/enterprise/guides/training-crews.mdx
Normal file
132
docs/ko/enterprise/guides/training-crews.mdx
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
title: "Crew 훈련"
|
||||
description: "CrewAI AMP 플랫폼에서 직접 배포된 Crew를 훈련하여 시간이 지남에 따라 에이전트 성능을 개선하세요"
|
||||
icon: "dumbbell"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
훈련을 통해 CrewAI AMP의 **Training** 탭에서 직접 반복 훈련 세션을 실행하여 Crew 성능을 개선할 수 있습니다. 플랫폼은 **자동 훈련 모드**를 사용합니다 — 반복 프로세스를 자동으로 처리하며, 반복마다 대화형 피드백이 필요한 CLI 훈련과는 다릅니다.
|
||||
|
||||
훈련이 완료되면 CrewAI는 에이전트 출력을 평가하고 각 에이전트에 대한 실행 가능한 제안으로 피드백을 통합합니다. 이러한 제안은 향후 Crew 실행에 적용되어 출력 품질을 개선합니다.
|
||||
|
||||
<Tip>
|
||||
CrewAI 훈련이 내부적으로 어떻게 작동하는지에 대한 자세한 내용은 [훈련 개념](/ko/concepts/training) 페이지를 참조하세요.
|
||||
</Tip>
|
||||
|
||||
## 사전 요구 사항
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="활성 배포" icon="rocket">
|
||||
**Ready** 상태의 활성 배포(Crew 유형)가 있는 CrewAI AMP 계정이 필요합니다.
|
||||
</Card>
|
||||
<Card title="실행 권한" icon="key">
|
||||
훈련하려는 배포에 대한 실행 권한이 계정에 있어야 합니다.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Crew 훈련 방법
|
||||
|
||||
<Steps>
|
||||
<Step title="Training 탭 열기">
|
||||
**Deployments**로 이동하여 배포를 클릭한 다음 **Training** 탭을 선택합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="훈련 이름 입력">
|
||||
**Training Name**을 입력합니다 — 이것은 훈련 결과를 저장하는 데 사용되는 `.pkl` 파일 이름이 됩니다. 예를 들어, "Expert Mode Training"은 `expert_mode_training.pkl`을 생성합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="Crew 입력값 작성">
|
||||
Crew의 입력 필드를 입력합니다. 이는 일반 kickoff에 제공하는 것과 동일한 입력값입니다 — Crew 구성에 따라 동적으로 로드됩니다.
|
||||
</Step>
|
||||
|
||||
<Step title="훈련 시작">
|
||||
**Train Crew**를 클릭합니다. 프로세스가 실행되는 동안 버튼이 스피너와 함께 "Training..."으로 변경됩니다.
|
||||
|
||||
내부적으로:
|
||||
- 배포에 대한 훈련 레코드가 생성됩니다
|
||||
- 플랫폼이 배포의 자동 훈련 엔드포인트를 호출합니다
|
||||
- Crew가 자동으로 반복을 실행합니다 — 수동 피드백이 필요하지 않습니다
|
||||
</Step>
|
||||
|
||||
<Step title="진행 상황 모니터링">
|
||||
**Current Training Status** 패널에 다음이 표시됩니다:
|
||||
- **Status** — 훈련 실행의 현재 상태
|
||||
- **Nº Iterations** — 구성된 훈련 반복 횟수
|
||||
- **Filename** — 생성 중인 `.pkl` 파일
|
||||
- **Started At** — 훈련 시작 시간
|
||||
- **Training Inputs** — 제공한 입력값
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## 훈련 결과 이해
|
||||
|
||||
훈련이 완료되면 다음 정보가 포함된 에이전트별 결과 카드가 표시됩니다:
|
||||
|
||||
- **Agent Role** — Crew에서 에이전트의 이름/역할
|
||||
- **Final Quality** — 에이전트 출력 품질을 평가하는 0~10점 점수
|
||||
- **Final Summary** — 훈련 중 에이전트 성능 요약
|
||||
- **Suggestions** — 에이전트 동작 개선을 위한 실행 가능한 권장 사항
|
||||
|
||||
### 제안 편집
|
||||
|
||||
모든 에이전트의 제안을 개선할 수 있습니다:
|
||||
|
||||
<Steps>
|
||||
<Step title="Edit 클릭">
|
||||
에이전트의 결과 카드에서 제안 옆에 있는 **Edit** 버튼을 클릭합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="제안 수정">
|
||||
원하는 개선 사항을 더 잘 반영하도록 제안 텍스트를 업데이트합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="변경 사항 저장">
|
||||
**Save**를 클릭합니다. 편집된 제안이 배포에 다시 동기화되고 이후 모든 실행에 사용됩니다.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## 훈련 데이터 사용
|
||||
|
||||
Crew에 훈련 결과를 적용하려면:
|
||||
|
||||
1. 완료된 훈련 세션에서 **Training Filename**(`.pkl` 파일)을 확인합니다.
|
||||
2. 배포의 kickoff 또는 실행 구성에서 이 파일 이름을 지정합니다.
|
||||
3. Crew가 자동으로 훈련 파일을 로드하고 저장된 제안을 각 에이전트에 적용합니다.
|
||||
|
||||
이는 에이전트가 이후 모든 실행에서 훈련 중에 생성된 피드백의 혜택을 받는다는 것을 의미합니다.
|
||||
|
||||
## 이전 훈련
|
||||
|
||||
Training 탭 하단에는 배포에 대한 **모든 과거 훈련 세션 기록**이 표시됩니다. 이전 훈련 실행을 검토하거나 결과를 비교하거나 사용할 다른 훈련 파일을 선택하는 데 사용합니다.
|
||||
|
||||
## 오류 처리
|
||||
|
||||
훈련 실행이 실패하면 상태 패널에 무엇이 잘못되었는지 설명하는 메시지와 함께 오류 상태가 표시됩니다.
|
||||
|
||||
훈련 실패의 일반적인 원인:
|
||||
- **배포 런타임이 업데이트되지 않음** — 배포가 최신 버전을 실행하고 있는지 확인하세요
|
||||
- **Crew 실행 오류** — Crew의 작업 로직 또는 에이전트 구성 내 문제
|
||||
- **네트워크 문제** — 플랫폼과 배포 간의 연결 문제
|
||||
|
||||
## 제한 사항
|
||||
|
||||
<Info>
|
||||
훈련 워크플로를 계획할 때 다음 제약 사항을 염두에 두세요:
|
||||
- **배포당 한 번에 하나의 활성 훈련** — 다른 훈련을 시작하기 전에 현재 실행이 완료될 때까지 기다리세요
|
||||
- **자동 훈련 모드만** — 플랫폼은 CLI처럼 반복당 대화형 피드백을 지원하지 않습니다
|
||||
- **훈련 데이터는 배포별** — 훈련 결과는 특정 배포 인스턴스 및 버전에 연결됩니다
|
||||
</Info>
|
||||
|
||||
## 관련 리소스
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="훈련 개념" icon="book" href="/ko/concepts/training">
|
||||
CrewAI 훈련이 내부적으로 어떻게 작동하는지 알아보세요.
|
||||
</Card>
|
||||
<Card title="Crew 시작" icon="play" href="/ko/enterprise/guides/kickoff-crew">
|
||||
AMP 플랫폼에서 배포된 Crew를 실행하세요.
|
||||
</Card>
|
||||
<Card title="AMP에 배포" icon="cloud-arrow-up" href="/ko/enterprise/guides/deploy-to-amp">
|
||||
Crew를 배포하고 훈련 준비를 완료하세요.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -5,6 +5,14 @@ icon: wrench
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### 영상: 코딩 에이전트 스킬을 활용한 CrewAI Agents & Flows 구축
|
||||
|
||||
코딩 에이전트 스킬(Claude Code, Codex 등)을 설치하여 CrewAI로 코딩 에이전트를 빠르게 시작하세요.
|
||||
|
||||
`npx skills add crewaiinc/skills` 명령어로 설치할 수 있습니다
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## 비디오 튜토리얼
|
||||
|
||||
설치 과정을 단계별로 시연하는 비디오 튜토리얼을 시청하세요:
|
||||
|
||||
@@ -16,6 +16,14 @@ mode: "wide"
|
||||
|
||||
10만 명이 넘는 개발자가 커뮤니티 과정을 통해 인증을 받았으며, CrewAI는 기업용 AI 자동화의 표준입니다.
|
||||
|
||||
### 영상: 코딩 에이전트 스킬을 활용한 CrewAI Agents & Flows 구축
|
||||
|
||||
코딩 에이전트 스킬(Claude Code, Codex 등)을 설치하여 CrewAI로 코딩 에이전트를 빠르게 시작하세요.
|
||||
|
||||
`npx skills add crewaiinc/skills` 명령어로 설치할 수 있습니다
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## CrewAI 아키텍처
|
||||
|
||||
CrewAI의 아키텍처는 자율성과 제어의 균형을 맞추도록 설계되었습니다.
|
||||
|
||||
@@ -5,6 +5,14 @@ icon: rocket
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### 영상: 코딩 에이전트 스킬을 활용한 CrewAI Agents & Flows 구축
|
||||
|
||||
코딩 에이전트 스킬(Claude Code, Codex 등)을 설치하여 CrewAI로 코딩 에이전트를 빠르게 시작하세요.
|
||||
|
||||
`npx skills add crewaiinc/skills` 명령어로 설치할 수 있습니다
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## 첫 번째 CrewAI Agent 만들기
|
||||
|
||||
이제 주어진 주제나 항목에 대해 `최신 AI 개발 동향`을 `연구`하고 `보고`하는 간단한 crew를 만들어보겠습니다.
|
||||
|
||||
@@ -4,6 +4,70 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="06 abr 2026">
|
||||
## v1.14.0a2
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.0a2)
|
||||
|
||||
## Lançamento 1.14.0a2
|
||||
|
||||
### Instruções:
|
||||
- Traduza todos os cabeçalhos de seção e descrições de forma natural
|
||||
- Mantenha a formatação markdown (##, ###, -, etc.) exatamente como está
|
||||
- Mantenha todos os nomes próprios, identificadores de código, nomes de classes e termos técnicos inalterados
|
||||
(por exemplo, "CrewAI", "LiteAgent", "ChromaDB", "MCP", "@username")
|
||||
- Mantenha a seção ## Contribuidores e os nomes de usuários do GitHub inalterados
|
||||
- Não adicione nem remova nenhum conteúdo, apenas traduza
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="02 abr 2026">
|
||||
## v1.13.0
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Funcionalidades
|
||||
- Adicionar RuntimeState RootModel para serialização de estado unificado
|
||||
- Melhorar o listener de eventos com novos spans de telemetria para eventos de habilidade e memória
|
||||
- Adicionar extensão A2UI com suporte a v0.8/v0.9, esquemas e documentação
|
||||
- Emitir dados de uso de token no LLMCallCompletedEvent
|
||||
- Atualizar automaticamente o repositório de testes de implantação durante o lançamento
|
||||
- Melhorar a resiliência e a experiência do usuário na versão empresarial
|
||||
|
||||
### Correções de Bugs
|
||||
- Adicionar credenciais do repositório de ferramentas ao crewai install
|
||||
- Adicionar credenciais do repositório de ferramentas ao uv build na publicação de ferramentas
|
||||
- Passar metadados de impressão digital via configuração em vez de argumentos de ferramenta
|
||||
- Lidar com modelos GPT-5.x que não suportam o parâmetro API `stop`
|
||||
- Adicionar GPT-5 e a série o aos prefixos de visão multimodal
|
||||
- Limpar cache uv para pacotes recém-publicados na versão empresarial
|
||||
- Limitar lancedb abaixo de 0.30.1 para compatibilidade com Windows
|
||||
- Corrigir níveis de permissão RBAC para corresponder às opções reais da interface do usuário
|
||||
- Corrigir imprecisões nas capacidades do agente em todos os idiomas
|
||||
|
||||
### Documentação
|
||||
- Adicionar vídeo de demonstração de habilidades do agente de codificação às páginas de introdução
|
||||
- Adicionar guia abrangente de configuração SSO
|
||||
- Adicionar matriz de permissões RBAC abrangente e guia de implantação
|
||||
- Atualizar changelog e versão para v1.13.0
|
||||
|
||||
### Desempenho
|
||||
- Reduzir a sobrecarga do framework com bus de eventos preguiçoso, pular rastreamento quando desativado
|
||||
|
||||
### Refatoração
|
||||
- Converter Flow para Pydantic BaseModel
|
||||
- Converter classes LLM para Pydantic BaseModel
|
||||
- Substituir InstanceOf[T] por anotações de tipo simples
|
||||
- Remover diretório LLM de terceiros não utilizado
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@alex-clawd, @dependabot[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide, @thiagomoretto
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="02 abr 2026">
|
||||
## v1.13.0a7
|
||||
|
||||
|
||||
132
docs/pt-BR/enterprise/guides/training-crews.mdx
Normal file
132
docs/pt-BR/enterprise/guides/training-crews.mdx
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
title: "Treinamento de Crews"
|
||||
description: "Treine seus crews implantados diretamente da plataforma CrewAI AMP para melhorar o desempenho dos agentes ao longo do tempo"
|
||||
icon: "dumbbell"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
O treinamento permite que você melhore o desempenho do crew executando sessões de treinamento iterativas diretamente da aba **Training** no CrewAI AMP. A plataforma usa o **modo de auto-treinamento** — ela gerencia o processo iterativo automaticamente, diferente do treinamento via CLI que requer feedback humano interativo por iteração.
|
||||
|
||||
Após a conclusão do treinamento, o CrewAI avalia as saídas dos agentes e consolida o feedback em sugestões acionáveis para cada agente. Essas sugestões são então aplicadas às execuções futuras do crew para melhorar a qualidade das saídas.
|
||||
|
||||
<Tip>
|
||||
Para detalhes sobre como o treinamento do CrewAI funciona internamente, consulte a página [Conceitos de Treinamento](/pt-BR/concepts/training).
|
||||
</Tip>
|
||||
|
||||
## Pré-requisitos
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Implantação ativa" icon="rocket">
|
||||
Você precisa de uma conta CrewAI AMP com uma implantação ativa em status **Ready** (tipo Crew).
|
||||
</Card>
|
||||
<Card title="Permissão de execução" icon="key">
|
||||
Sua conta deve ter permissão de execução para a implantação que deseja treinar.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Como treinar um crew
|
||||
|
||||
<Steps>
|
||||
<Step title="Abra a aba Training">
|
||||
Navegue até **Deployments**, clique na sua implantação e selecione a aba **Training**.
|
||||
</Step>
|
||||
|
||||
<Step title="Insira um nome de treinamento">
|
||||
Forneça um **Training Name** — este será o nome do arquivo `.pkl` usado para armazenar os resultados do treinamento. Por exemplo, "Expert Mode Training" produz `expert_mode_training.pkl`.
|
||||
</Step>
|
||||
|
||||
<Step title="Preencha as entradas do crew">
|
||||
Insira os campos de entrada do crew. Estas são as mesmas entradas que você forneceria para um kickoff normal — elas são carregadas dinamicamente com base na configuração do seu crew.
|
||||
</Step>
|
||||
|
||||
<Step title="Inicie o treinamento">
|
||||
Clique em **Train Crew**. O botão muda para "Training..." com um spinner enquanto o processo é executado.
|
||||
|
||||
Por trás dos panos:
|
||||
- Um registro de treinamento é criado para sua implantação
|
||||
- A plataforma chama o endpoint de auto-treinamento da implantação
|
||||
- O crew executa suas iterações automaticamente — nenhum feedback manual é necessário
|
||||
</Step>
|
||||
|
||||
<Step title="Monitore o progresso">
|
||||
O painel **Current Training Status** exibe:
|
||||
- **Status** — Estado atual da execução do treinamento
|
||||
- **Nº Iterations** — Número de iterações de treinamento configuradas
|
||||
- **Filename** — O arquivo `.pkl` sendo gerado
|
||||
- **Started At** — Quando o treinamento começou
|
||||
- **Training Inputs** — As entradas que você forneceu
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Entendendo os resultados do treinamento
|
||||
|
||||
Uma vez que o treinamento for concluído, você verá cards de resultado por agente com as seguintes informações:
|
||||
|
||||
- **Agent Role** — O nome/função do agente no seu crew
|
||||
- **Final Quality** — Uma pontuação de 0 a 10 avaliando a qualidade da saída do agente
|
||||
- **Final Summary** — Um resumo do desempenho do agente durante o treinamento
|
||||
- **Suggestions** — Recomendações acionáveis para melhorar o comportamento do agente
|
||||
|
||||
### Editando sugestões
|
||||
|
||||
Você pode refinar as sugestões para qualquer agente:
|
||||
|
||||
<Steps>
|
||||
<Step title="Clique em Edit">
|
||||
No card de resultado de qualquer agente, clique no botão **Edit** ao lado das sugestões.
|
||||
</Step>
|
||||
|
||||
<Step title="Modifique as sugestões">
|
||||
Atualize o texto das sugestões para refletir melhor as melhorias que você deseja.
|
||||
</Step>
|
||||
|
||||
<Step title="Salve as alterações">
|
||||
Clique em **Save**. As sugestões editadas são sincronizadas de volta à implantação e usadas em todas as execuções futuras.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Usando dados de treinamento
|
||||
|
||||
Para aplicar os resultados do treinamento ao seu crew:
|
||||
|
||||
1. Anote o **Training Filename** (o arquivo `.pkl`) da sua sessão de treinamento concluída.
|
||||
2. Especifique este nome de arquivo na configuração de kickoff ou execução da sua implantação.
|
||||
3. O crew carrega automaticamente o arquivo de treinamento e aplica as sugestões armazenadas a cada agente.
|
||||
|
||||
Isso significa que os agentes se beneficiam do feedback gerado durante o treinamento em cada execução subsequente.
|
||||
|
||||
## Treinamentos anteriores
|
||||
|
||||
A parte inferior da aba Training exibe um **histórico de todas as sessões de treinamento anteriores** da implantação. Use isso para revisar execuções de treinamento anteriores, comparar resultados ou selecionar um arquivo de treinamento diferente para usar.
|
||||
|
||||
## Tratamento de erros
|
||||
|
||||
Se uma execução de treinamento falhar, o painel de status mostra um estado de erro junto com uma mensagem descrevendo o que deu errado.
|
||||
|
||||
Causas comuns de falhas de treinamento:
|
||||
- **Runtime da implantação não atualizado** — Certifique-se de que sua implantação está executando a versão mais recente
|
||||
- **Erros de execução do crew** — Problemas na lógica de tarefas do crew ou configuração do agente
|
||||
- **Problemas de rede** — Problemas de conectividade entre a plataforma e a implantação
|
||||
|
||||
## Limitações
|
||||
|
||||
<Info>
|
||||
Tenha estas restrições em mente ao planejar seu fluxo de trabalho de treinamento:
|
||||
- **Um treinamento ativo por vez** por implantação — aguarde a execução atual terminar antes de iniciar outra
|
||||
- **Apenas modo de auto-treinamento** — a plataforma não suporta feedback interativo por iteração como o CLI
|
||||
- **Dados de treinamento são específicos da implantação** — os resultados do treinamento estão vinculados à instância e versão específicas da implantação
|
||||
</Info>
|
||||
|
||||
## Recursos relacionados
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="Conceitos de Treinamento" icon="book" href="/pt-BR/concepts/training">
|
||||
Aprenda como o treinamento do CrewAI funciona internamente.
|
||||
</Card>
|
||||
<Card title="Kickoff Crew" icon="play" href="/pt-BR/enterprise/guides/kickoff-crew">
|
||||
Execute seu crew implantado a partir da plataforma AMP.
|
||||
</Card>
|
||||
<Card title="Implantar no AMP" icon="cloud-arrow-up" href="/pt-BR/enterprise/guides/deploy-to-amp">
|
||||
Faça a implantação do seu crew e deixe-o pronto para treinamento.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -5,6 +5,14 @@ icon: wrench
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### Assista: Construindo Agents e Flows CrewAI com Coding Agent Skills
|
||||
|
||||
Instale nossas coding agent skills (Claude Code, Codex, ...) para colocar seus agentes de código para funcionar rapidamente com o CrewAI.
|
||||
|
||||
Você pode instalar com `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## Tutorial em Vídeo
|
||||
|
||||
Assista a este tutorial em vídeo para uma demonstração passo a passo do processo de instalação:
|
||||
|
||||
@@ -16,6 +16,14 @@ Ele capacita desenvolvedores a construir sistemas multi-agente prontos para prod
|
||||
|
||||
Com mais de 100.000 desenvolvedores certificados em nossos cursos comunitários, o CrewAI é o padrão para automação de IA pronta para empresas.
|
||||
|
||||
### Assista: Construindo Agents e Flows CrewAI com Coding Agent Skills
|
||||
|
||||
Instale nossas coding agent skills (Claude Code, Codex, ...) para colocar seus agentes de código para funcionar rapidamente com o CrewAI.
|
||||
|
||||
Você pode instalar com `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## A Arquitetura do CrewAI
|
||||
|
||||
A arquitetura do CrewAI foi projetada para equilibrar autonomia com controle.
|
||||
|
||||
@@ -5,6 +5,14 @@ icon: rocket
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### Assista: Construindo Agents e Flows CrewAI com Coding Agent Skills
|
||||
|
||||
Instale nossas coding agent skills (Claude Code, Codex, ...) para colocar seus agentes de código para funcionar rapidamente com o CrewAI.
|
||||
|
||||
Você pode instalar com `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## Construa seu primeiro Agente CrewAI
|
||||
|
||||
Vamos criar uma tripulação simples que nos ajudará a `pesquisar` e `relatar` sobre os `últimos avanços em IA` para um determinado tópico ou assunto.
|
||||
|
||||
@@ -17,6 +17,9 @@ dependencies = [
|
||||
"av~=13.0.0",
|
||||
]
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
@@ -152,4 +152,4 @@ __all__ = [
|
||||
"wrap_file_source",
|
||||
]
|
||||
|
||||
__version__ = "1.13.0a7"
|
||||
__version__ = "1.14.0a2"
|
||||
|
||||
@@ -11,7 +11,7 @@ dependencies = [
|
||||
"pytube~=15.0.0",
|
||||
"requests~=2.32.5",
|
||||
"docker~=7.1.0",
|
||||
"crewai==1.13.0a7",
|
||||
"crewai==1.14.0a2",
|
||||
"tiktoken~=0.8.0",
|
||||
"beautifulsoup4~=4.13.4",
|
||||
"python-docx~=1.2.0",
|
||||
@@ -142,6 +142,9 @@ contextual = [
|
||||
]
|
||||
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
@@ -309,4 +309,4 @@ __all__ = [
|
||||
"ZapierActionTools",
|
||||
]
|
||||
|
||||
__version__ = "1.13.0a7"
|
||||
__version__ = "1.14.0a2"
|
||||
|
||||
@@ -54,7 +54,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
|
||||
|
||||
[project.optional-dependencies]
|
||||
tools = [
|
||||
"crewai-tools==1.13.0a7",
|
||||
"crewai-tools==1.14.0a2",
|
||||
]
|
||||
embeddings = [
|
||||
"tiktoken~=0.8.0"
|
||||
@@ -115,6 +115,9 @@ qdrant-edge = [
|
||||
crewai = "crewai.cli.cli:crewai"
|
||||
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
# PyTorch index configuration, since torch 2.5.0 is not compatible with python 3.13
|
||||
[[tool.uv.index]]
|
||||
name = "pytorch-nightly"
|
||||
|
||||
@@ -8,14 +8,15 @@ from pydantic import PydanticUserError
|
||||
|
||||
from crewai.agent.core import Agent
|
||||
from crewai.agent.planning_config import PlanningConfig
|
||||
from crewai.context import ExecutionContext
|
||||
from crewai.crew import Crew
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.execution_context import ExecutionContext
|
||||
from crewai.flow.flow import Flow
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.process import Process
|
||||
from crewai.runtime_state import _entity_discriminator
|
||||
from crewai.task import Task
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
@@ -45,7 +46,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
|
||||
|
||||
_suppress_pydantic_deprecation_warnings()
|
||||
|
||||
__version__ = "1.13.0a7"
|
||||
__version__ = "1.14.0a2"
|
||||
_telemetry_submitted = False
|
||||
|
||||
|
||||
@@ -112,10 +113,13 @@ try:
|
||||
|
||||
_base_namespace: dict[str, type] = {
|
||||
"Agent": Agent,
|
||||
"BaseAgent": _BaseAgent,
|
||||
"Crew": Crew,
|
||||
"Flow": Flow,
|
||||
"BaseLLM": BaseLLM,
|
||||
"Task": Task,
|
||||
"CrewAgentExecutorMixin": _CrewAgentExecutorMixin,
|
||||
"ExecutionContext": ExecutionContext,
|
||||
}
|
||||
|
||||
try:
|
||||
@@ -154,13 +158,34 @@ try:
|
||||
for _mod_name in (
|
||||
_BaseAgent.__module__,
|
||||
Agent.__module__,
|
||||
Crew.__module__,
|
||||
Flow.__module__,
|
||||
Task.__module__,
|
||||
_AgentExecutor.__module__,
|
||||
):
|
||||
sys.modules[_mod_name].__dict__.update(_resolve_namespace)
|
||||
|
||||
from crewai.tasks.conditional_task import ConditionalTask as _ConditionalTask
|
||||
|
||||
_BaseAgent.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
Task.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
_ConditionalTask.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
Crew.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
Flow.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
_AgentExecutor.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
|
||||
from typing import Annotated
|
||||
|
||||
from pydantic import Discriminator, RootModel, Tag
|
||||
|
||||
Entity = Annotated[
|
||||
Annotated[Flow, Tag("flow")] # type: ignore[type-arg]
|
||||
| Annotated[Crew, Tag("crew")]
|
||||
| Annotated[Agent, Tag("agent")],
|
||||
Discriminator(_entity_discriminator),
|
||||
]
|
||||
RuntimeState = RootModel[list[Entity]]
|
||||
|
||||
try:
|
||||
Agent.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
except PydanticUserError:
|
||||
@@ -172,6 +197,7 @@ except (ImportError, PydanticUserError):
|
||||
"model_rebuild() failed; forward refs may be unresolved.",
|
||||
exc_info=True,
|
||||
)
|
||||
RuntimeState = None # type: ignore[assignment,misc]
|
||||
|
||||
__all__ = [
|
||||
"LLM",
|
||||
@@ -186,6 +212,7 @@ __all__ = [
|
||||
"Memory",
|
||||
"PlanningConfig",
|
||||
"Process",
|
||||
"RuntimeState",
|
||||
"Task",
|
||||
"TaskOutput",
|
||||
"__version__",
|
||||
|
||||
@@ -14,6 +14,7 @@ import subprocess
|
||||
import time
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Annotated,
|
||||
Any,
|
||||
Literal,
|
||||
NoReturn,
|
||||
@@ -23,12 +24,14 @@ import warnings
|
||||
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
ConfigDict,
|
||||
Field,
|
||||
InstanceOf,
|
||||
PrivateAttr,
|
||||
model_validator,
|
||||
)
|
||||
from pydantic.functional_serializers import PlainSerializer
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.agent.planning_config import PlanningConfig
|
||||
@@ -46,7 +49,11 @@ from crewai.agent.utils import (
|
||||
save_last_messages,
|
||||
validate_max_execution_time,
|
||||
)
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.agent_builder.base_agent import (
|
||||
BaseAgent,
|
||||
_serialize_llm_ref,
|
||||
_validate_llm_ref,
|
||||
)
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
@@ -122,6 +129,24 @@ if TYPE_CHECKING:
|
||||
|
||||
_passthrough_exceptions: tuple[type[Exception], ...] = ()
|
||||
|
||||
_EXECUTOR_CLASS_MAP: dict[str, type] = {
|
||||
"CrewAgentExecutor": CrewAgentExecutor,
|
||||
"AgentExecutor": AgentExecutor,
|
||||
}
|
||||
|
||||
|
||||
def _validate_executor_class(value: Any) -> Any:
|
||||
if isinstance(value, str):
|
||||
cls = _EXECUTOR_CLASS_MAP.get(value)
|
||||
if cls is None:
|
||||
raise ValueError(f"Unknown executor class: {value}")
|
||||
return cls
|
||||
return value
|
||||
|
||||
|
||||
def _serialize_executor_class(value: Any) -> str:
|
||||
return value.__name__ if isinstance(value, type) else str(value)
|
||||
|
||||
|
||||
class Agent(BaseAgent):
|
||||
"""Represents an agent in a system.
|
||||
@@ -167,12 +192,16 @@ class Agent(BaseAgent):
|
||||
default=True,
|
||||
description="Use system prompt for the agent.",
|
||||
)
|
||||
llm: str | BaseLLM | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
function_calling_llm: str | BaseLLM | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(description="Language model that will run the agent.", default=None)
|
||||
function_calling_llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(description="Language model that will run the agent.", default=None)
|
||||
system_template: str | None = Field(
|
||||
default=None, description="System format for the agent."
|
||||
)
|
||||
@@ -271,7 +300,11 @@ class Agent(BaseAgent):
|
||||
agent_executor: InstanceOf[CrewAgentExecutor] | InstanceOf[AgentExecutor] | None = (
|
||||
Field(default=None, description="An instance of the CrewAgentExecutor class.")
|
||||
)
|
||||
executor_class: type[CrewAgentExecutor] | type[AgentExecutor] = Field(
|
||||
executor_class: Annotated[
|
||||
type[CrewAgentExecutor] | type[AgentExecutor],
|
||||
BeforeValidator(_validate_executor_class),
|
||||
PlainSerializer(_serialize_executor_class, return_type=str, when_used="json"),
|
||||
] = Field(
|
||||
default=CrewAgentExecutor,
|
||||
description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use AgentExecutor.",
|
||||
)
|
||||
@@ -1053,7 +1086,7 @@ class Agent(BaseAgent):
|
||||
)
|
||||
)
|
||||
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
def get_delegation_tools(self, agents: Sequence[BaseAgent]) -> list[BaseTool]:
|
||||
agent_tools = AgentTools(agents=agents)
|
||||
return agent_tools.tools()
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ with CrewAI's agent system. Provides memory persistence, tool integration, and s
|
||||
output functionality.
|
||||
"""
|
||||
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Callable, Sequence
|
||||
from typing import Any, cast
|
||||
|
||||
from pydantic import ConfigDict, Field, PrivateAttr
|
||||
@@ -30,6 +30,7 @@ from crewai.events.types.agent_events import (
|
||||
)
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.types.callback import SerializableCallable
|
||||
from crewai.utilities import Logger
|
||||
from crewai.utilities.converter import Converter
|
||||
from crewai.utilities.import_utils import require
|
||||
@@ -50,7 +51,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
|
||||
_memory: Any = PrivateAttr(default=None)
|
||||
_max_iterations: int = PrivateAttr(default=10)
|
||||
function_calling_llm: Any = Field(default=None)
|
||||
step_callback: Callable[..., Any] | None = Field(default=None)
|
||||
step_callback: SerializableCallable | None = Field(default=None)
|
||||
|
||||
model: str = Field(default="gpt-4o")
|
||||
verbose: bool = Field(default=False)
|
||||
@@ -272,7 +273,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
|
||||
available_tools: list[Any] = self._tool_adapter.tools()
|
||||
self._graph.tools = available_tools
|
||||
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
def get_delegation_tools(self, agents: Sequence[BaseAgent]) -> list[BaseTool]:
|
||||
"""Implement delegation tools support for LangGraph.
|
||||
|
||||
Creates delegation tools that allow this agent to delegate tasks to other agents.
|
||||
|
||||
@@ -4,6 +4,7 @@ This module contains the OpenAIAgentAdapter class that integrates OpenAI Assista
|
||||
with CrewAI's agent system, providing tool integration and structured output support.
|
||||
"""
|
||||
|
||||
from collections.abc import Sequence
|
||||
from typing import Any, cast
|
||||
|
||||
from pydantic import ConfigDict, Field, PrivateAttr
|
||||
@@ -221,7 +222,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
"""
|
||||
return self._converter_adapter.post_process_result(result.final_output)
|
||||
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
def get_delegation_tools(self, agents: Sequence[BaseAgent]) -> list[BaseTool]:
|
||||
"""Implement delegation tools support.
|
||||
|
||||
Creates delegation tools that allow this agent to delegate tasks to other agents.
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import Sequence
|
||||
from copy import copy as shallow_copy
|
||||
from hashlib import md5
|
||||
from pathlib import Path
|
||||
@@ -48,6 +49,7 @@ from crewai.utilities.string_utils import interpolate_only
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.context import ExecutionContext
|
||||
from crewai.crew import Crew
|
||||
|
||||
|
||||
@@ -61,6 +63,26 @@ def _serialize_crew_ref(value: Any) -> str | None:
|
||||
return str(value.id) if hasattr(value, "id") else str(value)
|
||||
|
||||
|
||||
def _validate_llm_ref(value: Any) -> Any:
|
||||
return value
|
||||
|
||||
|
||||
def _resolve_agent(value: Any, info: Any) -> Any:
|
||||
if isinstance(value, BaseAgent) or value is None or not isinstance(value, dict):
|
||||
return value
|
||||
from crewai.agent.core import Agent
|
||||
|
||||
return Agent.model_validate(value, context=getattr(info, "context", None))
|
||||
|
||||
|
||||
def _serialize_llm_ref(value: Any) -> str | None:
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
return getattr(value, "model", str(value))
|
||||
|
||||
|
||||
_SLUG_RE: Final[re.Pattern[str]] = re.compile(
|
||||
r"^(?:crewai-amp:)?[a-zA-Z0-9][a-zA-Z0-9_-]*(?:#[\w-]+)?$"
|
||||
)
|
||||
@@ -138,6 +160,8 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
Set private attributes.
|
||||
"""
|
||||
|
||||
entity_type: Literal["agent"] = "agent"
|
||||
|
||||
__hash__ = object.__hash__
|
||||
_logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
|
||||
_rpm_controller: RPMController | None = PrivateAttr(default=None)
|
||||
@@ -176,9 +200,11 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
agent_executor: InstanceOf[CrewAgentExecutorMixin] | None = Field(
|
||||
default=None, description="An instance of the CrewAgentExecutor class."
|
||||
)
|
||||
llm: str | BaseLLM | None = Field(
|
||||
default=None, description="Language model that will run the agent."
|
||||
)
|
||||
llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(default=None, description="Language model that will run the agent.")
|
||||
crew: Annotated[
|
||||
Crew | str | None,
|
||||
BeforeValidator(_validate_crew_ref),
|
||||
@@ -197,7 +223,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
description="An instance of the ToolsHandler class.",
|
||||
)
|
||||
tools_results: list[dict[str, Any]] = Field(
|
||||
default=[], description="Results of the tools used by the agent."
|
||||
default_factory=list, description="Results of the tools used by the agent."
|
||||
)
|
||||
max_tokens: int | None = Field(
|
||||
default=None, description="Maximum number of tokens for the agent's execution."
|
||||
@@ -248,6 +274,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
description="Agent Skills. Accepts paths for discovery or pre-loaded Skill objects.",
|
||||
min_length=1,
|
||||
)
|
||||
execution_context: ExecutionContext | None = Field(default=None)
|
||||
|
||||
@model_validator(mode="before")
|
||||
@classmethod
|
||||
@@ -362,11 +389,12 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
if v:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None, info: Any) -> UUID4 | None:
|
||||
if v and not (info.context or {}).get("from_checkpoint"):
|
||||
raise PydanticCustomError(
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_private_attrs(self) -> Self:
|
||||
@@ -423,7 +451,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
def get_delegation_tools(self, agents: Sequence[BaseAgent]) -> list[BaseTool]:
|
||||
"""Set the task tools that init BaseAgenTools class."""
|
||||
|
||||
@abstractmethod
|
||||
|
||||
@@ -3,20 +3,15 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import GetCoreSchemaHandler
|
||||
from pydantic_core import CoreSchema, core_schema
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.tools.cache_tools.cache_tools import CacheTools
|
||||
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
|
||||
|
||||
|
||||
class ToolsHandler:
|
||||
class ToolsHandler(BaseModel):
|
||||
"""Callback handler for tool usage.
|
||||
|
||||
Attributes:
|
||||
@@ -24,14 +19,8 @@ class ToolsHandler:
|
||||
cache: Optional cache handler for storing tool outputs.
|
||||
"""
|
||||
|
||||
def __init__(self, cache: CacheHandler | None = None) -> None:
|
||||
"""Initialize the callback handler.
|
||||
|
||||
Args:
|
||||
cache: Optional cache handler for storing tool outputs.
|
||||
"""
|
||||
self.cache: CacheHandler | None = cache
|
||||
self.last_used_tool: ToolCalling | InstructorToolCalling | None = None
|
||||
cache: CacheHandler | None = Field(default=None)
|
||||
last_used_tool: ToolCalling | InstructorToolCalling | None = Field(default=None)
|
||||
|
||||
def on_tool_use(
|
||||
self,
|
||||
@@ -48,7 +37,6 @@ class ToolsHandler:
|
||||
"""
|
||||
self.last_used_tool = calling
|
||||
if self.cache and should_cache and calling.tool_name != CacheTools().name:
|
||||
# Convert arguments to string for cache
|
||||
input_str = ""
|
||||
if calling.arguments:
|
||||
if isinstance(calling.arguments, dict):
|
||||
@@ -61,14 +49,3 @@ class ToolsHandler:
|
||||
input=input_str,
|
||||
output=output,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, _source_type: Any, _handler: GetCoreSchemaHandler
|
||||
) -> CoreSchema:
|
||||
"""Generate Pydantic core schema for BaseClient Protocol.
|
||||
|
||||
This allows the Protocol to be used in Pydantic models without
|
||||
requiring arbitrary_types_allowed=True.
|
||||
"""
|
||||
return core_schema.any_schema()
|
||||
|
||||
@@ -27,7 +27,7 @@ from crewai.cli.tools.main import ToolCommand
|
||||
from crewai.cli.train_crew import train_crew
|
||||
from crewai.cli.triggers.main import TriggersCommand
|
||||
from crewai.cli.update_crew import update_crew
|
||||
from crewai.cli.utils import build_env_with_tool_repository_credentials, read_toml
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials, read_toml
|
||||
from crewai.memory.storage.kickoff_task_outputs_storage import (
|
||||
KickoffTaskOutputsSQLiteStorage,
|
||||
)
|
||||
@@ -48,24 +48,18 @@ def crewai() -> None:
|
||||
@click.argument("uv_args", nargs=-1, type=click.UNPROCESSED)
|
||||
def uv(uv_args: tuple[str, ...]) -> None:
|
||||
"""A wrapper around uv commands that adds custom tool authentication through env vars."""
|
||||
env = os.environ.copy()
|
||||
try:
|
||||
pyproject_data = read_toml()
|
||||
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
|
||||
|
||||
for source_config in sources.values():
|
||||
if isinstance(source_config, dict):
|
||||
index = source_config.get("index")
|
||||
if index:
|
||||
index_env = build_env_with_tool_repository_credentials(index)
|
||||
env.update(index_env)
|
||||
except (FileNotFoundError, KeyError) as e:
|
||||
# Verify pyproject.toml exists first
|
||||
read_toml()
|
||||
except FileNotFoundError as e:
|
||||
raise SystemExit(
|
||||
"Error. A valid pyproject.toml file is required. Check that a valid pyproject.toml file exists in the current directory."
|
||||
) from e
|
||||
except Exception as e:
|
||||
raise SystemExit(f"Error: {e}") from e
|
||||
|
||||
env = build_env_with_all_tool_credentials()
|
||||
|
||||
try:
|
||||
subprocess.run( # noqa: S603
|
||||
["uv", *uv_args], # noqa: S607
|
||||
|
||||
@@ -46,7 +46,7 @@ def create_flow(name: str) -> None:
|
||||
tools_template_files = ["tools/__init__.py", "tools/custom_tool.py"]
|
||||
|
||||
crew_folders = [
|
||||
"poem_crew",
|
||||
"content_crew",
|
||||
]
|
||||
|
||||
def process_file(src_file: Path, dst_file: Path) -> None:
|
||||
|
||||
@@ -2,6 +2,8 @@ import subprocess
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials
|
||||
|
||||
|
||||
# Be mindful about changing this.
|
||||
# on some environments we don't use this command but instead uv sync directly
|
||||
@@ -13,7 +15,14 @@ def install_crew(proxy_options: list[str]) -> None:
|
||||
"""
|
||||
try:
|
||||
command = ["uv", "sync", *proxy_options]
|
||||
subprocess.run(command, check=True, capture_output=False, text=True) # noqa: S603
|
||||
|
||||
# Inject tool repository credentials so uv can authenticate
|
||||
# against private package indexes (e.g. crewai tool repository).
|
||||
# Without this, `uv sync` fails with 401 Unauthorized when the
|
||||
# project depends on tools from a private index.
|
||||
env = build_env_with_all_tool_credentials()
|
||||
|
||||
subprocess.run(command, check=True, capture_output=False, text=True, env=env) # noqa: S603
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
click.echo(f"An error occurred while running the crew: {e}", err=True)
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
from enum import Enum
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
import click
|
||||
from packaging import version
|
||||
|
||||
from crewai.cli.utils import build_env_with_tool_repository_credentials, read_toml
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials, read_toml
|
||||
from crewai.cli.version import get_crewai_version
|
||||
|
||||
|
||||
@@ -56,19 +55,7 @@ def execute_command(crew_type: CrewType) -> None:
|
||||
"""
|
||||
command = ["uv", "run", "kickoff" if crew_type == CrewType.FLOW else "run_crew"]
|
||||
|
||||
env = os.environ.copy()
|
||||
try:
|
||||
pyproject_data = read_toml()
|
||||
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
|
||||
|
||||
for source_config in sources.values():
|
||||
if isinstance(source_config, dict):
|
||||
index = source_config.get("index")
|
||||
if index:
|
||||
index_env = build_env_with_tool_repository_credentials(index)
|
||||
env.update(index_env)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
env = build_env_with_all_tool_credentials()
|
||||
|
||||
try:
|
||||
subprocess.run(command, capture_output=False, text=True, check=True, env=env) # noqa: S603
|
||||
|
||||
@@ -120,11 +120,11 @@ my_crew/
|
||||
my_flow/
|
||||
├── src/my_flow/
|
||||
│ ├── crews/ # Multiple crew definitions
|
||||
│ │ └── poem_crew/
|
||||
│ │ └── content_crew/
|
||||
│ │ ├── config/
|
||||
│ │ │ ├── agents.yaml
|
||||
│ │ │ └── tasks.yaml
|
||||
│ │ └── poem_crew.py
|
||||
│ │ └── content_crew.py
|
||||
│ ├── tools/ # Custom tools
|
||||
│ ├── main.py # Flow orchestration
|
||||
│ └── ...
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.13.0a7"
|
||||
"crewai[tools]==1.14.0a2"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -38,7 +38,7 @@ crewai run
|
||||
|
||||
This command initializes the {{name}} Flow as defined in your configuration.
|
||||
|
||||
This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folder.
|
||||
This example, unmodified, will run a content creation flow on AI Agents and save the output to `output/post.md`.
|
||||
|
||||
## Understanding Your Crew
|
||||
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
planner:
|
||||
role: >
|
||||
Content Planner
|
||||
goal: >
|
||||
Plan a detailed and engaging blog post outline on {topic}
|
||||
backstory: >
|
||||
You're an experienced content strategist who excels at creating
|
||||
structured outlines for blog posts. You know how to organize ideas
|
||||
into a logical flow that keeps readers engaged from start to finish.
|
||||
|
||||
writer:
|
||||
role: >
|
||||
Content Writer
|
||||
goal: >
|
||||
Write a compelling and well-structured blog post on {topic}
|
||||
based on the provided outline
|
||||
backstory: >
|
||||
You're a skilled writer with a talent for turning outlines into
|
||||
engaging, informative blog posts. Your writing is clear, conversational,
|
||||
and backed by solid reasoning. You adapt your tone to the subject matter
|
||||
while keeping things accessible to a broad audience.
|
||||
|
||||
editor:
|
||||
role: >
|
||||
Content Editor
|
||||
goal: >
|
||||
Review and polish the blog post on {topic} to ensure it is
|
||||
publication-ready
|
||||
backstory: >
|
||||
You're a meticulous editor with years of experience refining written
|
||||
content. You have an eye for clarity, flow, grammar, and consistency.
|
||||
You improve prose without changing the author's voice and ensure every
|
||||
piece you touch is polished and professional.
|
||||
@@ -0,0 +1,50 @@
|
||||
planning_task:
|
||||
description: >
|
||||
Create a detailed outline for a blog post about {topic}.
|
||||
|
||||
The outline should include:
|
||||
- A compelling title
|
||||
- An introduction hook
|
||||
- 3-5 main sections with key points to cover in each
|
||||
- A conclusion with a call to action
|
||||
|
||||
Make the outline detailed enough that a writer can produce
|
||||
a full blog post from it without additional research.
|
||||
expected_output: >
|
||||
A structured blog post outline with a title, introduction notes,
|
||||
detailed section breakdowns, and conclusion notes.
|
||||
agent: planner
|
||||
|
||||
writing_task:
|
||||
description: >
|
||||
Using the outline provided, write a full blog post about {topic}.
|
||||
|
||||
Requirements:
|
||||
- Follow the outline structure closely
|
||||
- Write in a clear, engaging, and conversational tone
|
||||
- Each section should be 2-3 paragraphs
|
||||
- Include a strong introduction and conclusion
|
||||
- Target around 800-1200 words
|
||||
expected_output: >
|
||||
A complete blog post in markdown format, ready for editing.
|
||||
The post should follow the outline and be well-written with
|
||||
clear transitions between sections.
|
||||
agent: writer
|
||||
|
||||
editing_task:
|
||||
description: >
|
||||
Review and edit the blog post about {topic}.
|
||||
|
||||
Focus on:
|
||||
- Fixing any grammar or spelling errors
|
||||
- Improving sentence clarity and flow
|
||||
- Ensuring consistent tone throughout
|
||||
- Strengthening the introduction and conclusion
|
||||
- Removing any redundancy
|
||||
|
||||
Do not rewrite the post — refine and polish it.
|
||||
expected_output: >
|
||||
The final, polished blog post in markdown format without '```'.
|
||||
Publication-ready with clean formatting and professional prose.
|
||||
agent: editor
|
||||
output_file: output/post.md
|
||||
@@ -8,8 +8,8 @@ from crewai.project import CrewBase, agent, crew, task
|
||||
|
||||
|
||||
@CrewBase
|
||||
class PoemCrew:
|
||||
"""Poem Crew"""
|
||||
class ContentCrew:
|
||||
"""Content Crew"""
|
||||
|
||||
agents: list[BaseAgent]
|
||||
tasks: list[Task]
|
||||
@@ -20,26 +20,50 @@ class PoemCrew:
|
||||
agents_config = "config/agents.yaml"
|
||||
tasks_config = "config/tasks.yaml"
|
||||
|
||||
# If you would lik to add tools to your crew, you can learn more about it here:
|
||||
# If you would like to add tools to your crew, you can learn more about it here:
|
||||
# https://docs.crewai.com/concepts/agents#agent-tools
|
||||
@agent
|
||||
def poem_writer(self) -> Agent:
|
||||
def planner(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config["poem_writer"], # type: ignore[index]
|
||||
config=self.agents_config["planner"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@agent
|
||||
def writer(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config["writer"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@agent
|
||||
def editor(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config["editor"], # type: ignore[index]
|
||||
)
|
||||
|
||||
# To learn more about structured task outputs,
|
||||
# task dependencies, and task callbacks, check out the documentation:
|
||||
# https://docs.crewai.com/concepts/tasks#overview-of-a-task
|
||||
@task
|
||||
def write_poem(self) -> Task:
|
||||
def planning_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["write_poem"], # type: ignore[index]
|
||||
config=self.tasks_config["planning_task"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@task
|
||||
def writing_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["writing_task"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@task
|
||||
def editing_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["editing_task"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the Research Crew"""
|
||||
"""Creates the Content Crew"""
|
||||
# To learn how to add knowledge sources to your crew, check out the documentation:
|
||||
# https://docs.crewai.com/concepts/knowledge#what-is-knowledge
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
"""Poem crew template."""
|
||||
@@ -1,11 +0,0 @@
|
||||
poem_writer:
|
||||
role: >
|
||||
CrewAI Poem Writer
|
||||
goal: >
|
||||
Generate a funny, light heartedpoem about how CrewAI
|
||||
is awesome with a sentence count of {sentence_count}
|
||||
backstory: >
|
||||
You're a creative poet with a talent for capturing the essence of any topic
|
||||
in a beautiful and engaging way. Known for your ability to craft poems that
|
||||
resonate with readers, you bring a unique perspective and artistic flair to
|
||||
every piece you write.
|
||||
@@ -1,7 +0,0 @@
|
||||
write_poem:
|
||||
description: >
|
||||
Write a poem about how CrewAI is awesome.
|
||||
Ensure the poem is engaging and adheres to the specified sentence count of {sentence_count}.
|
||||
expected_output: >
|
||||
A beautifully crafted poem about CrewAI, with exactly {sentence_count} sentences.
|
||||
agent: poem_writer
|
||||
@@ -1,59 +1,64 @@
|
||||
#!/usr/bin/env python
|
||||
from random import randint
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.flow import Flow, listen, start
|
||||
|
||||
from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew
|
||||
from {{folder_name}}.crews.content_crew.content_crew import ContentCrew
|
||||
|
||||
|
||||
class PoemState(BaseModel):
|
||||
sentence_count: int = 1
|
||||
poem: str = ""
|
||||
class ContentState(BaseModel):
|
||||
topic: str = ""
|
||||
outline: str = ""
|
||||
draft: str = ""
|
||||
final_post: str = ""
|
||||
|
||||
|
||||
class PoemFlow(Flow[PoemState]):
|
||||
class ContentFlow(Flow[ContentState]):
|
||||
|
||||
@start()
|
||||
def generate_sentence_count(self, crewai_trigger_payload: dict = None):
|
||||
print("Generating sentence count")
|
||||
def plan_content(self, crewai_trigger_payload: dict = None):
|
||||
print("Planning content")
|
||||
|
||||
# Use trigger payload if available
|
||||
if crewai_trigger_payload:
|
||||
# Example: use trigger data to influence sentence count
|
||||
self.state.sentence_count = crewai_trigger_payload.get('sentence_count', randint(1, 5))
|
||||
self.state.topic = crewai_trigger_payload.get("topic", "AI Agents")
|
||||
print(f"Using trigger payload: {crewai_trigger_payload}")
|
||||
else:
|
||||
self.state.sentence_count = randint(1, 5)
|
||||
self.state.topic = "AI Agents"
|
||||
|
||||
@listen(generate_sentence_count)
|
||||
def generate_poem(self):
|
||||
print("Generating poem")
|
||||
print(f"Topic: {self.state.topic}")
|
||||
|
||||
@listen(plan_content)
|
||||
def generate_content(self):
|
||||
print(f"Generating content on: {self.state.topic}")
|
||||
result = (
|
||||
PoemCrew()
|
||||
ContentCrew()
|
||||
.crew()
|
||||
.kickoff(inputs={"sentence_count": self.state.sentence_count})
|
||||
.kickoff(inputs={"topic": self.state.topic})
|
||||
)
|
||||
|
||||
print("Poem generated", result.raw)
|
||||
self.state.poem = result.raw
|
||||
print("Content generated")
|
||||
self.state.final_post = result.raw
|
||||
|
||||
@listen(generate_poem)
|
||||
def save_poem(self):
|
||||
print("Saving poem")
|
||||
with open("poem.txt", "w") as f:
|
||||
f.write(self.state.poem)
|
||||
@listen(generate_content)
|
||||
def save_content(self):
|
||||
print("Saving content")
|
||||
output_dir = Path("output")
|
||||
output_dir.mkdir(exist_ok=True)
|
||||
with open(output_dir / "post.md", "w") as f:
|
||||
f.write(self.state.final_post)
|
||||
print("Post saved to output/post.md")
|
||||
|
||||
|
||||
def kickoff():
|
||||
poem_flow = PoemFlow()
|
||||
poem_flow.kickoff()
|
||||
content_flow = ContentFlow()
|
||||
content_flow.kickoff()
|
||||
|
||||
|
||||
def plot():
|
||||
poem_flow = PoemFlow()
|
||||
poem_flow.plot()
|
||||
content_flow = ContentFlow()
|
||||
content_flow.plot()
|
||||
|
||||
|
||||
def run_with_trigger():
|
||||
@@ -74,10 +79,10 @@ def run_with_trigger():
|
||||
|
||||
# Create flow and kickoff with trigger payload
|
||||
# The @start() methods will automatically receive crewai_trigger_payload parameter
|
||||
poem_flow = PoemFlow()
|
||||
content_flow = ContentFlow()
|
||||
|
||||
try:
|
||||
result = poem_flow.kickoff({"crewai_trigger_payload": trigger_payload})
|
||||
result = content_flow.kickoff({"crewai_trigger_payload": trigger_payload})
|
||||
return result
|
||||
except Exception as e:
|
||||
raise Exception(f"An error occurred while running the flow with trigger: {e}")
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.13.0a7"
|
||||
"crewai[tools]==1.14.0a2"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.13.0a7"
|
||||
"crewai[tools]==1.14.0a2"
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
|
||||
@@ -21,6 +21,7 @@ from crewai.cli.utils import (
|
||||
get_project_description,
|
||||
get_project_name,
|
||||
get_project_version,
|
||||
read_toml,
|
||||
tree_copy,
|
||||
tree_find_and_replace,
|
||||
)
|
||||
@@ -116,11 +117,26 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
self._print_tools_preview(tools_metadata)
|
||||
self._print_current_organization()
|
||||
|
||||
build_env = os.environ.copy()
|
||||
try:
|
||||
pyproject_data = read_toml()
|
||||
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
|
||||
|
||||
for source_config in sources.values():
|
||||
if isinstance(source_config, dict):
|
||||
index = source_config.get("index")
|
||||
if index:
|
||||
index_env = build_env_with_tool_repository_credentials(index)
|
||||
build_env.update(index_env)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_build_dir:
|
||||
subprocess.run( # noqa: S603
|
||||
["uv", "build", "--sdist", "--out-dir", temp_build_dir], # noqa: S607
|
||||
check=True,
|
||||
capture_output=False,
|
||||
env=build_env,
|
||||
)
|
||||
|
||||
tarball_filename = next(
|
||||
|
||||
@@ -484,8 +484,12 @@ def get_flows(flow_path: str = "main.py") -> list[Flow[Any]]:
|
||||
if flow_instances:
|
||||
break
|
||||
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
except Exception as e:
|
||||
import logging
|
||||
|
||||
logging.getLogger(__name__).debug(
|
||||
f"Could not load tool repository credentials: {e}"
|
||||
)
|
||||
|
||||
return flow_instances
|
||||
|
||||
@@ -549,6 +553,31 @@ def build_env_with_tool_repository_credentials(
|
||||
return env
|
||||
|
||||
|
||||
def build_env_with_all_tool_credentials() -> dict[str, Any]:
|
||||
"""
|
||||
Build environment dict with credentials for all tool repository indexes
|
||||
found in pyproject.toml's [tool.uv.sources] section.
|
||||
|
||||
Returns:
|
||||
dict: Environment variables with credentials for all private indexes.
|
||||
"""
|
||||
env = os.environ.copy()
|
||||
try:
|
||||
pyproject_data = read_toml()
|
||||
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
|
||||
|
||||
for source_config in sources.values():
|
||||
if isinstance(source_config, dict):
|
||||
index = source_config.get("index")
|
||||
if index:
|
||||
index_env = build_env_with_tool_repository_credentials(index)
|
||||
env.update(index_env)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
return env
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _load_module_from_file(
|
||||
init_file: Path, module_name: str | None = None
|
||||
|
||||
@@ -4,6 +4,23 @@ import contextvars
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.events.base_events import (
|
||||
get_emission_sequence,
|
||||
set_emission_counter,
|
||||
)
|
||||
from crewai.events.event_context import (
|
||||
_event_id_stack,
|
||||
_last_event_id,
|
||||
_triggering_event_id,
|
||||
)
|
||||
from crewai.flow.flow_context import (
|
||||
current_flow_id,
|
||||
current_flow_method_name,
|
||||
current_flow_request_id,
|
||||
)
|
||||
|
||||
|
||||
_platform_integration_token: contextvars.ContextVar[str | None] = (
|
||||
contextvars.ContextVar("platform_integration_token", default=None)
|
||||
@@ -63,3 +80,53 @@ def reset_current_task_id(token: contextvars.Token[str | None]) -> None:
|
||||
def get_current_task_id() -> str | None:
|
||||
"""Get the current task ID from the context."""
|
||||
return _current_task_id.get()
|
||||
|
||||
|
||||
class ExecutionContext(BaseModel):
|
||||
"""Snapshot of ContextVar execution state."""
|
||||
|
||||
current_task_id: str | None = Field(default=None)
|
||||
flow_request_id: str | None = Field(default=None)
|
||||
flow_id: str | None = Field(default=None)
|
||||
flow_method_name: str = Field(default="unknown")
|
||||
|
||||
event_id_stack: tuple[tuple[str, str], ...] = Field(default=())
|
||||
last_event_id: str | None = Field(default=None)
|
||||
triggering_event_id: str | None = Field(default=None)
|
||||
emission_sequence: int = Field(default=0)
|
||||
|
||||
feedback_callback_info: dict[str, Any] | None = Field(default=None)
|
||||
platform_token: str | None = Field(default=None)
|
||||
|
||||
|
||||
def capture_execution_context(
|
||||
feedback_callback_info: dict[str, Any] | None = None,
|
||||
) -> ExecutionContext:
|
||||
"""Read current ContextVars into an ExecutionContext."""
|
||||
return ExecutionContext(
|
||||
current_task_id=_current_task_id.get(),
|
||||
flow_request_id=current_flow_request_id.get(),
|
||||
flow_id=current_flow_id.get(),
|
||||
flow_method_name=current_flow_method_name.get(),
|
||||
event_id_stack=_event_id_stack.get(),
|
||||
last_event_id=_last_event_id.get(),
|
||||
triggering_event_id=_triggering_event_id.get(),
|
||||
emission_sequence=get_emission_sequence(),
|
||||
feedback_callback_info=feedback_callback_info,
|
||||
platform_token=_platform_integration_token.get(),
|
||||
)
|
||||
|
||||
|
||||
def apply_execution_context(ctx: ExecutionContext) -> None:
|
||||
"""Write an ExecutionContext back into the ContextVars."""
|
||||
_current_task_id.set(ctx.current_task_id)
|
||||
current_flow_request_id.set(ctx.flow_request_id)
|
||||
current_flow_id.set(ctx.flow_id)
|
||||
current_flow_method_name.set(ctx.flow_method_name)
|
||||
|
||||
_event_id_stack.set(ctx.event_id_stack)
|
||||
_last_event_id.set(ctx.last_event_id)
|
||||
_triggering_event_id.set(ctx.triggering_event_id)
|
||||
set_emission_counter(ctx.emission_sequence)
|
||||
|
||||
_platform_integration_token.set(ctx.platform_token)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Callable, Sequence
|
||||
from concurrent.futures import Future
|
||||
from copy import copy as shallow_copy
|
||||
from hashlib import md5
|
||||
@@ -10,7 +10,9 @@ from pathlib import Path
|
||||
import re
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Annotated,
|
||||
Any,
|
||||
Literal,
|
||||
cast,
|
||||
)
|
||||
import uuid
|
||||
@@ -21,12 +23,14 @@ from opentelemetry.context import attach, detach
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
Field,
|
||||
Json,
|
||||
PrivateAttr,
|
||||
field_validator,
|
||||
model_validator,
|
||||
)
|
||||
from pydantic.functional_serializers import PlainSerializer
|
||||
from pydantic_core import PydanticCustomError
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
@@ -37,6 +41,8 @@ if TYPE_CHECKING:
|
||||
from crewai_files import FileInput
|
||||
from opentelemetry.trace import Span
|
||||
|
||||
from crewai.context import ExecutionContext
|
||||
|
||||
try:
|
||||
from crewai_files import get_supported_content_types
|
||||
|
||||
@@ -49,7 +55,12 @@ except ImportError:
|
||||
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.agent_builder.base_agent import (
|
||||
BaseAgent,
|
||||
_resolve_agent,
|
||||
_serialize_llm_ref,
|
||||
_validate_llm_ref,
|
||||
)
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.crews.utils import (
|
||||
@@ -132,6 +143,12 @@ from crewai.utilities.training_handler import CrewTrainingHandler
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
|
||||
|
||||
|
||||
def _resolve_agents(value: Any, info: Any) -> Any:
|
||||
if not isinstance(value, list):
|
||||
return value
|
||||
return [_resolve_agent(a, info) for a in value]
|
||||
|
||||
|
||||
class Crew(FlowTrackable, BaseModel):
|
||||
"""
|
||||
Represents a group of agents, defining how they should collaborate and the
|
||||
@@ -170,6 +187,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
fingerprinting.
|
||||
"""
|
||||
|
||||
entity_type: Literal["crew"] = "crew"
|
||||
|
||||
__hash__ = object.__hash__
|
||||
_execution_span: Span | None = PrivateAttr()
|
||||
_rpm_controller: RPMController = PrivateAttr()
|
||||
@@ -191,7 +210,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
name: str | None = Field(default="crew")
|
||||
cache: bool = Field(default=True)
|
||||
tasks: list[Task] = Field(default_factory=list)
|
||||
agents: list[BaseAgent] = Field(default_factory=list)
|
||||
agents: Annotated[
|
||||
list[BaseAgent],
|
||||
BeforeValidator(_resolve_agents),
|
||||
] = Field(default_factory=list)
|
||||
process: Process = Field(default=Process.sequential)
|
||||
verbose: bool = Field(default=False)
|
||||
memory: bool | Memory | MemoryScope | MemorySlice | None = Field(
|
||||
@@ -209,15 +231,20 @@ class Crew(FlowTrackable, BaseModel):
|
||||
default=None,
|
||||
description="Metrics for the LLM usage during all tasks execution.",
|
||||
)
|
||||
manager_llm: str | BaseLLM | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
manager_agent: BaseAgent | None = Field(
|
||||
description="Custom agent that will be used as manager.", default=None
|
||||
)
|
||||
function_calling_llm: str | LLM | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
manager_llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(description="Language model that will run the agent.", default=None)
|
||||
manager_agent: Annotated[
|
||||
BaseAgent | None,
|
||||
BeforeValidator(_resolve_agent),
|
||||
] = Field(description="Custom agent that will be used as manager.", default=None)
|
||||
function_calling_llm: Annotated[
|
||||
str | LLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(description="Language model that will run the agent.", default=None)
|
||||
config: Json[dict[str, Any]] | dict[str, Any] | None = Field(default=None)
|
||||
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
||||
share_crew: bool | None = Field(default=False)
|
||||
@@ -266,7 +293,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
default=False,
|
||||
description="Plan the crew execution and add the plan to the crew.",
|
||||
)
|
||||
planning_llm: str | BaseLLM | None = Field(
|
||||
planning_llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(
|
||||
default=None,
|
||||
description=(
|
||||
"Language model that will run the AgentPlanner if planning is True."
|
||||
@@ -287,7 +318,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
"knowledge object."
|
||||
),
|
||||
)
|
||||
chat_llm: str | BaseLLM | None = Field(
|
||||
chat_llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(
|
||||
default=None,
|
||||
description="LLM used to handle chatting with the crew.",
|
||||
)
|
||||
@@ -313,14 +348,20 @@ class Crew(FlowTrackable, BaseModel):
|
||||
description="Whether to enable tracing for the crew. True=always enable, False=always disable, None=check environment/user settings.",
|
||||
)
|
||||
|
||||
execution_context: ExecutionContext | None = Field(default=None)
|
||||
checkpoint_inputs: dict[str, Any] | None = Field(default=None)
|
||||
checkpoint_train: bool | None = Field(default=None)
|
||||
checkpoint_kickoff_event_id: str | None = Field(default=None)
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None, info: Any) -> UUID4 | None:
|
||||
"""Prevent manual setting of the 'id' field by users."""
|
||||
if v:
|
||||
if v and not (info.context or {}).get("from_checkpoint"):
|
||||
raise PydanticCustomError(
|
||||
"may_not_set_field", "The 'id' field cannot be set by the user.", {}
|
||||
)
|
||||
return v
|
||||
|
||||
@field_validator("config", mode="before")
|
||||
@classmethod
|
||||
@@ -1388,7 +1429,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self,
|
||||
tools: list[BaseTool],
|
||||
task_agent: BaseAgent,
|
||||
agents: list[BaseAgent],
|
||||
agents: Sequence[BaseAgent],
|
||||
) -> list[BaseTool]:
|
||||
if hasattr(task_agent, "get_delegation_tools"):
|
||||
delegation_tools = task_agent.get_delegation_tools(agents)
|
||||
|
||||
@@ -21,7 +21,7 @@ class CrewOutput(BaseModel):
|
||||
description="JSON dict output of Crew", default=None
|
||||
)
|
||||
tasks_output: list[TaskOutput] = Field(
|
||||
description="Output of each task", default=[]
|
||||
description="Output of each task", default_factory=list
|
||||
)
|
||||
token_usage: UsageMetrics = Field(
|
||||
description="Processed token summary", default_factory=UsageMetrics
|
||||
|
||||
@@ -78,9 +78,15 @@ from crewai.events.types.mcp_events import (
|
||||
MCPConnectionCompletedEvent,
|
||||
MCPConnectionFailedEvent,
|
||||
MCPConnectionStartedEvent,
|
||||
MCPToolExecutionCompletedEvent,
|
||||
MCPToolExecutionFailedEvent,
|
||||
MCPToolExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryRetrievalCompletedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
)
|
||||
from crewai.events.types.observation_events import (
|
||||
GoalAchievedEarlyEvent,
|
||||
PlanRefinementEvent,
|
||||
@@ -94,6 +100,12 @@ from crewai.events.types.reasoning_events import (
|
||||
AgentReasoningFailedEvent,
|
||||
AgentReasoningStartedEvent,
|
||||
)
|
||||
from crewai.events.types.skill_events import (
|
||||
SkillActivatedEvent,
|
||||
SkillDiscoveryCompletedEvent,
|
||||
SkillLoadFailedEvent,
|
||||
SkillLoadedEvent,
|
||||
)
|
||||
from crewai.events.types.task_events import (
|
||||
TaskCompletedEvent,
|
||||
TaskFailedEvent,
|
||||
@@ -478,6 +490,7 @@ class EventListener(BaseEventListener):
|
||||
self.formatter.handle_guardrail_completed(
|
||||
event.success, event.error, event.retry_count
|
||||
)
|
||||
self._telemetry.feature_usage_span("guardrail:execution")
|
||||
|
||||
@crewai_event_bus.on(CrewTestStartedEvent)
|
||||
def on_crew_test_started(source: Any, event: CrewTestStartedEvent) -> None:
|
||||
@@ -559,6 +572,7 @@ class EventListener(BaseEventListener):
|
||||
event.plan,
|
||||
event.ready,
|
||||
)
|
||||
self._telemetry.feature_usage_span("planning:creation")
|
||||
|
||||
@crewai_event_bus.on(AgentReasoningFailedEvent)
|
||||
def on_agent_reasoning_failed(_: Any, event: AgentReasoningFailedEvent) -> None:
|
||||
@@ -616,6 +630,7 @@ class EventListener(BaseEventListener):
|
||||
event.replan_count,
|
||||
event.completed_steps_preserved,
|
||||
)
|
||||
self._telemetry.feature_usage_span("planning:replan")
|
||||
|
||||
@crewai_event_bus.on(GoalAchievedEarlyEvent)
|
||||
def on_goal_achieved_early(_: Any, event: GoalAchievedEarlyEvent) -> None:
|
||||
@@ -623,6 +638,25 @@ class EventListener(BaseEventListener):
|
||||
event.steps_completed,
|
||||
event.steps_remaining,
|
||||
)
|
||||
self._telemetry.feature_usage_span("planning:goal_achieved_early")
|
||||
|
||||
# ----------- SKILL EVENTS -----------
|
||||
|
||||
@crewai_event_bus.on(SkillDiscoveryCompletedEvent)
|
||||
def on_skill_discovery(_: Any, event: SkillDiscoveryCompletedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("skill:discovery")
|
||||
|
||||
@crewai_event_bus.on(SkillLoadedEvent)
|
||||
def on_skill_loaded(_: Any, event: SkillLoadedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("skill:loaded")
|
||||
|
||||
@crewai_event_bus.on(SkillLoadFailedEvent)
|
||||
def on_skill_load_failed(_: Any, event: SkillLoadFailedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("skill:load_failed")
|
||||
|
||||
@crewai_event_bus.on(SkillActivatedEvent)
|
||||
def on_skill_activated(_: Any, event: SkillActivatedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("skill:activated")
|
||||
|
||||
# ----------- AGENT LOGGING EVENTS -----------
|
||||
|
||||
@@ -662,6 +696,7 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.is_multiturn,
|
||||
)
|
||||
self._telemetry.feature_usage_span("a2a:delegation")
|
||||
|
||||
@crewai_event_bus.on(A2AConversationStartedEvent)
|
||||
def on_a2a_conversation_started(
|
||||
@@ -703,6 +738,7 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.total_turns,
|
||||
)
|
||||
self._telemetry.feature_usage_span("a2a:conversation")
|
||||
|
||||
@crewai_event_bus.on(A2APollingStartedEvent)
|
||||
def on_a2a_polling_started(_: Any, event: A2APollingStartedEvent) -> None:
|
||||
@@ -744,6 +780,7 @@ class EventListener(BaseEventListener):
|
||||
event.connection_duration_ms,
|
||||
event.is_reconnect,
|
||||
)
|
||||
self._telemetry.feature_usage_span("mcp:connection")
|
||||
|
||||
@crewai_event_bus.on(MCPConnectionFailedEvent)
|
||||
def on_mcp_connection_failed(_: Any, event: MCPConnectionFailedEvent) -> None:
|
||||
@@ -754,6 +791,7 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.error_type,
|
||||
)
|
||||
self._telemetry.feature_usage_span("mcp:connection_failed")
|
||||
|
||||
@crewai_event_bus.on(MCPConfigFetchFailedEvent)
|
||||
def on_mcp_config_fetch_failed(
|
||||
@@ -764,6 +802,7 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.error_type,
|
||||
)
|
||||
self._telemetry.feature_usage_span("mcp:config_fetch_failed")
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionStartedEvent)
|
||||
def on_mcp_tool_execution_started(
|
||||
@@ -775,6 +814,12 @@ class EventListener(BaseEventListener):
|
||||
event.tool_args,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionCompletedEvent)
|
||||
def on_mcp_tool_execution_completed(
|
||||
_: Any, event: MCPToolExecutionCompletedEvent
|
||||
) -> None:
|
||||
self._telemetry.feature_usage_span("mcp:tool_execution")
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionFailedEvent)
|
||||
def on_mcp_tool_execution_failed(
|
||||
_: Any, event: MCPToolExecutionFailedEvent
|
||||
@@ -786,6 +831,45 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.error_type,
|
||||
)
|
||||
self._telemetry.feature_usage_span("mcp:tool_execution_failed")
|
||||
|
||||
# ----------- MEMORY TELEMETRY -----------
|
||||
|
||||
@crewai_event_bus.on(MemorySaveCompletedEvent)
|
||||
def on_memory_save_completed(_: Any, event: MemorySaveCompletedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("memory:save")
|
||||
|
||||
@crewai_event_bus.on(MemoryQueryCompletedEvent)
|
||||
def on_memory_query_completed(_: Any, event: MemoryQueryCompletedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("memory:query")
|
||||
|
||||
@crewai_event_bus.on(MemoryRetrievalCompletedEvent)
|
||||
def on_memory_retrieval_completed_telemetry(
|
||||
_: Any, event: MemoryRetrievalCompletedEvent
|
||||
) -> None:
|
||||
self._telemetry.feature_usage_span("memory:retrieval")
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
||||
def on_crew_kickoff_hooks(_: Any, event: CrewKickoffStartedEvent) -> None:
|
||||
from crewai.hooks.llm_hooks import (
|
||||
get_after_llm_call_hooks,
|
||||
get_before_llm_call_hooks,
|
||||
)
|
||||
from crewai.hooks.tool_hooks import (
|
||||
get_after_tool_call_hooks,
|
||||
get_before_tool_call_hooks,
|
||||
)
|
||||
|
||||
has_hooks = any(
|
||||
[
|
||||
get_before_llm_call_hooks(),
|
||||
get_after_llm_call_hooks(),
|
||||
get_before_tool_call_hooks(),
|
||||
get_after_tool_call_hooks(),
|
||||
]
|
||||
)
|
||||
if has_hooks:
|
||||
self._telemetry.feature_usage_span("hooks:registered")
|
||||
|
||||
|
||||
event_listener = EventListener()
|
||||
|
||||
@@ -1,80 +0,0 @@
|
||||
"""Checkpointable execution context for the crewAI runtime.
|
||||
|
||||
Captures the ContextVar state needed to resume execution from a checkpoint.
|
||||
Used by the RootModel (step 5) to include execution context in snapshots.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.context import (
|
||||
_current_task_id,
|
||||
_platform_integration_token,
|
||||
)
|
||||
from crewai.events.base_events import (
|
||||
get_emission_sequence,
|
||||
set_emission_counter,
|
||||
)
|
||||
from crewai.events.event_context import (
|
||||
_event_id_stack,
|
||||
_last_event_id,
|
||||
_triggering_event_id,
|
||||
)
|
||||
from crewai.flow.flow_context import (
|
||||
current_flow_id,
|
||||
current_flow_method_name,
|
||||
current_flow_request_id,
|
||||
)
|
||||
|
||||
|
||||
class ExecutionContext(BaseModel):
|
||||
"""Snapshot of ContextVar state required for checkpoint/resume."""
|
||||
|
||||
current_task_id: str | None = Field(default=None)
|
||||
flow_request_id: str | None = Field(default=None)
|
||||
flow_id: str | None = Field(default=None)
|
||||
flow_method_name: str = Field(default="unknown")
|
||||
|
||||
event_id_stack: tuple[tuple[str, str], ...] = Field(default=())
|
||||
last_event_id: str | None = Field(default=None)
|
||||
triggering_event_id: str | None = Field(default=None)
|
||||
emission_sequence: int = Field(default=0)
|
||||
|
||||
feedback_callback_info: dict[str, Any] | None = Field(default=None)
|
||||
platform_token: str | None = Field(default=None)
|
||||
|
||||
|
||||
def capture_execution_context(
|
||||
feedback_callback_info: dict[str, Any] | None = None,
|
||||
) -> ExecutionContext:
|
||||
"""Read all checkpoint-required ContextVars into an ExecutionContext."""
|
||||
return ExecutionContext(
|
||||
current_task_id=_current_task_id.get(),
|
||||
flow_request_id=current_flow_request_id.get(),
|
||||
flow_id=current_flow_id.get(),
|
||||
flow_method_name=current_flow_method_name.get(),
|
||||
event_id_stack=_event_id_stack.get(),
|
||||
last_event_id=_last_event_id.get(),
|
||||
triggering_event_id=_triggering_event_id.get(),
|
||||
emission_sequence=get_emission_sequence(),
|
||||
feedback_callback_info=feedback_callback_info,
|
||||
platform_token=_platform_integration_token.get(),
|
||||
)
|
||||
|
||||
|
||||
def apply_execution_context(ctx: ExecutionContext) -> None:
|
||||
"""Write an ExecutionContext back into the ContextVars."""
|
||||
_current_task_id.set(ctx.current_task_id)
|
||||
current_flow_request_id.set(ctx.flow_request_id)
|
||||
current_flow_id.set(ctx.flow_id)
|
||||
current_flow_method_name.set(ctx.flow_method_name)
|
||||
|
||||
_event_id_stack.set(ctx.event_id_stack)
|
||||
_last_event_id.set(ctx.last_event_id)
|
||||
_triggering_event_id.set(ctx.triggering_event_id)
|
||||
set_emission_counter(ctx.emission_sequence)
|
||||
|
||||
_platform_integration_token.set(ctx.platform_token)
|
||||
@@ -1907,6 +1907,37 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin):
|
||||
"original_tool": original_tool,
|
||||
}
|
||||
|
||||
def _extract_tool_name(self, tool_call: Any) -> str:
|
||||
"""Extract tool name from various tool call formats."""
|
||||
if hasattr(tool_call, "function"):
|
||||
return sanitize_tool_name(tool_call.function.name)
|
||||
if hasattr(tool_call, "function_call") and tool_call.function_call:
|
||||
return sanitize_tool_name(tool_call.function_call.name)
|
||||
if hasattr(tool_call, "name"):
|
||||
return sanitize_tool_name(tool_call.name)
|
||||
if isinstance(tool_call, dict):
|
||||
func_info = tool_call.get("function", {})
|
||||
return sanitize_tool_name(
|
||||
func_info.get("name", "") or tool_call.get("name", "unknown")
|
||||
)
|
||||
return "unknown"
|
||||
|
||||
@router(execute_native_tool)
|
||||
def check_native_todo_completion(
|
||||
self,
|
||||
) -> Literal["todo_satisfied", "todo_not_satisfied"]:
|
||||
"""Check if the native tool execution satisfied the active todo.
|
||||
|
||||
Similar to check_todo_completion but for native tool execution path.
|
||||
"""
|
||||
current_todo = self.state.todos.current_todo
|
||||
|
||||
if not current_todo:
|
||||
return "todo_not_satisfied"
|
||||
|
||||
# For native tools, any tool execution satisfies the todo
|
||||
return "todo_satisfied"
|
||||
|
||||
@listen("initialized")
|
||||
def continue_iteration(self) -> Literal["check_iteration"]:
|
||||
"""Bridge listener that connects iteration loop back to iteration check."""
|
||||
|
||||
@@ -25,6 +25,7 @@ import logging
|
||||
import threading
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Annotated,
|
||||
Any,
|
||||
ClassVar,
|
||||
Generic,
|
||||
@@ -41,9 +42,11 @@ from opentelemetry import baggage
|
||||
from opentelemetry.context import attach, detach
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
ConfigDict,
|
||||
Field,
|
||||
PrivateAttr,
|
||||
SerializeAsAny,
|
||||
ValidationError,
|
||||
)
|
||||
from pydantic._internal._model_construction import ModelMetaclass
|
||||
@@ -115,6 +118,7 @@ from crewai.memory.unified_memory import Memory
|
||||
if TYPE_CHECKING:
|
||||
from crewai_files import FileInput
|
||||
|
||||
from crewai.context import ExecutionContext
|
||||
from crewai.flow.async_feedback.types import PendingFeedbackContext
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
|
||||
@@ -134,6 +138,19 @@ from crewai.utilities.streaming import (
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _resolve_persistence(value: Any) -> Any:
|
||||
if value is None or isinstance(value, FlowPersistence):
|
||||
return value
|
||||
if isinstance(value, dict):
|
||||
from crewai.flow.persistence.base import _persistence_registry
|
||||
|
||||
type_name = value.get("persistence_type", "SQLiteFlowPersistence")
|
||||
cls = _persistence_registry.get(type_name)
|
||||
if cls is not None:
|
||||
return cls.model_validate(value)
|
||||
return value
|
||||
|
||||
|
||||
class FlowState(BaseModel):
|
||||
"""Base model for all flow states, ensuring each state has a unique ID."""
|
||||
|
||||
@@ -883,6 +900,8 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta):
|
||||
_routers: ClassVar[set[FlowMethodName]] = set()
|
||||
_router_paths: ClassVar[dict[FlowMethodName, list[FlowMethodName]]] = {}
|
||||
|
||||
entity_type: Literal["flow"] = "flow"
|
||||
|
||||
initial_state: Any = Field(default=None)
|
||||
name: str | None = Field(default=None)
|
||||
tracing: bool | None = Field(default=None)
|
||||
@@ -893,8 +912,17 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta):
|
||||
human_feedback_history: list[HumanFeedbackResult] = Field(default_factory=list)
|
||||
last_human_feedback: HumanFeedbackResult | None = Field(default=None)
|
||||
|
||||
persistence: Any = Field(default=None, exclude=True)
|
||||
max_method_calls: int = Field(default=100, exclude=True)
|
||||
persistence: Annotated[
|
||||
SerializeAsAny[FlowPersistence] | Any,
|
||||
BeforeValidator(lambda v, _: _resolve_persistence(v)),
|
||||
] = Field(default=None)
|
||||
max_method_calls: int = Field(default=100)
|
||||
|
||||
execution_context: ExecutionContext | None = Field(default=None)
|
||||
checkpoint_completed_methods: set[str] | None = Field(default=None)
|
||||
checkpoint_method_outputs: list[Any] | None = Field(default=None)
|
||||
checkpoint_method_counts: dict[str, int] | None = Field(default=None)
|
||||
checkpoint_state: dict[str, Any] | None = Field(default=None)
|
||||
|
||||
_methods: dict[FlowMethodName, FlowMethod[Any, Any]] = PrivateAttr(
|
||||
default_factory=dict
|
||||
|
||||
@@ -5,14 +5,17 @@ from __future__ import annotations
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.flow.async_feedback.types import PendingFeedbackContext
|
||||
|
||||
|
||||
class FlowPersistence(ABC):
|
||||
_persistence_registry: dict[str, type[FlowPersistence]] = {}
|
||||
|
||||
|
||||
class FlowPersistence(BaseModel, ABC):
|
||||
"""Abstract base class for flow state persistence.
|
||||
|
||||
This class defines the interface that all persistence implementations must follow.
|
||||
@@ -24,6 +27,13 @@ class FlowPersistence(ABC):
|
||||
- clear_pending_feedback(): Clears pending feedback after resume
|
||||
"""
|
||||
|
||||
persistence_type: str = Field(default="base")
|
||||
|
||||
def __init_subclass__(cls, **kwargs: Any) -> None:
|
||||
super().__init_subclass__(**kwargs)
|
||||
if not getattr(cls, "__abstractmethods__", set()):
|
||||
_persistence_registry[cls.__name__] = cls
|
||||
|
||||
@abstractmethod
|
||||
def init_db(self) -> None:
|
||||
"""Initialize the persistence backend.
|
||||
@@ -95,7 +105,7 @@ class FlowPersistence(ABC):
|
||||
"""
|
||||
return None
|
||||
|
||||
def clear_pending_feedback(self, flow_uuid: str) -> None: # noqa: B027
|
||||
def clear_pending_feedback(self, flow_uuid: str) -> None:
|
||||
"""Clear the pending feedback marker after successful resume.
|
||||
|
||||
This is called after feedback is received and the flow resumes.
|
||||
|
||||
@@ -9,7 +9,8 @@ from pathlib import Path
|
||||
import sqlite3
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, Field, PrivateAttr, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.flow.persistence.base import FlowPersistence
|
||||
from crewai.utilities.lock_store import lock as store_lock
|
||||
@@ -50,26 +51,22 @@ class SQLiteFlowPersistence(FlowPersistence):
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(self, db_path: str | None = None) -> None:
|
||||
"""Initialize SQLite persistence.
|
||||
persistence_type: str = Field(default="SQLiteFlowPersistence")
|
||||
db_path: str = Field(
|
||||
default_factory=lambda: str(Path(db_storage_path()) / "flow_states.db")
|
||||
)
|
||||
_lock_name: str = PrivateAttr()
|
||||
|
||||
Args:
|
||||
db_path: Path to the SQLite database file. If not provided, uses
|
||||
db_storage_path() from utilities.paths.
|
||||
def __init__(self, db_path: str | None = None, /, **kwargs: Any) -> None:
|
||||
if db_path is not None:
|
||||
kwargs["db_path"] = db_path
|
||||
super().__init__(**kwargs)
|
||||
|
||||
Raises:
|
||||
ValueError: If db_path is invalid
|
||||
"""
|
||||
|
||||
# Get path from argument or default location
|
||||
path = db_path or str(Path(db_storage_path()) / "flow_states.db")
|
||||
|
||||
if not path:
|
||||
raise ValueError("Database path must be provided")
|
||||
|
||||
self.db_path = path # Now mypy knows this is str
|
||||
@model_validator(mode="after")
|
||||
def _setup(self) -> Self:
|
||||
self._lock_name = f"sqlite:{os.path.realpath(self.db_path)}"
|
||||
self.init_db()
|
||||
return self
|
||||
|
||||
def init_db(self) -> None:
|
||||
"""Create the necessary tables if they don't exist."""
|
||||
|
||||
@@ -40,7 +40,9 @@ class LiteAgentOutput(BaseModel):
|
||||
usage_metrics: dict[str, Any] | None = Field(
|
||||
description="Token usage metrics for this execution", default=None
|
||||
)
|
||||
messages: list[LLMMessage] = Field(description="Messages of the agent", default=[])
|
||||
messages: list[LLMMessage] = Field(
|
||||
description="Messages of the agent", default_factory=list
|
||||
)
|
||||
|
||||
plan: str | None = Field(
|
||||
default=None, description="The execution plan that was generated, if any"
|
||||
|
||||
@@ -32,6 +32,10 @@ class MemoryScope(BaseModel):
|
||||
"""Extract memory dependency and normalize root path before validation."""
|
||||
if isinstance(data, MemoryScope):
|
||||
return data
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"Expected dict or MemoryScope, got {type(data).__name__}")
|
||||
if "memory" not in data:
|
||||
raise ValueError("MemoryScope requires a 'memory' key")
|
||||
memory = data.pop("memory")
|
||||
instance: MemoryScope = handler(data)
|
||||
instance._memory = memory
|
||||
@@ -199,6 +203,10 @@ class MemorySlice(BaseModel):
|
||||
"""Extract memory dependency and normalize scopes before validation."""
|
||||
if isinstance(data, MemorySlice):
|
||||
return data
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"Expected dict or MemorySlice, got {type(data).__name__}")
|
||||
if "memory" not in data:
|
||||
raise ValueError("MemorySlice requires a 'memory' key")
|
||||
memory = data.pop("memory")
|
||||
data["scopes"] = [s.rstrip("/") or "/" for s in data.get("scopes", [])]
|
||||
instance: MemorySlice = handler(data)
|
||||
|
||||
18
lib/crewai/src/crewai/runtime_state.py
Normal file
18
lib/crewai/src/crewai/runtime_state.py
Normal file
@@ -0,0 +1,18 @@
|
||||
"""Unified runtime state for crewAI.
|
||||
|
||||
``RuntimeState`` is a ``RootModel`` whose ``model_dump_json()`` produces a
|
||||
complete, self-contained snapshot of every active entity in the program.
|
||||
|
||||
The ``Entity`` type alias and ``RuntimeState`` model are built at import time
|
||||
in ``crewai/__init__.py`` after all forward references are resolved.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def _entity_discriminator(v: dict[str, Any] | object) -> str:
|
||||
if isinstance(v, dict):
|
||||
raw = v.get("entity_type", "agent")
|
||||
else:
|
||||
raw = getattr(v, "entity_type", "agent")
|
||||
return str(raw)
|
||||
@@ -1,6 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Sequence
|
||||
from concurrent.futures import Future
|
||||
import contextvars
|
||||
from copy import copy as shallow_copy
|
||||
@@ -12,6 +13,7 @@ import logging
|
||||
from pathlib import Path
|
||||
import threading
|
||||
from typing import (
|
||||
Annotated,
|
||||
Any,
|
||||
ClassVar,
|
||||
cast,
|
||||
@@ -24,6 +26,7 @@ import warnings
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
Field,
|
||||
PrivateAttr,
|
||||
field_validator,
|
||||
@@ -32,7 +35,7 @@ from pydantic import (
|
||||
from pydantic_core import PydanticCustomError
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent, _resolve_agent
|
||||
from crewai.context import reset_current_task_id, set_current_task_id
|
||||
from crewai.core.providers.content_processor import process_content
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
@@ -129,9 +132,10 @@ class Task(BaseModel):
|
||||
callback: SerializableCallable | None = Field(
|
||||
description="Callback to be executed after the task is completed.", default=None
|
||||
)
|
||||
agent: BaseAgent | None = Field(
|
||||
description="Agent responsible for execution the task.", default=None
|
||||
)
|
||||
agent: Annotated[
|
||||
BaseAgent | None,
|
||||
BeforeValidator(_resolve_agent),
|
||||
] = Field(description="Agent responsible for execution the task.", default=None)
|
||||
context: list[Task] | None | _NotSpecified = Field(
|
||||
description="Other tasks that will have their output used as context for this task.",
|
||||
default=NOT_SPECIFIED,
|
||||
@@ -392,11 +396,12 @@ class Task(BaseModel):
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
if v:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None, info: Any) -> UUID4 | None:
|
||||
if v and not (info.context or {}).get("from_checkpoint"):
|
||||
raise PydanticCustomError(
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
)
|
||||
return v
|
||||
|
||||
@field_validator("input_files", mode="before")
|
||||
@classmethod
|
||||
@@ -997,7 +1002,7 @@ Follow these guidelines:
|
||||
self.delegations += 1
|
||||
|
||||
def copy( # type: ignore
|
||||
self, agents: list[BaseAgent], task_mapping: dict[str, Task]
|
||||
self, agents: Sequence[BaseAgent], task_mapping: dict[str, Task]
|
||||
) -> Task:
|
||||
"""Creates a deep copy of the Task while preserving its original class type.
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ from pydantic import Field
|
||||
from crewai.task import Task
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.types.callback import SerializableCallable
|
||||
|
||||
|
||||
class ConditionalTask(Task):
|
||||
@@ -24,7 +25,7 @@ class ConditionalTask(Task):
|
||||
- Cannot be the first task since it needs context from the previous task
|
||||
"""
|
||||
|
||||
condition: Callable[[TaskOutput], bool] | None = Field(
|
||||
condition: SerializableCallable | None = Field(
|
||||
default=None,
|
||||
description="Function that determines whether the task should be executed based on previous task output.",
|
||||
)
|
||||
@@ -51,7 +52,7 @@ class ConditionalTask(Task):
|
||||
"""
|
||||
if self.condition is None:
|
||||
raise ValueError("No condition function set for conditional task")
|
||||
return self.condition(context)
|
||||
return bool(self.condition(context))
|
||||
|
||||
def get_skipped_task_output(self) -> TaskOutput:
|
||||
"""Generate a TaskOutput for when the conditional task is skipped.
|
||||
|
||||
@@ -43,7 +43,9 @@ class TaskOutput(BaseModel):
|
||||
output_format: OutputFormat = Field(
|
||||
description="Output format of the task", default=OutputFormat.RAW
|
||||
)
|
||||
messages: list[LLMMessage] = Field(description="Messages of the task", default=[])
|
||||
messages: list[LLMMessage] = Field(
|
||||
description="Messages of the task", default_factory=list
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_summary(self) -> TaskOutput:
|
||||
|
||||
@@ -1040,3 +1040,20 @@ class Telemetry:
|
||||
close_span(span)
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
|
||||
def feature_usage_span(self, feature: str) -> None:
|
||||
"""Records that a feature was used. One span = one count.
|
||||
|
||||
Args:
|
||||
feature: Feature identifier, e.g. "planning:creation",
|
||||
"mcp:connection", "a2a:delegation".
|
||||
"""
|
||||
|
||||
def _operation() -> None:
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Feature Usage")
|
||||
self._add_attribute(span, "crewai_version", version("crewai"))
|
||||
self._add_attribute(span, "feature", feature)
|
||||
close_span(span)
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Sequence
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from crewai.tools.agent_tools.ask_question_tool import AskQuestionTool
|
||||
@@ -16,7 +17,7 @@ if TYPE_CHECKING:
|
||||
class AgentTools:
|
||||
"""Manager class for agent-related tools"""
|
||||
|
||||
def __init__(self, agents: list[BaseAgent], i18n: I18N | None = None) -> None:
|
||||
def __init__(self, agents: Sequence[BaseAgent], i18n: I18N | None = None) -> None:
|
||||
self.agents = agents
|
||||
self.i18n = i18n if i18n is not None else get_i18n()
|
||||
|
||||
|
||||
@@ -318,6 +318,8 @@ class ToolUsage:
|
||||
if self.task:
|
||||
self.task.increment_delegations(coworker)
|
||||
|
||||
fingerprint_config = self._build_fingerprint_config()
|
||||
|
||||
if calling.arguments:
|
||||
try:
|
||||
acceptable_args = tool.args_schema.model_json_schema()[
|
||||
@@ -328,15 +330,16 @@ class ToolUsage:
|
||||
for k, v in calling.arguments.items()
|
||||
if k in acceptable_args
|
||||
}
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
result = await tool.ainvoke(
|
||||
input=arguments, config=fingerprint_config
|
||||
)
|
||||
except Exception:
|
||||
arguments = calling.arguments
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
result = await tool.ainvoke(
|
||||
input=arguments, config=fingerprint_config
|
||||
)
|
||||
else:
|
||||
arguments = self._add_fingerprint_metadata({})
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
result = await tool.ainvoke(input={}, config=fingerprint_config)
|
||||
|
||||
if self.tools_handler:
|
||||
should_cache = True
|
||||
@@ -550,6 +553,8 @@ class ToolUsage:
|
||||
if self.task:
|
||||
self.task.increment_delegations(coworker)
|
||||
|
||||
fingerprint_config = self._build_fingerprint_config()
|
||||
|
||||
if calling.arguments:
|
||||
try:
|
||||
acceptable_args = tool.args_schema.model_json_schema()[
|
||||
@@ -560,15 +565,16 @@ class ToolUsage:
|
||||
for k, v in calling.arguments.items()
|
||||
if k in acceptable_args
|
||||
}
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = tool.invoke(input=arguments)
|
||||
result = tool.invoke(
|
||||
input=arguments, config=fingerprint_config
|
||||
)
|
||||
except Exception:
|
||||
arguments = calling.arguments
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = tool.invoke(input=arguments)
|
||||
result = tool.invoke(
|
||||
input=arguments, config=fingerprint_config
|
||||
)
|
||||
else:
|
||||
arguments = self._add_fingerprint_metadata({})
|
||||
result = tool.invoke(input=arguments)
|
||||
result = tool.invoke(input={}, config=fingerprint_config)
|
||||
|
||||
if self.tools_handler:
|
||||
should_cache = True
|
||||
@@ -1008,23 +1014,16 @@ class ToolUsage:
|
||||
|
||||
return event_data
|
||||
|
||||
def _add_fingerprint_metadata(self, arguments: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Add fingerprint metadata to tool arguments if available.
|
||||
def _build_fingerprint_config(self) -> dict[str, Any]:
|
||||
"""Build fingerprint metadata as a config dict for tool invocation.
|
||||
|
||||
Args:
|
||||
arguments: The original tool arguments
|
||||
Returns the fingerprint data in a config dict rather than injecting it
|
||||
into tool arguments, so it doesn't conflict with strict tool schemas.
|
||||
|
||||
Returns:
|
||||
Updated arguments dictionary with fingerprint metadata
|
||||
Config dictionary with security_context metadata.
|
||||
"""
|
||||
# Create a shallow copy to avoid modifying the original
|
||||
arguments = arguments.copy()
|
||||
|
||||
# Add security metadata under a designated key
|
||||
if "security_context" not in arguments:
|
||||
arguments["security_context"] = {}
|
||||
|
||||
security_context = arguments["security_context"]
|
||||
security_context: dict[str, Any] = {}
|
||||
|
||||
# Add agent fingerprint if available
|
||||
if self.agent and hasattr(self.agent, "security_config"):
|
||||
@@ -1048,4 +1047,4 @@ class ToolUsage:
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return arguments
|
||||
return {"security_context": security_context} if security_context else {}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
from typing import Annotated, Final
|
||||
|
||||
from pydantic_core import CoreSchema
|
||||
|
||||
from crewai.utilities.printer import PrinterColor
|
||||
|
||||
|
||||
@@ -36,6 +38,25 @@ class _NotSpecified:
|
||||
def __repr__(self) -> str:
|
||||
return "NOT_SPECIFIED"
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, _source_type: object, _handler: object
|
||||
) -> CoreSchema:
|
||||
from pydantic_core import core_schema
|
||||
|
||||
def _validate(v: object) -> _NotSpecified:
|
||||
if isinstance(v, _NotSpecified) or v == "NOT_SPECIFIED":
|
||||
return NOT_SPECIFIED
|
||||
raise ValueError(f"Expected NOT_SPECIFIED sentinel, got {type(v).__name__}")
|
||||
|
||||
return core_schema.no_info_plain_validator_function(
|
||||
_validate,
|
||||
serialization=core_schema.plain_serializer_function_ser_schema(
|
||||
lambda v: "NOT_SPECIFIED",
|
||||
info_arg=False,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
NOT_SPECIFIED: Final[
|
||||
Annotated[
|
||||
|
||||
@@ -623,7 +623,7 @@ def create_model_from_schema( # type: ignore[no-any-unimported]
|
||||
for name, prop in (json_schema.get("properties", {}) or {}).items()
|
||||
}
|
||||
|
||||
effective_config = __config__ or ConfigDict(extra="ignore")
|
||||
effective_config = __config__ or ConfigDict(extra="forbid")
|
||||
|
||||
return create_model_base(
|
||||
effective_name,
|
||||
|
||||
@@ -927,6 +927,30 @@ class TestNativeToolExecution:
|
||||
assert len(tool_messages) == 1
|
||||
assert tool_messages[0]["tool_call_id"] == "call_1"
|
||||
|
||||
def test_check_native_todo_completion_requires_current_todo(
|
||||
self, mock_dependencies
|
||||
):
|
||||
from crewai.utilities.planning_types import TodoList
|
||||
|
||||
executor = _build_executor(**mock_dependencies)
|
||||
|
||||
# No current todo → not satisfied
|
||||
executor.state.todos = TodoList(items=[])
|
||||
assert executor.check_native_todo_completion() == "todo_not_satisfied"
|
||||
|
||||
# With a current todo that has tool_to_use → satisfied
|
||||
running = TodoItem(
|
||||
step_number=1,
|
||||
description="Use the expected tool",
|
||||
tool_to_use="expected_tool",
|
||||
status="running",
|
||||
)
|
||||
executor.state.todos = TodoList(items=[running])
|
||||
assert executor.check_native_todo_completion() == "todo_satisfied"
|
||||
|
||||
# With a current todo without tool_to_use → still satisfied
|
||||
running.tool_to_use = None
|
||||
assert executor.check_native_todo_completion() == "todo_satisfied"
|
||||
|
||||
|
||||
class TestPlannerObserver:
|
||||
|
||||
@@ -218,6 +218,7 @@ def test_publish_when_not_in_sync_and_force(
|
||||
["uv", "build", "--sdist", "--out-dir", unittest.mock.ANY],
|
||||
check=True,
|
||||
capture_output=False,
|
||||
env=unittest.mock.ANY,
|
||||
)
|
||||
mock_open.assert_called_with(unittest.mock.ANY, "rb")
|
||||
mock_publish.assert_called_with(
|
||||
@@ -279,6 +280,7 @@ def test_publish_success(
|
||||
["uv", "build", "--sdist", "--out-dir", unittest.mock.ANY],
|
||||
check=True,
|
||||
capture_output=False,
|
||||
env=unittest.mock.ANY,
|
||||
)
|
||||
mock_open.assert_called_with(unittest.mock.ANY, "rb")
|
||||
mock_publish.assert_called_with(
|
||||
|
||||
@@ -882,129 +882,3 @@ class TestEndToEndMCPSchema:
|
||||
)
|
||||
assert obj.filters.date_from == datetime.date(2025, 1, 1)
|
||||
assert obj.filters.categories == ["news", "tech"]
|
||||
|
||||
|
||||
class TestExtraFieldsIgnored:
|
||||
"""Regression tests for OSS-9: security_context injection causing
|
||||
extra_forbidden errors on MCP and integration tool schemas.
|
||||
|
||||
When the framework injects metadata like security_context into tool call
|
||||
arguments, dynamically-created Pydantic models must ignore (not reject)
|
||||
extra fields so that tool execution is not blocked.
|
||||
"""
|
||||
|
||||
SIMPLE_TOOL_SCHEMA: dict[str, Any] = {
|
||||
"type": "object",
|
||||
"title": "ExecuteSqlSchema",
|
||||
"properties": {
|
||||
"query": {
|
||||
"type": "string",
|
||||
"description": "The SQL query to execute.",
|
||||
},
|
||||
},
|
||||
"required": ["query"],
|
||||
}
|
||||
|
||||
OUTLOOK_TOOL_SCHEMA: dict[str, Any] = {
|
||||
"type": "object",
|
||||
"title": "MicrosoftOutlookSendEmailSchema",
|
||||
"properties": {
|
||||
"to_recipients": {
|
||||
"type": "array",
|
||||
"items": {"type": "string"},
|
||||
"description": "Array of recipient email addresses.",
|
||||
},
|
||||
"subject": {
|
||||
"type": "string",
|
||||
"description": "Email subject line.",
|
||||
},
|
||||
"body": {
|
||||
"type": "string",
|
||||
"description": "Email body content.",
|
||||
},
|
||||
},
|
||||
"required": ["to_recipients", "subject", "body"],
|
||||
}
|
||||
|
||||
SECURITY_CONTEXT_PAYLOAD: dict[str, Any] = {
|
||||
"agent_fingerprint": {
|
||||
"user_id": "test-user-123",
|
||||
"session_id": "test-session-456",
|
||||
"metadata": {},
|
||||
},
|
||||
}
|
||||
|
||||
def test_mcp_tool_schema_ignores_security_context(self) -> None:
|
||||
"""Reproduces OSS-9 Case 1: Databricks MCP execute_sql fails when
|
||||
security_context is injected into tool args."""
|
||||
Model = create_model_from_schema(self.SIMPLE_TOOL_SCHEMA)
|
||||
# This previously raised: Extra inputs are not permitted
|
||||
# [type=extra_forbidden, input_value={'agent_fingerprint': ...}]
|
||||
obj = Model.model_validate(
|
||||
{
|
||||
"query": "SELECT * FROM my_table",
|
||||
"security_context": self.SECURITY_CONTEXT_PAYLOAD,
|
||||
}
|
||||
)
|
||||
assert obj.query == "SELECT * FROM my_table"
|
||||
# security_context should be silently dropped, not present on the model
|
||||
assert not hasattr(obj, "security_context")
|
||||
|
||||
def test_integration_tool_schema_ignores_security_context(self) -> None:
|
||||
"""Reproduces OSS-9 Case 2: Microsoft Outlook send_email fails when
|
||||
security_context is injected into tool args."""
|
||||
Model = create_model_from_schema(self.OUTLOOK_TOOL_SCHEMA)
|
||||
obj = Model.model_validate(
|
||||
{
|
||||
"to_recipients": ["user@example.com"],
|
||||
"subject": "Test",
|
||||
"body": "Hello",
|
||||
"security_context": self.SECURITY_CONTEXT_PAYLOAD,
|
||||
}
|
||||
)
|
||||
assert obj.to_recipients == ["user@example.com"]
|
||||
assert obj.subject == "Test"
|
||||
assert not hasattr(obj, "security_context")
|
||||
|
||||
def test_arbitrary_extra_fields_ignored(self) -> None:
|
||||
"""Any unexpected extra field should be silently ignored, not just
|
||||
security_context."""
|
||||
Model = create_model_from_schema(self.SIMPLE_TOOL_SCHEMA)
|
||||
obj = Model.model_validate(
|
||||
{
|
||||
"query": "SELECT 1",
|
||||
"some_unknown_field": "should be dropped",
|
||||
"another_extra": 42,
|
||||
}
|
||||
)
|
||||
assert obj.query == "SELECT 1"
|
||||
assert not hasattr(obj, "some_unknown_field")
|
||||
assert not hasattr(obj, "another_extra")
|
||||
|
||||
def test_required_fields_still_enforced(self) -> None:
|
||||
"""Changing to extra=ignore must NOT weaken required field validation."""
|
||||
Model = create_model_from_schema(self.SIMPLE_TOOL_SCHEMA)
|
||||
with pytest.raises(Exception):
|
||||
Model.model_validate({"security_context": self.SECURITY_CONTEXT_PAYLOAD})
|
||||
|
||||
def test_type_validation_still_enforced(self) -> None:
|
||||
"""Changing to extra=ignore must NOT weaken type validation."""
|
||||
Model = create_model_from_schema(self.SIMPLE_TOOL_SCHEMA)
|
||||
with pytest.raises(Exception):
|
||||
Model.model_validate({"query": 12345}) # should be string
|
||||
|
||||
def test_explicit_extra_forbid_still_works(self) -> None:
|
||||
"""Callers can still opt into extra=forbid via __config__."""
|
||||
from pydantic import ConfigDict
|
||||
|
||||
Model = create_model_from_schema(
|
||||
self.SIMPLE_TOOL_SCHEMA,
|
||||
__config__=ConfigDict(extra="forbid"),
|
||||
)
|
||||
with pytest.raises(Exception):
|
||||
Model.model_validate(
|
||||
{
|
||||
"query": "SELECT 1",
|
||||
"security_context": self.SECURITY_CONTEXT_PAYLOAD,
|
||||
}
|
||||
)
|
||||
|
||||
@@ -25,6 +25,9 @@ release = "crewai_devtools.cli:release"
|
||||
docs-check = "crewai_devtools.docs_check:docs_check"
|
||||
devtools = "crewai_devtools.cli:main"
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
"""CrewAI development tools."""
|
||||
|
||||
__version__ = "1.13.0a7"
|
||||
__version__ = "1.14.0a2"
|
||||
|
||||
@@ -160,6 +160,7 @@ info = "Commits must follow Conventional Commits 1.0.0."
|
||||
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
# composio-core pins rich<14 but textual requires rich>=14.
|
||||
# onnxruntime 1.24+ dropped Python 3.10 wheels; cap it so qdrant[fastembed] resolves on 3.10.
|
||||
|
||||
Reference in New Issue
Block a user