mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-04-12 14:02:47 +00:00
Compare commits
26 Commits
devin/1775
...
docs/file-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
9723113755 | ||
|
|
e80d89a31c | ||
|
|
33339bd60b | ||
|
|
415f248274 | ||
|
|
07a06561ed | ||
|
|
64ee002201 | ||
|
|
202ca028d6 | ||
|
|
bce10f5978 | ||
|
|
d2e57e375b | ||
|
|
d039a075aa | ||
|
|
ce99312db1 | ||
|
|
c571620f8c | ||
|
|
931f3556cf | ||
|
|
914776b7ed | ||
|
|
6ef6fada4d | ||
|
|
1b7be63b60 | ||
|
|
59aa5b2243 | ||
|
|
2e2fae02d2 | ||
|
|
804c26bd01 | ||
|
|
4e46913045 | ||
|
|
335130cb15 | ||
|
|
186ea77c63 | ||
|
|
9e51229e6c | ||
|
|
247d623499 | ||
|
|
c260f3e19f | ||
|
|
d9cf7dda31 |
105
.github/workflows/vulnerability-scan.yml
vendored
Normal file
105
.github/workflows/vulnerability-scan.yml
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
name: Vulnerability Scan
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: [main]
|
||||
schedule:
|
||||
# Run weekly on Monday at 9:00 UTC
|
||||
- cron: '0 9 * * 1'
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
pip-audit:
|
||||
name: pip-audit
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Restore global uv cache
|
||||
id: cache-restore
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
|
||||
restore-keys: |
|
||||
uv-main-py3.11-
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "0.8.4"
|
||||
python-version: "3.11"
|
||||
enable-cache: false
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync --all-groups --all-extras --no-install-project
|
||||
|
||||
- name: Install pip-audit
|
||||
run: uv pip install pip-audit
|
||||
|
||||
- name: Run pip-audit
|
||||
run: |
|
||||
uv run pip-audit --desc --aliases --skip-editable --format json --output pip-audit-report.json \
|
||||
--ignore-vuln CVE-2025-69872 \
|
||||
--ignore-vuln CVE-2026-25645 \
|
||||
--ignore-vuln CVE-2026-27448 \
|
||||
--ignore-vuln CVE-2026-27459 \
|
||||
--ignore-vuln PYSEC-2023-235
|
||||
# Ignored CVEs:
|
||||
# CVE-2025-69872 - diskcache 5.6.3: no fix available (latest version)
|
||||
# CVE-2026-25645 - requests 2.32.5: fix requires 2.33.0, blocked by crewai-tools ~=2.32.5 pin
|
||||
# CVE-2026-27448 - pyopenssl 25.3.0: fix requires 26.0.0, blocked by snowflake-connector-python <26.0.0 pin
|
||||
# CVE-2026-27459 - pyopenssl 25.3.0: same as above
|
||||
# PYSEC-2023-235 - couchbase: fixed in 4.6.0 (already upgraded), advisory not yet updated
|
||||
continue-on-error: true
|
||||
|
||||
- name: Display results
|
||||
if: always()
|
||||
run: |
|
||||
if [ -f pip-audit-report.json ]; then
|
||||
echo "## pip-audit Results" >> $GITHUB_STEP_SUMMARY
|
||||
echo '```json' >> $GITHUB_STEP_SUMMARY
|
||||
cat pip-audit-report.json | python3 -m json.tool >> $GITHUB_STEP_SUMMARY
|
||||
echo '```' >> $GITHUB_STEP_SUMMARY
|
||||
# Fail if vulnerabilities found
|
||||
python3 -c "
|
||||
import json, sys
|
||||
with open('pip-audit-report.json') as f:
|
||||
data = json.load(f)
|
||||
vulns = [d for d in data.get('dependencies', []) if d.get('vulns')]
|
||||
if vulns:
|
||||
print(f'::error::Found vulnerabilities in {len(vulns)} package(s)')
|
||||
for v in vulns:
|
||||
for vuln in v['vulns']:
|
||||
print(f' - {v[\"name\"]}=={v[\"version\"]}: {vuln[\"id\"]}')
|
||||
sys.exit(1)
|
||||
print('No known vulnerabilities found')
|
||||
"
|
||||
else
|
||||
echo "::error::pip-audit failed to produce a report. Check the pip-audit step logs."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Upload pip-audit report
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: pip-audit-report
|
||||
path: pip-audit-report.json
|
||||
|
||||
- name: Save uv caches
|
||||
if: steps.cache-restore.outputs.cache-hit != 'true'
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
|
||||
|
||||
@@ -4,6 +4,75 @@ description: "تحديثات المنتج والتحسينات وإصلاحات
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="2 أبريل 2026">
|
||||
## v1.13.0
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### الميزات
|
||||
- إضافة نموذج RuntimeState RootModel لتوحيد تسلسل الحالة
|
||||
- تعزيز مستمع الأحداث مع نطاقات جديدة للقياس عن أحداث المهارة والذاكرة
|
||||
- إضافة امتداد A2UI مع دعم v0.8/v0.9، والمخططات، والوثائق
|
||||
- إصدار بيانات استخدام الرموز في حدث LLMCallCompletedEvent
|
||||
- تحديث تلقائي لمستودع اختبار النشر أثناء الإصدار
|
||||
- تحسين مرونة الإصدار المؤسسي وتجربة المستخدم
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إضافة بيانات اعتماد مستودع الأدوات إلى تثبيت crewai
|
||||
- إضافة بيانات اعتماد مستودع الأدوات إلى بناء uv في نشر الأدوات
|
||||
- تمرير بيانات التعريف عبر الإعدادات بدلاً من معلمات الأدوات
|
||||
- معالجة نماذج GPT-5.x التي لا تدعم معلمة API `stop`
|
||||
- إضافة GPT-5 وسلسلة o إلى بادئات الرؤية متعددة الوسائط
|
||||
- مسح ذاكرة التخزين المؤقت uv للحزم التي تم نشرها حديثًا في الإصدار المؤسسي
|
||||
- تحديد lancedb أقل من 0.30.1 لضمان التوافق مع Windows
|
||||
- إصلاح مستويات أذونات RBAC لتتناسب مع خيارات واجهة المستخدم الفعلية
|
||||
- إصلاح عدم الدقة في قدرات الوكيل عبر جميع اللغات
|
||||
|
||||
### الوثائق
|
||||
- إضافة فيديو توضيحي لمهارات وكيل البرمجة إلى صفحات البدء
|
||||
- إضافة دليل شامل لتكوين SSO
|
||||
- إضافة مصفوفة شاملة لأذونات RBAC ودليل النشر
|
||||
- تحديث سجل التغييرات والإصدار إلى v1.13.0
|
||||
|
||||
### الأداء
|
||||
- تقليل الحمل الزائد للإطار باستخدام حافلة الأحداث الكسولة، وتخطي التتبع عند تعطيله
|
||||
|
||||
### إعادة الهيكلة
|
||||
- تحويل Flow إلى Pydantic BaseModel
|
||||
- تحويل فئات LLM إلى Pydantic BaseModel
|
||||
- استبدال InstanceOf[T] بتعليقات نوع عادية
|
||||
- إزالة دليل LLM الخاص بالطرف الثالث غير المستخدم
|
||||
|
||||
## المساهمون
|
||||
|
||||
@alex-clawd, @dependabot[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide, @thiagomoretto
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2 أبريل 2026">
|
||||
## v1.13.0a7
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0a7)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### الميزات
|
||||
- إضافة امتداد A2UI مع دعم v0.8/v0.9، والمخططات، والوثائق
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إصلاح بادئات الرؤية متعددة الأنماط عن طريق إضافة GPT-5 وسلسلة o
|
||||
|
||||
### الوثائق
|
||||
- تحديث سجل التغييرات والإصدار لـ v1.13.0a6
|
||||
|
||||
## المساهمون
|
||||
|
||||
@alex-clawd, @greysonlalonde, @joaomdmoura
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="1 أبريل 2026">
|
||||
## v1.13.0a6
|
||||
|
||||
|
||||
@@ -117,23 +117,35 @@ task = Task(
|
||||
|
||||
### مع التدفقات
|
||||
|
||||
مرر الملفات إلى التدفقات، والتي تنتقل تلقائيًا إلى الأطقم:
|
||||
تعمل الحقول من نوع الملف (`File`، `ImageFile`، `PDFFile`) في مخطط حالة التدفق كإشارة لواجهة المنصة. عند النشر، تُعرض هذه الحقول كمناطق سحب وإفلات لرفع الملفات. يمكن أيضًا تمرير الملفات عبر `input_files` في API.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai_files import ImageFile
|
||||
from crewai_files import File, ImageFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
class AnalysisFlow(Flow):
|
||||
class MyState(BaseModel):
|
||||
document: File # Renders as file dropzone in Platform UI
|
||||
cover_image: ImageFile # Image-specific dropzone
|
||||
title: str = ""
|
||||
|
||||
class AnalysisFlow(Flow[MyState]):
|
||||
@start()
|
||||
def analyze(self):
|
||||
# Files are automatically populated in state
|
||||
content = self.state.document.read()
|
||||
return self.analysis_crew.kickoff()
|
||||
|
||||
flow = AnalysisFlow()
|
||||
result = flow.kickoff(
|
||||
input_files={"image": ImageFile(source="data.png")}
|
||||
input_files={"document": File(source="report.pdf")}
|
||||
)
|
||||
```
|
||||
|
||||
<Note type="info" title="تكامل منصة CrewAI">
|
||||
عند النشر على منصة CrewAI، تحصل الحقول من نوع الملف مثل `ImageFile` و `PDFFile` وغيرها في حالة التدفق تلقائيًا على واجهة رفع ملفات. يمكن للمستخدمين سحب وإفلات الملفات مباشرة في واجهة المنصة. يتم تخزين الملفات بشكل آمن وتمريرها إلى الوكلاء باستخدام تحسينات خاصة بالمزود (base64 مضمّن، أو واجهات برمجة لرفع الملفات، أو مراجع URL حسب المزود). للاطلاع على أمثلة استخدام API، راجع [مدخلات الملفات في التدفقات](/ar/concepts/flows#مدخلات-الملفات).
|
||||
</Note>
|
||||
|
||||
### مع الوكلاء المستقلين
|
||||
|
||||
مرر الملفات مباشرة إلى تشغيل الوكيل:
|
||||
|
||||
@@ -341,6 +341,90 @@ flow.kickoff()
|
||||
|
||||
من خلال توفير خيارات إدارة الحالة غير المهيكلة والمهيكلة، تمكّن تدفقات CrewAI المطورين من بناء سير عمل ذكاء اصطناعي مرن ومتين في آن واحد، ملبيةً مجموعة واسعة من متطلبات التطبيقات.
|
||||
|
||||
### مدخلات الملفات
|
||||
|
||||
عند استخدام الحالة المهيكلة، يمكنك تضمين حقول من نوع الملف باستخدام فئات من `crewai-files`. تعمل الحقول من نوع الملف في حالة التدفق كإشارة للمنصة — فهي تُعرض تلقائيًا كمناطق سحب وإفلات لرفع الملفات في واجهة علامة تبويب Run ويتم تعبئتها عند رفع الملفات عبر المنصة أو تمريرها عبر `input_files` في API.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai_files import File, ImageFile, PDFFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MyState(BaseModel):
|
||||
document: File # Renders as file dropzone in Platform
|
||||
title: str = ""
|
||||
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def process(self):
|
||||
# File object is automatically populated in state
|
||||
# when uploaded via Platform UI or passed via API
|
||||
content = self.state.document.read()
|
||||
print(f"Processing {self.state.title}: {len(content)} bytes")
|
||||
return content
|
||||
```
|
||||
|
||||
عند النشر على **منصة CrewAI**، تُعرض الحقول من نوع الملف (`File`، `ImageFile`، `PDFFile` من `crewai-files`) تلقائيًا كمناطق سحب وإفلات لرفع الملفات في واجهة المستخدم. يمكن للمستخدمين سحب وإفلات الملفات، والتي تُملأ بعد ذلك في حالة التدفق الخاص بك.
|
||||
|
||||
**بدء التشغيل مع الملفات عبر API:**
|
||||
|
||||
تكتشف نقطة النهاية `/kickoff` تنسيق الطلب تلقائيًا:
|
||||
- **جسم JSON** ← بدء تشغيل عادي
|
||||
- **multipart/form-data** ← رفع ملف + بدء تشغيل
|
||||
|
||||
يمكن لمستخدمي API أيضًا تمرير سلاسل URL مباشرة إلى الحقول من نوع الملف — يقوم Pydantic بتحويلها تلقائيًا.
|
||||
|
||||
### استخدام API
|
||||
|
||||
#### الخيار 1: بدء تشغيل multipart (موصى به)
|
||||
|
||||
أرسل الملفات مباشرة مع طلب بدء التشغيل:
|
||||
|
||||
```bash
|
||||
# With files (multipart) — same endpoint
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'inputs={"company_name": "Einstein"}' \
|
||||
-F 'cover_image=@/path/to/photo.jpg'
|
||||
```
|
||||
|
||||
يتم تخزين الملفات تلقائيًا وتحويلها إلى كائنات `FileInput`. يتلقى الوكيل الملف مع تحسين خاص بالمزود (base64 مضمّن، أو API لرفع الملفات، أو مرجع URL حسب مزود LLM).
|
||||
|
||||
#### الخيار 2: بدء تشغيل JSON (بدون ملفات)
|
||||
|
||||
```bash
|
||||
# Without files (JSON) — same endpoint
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"company_name": "Einstein"}}'
|
||||
```
|
||||
|
||||
#### الخيار 3: رفع منفصل + بدء تشغيل
|
||||
|
||||
هذا بديل لرفع multipart عندما تحتاج إلى رفع الملفات بشكل منفصل عن طلب بدء التشغيل. ارفع الملفات أولاً، ثم أشِر إليها بواسطة URL:
|
||||
|
||||
```bash
|
||||
# Step 1: Upload
|
||||
curl -X POST https://your-deployment.crewai.com/files \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'file=@/path/to/photo.jpg' \
|
||||
-F 'field_name=cover_image'
|
||||
# Returns: {"url": "https://...", "field_name": "cover_image"}
|
||||
|
||||
# Step 2: Kickoff with URL
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"company_name": "Einstein"}, "input_files": {"cover_image": "https://..."}}'
|
||||
```
|
||||
|
||||
راجع وثائق Platform API للحصول على تفاصيل كاملة حول نقطة النهاية `/files`.
|
||||
|
||||
#### على منصة CrewAI
|
||||
|
||||
عند استخدام واجهة المنصة، تُعرض الحقول من نوع الملف تلقائيًا كمناطق سحب وإفلات للرفع. لا حاجة لاستدعاءات API — فقط أفلِت الملف وانقر على تشغيل.
|
||||
|
||||
## استمرارية التدفق
|
||||
|
||||
يتيح مزخرف @persist الاستمرارية التلقائية للحالة في تدفقات CrewAI، مما يسمح لك بالحفاظ على حالة التدفق عبر عمليات إعادة التشغيل أو تنفيذات سير العمل المختلفة. يمكن تطبيق هذا المزخرف على مستوى الفئة أو مستوى الدالة، مما يوفر مرونة في كيفية إدارة استمرارية الحالة.
|
||||
|
||||
@@ -86,6 +86,60 @@ curl -H "Authorization: Bearer YOUR_CREW_TOKEN" https://your-crew-url.crewai.com
|
||||
|
||||
رمز الحامل متاح في علامة تبويب Status في صفحة تفاصيل طاقمك.
|
||||
|
||||
## رفع الملفات
|
||||
|
||||
عندما يتضمن طاقمك أو تدفقك حقول حالة من نوع الملف (باستخدام `ImageFile` أو `PDFFile` أو `File` من `crewai-files`)، تُعرض هذه الحقول تلقائيًا كمناطق سحب وإفلات لرفع الملفات في واجهة علامة تبويب Run. يمكن للمستخدمين سحب وإفلات الملفات مباشرة، وتتولى المنصة التخزين والتسليم إلى وكلائك.
|
||||
|
||||
### بدء تشغيل Multipart (موصى به)
|
||||
|
||||
أرسل الملفات مباشرة مع طلب بدء التشغيل باستخدام `multipart/form-data`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'inputs={"title": "Report"}' \
|
||||
-F 'document=@/path/to/file.pdf'
|
||||
```
|
||||
|
||||
يتم تخزين الملفات تلقائيًا وتحويلها إلى كائنات ملفات. يتلقى الوكيل الملف مع تحسين خاص بالمزود (base64 مضمّن، أو API لرفع الملفات، أو مرجع URL حسب مزود LLM).
|
||||
|
||||
### بدء تشغيل JSON مع عناوين URL للملفات
|
||||
|
||||
إذا كانت لديك ملفات مستضافة بالفعل على عناوين URL، مررها عبر `input_files`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"inputs": {"title": "Report"},
|
||||
"input_files": {"document": "https://example.com/file.pdf"}
|
||||
}'
|
||||
```
|
||||
|
||||
### رفع منفصل + بدء تشغيل
|
||||
|
||||
ارفع الملفات أولاً، ثم أشِر إليها بواسطة URL:
|
||||
|
||||
```bash
|
||||
# Step 1: Upload
|
||||
curl -X POST https://your-deployment.crewai.com/files \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'file=@/path/to/file.pdf' \
|
||||
-F 'field_name=document'
|
||||
# Returns: {"url": "https://...", "field_name": "document"}
|
||||
|
||||
# Step 2: Kickoff with URL
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"title": "Report"}, "input_files": {"document": "https://..."}}'
|
||||
```
|
||||
|
||||
<Note type="info">
|
||||
يعمل رفع الملفات بنفس الطريقة لكل من الطواقم والتدفقات. عرّف حقول من نوع الملف في مخطط حالتك، وستتولى واجهة المنصة وAPI الرفع تلقائيًا.
|
||||
</Note>
|
||||
|
||||
### التحقق من صحة الطاقم
|
||||
|
||||
قبل تنفيذ العمليات، يمكنك التحقق من أن طاقمك يعمل بشكل صحيح:
|
||||
|
||||
132
docs/ar/enterprise/guides/training-crews.mdx
Normal file
132
docs/ar/enterprise/guides/training-crews.mdx
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
title: "تدريب الطواقم"
|
||||
description: "قم بتدريب طواقمك المنشورة مباشرة من منصة CrewAI AMP لتحسين أداء الوكلاء بمرور الوقت"
|
||||
icon: "dumbbell"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
يتيح لك التدريب تحسين أداء الطاقم من خلال تشغيل جلسات تدريب تكرارية مباشرة من علامة تبويب **Training** في CrewAI AMP. تستخدم المنصة **وضع التدريب التلقائي** — حيث تتولى العملية التكرارية تلقائياً، على عكس تدريب CLI الذي يتطلب ملاحظات بشرية تفاعلية لكل تكرار.
|
||||
|
||||
بعد اكتمال التدريب، يقوم CrewAI بتقييم مخرجات الوكلاء ودمج الملاحظات في اقتراحات قابلة للتنفيذ لكل وكيل. يتم بعد ذلك تطبيق هذه الاقتراحات على تشغيلات الطاقم المستقبلية لتحسين جودة المخرجات.
|
||||
|
||||
<Tip>
|
||||
للحصول على تفاصيل حول كيفية عمل تدريب CrewAI، راجع صفحة [مفاهيم التدريب](/ar/concepts/training).
|
||||
</Tip>
|
||||
|
||||
## المتطلبات الأساسية
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="نشر نشط" icon="rocket">
|
||||
تحتاج إلى حساب CrewAI AMP مع نشر نشط في حالة **Ready** (نوع Crew).
|
||||
</Card>
|
||||
<Card title="صلاحية التشغيل" icon="key">
|
||||
يجب أن يكون لحسابك صلاحية تشغيل للنشر الذي تريد تدريبه.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## كيفية تدريب طاقم
|
||||
|
||||
<Steps>
|
||||
<Step title="افتح علامة تبويب Training">
|
||||
انتقل إلى **Deployments**، انقر على نشرك، ثم اختر علامة تبويب **Training**.
|
||||
</Step>
|
||||
|
||||
<Step title="أدخل اسم التدريب">
|
||||
قدم **Training Name** — سيصبح هذا اسم ملف `.pkl` المستخدم لتخزين نتائج التدريب. على سبيل المثال، "Expert Mode Training" ينتج `expert_mode_training.pkl`.
|
||||
</Step>
|
||||
|
||||
<Step title="املأ مدخلات الطاقم">
|
||||
أدخل حقول إدخال الطاقم. هذه هي نفس المدخلات التي ستقدمها للتشغيل العادي — يتم تحميلها ديناميكياً بناءً على تكوين طاقمك.
|
||||
</Step>
|
||||
|
||||
<Step title="ابدأ التدريب">
|
||||
انقر على **Train Crew**. يتغير الزر إلى "Training..." مع مؤشر دوران أثناء تشغيل العملية.
|
||||
|
||||
خلف الكواليس:
|
||||
- يتم إنشاء سجل تدريب للنشر الخاص بك
|
||||
- تستدعي المنصة نقطة نهاية التدريب التلقائي للنشر
|
||||
- يقوم الطاقم بتشغيل تكراراته تلقائياً — لا حاجة لملاحظات يدوية
|
||||
</Step>
|
||||
|
||||
<Step title="راقب التقدم">
|
||||
تعرض لوحة **Current Training Status**:
|
||||
- **Status** — الحالة الحالية لجلسة التدريب
|
||||
- **Nº Iterations** — عدد تكرارات التدريب المُهيأة
|
||||
- **Filename** — ملف `.pkl` الذي يتم إنشاؤه
|
||||
- **Started At** — وقت بدء التدريب
|
||||
- **Training Inputs** — المدخلات التي قدمتها
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## فهم نتائج التدريب
|
||||
|
||||
بمجرد اكتمال التدريب، سترى بطاقات نتائج لكل وكيل تحتوي على المعلومات التالية:
|
||||
|
||||
- **Agent Role** — اسم/دور الوكيل في طاقمك
|
||||
- **Final Quality** — درجة من 0 إلى 10 تقيّم جودة مخرجات الوكيل
|
||||
- **Final Summary** — ملخص لأداء الوكيل أثناء التدريب
|
||||
- **Suggestions** — توصيات قابلة للتنفيذ لتحسين سلوك الوكيل
|
||||
|
||||
### تحرير الاقتراحات
|
||||
|
||||
يمكنك تحسين الاقتراحات لأي وكيل:
|
||||
|
||||
<Steps>
|
||||
<Step title="انقر على Edit">
|
||||
في بطاقة نتائج أي وكيل، انقر على زر **Edit** بجوار الاقتراحات.
|
||||
</Step>
|
||||
|
||||
<Step title="عدّل الاقتراحات">
|
||||
حدّث نص الاقتراحات ليعكس التحسينات التي تريدها بشكل أفضل.
|
||||
</Step>
|
||||
|
||||
<Step title="احفظ التغييرات">
|
||||
انقر على **Save**. تتم مزامنة الاقتراحات المُعدّلة مع النشر وتُستخدم في جميع التشغيلات المستقبلية.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## استخدام بيانات التدريب
|
||||
|
||||
لتطبيق نتائج التدريب على طاقمك:
|
||||
|
||||
1. لاحظ **Training Filename** (ملف `.pkl`) من جلسة التدريب المكتملة.
|
||||
2. حدد اسم الملف هذا في تكوين kickoff أو التشغيل الخاص بنشرك.
|
||||
3. يقوم الطاقم تلقائياً بتحميل ملف التدريب وتطبيق الاقتراحات المخزنة على كل وكيل.
|
||||
|
||||
هذا يعني أن الوكلاء يستفيدون من الملاحظات المُنشأة أثناء التدريب في كل تشغيل لاحق.
|
||||
|
||||
## التدريبات السابقة
|
||||
|
||||
يعرض الجزء السفلي من علامة تبويب Training **سجل جميع جلسات التدريب السابقة** للنشر. استخدم هذا لمراجعة التدريبات السابقة، ومقارنة النتائج، أو اختيار ملف تدريب مختلف للاستخدام.
|
||||
|
||||
## معالجة الأخطاء
|
||||
|
||||
إذا فشل تشغيل التدريب، تعرض لوحة الحالة حالة خطأ مع رسالة تصف ما حدث خطأ.
|
||||
|
||||
الأسباب الشائعة لفشل التدريب:
|
||||
- **لم يتم تحديث وقت تشغيل النشر** — تأكد من أن نشرك يعمل بأحدث إصدار
|
||||
- **أخطاء تنفيذ الطاقم** — مشاكل في منطق مهام الطاقم أو تكوين الوكيل
|
||||
- **مشاكل الشبكة** — مشاكل الاتصال بين المنصة والنشر
|
||||
|
||||
## القيود
|
||||
|
||||
<Info>
|
||||
ضع هذه القيود في الاعتبار عند التخطيط لسير عمل التدريب الخاص بك:
|
||||
- **تدريب نشط واحد في كل مرة** لكل نشر — انتظر حتى ينتهي التشغيل الحالي قبل بدء آخر
|
||||
- **وضع التدريب التلقائي فقط** — لا تدعم المنصة الملاحظات التفاعلية لكل تكرار مثل CLI
|
||||
- **بيانات التدريب خاصة بالنشر** — ترتبط نتائج التدريب بمثيل وإصدار النشر المحدد
|
||||
</Info>
|
||||
|
||||
## الموارد ذات الصلة
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="مفاهيم التدريب" icon="book" href="/ar/concepts/training">
|
||||
تعلم كيف يعمل تدريب CrewAI.
|
||||
</Card>
|
||||
<Card title="تشغيل الطاقم" icon="play" href="/ar/enterprise/guides/kickoff-crew">
|
||||
قم بتشغيل طاقمك المنشور من منصة AMP.
|
||||
</Card>
|
||||
<Card title="النشر على AMP" icon="cloud-arrow-up" href="/ar/enterprise/guides/deploy-to-amp">
|
||||
انشر طاقمك واجعله جاهزاً للتدريب.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -5,6 +5,14 @@ icon: wrench
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### شاهد: بناء Agents و Flows في CrewAI باستخدام Coding Agent Skills
|
||||
|
||||
قم بتثبيت مهارات وكيل البرمجة الخاصة بنا (Claude Code، Codex، ...) لتشغيل وكلاء البرمجة بسرعة مع CrewAI.
|
||||
|
||||
يمكنك تثبيتها باستخدام `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## فيديو تعليمي
|
||||
|
||||
شاهد هذا الفيديو التعليمي لعرض تفصيلي لعملية التثبيت:
|
||||
|
||||
@@ -16,6 +16,14 @@ mode: "wide"
|
||||
|
||||
مع أكثر من 100,000 مطور معتمد عبر دوراتنا المجتمعية، يُعد CrewAI المعيار لأتمتة الذكاء الاصطناعي الجاهزة للمؤسسات.
|
||||
|
||||
### شاهد: بناء Agents و Flows في CrewAI باستخدام Coding Agent Skills
|
||||
|
||||
قم بتثبيت مهارات وكيل البرمجة الخاصة بنا (Claude Code، Codex، ...) لتشغيل وكلاء البرمجة بسرعة مع CrewAI.
|
||||
|
||||
يمكنك تثبيتها باستخدام `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## بنية CrewAI المعمارية
|
||||
|
||||
صُممت بنية CrewAI لتحقيق التوازن بين الاستقلالية والتحكم.
|
||||
|
||||
@@ -5,6 +5,14 @@ icon: rocket
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### شاهد: بناء Agents و Flows في CrewAI باستخدام Coding Agent Skills
|
||||
|
||||
قم بتثبيت مهارات وكيل البرمجة الخاصة بنا (Claude Code، Codex، ...) لتشغيل وكلاء البرمجة بسرعة مع CrewAI.
|
||||
|
||||
يمكنك تثبيتها باستخدام `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## ابنِ أول وكيل CrewAI
|
||||
|
||||
لننشئ طاقماً بسيطاً يساعدنا في `البحث` و`إعداد التقارير` عن `أحدث تطورات الذكاء الاصطناعي` لموضوع أو مجال معين.
|
||||
|
||||
3199
docs/docs.json
3199
docs/docs.json
File diff suppressed because it is too large
Load Diff
@@ -4,6 +4,75 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="Apr 02, 2026">
|
||||
## v1.13.0
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Features
|
||||
- Add RuntimeState RootModel for unified state serialization
|
||||
- Enhance event listener with new telemetry spans for skill and memory events
|
||||
- Add A2UI extension with v0.8/v0.9 support, schemas, and docs
|
||||
- Emit token usage data in LLMCallCompletedEvent
|
||||
- Auto-update deployment test repo during release
|
||||
- Improve enterprise release resilience and UX
|
||||
|
||||
### Bug Fixes
|
||||
- Add tool repository credentials to crewai install
|
||||
- Add tool repository credentials to uv build in tool publish
|
||||
- Pass fingerprint metadata via config instead of tool args
|
||||
- Handle GPT-5.x models not supporting the `stop` API parameter
|
||||
- Add GPT-5 and o-series to multimodal vision prefixes
|
||||
- Bust uv cache for freshly published packages in enterprise release
|
||||
- Cap lancedb below 0.30.1 for Windows compatibility
|
||||
- Fix RBAC permission levels to match actual UI options
|
||||
- Fix inaccuracies in agent-capabilities across all languages
|
||||
|
||||
### Documentation
|
||||
- Add coding agent skills demo video to getting started pages
|
||||
- Add comprehensive SSO configuration guide
|
||||
- Add comprehensive RBAC permissions matrix and deployment guide
|
||||
- Update changelog and version for v1.13.0
|
||||
|
||||
### Performance
|
||||
- Reduce framework overhead with lazy event bus, skip tracing when disabled
|
||||
|
||||
### Refactoring
|
||||
- Convert Flow to Pydantic BaseModel
|
||||
- Convert LLM classes to Pydantic BaseModel
|
||||
- Replace InstanceOf[T] with plain type annotations
|
||||
- Remove unused third_party LLM directory
|
||||
|
||||
## Contributors
|
||||
|
||||
@alex-clawd, @dependabot[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide, @thiagomoretto
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 02, 2026">
|
||||
## v1.13.0a7
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0a7)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Features
|
||||
- Add A2UI extension with v0.8/v0.9 support, schemas, and docs
|
||||
|
||||
### Bug Fixes
|
||||
- Fix multimodal vision prefixes by adding GPT-5 and o-series
|
||||
|
||||
### Documentation
|
||||
- Update changelog and version for v1.13.0a6
|
||||
|
||||
## Contributors
|
||||
|
||||
@alex-clawd, @greysonlalonde, @joaomdmoura
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 01, 2026">
|
||||
## v1.13.0a6
|
||||
|
||||
|
||||
@@ -117,23 +117,35 @@ task = Task(
|
||||
|
||||
### With Flows
|
||||
|
||||
Pass files to flows, which automatically inherit to crews:
|
||||
File-typed fields (`File`, `ImageFile`, `PDFFile`) in your flow's state schema serve as the signal to the Platform UI. When deployed, these fields render as file upload dropzones. Files can also be passed via `input_files` in the API.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai_files import ImageFile
|
||||
from crewai_files import File, ImageFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
class AnalysisFlow(Flow):
|
||||
class MyState(BaseModel):
|
||||
document: File # Renders as file dropzone in Platform UI
|
||||
cover_image: ImageFile # Image-specific dropzone
|
||||
title: str = ""
|
||||
|
||||
class AnalysisFlow(Flow[MyState]):
|
||||
@start()
|
||||
def analyze(self):
|
||||
# Files are automatically populated in state
|
||||
content = self.state.document.read()
|
||||
return self.analysis_crew.kickoff()
|
||||
|
||||
flow = AnalysisFlow()
|
||||
result = flow.kickoff(
|
||||
input_files={"image": ImageFile(source="data.png")}
|
||||
input_files={"document": File(source="report.pdf")}
|
||||
)
|
||||
```
|
||||
|
||||
<Note type="info" title="CrewAI Platform Integration">
|
||||
When deployed on CrewAI Platform, `ImageFile`, `PDFFile`, and other file-typed fields in your flow state automatically get a file upload UI. Users can drag and drop files directly in the Platform interface. Files are stored securely and passed to agents using provider-specific optimizations (inline base64, file upload APIs, or URL references depending on the provider). For API usage examples, see [File Inputs in Flows](/concepts/flows#file-inputs).
|
||||
</Note>
|
||||
|
||||
### With Standalone Agents
|
||||
|
||||
Pass files directly to agent kickoff:
|
||||
|
||||
@@ -341,6 +341,90 @@ flow.kickoff()
|
||||
|
||||
By providing both unstructured and structured state management options, CrewAI Flows empowers developers to build AI workflows that are both flexible and robust, catering to a wide range of application requirements.
|
||||
|
||||
### File Inputs
|
||||
|
||||
When using structured state, you can include file-typed fields using classes from `crewai-files`. File-typed fields in your flow state serve as the signal to the Platform—they automatically render as file upload dropzones in the Run tab UI and get populated when files are uploaded via the Platform or passed via `input_files` in the API.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai_files import File, ImageFile, PDFFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MyState(BaseModel):
|
||||
document: File # Renders as file dropzone in Platform
|
||||
title: str = ""
|
||||
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def process(self):
|
||||
# File object is automatically populated in state
|
||||
# when uploaded via Platform UI or passed via API
|
||||
content = self.state.document.read()
|
||||
print(f"Processing {self.state.title}: {len(content)} bytes")
|
||||
return content
|
||||
```
|
||||
|
||||
When deployed on **CrewAI Platform**, file-typed fields (`File`, `ImageFile`, `PDFFile` from `crewai-files`) automatically render as file upload dropzones in the UI. Users can drag and drop files, which are then populated into your flow's state.
|
||||
|
||||
**Kicking off with files via API:**
|
||||
|
||||
The `/kickoff` endpoint auto-detects the request format:
|
||||
- **JSON body** → normal kickoff
|
||||
- **multipart/form-data** → file upload + kickoff
|
||||
|
||||
API users can also pass URL strings directly to file-typed fields—Pydantic coerces them automatically.
|
||||
|
||||
### API Usage
|
||||
|
||||
#### Option 1: Multipart kickoff (recommended)
|
||||
|
||||
Send files directly with the kickoff request:
|
||||
|
||||
```bash
|
||||
# With files (multipart) — same endpoint
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'inputs={"company_name": "Einstein"}' \
|
||||
-F 'cover_image=@/path/to/photo.jpg'
|
||||
```
|
||||
|
||||
Files are automatically stored and converted to `FileInput` objects. The agent receives the file with provider-specific optimization (inline base64, file upload API, or URL reference depending on the LLM provider).
|
||||
|
||||
#### Option 2: JSON kickoff (no files)
|
||||
|
||||
```bash
|
||||
# Without files (JSON) — same endpoint
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"company_name": "Einstein"}}'
|
||||
```
|
||||
|
||||
#### Option 3: Separate upload + kickoff
|
||||
|
||||
This is an alternative to multipart upload when you need to upload files separately from the kickoff request. Upload files first, then reference them by URL:
|
||||
|
||||
```bash
|
||||
# Step 1: Upload
|
||||
curl -X POST https://your-deployment.crewai.com/files \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'file=@/path/to/photo.jpg' \
|
||||
-F 'field_name=cover_image'
|
||||
# Returns: {"url": "https://...", "field_name": "cover_image"}
|
||||
|
||||
# Step 2: Kickoff with URL
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"company_name": "Einstein"}, "input_files": {"cover_image": "https://..."}}'
|
||||
```
|
||||
|
||||
See the Platform API documentation for full `/files` endpoint details.
|
||||
|
||||
#### On CrewAI Platform
|
||||
|
||||
When using the Platform UI, file-typed fields automatically render as drag-and-drop upload zones. No API calls needed—just drop the file and click Run.
|
||||
|
||||
## Flow Persistence
|
||||
|
||||
The @persist decorator enables automatic state persistence in CrewAI Flows, allowing you to maintain flow state across restarts or different workflow executions. This decorator can be applied at either the class level or method level, providing flexibility in how you manage state persistence.
|
||||
|
||||
@@ -86,6 +86,60 @@ curl -H "Authorization: Bearer YOUR_CREW_TOKEN" https://your-crew-url.crewai.com
|
||||
|
||||
Your bearer token is available on the Status tab of your crew's detail page.
|
||||
|
||||
## File Uploads
|
||||
|
||||
When your crew or flow includes file-typed state fields (using `ImageFile`, `PDFFile`, or `File` from `crewai-files`), these fields automatically render as file upload dropzones in the Run tab UI. Users can drag and drop files directly, and the Platform handles storage and delivery to your agents.
|
||||
|
||||
### Multipart Kickoff (Recommended)
|
||||
|
||||
Send files directly with the kickoff request using `multipart/form-data`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'inputs={"title": "Report"}' \
|
||||
-F 'document=@/path/to/file.pdf'
|
||||
```
|
||||
|
||||
Files are automatically stored and converted to file objects. The agent receives the file with provider-specific optimization (inline base64, file upload API, or URL reference depending on the LLM provider).
|
||||
|
||||
### JSON Kickoff with File URLs
|
||||
|
||||
If you have files already hosted at URLs, pass them via `input_files`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"inputs": {"title": "Report"},
|
||||
"input_files": {"document": "https://example.com/file.pdf"}
|
||||
}'
|
||||
```
|
||||
|
||||
### Separate Upload + Kickoff
|
||||
|
||||
Upload files first, then reference them by URL:
|
||||
|
||||
```bash
|
||||
# Step 1: Upload
|
||||
curl -X POST https://your-deployment.crewai.com/files \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'file=@/path/to/file.pdf' \
|
||||
-F 'field_name=document'
|
||||
# Returns: {"url": "https://...", "field_name": "document"}
|
||||
|
||||
# Step 2: Kickoff with URL
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"title": "Report"}, "input_files": {"document": "https://..."}}'
|
||||
```
|
||||
|
||||
<Note type="info">
|
||||
File uploads work the same way for both crews and flows. Define file-typed fields in your state schema, and the Platform UI and API will handle uploads automatically.
|
||||
</Note>
|
||||
|
||||
### Checking Crew Health
|
||||
|
||||
Before executing operations, you can verify that your crew is running properly:
|
||||
|
||||
132
docs/en/enterprise/guides/training-crews.mdx
Normal file
132
docs/en/enterprise/guides/training-crews.mdx
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
title: "Training Crews"
|
||||
description: "Train your deployed crews directly from the CrewAI AMP platform to improve agent performance over time"
|
||||
icon: "dumbbell"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
Training lets you improve crew performance by running iterative training sessions directly from the **Training** tab in CrewAI AMP. The platform uses **auto-train mode** — it handles the iterative process automatically, unlike CLI training which requires interactive human feedback per iteration.
|
||||
|
||||
After training completes, CrewAI evaluates agent outputs and consolidates feedback into actionable suggestions for each agent. These suggestions are then applied to future crew runs to improve output quality.
|
||||
|
||||
<Tip>
|
||||
For details on how CrewAI training works under the hood, see the [Training Concepts](/en/concepts/training) page.
|
||||
</Tip>
|
||||
|
||||
## Prerequisites
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Active deployment" icon="rocket">
|
||||
You need a CrewAI AMP account with an active deployment in **Ready** status (Crew type).
|
||||
</Card>
|
||||
<Card title="Run permission" icon="key">
|
||||
Your account must have run permission for the deployment you want to train.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## How to train a crew
|
||||
|
||||
<Steps>
|
||||
<Step title="Open the Training tab">
|
||||
Navigate to **Deployments**, click your deployment, then select the **Training** tab.
|
||||
</Step>
|
||||
|
||||
<Step title="Enter a training name">
|
||||
Provide a **Training Name** — this becomes the `.pkl` filename used to store training results. For example, "Expert Mode Training" produces `expert_mode_training.pkl`.
|
||||
</Step>
|
||||
|
||||
<Step title="Fill in the crew inputs">
|
||||
Enter the crew's input fields. These are the same inputs you'd provide for a normal kickoff — they're dynamically loaded based on your crew's configuration.
|
||||
</Step>
|
||||
|
||||
<Step title="Start training">
|
||||
Click **Train Crew**. The button changes to "Training..." with a spinner while the process runs.
|
||||
|
||||
Behind the scenes:
|
||||
- A training record is created for your deployment
|
||||
- The platform calls the deployment's auto-train endpoint
|
||||
- The crew runs its iterations automatically — no manual feedback required
|
||||
</Step>
|
||||
|
||||
<Step title="Monitor progress">
|
||||
The **Current Training Status** panel displays:
|
||||
- **Status** — Current state of the training run
|
||||
- **Nº Iterations** — Number of training iterations configured
|
||||
- **Filename** — The `.pkl` file being generated
|
||||
- **Started At** — When training began
|
||||
- **Training Inputs** — The inputs you provided
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Understanding training results
|
||||
|
||||
Once training completes, you'll see per-agent result cards with the following information:
|
||||
|
||||
- **Agent Role** — The name/role of the agent in your crew
|
||||
- **Final Quality** — A score from 0 to 10 evaluating the agent's output quality
|
||||
- **Final Summary** — A summary of the agent's performance during training
|
||||
- **Suggestions** — Actionable recommendations for improving the agent's behavior
|
||||
|
||||
### Editing suggestions
|
||||
|
||||
You can refine the suggestions for any agent:
|
||||
|
||||
<Steps>
|
||||
<Step title="Click Edit">
|
||||
On any agent's result card, click the **Edit** button next to the suggestions.
|
||||
</Step>
|
||||
|
||||
<Step title="Modify suggestions">
|
||||
Update the suggestions text to better reflect the improvements you want.
|
||||
</Step>
|
||||
|
||||
<Step title="Save changes">
|
||||
Click **Save**. The edited suggestions sync back to the deployment and are used in all future runs.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Using trained data
|
||||
|
||||
To apply training results to your crew:
|
||||
|
||||
1. Note the **Training Filename** (the `.pkl` file) from your completed training session.
|
||||
2. Specify this filename in your deployment's kickoff or run configuration.
|
||||
3. The crew automatically loads the training file and applies the stored suggestions to each agent.
|
||||
|
||||
This means agents benefit from the feedback generated during training on every subsequent run.
|
||||
|
||||
## Previous trainings
|
||||
|
||||
The bottom of the Training tab displays a **history of all past training sessions** for the deployment. Use this to review previous training runs, compare results, or select a different training file to use.
|
||||
|
||||
## Error handling
|
||||
|
||||
If a training run fails, the status panel shows an error state along with a message describing what went wrong.
|
||||
|
||||
Common causes of training failures:
|
||||
- **Deployment runtime not updated** — Ensure your deployment is running the latest version
|
||||
- **Crew execution errors** — Issues within the crew's task logic or agent configuration
|
||||
- **Network issues** — Connectivity problems between the platform and the deployment
|
||||
|
||||
## Limitations
|
||||
|
||||
<Info>
|
||||
Keep these constraints in mind when planning your training workflow:
|
||||
- **One active training at a time** per deployment — wait for the current run to finish before starting another
|
||||
- **Auto-train mode only** — the platform does not support interactive per-iteration feedback like the CLI does
|
||||
- **Training data is deployment-specific** — training results are tied to the specific deployment instance and version
|
||||
</Info>
|
||||
|
||||
## Related resources
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="Training Concepts" icon="book" href="/en/concepts/training">
|
||||
Learn how CrewAI training works under the hood.
|
||||
</Card>
|
||||
<Card title="Kickoff Crew" icon="play" href="/en/enterprise/guides/kickoff-crew">
|
||||
Run your deployed crew from the AMP platform.
|
||||
</Card>
|
||||
<Card title="Deploy to AMP" icon="cloud-arrow-up" href="/en/enterprise/guides/deploy-to-amp">
|
||||
Get your crew deployed and ready for training.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -5,6 +5,14 @@ icon: wrench
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### Watch: Building CrewAI Agents & Flows with Coding Agent Skills
|
||||
|
||||
Install our coding agent skills (Claude Code, Codex, ...) to quickly get your coding agents up and running with CrewAI.
|
||||
|
||||
You can install it with `npx skills add crewaiinc/skills`.
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## Video Tutorial
|
||||
|
||||
Watch this video tutorial for a step-by-step demonstration of the installation process:
|
||||
@@ -163,6 +171,9 @@ We recommend using the `YAML` template scaffolding for a structured approach to
|
||||
```shell
|
||||
uv add <package-name>
|
||||
```
|
||||
<Note>
|
||||
As a supply-chain security measure, CrewAI's internal packages use `exclude-newer = "3 days"` in their `pyproject.toml` files. This means transitive dependencies pulled in by CrewAI won't resolve packages released less than 3 days ago. Your own direct dependencies are not affected by this policy. If you notice a transitive dependency is behind, you can pin the version you want explicitly in your project's dependencies.
|
||||
</Note>
|
||||
- To run your crew, execute the following command in the root of your project:
|
||||
```bash
|
||||
crewai run
|
||||
|
||||
@@ -16,6 +16,14 @@ It empowers developers to build production-ready multi-agent systems by combinin
|
||||
|
||||
With over 100,000 developers certified through our community courses, CrewAI is the standard for enterprise-ready AI automation.
|
||||
|
||||
### Watch: Building CrewAI Agents & Flows with Coding Agent Skills
|
||||
|
||||
Install our coding agent skills (Claude Code, Codex, ...) to quickly get your coding agents up and running with CrewAI.
|
||||
|
||||
You can install it with `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## The CrewAI Architecture
|
||||
|
||||
CrewAI's architecture is designed to balance autonomy with control.
|
||||
|
||||
@@ -5,6 +5,14 @@ icon: rocket
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### Watch: Building CrewAI Agents & Flows with Coding Agent Skills
|
||||
|
||||
Install our coding agent skills (Claude Code, Codex, ...) to quickly get your coding agents up and running with CrewAI.
|
||||
|
||||
You can install it with `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## Build your first CrewAI Agent
|
||||
|
||||
Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject.
|
||||
|
||||
@@ -4,6 +4,75 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="2026년 4월 2일">
|
||||
## v1.13.0
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 기능
|
||||
- 통합 상태 직렬화를 위한 RuntimeState RootModel 추가
|
||||
- 기술 및 메모리 이벤트에 대한 새로운 텔레메트리 스팬으로 이벤트 리스너 강화
|
||||
- v0.8/v0.9 지원, 스키마 및 문서가 포함된 A2UI 확장 추가
|
||||
- LLMCallCompletedEvent에서 토큰 사용 데이터 방출
|
||||
- 릴리스 중 배포 테스트 리포 자동 업데이트
|
||||
- 기업 릴리스의 복원력 및 사용자 경험 개선
|
||||
|
||||
### 버그 수정
|
||||
- crewai 설치에 도구 리포지토리 자격 증명 추가
|
||||
- 도구 게시의 uv 빌드에 도구 리포지토리 자격 증명 추가
|
||||
- 도구 인수 대신 구성으로 지문 메타데이터 전달
|
||||
- `stop` API 매개변수를 지원하지 않는 GPT-5.x 모델 처리
|
||||
- 멀티모달 비전 접두사에 GPT-5 및 o-series 추가
|
||||
- 기업 릴리스에서 새로 게시된 패키지에 대한 uv 캐시 무효화
|
||||
- Windows 호환성을 위해 lancedb를 0.30.1 이하로 제한
|
||||
- 실제 UI 옵션과 일치하도록 RBAC 권한 수준 수정
|
||||
- 모든 언어에서 에이전트 기능의 부정확성 수정
|
||||
|
||||
### 문서
|
||||
- 시작하기 페이지에 코딩 에이전트 기술 데모 비디오 추가
|
||||
- 포괄적인 SSO 구성 가이드 추가
|
||||
- 포괄적인 RBAC 권한 매트릭스 및 배포 가이드 추가
|
||||
- v1.13.0에 대한 변경 로그 및 버전 업데이트
|
||||
|
||||
### 성능
|
||||
- 비활성화 시 추적 건너뛰기와 함께 지연 이벤트 버스를 사용하여 프레임워크 오버헤드 감소
|
||||
|
||||
### 리팩토링
|
||||
- Flow를 Pydantic BaseModel로 변환
|
||||
- LLM 클래스를 Pydantic BaseModel로 변환
|
||||
- InstanceOf[T]를 일반 타입 주석으로 교체
|
||||
- 사용되지 않는 third_party LLM 디렉토리 제거
|
||||
|
||||
## 기여자
|
||||
|
||||
@alex-clawd, @dependabot[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide, @thiagomoretto
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 2일">
|
||||
## v1.13.0a7
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0a7)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 기능
|
||||
- v0.8/v0.9 지원, 스키마 및 문서가 포함된 A2UI 확장 추가
|
||||
|
||||
### 버그 수정
|
||||
- GPT-5 및 o-series를 추가하여 다중 모드 비전 접두사 수정
|
||||
|
||||
### 문서
|
||||
- v1.13.0a6에 대한 변경 로그 및 버전 업데이트
|
||||
|
||||
## 기여자
|
||||
|
||||
@alex-clawd, @greysonlalonde, @joaomdmoura
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 1일">
|
||||
## v1.13.0a6
|
||||
|
||||
|
||||
@@ -117,23 +117,35 @@ task = Task(
|
||||
|
||||
### Flow와 함께
|
||||
|
||||
flow에 파일을 전달하면 자동으로 crew에 상속됩니다:
|
||||
flow의 상태 스키마에 있는 파일 타입 필드(`File`, `ImageFile`, `PDFFile`)는 플랫폼 UI에 대한 신호 역할을 합니다. 배포 시 이러한 필드는 파일 업로드 드롭존으로 렌더링됩니다. 파일은 API에서 `input_files`를 통해서도 전달할 수 있습니다.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai_files import ImageFile
|
||||
from crewai_files import File, ImageFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
class AnalysisFlow(Flow):
|
||||
class MyState(BaseModel):
|
||||
document: File # Renders as file dropzone in Platform UI
|
||||
cover_image: ImageFile # Image-specific dropzone
|
||||
title: str = ""
|
||||
|
||||
class AnalysisFlow(Flow[MyState]):
|
||||
@start()
|
||||
def analyze(self):
|
||||
# Files are automatically populated in state
|
||||
content = self.state.document.read()
|
||||
return self.analysis_crew.kickoff()
|
||||
|
||||
flow = AnalysisFlow()
|
||||
result = flow.kickoff(
|
||||
input_files={"image": ImageFile(source="data.png")}
|
||||
input_files={"document": File(source="report.pdf")}
|
||||
)
|
||||
```
|
||||
|
||||
<Note type="info" title="CrewAI 플랫폼 통합">
|
||||
CrewAI 플랫폼에 배포하면 flow 상태의 `ImageFile`, `PDFFile` 및 기타 파일 타입 필드가 자동으로 파일 업로드 UI를 갖게 됩니다. 사용자는 플랫폼 인터페이스에서 직접 파일을 드래그 앤 드롭할 수 있습니다. 파일은 안전하게 저장되고 프로바이더별 최적화(인라인 base64, 파일 업로드 API 또는 프로바이더에 따른 URL 참조)를 사용하여 에이전트에 전달됩니다. API 사용 예제는 [Flows의 파일 입력](/ko/concepts/flows#파일-입력)을 참조하세요.
|
||||
</Note>
|
||||
|
||||
### 단독 에이전트와 함께
|
||||
|
||||
에이전트 킥오프에 직접 파일을 전달합니다:
|
||||
|
||||
@@ -334,6 +334,90 @@ flow.kickoff()
|
||||
|
||||
CrewAI Flows는 비구조적 및 구조적 상태 관리 옵션을 모두 제공함으로써, 개발자들이 다양한 애플리케이션 요구 사항에 맞춰 유연하면서도 견고한 AI 워크플로를 구축할 수 있도록 지원합니다.
|
||||
|
||||
### 파일 입력
|
||||
|
||||
구조화된 상태를 사용할 때, `crewai-files`의 클래스를 사용하여 파일 타입 필드를 포함할 수 있습니다. flow 상태의 파일 타입 필드는 플랫폼에 대한 신호 역할을 합니다 — Run 탭 UI에서 자동으로 파일 업로드 드롭존으로 렌더링되며, 플랫폼을 통해 파일을 업로드하거나 API에서 `input_files`를 통해 전달할 때 자동으로 채워집니다.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai_files import File, ImageFile, PDFFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MyState(BaseModel):
|
||||
document: File # Renders as file dropzone in Platform
|
||||
title: str = ""
|
||||
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def process(self):
|
||||
# File object is automatically populated in state
|
||||
# when uploaded via Platform UI or passed via API
|
||||
content = self.state.document.read()
|
||||
print(f"Processing {self.state.title}: {len(content)} bytes")
|
||||
return content
|
||||
```
|
||||
|
||||
**CrewAI 플랫폼**에 배포하면 파일 타입 필드(`crewai-files`의 `File`, `ImageFile`, `PDFFile`)가 UI에서 자동으로 파일 업로드 드롭존으로 렌더링됩니다. 사용자는 파일을 드래그 앤 드롭할 수 있으며, 해당 파일은 flow의 상태에 자동으로 채워집니다.
|
||||
|
||||
**API를 통한 파일 포함 시작:**
|
||||
|
||||
`/kickoff` 엔드포인트는 요청 형식을 자동으로 감지합니다:
|
||||
- **JSON body** → 일반 kickoff
|
||||
- **multipart/form-data** → 파일 업로드 + kickoff
|
||||
|
||||
API 사용자는 파일 타입 필드에 URL 문자열을 직접 전달할 수도 있습니다 — Pydantic이 자동으로 변환합니다.
|
||||
|
||||
### API 사용법
|
||||
|
||||
#### 옵션 1: Multipart kickoff (권장)
|
||||
|
||||
kickoff 요청과 함께 파일을 직접 전송합니다:
|
||||
|
||||
```bash
|
||||
# With files (multipart) — same endpoint
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'inputs={"company_name": "Einstein"}' \
|
||||
-F 'cover_image=@/path/to/photo.jpg'
|
||||
```
|
||||
|
||||
파일은 자동으로 저장되고 `FileInput` 객체로 변환됩니다. 에이전트는 프로바이더별 최적화(LLM 프로바이더에 따라 인라인 base64, 파일 업로드 API 또는 URL 참조)와 함께 파일을 수신합니다.
|
||||
|
||||
#### 옵션 2: JSON kickoff (파일 없음)
|
||||
|
||||
```bash
|
||||
# Without files (JSON) — same endpoint
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"company_name": "Einstein"}}'
|
||||
```
|
||||
|
||||
#### 옵션 3: 분리된 업로드 + kickoff
|
||||
|
||||
kickoff 요청과 별도로 파일을 업로드해야 할 때 multipart 업로드의 대안입니다. 먼저 파일을 업로드한 다음 URL로 참조합니다:
|
||||
|
||||
```bash
|
||||
# Step 1: Upload
|
||||
curl -X POST https://your-deployment.crewai.com/files \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'file=@/path/to/photo.jpg' \
|
||||
-F 'field_name=cover_image'
|
||||
# Returns: {"url": "https://...", "field_name": "cover_image"}
|
||||
|
||||
# Step 2: Kickoff with URL
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"company_name": "Einstein"}, "input_files": {"cover_image": "https://..."}}'
|
||||
```
|
||||
|
||||
`/files` 엔드포인트에 대한 자세한 내용은 플랫폼 API 문서를 참조하세요.
|
||||
|
||||
#### CrewAI 플랫폼에서
|
||||
|
||||
플랫폼 UI를 사용할 때 파일 타입 필드는 자동으로 드래그 앤 드롭 업로드 영역으로 렌더링됩니다. API 호출이 필요 없습니다 — 파일을 드롭하고 실행을 클릭하면 됩니다.
|
||||
|
||||
## 플로우 지속성
|
||||
|
||||
`@persist` 데코레이터는 CrewAI 플로우에서 자동 상태 지속성을 활성화하여, 플로우 상태를 재시작이나 다른 워크플로우 실행 간에도 유지할 수 있도록 합니다. 이 데코레이터는 클래스 수준이나 메서드 수준 모두에 적용할 수 있어, 상태 지속성을 관리하는 데 유연성을 제공합니다.
|
||||
|
||||
@@ -86,6 +86,60 @@ curl -H "Authorization: Bearer YOUR_CREW_TOKEN" https://your-crew-url.crewai.com
|
||||
|
||||
베어러 토큰은 crew의 상세 페이지의 Status 탭에서 확인할 수 있습니다.
|
||||
|
||||
## 파일 업로드
|
||||
|
||||
crew나 flow에 파일 타입 상태 필드(`crewai-files`의 `ImageFile`, `PDFFile`, 또는 `File` 사용)가 포함되어 있으면, 이러한 필드는 Run 탭 UI에서 자동으로 파일 업로드 드롭존으로 렌더링됩니다. 사용자는 파일을 직접 드래그 앤 드롭할 수 있으며, 플랫폼이 저장 및 에이전트 전달을 처리합니다.
|
||||
|
||||
### Multipart Kickoff (권장)
|
||||
|
||||
`multipart/form-data`를 사용하여 kickoff 요청과 함께 파일을 직접 전송합니다:
|
||||
|
||||
```bash
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'inputs={"title": "Report"}' \
|
||||
-F 'document=@/path/to/file.pdf'
|
||||
```
|
||||
|
||||
파일은 자동으로 저장되고 파일 객체로 변환됩니다. 에이전트는 프로바이더별 최적화(LLM 프로바이더에 따라 인라인 base64, 파일 업로드 API 또는 URL 참조)와 함께 파일을 수신합니다.
|
||||
|
||||
### 파일 URL을 포함한 JSON Kickoff
|
||||
|
||||
이미 URL에 호스팅된 파일이 있다면 `input_files`를 통해 전달합니다:
|
||||
|
||||
```bash
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"inputs": {"title": "Report"},
|
||||
"input_files": {"document": "https://example.com/file.pdf"}
|
||||
}'
|
||||
```
|
||||
|
||||
### 분리된 업로드 + Kickoff
|
||||
|
||||
먼저 파일을 업로드한 다음 URL로 참조합니다:
|
||||
|
||||
```bash
|
||||
# Step 1: Upload
|
||||
curl -X POST https://your-deployment.crewai.com/files \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'file=@/path/to/file.pdf' \
|
||||
-F 'field_name=document'
|
||||
# Returns: {"url": "https://...", "field_name": "document"}
|
||||
|
||||
# Step 2: Kickoff with URL
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"title": "Report"}, "input_files": {"document": "https://..."}}'
|
||||
```
|
||||
|
||||
<Note type="info">
|
||||
파일 업로드는 crew와 flow 모두에서 동일하게 작동합니다. 상태 스키마에 파일 타입 필드를 정의하면 플랫폼 UI와 API가 자동으로 업로드를 처리합니다.
|
||||
</Note>
|
||||
|
||||
### 크루 상태 확인
|
||||
|
||||
작업을 실행하기 전에 크루가 정상적으로 실행되고 있는지 확인할 수 있습니다:
|
||||
|
||||
132
docs/ko/enterprise/guides/training-crews.mdx
Normal file
132
docs/ko/enterprise/guides/training-crews.mdx
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
title: "Crew 훈련"
|
||||
description: "CrewAI AMP 플랫폼에서 직접 배포된 Crew를 훈련하여 시간이 지남에 따라 에이전트 성능을 개선하세요"
|
||||
icon: "dumbbell"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
훈련을 통해 CrewAI AMP의 **Training** 탭에서 직접 반복 훈련 세션을 실행하여 Crew 성능을 개선할 수 있습니다. 플랫폼은 **자동 훈련 모드**를 사용합니다 — 반복 프로세스를 자동으로 처리하며, 반복마다 대화형 피드백이 필요한 CLI 훈련과는 다릅니다.
|
||||
|
||||
훈련이 완료되면 CrewAI는 에이전트 출력을 평가하고 각 에이전트에 대한 실행 가능한 제안으로 피드백을 통합합니다. 이러한 제안은 향후 Crew 실행에 적용되어 출력 품질을 개선합니다.
|
||||
|
||||
<Tip>
|
||||
CrewAI 훈련이 내부적으로 어떻게 작동하는지에 대한 자세한 내용은 [훈련 개념](/ko/concepts/training) 페이지를 참조하세요.
|
||||
</Tip>
|
||||
|
||||
## 사전 요구 사항
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="활성 배포" icon="rocket">
|
||||
**Ready** 상태의 활성 배포(Crew 유형)가 있는 CrewAI AMP 계정이 필요합니다.
|
||||
</Card>
|
||||
<Card title="실행 권한" icon="key">
|
||||
훈련하려는 배포에 대한 실행 권한이 계정에 있어야 합니다.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Crew 훈련 방법
|
||||
|
||||
<Steps>
|
||||
<Step title="Training 탭 열기">
|
||||
**Deployments**로 이동하여 배포를 클릭한 다음 **Training** 탭을 선택합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="훈련 이름 입력">
|
||||
**Training Name**을 입력합니다 — 이것은 훈련 결과를 저장하는 데 사용되는 `.pkl` 파일 이름이 됩니다. 예를 들어, "Expert Mode Training"은 `expert_mode_training.pkl`을 생성합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="Crew 입력값 작성">
|
||||
Crew의 입력 필드를 입력합니다. 이는 일반 kickoff에 제공하는 것과 동일한 입력값입니다 — Crew 구성에 따라 동적으로 로드됩니다.
|
||||
</Step>
|
||||
|
||||
<Step title="훈련 시작">
|
||||
**Train Crew**를 클릭합니다. 프로세스가 실행되는 동안 버튼이 스피너와 함께 "Training..."으로 변경됩니다.
|
||||
|
||||
내부적으로:
|
||||
- 배포에 대한 훈련 레코드가 생성됩니다
|
||||
- 플랫폼이 배포의 자동 훈련 엔드포인트를 호출합니다
|
||||
- Crew가 자동으로 반복을 실행합니다 — 수동 피드백이 필요하지 않습니다
|
||||
</Step>
|
||||
|
||||
<Step title="진행 상황 모니터링">
|
||||
**Current Training Status** 패널에 다음이 표시됩니다:
|
||||
- **Status** — 훈련 실행의 현재 상태
|
||||
- **Nº Iterations** — 구성된 훈련 반복 횟수
|
||||
- **Filename** — 생성 중인 `.pkl` 파일
|
||||
- **Started At** — 훈련 시작 시간
|
||||
- **Training Inputs** — 제공한 입력값
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## 훈련 결과 이해
|
||||
|
||||
훈련이 완료되면 다음 정보가 포함된 에이전트별 결과 카드가 표시됩니다:
|
||||
|
||||
- **Agent Role** — Crew에서 에이전트의 이름/역할
|
||||
- **Final Quality** — 에이전트 출력 품질을 평가하는 0~10점 점수
|
||||
- **Final Summary** — 훈련 중 에이전트 성능 요약
|
||||
- **Suggestions** — 에이전트 동작 개선을 위한 실행 가능한 권장 사항
|
||||
|
||||
### 제안 편집
|
||||
|
||||
모든 에이전트의 제안을 개선할 수 있습니다:
|
||||
|
||||
<Steps>
|
||||
<Step title="Edit 클릭">
|
||||
에이전트의 결과 카드에서 제안 옆에 있는 **Edit** 버튼을 클릭합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="제안 수정">
|
||||
원하는 개선 사항을 더 잘 반영하도록 제안 텍스트를 업데이트합니다.
|
||||
</Step>
|
||||
|
||||
<Step title="변경 사항 저장">
|
||||
**Save**를 클릭합니다. 편집된 제안이 배포에 다시 동기화되고 이후 모든 실행에 사용됩니다.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## 훈련 데이터 사용
|
||||
|
||||
Crew에 훈련 결과를 적용하려면:
|
||||
|
||||
1. 완료된 훈련 세션에서 **Training Filename**(`.pkl` 파일)을 확인합니다.
|
||||
2. 배포의 kickoff 또는 실행 구성에서 이 파일 이름을 지정합니다.
|
||||
3. Crew가 자동으로 훈련 파일을 로드하고 저장된 제안을 각 에이전트에 적용합니다.
|
||||
|
||||
이는 에이전트가 이후 모든 실행에서 훈련 중에 생성된 피드백의 혜택을 받는다는 것을 의미합니다.
|
||||
|
||||
## 이전 훈련
|
||||
|
||||
Training 탭 하단에는 배포에 대한 **모든 과거 훈련 세션 기록**이 표시됩니다. 이전 훈련 실행을 검토하거나 결과를 비교하거나 사용할 다른 훈련 파일을 선택하는 데 사용합니다.
|
||||
|
||||
## 오류 처리
|
||||
|
||||
훈련 실행이 실패하면 상태 패널에 무엇이 잘못되었는지 설명하는 메시지와 함께 오류 상태가 표시됩니다.
|
||||
|
||||
훈련 실패의 일반적인 원인:
|
||||
- **배포 런타임이 업데이트되지 않음** — 배포가 최신 버전을 실행하고 있는지 확인하세요
|
||||
- **Crew 실행 오류** — Crew의 작업 로직 또는 에이전트 구성 내 문제
|
||||
- **네트워크 문제** — 플랫폼과 배포 간의 연결 문제
|
||||
|
||||
## 제한 사항
|
||||
|
||||
<Info>
|
||||
훈련 워크플로를 계획할 때 다음 제약 사항을 염두에 두세요:
|
||||
- **배포당 한 번에 하나의 활성 훈련** — 다른 훈련을 시작하기 전에 현재 실행이 완료될 때까지 기다리세요
|
||||
- **자동 훈련 모드만** — 플랫폼은 CLI처럼 반복당 대화형 피드백을 지원하지 않습니다
|
||||
- **훈련 데이터는 배포별** — 훈련 결과는 특정 배포 인스턴스 및 버전에 연결됩니다
|
||||
</Info>
|
||||
|
||||
## 관련 리소스
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="훈련 개념" icon="book" href="/ko/concepts/training">
|
||||
CrewAI 훈련이 내부적으로 어떻게 작동하는지 알아보세요.
|
||||
</Card>
|
||||
<Card title="Crew 시작" icon="play" href="/ko/enterprise/guides/kickoff-crew">
|
||||
AMP 플랫폼에서 배포된 Crew를 실행하세요.
|
||||
</Card>
|
||||
<Card title="AMP에 배포" icon="cloud-arrow-up" href="/ko/enterprise/guides/deploy-to-amp">
|
||||
Crew를 배포하고 훈련 준비를 완료하세요.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -5,6 +5,14 @@ icon: wrench
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### 영상: 코딩 에이전트 스킬을 활용한 CrewAI Agents & Flows 구축
|
||||
|
||||
코딩 에이전트 스킬(Claude Code, Codex 등)을 설치하여 CrewAI로 코딩 에이전트를 빠르게 시작하세요.
|
||||
|
||||
`npx skills add crewaiinc/skills` 명령어로 설치할 수 있습니다
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## 비디오 튜토리얼
|
||||
|
||||
설치 과정을 단계별로 시연하는 비디오 튜토리얼을 시청하세요:
|
||||
|
||||
@@ -16,6 +16,14 @@ mode: "wide"
|
||||
|
||||
10만 명이 넘는 개발자가 커뮤니티 과정을 통해 인증을 받았으며, CrewAI는 기업용 AI 자동화의 표준입니다.
|
||||
|
||||
### 영상: 코딩 에이전트 스킬을 활용한 CrewAI Agents & Flows 구축
|
||||
|
||||
코딩 에이전트 스킬(Claude Code, Codex 등)을 설치하여 CrewAI로 코딩 에이전트를 빠르게 시작하세요.
|
||||
|
||||
`npx skills add crewaiinc/skills` 명령어로 설치할 수 있습니다
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## CrewAI 아키텍처
|
||||
|
||||
CrewAI의 아키텍처는 자율성과 제어의 균형을 맞추도록 설계되었습니다.
|
||||
|
||||
@@ -5,6 +5,14 @@ icon: rocket
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### 영상: 코딩 에이전트 스킬을 활용한 CrewAI Agents & Flows 구축
|
||||
|
||||
코딩 에이전트 스킬(Claude Code, Codex 등)을 설치하여 CrewAI로 코딩 에이전트를 빠르게 시작하세요.
|
||||
|
||||
`npx skills add crewaiinc/skills` 명령어로 설치할 수 있습니다
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## 첫 번째 CrewAI Agent 만들기
|
||||
|
||||
이제 주어진 주제나 항목에 대해 `최신 AI 개발 동향`을 `연구`하고 `보고`하는 간단한 crew를 만들어보겠습니다.
|
||||
|
||||
@@ -4,6 +4,75 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="02 abr 2026">
|
||||
## v1.13.0
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Funcionalidades
|
||||
- Adicionar RuntimeState RootModel para serialização de estado unificado
|
||||
- Melhorar o listener de eventos com novos spans de telemetria para eventos de habilidade e memória
|
||||
- Adicionar extensão A2UI com suporte a v0.8/v0.9, esquemas e documentação
|
||||
- Emitir dados de uso de token no LLMCallCompletedEvent
|
||||
- Atualizar automaticamente o repositório de testes de implantação durante o lançamento
|
||||
- Melhorar a resiliência e a experiência do usuário na versão empresarial
|
||||
|
||||
### Correções de Bugs
|
||||
- Adicionar credenciais do repositório de ferramentas ao crewai install
|
||||
- Adicionar credenciais do repositório de ferramentas ao uv build na publicação de ferramentas
|
||||
- Passar metadados de impressão digital via configuração em vez de argumentos de ferramenta
|
||||
- Lidar com modelos GPT-5.x que não suportam o parâmetro API `stop`
|
||||
- Adicionar GPT-5 e a série o aos prefixos de visão multimodal
|
||||
- Limpar cache uv para pacotes recém-publicados na versão empresarial
|
||||
- Limitar lancedb abaixo de 0.30.1 para compatibilidade com Windows
|
||||
- Corrigir níveis de permissão RBAC para corresponder às opções reais da interface do usuário
|
||||
- Corrigir imprecisões nas capacidades do agente em todos os idiomas
|
||||
|
||||
### Documentação
|
||||
- Adicionar vídeo de demonstração de habilidades do agente de codificação às páginas de introdução
|
||||
- Adicionar guia abrangente de configuração SSO
|
||||
- Adicionar matriz de permissões RBAC abrangente e guia de implantação
|
||||
- Atualizar changelog e versão para v1.13.0
|
||||
|
||||
### Desempenho
|
||||
- Reduzir a sobrecarga do framework com bus de eventos preguiçoso, pular rastreamento quando desativado
|
||||
|
||||
### Refatoração
|
||||
- Converter Flow para Pydantic BaseModel
|
||||
- Converter classes LLM para Pydantic BaseModel
|
||||
- Substituir InstanceOf[T] por anotações de tipo simples
|
||||
- Remover diretório LLM de terceiros não utilizado
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@alex-clawd, @dependabot[bot], @greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay, @lucasgomide, @thiagomoretto
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="02 abr 2026">
|
||||
## v1.13.0a7
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.13.0a7)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Funcionalidades
|
||||
- Adicionar a extensão A2UI com suporte a v0.8/v0.9, esquemas e documentação
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir prefixos de visão multimodal adicionando GPT-5 e o-series
|
||||
|
||||
### Documentação
|
||||
- Atualizar changelog e versão para v1.13.0a6
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@alex-clawd, @greysonlalonde, @joaomdmoura
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="01 abr 2026">
|
||||
## v1.13.0a6
|
||||
|
||||
|
||||
@@ -117,23 +117,35 @@ task = Task(
|
||||
|
||||
### Com Flows
|
||||
|
||||
Passe arquivos para flows, que automaticamente herdam para crews:
|
||||
Campos tipados como arquivo (`File`, `ImageFile`, `PDFFile`) no esquema de estado do seu flow servem como sinal para a interface da Plataforma. Quando implantado, esses campos são renderizados como zonas de upload de arquivos. Arquivos também podem ser passados via `input_files` na API.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai_files import ImageFile
|
||||
from crewai_files import File, ImageFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
class AnalysisFlow(Flow):
|
||||
class MyState(BaseModel):
|
||||
document: File # Renders as file dropzone in Platform UI
|
||||
cover_image: ImageFile # Image-specific dropzone
|
||||
title: str = ""
|
||||
|
||||
class AnalysisFlow(Flow[MyState]):
|
||||
@start()
|
||||
def analyze(self):
|
||||
# Files are automatically populated in state
|
||||
content = self.state.document.read()
|
||||
return self.analysis_crew.kickoff()
|
||||
|
||||
flow = AnalysisFlow()
|
||||
result = flow.kickoff(
|
||||
input_files={"image": ImageFile(source="data.png")}
|
||||
input_files={"document": File(source="report.pdf")}
|
||||
)
|
||||
```
|
||||
|
||||
<Note type="info" title="Integração com a Plataforma CrewAI">
|
||||
Quando implantado na Plataforma CrewAI, campos tipados como arquivo como `ImageFile`, `PDFFile` e outros no estado do seu flow recebem automaticamente uma interface de upload de arquivos. Os usuários podem arrastar e soltar arquivos diretamente na interface da Plataforma. Os arquivos são armazenados de forma segura e passados para os agentes usando otimizações específicas do provedor (base64 inline, APIs de upload de arquivo ou referências por URL dependendo do provedor). Para exemplos de uso da API, consulte [Entradas de Arquivos em Flows](/pt-BR/concepts/flows#entradas-de-arquivos).
|
||||
</Note>
|
||||
|
||||
### Com Agentes Standalone
|
||||
|
||||
Passe arquivos diretamente no kickoff do agente:
|
||||
|
||||
@@ -173,6 +173,90 @@ Cada estado nos flows do CrewAI recebe automaticamente um identificador único (
|
||||
|
||||
Ao oferecer as duas opções de gerenciamento de estado, o CrewAI Flows permite que desenvolvedores criem fluxos de IA que sejam ao mesmo tempo flexíveis e robustos, atendendo a uma ampla variedade de requisitos de aplicação.
|
||||
|
||||
### Entradas de Arquivos
|
||||
|
||||
Ao usar estado estruturado, você pode incluir campos tipados como arquivo usando classes do `crewai-files`. Campos tipados como arquivo no estado do seu flow servem como sinal para a Plataforma — eles são renderizados automaticamente como zonas de upload de arquivos na aba Run da interface e são preenchidos quando arquivos são enviados via Plataforma ou passados via `input_files` na API.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai_files import File, ImageFile, PDFFile
|
||||
from pydantic import BaseModel
|
||||
|
||||
class MyState(BaseModel):
|
||||
document: File # Renders as file dropzone in Platform
|
||||
title: str = ""
|
||||
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def process(self):
|
||||
# File object is automatically populated in state
|
||||
# when uploaded via Platform UI or passed via API
|
||||
content = self.state.document.read()
|
||||
print(f"Processing {self.state.title}: {len(content)} bytes")
|
||||
return content
|
||||
```
|
||||
|
||||
Quando implantado na **Plataforma CrewAI**, campos tipados como arquivo (`File`, `ImageFile`, `PDFFile` do `crewai-files`) são renderizados automaticamente como zonas de upload de arquivos na interface. Os usuários podem arrastar e soltar arquivos, que são então preenchidos no estado do seu flow.
|
||||
|
||||
**Iniciando com arquivos via API:**
|
||||
|
||||
O endpoint `/kickoff` detecta automaticamente o formato da requisição:
|
||||
- **Corpo JSON** → kickoff normal
|
||||
- **multipart/form-data** → upload de arquivo + kickoff
|
||||
|
||||
Usuários da API também podem passar strings de URL diretamente para campos tipados como arquivo — o Pydantic as converte automaticamente.
|
||||
|
||||
### Uso da API
|
||||
|
||||
#### Opção 1: Kickoff multipart (recomendado)
|
||||
|
||||
Envie arquivos diretamente com a requisição de kickoff:
|
||||
|
||||
```bash
|
||||
# With files (multipart) — same endpoint
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'inputs={"company_name": "Einstein"}' \
|
||||
-F 'cover_image=@/path/to/photo.jpg'
|
||||
```
|
||||
|
||||
Os arquivos são armazenados automaticamente e convertidos em objetos `FileInput`. O agente recebe o arquivo com otimização específica do provedor (base64 inline, API de upload de arquivo ou referência por URL dependendo do provedor LLM).
|
||||
|
||||
#### Opção 2: Kickoff JSON (sem arquivos)
|
||||
|
||||
```bash
|
||||
# Without files (JSON) — same endpoint
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"company_name": "Einstein"}}'
|
||||
```
|
||||
|
||||
#### Opção 3: Upload separado + kickoff
|
||||
|
||||
Esta é uma alternativa ao upload multipart quando você precisa fazer upload dos arquivos separadamente da requisição de kickoff. Faça o upload dos arquivos primeiro e depois referencie-os por URL:
|
||||
|
||||
```bash
|
||||
# Step 1: Upload
|
||||
curl -X POST https://your-deployment.crewai.com/files \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'file=@/path/to/photo.jpg' \
|
||||
-F 'field_name=cover_image'
|
||||
# Returns: {"url": "https://...", "field_name": "cover_image"}
|
||||
|
||||
# Step 2: Kickoff with URL
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"company_name": "Einstein"}, "input_files": {"cover_image": "https://..."}}'
|
||||
```
|
||||
|
||||
Consulte a documentação da API da Plataforma para detalhes completos do endpoint `/files`.
|
||||
|
||||
#### Na Plataforma CrewAI
|
||||
|
||||
Ao usar a interface da Plataforma, campos tipados como arquivo são renderizados automaticamente como zonas de arrastar e soltar para upload. Nenhuma chamada de API é necessária — basta soltar o arquivo e clicar em Executar.
|
||||
|
||||
## Persistência de Flow
|
||||
|
||||
O decorador @persist permite a persistência automática do estado nos flows do CrewAI, garantindo que você mantenha o estado do flow entre reinicializações ou execuções diferentes do workflow. Esse decorador pode ser aplicado tanto ao nível de classe, quanto ao nível de método, oferecendo flexibilidade sobre como gerenciar a persistência do estado.
|
||||
|
||||
@@ -86,6 +86,60 @@ curl -H "Authorization: Bearer YOUR_CREW_TOKEN" https://your-crew-url.crewai.com
|
||||
|
||||
Seu bearer token está disponível na aba Status na página de detalhes do seu crew.
|
||||
|
||||
## Upload de Arquivos
|
||||
|
||||
Quando seu crew ou flow inclui campos de estado tipados como arquivo (usando `ImageFile`, `PDFFile` ou `File` do `crewai-files`), esses campos são renderizados automaticamente como zonas de upload de arquivos na aba Run da interface. Os usuários podem arrastar e soltar arquivos diretamente, e a Plataforma gerencia o armazenamento e entrega para seus agentes.
|
||||
|
||||
### Kickoff Multipart (Recomendado)
|
||||
|
||||
Envie arquivos diretamente com a requisição de kickoff usando `multipart/form-data`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'inputs={"title": "Report"}' \
|
||||
-F 'document=@/path/to/file.pdf'
|
||||
```
|
||||
|
||||
Os arquivos são armazenados automaticamente e convertidos em objetos de arquivo. O agente recebe o arquivo com otimização específica do provedor (base64 inline, API de upload de arquivo ou referência por URL dependendo do provedor LLM).
|
||||
|
||||
### Kickoff JSON com URLs de Arquivos
|
||||
|
||||
Se você já tem arquivos hospedados em URLs, passe-os via `input_files`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"inputs": {"title": "Report"},
|
||||
"input_files": {"document": "https://example.com/file.pdf"}
|
||||
}'
|
||||
```
|
||||
|
||||
### Upload Separado + Kickoff
|
||||
|
||||
Faça upload dos arquivos primeiro e depois referencie-os por URL:
|
||||
|
||||
```bash
|
||||
# Step 1: Upload
|
||||
curl -X POST https://your-deployment.crewai.com/files \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-F 'file=@/path/to/file.pdf' \
|
||||
-F 'field_name=document'
|
||||
# Returns: {"url": "https://...", "field_name": "document"}
|
||||
|
||||
# Step 2: Kickoff with URL
|
||||
curl -X POST https://your-deployment.crewai.com/kickoff \
|
||||
-H 'Authorization: Bearer YOUR_TOKEN' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"inputs": {"title": "Report"}, "input_files": {"document": "https://..."}}'
|
||||
```
|
||||
|
||||
<Note type="info">
|
||||
O upload de arquivos funciona da mesma forma tanto para crews quanto para flows. Defina campos tipados como arquivo no seu esquema de estado, e a interface da Plataforma e a API tratarão os uploads automaticamente.
|
||||
</Note>
|
||||
|
||||
### Verificando o Status do Crew
|
||||
|
||||
Antes de executar operações, você pode verificar se seu crew está funcionando corretamente:
|
||||
|
||||
132
docs/pt-BR/enterprise/guides/training-crews.mdx
Normal file
132
docs/pt-BR/enterprise/guides/training-crews.mdx
Normal file
@@ -0,0 +1,132 @@
|
||||
---
|
||||
title: "Treinamento de Crews"
|
||||
description: "Treine seus crews implantados diretamente da plataforma CrewAI AMP para melhorar o desempenho dos agentes ao longo do tempo"
|
||||
icon: "dumbbell"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
O treinamento permite que você melhore o desempenho do crew executando sessões de treinamento iterativas diretamente da aba **Training** no CrewAI AMP. A plataforma usa o **modo de auto-treinamento** — ela gerencia o processo iterativo automaticamente, diferente do treinamento via CLI que requer feedback humano interativo por iteração.
|
||||
|
||||
Após a conclusão do treinamento, o CrewAI avalia as saídas dos agentes e consolida o feedback em sugestões acionáveis para cada agente. Essas sugestões são então aplicadas às execuções futuras do crew para melhorar a qualidade das saídas.
|
||||
|
||||
<Tip>
|
||||
Para detalhes sobre como o treinamento do CrewAI funciona internamente, consulte a página [Conceitos de Treinamento](/pt-BR/concepts/training).
|
||||
</Tip>
|
||||
|
||||
## Pré-requisitos
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Implantação ativa" icon="rocket">
|
||||
Você precisa de uma conta CrewAI AMP com uma implantação ativa em status **Ready** (tipo Crew).
|
||||
</Card>
|
||||
<Card title="Permissão de execução" icon="key">
|
||||
Sua conta deve ter permissão de execução para a implantação que deseja treinar.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
## Como treinar um crew
|
||||
|
||||
<Steps>
|
||||
<Step title="Abra a aba Training">
|
||||
Navegue até **Deployments**, clique na sua implantação e selecione a aba **Training**.
|
||||
</Step>
|
||||
|
||||
<Step title="Insira um nome de treinamento">
|
||||
Forneça um **Training Name** — este será o nome do arquivo `.pkl` usado para armazenar os resultados do treinamento. Por exemplo, "Expert Mode Training" produz `expert_mode_training.pkl`.
|
||||
</Step>
|
||||
|
||||
<Step title="Preencha as entradas do crew">
|
||||
Insira os campos de entrada do crew. Estas são as mesmas entradas que você forneceria para um kickoff normal — elas são carregadas dinamicamente com base na configuração do seu crew.
|
||||
</Step>
|
||||
|
||||
<Step title="Inicie o treinamento">
|
||||
Clique em **Train Crew**. O botão muda para "Training..." com um spinner enquanto o processo é executado.
|
||||
|
||||
Por trás dos panos:
|
||||
- Um registro de treinamento é criado para sua implantação
|
||||
- A plataforma chama o endpoint de auto-treinamento da implantação
|
||||
- O crew executa suas iterações automaticamente — nenhum feedback manual é necessário
|
||||
</Step>
|
||||
|
||||
<Step title="Monitore o progresso">
|
||||
O painel **Current Training Status** exibe:
|
||||
- **Status** — Estado atual da execução do treinamento
|
||||
- **Nº Iterations** — Número de iterações de treinamento configuradas
|
||||
- **Filename** — O arquivo `.pkl` sendo gerado
|
||||
- **Started At** — Quando o treinamento começou
|
||||
- **Training Inputs** — As entradas que você forneceu
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Entendendo os resultados do treinamento
|
||||
|
||||
Uma vez que o treinamento for concluído, você verá cards de resultado por agente com as seguintes informações:
|
||||
|
||||
- **Agent Role** — O nome/função do agente no seu crew
|
||||
- **Final Quality** — Uma pontuação de 0 a 10 avaliando a qualidade da saída do agente
|
||||
- **Final Summary** — Um resumo do desempenho do agente durante o treinamento
|
||||
- **Suggestions** — Recomendações acionáveis para melhorar o comportamento do agente
|
||||
|
||||
### Editando sugestões
|
||||
|
||||
Você pode refinar as sugestões para qualquer agente:
|
||||
|
||||
<Steps>
|
||||
<Step title="Clique em Edit">
|
||||
No card de resultado de qualquer agente, clique no botão **Edit** ao lado das sugestões.
|
||||
</Step>
|
||||
|
||||
<Step title="Modifique as sugestões">
|
||||
Atualize o texto das sugestões para refletir melhor as melhorias que você deseja.
|
||||
</Step>
|
||||
|
||||
<Step title="Salve as alterações">
|
||||
Clique em **Save**. As sugestões editadas são sincronizadas de volta à implantação e usadas em todas as execuções futuras.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Usando dados de treinamento
|
||||
|
||||
Para aplicar os resultados do treinamento ao seu crew:
|
||||
|
||||
1. Anote o **Training Filename** (o arquivo `.pkl`) da sua sessão de treinamento concluída.
|
||||
2. Especifique este nome de arquivo na configuração de kickoff ou execução da sua implantação.
|
||||
3. O crew carrega automaticamente o arquivo de treinamento e aplica as sugestões armazenadas a cada agente.
|
||||
|
||||
Isso significa que os agentes se beneficiam do feedback gerado durante o treinamento em cada execução subsequente.
|
||||
|
||||
## Treinamentos anteriores
|
||||
|
||||
A parte inferior da aba Training exibe um **histórico de todas as sessões de treinamento anteriores** da implantação. Use isso para revisar execuções de treinamento anteriores, comparar resultados ou selecionar um arquivo de treinamento diferente para usar.
|
||||
|
||||
## Tratamento de erros
|
||||
|
||||
Se uma execução de treinamento falhar, o painel de status mostra um estado de erro junto com uma mensagem descrevendo o que deu errado.
|
||||
|
||||
Causas comuns de falhas de treinamento:
|
||||
- **Runtime da implantação não atualizado** — Certifique-se de que sua implantação está executando a versão mais recente
|
||||
- **Erros de execução do crew** — Problemas na lógica de tarefas do crew ou configuração do agente
|
||||
- **Problemas de rede** — Problemas de conectividade entre a plataforma e a implantação
|
||||
|
||||
## Limitações
|
||||
|
||||
<Info>
|
||||
Tenha estas restrições em mente ao planejar seu fluxo de trabalho de treinamento:
|
||||
- **Um treinamento ativo por vez** por implantação — aguarde a execução atual terminar antes de iniciar outra
|
||||
- **Apenas modo de auto-treinamento** — a plataforma não suporta feedback interativo por iteração como o CLI
|
||||
- **Dados de treinamento são específicos da implantação** — os resultados do treinamento estão vinculados à instância e versão específicas da implantação
|
||||
</Info>
|
||||
|
||||
## Recursos relacionados
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="Conceitos de Treinamento" icon="book" href="/pt-BR/concepts/training">
|
||||
Aprenda como o treinamento do CrewAI funciona internamente.
|
||||
</Card>
|
||||
<Card title="Kickoff Crew" icon="play" href="/pt-BR/enterprise/guides/kickoff-crew">
|
||||
Execute seu crew implantado a partir da plataforma AMP.
|
||||
</Card>
|
||||
<Card title="Implantar no AMP" icon="cloud-arrow-up" href="/pt-BR/enterprise/guides/deploy-to-amp">
|
||||
Faça a implantação do seu crew e deixe-o pronto para treinamento.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -5,6 +5,14 @@ icon: wrench
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### Assista: Construindo Agents e Flows CrewAI com Coding Agent Skills
|
||||
|
||||
Instale nossas coding agent skills (Claude Code, Codex, ...) para colocar seus agentes de código para funcionar rapidamente com o CrewAI.
|
||||
|
||||
Você pode instalar com `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## Tutorial em Vídeo
|
||||
|
||||
Assista a este tutorial em vídeo para uma demonstração passo a passo do processo de instalação:
|
||||
|
||||
@@ -16,6 +16,14 @@ Ele capacita desenvolvedores a construir sistemas multi-agente prontos para prod
|
||||
|
||||
Com mais de 100.000 desenvolvedores certificados em nossos cursos comunitários, o CrewAI é o padrão para automação de IA pronta para empresas.
|
||||
|
||||
### Assista: Construindo Agents e Flows CrewAI com Coding Agent Skills
|
||||
|
||||
Instale nossas coding agent skills (Claude Code, Codex, ...) para colocar seus agentes de código para funcionar rapidamente com o CrewAI.
|
||||
|
||||
Você pode instalar com `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## A Arquitetura do CrewAI
|
||||
|
||||
A arquitetura do CrewAI foi projetada para equilibrar autonomia com controle.
|
||||
|
||||
@@ -5,6 +5,14 @@ icon: rocket
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
### Assista: Construindo Agents e Flows CrewAI com Coding Agent Skills
|
||||
|
||||
Instale nossas coding agent skills (Claude Code, Codex, ...) para colocar seus agentes de código para funcionar rapidamente com o CrewAI.
|
||||
|
||||
Você pode instalar com `npx skills add crewaiinc/skills`
|
||||
|
||||
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{width: "100%", height: "400px"}}></iframe>
|
||||
|
||||
## Construa seu primeiro Agente CrewAI
|
||||
|
||||
Vamos criar uma tripulação simples que nos ajudará a `pesquisar` e `relatar` sobre os `últimos avanços em IA` para um determinado tópico ou assunto.
|
||||
|
||||
@@ -17,6 +17,9 @@ dependencies = [
|
||||
"av~=13.0.0",
|
||||
]
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
@@ -152,4 +152,4 @@ __all__ = [
|
||||
"wrap_file_source",
|
||||
]
|
||||
|
||||
__version__ = "1.13.0a6"
|
||||
__version__ = "1.13.0"
|
||||
|
||||
@@ -11,7 +11,7 @@ dependencies = [
|
||||
"pytube~=15.0.0",
|
||||
"requests~=2.32.5",
|
||||
"docker~=7.1.0",
|
||||
"crewai==1.13.0a6",
|
||||
"crewai==1.13.0",
|
||||
"tiktoken~=0.8.0",
|
||||
"beautifulsoup4~=4.13.4",
|
||||
"python-docx~=1.2.0",
|
||||
@@ -142,6 +142,9 @@ contextual = [
|
||||
]
|
||||
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
@@ -309,4 +309,4 @@ __all__ = [
|
||||
"ZapierActionTools",
|
||||
]
|
||||
|
||||
__version__ = "1.13.0a6"
|
||||
__version__ = "1.13.0"
|
||||
|
||||
@@ -54,7 +54,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
|
||||
|
||||
[project.optional-dependencies]
|
||||
tools = [
|
||||
"crewai-tools==1.13.0a6",
|
||||
"crewai-tools==1.13.0",
|
||||
]
|
||||
embeddings = [
|
||||
"tiktoken~=0.8.0"
|
||||
@@ -115,6 +115,9 @@ qdrant-edge = [
|
||||
crewai = "crewai.cli.cli:crewai"
|
||||
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
# PyTorch index configuration, since torch 2.5.0 is not compatible with python 3.13
|
||||
[[tool.uv.index]]
|
||||
name = "pytorch-nightly"
|
||||
|
||||
@@ -8,6 +8,7 @@ from pydantic import PydanticUserError
|
||||
|
||||
from crewai.agent.core import Agent
|
||||
from crewai.agent.planning_config import PlanningConfig
|
||||
from crewai.context import ExecutionContext
|
||||
from crewai.crew import Crew
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.flow.flow import Flow
|
||||
@@ -15,6 +16,7 @@ from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.process import Process
|
||||
from crewai.runtime_state import _entity_discriminator
|
||||
from crewai.task import Task
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
@@ -44,7 +46,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
|
||||
|
||||
_suppress_pydantic_deprecation_warnings()
|
||||
|
||||
__version__ = "1.13.0a6"
|
||||
__version__ = "1.13.0"
|
||||
_telemetry_submitted = False
|
||||
|
||||
|
||||
@@ -96,6 +98,10 @@ def __getattr__(name: str) -> Any:
|
||||
|
||||
|
||||
try:
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent as _BaseAgent
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import (
|
||||
CrewAgentExecutorMixin as _CrewAgentExecutorMixin,
|
||||
)
|
||||
from crewai.agents.tools_handler import ToolsHandler as _ToolsHandler
|
||||
from crewai.experimental.agent_executor import AgentExecutor as _AgentExecutor
|
||||
from crewai.hooks.llm_hooks import LLMCallHookContext as _LLMCallHookContext
|
||||
@@ -105,27 +111,93 @@ try:
|
||||
SystemPromptResult as _SystemPromptResult,
|
||||
)
|
||||
|
||||
_AgentExecutor.model_rebuild(
|
||||
force=True,
|
||||
_types_namespace={
|
||||
"Agent": Agent,
|
||||
"ToolsHandler": _ToolsHandler,
|
||||
"Crew": Crew,
|
||||
"BaseLLM": BaseLLM,
|
||||
"Task": Task,
|
||||
"StandardPromptResult": _StandardPromptResult,
|
||||
"SystemPromptResult": _SystemPromptResult,
|
||||
"LLMCallHookContext": _LLMCallHookContext,
|
||||
"ToolResult": _ToolResult,
|
||||
},
|
||||
)
|
||||
_base_namespace: dict[str, type] = {
|
||||
"Agent": Agent,
|
||||
"BaseAgent": _BaseAgent,
|
||||
"Crew": Crew,
|
||||
"Flow": Flow,
|
||||
"BaseLLM": BaseLLM,
|
||||
"Task": Task,
|
||||
"CrewAgentExecutorMixin": _CrewAgentExecutorMixin,
|
||||
"ExecutionContext": ExecutionContext,
|
||||
}
|
||||
|
||||
try:
|
||||
from crewai.a2a.config import (
|
||||
A2AClientConfig as _A2AClientConfig,
|
||||
A2AConfig as _A2AConfig,
|
||||
A2AServerConfig as _A2AServerConfig,
|
||||
)
|
||||
|
||||
_base_namespace.update(
|
||||
{
|
||||
"A2AConfig": _A2AConfig,
|
||||
"A2AClientConfig": _A2AClientConfig,
|
||||
"A2AServerConfig": _A2AServerConfig,
|
||||
}
|
||||
)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
import sys
|
||||
|
||||
_full_namespace = {
|
||||
**_base_namespace,
|
||||
"ToolsHandler": _ToolsHandler,
|
||||
"StandardPromptResult": _StandardPromptResult,
|
||||
"SystemPromptResult": _SystemPromptResult,
|
||||
"LLMCallHookContext": _LLMCallHookContext,
|
||||
"ToolResult": _ToolResult,
|
||||
}
|
||||
|
||||
_resolve_namespace = {
|
||||
**_full_namespace,
|
||||
**sys.modules[_BaseAgent.__module__].__dict__,
|
||||
}
|
||||
|
||||
for _mod_name in (
|
||||
_BaseAgent.__module__,
|
||||
Agent.__module__,
|
||||
Crew.__module__,
|
||||
Flow.__module__,
|
||||
Task.__module__,
|
||||
_AgentExecutor.__module__,
|
||||
):
|
||||
sys.modules[_mod_name].__dict__.update(_resolve_namespace)
|
||||
|
||||
from crewai.tasks.conditional_task import ConditionalTask as _ConditionalTask
|
||||
|
||||
_BaseAgent.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
Task.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
_ConditionalTask.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
Crew.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
Flow.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
_AgentExecutor.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
|
||||
from typing import Annotated
|
||||
|
||||
from pydantic import Discriminator, RootModel, Tag
|
||||
|
||||
Entity = Annotated[
|
||||
Annotated[Flow, Tag("flow")] # type: ignore[type-arg]
|
||||
| Annotated[Crew, Tag("crew")]
|
||||
| Annotated[Agent, Tag("agent")],
|
||||
Discriminator(_entity_discriminator),
|
||||
]
|
||||
RuntimeState = RootModel[list[Entity]]
|
||||
|
||||
try:
|
||||
Agent.model_rebuild(force=True, _types_namespace=_full_namespace)
|
||||
except PydanticUserError:
|
||||
pass
|
||||
except (ImportError, PydanticUserError):
|
||||
import logging as _logging
|
||||
|
||||
_logging.getLogger(__name__).warning(
|
||||
"AgentExecutor.model_rebuild() failed; forward refs may be unresolved.",
|
||||
"model_rebuild() failed; forward refs may be unresolved.",
|
||||
exc_info=True,
|
||||
)
|
||||
RuntimeState = None # type: ignore[assignment,misc]
|
||||
|
||||
__all__ = [
|
||||
"LLM",
|
||||
@@ -133,12 +205,14 @@ __all__ = [
|
||||
"BaseLLM",
|
||||
"Crew",
|
||||
"CrewOutput",
|
||||
"ExecutionContext",
|
||||
"Flow",
|
||||
"Knowledge",
|
||||
"LLMGuardrail",
|
||||
"Memory",
|
||||
"PlanningConfig",
|
||||
"Process",
|
||||
"RuntimeState",
|
||||
"Task",
|
||||
"TaskOutput",
|
||||
"__version__",
|
||||
|
||||
@@ -14,6 +14,7 @@ import subprocess
|
||||
import time
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Annotated,
|
||||
Any,
|
||||
Literal,
|
||||
NoReturn,
|
||||
@@ -23,11 +24,14 @@ import warnings
|
||||
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
ConfigDict,
|
||||
Field,
|
||||
InstanceOf,
|
||||
PrivateAttr,
|
||||
model_validator,
|
||||
)
|
||||
from pydantic.functional_serializers import PlainSerializer
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.agent.planning_config import PlanningConfig
|
||||
@@ -45,7 +49,11 @@ from crewai.agent.utils import (
|
||||
save_last_messages,
|
||||
validate_max_execution_time,
|
||||
)
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.agent_builder.base_agent import (
|
||||
BaseAgent,
|
||||
_serialize_llm_ref,
|
||||
_validate_llm_ref,
|
||||
)
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
@@ -121,6 +129,24 @@ if TYPE_CHECKING:
|
||||
|
||||
_passthrough_exceptions: tuple[type[Exception], ...] = ()
|
||||
|
||||
_EXECUTOR_CLASS_MAP: dict[str, type] = {
|
||||
"CrewAgentExecutor": CrewAgentExecutor,
|
||||
"AgentExecutor": AgentExecutor,
|
||||
}
|
||||
|
||||
|
||||
def _validate_executor_class(value: Any) -> Any:
|
||||
if isinstance(value, str):
|
||||
cls = _EXECUTOR_CLASS_MAP.get(value)
|
||||
if cls is None:
|
||||
raise ValueError(f"Unknown executor class: {value}")
|
||||
return cls
|
||||
return value
|
||||
|
||||
|
||||
def _serialize_executor_class(value: Any) -> str:
|
||||
return value.__name__ if isinstance(value, type) else str(value)
|
||||
|
||||
|
||||
class Agent(BaseAgent):
|
||||
"""Represents an agent in a system.
|
||||
@@ -166,12 +192,16 @@ class Agent(BaseAgent):
|
||||
default=True,
|
||||
description="Use system prompt for the agent.",
|
||||
)
|
||||
llm: str | BaseLLM | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
function_calling_llm: str | BaseLLM | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(description="Language model that will run the agent.", default=None)
|
||||
function_calling_llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(description="Language model that will run the agent.", default=None)
|
||||
system_template: str | None = Field(
|
||||
default=None, description="System format for the agent."
|
||||
)
|
||||
@@ -267,7 +297,14 @@ class Agent(BaseAgent):
|
||||
Can be a single A2AConfig/A2AClientConfig/A2AServerConfig, or a list of any number of A2AConfig/A2AClientConfig with a single A2AServerConfig.
|
||||
""",
|
||||
)
|
||||
executor_class: type[CrewAgentExecutor] | type[AgentExecutor] = Field(
|
||||
agent_executor: InstanceOf[CrewAgentExecutor] | InstanceOf[AgentExecutor] | None = (
|
||||
Field(default=None, description="An instance of the CrewAgentExecutor class.")
|
||||
)
|
||||
executor_class: Annotated[
|
||||
type[CrewAgentExecutor] | type[AgentExecutor],
|
||||
BeforeValidator(_validate_executor_class),
|
||||
PlainSerializer(_serialize_executor_class, return_type=str, when_used="json"),
|
||||
] = Field(
|
||||
default=CrewAgentExecutor,
|
||||
description="Class to use for the agent executor. Defaults to CrewAgentExecutor, can optionally use AgentExecutor.",
|
||||
)
|
||||
@@ -690,7 +727,9 @@ class Agent(BaseAgent):
|
||||
task_prompt,
|
||||
knowledge_config,
|
||||
self.knowledge.query if self.knowledge else lambda *a, **k: None,
|
||||
self.crew.query_knowledge if self.crew else lambda *a, **k: None,
|
||||
self.crew.query_knowledge
|
||||
if self.crew and not isinstance(self.crew, str)
|
||||
else lambda *a, **k: None,
|
||||
)
|
||||
|
||||
task_prompt = self._finalize_task_prompt(task_prompt, tools, task)
|
||||
@@ -777,14 +816,18 @@ class Agent(BaseAgent):
|
||||
if not self.agent_executor:
|
||||
raise RuntimeError("Agent executor is not initialized.")
|
||||
|
||||
return self.agent_executor.invoke(
|
||||
{
|
||||
"input": task_prompt,
|
||||
"tool_names": self.agent_executor.tools_names,
|
||||
"tools": self.agent_executor.tools_description,
|
||||
"ask_for_human_input": task.human_input,
|
||||
}
|
||||
)["output"]
|
||||
result = cast(
|
||||
dict[str, Any],
|
||||
self.agent_executor.invoke(
|
||||
{
|
||||
"input": task_prompt,
|
||||
"tool_names": self.agent_executor.tools_names,
|
||||
"tools": self.agent_executor.tools_description,
|
||||
"ask_for_human_input": task.human_input,
|
||||
}
|
||||
),
|
||||
)
|
||||
return result["output"]
|
||||
|
||||
async def aexecute_task(
|
||||
self,
|
||||
@@ -955,19 +998,23 @@ class Agent(BaseAgent):
|
||||
if self.agent_executor is not None:
|
||||
self._update_executor_parameters(
|
||||
task=task,
|
||||
tools=parsed_tools, # type: ignore[arg-type]
|
||||
tools=parsed_tools,
|
||||
raw_tools=raw_tools,
|
||||
prompt=prompt,
|
||||
stop_words=stop_words,
|
||||
rpm_limit_fn=rpm_limit_fn,
|
||||
)
|
||||
else:
|
||||
if not isinstance(self.llm, BaseLLM):
|
||||
raise RuntimeError(
|
||||
"LLM must be resolved before creating agent executor."
|
||||
)
|
||||
self.agent_executor = self.executor_class(
|
||||
llm=cast(BaseLLM, self.llm),
|
||||
llm=self.llm,
|
||||
task=task, # type: ignore[arg-type]
|
||||
i18n=self.i18n,
|
||||
agent=self,
|
||||
crew=self.crew,
|
||||
crew=self.crew, # type: ignore[arg-type]
|
||||
tools=parsed_tools,
|
||||
prompt=prompt,
|
||||
original_tools=raw_tools,
|
||||
@@ -991,7 +1038,7 @@ class Agent(BaseAgent):
|
||||
def _update_executor_parameters(
|
||||
self,
|
||||
task: Task | None,
|
||||
tools: list[BaseTool],
|
||||
tools: list[CrewStructuredTool],
|
||||
raw_tools: list[BaseTool],
|
||||
prompt: SystemPromptResult | StandardPromptResult,
|
||||
stop_words: list[str],
|
||||
@@ -1007,11 +1054,17 @@ class Agent(BaseAgent):
|
||||
stop_words: Stop words list.
|
||||
rpm_limit_fn: RPM limit callback function.
|
||||
"""
|
||||
if self.agent_executor is None:
|
||||
raise RuntimeError("Agent executor is not initialized.")
|
||||
|
||||
self.agent_executor.task = task
|
||||
self.agent_executor.tools = tools
|
||||
self.agent_executor.original_tools = raw_tools
|
||||
self.agent_executor.prompt = prompt
|
||||
self.agent_executor.stop_words = stop_words
|
||||
if isinstance(self.agent_executor, AgentExecutor):
|
||||
self.agent_executor.stop_words = stop_words
|
||||
else:
|
||||
self.agent_executor.stop = stop_words
|
||||
self.agent_executor.tools_names = get_tool_names(tools)
|
||||
self.agent_executor.tools_description = render_text_description_and_args(tools)
|
||||
self.agent_executor.response_model = (
|
||||
@@ -1033,7 +1086,7 @@ class Agent(BaseAgent):
|
||||
)
|
||||
)
|
||||
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
def get_delegation_tools(self, agents: Sequence[BaseAgent]) -> list[BaseTool]:
|
||||
agent_tools = AgentTools(agents=agents)
|
||||
return agent_tools.tools()
|
||||
|
||||
@@ -1787,21 +1840,3 @@ class Agent(BaseAgent):
|
||||
LiteAgentOutput: The result of the agent execution.
|
||||
"""
|
||||
return await self.kickoff_async(messages, response_format, input_files)
|
||||
|
||||
|
||||
try:
|
||||
from crewai.a2a.config import (
|
||||
A2AClientConfig as _A2AClientConfig,
|
||||
A2AConfig as _A2AConfig,
|
||||
A2AServerConfig as _A2AServerConfig,
|
||||
)
|
||||
|
||||
Agent.model_rebuild(
|
||||
_types_namespace={
|
||||
"A2AConfig": _A2AConfig,
|
||||
"A2AClientConfig": _A2AClientConfig,
|
||||
"A2AServerConfig": _A2AServerConfig,
|
||||
}
|
||||
)
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
@@ -137,7 +137,8 @@ def handle_knowledge_retrieval(
|
||||
Returns:
|
||||
The task prompt potentially augmented with knowledge context.
|
||||
"""
|
||||
if not (agent.knowledge or (agent.crew and agent.crew.knowledge)):
|
||||
_crew = agent.crew if not isinstance(agent.crew, str) else None
|
||||
if not (agent.knowledge or (_crew and _crew.knowledge)):
|
||||
return task_prompt
|
||||
|
||||
crewai_event_bus.emit(
|
||||
@@ -244,7 +245,7 @@ def apply_training_data(agent: Agent, task_prompt: str) -> str:
|
||||
Returns:
|
||||
The task prompt with training data applied.
|
||||
"""
|
||||
if agent.crew and agent.crew._train:
|
||||
if agent.crew and not isinstance(agent.crew, str) and agent.crew._train:
|
||||
return agent._training_handler(task_prompt=task_prompt)
|
||||
return agent._use_trained_data(task_prompt=task_prompt)
|
||||
|
||||
@@ -355,7 +356,8 @@ async def ahandle_knowledge_retrieval(
|
||||
Returns:
|
||||
The task prompt potentially augmented with knowledge context.
|
||||
"""
|
||||
if not (agent.knowledge or (agent.crew and agent.crew.knowledge)):
|
||||
_crew = agent.crew if not isinstance(agent.crew, str) else None
|
||||
if not (agent.knowledge or (_crew and _crew.knowledge)):
|
||||
return task_prompt
|
||||
|
||||
crewai_event_bus.emit(
|
||||
@@ -381,15 +383,16 @@ async def ahandle_knowledge_retrieval(
|
||||
if agent.agent_knowledge_context:
|
||||
task_prompt += agent.agent_knowledge_context
|
||||
|
||||
knowledge_snippets = await agent.crew.aquery_knowledge(
|
||||
[agent.knowledge_search_query], **knowledge_config
|
||||
)
|
||||
if knowledge_snippets:
|
||||
agent.crew_knowledge_context = extract_knowledge_context(
|
||||
knowledge_snippets
|
||||
if _crew:
|
||||
knowledge_snippets = await _crew.aquery_knowledge(
|
||||
[agent.knowledge_search_query], **knowledge_config
|
||||
)
|
||||
if agent.crew_knowledge_context:
|
||||
task_prompt += agent.crew_knowledge_context
|
||||
if knowledge_snippets:
|
||||
agent.crew_knowledge_context = extract_knowledge_context(
|
||||
knowledge_snippets
|
||||
)
|
||||
if agent.crew_knowledge_context:
|
||||
task_prompt += agent.crew_knowledge_context
|
||||
|
||||
crewai_event_bus.emit(
|
||||
agent,
|
||||
|
||||
@@ -5,7 +5,7 @@ with CrewAI's agent system. Provides memory persistence, tool integration, and s
|
||||
output functionality.
|
||||
"""
|
||||
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Callable, Sequence
|
||||
from typing import Any, cast
|
||||
|
||||
from pydantic import ConfigDict, Field, PrivateAttr
|
||||
@@ -30,6 +30,7 @@ from crewai.events.types.agent_events import (
|
||||
)
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.types.callback import SerializableCallable
|
||||
from crewai.utilities import Logger
|
||||
from crewai.utilities.converter import Converter
|
||||
from crewai.utilities.import_utils import require
|
||||
@@ -50,7 +51,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
|
||||
_memory: Any = PrivateAttr(default=None)
|
||||
_max_iterations: int = PrivateAttr(default=10)
|
||||
function_calling_llm: Any = Field(default=None)
|
||||
step_callback: Callable[..., Any] | None = Field(default=None)
|
||||
step_callback: SerializableCallable | None = Field(default=None)
|
||||
|
||||
model: str = Field(default="gpt-4o")
|
||||
verbose: bool = Field(default=False)
|
||||
@@ -272,7 +273,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
|
||||
available_tools: list[Any] = self._tool_adapter.tools()
|
||||
self._graph.tools = available_tools
|
||||
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
def get_delegation_tools(self, agents: Sequence[BaseAgent]) -> list[BaseTool]:
|
||||
"""Implement delegation tools support for LangGraph.
|
||||
|
||||
Creates delegation tools that allow this agent to delegate tasks to other agents.
|
||||
|
||||
@@ -4,6 +4,7 @@ This module contains the OpenAIAgentAdapter class that integrates OpenAI Assista
|
||||
with CrewAI's agent system, providing tool integration and structured output support.
|
||||
"""
|
||||
|
||||
from collections.abc import Sequence
|
||||
from typing import Any, cast
|
||||
|
||||
from pydantic import ConfigDict, Field, PrivateAttr
|
||||
@@ -188,14 +189,14 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
self._openai_agent = OpenAIAgent(
|
||||
name=self.role,
|
||||
instructions=instructions,
|
||||
model=self.llm,
|
||||
model=str(self.llm),
|
||||
**self._agent_config or {},
|
||||
)
|
||||
|
||||
if all_tools:
|
||||
self.configure_tools(all_tools)
|
||||
|
||||
self.agent_executor = Runner
|
||||
self.agent_executor = Runner # type: ignore[assignment]
|
||||
|
||||
def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
|
||||
"""Configure tools for the OpenAI Assistant.
|
||||
@@ -221,7 +222,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
|
||||
"""
|
||||
return self._converter_adapter.post_process_result(result.final_output)
|
||||
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
def get_delegation_tools(self, agents: Sequence[BaseAgent]) -> list[BaseTool]:
|
||||
"""Implement delegation tools support.
|
||||
|
||||
Creates delegation tools that allow this agent to delegate tasks to other agents.
|
||||
|
||||
@@ -1,25 +1,30 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import Sequence
|
||||
from copy import copy as shallow_copy
|
||||
from hashlib import md5
|
||||
from pathlib import Path
|
||||
import re
|
||||
from typing import Any, Final, Literal
|
||||
from typing import TYPE_CHECKING, Annotated, Any, Final, Literal
|
||||
import uuid
|
||||
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
Field,
|
||||
InstanceOf,
|
||||
PrivateAttr,
|
||||
field_validator,
|
||||
model_validator,
|
||||
)
|
||||
from pydantic.functional_serializers import PlainSerializer
|
||||
from pydantic_core import PydanticCustomError
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.agent.internal.meta import AgentMeta
|
||||
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
|
||||
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
@@ -27,6 +32,7 @@ from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.knowledge.knowledge_config import KnowledgeConfig
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.mcp.config import MCPServerConfig
|
||||
from crewai.memory.memory_scope import MemoryScope, MemorySlice
|
||||
from crewai.memory.unified_memory import Memory
|
||||
@@ -42,6 +48,41 @@ from crewai.utilities.rpm_controller import RPMController
|
||||
from crewai.utilities.string_utils import interpolate_only
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.context import ExecutionContext
|
||||
from crewai.crew import Crew
|
||||
|
||||
|
||||
def _validate_crew_ref(value: Any) -> Any:
|
||||
return value
|
||||
|
||||
|
||||
def _serialize_crew_ref(value: Any) -> str | None:
|
||||
if value is None:
|
||||
return None
|
||||
return str(value.id) if hasattr(value, "id") else str(value)
|
||||
|
||||
|
||||
def _validate_llm_ref(value: Any) -> Any:
|
||||
return value
|
||||
|
||||
|
||||
def _resolve_agent(value: Any, info: Any) -> Any:
|
||||
if isinstance(value, BaseAgent) or value is None or not isinstance(value, dict):
|
||||
return value
|
||||
from crewai.agent.core import Agent
|
||||
|
||||
return Agent.model_validate(value, context=getattr(info, "context", None))
|
||||
|
||||
|
||||
def _serialize_llm_ref(value: Any) -> str | None:
|
||||
if value is None:
|
||||
return None
|
||||
if isinstance(value, str):
|
||||
return value
|
||||
return getattr(value, "model", str(value))
|
||||
|
||||
|
||||
_SLUG_RE: Final[re.Pattern[str]] = re.compile(
|
||||
r"^(?:crewai-amp:)?[a-zA-Z0-9][a-zA-Z0-9_-]*(?:#[\w-]+)?$"
|
||||
)
|
||||
@@ -119,10 +160,12 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
Set private attributes.
|
||||
"""
|
||||
|
||||
entity_type: Literal["agent"] = "agent"
|
||||
|
||||
__hash__ = object.__hash__
|
||||
_logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
|
||||
_rpm_controller: RPMController | None = PrivateAttr(default=None)
|
||||
_request_within_rpm_limit: Any = PrivateAttr(default=None)
|
||||
_request_within_rpm_limit: SerializableCallable | None = PrivateAttr(default=None)
|
||||
_original_role: str | None = PrivateAttr(default=None)
|
||||
_original_goal: str | None = PrivateAttr(default=None)
|
||||
_original_backstory: str | None = PrivateAttr(default=None)
|
||||
@@ -154,13 +197,21 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
max_iter: int = Field(
|
||||
default=25, description="Maximum iterations for an agent to execute a task"
|
||||
)
|
||||
agent_executor: Any = Field(
|
||||
agent_executor: InstanceOf[CrewAgentExecutorMixin] | None = Field(
|
||||
default=None, description="An instance of the CrewAgentExecutor class."
|
||||
)
|
||||
llm: Any = Field(
|
||||
default=None, description="Language model that will run the agent."
|
||||
)
|
||||
crew: Any = Field(default=None, description="Crew to which the agent belongs.")
|
||||
llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(default=None, description="Language model that will run the agent.")
|
||||
crew: Annotated[
|
||||
Crew | str | None,
|
||||
BeforeValidator(_validate_crew_ref),
|
||||
PlainSerializer(
|
||||
_serialize_crew_ref, return_type=str | None, when_used="always"
|
||||
),
|
||||
] = Field(default=None, description="Crew to which the agent belongs.")
|
||||
i18n: I18N = Field(
|
||||
default_factory=get_i18n, description="Internationalization settings."
|
||||
)
|
||||
@@ -172,7 +223,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
description="An instance of the ToolsHandler class.",
|
||||
)
|
||||
tools_results: list[dict[str, Any]] = Field(
|
||||
default=[], description="Results of the tools used by the agent."
|
||||
default_factory=list, description="Results of the tools used by the agent."
|
||||
)
|
||||
max_tokens: int | None = Field(
|
||||
default=None, description="Maximum number of tokens for the agent's execution."
|
||||
@@ -223,6 +274,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
description="Agent Skills. Accepts paths for discovery or pre-loaded Skill objects.",
|
||||
min_length=1,
|
||||
)
|
||||
execution_context: ExecutionContext | None = Field(default=None)
|
||||
|
||||
@model_validator(mode="before")
|
||||
@classmethod
|
||||
@@ -337,11 +389,12 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
if v:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None, info: Any) -> UUID4 | None:
|
||||
if v and not (info.context or {}).get("from_checkpoint"):
|
||||
raise PydanticCustomError(
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
)
|
||||
return v
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_private_attrs(self) -> Self:
|
||||
@@ -398,7 +451,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
|
||||
def get_delegation_tools(self, agents: Sequence[BaseAgent]) -> list[BaseTool]:
|
||||
"""Set the task tools that init BaseAgenTools class."""
|
||||
|
||||
@abstractmethod
|
||||
|
||||
@@ -3,20 +3,15 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import GetCoreSchemaHandler
|
||||
from pydantic_core import CoreSchema, core_schema
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.tools.cache_tools.cache_tools import CacheTools
|
||||
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
|
||||
|
||||
|
||||
class ToolsHandler:
|
||||
class ToolsHandler(BaseModel):
|
||||
"""Callback handler for tool usage.
|
||||
|
||||
Attributes:
|
||||
@@ -24,14 +19,8 @@ class ToolsHandler:
|
||||
cache: Optional cache handler for storing tool outputs.
|
||||
"""
|
||||
|
||||
def __init__(self, cache: CacheHandler | None = None) -> None:
|
||||
"""Initialize the callback handler.
|
||||
|
||||
Args:
|
||||
cache: Optional cache handler for storing tool outputs.
|
||||
"""
|
||||
self.cache: CacheHandler | None = cache
|
||||
self.last_used_tool: ToolCalling | InstructorToolCalling | None = None
|
||||
cache: CacheHandler | None = Field(default=None)
|
||||
last_used_tool: ToolCalling | InstructorToolCalling | None = Field(default=None)
|
||||
|
||||
def on_tool_use(
|
||||
self,
|
||||
@@ -48,7 +37,6 @@ class ToolsHandler:
|
||||
"""
|
||||
self.last_used_tool = calling
|
||||
if self.cache and should_cache and calling.tool_name != CacheTools().name:
|
||||
# Convert arguments to string for cache
|
||||
input_str = ""
|
||||
if calling.arguments:
|
||||
if isinstance(calling.arguments, dict):
|
||||
@@ -61,14 +49,3 @@ class ToolsHandler:
|
||||
input=input_str,
|
||||
output=output,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, _source_type: Any, _handler: GetCoreSchemaHandler
|
||||
) -> CoreSchema:
|
||||
"""Generate Pydantic core schema for BaseClient Protocol.
|
||||
|
||||
This allows the Protocol to be used in Pydantic models without
|
||||
requiring arbitrary_types_allowed=True.
|
||||
"""
|
||||
return core_schema.any_schema()
|
||||
|
||||
@@ -27,7 +27,7 @@ from crewai.cli.tools.main import ToolCommand
|
||||
from crewai.cli.train_crew import train_crew
|
||||
from crewai.cli.triggers.main import TriggersCommand
|
||||
from crewai.cli.update_crew import update_crew
|
||||
from crewai.cli.utils import build_env_with_tool_repository_credentials, read_toml
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials, read_toml
|
||||
from crewai.memory.storage.kickoff_task_outputs_storage import (
|
||||
KickoffTaskOutputsSQLiteStorage,
|
||||
)
|
||||
@@ -48,24 +48,18 @@ def crewai() -> None:
|
||||
@click.argument("uv_args", nargs=-1, type=click.UNPROCESSED)
|
||||
def uv(uv_args: tuple[str, ...]) -> None:
|
||||
"""A wrapper around uv commands that adds custom tool authentication through env vars."""
|
||||
env = os.environ.copy()
|
||||
try:
|
||||
pyproject_data = read_toml()
|
||||
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
|
||||
|
||||
for source_config in sources.values():
|
||||
if isinstance(source_config, dict):
|
||||
index = source_config.get("index")
|
||||
if index:
|
||||
index_env = build_env_with_tool_repository_credentials(index)
|
||||
env.update(index_env)
|
||||
except (FileNotFoundError, KeyError) as e:
|
||||
# Verify pyproject.toml exists first
|
||||
read_toml()
|
||||
except FileNotFoundError as e:
|
||||
raise SystemExit(
|
||||
"Error. A valid pyproject.toml file is required. Check that a valid pyproject.toml file exists in the current directory."
|
||||
) from e
|
||||
except Exception as e:
|
||||
raise SystemExit(f"Error: {e}") from e
|
||||
|
||||
env = build_env_with_all_tool_credentials()
|
||||
|
||||
try:
|
||||
subprocess.run( # noqa: S603
|
||||
["uv", *uv_args], # noqa: S607
|
||||
|
||||
@@ -46,7 +46,7 @@ def create_flow(name: str) -> None:
|
||||
tools_template_files = ["tools/__init__.py", "tools/custom_tool.py"]
|
||||
|
||||
crew_folders = [
|
||||
"poem_crew",
|
||||
"content_crew",
|
||||
]
|
||||
|
||||
def process_file(src_file: Path, dst_file: Path) -> None:
|
||||
|
||||
@@ -2,6 +2,8 @@ import subprocess
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials
|
||||
|
||||
|
||||
# Be mindful about changing this.
|
||||
# on some environments we don't use this command but instead uv sync directly
|
||||
@@ -13,7 +15,14 @@ def install_crew(proxy_options: list[str]) -> None:
|
||||
"""
|
||||
try:
|
||||
command = ["uv", "sync", *proxy_options]
|
||||
subprocess.run(command, check=True, capture_output=False, text=True) # noqa: S603
|
||||
|
||||
# Inject tool repository credentials so uv can authenticate
|
||||
# against private package indexes (e.g. crewai tool repository).
|
||||
# Without this, `uv sync` fails with 401 Unauthorized when the
|
||||
# project depends on tools from a private index.
|
||||
env = build_env_with_all_tool_credentials()
|
||||
|
||||
subprocess.run(command, check=True, capture_output=False, text=True, env=env) # noqa: S603
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
click.echo(f"An error occurred while running the crew: {e}", err=True)
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
from enum import Enum
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
import click
|
||||
from packaging import version
|
||||
|
||||
from crewai.cli.utils import build_env_with_tool_repository_credentials, read_toml
|
||||
from crewai.cli.utils import build_env_with_all_tool_credentials, read_toml
|
||||
from crewai.cli.version import get_crewai_version
|
||||
|
||||
|
||||
@@ -56,19 +55,7 @@ def execute_command(crew_type: CrewType) -> None:
|
||||
"""
|
||||
command = ["uv", "run", "kickoff" if crew_type == CrewType.FLOW else "run_crew"]
|
||||
|
||||
env = os.environ.copy()
|
||||
try:
|
||||
pyproject_data = read_toml()
|
||||
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
|
||||
|
||||
for source_config in sources.values():
|
||||
if isinstance(source_config, dict):
|
||||
index = source_config.get("index")
|
||||
if index:
|
||||
index_env = build_env_with_tool_repository_credentials(index)
|
||||
env.update(index_env)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
env = build_env_with_all_tool_credentials()
|
||||
|
||||
try:
|
||||
subprocess.run(command, capture_output=False, text=True, check=True, env=env) # noqa: S603
|
||||
|
||||
@@ -120,11 +120,11 @@ my_crew/
|
||||
my_flow/
|
||||
├── src/my_flow/
|
||||
│ ├── crews/ # Multiple crew definitions
|
||||
│ │ └── poem_crew/
|
||||
│ │ └── content_crew/
|
||||
│ │ ├── config/
|
||||
│ │ │ ├── agents.yaml
|
||||
│ │ │ └── tasks.yaml
|
||||
│ │ └── poem_crew.py
|
||||
│ │ └── content_crew.py
|
||||
│ ├── tools/ # Custom tools
|
||||
│ ├── main.py # Flow orchestration
|
||||
│ └── ...
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.13.0a6"
|
||||
"crewai[tools]==1.13.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -38,7 +38,7 @@ crewai run
|
||||
|
||||
This command initializes the {{name}} Flow as defined in your configuration.
|
||||
|
||||
This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folder.
|
||||
This example, unmodified, will run a content creation flow on AI Agents and save the output to `output/post.md`.
|
||||
|
||||
## Understanding Your Crew
|
||||
|
||||
|
||||
@@ -0,0 +1,33 @@
|
||||
planner:
|
||||
role: >
|
||||
Content Planner
|
||||
goal: >
|
||||
Plan a detailed and engaging blog post outline on {topic}
|
||||
backstory: >
|
||||
You're an experienced content strategist who excels at creating
|
||||
structured outlines for blog posts. You know how to organize ideas
|
||||
into a logical flow that keeps readers engaged from start to finish.
|
||||
|
||||
writer:
|
||||
role: >
|
||||
Content Writer
|
||||
goal: >
|
||||
Write a compelling and well-structured blog post on {topic}
|
||||
based on the provided outline
|
||||
backstory: >
|
||||
You're a skilled writer with a talent for turning outlines into
|
||||
engaging, informative blog posts. Your writing is clear, conversational,
|
||||
and backed by solid reasoning. You adapt your tone to the subject matter
|
||||
while keeping things accessible to a broad audience.
|
||||
|
||||
editor:
|
||||
role: >
|
||||
Content Editor
|
||||
goal: >
|
||||
Review and polish the blog post on {topic} to ensure it is
|
||||
publication-ready
|
||||
backstory: >
|
||||
You're a meticulous editor with years of experience refining written
|
||||
content. You have an eye for clarity, flow, grammar, and consistency.
|
||||
You improve prose without changing the author's voice and ensure every
|
||||
piece you touch is polished and professional.
|
||||
@@ -0,0 +1,50 @@
|
||||
planning_task:
|
||||
description: >
|
||||
Create a detailed outline for a blog post about {topic}.
|
||||
|
||||
The outline should include:
|
||||
- A compelling title
|
||||
- An introduction hook
|
||||
- 3-5 main sections with key points to cover in each
|
||||
- A conclusion with a call to action
|
||||
|
||||
Make the outline detailed enough that a writer can produce
|
||||
a full blog post from it without additional research.
|
||||
expected_output: >
|
||||
A structured blog post outline with a title, introduction notes,
|
||||
detailed section breakdowns, and conclusion notes.
|
||||
agent: planner
|
||||
|
||||
writing_task:
|
||||
description: >
|
||||
Using the outline provided, write a full blog post about {topic}.
|
||||
|
||||
Requirements:
|
||||
- Follow the outline structure closely
|
||||
- Write in a clear, engaging, and conversational tone
|
||||
- Each section should be 2-3 paragraphs
|
||||
- Include a strong introduction and conclusion
|
||||
- Target around 800-1200 words
|
||||
expected_output: >
|
||||
A complete blog post in markdown format, ready for editing.
|
||||
The post should follow the outline and be well-written with
|
||||
clear transitions between sections.
|
||||
agent: writer
|
||||
|
||||
editing_task:
|
||||
description: >
|
||||
Review and edit the blog post about {topic}.
|
||||
|
||||
Focus on:
|
||||
- Fixing any grammar or spelling errors
|
||||
- Improving sentence clarity and flow
|
||||
- Ensuring consistent tone throughout
|
||||
- Strengthening the introduction and conclusion
|
||||
- Removing any redundancy
|
||||
|
||||
Do not rewrite the post — refine and polish it.
|
||||
expected_output: >
|
||||
The final, polished blog post in markdown format without '```'.
|
||||
Publication-ready with clean formatting and professional prose.
|
||||
agent: editor
|
||||
output_file: output/post.md
|
||||
@@ -8,8 +8,8 @@ from crewai.project import CrewBase, agent, crew, task
|
||||
|
||||
|
||||
@CrewBase
|
||||
class PoemCrew:
|
||||
"""Poem Crew"""
|
||||
class ContentCrew:
|
||||
"""Content Crew"""
|
||||
|
||||
agents: list[BaseAgent]
|
||||
tasks: list[Task]
|
||||
@@ -20,26 +20,50 @@ class PoemCrew:
|
||||
agents_config = "config/agents.yaml"
|
||||
tasks_config = "config/tasks.yaml"
|
||||
|
||||
# If you would lik to add tools to your crew, you can learn more about it here:
|
||||
# If you would like to add tools to your crew, you can learn more about it here:
|
||||
# https://docs.crewai.com/concepts/agents#agent-tools
|
||||
@agent
|
||||
def poem_writer(self) -> Agent:
|
||||
def planner(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config["poem_writer"], # type: ignore[index]
|
||||
config=self.agents_config["planner"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@agent
|
||||
def writer(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config["writer"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@agent
|
||||
def editor(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config["editor"], # type: ignore[index]
|
||||
)
|
||||
|
||||
# To learn more about structured task outputs,
|
||||
# task dependencies, and task callbacks, check out the documentation:
|
||||
# https://docs.crewai.com/concepts/tasks#overview-of-a-task
|
||||
@task
|
||||
def write_poem(self) -> Task:
|
||||
def planning_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["write_poem"], # type: ignore[index]
|
||||
config=self.tasks_config["planning_task"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@task
|
||||
def writing_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["writing_task"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@task
|
||||
def editing_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config["editing_task"], # type: ignore[index]
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the Research Crew"""
|
||||
"""Creates the Content Crew"""
|
||||
# To learn how to add knowledge sources to your crew, check out the documentation:
|
||||
# https://docs.crewai.com/concepts/knowledge#what-is-knowledge
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
"""Poem crew template."""
|
||||
@@ -1,11 +0,0 @@
|
||||
poem_writer:
|
||||
role: >
|
||||
CrewAI Poem Writer
|
||||
goal: >
|
||||
Generate a funny, light heartedpoem about how CrewAI
|
||||
is awesome with a sentence count of {sentence_count}
|
||||
backstory: >
|
||||
You're a creative poet with a talent for capturing the essence of any topic
|
||||
in a beautiful and engaging way. Known for your ability to craft poems that
|
||||
resonate with readers, you bring a unique perspective and artistic flair to
|
||||
every piece you write.
|
||||
@@ -1,7 +0,0 @@
|
||||
write_poem:
|
||||
description: >
|
||||
Write a poem about how CrewAI is awesome.
|
||||
Ensure the poem is engaging and adheres to the specified sentence count of {sentence_count}.
|
||||
expected_output: >
|
||||
A beautifully crafted poem about CrewAI, with exactly {sentence_count} sentences.
|
||||
agent: poem_writer
|
||||
@@ -1,59 +1,64 @@
|
||||
#!/usr/bin/env python
|
||||
from random import randint
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.flow import Flow, listen, start
|
||||
|
||||
from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew
|
||||
from {{folder_name}}.crews.content_crew.content_crew import ContentCrew
|
||||
|
||||
|
||||
class PoemState(BaseModel):
|
||||
sentence_count: int = 1
|
||||
poem: str = ""
|
||||
class ContentState(BaseModel):
|
||||
topic: str = ""
|
||||
outline: str = ""
|
||||
draft: str = ""
|
||||
final_post: str = ""
|
||||
|
||||
|
||||
class PoemFlow(Flow[PoemState]):
|
||||
class ContentFlow(Flow[ContentState]):
|
||||
|
||||
@start()
|
||||
def generate_sentence_count(self, crewai_trigger_payload: dict = None):
|
||||
print("Generating sentence count")
|
||||
def plan_content(self, crewai_trigger_payload: dict = None):
|
||||
print("Planning content")
|
||||
|
||||
# Use trigger payload if available
|
||||
if crewai_trigger_payload:
|
||||
# Example: use trigger data to influence sentence count
|
||||
self.state.sentence_count = crewai_trigger_payload.get('sentence_count', randint(1, 5))
|
||||
self.state.topic = crewai_trigger_payload.get("topic", "AI Agents")
|
||||
print(f"Using trigger payload: {crewai_trigger_payload}")
|
||||
else:
|
||||
self.state.sentence_count = randint(1, 5)
|
||||
self.state.topic = "AI Agents"
|
||||
|
||||
@listen(generate_sentence_count)
|
||||
def generate_poem(self):
|
||||
print("Generating poem")
|
||||
print(f"Topic: {self.state.topic}")
|
||||
|
||||
@listen(plan_content)
|
||||
def generate_content(self):
|
||||
print(f"Generating content on: {self.state.topic}")
|
||||
result = (
|
||||
PoemCrew()
|
||||
ContentCrew()
|
||||
.crew()
|
||||
.kickoff(inputs={"sentence_count": self.state.sentence_count})
|
||||
.kickoff(inputs={"topic": self.state.topic})
|
||||
)
|
||||
|
||||
print("Poem generated", result.raw)
|
||||
self.state.poem = result.raw
|
||||
print("Content generated")
|
||||
self.state.final_post = result.raw
|
||||
|
||||
@listen(generate_poem)
|
||||
def save_poem(self):
|
||||
print("Saving poem")
|
||||
with open("poem.txt", "w") as f:
|
||||
f.write(self.state.poem)
|
||||
@listen(generate_content)
|
||||
def save_content(self):
|
||||
print("Saving content")
|
||||
output_dir = Path("output")
|
||||
output_dir.mkdir(exist_ok=True)
|
||||
with open(output_dir / "post.md", "w") as f:
|
||||
f.write(self.state.final_post)
|
||||
print("Post saved to output/post.md")
|
||||
|
||||
|
||||
def kickoff():
|
||||
poem_flow = PoemFlow()
|
||||
poem_flow.kickoff()
|
||||
content_flow = ContentFlow()
|
||||
content_flow.kickoff()
|
||||
|
||||
|
||||
def plot():
|
||||
poem_flow = PoemFlow()
|
||||
poem_flow.plot()
|
||||
content_flow = ContentFlow()
|
||||
content_flow.plot()
|
||||
|
||||
|
||||
def run_with_trigger():
|
||||
@@ -74,10 +79,10 @@ def run_with_trigger():
|
||||
|
||||
# Create flow and kickoff with trigger payload
|
||||
# The @start() methods will automatically receive crewai_trigger_payload parameter
|
||||
poem_flow = PoemFlow()
|
||||
content_flow = ContentFlow()
|
||||
|
||||
try:
|
||||
result = poem_flow.kickoff({"crewai_trigger_payload": trigger_payload})
|
||||
result = content_flow.kickoff({"crewai_trigger_payload": trigger_payload})
|
||||
return result
|
||||
except Exception as e:
|
||||
raise Exception(f"An error occurred while running the flow with trigger: {e}")
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.13.0a6"
|
||||
"crewai[tools]==1.13.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.13.0a6"
|
||||
"crewai[tools]==1.13.0"
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
|
||||
@@ -21,6 +21,7 @@ from crewai.cli.utils import (
|
||||
get_project_description,
|
||||
get_project_name,
|
||||
get_project_version,
|
||||
read_toml,
|
||||
tree_copy,
|
||||
tree_find_and_replace,
|
||||
)
|
||||
@@ -116,11 +117,26 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
self._print_tools_preview(tools_metadata)
|
||||
self._print_current_organization()
|
||||
|
||||
build_env = os.environ.copy()
|
||||
try:
|
||||
pyproject_data = read_toml()
|
||||
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
|
||||
|
||||
for source_config in sources.values():
|
||||
if isinstance(source_config, dict):
|
||||
index = source_config.get("index")
|
||||
if index:
|
||||
index_env = build_env_with_tool_repository_credentials(index)
|
||||
build_env.update(index_env)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
with tempfile.TemporaryDirectory() as temp_build_dir:
|
||||
subprocess.run( # noqa: S603
|
||||
["uv", "build", "--sdist", "--out-dir", temp_build_dir], # noqa: S607
|
||||
check=True,
|
||||
capture_output=False,
|
||||
env=build_env,
|
||||
)
|
||||
|
||||
tarball_filename = next(
|
||||
|
||||
@@ -484,8 +484,12 @@ def get_flows(flow_path: str = "main.py") -> list[Flow[Any]]:
|
||||
if flow_instances:
|
||||
break
|
||||
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
except Exception as e:
|
||||
import logging
|
||||
|
||||
logging.getLogger(__name__).debug(
|
||||
f"Could not load tool repository credentials: {e}"
|
||||
)
|
||||
|
||||
return flow_instances
|
||||
|
||||
@@ -549,6 +553,31 @@ def build_env_with_tool_repository_credentials(
|
||||
return env
|
||||
|
||||
|
||||
def build_env_with_all_tool_credentials() -> dict[str, Any]:
|
||||
"""
|
||||
Build environment dict with credentials for all tool repository indexes
|
||||
found in pyproject.toml's [tool.uv.sources] section.
|
||||
|
||||
Returns:
|
||||
dict: Environment variables with credentials for all private indexes.
|
||||
"""
|
||||
env = os.environ.copy()
|
||||
try:
|
||||
pyproject_data = read_toml()
|
||||
sources = pyproject_data.get("tool", {}).get("uv", {}).get("sources", {})
|
||||
|
||||
for source_config in sources.values():
|
||||
if isinstance(source_config, dict):
|
||||
index = source_config.get("index")
|
||||
if index:
|
||||
index_env = build_env_with_tool_repository_credentials(index)
|
||||
env.update(index_env)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
return env
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _load_module_from_file(
|
||||
init_file: Path, module_name: str | None = None
|
||||
|
||||
@@ -4,6 +4,23 @@ import contextvars
|
||||
import os
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.events.base_events import (
|
||||
get_emission_sequence,
|
||||
set_emission_counter,
|
||||
)
|
||||
from crewai.events.event_context import (
|
||||
_event_id_stack,
|
||||
_last_event_id,
|
||||
_triggering_event_id,
|
||||
)
|
||||
from crewai.flow.flow_context import (
|
||||
current_flow_id,
|
||||
current_flow_method_name,
|
||||
current_flow_request_id,
|
||||
)
|
||||
|
||||
|
||||
_platform_integration_token: contextvars.ContextVar[str | None] = (
|
||||
contextvars.ContextVar("platform_integration_token", default=None)
|
||||
@@ -63,3 +80,53 @@ def reset_current_task_id(token: contextvars.Token[str | None]) -> None:
|
||||
def get_current_task_id() -> str | None:
|
||||
"""Get the current task ID from the context."""
|
||||
return _current_task_id.get()
|
||||
|
||||
|
||||
class ExecutionContext(BaseModel):
|
||||
"""Snapshot of ContextVar execution state."""
|
||||
|
||||
current_task_id: str | None = Field(default=None)
|
||||
flow_request_id: str | None = Field(default=None)
|
||||
flow_id: str | None = Field(default=None)
|
||||
flow_method_name: str = Field(default="unknown")
|
||||
|
||||
event_id_stack: tuple[tuple[str, str], ...] = Field(default=())
|
||||
last_event_id: str | None = Field(default=None)
|
||||
triggering_event_id: str | None = Field(default=None)
|
||||
emission_sequence: int = Field(default=0)
|
||||
|
||||
feedback_callback_info: dict[str, Any] | None = Field(default=None)
|
||||
platform_token: str | None = Field(default=None)
|
||||
|
||||
|
||||
def capture_execution_context(
|
||||
feedback_callback_info: dict[str, Any] | None = None,
|
||||
) -> ExecutionContext:
|
||||
"""Read current ContextVars into an ExecutionContext."""
|
||||
return ExecutionContext(
|
||||
current_task_id=_current_task_id.get(),
|
||||
flow_request_id=current_flow_request_id.get(),
|
||||
flow_id=current_flow_id.get(),
|
||||
flow_method_name=current_flow_method_name.get(),
|
||||
event_id_stack=_event_id_stack.get(),
|
||||
last_event_id=_last_event_id.get(),
|
||||
triggering_event_id=_triggering_event_id.get(),
|
||||
emission_sequence=get_emission_sequence(),
|
||||
feedback_callback_info=feedback_callback_info,
|
||||
platform_token=_platform_integration_token.get(),
|
||||
)
|
||||
|
||||
|
||||
def apply_execution_context(ctx: ExecutionContext) -> None:
|
||||
"""Write an ExecutionContext back into the ContextVars."""
|
||||
_current_task_id.set(ctx.current_task_id)
|
||||
current_flow_request_id.set(ctx.flow_request_id)
|
||||
current_flow_id.set(ctx.flow_id)
|
||||
current_flow_method_name.set(ctx.flow_method_name)
|
||||
|
||||
_event_id_stack.set(ctx.event_id_stack)
|
||||
_last_event_id.set(ctx.last_event_id)
|
||||
_triggering_event_id.set(ctx.triggering_event_id)
|
||||
set_emission_counter(ctx.emission_sequence)
|
||||
|
||||
_platform_integration_token.set(ctx.platform_token)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Callable, Sequence
|
||||
from concurrent.futures import Future
|
||||
from copy import copy as shallow_copy
|
||||
from hashlib import md5
|
||||
@@ -10,7 +10,9 @@ from pathlib import Path
|
||||
import re
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Annotated,
|
||||
Any,
|
||||
Literal,
|
||||
cast,
|
||||
)
|
||||
import uuid
|
||||
@@ -21,12 +23,14 @@ from opentelemetry.context import attach, detach
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
Field,
|
||||
Json,
|
||||
PrivateAttr,
|
||||
field_validator,
|
||||
model_validator,
|
||||
)
|
||||
from pydantic.functional_serializers import PlainSerializer
|
||||
from pydantic_core import PydanticCustomError
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
@@ -37,6 +41,8 @@ if TYPE_CHECKING:
|
||||
from crewai_files import FileInput
|
||||
from opentelemetry.trace import Span
|
||||
|
||||
from crewai.context import ExecutionContext
|
||||
|
||||
try:
|
||||
from crewai_files import get_supported_content_types
|
||||
|
||||
@@ -49,7 +55,12 @@ except ImportError:
|
||||
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.agent_builder.base_agent import (
|
||||
BaseAgent,
|
||||
_resolve_agent,
|
||||
_serialize_llm_ref,
|
||||
_validate_llm_ref,
|
||||
)
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.crews.utils import (
|
||||
@@ -132,6 +143,12 @@ from crewai.utilities.training_handler import CrewTrainingHandler
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
|
||||
|
||||
|
||||
def _resolve_agents(value: Any, info: Any) -> Any:
|
||||
if not isinstance(value, list):
|
||||
return value
|
||||
return [_resolve_agent(a, info) for a in value]
|
||||
|
||||
|
||||
class Crew(FlowTrackable, BaseModel):
|
||||
"""
|
||||
Represents a group of agents, defining how they should collaborate and the
|
||||
@@ -170,6 +187,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
fingerprinting.
|
||||
"""
|
||||
|
||||
entity_type: Literal["crew"] = "crew"
|
||||
|
||||
__hash__ = object.__hash__
|
||||
_execution_span: Span | None = PrivateAttr()
|
||||
_rpm_controller: RPMController = PrivateAttr()
|
||||
@@ -191,7 +210,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
name: str | None = Field(default="crew")
|
||||
cache: bool = Field(default=True)
|
||||
tasks: list[Task] = Field(default_factory=list)
|
||||
agents: list[BaseAgent] = Field(default_factory=list)
|
||||
agents: Annotated[
|
||||
list[BaseAgent],
|
||||
BeforeValidator(_resolve_agents),
|
||||
] = Field(default_factory=list)
|
||||
process: Process = Field(default=Process.sequential)
|
||||
verbose: bool = Field(default=False)
|
||||
memory: bool | Memory | MemoryScope | MemorySlice | None = Field(
|
||||
@@ -209,15 +231,20 @@ class Crew(FlowTrackable, BaseModel):
|
||||
default=None,
|
||||
description="Metrics for the LLM usage during all tasks execution.",
|
||||
)
|
||||
manager_llm: str | BaseLLM | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
manager_agent: BaseAgent | None = Field(
|
||||
description="Custom agent that will be used as manager.", default=None
|
||||
)
|
||||
function_calling_llm: str | LLM | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
manager_llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(description="Language model that will run the agent.", default=None)
|
||||
manager_agent: Annotated[
|
||||
BaseAgent | None,
|
||||
BeforeValidator(_resolve_agent),
|
||||
] = Field(description="Custom agent that will be used as manager.", default=None)
|
||||
function_calling_llm: Annotated[
|
||||
str | LLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(description="Language model that will run the agent.", default=None)
|
||||
config: Json[dict[str, Any]] | dict[str, Any] | None = Field(default=None)
|
||||
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
||||
share_crew: bool | None = Field(default=False)
|
||||
@@ -266,7 +293,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
default=False,
|
||||
description="Plan the crew execution and add the plan to the crew.",
|
||||
)
|
||||
planning_llm: str | BaseLLM | Any | None = Field(
|
||||
planning_llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(
|
||||
default=None,
|
||||
description=(
|
||||
"Language model that will run the AgentPlanner if planning is True."
|
||||
@@ -287,7 +318,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
"knowledge object."
|
||||
),
|
||||
)
|
||||
chat_llm: str | BaseLLM | Any | None = Field(
|
||||
chat_llm: Annotated[
|
||||
str | BaseLLM | None,
|
||||
BeforeValidator(_validate_llm_ref),
|
||||
PlainSerializer(_serialize_llm_ref, return_type=str | None, when_used="json"),
|
||||
] = Field(
|
||||
default=None,
|
||||
description="LLM used to handle chatting with the crew.",
|
||||
)
|
||||
@@ -313,14 +348,20 @@ class Crew(FlowTrackable, BaseModel):
|
||||
description="Whether to enable tracing for the crew. True=always enable, False=always disable, None=check environment/user settings.",
|
||||
)
|
||||
|
||||
execution_context: ExecutionContext | None = Field(default=None)
|
||||
checkpoint_inputs: dict[str, Any] | None = Field(default=None)
|
||||
checkpoint_train: bool | None = Field(default=None)
|
||||
checkpoint_kickoff_event_id: str | None = Field(default=None)
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None, info: Any) -> UUID4 | None:
|
||||
"""Prevent manual setting of the 'id' field by users."""
|
||||
if v:
|
||||
if v and not (info.context or {}).get("from_checkpoint"):
|
||||
raise PydanticCustomError(
|
||||
"may_not_set_field", "The 'id' field cannot be set by the user.", {}
|
||||
)
|
||||
return v
|
||||
|
||||
@field_validator("config", mode="before")
|
||||
@classmethod
|
||||
@@ -1311,7 +1352,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
and hasattr(agent, "multimodal")
|
||||
and getattr(agent, "multimodal", False)
|
||||
):
|
||||
if not (agent.llm and agent.llm.supports_multimodal()):
|
||||
if not (isinstance(agent.llm, BaseLLM) and agent.llm.supports_multimodal()):
|
||||
tools = self._add_multimodal_tools(agent, tools)
|
||||
|
||||
if agent and (hasattr(agent, "apps") and getattr(agent, "apps", None)):
|
||||
@@ -1328,7 +1369,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
files = get_all_files(self.id, task.id)
|
||||
if files:
|
||||
supported_types: list[str] = []
|
||||
if agent and agent.llm and agent.llm.supports_multimodal():
|
||||
if (
|
||||
agent
|
||||
and isinstance(agent.llm, BaseLLM)
|
||||
and agent.llm.supports_multimodal()
|
||||
):
|
||||
provider = (
|
||||
getattr(agent.llm, "provider", None)
|
||||
or getattr(agent.llm, "model", None)
|
||||
@@ -1384,7 +1429,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self,
|
||||
tools: list[BaseTool],
|
||||
task_agent: BaseAgent,
|
||||
agents: list[BaseAgent],
|
||||
agents: Sequence[BaseAgent],
|
||||
) -> list[BaseTool]:
|
||||
if hasattr(task_agent, "get_delegation_tools"):
|
||||
delegation_tools = task_agent.get_delegation_tools(agents)
|
||||
@@ -1781,17 +1826,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
token_sum = self.manager_agent._token_process.get_summary()
|
||||
total_usage_metrics.add_usage_metrics(token_sum)
|
||||
|
||||
if (
|
||||
self.manager_agent
|
||||
and hasattr(self.manager_agent, "llm")
|
||||
and hasattr(self.manager_agent.llm, "get_token_usage_summary")
|
||||
):
|
||||
if self.manager_agent:
|
||||
if isinstance(self.manager_agent.llm, BaseLLM):
|
||||
llm_usage = self.manager_agent.llm.get_token_usage_summary()
|
||||
else:
|
||||
llm_usage = self.manager_agent.llm._token_process.get_summary()
|
||||
|
||||
total_usage_metrics.add_usage_metrics(llm_usage)
|
||||
total_usage_metrics.add_usage_metrics(llm_usage)
|
||||
|
||||
self.usage_metrics = total_usage_metrics
|
||||
return total_usage_metrics
|
||||
|
||||
@@ -21,7 +21,7 @@ class CrewOutput(BaseModel):
|
||||
description="JSON dict output of Crew", default=None
|
||||
)
|
||||
tasks_output: list[TaskOutput] = Field(
|
||||
description="Output of each task", default=[]
|
||||
description="Output of each task", default_factory=list
|
||||
)
|
||||
token_usage: UsageMetrics = Field(
|
||||
description="Processed token summary", default_factory=UsageMetrics
|
||||
|
||||
@@ -11,6 +11,7 @@ from opentelemetry import baggage
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.crews.crew_output import CrewOutput
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.rag.embeddings.types import EmbedderConfig
|
||||
from crewai.skills.loader import activate_skill, discover_skills
|
||||
from crewai.skills.models import INSTRUCTIONS, Skill as SkillModel
|
||||
@@ -50,7 +51,7 @@ def enable_agent_streaming(agents: Iterable[BaseAgent]) -> None:
|
||||
agents: Iterable of agents to enable streaming on.
|
||||
"""
|
||||
for agent in agents:
|
||||
if agent.llm is not None:
|
||||
if isinstance(agent.llm, BaseLLM):
|
||||
agent.llm.stream = True
|
||||
|
||||
|
||||
|
||||
@@ -25,13 +25,25 @@ def _get_or_create_counter() -> Iterator[int]:
|
||||
return counter
|
||||
|
||||
|
||||
_last_emitted: contextvars.ContextVar[int] = contextvars.ContextVar(
|
||||
"_last_emitted", default=0
|
||||
)
|
||||
|
||||
|
||||
def get_next_emission_sequence() -> int:
|
||||
"""Get the next emission sequence number.
|
||||
|
||||
Returns:
|
||||
The next sequence number.
|
||||
"""
|
||||
return next(_get_or_create_counter())
|
||||
seq = next(_get_or_create_counter())
|
||||
_last_emitted.set(seq)
|
||||
return seq
|
||||
|
||||
|
||||
def get_emission_sequence() -> int:
|
||||
"""Get the current emission sequence value without incrementing."""
|
||||
return _last_emitted.get()
|
||||
|
||||
|
||||
def reset_emission_counter() -> None:
|
||||
@@ -41,6 +53,14 @@ def reset_emission_counter() -> None:
|
||||
"""
|
||||
counter: Iterator[int] = itertools.count(start=1)
|
||||
_emission_counter.set(counter)
|
||||
_last_emitted.set(0)
|
||||
|
||||
|
||||
def set_emission_counter(start: int) -> None:
|
||||
"""Set the emission counter to resume from a given value."""
|
||||
counter: Iterator[int] = itertools.count(start=start + 1)
|
||||
_emission_counter.set(counter)
|
||||
_last_emitted.set(start)
|
||||
|
||||
|
||||
class BaseEvent(BaseModel):
|
||||
|
||||
@@ -78,9 +78,15 @@ from crewai.events.types.mcp_events import (
|
||||
MCPConnectionCompletedEvent,
|
||||
MCPConnectionFailedEvent,
|
||||
MCPConnectionStartedEvent,
|
||||
MCPToolExecutionCompletedEvent,
|
||||
MCPToolExecutionFailedEvent,
|
||||
MCPToolExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryRetrievalCompletedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
)
|
||||
from crewai.events.types.observation_events import (
|
||||
GoalAchievedEarlyEvent,
|
||||
PlanRefinementEvent,
|
||||
@@ -94,6 +100,12 @@ from crewai.events.types.reasoning_events import (
|
||||
AgentReasoningFailedEvent,
|
||||
AgentReasoningStartedEvent,
|
||||
)
|
||||
from crewai.events.types.skill_events import (
|
||||
SkillActivatedEvent,
|
||||
SkillDiscoveryCompletedEvent,
|
||||
SkillLoadFailedEvent,
|
||||
SkillLoadedEvent,
|
||||
)
|
||||
from crewai.events.types.task_events import (
|
||||
TaskCompletedEvent,
|
||||
TaskFailedEvent,
|
||||
@@ -478,6 +490,7 @@ class EventListener(BaseEventListener):
|
||||
self.formatter.handle_guardrail_completed(
|
||||
event.success, event.error, event.retry_count
|
||||
)
|
||||
self._telemetry.feature_usage_span("guardrail:execution")
|
||||
|
||||
@crewai_event_bus.on(CrewTestStartedEvent)
|
||||
def on_crew_test_started(source: Any, event: CrewTestStartedEvent) -> None:
|
||||
@@ -559,6 +572,7 @@ class EventListener(BaseEventListener):
|
||||
event.plan,
|
||||
event.ready,
|
||||
)
|
||||
self._telemetry.feature_usage_span("planning:creation")
|
||||
|
||||
@crewai_event_bus.on(AgentReasoningFailedEvent)
|
||||
def on_agent_reasoning_failed(_: Any, event: AgentReasoningFailedEvent) -> None:
|
||||
@@ -616,6 +630,7 @@ class EventListener(BaseEventListener):
|
||||
event.replan_count,
|
||||
event.completed_steps_preserved,
|
||||
)
|
||||
self._telemetry.feature_usage_span("planning:replan")
|
||||
|
||||
@crewai_event_bus.on(GoalAchievedEarlyEvent)
|
||||
def on_goal_achieved_early(_: Any, event: GoalAchievedEarlyEvent) -> None:
|
||||
@@ -623,6 +638,25 @@ class EventListener(BaseEventListener):
|
||||
event.steps_completed,
|
||||
event.steps_remaining,
|
||||
)
|
||||
self._telemetry.feature_usage_span("planning:goal_achieved_early")
|
||||
|
||||
# ----------- SKILL EVENTS -----------
|
||||
|
||||
@crewai_event_bus.on(SkillDiscoveryCompletedEvent)
|
||||
def on_skill_discovery(_: Any, event: SkillDiscoveryCompletedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("skill:discovery")
|
||||
|
||||
@crewai_event_bus.on(SkillLoadedEvent)
|
||||
def on_skill_loaded(_: Any, event: SkillLoadedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("skill:loaded")
|
||||
|
||||
@crewai_event_bus.on(SkillLoadFailedEvent)
|
||||
def on_skill_load_failed(_: Any, event: SkillLoadFailedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("skill:load_failed")
|
||||
|
||||
@crewai_event_bus.on(SkillActivatedEvent)
|
||||
def on_skill_activated(_: Any, event: SkillActivatedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("skill:activated")
|
||||
|
||||
# ----------- AGENT LOGGING EVENTS -----------
|
||||
|
||||
@@ -662,6 +696,7 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.is_multiturn,
|
||||
)
|
||||
self._telemetry.feature_usage_span("a2a:delegation")
|
||||
|
||||
@crewai_event_bus.on(A2AConversationStartedEvent)
|
||||
def on_a2a_conversation_started(
|
||||
@@ -703,6 +738,7 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.total_turns,
|
||||
)
|
||||
self._telemetry.feature_usage_span("a2a:conversation")
|
||||
|
||||
@crewai_event_bus.on(A2APollingStartedEvent)
|
||||
def on_a2a_polling_started(_: Any, event: A2APollingStartedEvent) -> None:
|
||||
@@ -744,6 +780,7 @@ class EventListener(BaseEventListener):
|
||||
event.connection_duration_ms,
|
||||
event.is_reconnect,
|
||||
)
|
||||
self._telemetry.feature_usage_span("mcp:connection")
|
||||
|
||||
@crewai_event_bus.on(MCPConnectionFailedEvent)
|
||||
def on_mcp_connection_failed(_: Any, event: MCPConnectionFailedEvent) -> None:
|
||||
@@ -754,6 +791,7 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.error_type,
|
||||
)
|
||||
self._telemetry.feature_usage_span("mcp:connection_failed")
|
||||
|
||||
@crewai_event_bus.on(MCPConfigFetchFailedEvent)
|
||||
def on_mcp_config_fetch_failed(
|
||||
@@ -764,6 +802,7 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.error_type,
|
||||
)
|
||||
self._telemetry.feature_usage_span("mcp:config_fetch_failed")
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionStartedEvent)
|
||||
def on_mcp_tool_execution_started(
|
||||
@@ -775,6 +814,12 @@ class EventListener(BaseEventListener):
|
||||
event.tool_args,
|
||||
)
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionCompletedEvent)
|
||||
def on_mcp_tool_execution_completed(
|
||||
_: Any, event: MCPToolExecutionCompletedEvent
|
||||
) -> None:
|
||||
self._telemetry.feature_usage_span("mcp:tool_execution")
|
||||
|
||||
@crewai_event_bus.on(MCPToolExecutionFailedEvent)
|
||||
def on_mcp_tool_execution_failed(
|
||||
_: Any, event: MCPToolExecutionFailedEvent
|
||||
@@ -786,6 +831,45 @@ class EventListener(BaseEventListener):
|
||||
event.error,
|
||||
event.error_type,
|
||||
)
|
||||
self._telemetry.feature_usage_span("mcp:tool_execution_failed")
|
||||
|
||||
# ----------- MEMORY TELEMETRY -----------
|
||||
|
||||
@crewai_event_bus.on(MemorySaveCompletedEvent)
|
||||
def on_memory_save_completed(_: Any, event: MemorySaveCompletedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("memory:save")
|
||||
|
||||
@crewai_event_bus.on(MemoryQueryCompletedEvent)
|
||||
def on_memory_query_completed(_: Any, event: MemoryQueryCompletedEvent) -> None:
|
||||
self._telemetry.feature_usage_span("memory:query")
|
||||
|
||||
@crewai_event_bus.on(MemoryRetrievalCompletedEvent)
|
||||
def on_memory_retrieval_completed_telemetry(
|
||||
_: Any, event: MemoryRetrievalCompletedEvent
|
||||
) -> None:
|
||||
self._telemetry.feature_usage_span("memory:retrieval")
|
||||
|
||||
@crewai_event_bus.on(CrewKickoffStartedEvent)
|
||||
def on_crew_kickoff_hooks(_: Any, event: CrewKickoffStartedEvent) -> None:
|
||||
from crewai.hooks.llm_hooks import (
|
||||
get_after_llm_call_hooks,
|
||||
get_before_llm_call_hooks,
|
||||
)
|
||||
from crewai.hooks.tool_hooks import (
|
||||
get_after_tool_call_hooks,
|
||||
get_before_tool_call_hooks,
|
||||
)
|
||||
|
||||
has_hooks = any(
|
||||
[
|
||||
get_before_llm_call_hooks(),
|
||||
get_after_llm_call_hooks(),
|
||||
get_before_tool_call_hooks(),
|
||||
get_after_tool_call_hooks(),
|
||||
]
|
||||
)
|
||||
if has_hooks:
|
||||
self._telemetry.feature_usage_span("hooks:registered")
|
||||
|
||||
|
||||
event_listener = EventListener()
|
||||
|
||||
@@ -1907,6 +1907,37 @@ class AgentExecutor(Flow[AgentExecutorState], CrewAgentExecutorMixin):
|
||||
"original_tool": original_tool,
|
||||
}
|
||||
|
||||
def _extract_tool_name(self, tool_call: Any) -> str:
|
||||
"""Extract tool name from various tool call formats."""
|
||||
if hasattr(tool_call, "function"):
|
||||
return sanitize_tool_name(tool_call.function.name)
|
||||
if hasattr(tool_call, "function_call") and tool_call.function_call:
|
||||
return sanitize_tool_name(tool_call.function_call.name)
|
||||
if hasattr(tool_call, "name"):
|
||||
return sanitize_tool_name(tool_call.name)
|
||||
if isinstance(tool_call, dict):
|
||||
func_info = tool_call.get("function", {})
|
||||
return sanitize_tool_name(
|
||||
func_info.get("name", "") or tool_call.get("name", "unknown")
|
||||
)
|
||||
return "unknown"
|
||||
|
||||
@router(execute_native_tool)
|
||||
def check_native_todo_completion(
|
||||
self,
|
||||
) -> Literal["todo_satisfied", "todo_not_satisfied"]:
|
||||
"""Check if the native tool execution satisfied the active todo.
|
||||
|
||||
Similar to check_todo_completion but for native tool execution path.
|
||||
"""
|
||||
current_todo = self.state.todos.current_todo
|
||||
|
||||
if not current_todo:
|
||||
return "todo_not_satisfied"
|
||||
|
||||
# For native tools, any tool execution satisfies the todo
|
||||
return "todo_satisfied"
|
||||
|
||||
@listen("initialized")
|
||||
def continue_iteration(self) -> Literal["check_iteration"]:
|
||||
"""Bridge listener that connects iteration loop back to iteration check."""
|
||||
|
||||
@@ -25,6 +25,7 @@ import logging
|
||||
import threading
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Annotated,
|
||||
Any,
|
||||
ClassVar,
|
||||
Generic,
|
||||
@@ -41,9 +42,11 @@ from opentelemetry import baggage
|
||||
from opentelemetry.context import attach, detach
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
ConfigDict,
|
||||
Field,
|
||||
PrivateAttr,
|
||||
SerializeAsAny,
|
||||
ValidationError,
|
||||
)
|
||||
from pydantic._internal._model_construction import ModelMetaclass
|
||||
@@ -115,6 +118,7 @@ from crewai.memory.unified_memory import Memory
|
||||
if TYPE_CHECKING:
|
||||
from crewai_files import FileInput
|
||||
|
||||
from crewai.context import ExecutionContext
|
||||
from crewai.flow.async_feedback.types import PendingFeedbackContext
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
|
||||
@@ -134,6 +138,19 @@ from crewai.utilities.streaming import (
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _resolve_persistence(value: Any) -> Any:
|
||||
if value is None or isinstance(value, FlowPersistence):
|
||||
return value
|
||||
if isinstance(value, dict):
|
||||
from crewai.flow.persistence.base import _persistence_registry
|
||||
|
||||
type_name = value.get("persistence_type", "SQLiteFlowPersistence")
|
||||
cls = _persistence_registry.get(type_name)
|
||||
if cls is not None:
|
||||
return cls.model_validate(value)
|
||||
return value
|
||||
|
||||
|
||||
class FlowState(BaseModel):
|
||||
"""Base model for all flow states, ensuring each state has a unique ID."""
|
||||
|
||||
@@ -883,6 +900,8 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta):
|
||||
_routers: ClassVar[set[FlowMethodName]] = set()
|
||||
_router_paths: ClassVar[dict[FlowMethodName, list[FlowMethodName]]] = {}
|
||||
|
||||
entity_type: Literal["flow"] = "flow"
|
||||
|
||||
initial_state: Any = Field(default=None)
|
||||
name: str | None = Field(default=None)
|
||||
tracing: bool | None = Field(default=None)
|
||||
@@ -893,8 +912,17 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta):
|
||||
human_feedback_history: list[HumanFeedbackResult] = Field(default_factory=list)
|
||||
last_human_feedback: HumanFeedbackResult | None = Field(default=None)
|
||||
|
||||
persistence: Any = Field(default=None, exclude=True)
|
||||
max_method_calls: int = Field(default=100, exclude=True)
|
||||
persistence: Annotated[
|
||||
SerializeAsAny[FlowPersistence] | Any,
|
||||
BeforeValidator(lambda v, _: _resolve_persistence(v)),
|
||||
] = Field(default=None)
|
||||
max_method_calls: int = Field(default=100)
|
||||
|
||||
execution_context: ExecutionContext | None = Field(default=None)
|
||||
checkpoint_completed_methods: set[str] | None = Field(default=None)
|
||||
checkpoint_method_outputs: list[Any] | None = Field(default=None)
|
||||
checkpoint_method_counts: dict[str, int] | None = Field(default=None)
|
||||
checkpoint_state: dict[str, Any] | None = Field(default=None)
|
||||
|
||||
_methods: dict[FlowMethodName, FlowMethod[Any, Any]] = PrivateAttr(
|
||||
default_factory=dict
|
||||
|
||||
@@ -5,14 +5,17 @@ from __future__ import annotations
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.flow.async_feedback.types import PendingFeedbackContext
|
||||
|
||||
|
||||
class FlowPersistence(ABC):
|
||||
_persistence_registry: dict[str, type[FlowPersistence]] = {}
|
||||
|
||||
|
||||
class FlowPersistence(BaseModel, ABC):
|
||||
"""Abstract base class for flow state persistence.
|
||||
|
||||
This class defines the interface that all persistence implementations must follow.
|
||||
@@ -24,6 +27,13 @@ class FlowPersistence(ABC):
|
||||
- clear_pending_feedback(): Clears pending feedback after resume
|
||||
"""
|
||||
|
||||
persistence_type: str = Field(default="base")
|
||||
|
||||
def __init_subclass__(cls, **kwargs: Any) -> None:
|
||||
super().__init_subclass__(**kwargs)
|
||||
if not getattr(cls, "__abstractmethods__", set()):
|
||||
_persistence_registry[cls.__name__] = cls
|
||||
|
||||
@abstractmethod
|
||||
def init_db(self) -> None:
|
||||
"""Initialize the persistence backend.
|
||||
@@ -95,7 +105,7 @@ class FlowPersistence(ABC):
|
||||
"""
|
||||
return None
|
||||
|
||||
def clear_pending_feedback(self, flow_uuid: str) -> None: # noqa: B027
|
||||
def clear_pending_feedback(self, flow_uuid: str) -> None:
|
||||
"""Clear the pending feedback marker after successful resume.
|
||||
|
||||
This is called after feedback is received and the flow resumes.
|
||||
|
||||
@@ -9,7 +9,8 @@ from pathlib import Path
|
||||
import sqlite3
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, Field, PrivateAttr, model_validator
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.flow.persistence.base import FlowPersistence
|
||||
from crewai.utilities.lock_store import lock as store_lock
|
||||
@@ -50,26 +51,22 @@ class SQLiteFlowPersistence(FlowPersistence):
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(self, db_path: str | None = None) -> None:
|
||||
"""Initialize SQLite persistence.
|
||||
persistence_type: str = Field(default="SQLiteFlowPersistence")
|
||||
db_path: str = Field(
|
||||
default_factory=lambda: str(Path(db_storage_path()) / "flow_states.db")
|
||||
)
|
||||
_lock_name: str = PrivateAttr()
|
||||
|
||||
Args:
|
||||
db_path: Path to the SQLite database file. If not provided, uses
|
||||
db_storage_path() from utilities.paths.
|
||||
def __init__(self, db_path: str | None = None, /, **kwargs: Any) -> None:
|
||||
if db_path is not None:
|
||||
kwargs["db_path"] = db_path
|
||||
super().__init__(**kwargs)
|
||||
|
||||
Raises:
|
||||
ValueError: If db_path is invalid
|
||||
"""
|
||||
|
||||
# Get path from argument or default location
|
||||
path = db_path or str(Path(db_storage_path()) / "flow_states.db")
|
||||
|
||||
if not path:
|
||||
raise ValueError("Database path must be provided")
|
||||
|
||||
self.db_path = path # Now mypy knows this is str
|
||||
@model_validator(mode="after")
|
||||
def _setup(self) -> Self:
|
||||
self._lock_name = f"sqlite:{os.path.realpath(self.db_path)}"
|
||||
self.init_db()
|
||||
return self
|
||||
|
||||
def init_db(self) -> None:
|
||||
"""Create the necessary tables if they don't exist."""
|
||||
|
||||
@@ -40,7 +40,9 @@ class LiteAgentOutput(BaseModel):
|
||||
usage_metrics: dict[str, Any] | None = Field(
|
||||
description="Token usage metrics for this execution", default=None
|
||||
)
|
||||
messages: list[LLMMessage] = Field(description="Messages of the agent", default=[])
|
||||
messages: list[LLMMessage] = Field(
|
||||
description="Messages of the agent", default_factory=list
|
||||
)
|
||||
|
||||
plan: str | None = Field(
|
||||
default=None, description="The execution plan that was generated, if any"
|
||||
|
||||
@@ -32,6 +32,10 @@ class MemoryScope(BaseModel):
|
||||
"""Extract memory dependency and normalize root path before validation."""
|
||||
if isinstance(data, MemoryScope):
|
||||
return data
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"Expected dict or MemoryScope, got {type(data).__name__}")
|
||||
if "memory" not in data:
|
||||
raise ValueError("MemoryScope requires a 'memory' key")
|
||||
memory = data.pop("memory")
|
||||
instance: MemoryScope = handler(data)
|
||||
instance._memory = memory
|
||||
@@ -199,6 +203,10 @@ class MemorySlice(BaseModel):
|
||||
"""Extract memory dependency and normalize scopes before validation."""
|
||||
if isinstance(data, MemorySlice):
|
||||
return data
|
||||
if not isinstance(data, dict):
|
||||
raise ValueError(f"Expected dict or MemorySlice, got {type(data).__name__}")
|
||||
if "memory" not in data:
|
||||
raise ValueError("MemorySlice requires a 'memory' key")
|
||||
memory = data.pop("memory")
|
||||
data["scopes"] = [s.rstrip("/") or "/" for s in data.get("scopes", [])]
|
||||
instance: MemorySlice = handler(data)
|
||||
|
||||
18
lib/crewai/src/crewai/runtime_state.py
Normal file
18
lib/crewai/src/crewai/runtime_state.py
Normal file
@@ -0,0 +1,18 @@
|
||||
"""Unified runtime state for crewAI.
|
||||
|
||||
``RuntimeState`` is a ``RootModel`` whose ``model_dump_json()`` produces a
|
||||
complete, self-contained snapshot of every active entity in the program.
|
||||
|
||||
The ``Entity`` type alias and ``RuntimeState`` model are built at import time
|
||||
in ``crewai/__init__.py`` after all forward references are resolved.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def _entity_discriminator(v: dict[str, Any] | object) -> str:
|
||||
if isinstance(v, dict):
|
||||
raw = v.get("entity_type", "agent")
|
||||
else:
|
||||
raw = getattr(v, "entity_type", "agent")
|
||||
return str(raw)
|
||||
@@ -1,6 +1,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Sequence
|
||||
from concurrent.futures import Future
|
||||
import contextvars
|
||||
from copy import copy as shallow_copy
|
||||
@@ -12,6 +13,7 @@ import logging
|
||||
from pathlib import Path
|
||||
import threading
|
||||
from typing import (
|
||||
Annotated,
|
||||
Any,
|
||||
ClassVar,
|
||||
cast,
|
||||
@@ -24,6 +26,7 @@ import warnings
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
BaseModel,
|
||||
BeforeValidator,
|
||||
Field,
|
||||
PrivateAttr,
|
||||
field_validator,
|
||||
@@ -32,7 +35,7 @@ from pydantic import (
|
||||
from pydantic_core import PydanticCustomError
|
||||
from typing_extensions import Self
|
||||
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent, _resolve_agent
|
||||
from crewai.context import reset_current_task_id, set_current_task_id
|
||||
from crewai.core.providers.content_processor import process_content
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
@@ -41,6 +44,7 @@ from crewai.events.types.task_events import (
|
||||
TaskFailedEvent,
|
||||
TaskStartedEvent,
|
||||
)
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.security import Fingerprint, SecurityConfig
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
@@ -128,9 +132,10 @@ class Task(BaseModel):
|
||||
callback: SerializableCallable | None = Field(
|
||||
description="Callback to be executed after the task is completed.", default=None
|
||||
)
|
||||
agent: BaseAgent | None = Field(
|
||||
description="Agent responsible for execution the task.", default=None
|
||||
)
|
||||
agent: Annotated[
|
||||
BaseAgent | None,
|
||||
BeforeValidator(_resolve_agent),
|
||||
] = Field(description="Agent responsible for execution the task.", default=None)
|
||||
context: list[Task] | None | _NotSpecified = Field(
|
||||
description="Other tasks that will have their output used as context for this task.",
|
||||
default=NOT_SPECIFIED,
|
||||
@@ -316,6 +321,10 @@ class Task(BaseModel):
|
||||
if self.agent is None:
|
||||
raise ValueError("Agent is required to use LLMGuardrail")
|
||||
|
||||
if not isinstance(self.agent.llm, BaseLLM):
|
||||
raise ValueError(
|
||||
"Agent must have a BaseLLM instance to use LLMGuardrail"
|
||||
)
|
||||
self._guardrail = cast(
|
||||
GuardrailCallable,
|
||||
LLMGuardrail(description=self.guardrail, llm=self.agent.llm),
|
||||
@@ -339,6 +348,10 @@ class Task(BaseModel):
|
||||
)
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
|
||||
if not isinstance(self.agent.llm, BaseLLM):
|
||||
raise ValueError(
|
||||
"Agent must have a BaseLLM instance to use LLMGuardrail"
|
||||
)
|
||||
guardrails.append(
|
||||
cast(
|
||||
GuardrailCallable,
|
||||
@@ -359,6 +372,10 @@ class Task(BaseModel):
|
||||
)
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
|
||||
if not isinstance(self.agent.llm, BaseLLM):
|
||||
raise ValueError(
|
||||
"Agent must have a BaseLLM instance to use LLMGuardrail"
|
||||
)
|
||||
guardrails.append(
|
||||
cast(
|
||||
GuardrailCallable,
|
||||
@@ -379,11 +396,12 @@ class Task(BaseModel):
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
if v:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None, info: Any) -> UUID4 | None:
|
||||
if v and not (info.context or {}).get("from_checkpoint"):
|
||||
raise PydanticCustomError(
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
)
|
||||
return v
|
||||
|
||||
@field_validator("input_files", mode="before")
|
||||
@classmethod
|
||||
@@ -646,7 +664,12 @@ class Task(BaseModel):
|
||||
await cb_result
|
||||
|
||||
crew = self.agent.crew # type: ignore[union-attr]
|
||||
if crew and crew.task_callback and crew.task_callback != self.callback:
|
||||
if (
|
||||
crew
|
||||
and not isinstance(crew, str)
|
||||
and crew.task_callback
|
||||
and crew.task_callback != self.callback
|
||||
):
|
||||
cb_result = crew.task_callback(self.output)
|
||||
if inspect.isawaitable(cb_result):
|
||||
await cb_result
|
||||
@@ -761,7 +784,12 @@ class Task(BaseModel):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
crew = self.agent.crew # type: ignore[union-attr]
|
||||
if crew and crew.task_callback and crew.task_callback != self.callback:
|
||||
if (
|
||||
crew
|
||||
and not isinstance(crew, str)
|
||||
and crew.task_callback
|
||||
and crew.task_callback != self.callback
|
||||
):
|
||||
cb_result = crew.task_callback(self.output)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
@@ -812,11 +840,14 @@ class Task(BaseModel):
|
||||
if trigger_payload is not None:
|
||||
description += f"\n\nTrigger Payload: {trigger_payload}"
|
||||
|
||||
if self.agent and self.agent.crew:
|
||||
if self.agent and self.agent.crew and not isinstance(self.agent.crew, str):
|
||||
files = get_all_files(self.agent.crew.id, self.id)
|
||||
if files:
|
||||
supported_types: list[str] = []
|
||||
if self.agent.llm and self.agent.llm.supports_multimodal():
|
||||
if (
|
||||
isinstance(self.agent.llm, BaseLLM)
|
||||
and self.agent.llm.supports_multimodal()
|
||||
):
|
||||
provider: str = str(
|
||||
getattr(self.agent.llm, "provider", None)
|
||||
or getattr(self.agent.llm, "model", "openai")
|
||||
@@ -971,7 +1002,7 @@ Follow these guidelines:
|
||||
self.delegations += 1
|
||||
|
||||
def copy( # type: ignore
|
||||
self, agents: list[BaseAgent], task_mapping: dict[str, Task]
|
||||
self, agents: Sequence[BaseAgent], task_mapping: dict[str, Task]
|
||||
) -> Task:
|
||||
"""Creates a deep copy of the Task while preserving its original class type.
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ from pydantic import Field
|
||||
from crewai.task import Task
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.types.callback import SerializableCallable
|
||||
|
||||
|
||||
class ConditionalTask(Task):
|
||||
@@ -24,7 +25,7 @@ class ConditionalTask(Task):
|
||||
- Cannot be the first task since it needs context from the previous task
|
||||
"""
|
||||
|
||||
condition: Callable[[TaskOutput], bool] | None = Field(
|
||||
condition: SerializableCallable | None = Field(
|
||||
default=None,
|
||||
description="Function that determines whether the task should be executed based on previous task output.",
|
||||
)
|
||||
@@ -51,7 +52,7 @@ class ConditionalTask(Task):
|
||||
"""
|
||||
if self.condition is None:
|
||||
raise ValueError("No condition function set for conditional task")
|
||||
return self.condition(context)
|
||||
return bool(self.condition(context))
|
||||
|
||||
def get_skipped_task_output(self) -> TaskOutput:
|
||||
"""Generate a TaskOutput for when the conditional task is skipped.
|
||||
|
||||
@@ -43,7 +43,9 @@ class TaskOutput(BaseModel):
|
||||
output_format: OutputFormat = Field(
|
||||
description="Output format of the task", default=OutputFormat.RAW
|
||||
)
|
||||
messages: list[LLMMessage] = Field(description="Messages of the task", default=[])
|
||||
messages: list[LLMMessage] = Field(
|
||||
description="Messages of the task", default_factory=list
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_summary(self) -> TaskOutput:
|
||||
|
||||
@@ -41,6 +41,7 @@ from crewai.events.types.system_events import (
|
||||
SigTStpEvent,
|
||||
SigTermEvent,
|
||||
)
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.telemetry.constants import (
|
||||
CREWAI_TELEMETRY_BASE_URL,
|
||||
CREWAI_TELEMETRY_SERVICE_NAME,
|
||||
@@ -323,7 +324,9 @@ class Telemetry:
|
||||
if getattr(agent, "function_calling_llm", None)
|
||||
else ""
|
||||
),
|
||||
"llm": agent.llm.model,
|
||||
"llm": agent.llm.model
|
||||
if isinstance(agent.llm, BaseLLM)
|
||||
else str(agent.llm),
|
||||
"delegation_enabled?": agent.allow_delegation,
|
||||
"allow_code_execution?": getattr(
|
||||
agent, "allow_code_execution", False
|
||||
@@ -427,7 +430,9 @@ class Telemetry:
|
||||
if getattr(agent, "function_calling_llm", None)
|
||||
else ""
|
||||
),
|
||||
"llm": agent.llm.model,
|
||||
"llm": agent.llm.model
|
||||
if isinstance(agent.llm, BaseLLM)
|
||||
else str(agent.llm),
|
||||
"delegation_enabled?": agent.allow_delegation,
|
||||
"allow_code_execution?": getattr(
|
||||
agent, "allow_code_execution", False
|
||||
@@ -840,7 +845,9 @@ class Telemetry:
|
||||
"max_iter": agent.max_iter,
|
||||
"max_rpm": agent.max_rpm,
|
||||
"i18n": agent.i18n.prompt_file,
|
||||
"llm": agent.llm.model,
|
||||
"llm": agent.llm.model
|
||||
if isinstance(agent.llm, BaseLLM)
|
||||
else str(agent.llm),
|
||||
"delegation_enabled?": agent.allow_delegation,
|
||||
"tools_names": [
|
||||
sanitize_tool_name(tool.name)
|
||||
@@ -1033,3 +1040,20 @@ class Telemetry:
|
||||
close_span(span)
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
|
||||
def feature_usage_span(self, feature: str) -> None:
|
||||
"""Records that a feature was used. One span = one count.
|
||||
|
||||
Args:
|
||||
feature: Feature identifier, e.g. "planning:creation",
|
||||
"mcp:connection", "a2a:delegation".
|
||||
"""
|
||||
|
||||
def _operation() -> None:
|
||||
tracer = trace.get_tracer("crewai.telemetry")
|
||||
span = tracer.start_span("Feature Usage")
|
||||
self._add_attribute(span, "crewai_version", version("crewai"))
|
||||
self._add_attribute(span, "feature", feature)
|
||||
close_span(span)
|
||||
|
||||
self._safe_telemetry_operation(_operation)
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections.abc import Sequence
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from crewai.tools.agent_tools.ask_question_tool import AskQuestionTool
|
||||
@@ -16,7 +17,7 @@ if TYPE_CHECKING:
|
||||
class AgentTools:
|
||||
"""Manager class for agent-related tools"""
|
||||
|
||||
def __init__(self, agents: list[BaseAgent], i18n: I18N | None = None) -> None:
|
||||
def __init__(self, agents: Sequence[BaseAgent], i18n: I18N | None = None) -> None:
|
||||
self.agents = agents
|
||||
self.i18n = i18n if i18n is not None else get_i18n()
|
||||
|
||||
|
||||
@@ -318,6 +318,8 @@ class ToolUsage:
|
||||
if self.task:
|
||||
self.task.increment_delegations(coworker)
|
||||
|
||||
fingerprint_config = self._build_fingerprint_config()
|
||||
|
||||
if calling.arguments:
|
||||
try:
|
||||
acceptable_args = tool.args_schema.model_json_schema()[
|
||||
@@ -328,15 +330,16 @@ class ToolUsage:
|
||||
for k, v in calling.arguments.items()
|
||||
if k in acceptable_args
|
||||
}
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
result = await tool.ainvoke(
|
||||
input=arguments, config=fingerprint_config
|
||||
)
|
||||
except Exception:
|
||||
arguments = calling.arguments
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
result = await tool.ainvoke(
|
||||
input=arguments, config=fingerprint_config
|
||||
)
|
||||
else:
|
||||
arguments = self._add_fingerprint_metadata({})
|
||||
result = await tool.ainvoke(input=arguments)
|
||||
result = await tool.ainvoke(input={}, config=fingerprint_config)
|
||||
|
||||
if self.tools_handler:
|
||||
should_cache = True
|
||||
@@ -550,6 +553,8 @@ class ToolUsage:
|
||||
if self.task:
|
||||
self.task.increment_delegations(coworker)
|
||||
|
||||
fingerprint_config = self._build_fingerprint_config()
|
||||
|
||||
if calling.arguments:
|
||||
try:
|
||||
acceptable_args = tool.args_schema.model_json_schema()[
|
||||
@@ -560,15 +565,16 @@ class ToolUsage:
|
||||
for k, v in calling.arguments.items()
|
||||
if k in acceptable_args
|
||||
}
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = tool.invoke(input=arguments)
|
||||
result = tool.invoke(
|
||||
input=arguments, config=fingerprint_config
|
||||
)
|
||||
except Exception:
|
||||
arguments = calling.arguments
|
||||
arguments = self._add_fingerprint_metadata(arguments)
|
||||
result = tool.invoke(input=arguments)
|
||||
result = tool.invoke(
|
||||
input=arguments, config=fingerprint_config
|
||||
)
|
||||
else:
|
||||
arguments = self._add_fingerprint_metadata({})
|
||||
result = tool.invoke(input=arguments)
|
||||
result = tool.invoke(input={}, config=fingerprint_config)
|
||||
|
||||
if self.tools_handler:
|
||||
should_cache = True
|
||||
@@ -1008,23 +1014,16 @@ class ToolUsage:
|
||||
|
||||
return event_data
|
||||
|
||||
def _add_fingerprint_metadata(self, arguments: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Add fingerprint metadata to tool arguments if available.
|
||||
def _build_fingerprint_config(self) -> dict[str, Any]:
|
||||
"""Build fingerprint metadata as a config dict for tool invocation.
|
||||
|
||||
Args:
|
||||
arguments: The original tool arguments
|
||||
Returns the fingerprint data in a config dict rather than injecting it
|
||||
into tool arguments, so it doesn't conflict with strict tool schemas.
|
||||
|
||||
Returns:
|
||||
Updated arguments dictionary with fingerprint metadata
|
||||
Config dictionary with security_context metadata.
|
||||
"""
|
||||
# Create a shallow copy to avoid modifying the original
|
||||
arguments = arguments.copy()
|
||||
|
||||
# Add security metadata under a designated key
|
||||
if "security_context" not in arguments:
|
||||
arguments["security_context"] = {}
|
||||
|
||||
security_context = arguments["security_context"]
|
||||
security_context: dict[str, Any] = {}
|
||||
|
||||
# Add agent fingerprint if available
|
||||
if self.agent and hasattr(self.agent, "security_config"):
|
||||
@@ -1048,4 +1047,4 @@ class ToolUsage:
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return arguments
|
||||
return {"security_context": security_context} if security_context else {}
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
from typing import Annotated, Final
|
||||
|
||||
from pydantic_core import CoreSchema
|
||||
|
||||
from crewai.utilities.printer import PrinterColor
|
||||
|
||||
|
||||
@@ -36,6 +38,25 @@ class _NotSpecified:
|
||||
def __repr__(self) -> str:
|
||||
return "NOT_SPECIFIED"
|
||||
|
||||
@classmethod
|
||||
def __get_pydantic_core_schema__(
|
||||
cls, _source_type: object, _handler: object
|
||||
) -> CoreSchema:
|
||||
from pydantic_core import core_schema
|
||||
|
||||
def _validate(v: object) -> _NotSpecified:
|
||||
if isinstance(v, _NotSpecified) or v == "NOT_SPECIFIED":
|
||||
return NOT_SPECIFIED
|
||||
raise ValueError(f"Expected NOT_SPECIFIED sentinel, got {type(v).__name__}")
|
||||
|
||||
return core_schema.no_info_plain_validator_function(
|
||||
_validate,
|
||||
serialization=core_schema.plain_serializer_function_ser_schema(
|
||||
lambda v: "NOT_SPECIFIED",
|
||||
info_arg=False,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
NOT_SPECIFIED: Final[
|
||||
Annotated[
|
||||
|
||||
@@ -927,6 +927,30 @@ class TestNativeToolExecution:
|
||||
assert len(tool_messages) == 1
|
||||
assert tool_messages[0]["tool_call_id"] == "call_1"
|
||||
|
||||
def test_check_native_todo_completion_requires_current_todo(
|
||||
self, mock_dependencies
|
||||
):
|
||||
from crewai.utilities.planning_types import TodoList
|
||||
|
||||
executor = _build_executor(**mock_dependencies)
|
||||
|
||||
# No current todo → not satisfied
|
||||
executor.state.todos = TodoList(items=[])
|
||||
assert executor.check_native_todo_completion() == "todo_not_satisfied"
|
||||
|
||||
# With a current todo that has tool_to_use → satisfied
|
||||
running = TodoItem(
|
||||
step_number=1,
|
||||
description="Use the expected tool",
|
||||
tool_to_use="expected_tool",
|
||||
status="running",
|
||||
)
|
||||
executor.state.todos = TodoList(items=[running])
|
||||
assert executor.check_native_todo_completion() == "todo_satisfied"
|
||||
|
||||
# With a current todo without tool_to_use → still satisfied
|
||||
running.tool_to_use = None
|
||||
assert executor.check_native_todo_completion() == "todo_satisfied"
|
||||
|
||||
|
||||
class TestPlannerObserver:
|
||||
|
||||
@@ -218,6 +218,7 @@ def test_publish_when_not_in_sync_and_force(
|
||||
["uv", "build", "--sdist", "--out-dir", unittest.mock.ANY],
|
||||
check=True,
|
||||
capture_output=False,
|
||||
env=unittest.mock.ANY,
|
||||
)
|
||||
mock_open.assert_called_with(unittest.mock.ANY, "rb")
|
||||
mock_publish.assert_called_with(
|
||||
@@ -279,6 +280,7 @@ def test_publish_success(
|
||||
["uv", "build", "--sdist", "--out-dir", unittest.mock.ANY],
|
||||
check=True,
|
||||
capture_output=False,
|
||||
env=unittest.mock.ANY,
|
||||
)
|
||||
mock_open.assert_called_with(unittest.mock.ANY, "rb")
|
||||
mock_publish.assert_called_with(
|
||||
|
||||
@@ -25,6 +25,9 @@ release = "crewai_devtools.cli:release"
|
||||
docs-check = "crewai_devtools.docs_check:docs_check"
|
||||
devtools = "crewai_devtools.cli:main"
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
[build-system]
|
||||
requires = ["hatchling"]
|
||||
build-backend = "hatchling.build"
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
"""CrewAI development tools."""
|
||||
|
||||
__version__ = "1.13.0a6"
|
||||
__version__ = "1.13.0"
|
||||
|
||||
@@ -160,6 +160,7 @@ info = "Commits must follow Conventional Commits 1.0.0."
|
||||
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
|
||||
# composio-core pins rich<14 but textual requires rich>=14.
|
||||
# onnxruntime 1.24+ dropped Python 3.10 wheels; cap it so qdrant[fastembed] resolves on 3.10.
|
||||
|
||||
Reference in New Issue
Block a user