Compare commits

...

14 Commits

Author SHA1 Message Date
Greyson LaLonde
15f5bff043 docs: update changelog and version for v1.14.1
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Check Documentation Broken Links / Check broken links (push) Has been cancelled
Vulnerability Scan / pip-audit (push) Has been cancelled
2026-04-09 01:56:51 +08:00
Greyson LaLonde
a0578bb6c3 feat: bump versions to 1.14.1 2026-04-09 01:45:40 +08:00
Greyson LaLonde
00400a9f31 ci: skip python tests, lint, and type checks on docs-only PRs 2026-04-09 01:34:47 +08:00
Lorenze Jay
5c08e566b5 dedicate skills page (#5331) 2026-04-08 10:10:18 -07:00
Greyson LaLonde
fe028ef400 docs: update changelog and version for v1.14.1rc1 2026-04-09 00:29:04 +08:00
Greyson LaLonde
52c227ab17 feat: bump versions to 1.14.1rc1 2026-04-09 00:22:24 +08:00
Greyson LaLonde
8bae740899 fix: use regex for template pyproject.toml version bumps
tomlkit.parse() fails on Jinja placeholders like {{folder_name}}
in CLI template files. Switch to regex replacement for templates.
2026-04-09 00:13:07 +08:00
Greyson LaLonde
1c784695c1 feat: add async checkpoint TUI browser
Launch a Textual TUI via `crewai checkpoint` to browse and resume
from checkpoints. Uses run_async/akickoff for fully async execution.
Adds provider auto-detection from file magic bytes.
2026-04-08 23:59:09 +08:00
iris-clawd
1ae237a287 refactor: replace hardcoded denylist with dynamic BaseTool field exclusion in spec gen (#5347)
The spec generator previously used a hardcoded list of field names to
exclude from init_params_schema. Any new field or computed_field added
to BaseTool (like tool_type from 86ce54f) would silently leak into
tool.specs.json unless someone remembered to update that list.

Now _extract_init_params() dynamically computes BaseTool's fields at
import time via model_fields + model_computed_fields, so any future
additions to BaseTool are automatically excluded.

Fields from intermediate base classes (RagTool, BraveSearchToolBase,
SerpApiBaseTool) are correctly preserved since they're not on BaseTool.

TDD:
- RED: 3 new tests confirming BaseTool field leak, intermediate base
  preservation, and future-proofing — all failed before the fix
- GREEN: Dynamic allowlist applied — all 10 tests pass
- Regenerated tool.specs.json (tool_type removed from all tools)
2026-04-08 11:49:16 -04:00
Greyson LaLonde
0e8ed75947 feat: add aclose()/close() and async context manager to streaming outputs 2026-04-08 23:32:37 +08:00
Greyson LaLonde
98e0d1054f fix: sanitize tool names in hook decorator filters 2026-04-08 21:02:25 +08:00
Greyson LaLonde
fc9280ccf6 refactor: replace regex with tomlkit in devtools CLI
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Vulnerability Scan / pip-audit (push) Has been cancelled
Build uv cache / build-cache (3.10) (push) Has been cancelled
Build uv cache / build-cache (3.11) (push) Has been cancelled
Build uv cache / build-cache (3.12) (push) Has been cancelled
Build uv cache / build-cache (3.13) (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
2026-04-08 19:52:51 +08:00
Greyson LaLonde
f4c0667d34 fix: bump transformers to 5.5.0 to resolve CVE-2026-1839
Bumps docling pin from ~=2.75.0 to ~=2.84.0 (allows huggingface-hub>=1)
and adds a transformers>=5.4.0 override to force resolution past 4.57.6.
2026-04-08 18:59:51 +08:00
Greyson LaLonde
0450d06a65 refactor: use shared PRINTER singleton
Some checks failed
CodeQL Advanced / Analyze (actions) (push) Has been cancelled
CodeQL Advanced / Analyze (python) (push) Has been cancelled
Vulnerability Scan / pip-audit (push) Has been cancelled
Nightly Canary Release / Check for new commits (push) Has been cancelled
Nightly Canary Release / Build nightly packages (push) Has been cancelled
Nightly Canary Release / Publish nightly to PyPI (push) Has been cancelled
2026-04-08 07:17:22 +08:00
73 changed files with 4231 additions and 1132 deletions

View File

@@ -6,7 +6,24 @@ permissions:
contents: read
jobs:
lint:
changes:
name: Detect changes
runs-on: ubuntu-latest
outputs:
code: ${{ steps.filter.outputs.code }}
steps:
- uses: actions/checkout@v4
- uses: dorny/paths-filter@v3
id: filter
with:
filters: |
code:
- '!docs/**'
- '!**/*.md'
lint-run:
needs: changes
if: needs.changes.outputs.code == 'true'
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -48,3 +65,23 @@ jobs:
~/.local/share/uv
.venv
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
# Summary job to provide single status for branch protection
lint:
name: lint
runs-on: ubuntu-latest
needs: [changes, lint-run]
if: always()
steps:
- name: Check results
run: |
if [ "${{ needs.changes.outputs.code }}" != "true" ]; then
echo "Docs-only change, skipping lint"
exit 0
fi
if [ "${{ needs.lint-run.result }}" == "success" ]; then
echo "Lint passed"
else
echo "Lint failed"
exit 1
fi

View File

@@ -6,8 +6,25 @@ permissions:
contents: read
jobs:
tests:
changes:
name: Detect changes
runs-on: ubuntu-latest
outputs:
code: ${{ steps.filter.outputs.code }}
steps:
- uses: actions/checkout@v4
- uses: dorny/paths-filter@v3
id: filter
with:
filters: |
code:
- '!docs/**'
- '!**/*.md'
tests-matrix:
name: tests (${{ matrix.python-version }})
needs: changes
if: needs.changes.outputs.code == 'true'
runs-on: ubuntu-latest
timeout-minutes: 15
strategy:
@@ -98,3 +115,23 @@ jobs:
~/.local/share/uv
.venv
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
# Summary job to provide single status for branch protection
tests:
name: tests
runs-on: ubuntu-latest
needs: [changes, tests-matrix]
if: always()
steps:
- name: Check results
run: |
if [ "${{ needs.changes.outputs.code }}" != "true" ]; then
echo "Docs-only change, skipping tests"
exit 0
fi
if [ "${{ needs.tests-matrix.result }}" == "success" ]; then
echo "All tests passed"
else
echo "Tests failed"
exit 1
fi

View File

@@ -6,8 +6,25 @@ permissions:
contents: read
jobs:
changes:
name: Detect changes
runs-on: ubuntu-latest
outputs:
code: ${{ steps.filter.outputs.code }}
steps:
- uses: actions/checkout@v4
- uses: dorny/paths-filter@v3
id: filter
with:
filters: |
code:
- '!docs/**'
- '!**/*.md'
type-checker-matrix:
name: type-checker (${{ matrix.python-version }})
needs: changes
if: needs.changes.outputs.code == 'true'
runs-on: ubuntu-latest
strategy:
fail-fast: false
@@ -57,14 +74,18 @@ jobs:
type-checker:
name: type-checker
runs-on: ubuntu-latest
needs: type-checker-matrix
needs: [changes, type-checker-matrix]
if: always()
steps:
- name: Check matrix results
- name: Check results
run: |
if [ "${{ needs.type-checker-matrix.result }}" == "success" ] || [ "${{ needs.type-checker-matrix.result }}" == "skipped" ]; then
echo "✅ All type checks passed"
if [ "${{ needs.changes.outputs.code }}" != "true" ]; then
echo "Docs-only change, skipping type checks"
exit 0
fi
if [ "${{ needs.type-checker-matrix.result }}" == "success" ]; then
echo "All type checks passed"
else
echo "Type checks failed"
echo "Type checks failed"
exit 1
fi

View File

@@ -4,6 +4,73 @@ description: "تحديثات المنتج والتحسينات وإصلاحات
icon: "clock"
mode: "wide"
---
<Update label="9 أبريل 2026">
## v1.14.1
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.1)
## ما الذي تغير
### الميزات
- إضافة متصفح TUI لنقاط التفتيش غير المتزامنة
- إضافة دالة aclose()/close() ومدير سياق غير متزامن لمخرجات البث
### إصلاحات الأخطاء
- إصلاح التعبير النمطي لزيادة إصدار pyproject.toml
- تنظيف أسماء الأدوات في مرشحات زخرفة الخطاف
- إصلاح تسجيل معالجات نقاط التفتيش عند إنشاء CheckpointConfig
- رفع إصدار transformers إلى 5.5.0 لحل CVE-2026-1839
- إزالة غلاف FilteredStream لـ stdout/stderr
### الوثائق
- تحديث سجل التغييرات والإصدار لـ v1.14.1rc1
### إعادة الهيكلة
- استبدال القائمة المحظورة الثابتة باستبعاد حقل BaseTool الديناميكي في توليد المواصفات
- استبدال التعبير النمطي بـ tomlkit في واجهة سطر أوامر أدوات التطوير
- استخدام كائن PRINTER المشترك
- جعل BaseProvider نموذجاً أساسياً مع مميز نوع المزود
## المساهمون
@greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay
</Update>
<Update label="9 أبريل 2026">
## v1.14.1rc1
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.1rc1)
## ما الذي تغير
### الميزات
- إضافة متصفح TUI لنقطة التحقق غير المتزامنة
- إضافة aclose()/close() ومدير سياق غير متزامن لمخرجات البث
### إصلاحات الأخطاء
- إصلاح زيادة إصدارات pyproject.toml باستخدام التعبيرات العادية
- تنظيف أسماء الأدوات في مرشحات ديكور المكونات
- زيادة إصدار transformers إلى 5.5.0 لحل CVE-2026-1839
- تسجيل معالجات نقطة التحقق عند إنشاء CheckpointConfig
### إعادة الهيكلة
- استبدال القائمة المحظورة الثابتة باستبعاد حقل BaseTool الديناميكي في توليد المواصفات
- استبدال التعبيرات العادية بـ tomlkit في واجهة سطر الأوامر devtools
- استخدام كائن PRINTER المشترك
- جعل BaseProvider نموذجًا أساسيًا مع مميز نوع المزود
- إزالة غلاف stdout/stderr لـ FilteredStream
- إزالة flow/config.py غير المستخدمة
### الوثائق
- تحديث سجل التغييرات والإصدار لـ v1.14.0
## المساهمون
@greysonlalonde, @iris-clawd, @joaomdmoura
</Update>
<Update label="7 أبريل 2026">
## v1.14.0

View File

@@ -325,6 +325,34 @@ asyncio.run(interactive_research())
- **تجربة المستخدم**: تقليل زمن الاستجابة المتصور بعرض نتائج تدريجية
- **لوحات المعلومات الحية**: بناء واجهات مراقبة تعرض حالة تنفيذ الطاقم
## الإلغاء وتنظيف الموارد
يدعم `CrewStreamingOutput` الإلغاء السلس بحيث يتوقف العمل الجاري فوراً عند انقطاع اتصال المستهلك.
### مدير السياق غير المتزامن
```python Code
streaming = await crew.akickoff(inputs={"topic": "AI"})
async with streaming:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
```
### الإلغاء الصريح
```python Code
streaming = await crew.akickoff(inputs={"topic": "AI"})
try:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
finally:
await streaming.aclose() # غير متزامن
# streaming.close() # المكافئ المتزامن
```
بعد الإلغاء، يكون كل من `streaming.is_cancelled` و `streaming.is_completed` بقيمة `True`. كل من `aclose()` و `close()` متساويان القوة.
## ملاحظات مهمة
- يفعّل البث تلقائياً بث LLM لجميع الوكلاء في الطاقم

View File

@@ -420,6 +420,34 @@ except Exception as e:
print("Streaming completed but flow encountered an error")
```
## الإلغاء وتنظيف الموارد
يدعم `FlowStreamingOutput` الإلغاء السلس بحيث يتوقف العمل الجاري فوراً عند انقطاع اتصال المستهلك.
### مدير السياق غير المتزامن
```python Code
streaming = await flow.kickoff_async()
async with streaming:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
```
### الإلغاء الصريح
```python Code
streaming = await flow.kickoff_async()
try:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
finally:
await streaming.aclose() # غير متزامن
# streaming.close() # المكافئ المتزامن
```
بعد الإلغاء، يكون كل من `streaming.is_cancelled` و `streaming.is_completed` بقيمة `True`. كل من `aclose()` و `close()` متساويان القوة.
## ملاحظات مهمة
- يفعّل البث تلقائياً بث LLM لأي أطقم مستخدمة داخل التدفق

50
docs/ar/skills.mdx Normal file
View File

@@ -0,0 +1,50 @@
---
title: Skills
description: ثبّت crewaiinc/skills من السجل الرسمي على skills.sh—Flows وCrews ووكلاء مرتبطون بالوثائق لـ Claude Code وCursor وCodex وغيرها.
icon: wand-magic-sparkles
mode: "wide"
---
# Skills
**امنح وكيل البرمجة سياق CrewAI في أمر واحد.**
تُنشر **Skills** الخاصة بـ CrewAI على **[skills.sh/crewaiinc/skills](https://skills.sh/crewaiinc/skills)**—السجل الرسمي لـ `crewaiinc/skills`، بما في ذلك كل مهارة (مثل **design-agent** و**getting-started** و**design-task** و**ask-docs**) وإحصاءات التثبيت والتدقيقات. تعلّم وكلاء البرمجة—مثل Claude Code وCursor وCodex—هيكلة Flows وضبط Crews واستخدام الأدوات واتباع أنماط CrewAI. نفّذ الأمر أدناه (أو الصقه في الوكيل).
```shell Terminal
npx skills add crewaiinc/skills
```
يضيف ذلك حزمة المهارات إلى سير عمل الوكيل لتطبيق اتفاقيات CrewAI دون إعادة شرح الإطار في كل جلسة. المصدر والقضايا على [GitHub](https://github.com/crewAIInc/skills).
## ما يحصل عليه الوكيل
- **Flows** — تطبيقات ذات حالة وخطوات وkickoffs للـ crew على نمط CrewAI
- **Crews والوكلاء** — أنماط YAML أولاً، أدوار، مهام، وتفويض
- **الأدوات والتكاملات** — ربط الوكلاء بالبحث وواجهات API وأدوات CrewAI الشائعة
- **هيكل المشروع** — مواءمة مع قوالب CLI واتفاقيات المستودع
- **أنماط محدثة** — تتبع المهارات وثائق CrewAI والممارسات الموصى بها
## تعرّف أكثر على هذا الموقع
<CardGroup cols={2}>
<Card title="أدوات البرمجة و AGENTS.md" icon="terminal" href="/ar/guides/coding-tools/agents-md">
استخدام `AGENTS.md` وسير عمل وكلاء البرمجة مع CrewAI.
</Card>
<Card title="البداية السريعة" icon="rocket" href="/ar/quickstart">
ابنِ أول Flow وcrew من البداية للنهاية.
</Card>
<Card title="التثبيت" icon="download" href="/ar/installation">
ثبّت CrewAI CLI وحزمة Python.
</Card>
<Card title="سجل Skills (skills.sh)" icon="globe" href="https://skills.sh/crewaiinc/skills">
القائمة الرسمية لـ `crewaiinc/skills`—المهارات والتثبيتات والتدقيقات.
</Card>
<Card title="المصدر على GitHub" icon="code-branch" href="https://github.com/crewAIInc/skills">
مصدر الحزمة والتحديثات والقضايا.
</Card>
</CardGroup>
### فيديو: CrewAI مع مهارات وكلاء البرمجة
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{ width: "100%", height: "400px" }} />

File diff suppressed because it is too large Load Diff

View File

@@ -4,6 +4,73 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="Apr 09, 2026">
## v1.14.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.1)
## What's Changed
### Features
- Add async checkpoint TUI browser
- Add aclose()/close() and async context manager to streaming outputs
### Bug Fixes
- Fix regex for template pyproject.toml version bumps
- Sanitize tool names in hook decorator filters
- Fix checkpoint handlers registration when CheckpointConfig is created
- Bump transformers to 5.5.0 to resolve CVE-2026-1839
- Remove FilteredStream stdout/stderr wrapper
### Documentation
- Update changelog and version for v1.14.1rc1
### Refactoring
- Replace hardcoded denylist with dynamic BaseTool field exclusion in spec gen
- Replace regex with tomlkit in devtools CLI
- Use shared PRINTER singleton
- Make BaseProvider a BaseModel with provider_type discriminator
## Contributors
@greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay
</Update>
<Update label="Apr 09, 2026">
## v1.14.1rc1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.1rc1)
## What's Changed
### Features
- Add async checkpoint TUI browser
- Add aclose()/close() and async context manager to streaming outputs
### Bug Fixes
- Fix template pyproject.toml version bumps using regex
- Sanitize tool names in hook decorator filters
- Bump transformers to 5.5.0 to resolve CVE-2026-1839
- Register checkpoint handlers when CheckpointConfig is created
### Refactoring
- Replace hardcoded denylist with dynamic BaseTool field exclusion in spec gen
- Replace regex with tomlkit in devtools CLI
- Use shared PRINTER singleton
- Make BaseProvider a BaseModel with provider_type discriminator
- Remove FilteredStream stdout/stderr wrapper
- Remove unused flow/config.py
### Documentation
- Update changelog and version for v1.14.0
## Contributors
@greysonlalonde, @iris-clawd, @joaomdmoura
</Update>
<Update label="Apr 07, 2026">
## v1.14.0

View File

@@ -325,6 +325,34 @@ Streaming is particularly valuable for:
- **User Experience**: Reduce perceived latency by showing incremental results
- **Live Dashboards**: Build monitoring interfaces that display crew execution status
## Cancellation and Resource Cleanup
`CrewStreamingOutput` supports graceful cancellation so that in-flight work stops promptly when the consumer disconnects.
### Async Context Manager
```python Code
streaming = await crew.akickoff(inputs={"topic": "AI"})
async with streaming:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
```
### Explicit Cancellation
```python Code
streaming = await crew.akickoff(inputs={"topic": "AI"})
try:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
finally:
await streaming.aclose() # async
# streaming.close() # sync equivalent
```
After cancellation, `streaming.is_cancelled` and `streaming.is_completed` are both `True`. Both `aclose()` and `close()` are idempotent.
## Important Notes
- Streaming automatically enables LLM streaming for all agents in the crew

View File

@@ -420,6 +420,34 @@ except Exception as e:
print("Streaming completed but flow encountered an error")
```
## Cancellation and Resource Cleanup
`FlowStreamingOutput` supports graceful cancellation so that in-flight work stops promptly when the consumer disconnects.
### Async Context Manager
```python Code
streaming = await flow.kickoff_async()
async with streaming:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
```
### Explicit Cancellation
```python Code
streaming = await flow.kickoff_async()
try:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
finally:
await streaming.aclose() # async
# streaming.close() # sync equivalent
```
After cancellation, `streaming.is_cancelled` and `streaming.is_completed` are both `True`. Both `aclose()` and `close()` are idempotent.
## Important Notes
- Streaming automatically enables LLM streaming for any crews used within the flow

50
docs/en/skills.mdx Normal file
View File

@@ -0,0 +1,50 @@
---
title: Skills
description: Install crewaiinc/skills from the official registry at skills.sh—Flows, Crews, and docs-aware agents for Claude Code, Cursor, Codex, and more.
icon: wand-magic-sparkles
mode: "wide"
---
# Skills
**Give your AI coding agent CrewAI context in one command.**
CrewAI **Skills** are published on **[skills.sh/crewaiinc/skills](https://skills.sh/crewaiinc/skills)**—the official registry for `crewaiinc/skills`, including individual skills (for example **design-agent**, **getting-started**, **design-task**, and **ask-docs**), install stats, and audits. They teach coding agents—like Claude Code, Cursor, and Codex—how to scaffold Flows, configure Crews, use tools, and follow CrewAI patterns. Run the install below (or paste it into your agent).
```shell Terminal
npx skills add crewaiinc/skills
```
That pulls the official skill pack into your agent workflow so it can apply CrewAI conventions without you re-explaining the framework each session. Source code and issues live on [GitHub](https://github.com/crewAIInc/skills).
## What your agent gets
- **Flows** — structure stateful apps, steps, and crew kickoffs the CrewAI way
- **Crews & agents** — YAML-first patterns, roles, tasks, and delegation
- **Tools & integrations** — hook agents to search, APIs, and common CrewAI tools
- **Project layout** — align with CLI scaffolds and repo conventions
- **Up-to-date patterns** — skills track current CrewAI docs and recommended practices
## Learn more on this site
<CardGroup cols={2}>
<Card title="Coding tools & AGENTS.md" icon="terminal" href="/en/guides/coding-tools/agents-md">
How to use `AGENTS.md` and coding-agent workflows with CrewAI.
</Card>
<Card title="Quickstart" icon="rocket" href="/en/quickstart">
Build your first Flow and crew end-to-end.
</Card>
<Card title="Installation" icon="download" href="/en/installation">
Install the CrewAI CLI and Python package.
</Card>
<Card title="Skills registry (skills.sh)" icon="globe" href="https://skills.sh/crewaiinc/skills">
Official listing for `crewaiinc/skills`—skills, installs, and audits.
</Card>
<Card title="GitHub source" icon="code-branch" href="https://github.com/crewAIInc/skills">
Source, updates, and issues for the skill pack.
</Card>
</CardGroup>
### Video: CrewAI with coding agent skills
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{ width: "100%", height: "400px" }} />

View File

@@ -4,6 +4,73 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정"
icon: "clock"
mode: "wide"
---
<Update label="2026년 4월 9일">
## v1.14.1
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.1)
## 변경 사항
### 기능
- 비동기 체크포인트 TUI 브라우저 추가
- 스트리밍 출력에 aclose()/close() 및 비동기 컨텍스트 관리자 추가
### 버그 수정
- 템플릿 pyproject.toml 버전 증가를 위한 정규 표현식 수정
- 훅 데코레이터 필터에서 도구 이름 정리
- CheckpointConfig 생성 시 체크포인트 핸들러 등록 수정
- CVE-2026-1839 해결을 위해 transformers를 5.5.0으로 업데이트
- FilteredStream stdout/stderr 래퍼 제거
### 문서
- v1.14.1rc1에 대한 변경 로그 및 버전 업데이트
### 리팩토링
- 하드코딩된 거부 목록을 동적 BaseTool 필드 제외로 교체
- devtools CLI에서 정규 표현식을 tomlkit으로 교체
- 공유 PRINTER 싱글톤 사용
- BaseProvider를 provider_type 식별자가 있는 BaseModel로 변경
## 기여자
@greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay
</Update>
<Update label="2026년 4월 9일">
## v1.14.1rc1
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.1rc1)
## 변경 사항
### 기능
- 비동기 체크포인트 TUI 브라우저 추가
- 스트리밍 출력에 aclose()/close() 및 비동기 컨텍스트 관리자 추가
### 버그 수정
- 정규 표현식을 사용하여 템플릿 pyproject.toml 버전 증가 수정
- 후크 데코레이터 필터에서 도구 이름 정리
- CVE-2026-1839 해결을 위해 transformers를 5.5.0으로 업데이트
- CheckpointConfig가 생성될 때 체크포인트 핸들러 등록
### 리팩토링
- 하드코딩된 거부 목록을 동적 BaseTool 필드 제외로 교체
- devtools CLI에서 정규 표현식을 tomlkit으로 교체
- 공유 PRINTER 싱글톤 사용
- BaseProvider를 provider_type 구분자가 있는 BaseModel로 변경
- FilteredStream stdout/stderr 래퍼 제거
- 사용되지 않는 flow/config.py 제거
### 문서
- v1.14.0에 대한 변경 로그 및 버전 업데이트
## 기여자
@greysonlalonde, @iris-clawd, @joaomdmoura
</Update>
<Update label="2026년 4월 7일">
## v1.14.0

View File

@@ -325,6 +325,34 @@ asyncio.run(interactive_research())
- **사용자 경험**: 점진적인 결과를 표시하여 체감 지연 시간 감소
- **라이브 대시보드**: crew 실행 상태를 표시하는 모니터링 인터페이스 구축
## 취소 및 리소스 정리
`CrewStreamingOutput`은 소비자가 연결을 끊을 때 진행 중인 작업을 즉시 중단하는 정상적인 취소를 지원합니다.
### 비동기 컨텍스트 매니저
```python Code
streaming = await crew.akickoff(inputs={"topic": "AI"})
async with streaming:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
```
### 명시적 취소
```python Code
streaming = await crew.akickoff(inputs={"topic": "AI"})
try:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
finally:
await streaming.aclose() # 비동기
# streaming.close() # 동기 버전
```
취소 후 `streaming.is_cancelled`와 `streaming.is_completed`는 모두 `True`입니다. `aclose()`와 `close()` 모두 멱등성을 가집니다.
## 중요 사항
- 스트리밍은 crew의 모든 에이전트에 대해 자동으로 LLM 스트리밍을 활성화합니다

50
docs/ko/skills.mdx Normal file
View File

@@ -0,0 +1,50 @@
---
title: Skills
description: skills.sh의 공식 레지스트리에서 crewaiinc/skills를 설치하세요. Claude Code, Cursor, Codex 등을 위한 Flow, Crew, 문서 연동 스킬.
icon: wand-magic-sparkles
mode: "wide"
---
# Skills
**한 번의 명령으로 코딩 에이전트에 CrewAI 컨텍스트를 제공하세요.**
CrewAI **Skills**는 **[skills.sh/crewaiinc/skills](https://skills.sh/crewaiinc/skills)**에 게시됩니다. `crewaiinc/skills`의 공식 레지스트리로, 개별 스킬(예: **design-agent**, **getting-started**, **design-task**, **ask-docs**), 설치 수, 감사 정보를 확인할 수 있습니다. Claude Code, Cursor, Codex 같은 코딩 에이전트에게 Flow 구성, Crew 설정, 도구 사용, CrewAI 패턴을 가르칩니다. 아래를 실행하거나 에이전트에 붙여 넣으세요.
```shell Terminal
npx skills add crewaiinc/skills
```
에이전트 워크플로에 스킬 팩이 추가되어 세션마다 프레임워크를 다시 설명하지 않아도 CrewAI 관례를 적용할 수 있습니다. 소스와 이슈는 [GitHub](https://github.com/crewAIInc/skills)에서 관리합니다.
## 에이전트가 얻는 것
- **Flows** — CrewAI 방식의 상태ful 앱, 단계, crew kickoff
- **Crew & 에이전트** — YAML 우선 패턴, 역할, 작업, 위임
- **도구 & 통합** — 검색, API, 일반적인 CrewAI 도구 연결
- **프로젝트 구조** — CLI 스캐폴드 및 저장소 관례와 정렬
- **최신 패턴** — 스킬이 현재 CrewAI 문서 및 권장 사항을 반영
## 이 사이트에서 더 알아보기
<CardGroup cols={2}>
<Card title="코딩 도구 & AGENTS.md" icon="terminal" href="/ko/guides/coding-tools/agents-md">
CrewAI와 `AGENTS.md`, 코딩 에이전트 워크플로 사용법.
</Card>
<Card title="빠른 시작" icon="rocket" href="/ko/quickstart">
첫 Flow와 crew를 처음부터 끝까지 구축합니다.
</Card>
<Card title="설치" icon="download" href="/ko/installation">
CrewAI CLI와 Python 패키지를 설치합니다.
</Card>
<Card title="Skills 레지스트리 (skills.sh)" icon="globe" href="https://skills.sh/crewaiinc/skills">
`crewaiinc/skills` 공식 목록—스킬, 설치 수, 감사.
</Card>
<Card title="GitHub 소스" icon="code-branch" href="https://github.com/crewAIInc/skills">
스킬 팩 소스, 업데이트, 이슈.
</Card>
</CardGroup>
### 영상: 코딩 에이전트 스킬과 CrewAI
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{ width: "100%", height: "400px" }} />

View File

@@ -4,6 +4,73 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="09 abr 2026">
## v1.14.1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.1)
## O que Mudou
### Funcionalidades
- Adicionar navegador TUI de ponto de verificação assíncrono
- Adicionar aclose()/close() e gerenciador de contexto assíncrono para saídas de streaming
### Correções de Bugs
- Corrigir regex para aumentos de versão do template pyproject.toml
- Sanitizar nomes de ferramentas nos filtros do decorador de hook
- Corrigir registro de manipuladores de ponto de verificação quando CheckpointConfig é criado
- Atualizar transformers para 5.5.0 para resolver CVE-2026-1839
- Remover wrapper stdout/stderr de FilteredStream
### Documentação
- Atualizar changelog e versão para v1.14.1rc1
### Refatoração
- Substituir lista de negação codificada por exclusão dinâmica de campo BaseTool na geração de especificações
- Substituir regex por tomlkit na CLI do devtools
- Usar singleton PRINTER compartilhado
- Fazer BaseProvider um BaseModel com discriminador provider_type
## Contribuidores
@greysonlalonde, @iris-clawd, @joaomdmoura, @lorenzejay
</Update>
<Update label="09 abr 2026">
## v1.14.1rc1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.1rc1)
## O que Mudou
### Recursos
- Adicionar navegador TUI de ponto de verificação assíncrono
- Adicionar aclose()/close() e gerenciador de contexto assíncrono para saídas de streaming
### Correções de Bugs
- Corrigir aumentos de versão do template pyproject.toml usando regex
- Sanitizar nomes de ferramentas nos filtros do decorador de hook
- Atualizar transformers para 5.5.0 para resolver CVE-2026-1839
- Registrar manipuladores de ponto de verificação quando CheckpointConfig é criado
### Refatoração
- Substituir lista de negação codificada por exclusão dinâmica de campo BaseTool na geração de especificações
- Substituir regex por tomlkit na CLI do devtools
- Usar singleton PRINTER compartilhado
- Tornar BaseProvider um BaseModel com discriminador de tipo de provedor
- Remover wrapper stdout/stderr de FilteredStream
- Remover flow/config.py não utilizado
### Documentação
- Atualizar changelog e versão para v1.14.0
## Contribuidores
@greysonlalonde, @iris-clawd, @joaomdmoura
</Update>
<Update label="07 abr 2026">
## v1.14.0

View File

@@ -325,6 +325,34 @@ O streaming é particularmente valioso para:
- **Experiência do Usuário**: Reduzir latência percebida mostrando resultados incrementais
- **Dashboards ao Vivo**: Construir interfaces de monitoramento que exibem status de execução da crew
## Cancelamento e Limpeza de Recursos
`CrewStreamingOutput` suporta cancelamento gracioso para que o trabalho em andamento pare imediatamente quando o consumidor desconecta.
### Gerenciador de Contexto Assíncrono
```python Code
streaming = await crew.akickoff(inputs={"topic": "AI"})
async with streaming:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
```
### Cancelamento Explícito
```python Code
streaming = await crew.akickoff(inputs={"topic": "AI"})
try:
async for chunk in streaming:
print(chunk.content, end="", flush=True)
finally:
await streaming.aclose() # assíncrono
# streaming.close() # equivalente síncrono
```
Após o cancelamento, `streaming.is_cancelled` e `streaming.is_completed` são ambos `True`. Tanto `aclose()` quanto `close()` são idempotentes.
## Notas Importantes
- O streaming ativa automaticamente o streaming do LLM para todos os agentes na crew

50
docs/pt-BR/skills.mdx Normal file
View File

@@ -0,0 +1,50 @@
---
title: Skills
description: Instale crewaiinc/skills pelo registro oficial em skills.sh—Flows, Crews e agentes alinhados à documentação para Claude Code, Cursor, Codex e outros.
icon: wand-magic-sparkles
mode: "wide"
---
# Skills
**Dê ao seu agente de código o contexto do CrewAI em um comando.**
As **Skills** do CrewAI são publicadas em **[skills.sh/crewaiinc/skills](https://skills.sh/crewaiinc/skills)**—o registro oficial de `crewaiinc/skills`, com cada skill (por exemplo **design-agent**, **getting-started**, **design-task** e **ask-docs**), estatísticas de instalação e auditorias. Ensinam agentes de código—como Claude Code, Cursor e Codex—a estruturar Flows, configurar Crews, usar ferramentas e seguir os padrões do CrewAI. Execute o comando abaixo (ou cole no seu agente).
```shell Terminal
npx skills add crewaiinc/skills
```
Isso adiciona o pacote de skills ao fluxo do seu agente para aplicar convenções do CrewAI sem precisar reexplicar o framework a cada sessão. Código-fonte e issues ficam no [GitHub](https://github.com/crewAIInc/skills).
## O que seu agente ganha
- **Flows** — apps com estado, passos e kickoffs de crew no estilo CrewAI
- **Crews e agentes** — padrões YAML-first, papéis, tarefas e delegação
- **Ferramentas e integrações** — conectar agentes a busca, APIs e ferramentas comuns
- **Layout de projeto** — alinhar com scaffolds da CLI e convenções do repositório
- **Padrões atualizados** — skills acompanham a documentação e as práticas recomendadas
## Saiba mais neste site
<CardGroup cols={2}>
<Card title="Ferramentas de codificação e AGENTS.md" icon="terminal" href="/pt-BR/guides/coding-tools/agents-md">
Como usar `AGENTS.md` e fluxos de agente de código com o CrewAI.
</Card>
<Card title="Início rápido" icon="rocket" href="/pt-BR/quickstart">
Construa seu primeiro Flow e crew ponta a ponta.
</Card>
<Card title="Instalação" icon="download" href="/pt-BR/installation">
Instale a CLI e o pacote Python do CrewAI.
</Card>
<Card title="Registro de skills (skills.sh)" icon="globe" href="https://skills.sh/crewaiinc/skills">
Listagem oficial de `crewaiinc/skills`—skills, instalações e auditorias.
</Card>
<Card title="Código no GitHub" icon="code-branch" href="https://github.com/crewAIInc/skills">
Fonte, atualizações e issues do pacote de skills.
</Card>
</CardGroup>
### Vídeo: CrewAI com coding agent skills
<iframe src="https://www.loom.com/embed/befb9f68b81f42ad8112bfdd95a780af" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen style={{ width: "100%", height: "400px" }} />

View File

@@ -152,4 +152,4 @@ __all__ = [
"wrap_file_source",
]
__version__ = "1.14.0"
__version__ = "1.14.1"

View File

@@ -10,7 +10,7 @@ requires-python = ">=3.10, <3.14"
dependencies = [
"pytube~=15.0.0",
"requests~=2.32.5",
"crewai==1.14.0",
"crewai==1.14.1",
"tiktoken~=0.8.0",
"beautifulsoup4~=4.13.4",
"python-docx~=1.2.0",

View File

@@ -305,4 +305,4 @@ __all__ = [
"ZapierActionTools",
]
__version__ = "1.14.0"
__version__ = "1.14.1"

View File

@@ -154,21 +154,19 @@ class ToolSpecExtractor:
return default_value
# Dynamically computed from BaseTool so that any future fields or
# computed_fields added to BaseTool are automatically excluded from
# the generated spec — no hardcoded denylist to maintain.
# ``package_dependencies`` is not a BaseTool field but is extracted
# into its own top-level key, so it's also excluded from init_params.
_BASE_TOOL_FIELDS: set[str] = (
set(BaseTool.model_fields)
| set(BaseTool.model_computed_fields)
| {"package_dependencies"}
)
@staticmethod
def _extract_init_params(tool_class: type[BaseTool]) -> dict[str, Any]:
ignored_init_params = [
"name",
"description",
"env_vars",
"args_schema",
"description_updated",
"cache_function",
"result_as_answer",
"max_usage_count",
"current_usage_count",
"package_dependencies",
]
json_schema = tool_class.model_json_schema(
schema_generator=SchemaGenerator, mode="serialization"
)
@@ -176,8 +174,14 @@ class ToolSpecExtractor:
json_schema["properties"] = {
key: value
for key, value in json_schema["properties"].items()
if key not in ignored_init_params
if key not in ToolSpecExtractor._BASE_TOOL_FIELDS
}
if "required" in json_schema:
json_schema["required"] = [
key
for key in json_schema["required"]
if key not in ToolSpecExtractor._BASE_TOOL_FIELDS
]
return json_schema
def save_to_json(self, output_path: str) -> None:

View File

@@ -45,6 +45,26 @@ class MockTool(BaseTool):
)
# --- Intermediate base class (like RagTool, BraveSearchToolBase) ---
class MockIntermediateBase(BaseTool):
    """Simulates an intermediate tool base class (e.g. RagTool, BraveSearchToolBase).

    Used to verify that fields declared on intermediate bases (not on
    BaseTool itself) survive into the generated init_params_schema.
    """

    name: str = "Intermediate Base"
    description: str = "An intermediate tool base"
    # Field declared on the intermediate base — must NOT be excluded by
    # the BaseTool-field filter in the spec generator.
    shared_config: str = Field("default_config", description="Config from intermediate base")

    def _run(self, query: str) -> str:
        # Trivial echo implementation; the tests only inspect the schema.
        return query
class MockDerivedTool(MockIntermediateBase):
    """A tool inheriting from an intermediate base, like CodeDocsSearchTool(RagTool).

    Inherits ``_run`` and ``shared_config`` from MockIntermediateBase and
    adds one field of its own, so tests can check that both inherited and
    own fields appear in the generated schema.
    """

    name: str = "Derived Tool"
    description: str = "A tool that inherits from intermediate base"
    # Field unique to the derived tool — must also survive spec generation.
    derived_param: str = Field("derived_default", description="Param specific to derived tool")
@pytest.fixture
def extractor():
ext = ToolSpecExtractor()
@@ -169,6 +189,87 @@ def test_extract_package_dependencies(mock_tool_extractor):
]
def test_base_tool_fields_excluded_from_init_params(mock_tool_extractor):
    """BaseTool's internal fields (including the computed ``tool_type``)
    must never surface in init_params_schema: Studio renders the tool
    config UI from this schema, and internal fields confuse users."""
    init_schema = mock_tool_extractor["init_params_schema"]
    props = set(init_schema.get("properties", {}))
    required = set(init_schema.get("required", []))

    # Every field BaseTool declares itself — none may leak through.
    base_fields = {
        "name",
        "description",
        "env_vars",
        "args_schema",
        "description_updated",
        "cache_function",
        "result_as_answer",
        "max_usage_count",
        "current_usage_count",
        "tool_type",
        "package_dependencies",
    }

    leaked_props = base_fields.intersection(props)
    assert not leaked_props, (
        f"BaseTool fields leaked into init_params_schema properties: {leaked_props}"
    )

    leaked_required = base_fields.intersection(required)
    assert not leaked_required, (
        f"BaseTool fields leaked into init_params_schema required: {leaked_required}"
    )
def test_intermediate_base_fields_preserved_for_derived_tool(extractor):
    """When a tool inherits from an intermediate base (e.g. RagTool),
    the intermediate's fields should be included — only BaseTool's own
    fields are excluded."""
    # Patch the spec module's dir()/getattr() so extract_all_tools
    # "discovers" exactly one class: the derived mock tool.
    with (
        mock.patch(
            "crewai_tools.generate_tool_specs.dir",
            return_value=["MockDerivedTool"],
        ),
        mock.patch(
            "crewai_tools.generate_tool_specs.getattr",
            return_value=MockDerivedTool,
        ),
    ):
        extractor.extract_all_tools()

    assert len(extractor.tools_spec) == 1
    tool_info = extractor.tools_spec[0]
    props = set(tool_info["init_params_schema"].get("properties", {}).keys())

    # Intermediate base's field should be preserved
    assert "shared_config" in props, (
        "Intermediate base class fields should be preserved in init_params_schema"
    )
    # Derived tool's own field should be preserved
    assert "derived_param" in props, (
        "Derived tool's own fields should be preserved in init_params_schema"
    )
    # BaseTool internals should still be excluded
    assert "tool_type" not in props
    assert "cache_function" not in props
    assert "result_as_answer" not in props
def test_future_base_tool_field_auto_excluded(extractor):
    """If a new field is added to BaseTool in the future, it should be
    automatically excluded from spec generation without needing to update
    the ignored list. This test verifies the allowlist approach works
    by checking that ONLY non-BaseTool fields appear."""
    # Discover only MockTool via patched dir()/getattr().
    with (
        mock.patch("crewai_tools.generate_tool_specs.dir", return_value=["MockTool"]),
        mock.patch("crewai_tools.generate_tool_specs.getattr", return_value=MockTool),
    ):
        extractor.extract_all_tools()

    tool_info = extractor.tools_spec[0]
    props = set(tool_info["init_params_schema"].get("properties", {}).keys())

    # Recompute BaseTool's full field set (declared + computed) here, so
    # the test tracks BaseTool itself rather than a frozen name list.
    base_all = set(BaseTool.model_fields) | set(BaseTool.model_computed_fields)
    leaked = base_all & props
    assert not leaked, (
        f"BaseTool fields should be auto-excluded but found: {leaked}. "
        "The spec generator should dynamically compute BaseTool's fields "
        "instead of using a hardcoded denylist."
    )
def test_save_to_json(extractor, tmp_path):
extractor.tools_spec = [
{

File diff suppressed because it is too large Load Diff

View File

@@ -55,7 +55,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
"crewai-tools==1.14.0",
"crewai-tools==1.14.1",
]
embeddings = [
"tiktoken~=0.8.0"
@@ -68,7 +68,7 @@ openpyxl = [
]
mem0 = ["mem0ai~=0.1.94"]
docling = [
"docling~=2.75.0",
"docling~=2.84.0",
]
qdrant = [
"qdrant-client[fastembed]~=1.14.3",

View File

@@ -46,7 +46,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "1.14.0"
__version__ = "1.14.1"
_telemetry_submitted = False

View File

@@ -6,7 +6,6 @@ from pydantic import BaseModel, Field, PrivateAttr
from crewai.agents.parser import AgentFinish
from crewai.memory.utils import sanitize_scope_name
from crewai.utilities.printer import Printer
from crewai.utilities.string_utils import sanitize_tool_name
from crewai.utilities.types import LLMMessage
@@ -30,7 +29,6 @@ class BaseAgentExecutor(BaseModel):
messages: list[LLMMessage] = Field(default_factory=list)
_resuming: bool = PrivateAttr(default=False)
_i18n: I18N | None = PrivateAttr(default=None)
_printer: Printer = PrivateAttr(default_factory=Printer)
def _save_to_memory(self, output: AgentFinish) -> None:
"""Save task result to unified memory (memory or crew._memory)."""

View File

@@ -68,6 +68,7 @@ from crewai.utilities.agent_utils import (
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.file_store import aget_all_files, get_all_files
from crewai.utilities.i18n import I18N, get_i18n
from crewai.utilities.printer import PRINTER
from crewai.utilities.string_utils import sanitize_tool_name
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.tool_utils import (
@@ -212,13 +213,13 @@ class CrewAgentExecutor(BaseAgentExecutor):
formatted_answer = self._invoke_loop()
except AssertionError:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
raise
except Exception as e:
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise
if self.ask_for_human_input:
@@ -326,7 +327,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if has_reached_max_iterations(self.iterations, self.max_iter):
formatted_answer = handle_max_iterations_exceeded(
formatted_answer,
printer=self._printer,
printer=PRINTER,
i18n=self._i18n,
messages=self.messages,
llm=cast("BaseLLM", self.llm),
@@ -341,7 +342,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
llm=cast("BaseLLM", self.llm),
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
printer=PRINTER,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
@@ -422,7 +423,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
messages=self.messages,
iterations=self.iterations,
log_error_after=self.log_error_after,
printer=self._printer,
printer=PRINTER,
verbose=self.agent.verbose,
)
@@ -433,7 +434,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
printer=PRINTER,
messages=self.messages,
llm=cast("BaseLLM", self.llm),
callbacks=self.callbacks,
@@ -441,7 +442,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
verbose=self.agent.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise e
finally:
self.iterations += 1
@@ -482,7 +483,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if has_reached_max_iterations(self.iterations, self.max_iter):
formatted_answer = handle_max_iterations_exceeded(
None,
printer=self._printer,
printer=PRINTER,
i18n=self._i18n,
messages=self.messages,
llm=cast("BaseLLM", self.llm),
@@ -502,7 +503,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
llm=cast("BaseLLM", self.llm),
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
printer=PRINTER,
tools=openai_tools,
available_functions=None,
from_task=self.task,
@@ -570,7 +571,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
printer=PRINTER,
messages=self.messages,
llm=cast("BaseLLM", self.llm),
callbacks=self.callbacks,
@@ -578,7 +579,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
verbose=self.agent.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise e
finally:
self.iterations += 1
@@ -595,7 +596,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
llm=cast("BaseLLM", self.llm),
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
printer=PRINTER,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
@@ -965,7 +966,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
break
except Exception as hook_error:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Error in before_tool_call hook: {hook_error}",
color="red",
)
@@ -1031,7 +1032,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
after_hook_context.tool_result = result
except Exception as hook_error:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Error in after_tool_call hook: {hook_error}",
color="red",
)
@@ -1078,7 +1079,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if self.agent and self.agent.verbose:
cache_info = " (from cache)" if from_cache else ""
self._printer.print(
PRINTER.print(
content=f"Tool {func_name} executed with result{cache_info}: {result[:200]}...",
color="green",
)
@@ -1118,13 +1119,13 @@ class CrewAgentExecutor(BaseAgentExecutor):
formatted_answer = await self._ainvoke_loop()
except AssertionError:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
raise
except Exception as e:
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise
if self.ask_for_human_input:
@@ -1168,7 +1169,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if has_reached_max_iterations(self.iterations, self.max_iter):
formatted_answer = handle_max_iterations_exceeded(
formatted_answer,
printer=self._printer,
printer=PRINTER,
i18n=self._i18n,
messages=self.messages,
llm=cast("BaseLLM", self.llm),
@@ -1183,7 +1184,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
llm=cast("BaseLLM", self.llm),
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
printer=PRINTER,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
@@ -1263,7 +1264,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
messages=self.messages,
iterations=self.iterations,
log_error_after=self.log_error_after,
printer=self._printer,
printer=PRINTER,
verbose=self.agent.verbose,
)
@@ -1273,7 +1274,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
printer=PRINTER,
messages=self.messages,
llm=cast("BaseLLM", self.llm),
callbacks=self.callbacks,
@@ -1281,7 +1282,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
verbose=self.agent.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise e
finally:
self.iterations += 1
@@ -1316,7 +1317,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if has_reached_max_iterations(self.iterations, self.max_iter):
formatted_answer = handle_max_iterations_exceeded(
None,
printer=self._printer,
printer=PRINTER,
i18n=self._i18n,
messages=self.messages,
llm=cast("BaseLLM", self.llm),
@@ -1336,7 +1337,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
llm=cast("BaseLLM", self.llm),
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
printer=PRINTER,
tools=openai_tools,
available_functions=None,
from_task=self.task,
@@ -1403,7 +1404,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
printer=PRINTER,
messages=self.messages,
llm=cast("BaseLLM", self.llm),
callbacks=self.callbacks,
@@ -1411,7 +1412,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
verbose=self.agent.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise e
finally:
self.iterations += 1
@@ -1428,7 +1429,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
llm=cast("BaseLLM", self.llm),
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
printer=PRINTER,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
@@ -1576,7 +1577,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
if train_iteration is None or not isinstance(train_iteration, int):
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content="Invalid or missing train iteration. Cannot save training data.",
color="red",
)
@@ -1600,7 +1601,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
agent_training_data[train_iteration]["improved_output"] = result.output
else:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=(
f"No existing training data for agent {agent_id} and iteration "
f"{train_iteration}. Cannot save improved output."

View File

@@ -40,7 +40,7 @@ from crewai.utilities.agent_utils import (
)
from crewai.utilities.i18n import I18N, get_i18n
from crewai.utilities.planning_types import TodoItem
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
from crewai.utilities.step_execution_context import StepExecutionContext, StepResult
from crewai.utilities.string_utils import sanitize_tool_name
from crewai.utilities.tool_utils import execute_tool_and_check_finality
@@ -109,7 +109,6 @@ class StepExecutor:
self.request_within_rpm_limit = request_within_rpm_limit
self.callbacks = callbacks or []
self._i18n: I18N = i18n or get_i18n()
self._printer: Printer = Printer()
# Native tool support — set up once
self._use_native_tools = check_native_tool_support(
@@ -585,7 +584,7 @@ class StepExecutor:
task=self.task,
crew=self.crew,
event_source=self,
printer=self._printer,
printer=PRINTER,
verbose=bool(self.agent and self.agent.verbose),
)

View File

@@ -3,17 +3,14 @@ from pathlib import Path
import click
from crewai.cli.utils import copy_template
from crewai.utilities.printer import Printer
_printer = Printer()
from crewai.utilities.printer import PRINTER
def add_crew_to_flow(crew_name: str) -> None:
"""Add a new crew to the current flow."""
# Check if pyproject.toml exists in the current directory
if not Path("pyproject.toml").exists():
_printer.print(
PRINTER.print(
"This command must be run from the root of a flow project.", color="red"
)
raise click.ClickException(
@@ -25,7 +22,7 @@ def add_crew_to_flow(crew_name: str) -> None:
crews_folder = flow_folder / "src" / flow_folder.name / "crews"
if not crews_folder.exists():
_printer.print("Crews folder does not exist in the current flow.", color="red")
PRINTER.print("Crews folder does not exist in the current flow.", color="red")
raise click.ClickException("Crews folder does not exist in the current flow.")
# Create the crew within the flow's crews directory

View File

@@ -0,0 +1,366 @@
"""Textual TUI for browsing checkpoint files."""
from __future__ import annotations
from typing import Any, ClassVar
from textual.app import App, ComposeResult
from textual.binding import Binding
from textual.containers import Horizontal, Vertical
from textual.screen import ModalScreen
from textual.widgets import Button, Footer, Header, OptionList, Static
from textual.widgets.option_list import Option
from crewai.cli.checkpoint_cli import (
_entity_summary,
_format_size,
_is_sqlite,
_list_json,
_list_sqlite,
)
_PRIMARY = "#eb6658"
_SECONDARY = "#1F7982"
_TERTIARY = "#ffffff"
_DIM = "#888888"
_BG_DARK = "#0d1117"
_BG_PANEL = "#161b22"
def _load_entries(location: str) -> list[dict[str, Any]]:
    """Load checkpoint entries, dispatching on storage backend.

    SQLite files are listed via ``_list_sqlite``; anything else is
    treated as a JSON checkpoint directory.
    """
    loader = _list_sqlite if _is_sqlite(location) else _list_json
    return loader(location)
def _format_list_label(entry: dict[str, Any]) -> str:
    """Render one checkpoint entry as a three-line rich-markup label.

    Line 1: bold checkpoint name. Line 2: timestamp, size, and trigger
    (each only when present). Line 3: dimmed entity summary.
    """
    title = f"[bold]{entry.get('name', '')}[/]"

    meta: list[str] = []
    timestamp = entry.get("ts") or ""
    if timestamp:
        meta.append(f"[dim]{timestamp}[/]")
    if "size" in entry:
        meta.append(f"[dim]{_format_size(entry['size'])}[/]")
    fired_by = entry.get("trigger") or ""
    if fired_by:
        meta.append(f"[{_PRIMARY}]{fired_by}[/]")

    summary_line = f" [{_DIM}]{_entity_summary(entry.get('entities', []))}[/]"
    return "\n".join((title, " ".join(meta), summary_line))
def _format_detail(entry: dict[str, Any]) -> str:
    """Format checkpoint details for the right panel.

    Builds a rich-markup string: a name header, a metadata table (time,
    size, events, trigger, path, db — optional keys rendered only when
    present), then one section per entity with an optional task list and
    progress bar.
    """
    lines: list[str] = []

    # Header
    name = entry.get("name", "")
    lines.append(f"[bold {_PRIMARY}]{name}[/]")
    # NOTE(review): ``'' * 50`` repeats an empty string and renders
    # nothing — this looks like a horizontal divider whose box-drawing
    # glyph (e.g. '─') was lost in transit; confirm against the original.
    lines.append(f"[{_DIM}]{'' * 50}[/]")
    lines.append("")

    # Metadata table
    ts = entry.get("ts") or "unknown"
    trigger = entry.get("trigger") or ""
    lines.append(f" [bold]Time[/] {ts}")
    if "size" in entry:
        lines.append(f" [bold]Size[/] {_format_size(entry['size'])}")
    lines.append(f" [bold]Events[/] {entry.get('event_count', 0)}")
    if trigger:
        lines.append(f" [bold]Trigger[/] [{_PRIMARY}]{trigger}[/]")
    if "path" in entry:
        lines.append(f" [bold]Path[/] [{_DIM}]{entry['path']}[/]")
    if "db" in entry:
        lines.append(f" [bold]Database[/] [{_DIM}]{entry['db']}[/]")

    # Entities
    for ent in entry.get("entities", []):
        eid = str(ent.get("id", ""))[:8]  # abbreviated id, git-style
        etype = ent.get("type", "unknown")
        ename = ent.get("name", "unnamed")
        lines.append("")
        # NOTE(review): same suspected lost divider glyph as above.
        lines.append(f" [{_DIM}]{'' * 50}[/]")
        lines.append(f" [bold {_SECONDARY}]{etype}[/]: {ename} [{_DIM}]{eid}[/]")

        tasks = ent.get("tasks")
        if isinstance(tasks, list):
            # Progress bar — both divisions are guarded against total == 0.
            completed = ent.get("tasks_completed", 0)
            total = ent.get("tasks_total", 0)
            pct = int(completed / total * 100) if total else 0
            bar_len = 20
            filled = int(bar_len * completed / total) if total else 0
            # NOTE(review): the bar fill/empty glyphs also appear lost
            # (empty-string repetition); likely '█' and '░' originally —
            # verify against the source file.
            bar = f"[{_PRIMARY}]{'' * filled}[/][{_DIM}]{'' * (bar_len - filled)}[/]"
            lines.append(f" {bar} {completed}/{total} tasks ({pct}%)")
            lines.append("")
            for i, task in enumerate(tasks):
                if task.get("completed"):
                    icon = "[green]✓[/]"
                else:
                    icon = "[yellow]○[/]"
                desc = str(task.get("description", ""))
                # Truncate long descriptions so rows fit the panel width.
                if len(desc) > 55:
                    desc = desc[:52] + "..."
                lines.append(f" {icon} {i + 1}. {desc}")

    return "\n".join(lines)
class ConfirmResumeScreen(ModalScreen[bool]):
    """Modal confirmation shown before resuming from a checkpoint.

    Dismisses with ``True`` (resume) or ``False`` (cancel); the value is
    delivered to the callback supplied to ``push_screen``.
    """

    # Screen-scoped stylesheet. This is an f-string, so literal CSS
    # braces are doubled and the color constants are interpolated.
    CSS = f"""
    ConfirmResumeScreen {{
        align: center middle;
    }}
    #confirm-dialog {{
        width: 60;
        height: auto;
        padding: 1 2;
        background: {_BG_PANEL};
        border: round {_PRIMARY};
    }}
    #confirm-label {{
        width: 100%;
        content-align: center middle;
        margin-bottom: 1;
    }}
    #confirm-name {{
        width: 100%;
        content-align: center middle;
        color: {_PRIMARY};
        text-style: bold;
        margin-bottom: 1;
    }}
    #confirm-buttons {{
        width: 100%;
        height: 3;
        layout: horizontal;
        align: center middle;
    }}
    Button {{
        margin: 0 2;
        min-width: 12;
    }}
    """

    def __init__(self, checkpoint_name: str) -> None:
        """Store the checkpoint name rendered in the dialog body."""
        super().__init__()
        self._checkpoint_name = checkpoint_name

    def compose(self) -> ComposeResult:
        """Build the dialog: prompt, checkpoint name, Resume/Cancel buttons."""
        with Vertical(id="confirm-dialog"):
            yield Static("Resume from this checkpoint?", id="confirm-label")
            yield Static(self._checkpoint_name, id="confirm-name")
            with Horizontal(id="confirm-buttons"):
                yield Button("Resume", variant="success", id="btn-yes")
                yield Button("Cancel", variant="default", id="btn-no")

    def on_button_pressed(self, event: Button.Pressed) -> None:
        # Result is True only for the "Resume" button.
        self.dismiss(event.button.id == "btn-yes")

    def on_key(self, event: Any) -> None:
        # Keyboard shortcuts: y = resume; n or escape = cancel.
        if event.key == "y":
            self.dismiss(True)
        elif event.key in ("n", "escape"):
            self.dismiss(False)
class CheckpointTUI(App[str | None]):
    """TUI to browse and inspect checkpoints.

    Returns the checkpoint location string to resume from, or None if
    the user quit without selecting.
    """

    TITLE = "CrewAI Checkpoints"

    # App-wide stylesheet. This is an f-string: literal CSS braces are
    # doubled and the module-level color constants are interpolated.
    CSS = f"""
    Screen {{
        background: {_BG_DARK};
    }}
    Header {{
        background: {_PRIMARY};
        color: {_TERTIARY};
    }}
    Footer {{
        background: {_SECONDARY};
        color: {_TERTIARY};
    }}
    Footer > .footer-key--key {{
        background: {_PRIMARY};
        color: {_TERTIARY};
    }}
    Horizontal {{
        height: 1fr;
    }}
    #cp-list {{
        width: 38%;
        background: {_BG_PANEL};
        border: round {_SECONDARY};
        padding: 0 1;
        scrollbar-color: {_PRIMARY};
    }}
    #cp-list:focus {{
        border: round {_PRIMARY};
    }}
    #cp-list > .option-list--option-highlighted {{
        background: {_SECONDARY};
        color: {_TERTIARY};
        text-style: none;
    }}
    #cp-list > .option-list--option-highlighted * {{
        color: {_TERTIARY};
    }}
    #detail-container {{
        width: 62%;
        padding: 0 1;
    }}
    #detail {{
        height: 1fr;
        background: {_BG_PANEL};
        border: round {_SECONDARY};
        padding: 1 2;
        overflow-y: auto;
        scrollbar-color: {_PRIMARY};
    }}
    #detail:focus {{
        border: round {_PRIMARY};
    }}
    #status {{
        height: 1;
        padding: 0 2;
        color: {_DIM};
    }}
    """

    # Keys shown in the Footer; j/k mirror vi-style list navigation.
    BINDINGS: ClassVar[list[Binding | tuple[str, str] | tuple[str, str, str]]] = [
        ("q", "quit", "Quit"),
        ("r", "refresh", "Refresh"),
        ("j", "cursor_down", "Down"),
        ("k", "cursor_up", "Up"),
    ]

    def __init__(self, location: str = "./.checkpoints") -> None:
        """Initialize browser state.

        Args:
            location: Checkpoint directory or SQLite file to browse.
        """
        super().__init__()
        self._location = location
        # Entries currently shown; index-aligned with the OptionList.
        self._entries: list[dict[str, Any]] = []
        self._selected_idx: int = 0
        # Location chosen in the list, awaiting modal confirmation.
        self._pending_location: str = ""

    def compose(self) -> ComposeResult:
        """Lay out header, list/detail split, status line, and footer."""
        yield Header(show_clock=False)
        with Horizontal():
            yield OptionList(id="cp-list")
            with Vertical(id="detail-container"):
                yield Static("", id="status")
                yield Static(
                    f"\n [{_DIM}]Select a checkpoint from the list[/]",  # noqa: S608
                    id="detail",
                )
        yield Footer()

    async def on_mount(self) -> None:
        """Set panel titles and populate the list on startup."""
        self.query_one("#cp-list", OptionList).border_title = "Checkpoints"
        self.query_one("#detail", Static).border_title = "Detail"
        self._refresh_list()

    def _refresh_list(self) -> None:
        """Reload entries from disk and rebuild the option list.

        When no checkpoints are found, shows a placeholder in the detail
        panel and clears the status line.
        """
        self._entries = _load_entries(self._location)
        option_list = self.query_one("#cp-list", OptionList)
        option_list.clear_options()

        if not self._entries:
            self.query_one("#detail", Static).update(
                f"\n [{_DIM}]No checkpoints in {self._location}[/]"
            )
            self.query_one("#status", Static).update("")
            self.sub_title = self._location
            return

        for entry in self._entries:
            option_list.add_option(Option(_format_list_label(entry)))

        count = len(self._entries)
        storage = "SQLite" if _is_sqlite(self._location) else "JSON"
        self.sub_title = f"{self._location}"
        self.query_one("#status", Static).update(f" {count} checkpoint(s) | {storage}")

    async def on_option_list_option_highlighted(
        self,
        event: OptionList.OptionHighlighted,
    ) -> None:
        """Update the detail panel as the highlight moves through the list."""
        idx = event.option_index
        if idx is None:
            return
        if idx < len(self._entries):
            self._selected_idx = idx
            entry = self._entries[idx]
            self.query_one("#detail", Static).update(_format_detail(entry))

    def action_cursor_down(self) -> None:
        # Forward the j binding to the OptionList's own cursor action.
        self.query_one("#cp-list", OptionList).action_cursor_down()

    def action_cursor_up(self) -> None:
        # Forward the k binding to the OptionList's own cursor action.
        self.query_one("#cp-list", OptionList).action_cursor_up()

    async def on_option_list_option_selected(
        self,
        event: OptionList.OptionSelected,
    ) -> None:
        """Resolve the selected entry to a resumable location and confirm.

        The location is, in order of preference: the entry's explicit
        "path", a "<sqlite-file>#<name>" reference for SQLite storage,
        or the bare entry name.
        """
        idx = event.option_index
        if idx is None or idx >= len(self._entries):
            return
        entry = self._entries[idx]
        if "path" in entry:
            loc = entry["path"]
        elif _is_sqlite(self._location):
            loc = f"{self._location}#{entry['name']}"
        else:
            loc = entry.get("name", "")
        self._pending_location = loc
        name = entry.get("name", loc)
        # _on_confirm receives the modal's True/False result.
        self.push_screen(ConfirmResumeScreen(name), self._on_confirm)

    def _on_confirm(self, confirmed: bool | None) -> None:
        """Exit with the pending location if confirmed; otherwise reset it."""
        if confirmed:
            self.exit(self._pending_location)
        else:
            self._pending_location = ""

    def action_refresh(self) -> None:
        """Re-scan the checkpoint location (r binding)."""
        self._refresh_list()
async def _run_checkpoint_tui_async(location: str) -> None:
    """Drive the checkpoint TUI and, on confirmation, resume the crew.

    Runs the browser with ``run_async``; if the user picks a checkpoint,
    rebuilds the crew from it and awaits ``akickoff``.
    """
    import click

    chosen = await CheckpointTUI(location=location).run_async()
    if chosen is None:
        # User quit the browser without selecting a checkpoint.
        return

    click.echo(f"\nResuming from: {chosen}\n")

    # Imported lazily to keep TUI startup light.
    from crewai.crew import Crew

    outcome = await Crew.from_checkpoint(chosen).akickoff()
    click.echo(f"\nResult: {getattr(outcome, 'raw', outcome)}")
def run_checkpoint_tui(location: str = "./.checkpoints") -> None:
    """Synchronous entry point: run the async checkpoint browser to completion."""
    import asyncio

    coro = _run_checkpoint_tui_async(location)
    asyncio.run(coro)

View File

@@ -786,9 +786,19 @@ def traces_status() -> None:
console.print(panel)
@crewai.group()
def checkpoint() -> None:
"""Inspect checkpoint files."""
@crewai.group(invoke_without_command=True)
@click.option(
"--location", default="./.checkpoints", help="Checkpoint directory or SQLite file."
)
@click.pass_context
def checkpoint(ctx: click.Context, location: str) -> None:
"""Browse and inspect checkpoints. Launches a TUI when called without a subcommand."""
ctx.ensure_object(dict)
ctx.obj["location"] = location
if ctx.invoked_subcommand is None:
from crewai.cli.checkpoint_tui import run_checkpoint_tui
run_checkpoint_tui(location)
@checkpoint.command("list")

View File

@@ -19,12 +19,10 @@ from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.types.crew_chat import ChatInputField, ChatInputs
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
from crewai.utilities.types import LLMMessage
_printer = Printer()
MIN_REQUIRED_VERSION: Final[Literal["0.98.0"]] = "0.98.0"
@@ -121,9 +119,9 @@ def run_chat() -> None:
def show_loading(event: threading.Event) -> None:
"""Display animated loading dots while processing."""
while not event.is_set():
_printer.print(".", end="")
PRINTER.print(".", end="")
time.sleep(1)
_printer.print("")
PRINTER.print("")
def initialize_chat_llm(crew: Crew) -> LLM | BaseLLM | None:

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.14.0"
"crewai[tools]==1.14.1"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.14.0"
"crewai[tools]==1.14.1"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.14.0"
"crewai[tools]==1.14.1"
]
[tool.crewai]

View File

@@ -134,6 +134,7 @@ from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.streaming import (
create_async_chunk_generator,
create_chunk_generator,
register_cleanup,
signal_end,
signal_error,
)
@@ -379,8 +380,12 @@ class Crew(FlowTrackable, BaseModel):
from crewai.context import apply_execution_context
from crewai.events.event_bus import crewai_event_bus
from crewai.state.provider.json_provider import JsonProvider
from crewai.state.provider.utils import detect_provider
from crewai.state.runtime import RuntimeState
if provider is None:
provider = detect_provider(path)
state = RuntimeState.from_checkpoint(
path,
provider=provider or JsonProvider(),
@@ -882,6 +887,7 @@ class Crew(FlowTrackable, BaseModel):
ctx.state, run_crew, ctx.output_holder
)
)
register_cleanup(streaming_output, ctx.state)
ctx.output_holder.append(streaming_output)
return streaming_output
@@ -1007,6 +1013,7 @@ class Crew(FlowTrackable, BaseModel):
ctx.state, run_crew, ctx.output_holder
)
)
register_cleanup(streaming_output, ctx.state)
ctx.output_holder.append(streaming_output)
return streaming_output
@@ -1078,6 +1085,7 @@ class Crew(FlowTrackable, BaseModel):
ctx.state, run_crew, ctx.output_holder
)
)
register_cleanup(streaming_output, ctx.state)
ctx.output_holder.append(streaming_output)
return streaming_output

View File

@@ -431,6 +431,7 @@ async def run_for_each_async(
from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities.streaming import (
create_async_chunk_generator,
register_cleanup,
signal_end,
signal_error,
)
@@ -480,6 +481,7 @@ async def run_for_each_async(
streaming_output._set_results(result)
streaming_output._set_result = set_results_wrapper # type: ignore[method-assign]
register_cleanup(streaming_output, ctx.state)
ctx.output_holder.append(streaming_output)
return streaming_output

View File

@@ -98,7 +98,7 @@ from crewai.utilities.planning_types import (
TodoItem,
TodoList,
)
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
from crewai.utilities.step_execution_context import StepExecutionContext, StepResult
from crewai.utilities.string_utils import sanitize_tool_name
from crewai.utilities.tool_utils import execute_tool_and_check_finality
@@ -199,7 +199,6 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
)
_i18n: I18N = PrivateAttr(default_factory=get_i18n)
_printer: Printer = PrivateAttr(default_factory=Printer)
_console: Console = PrivateAttr(default_factory=Console)
_last_parser_error: OutputParserError | None = PrivateAttr(default=None)
_last_context_error: Exception | None = PrivateAttr(default=None)
@@ -503,7 +502,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=(
f"[Observe] Step {current_todo.step_number} "
f"(effort={effort}): "
@@ -553,7 +552,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
current_todo.step_number, result=current_todo.result
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=(
f"[Low] Step {current_todo.step_number} hard-failed "
f"— triggering replan: {observation.replan_reason}"
@@ -572,7 +571,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.agent.verbose:
completed = self.state.todos.completed_count
total = len(self.state.todos.items)
self._printer.print(
PRINTER.print(
content=f"[Low] Step {current_todo.step_number} done ({completed}/{total}) — continuing",
color="green",
)
@@ -605,7 +604,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.agent.verbose:
completed = self.state.todos.completed_count
total = len(self.state.todos.items)
self._printer.print(
PRINTER.print(
content=f"[Medium] Step {current_todo.step_number} succeeded ({completed}/{total}) — continuing",
color="green",
)
@@ -618,7 +617,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
current_todo.step_number, result=current_todo.result
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=(
f"[Medium] Step {current_todo.step_number} failed + replan required "
f"— triggering replan: {observation.replan_reason}"
@@ -638,7 +637,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.agent.verbose:
failed = len(self.state.todos.get_failed_todos())
total = len(self.state.todos.items)
self._printer.print(
PRINTER.print(
content=(
f"[Medium] Step {current_todo.step_number} failed but no replan needed "
f"({failed} failed/{total} total) — continuing"
@@ -680,7 +679,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
current_todo.step_number, result=current_todo.result
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content="[Decide] Goal achieved early — finalizing",
color="green",
)
@@ -692,7 +691,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
current_todo.step_number, result=current_todo.result
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"[Decide] Full replan needed: {observation.replan_reason}",
color="yellow",
)
@@ -705,7 +704,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
current_todo.step_number, result=current_todo.result
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content="[Decide] Step failed — triggering replan",
color="yellow",
)
@@ -718,7 +717,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
current_todo.step_number, result=current_todo.result
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content="[Decide] Plan valid but refining upcoming steps",
color="cyan",
)
@@ -731,7 +730,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.agent.verbose:
completed = self.state.todos.completed_count
total = len(self.state.todos.items)
self._printer.print(
PRINTER.print(
content=f"[Decide] Continue plan ({completed}/{total} done)",
color="green",
)
@@ -776,7 +775,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"[Refine] Updated {len(remaining)} pending step(s)",
color="cyan",
)
@@ -811,7 +810,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content="Goal achieved early — skipping remaining steps",
color="green",
)
@@ -829,7 +828,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.state.replan_count >= max_replans:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Max replans ({max_replans}) reached — finalizing with current results",
color="yellow",
)
@@ -936,7 +935,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
# Plan-and-Execute path: use StepExecutor for isolated execution
if getattr(self.agent, "planning_enabled", False):
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=(
f"[Execute] Step {current.step_number}: "
f"{current.description[:60]}..."
@@ -971,7 +970,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.agent.verbose:
status = "success" if result.success else "failed"
self._printer.print(
PRINTER.print(
content=(
f"[Execute] Step {current.step_number} {status} "
f"({result.execution_time:.1f}s, "
@@ -1080,7 +1079,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
todo.result = error_msg
self.state.todos.mark_failed(todo.step_number, result=error_msg)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Todo {todo.step_number} failed: {error_msg}",
color="red",
)
@@ -1105,7 +1104,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.agent.verbose:
status = "success" if step_result.success else "failed"
self._printer.print(
PRINTER.print(
content=(
f"[Execute] Step {todo.step_number} {status} "
f"({step_result.execution_time:.1f}s, "
@@ -1152,7 +1151,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
self.state.todos.mark_failed(todo.step_number, result=todo.result)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=(
f"[Observe] Step {todo.step_number} "
f"(effort={effort}): "
@@ -1203,7 +1202,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
"""Force agent to provide final answer when max iterations exceeded."""
formatted_answer = handle_max_iterations_exceeded(
formatted_answer=None,
printer=self._printer,
printer=PRINTER,
i18n=self._i18n,
messages=list(self.state.messages),
llm=self.llm,
@@ -1232,7 +1231,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
llm=self.llm,
messages=list(self.state.messages),
callbacks=self.callbacks,
printer=self._printer,
printer=PRINTER,
from_task=self.task,
from_agent=self.agent,
response_model=self.response_model,
@@ -1282,7 +1281,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
return "context_error"
if e.__class__.__module__.startswith("litellm"):
raise e
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise
@router("continue_reasoning_native")
@@ -1318,7 +1317,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
llm=self.llm,
messages=list(self.state.messages),
callbacks=self.callbacks,
printer=self._printer,
printer=PRINTER,
tools=self._openai_tools,
available_functions=None,
from_task=self.task,
@@ -1373,7 +1372,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
return "context_error"
if e.__class__.__module__.startswith("litellm"):
raise e
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise
def _route_finish_with_todos(
@@ -1442,9 +1441,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
)
except Exception as e:
if self.agent and self.agent.verbose:
self._printer.print(
content=f"Error in tool execution: {e}", color="red"
)
PRINTER.print(content=f"Error in tool execution: {e}", color="red")
if self.task:
self.task.increment_tools_errors()
@@ -1598,7 +1595,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
# Log the tool execution
if self.agent and self.agent.verbose:
cache_info = " (from cache)" if from_cache else ""
self._printer.print(
PRINTER.print(
content=f"Tool {func_name} executed with result{cache_info}: {result[:200]}...",
color="green",
)
@@ -1636,7 +1633,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
# Log the tool execution
if self.agent and self.agent.verbose:
cache_info = " (from cache)" if from_cache else ""
self._printer.print(
PRINTER.print(
content=f"Tool {func_name} executed with result{cache_info}: {result[:200]}...",
color="green",
)
@@ -1800,7 +1797,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
break
except Exception as hook_error:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Error in before_tool_call hook: {hook_error}",
color="red",
)
@@ -1875,7 +1872,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
after_hook_context.tool_result = result
except Exception as hook_error:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Error in after_tool_call hook: {hook_error}",
color="red",
)
@@ -2033,7 +2030,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.agent.verbose:
completed = self.state.todos.completed_count
total = len(self.state.todos.items)
self._printer.print(
PRINTER.print(
content=f"✓ Todo {step_number} completed ({completed}/{total})",
color="green",
)
@@ -2100,7 +2097,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
self._finalize_called = True
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"[Finalize] todos_count={len(self.state.todos.items)}, todos_with_results={sum(1 for t in self.state.todos.items if t.result)}",
color="magenta",
)
@@ -2263,7 +2260,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
except Exception as e:
if self.agent and self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Synthesis LLM call failed ({e}), falling back to concatenation",
color="yellow",
)
@@ -2348,7 +2345,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
self.state.last_replan_reason = reason
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Triggering replan (attempt {self.state.replan_count}): {reason}",
color="yellow",
)
@@ -2408,7 +2405,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
self.state.todos.replace_pending_todos(new_todos)
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Replan: {len(new_todos)} new steps (completed history preserved)",
color="green",
)
@@ -2492,7 +2489,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
if self.state.replan_count >= max_replans:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Max replans ({max_replans}) reached — finalizing with current results",
color="yellow",
)
@@ -2518,7 +2515,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
messages=list(self.state.messages),
iterations=self.state.iterations,
log_error_after=self.log_error_after,
printer=self._printer,
printer=PRINTER,
verbose=self.agent.verbose,
)
@@ -2534,7 +2531,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
"""Recover from context length errors and retry."""
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
printer=PRINTER,
messages=self.state.messages,
llm=self.llm,
callbacks=self.callbacks,
@@ -2637,7 +2634,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
self._console.print(fail_text)
raise
except Exception as e:
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise
finally:
self._is_executing = False
@@ -2728,7 +2725,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
self._console.print(fail_text)
raise
except Exception as e:
handle_unknown_error(self._printer, e, verbose=self.agent.verbose)
handle_unknown_error(PRINTER, e, verbose=self.agent.verbose)
raise
finally:
self._is_executing = False
@@ -2793,7 +2790,7 @@ class AgentExecutor(Flow[AgentExecutorState], BaseAgentExecutor): # type: ignor
task.result()
except Exception as e:
if self.agent.verbose:
self._printer.print(
PRINTER.print(
content=f"Error in async step_callback task: {e!s}",
color="red",
)

View File

@@ -132,6 +132,7 @@ from crewai.utilities.streaming import (
create_async_chunk_generator,
create_chunk_generator,
create_streaming_state,
register_cleanup,
signal_end,
signal_error,
)
@@ -1962,6 +1963,7 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta):
streaming_output = FlowStreamingOutput(
sync_iterator=create_chunk_generator(state, run_flow, output_holder)
)
register_cleanup(streaming_output, state)
output_holder.append(streaming_output)
return streaming_output
@@ -2035,6 +2037,7 @@ class Flow(BaseModel, Generic[T], metaclass=FlowMeta):
state, run_flow, output_holder
)
)
register_cleanup(streaming_output, state)
output_holder.append(streaming_output)
return streaming_output

View File

@@ -28,13 +28,13 @@ import asyncio
from collections.abc import Callable
import functools
import logging
from typing import TYPE_CHECKING, Any, ClassVar, Final, TypeVar, cast
from typing import TYPE_CHECKING, Any, Final, TypeVar, cast
from pydantic import BaseModel
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
if TYPE_CHECKING:
@@ -56,8 +56,6 @@ LOG_MESSAGES: Final[dict[str, str]] = {
class PersistenceDecorator:
"""Class to handle flow state persistence with consistent logging."""
_printer: ClassVar[Printer] = Printer()
@classmethod
def persist_state(
cls,
@@ -104,7 +102,7 @@ class PersistenceDecorator:
# Log state saving only if verbose is True
if verbose:
cls._printer.print(
PRINTER.print(
LOG_MESSAGES["save_state"].format(flow_uuid), color="cyan"
)
logger.info(LOG_MESSAGES["save_state"].format(flow_uuid))
@@ -119,19 +117,19 @@ class PersistenceDecorator:
except Exception as e:
error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))
if verbose:
cls._printer.print(error_msg, color="red")
PRINTER.print(error_msg, color="red")
logger.error(error_msg)
raise RuntimeError(f"State persistence failed: {e!s}") from e
except AttributeError as e:
error_msg = LOG_MESSAGES["state_missing"]
if verbose:
cls._printer.print(error_msg, color="red")
PRINTER.print(error_msg, color="red")
logger.error(error_msg)
raise ValueError(error_msg) from e
except (TypeError, ValueError) as e:
error_msg = LOG_MESSAGES["id_missing"]
if verbose:
cls._printer.print(error_msg, color="red")
PRINTER.print(error_msg, color="red")
logger.error(error_msg)
raise ValueError(error_msg) from e

View File

@@ -32,14 +32,12 @@ from crewai.flow.flow_wrappers import (
SimpleFlowCondition,
)
from crewai.flow.types import FlowMethodCallable, FlowMethodName
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
if TYPE_CHECKING:
from crewai.flow.flow import Flow
_printer = Printer()
def _extract_string_literals_from_type_annotation(
node: ast.expr,
@@ -181,7 +179,7 @@ def get_possible_return_constants(
return None
except Exception as e:
if verbose:
_printer.print(
PRINTER.print(
f"Error retrieving source code for function {function.__name__}: {e}",
color="red",
)
@@ -194,27 +192,27 @@ def get_possible_return_constants(
code_ast = ast.parse(source)
except IndentationError as e:
if verbose:
_printer.print(
PRINTER.print(
f"IndentationError while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
PRINTER.print(f"Source code:\n{source}", color="yellow")
return None
except SyntaxError as e:
if verbose:
_printer.print(
PRINTER.print(
f"SyntaxError while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
PRINTER.print(f"Source code:\n{source}", color="yellow")
return None
except Exception as e:
if verbose:
_printer.print(
PRINTER.print(
f"Unexpected error while parsing source code of {function.__name__}: {e}",
color="red",
)
_printer.print(f"Source code:\n{source}", color="yellow")
PRINTER.print(f"Source code:\n{source}", color="yellow")
return None
return_values: set[str] = set()
@@ -395,13 +393,13 @@ def get_possible_return_constants(
StateAttributeVisitor().visit(class_ast)
except Exception as e:
if verbose:
_printer.print(
PRINTER.print(
f"Could not analyze class context for {function.__name__}: {e}",
color="yellow",
)
except Exception as e:
if verbose:
_printer.print(
PRINTER.print(
f"Could not introspect class for {function.__name__}: {e}",
color="yellow",
)

View File

@@ -5,6 +5,8 @@ from functools import wraps
import inspect
from typing import TYPE_CHECKING, Any, TypeVar, overload
from crewai.utilities.string_utils import sanitize_tool_name
if TYPE_CHECKING:
from crewai.hooks.llm_hooks import LLMCallHookContext
@@ -37,6 +39,9 @@ def _create_hook_decorator(
tools: list[str] | None = None,
agents: list[str] | None = None,
) -> Callable[..., Any]:
if tools:
tools = [sanitize_tool_name(t) for t in tools]
def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
setattr(f, marker_attribute, True)

View File

@@ -9,7 +9,7 @@ from crewai.hooks.types import (
BeforeLLMCallHookCallable,
BeforeLLMCallHookType,
)
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
if TYPE_CHECKING:
@@ -138,16 +138,15 @@ class LLMCallHookContext:
... print("LLM call skipped by user")
"""
printer = Printer()
event_listener.formatter.pause_live_updates()
try:
printer.print(content=f"\n{prompt}", color="bold_yellow")
printer.print(content=default_message, color="cyan")
PRINTER.print(content=f"\n{prompt}", color="bold_yellow")
PRINTER.print(content=default_message, color="cyan")
response = input().strip()
if response:
printer.print(content="\nProcessing your input...", color="cyan")
PRINTER.print(content="\nProcessing your input...", color="cyan")
return response
finally:

View File

@@ -9,7 +9,7 @@ from crewai.hooks.types import (
BeforeToolCallHookCallable,
BeforeToolCallHookType,
)
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
if TYPE_CHECKING:
@@ -100,16 +100,15 @@ class ToolCallHookContext:
... return None # Allow execution
"""
printer = Printer()
event_listener.formatter.pause_live_updates()
try:
printer.print(content=f"\n{prompt}", color="bold_yellow")
printer.print(content=default_message, color="cyan")
PRINTER.print(content=f"\n{prompt}", color="bold_yellow")
PRINTER.print(content=default_message, color="cyan")
response = input().strip()
if response:
printer.print(content="\nProcessing your input...", color="cyan")
PRINTER.print(content="\nProcessing your input...", color="cyan")
return response
finally:

View File

@@ -91,7 +91,7 @@ from crewai.utilities.guardrail import process_guardrail
from crewai.utilities.guardrail_types import GuardrailCallable, GuardrailType
from crewai.utilities.i18n import I18N, get_i18n
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.tool_utils import execute_tool_and_check_finality
@@ -270,7 +270,6 @@ class LiteAgent(FlowTrackable, BaseModel):
_key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4()))
_messages: list[LLMMessage] = PrivateAttr(default_factory=list)
_iterations: int = PrivateAttr(default=0)
_printer: Printer = PrivateAttr(default_factory=Printer)
_guardrail: GuardrailCallable | None = PrivateAttr(default=None)
_guardrail_retry_count: int = PrivateAttr(default=0)
_callbacks: list[TokenCalcHandler] = PrivateAttr(default_factory=list)
@@ -528,11 +527,11 @@ class LiteAgent(FlowTrackable, BaseModel):
except Exception as e:
if self.verbose:
self._printer.print(
PRINTER.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
handle_unknown_error(self._printer, e, verbose=self.verbose)
handle_unknown_error(PRINTER, e, verbose=self.verbose)
# Emit error event
crewai_event_bus.emit(
self,
@@ -609,7 +608,7 @@ class LiteAgent(FlowTrackable, BaseModel):
self._memory.remember_many(extracted, agent_role=self.role)
except Exception as e:
if self.verbose:
self._printer.print(
PRINTER.print(
content=f"Failed to save to memory: {e}",
color="yellow",
)
@@ -661,7 +660,7 @@ class LiteAgent(FlowTrackable, BaseModel):
formatted_result = result
except ConverterError as e:
if self.verbose:
self._printer.print(
PRINTER.print(
content=f"Failed to parse output into response format after retries: {e.message}",
color="yellow",
)
@@ -704,7 +703,7 @@ class LiteAgent(FlowTrackable, BaseModel):
)
self._guardrail_retry_count += 1
if self.verbose:
self._printer.print(
PRINTER.print(
f"Guardrail failed. Retrying ({self._guardrail_retry_count}/{self.guardrail_max_retries})..."
f"\n{guardrail_result.error}"
)
@@ -875,7 +874,7 @@ class LiteAgent(FlowTrackable, BaseModel):
if has_reached_max_iterations(self._iterations, self.max_iterations):
formatted_answer = handle_max_iterations_exceeded(
formatted_answer,
printer=self._printer,
printer=PRINTER,
i18n=self.i18n,
messages=self._messages,
llm=cast(LLM, self.llm),
@@ -890,7 +889,7 @@ class LiteAgent(FlowTrackable, BaseModel):
llm=cast(LLM, self.llm),
messages=self._messages,
callbacks=self._callbacks,
printer=self._printer,
printer=PRINTER,
from_agent=self, # type: ignore[arg-type]
executor_context=self,
response_model=response_model,
@@ -933,7 +932,7 @@ class LiteAgent(FlowTrackable, BaseModel):
self._append_message(formatted_answer.text, role="assistant")
except OutputParserError as e:
if self.verbose:
self._printer.print(
PRINTER.print(
content="Failed to parse LLM output. Retrying...",
color="yellow",
)
@@ -942,7 +941,7 @@ class LiteAgent(FlowTrackable, BaseModel):
messages=self._messages,
iterations=self._iterations,
log_error_after=3,
printer=self._printer,
printer=PRINTER,
verbose=self.verbose,
)
@@ -953,7 +952,7 @@ class LiteAgent(FlowTrackable, BaseModel):
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
printer=PRINTER,
messages=self._messages,
llm=cast(LLM, self.llm),
callbacks=self._callbacks,
@@ -961,7 +960,7 @@ class LiteAgent(FlowTrackable, BaseModel):
verbose=self.verbose,
)
continue
handle_unknown_error(self._printer, e, verbose=self.verbose)
handle_unknown_error(PRINTER, e, verbose=self.verbose)
raise e
finally:

View File

@@ -857,7 +857,7 @@ class BaseLLM(BaseModel, ABC):
LLMCallHookContext,
get_before_llm_call_hooks,
)
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
before_hooks = get_before_llm_call_hooks()
if not before_hooks:
@@ -872,21 +872,20 @@ class BaseLLM(BaseModel, ABC):
crew=None,
)
verbose = getattr(from_agent, "verbose", True) if from_agent else True
printer = Printer()
try:
for hook in before_hooks:
result = hook(hook_context)
if result is False:
if verbose:
printer.print(
PRINTER.print(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
except Exception as e:
if verbose:
printer.print(
PRINTER.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)
@@ -927,7 +926,7 @@ class BaseLLM(BaseModel, ABC):
LLMCallHookContext,
get_after_llm_call_hooks,
)
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
after_hooks = get_after_llm_call_hooks()
if not after_hooks:
@@ -943,7 +942,6 @@ class BaseLLM(BaseModel, ABC):
response=response,
)
verbose = getattr(from_agent, "verbose", True) if from_agent else True
printer = Printer()
modified_response = response
try:
@@ -954,7 +952,7 @@ class BaseLLM(BaseModel, ABC):
hook_context.response = modified_response
except Exception as e:
if verbose:
printer.print(
PRINTER.print(
content=f"Error in after_llm_call hook: {e}",
color="yellow",
)

View File

@@ -6,7 +6,6 @@ import sqlite3
from typing import Any
from crewai.task import Task
from crewai.utilities import Printer
from crewai.utilities.crew_json_encoder import CrewJSONEncoder
from crewai.utilities.errors import DatabaseError, DatabaseOperationError
from crewai.utilities.lock_store import lock as store_lock
@@ -27,7 +26,6 @@ class KickoffTaskOutputsSQLiteStorage:
db_path = str(Path(db_storage_path()) / "latest_kickoff_task_outputs.db")
self.db_path = db_path
self._lock_name = f"sqlite:{os.path.realpath(self.db_path)}"
self._printer: Printer = Printer()
self._initialize_db()
def _initialize_db(self) -> None:

View File

@@ -6,10 +6,7 @@ from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from typing_extensions import Unpack
from crewai.rag.embeddings.providers.ibm.types import WatsonXProviderConfig
from crewai.utilities.printer import Printer
_printer = Printer()
from crewai.utilities.printer import PRINTER
class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
@@ -164,5 +161,5 @@ class WatsonXEmbeddingFunction(EmbeddingFunction[Documents]):
return cast(Embeddings, embeddings)
except Exception as e:
if self._verbose:
_printer.print(f"Error during WatsonX embedding: {e}", color="red")
PRINTER.print(f"Error during WatsonX embedding: {e}", color="red")
raise

View File

@@ -0,0 +1,34 @@
"""Provider detection utilities."""
from __future__ import annotations
from crewai.state.provider.core import BaseProvider
_SQLITE_MAGIC = b"SQLite format 3\x00"
def detect_provider(path: str) -> BaseProvider:
"""Detect the storage provider from a checkpoint path.
Reads the file's magic bytes to determine if it's a SQLite database.
For paths containing ``#``, checks the portion before the ``#``.
Falls back to JsonProvider.
Args:
path: A checkpoint file path, directory, or ``db_path#checkpoint_id``.
Returns:
The appropriate provider instance.
"""
from crewai.state.provider.json_provider import JsonProvider
from crewai.state.provider.sqlite_provider import SqliteProvider
file_path = path.split("#")[0] if "#" in path else path
try:
with open(file_path, "rb") as f:
if f.read(16) == _SQLITE_MAGIC:
return SqliteProvider()
except OSError:
pass
return JsonProvider()

View File

@@ -81,13 +81,10 @@ from crewai.utilities.guardrail_types import (
GuardrailsType,
)
from crewai.utilities.i18n import I18N, get_i18n
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
from crewai.utilities.string_utils import interpolate_only
_printer = Printer()
class Task(BaseModel):
"""Class that represents a task to be executed.
@@ -981,7 +978,7 @@ Follow these guidelines:
crew_chat_messages = json.loads(crew_chat_messages_json)
except json.JSONDecodeError as e:
if self.agent and self.agent.verbose:
_printer.print(
PRINTER.print(
f"An error occurred while parsing crew chat messages: {e}",
color="red",
)
@@ -1227,8 +1224,7 @@ Follow these guidelines:
task_output=task_output.raw,
)
if agent and agent.verbose:
printer = Printer()
printer.print(
PRINTER.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)
@@ -1325,8 +1321,7 @@ Follow these guidelines:
task_output=task_output.raw,
)
if agent and agent.verbose:
printer = Printer()
printer.print(
PRINTER.print(
content=f"Guardrail {guardrail_index if guardrail_index is not None else ''} blocked (attempt {attempt + 1}/{max_attempts}), retrying due to: {guardrail_result.error}\n",
color="yellow",
)

View File

@@ -38,13 +38,10 @@ from crewai.tools.structured_tool import (
build_schema_hint,
)
from crewai.types.callback import SerializableCallable, _resolve_dotted_path
from crewai.utilities.printer import Printer
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.string_utils import sanitize_tool_name
_printer = Printer()
P = ParamSpec("P")
R = TypeVar("R", covariant=True)

View File

@@ -29,7 +29,7 @@ from crewai.utilities.agent_utils import (
)
from crewai.utilities.converter import Converter
from crewai.utilities.i18n import I18N, get_i18n
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
from crewai.utilities.string_utils import sanitize_tool_name
@@ -94,7 +94,6 @@ class ToolUsage:
fingerprint_context: dict[str, str] | None = None,
) -> None:
self._i18n: I18N = agent.i18n if agent else get_i18n()
self._printer: Printer = Printer()
self._telemetry: Telemetry = Telemetry()
self._run_attempts: int = 1
self._max_parsing_attempts: int = 3
@@ -129,7 +128,7 @@ class ToolUsage:
if isinstance(calling, ToolUsageError):
error = calling.message
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
PRINTER.print(content=f"\n\n{error}\n", color="red")
if self.task:
self.task.increment_tools_errors()
return error
@@ -141,7 +140,7 @@ class ToolUsage:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
PRINTER.print(content=f"\n\n{error}\n", color="red")
return error
if (
@@ -157,7 +156,7 @@ class ToolUsage:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
PRINTER.print(content=f"\n\n{error}\n", color="red")
return error
return f"{self._use(tool_string=tool_string, tool=tool, calling=calling)}"
@@ -177,7 +176,7 @@ class ToolUsage:
if isinstance(calling, ToolUsageError):
error = calling.message
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
PRINTER.print(content=f"\n\n{error}\n", color="red")
if self.task:
self.task.increment_tools_errors()
return error
@@ -189,7 +188,7 @@ class ToolUsage:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
PRINTER.print(content=f"\n\n{error}\n", color="red")
return error
if (
@@ -206,7 +205,7 @@ class ToolUsage:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{error}\n", color="red")
PRINTER.print(content=f"\n\n{error}\n", color="red")
return error
return (
@@ -391,7 +390,7 @@ class ToolUsage:
and self.agent
and self.agent.verbose
):
self._printer.print(
PRINTER.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
color="blue",
)
@@ -405,7 +404,7 @@ class ToolUsage:
and self.agent
and self.agent.verbose
):
self._printer.print(
PRINTER.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
color="blue",
)
@@ -429,9 +428,7 @@ class ToolUsage:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(
content=f"\n\n{error_message}\n", color="red"
)
PRINTER.print(content=f"\n\n{error_message}\n", color="red")
else:
if self.task:
self.task.increment_tools_errors()
@@ -626,7 +623,7 @@ class ToolUsage:
and self.agent
and self.agent.verbose
):
self._printer.print(
PRINTER.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
color="blue",
)
@@ -640,7 +637,7 @@ class ToolUsage:
and self.agent
and self.agent.verbose
):
self._printer.print(
PRINTER.print(
content=f"Tool '{sanitize_tool_name(available_tool.name)}' usage: {available_tool.current_usage_count}/{available_tool.max_usage_count}",
color="blue",
)
@@ -664,9 +661,7 @@ class ToolUsage:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(
content=f"\n\n{error_message}\n", color="red"
)
PRINTER.print(content=f"\n\n{error_message}\n", color="red")
else:
if self.task:
self.task.increment_tools_errors()
@@ -859,7 +854,7 @@ class ToolUsage:
if self.task:
self.task.increment_tools_errors()
if self.agent and self.agent.verbose:
self._printer.print(content=f"\n\n{e}\n", color="red")
PRINTER.print(content=f"\n\n{e}\n", color="red")
return ToolUsageError(
f"{self._i18n.errors('tool_usage_error').format(error=e)}\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
)
@@ -903,16 +898,14 @@ class ToolUsage:
try:
repaired_input = str(repair_json(tool_input, skip_json_loads=True))
if self.agent and self.agent.verbose:
self._printer.print(
content=f"Repaired JSON: {repaired_input}", color="blue"
)
PRINTER.print(content=f"Repaired JSON: {repaired_input}", color="blue")
arguments = json.loads(repaired_input)
if isinstance(arguments, dict):
return arguments
except Exception as e:
error = f"Failed to repair JSON: {e}"
if self.agent and self.agent.verbose:
self._printer.print(content=error, color="red")
PRINTER.print(content=error, color="red")
error_message = (
"Tool input must be a valid dictionary in JSON or Python literal format"

View File

@@ -2,11 +2,12 @@
from __future__ import annotations
from collections.abc import AsyncIterator, Iterator
from collections.abc import AsyncIterator, Callable, Iterator
from enum import Enum
from typing import TYPE_CHECKING, Any, Generic, TypeVar
from pydantic import BaseModel, Field
from typing_extensions import Self
if TYPE_CHECKING:
@@ -78,12 +79,21 @@ class StreamingOutputBase(Generic[T]):
via the .result property after streaming completes.
"""
def __init__(self) -> None:
def __init__(
self,
sync_iterator: Iterator[StreamChunk] | None = None,
async_iterator: AsyncIterator[StreamChunk] | None = None,
) -> None:
"""Initialize streaming output base."""
self._result: T | None = None
self._completed: bool = False
self._chunks: list[StreamChunk] = []
self._error: Exception | None = None
self._cancelled: bool = False
self._exhausted: bool = False
self._on_cleanup: Callable[[], None] | None = None
self._sync_iterator = sync_iterator
self._async_iterator = async_iterator
@property
def result(self) -> T:
@@ -112,6 +122,11 @@ class StreamingOutputBase(Generic[T]):
"""Check if streaming has completed."""
return self._completed
@property
def is_cancelled(self) -> bool:
"""Check if streaming was cancelled."""
return self._cancelled
@property
def chunks(self) -> list[StreamChunk]:
"""Get all collected chunks so far."""
@@ -129,6 +144,98 @@ class StreamingOutputBase(Generic[T]):
if chunk.chunk_type == StreamChunkType.TEXT
)
async def __aenter__(self) -> Self:
"""Enter async context manager."""
return self
async def __aexit__(self, *exc_info: Any) -> None:
"""Exit async context manager, cancelling if still running."""
await self.aclose()
async def aclose(self) -> None:
"""Cancel streaming and clean up resources.
Cancels any in-flight tasks and closes the underlying async iterator.
Safe to call multiple times. No-op if already cancelled or fully consumed.
"""
if self._cancelled or self._exhausted or self._error is not None:
return
self._cancelled = True
self._completed = True
if self._async_iterator is not None and hasattr(self._async_iterator, "aclose"):
await self._async_iterator.aclose()
if self._on_cleanup is not None:
self._on_cleanup()
self._on_cleanup = None
def close(self) -> None:
"""Cancel streaming and clean up resources (sync).
Closes the underlying sync iterator. Safe to call multiple times.
No-op if already cancelled, fully consumed, or errored.
"""
if self._cancelled or self._exhausted or self._error is not None:
return
self._cancelled = True
self._completed = True
if self._sync_iterator is not None and hasattr(self._sync_iterator, "close"):
self._sync_iterator.close()
if self._on_cleanup is not None:
self._on_cleanup()
self._on_cleanup = None
def __iter__(self) -> Iterator[StreamChunk]:
"""Iterate over stream chunks synchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If sync iterator not available.
"""
if self._sync_iterator is None:
raise RuntimeError("Sync iterator not available")
try:
for chunk in self._sync_iterator:
self._chunks.append(chunk)
yield chunk
self._exhausted = True
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def __aiter__(self) -> AsyncIterator[StreamChunk]:
"""Return async iterator for stream chunks.
Returns:
Async iterator for StreamChunk objects.
"""
return self._async_iterate()
async def _async_iterate(self) -> AsyncIterator[StreamChunk]:
"""Iterate over stream chunks asynchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If async iterator not available.
"""
if self._async_iterator is None:
raise RuntimeError("Async iterator not available")
try:
async for chunk in self._async_iterator:
self._chunks.append(chunk)
yield chunk
self._exhausted = True
except Exception as e:
self._error = e
raise
finally:
self._completed = True
class CrewStreamingOutput(StreamingOutputBase["CrewOutput"]):
"""Streaming output wrapper for crew execution.
@@ -167,9 +274,7 @@ class CrewStreamingOutput(StreamingOutputBase["CrewOutput"]):
sync_iterator: Synchronous iterator for chunks.
async_iterator: Asynchronous iterator for chunks.
"""
super().__init__()
self._sync_iterator = sync_iterator
self._async_iterator = async_iterator
super().__init__(sync_iterator=sync_iterator, async_iterator=async_iterator)
self._results: list[CrewOutput] | None = None
@property
@@ -204,56 +309,6 @@ class CrewStreamingOutput(StreamingOutputBase["CrewOutput"]):
self._results = results
self._completed = True
def __iter__(self) -> Iterator[StreamChunk]:
"""Iterate over stream chunks synchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If sync iterator not available.
"""
if self._sync_iterator is None:
raise RuntimeError("Sync iterator not available")
try:
for chunk in self._sync_iterator:
self._chunks.append(chunk)
yield chunk
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def __aiter__(self) -> AsyncIterator[StreamChunk]:
"""Return async iterator for stream chunks.
Returns:
Async iterator for StreamChunk objects.
"""
return self._async_iterate()
async def _async_iterate(self) -> AsyncIterator[StreamChunk]:
"""Iterate over stream chunks asynchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If async iterator not available.
"""
if self._async_iterator is None:
raise RuntimeError("Async iterator not available")
try:
async for chunk in self._async_iterator:
self._chunks.append(chunk)
yield chunk
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def _set_result(self, result: CrewOutput) -> None:
"""Set the final result after streaming completes.
@@ -286,71 +341,6 @@ class FlowStreamingOutput(StreamingOutputBase[Any]):
```
"""
def __init__(
self,
sync_iterator: Iterator[StreamChunk] | None = None,
async_iterator: AsyncIterator[StreamChunk] | None = None,
) -> None:
"""Initialize flow streaming output.
Args:
sync_iterator: Synchronous iterator for chunks.
async_iterator: Asynchronous iterator for chunks.
"""
super().__init__()
self._sync_iterator = sync_iterator
self._async_iterator = async_iterator
def __iter__(self) -> Iterator[StreamChunk]:
"""Iterate over stream chunks synchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If sync iterator not available.
"""
if self._sync_iterator is None:
raise RuntimeError("Sync iterator not available")
try:
for chunk in self._sync_iterator:
self._chunks.append(chunk)
yield chunk
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def __aiter__(self) -> AsyncIterator[StreamChunk]:
"""Return async iterator for stream chunks.
Returns:
Async iterator for StreamChunk objects.
"""
return self._async_iterate()
async def _async_iterate(self) -> AsyncIterator[StreamChunk]:
"""Iterate over stream chunks asynchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If async iterator not available.
"""
if self._async_iterator is None:
raise RuntimeError("Async iterator not available")
try:
async for chunk in self._async_iterator:
self._chunks.append(chunk)
yield chunk
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def _set_result(self, result: Any) -> None:
"""Set the final result after streaming completes.

View File

@@ -32,7 +32,7 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededError,
)
from crewai.utilities.i18n import I18N
from crewai.utilities.printer import ColoredText, Printer
from crewai.utilities.printer import PRINTER, ColoredText, Printer
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.string_utils import sanitize_tool_name
from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -946,7 +946,7 @@ def summarize_messages(
summarized_contents: list[SummaryContent] = []
for idx, chunk in enumerate(chunks, 1):
if verbose:
Printer().print(
PRINTER.print(
content=f"Summarizing {idx}/{total_chunks}...",
color="yellow",
)
@@ -967,7 +967,7 @@ def summarize_messages(
else:
# Multiple chunks — summarize in parallel via asyncio
if verbose:
Printer().print(
PRINTER.print(
content=f"Summarizing {total_chunks} chunks in parallel...",
color="yellow",
)

View File

@@ -10,7 +10,7 @@ from typing_extensions import Unpack
from crewai.agents.agent_builder.utilities.base_output_converter import OutputConverter
from crewai.utilities.i18n import get_i18n
from crewai.utilities.internal_instructor import InternalInstructor
from crewai.utilities.printer import Printer
from crewai.utilities.printer import PRINTER
from crewai.utilities.pydantic_schema_utils import generate_model_description
@@ -209,7 +209,7 @@ def convert_to_model(
except Exception as e:
if agent and getattr(agent, "verbose", True):
Printer().print(
PRINTER.print(
content=f"Unexpected error during model conversion: {type(e).__name__}: {e}. Returning original result.",
color="red",
)
@@ -267,7 +267,7 @@ def handle_partial_json(
raise
except Exception as e:
if agent and getattr(agent, "verbose", True):
Printer().print(
PRINTER.print(
content=f"Unexpected error during partial JSON handling: {type(e).__name__}: {e}. Attempting alternative conversion method.",
color="red",
)
@@ -329,7 +329,7 @@ def convert_with_instructions(
if isinstance(exported_result, ConverterError):
if agent and getattr(agent, "verbose", True):
Printer().print(
PRINTER.print(
content=f"Failed to convert result to model: {exported_result}",
color="red",
)

View File

@@ -1,8 +1,8 @@
from datetime import datetime
from pydantic import BaseModel, Field, PrivateAttr
from pydantic import BaseModel, Field
from crewai.utilities.printer import ColoredText, Printer, PrinterColor
from crewai.utilities.printer import PRINTER, ColoredText, PrinterColor
class Logger(BaseModel):
@@ -14,7 +14,6 @@ class Logger(BaseModel):
default="bold_yellow",
description="Default color for log messages",
)
_printer: Printer = PrivateAttr(default_factory=Printer)
def log(self, level: str, message: str, color: PrinterColor | None = None) -> None:
"""Log a message with timestamp if verbose mode is enabled.
@@ -26,7 +25,7 @@ class Logger(BaseModel):
"""
if self.verbose:
timestamp: str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self._printer.print(
PRINTER.print(
[
ColoredText(f"\n[{timestamp}]", "cyan"),
ColoredText(f"[{level.upper()}]: ", "yellow"),

View File

@@ -93,3 +93,6 @@ class Printer:
file=file,
flush=flush,
)
PRINTER: Printer = Printer()

View File

@@ -3,6 +3,7 @@
import asyncio
from collections.abc import AsyncIterator, Callable, Iterator
import contextvars
import logging
import queue
import threading
from typing import Any, NamedTuple
@@ -22,6 +23,9 @@ from crewai.types.streaming import (
from crewai.utilities.string_utils import sanitize_tool_name
logger = logging.getLogger(__name__)
class TaskInfo(TypedDict):
"""Task context information for streaming."""
@@ -159,10 +163,23 @@ def _finalize_streaming(
streaming_output: The streaming output to set the result on.
"""
_unregister_handler(state.handler)
streaming_output._on_cleanup = None
if state.result_holder:
streaming_output._set_result(state.result_holder[0])
def register_cleanup(
streaming_output: CrewStreamingOutput | FlowStreamingOutput,
state: StreamingState,
) -> None:
"""Register a cleanup callback on the streaming output.
Ensures the event handler is unregistered even if aclose()/close()
is called before iteration starts.
"""
streaming_output._on_cleanup = lambda: _unregister_handler(state.handler)
def create_streaming_state(
current_task_info: TaskInfo,
result_holder: list[Any],
@@ -294,7 +311,14 @@ async def create_async_chunk_generator(
raise item
yield item
finally:
await task
if not task.done():
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
except Exception:
logger.debug("Background streaming task failed", exc_info=True)
if output_holder:
_finalize_streaming(state, output_holder[0])
else:

View File

@@ -96,7 +96,7 @@ async def aexecute_tool_and_check_finality(
if tool:
tool_input = tool_calling.arguments if tool_calling.arguments else {}
hook_context = ToolCallHookContext(
tool_name=tool_calling.tool_name,
tool_name=sanitized_tool_name,
tool_input=tool_input,
tool=tool,
agent=agent,
@@ -120,7 +120,7 @@ async def aexecute_tool_and_check_finality(
tool_result = await tool_usage.ause(tool_calling, agent_action.text)
after_hook_context = ToolCallHookContext(
tool_name=tool_calling.tool_name,
tool_name=sanitized_tool_name,
tool_input=tool_input,
tool=tool,
agent=agent,
@@ -216,7 +216,7 @@ def execute_tool_and_check_finality(
if tool:
tool_input = tool_calling.arguments if tool_calling.arguments else {}
hook_context = ToolCallHookContext(
tool_name=tool_calling.tool_name,
tool_name=sanitized_tool_name,
tool_input=tool_input,
tool=tool,
agent=agent,
@@ -240,7 +240,7 @@ def execute_tool_and_check_finality(
tool_result = tool_usage.use(tool_calling, agent_action.text)
after_hook_context = ToolCallHookContext(
tool_name=tool_calling.tool_name,
tool_name=sanitized_tool_name,
tool_input=tool_input,
tool=tool,
agent=agent,

View File

@@ -48,8 +48,6 @@ def _build_executor(**kwargs: Any) -> AgentExecutor:
executor._last_context_error = None
executor._step_executor = None
executor._planner_observer = None
from crewai.utilities.printer import Printer
executor._printer = Printer()
from crewai.utilities.i18n import get_i18n
executor._i18n = kwargs.get("i18n") or get_i18n()
return executor
@@ -1491,7 +1489,6 @@ class TestReasoningEffort:
executor.handle_step_observed_medium = (
AgentExecutor.handle_step_observed_medium.__get__(executor)
)
executor._printer = Mock()
# --- Case 1: step succeeded → should return "continue_plan" ---
success_todo = TodoItem(
@@ -1562,7 +1559,6 @@ class TestReasoningEffort:
executor.handle_step_observed_low = (
AgentExecutor.handle_step_observed_low.__get__(executor)
)
executor._printer = Mock()
todo = TodoItem(
step_number=1,

View File

@@ -1060,27 +1060,13 @@ def test_lite_agent_verbose_false_suppresses_printer_output():
verbose=False,
)
result = agent.kickoff("Say hello")
mock_printer = Mock()
with patch("crewai.lite_agent.PRINTER", mock_printer):
result = agent.kickoff("Say hello")
assert result is not None
assert isinstance(result, LiteAgentOutput)
# Verify the printer was never called
agent._printer.print = Mock()
# For a clean verification, patch printer before execution
with pytest.warns(DeprecationWarning):
agent2 = LiteAgent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
llm=mock_llm,
verbose=False,
)
mock_printer = Mock()
agent2._printer = mock_printer
agent2.kickoff("Say hello")
# Verify the printer was never called when verbose=False
mock_printer.print.assert_not_called()

View File

@@ -192,6 +192,38 @@ class TestToolHookDecorators:
# Should still be 1 (hook didn't execute for read_file)
assert len(execution_log) == 1
def test_before_tool_call_tool_filter_sanitizes_names(self):
"""Tool filter should auto-sanitize names so users can pass BaseTool.name directly."""
execution_log = []
# User passes the human-readable tool name (e.g. BaseTool.name)
@before_tool_call(tools=["Delete File", "Execute Code"])
def filtered_hook(context):
execution_log.append(context.tool_name)
return None
hooks = get_before_tool_call_hooks()
assert len(hooks) == 1
mock_tool = Mock()
# Context uses the sanitized name (as set by the executor)
context = ToolCallHookContext(
tool_name="delete_file",
tool_input={},
tool=mock_tool,
)
hooks[0](context)
assert execution_log == ["delete_file"]
# Non-matching tool still filtered out
context2 = ToolCallHookContext(
tool_name="read_file",
tool_input={},
tool=mock_tool,
)
hooks[0](context2)
assert execution_log == ["delete_file"]
def test_before_tool_call_with_combined_filters(self):
"""Test that combined tool and agent filters work."""
execution_log = []

View File

@@ -709,6 +709,158 @@ class TestStreamingEdgeCases:
assert streaming.is_completed
class TestStreamingCancellation:
"""Tests for streaming cancellation and resource cleanup."""
@pytest.mark.asyncio
async def test_aclose_cancels_async_streaming(self) -> None:
"""Test that aclose() stops iteration and marks as cancelled."""
chunks_yielded: list[str] = []
async def slow_gen() -> AsyncIterator[StreamChunk]:
for i in range(100):
await asyncio.sleep(0.01)
chunks_yielded.append(f"chunk-{i}")
yield StreamChunk(content=f"chunk-{i}")
streaming = CrewStreamingOutput(async_iterator=slow_gen())
collected: list[StreamChunk] = []
async for chunk in streaming:
collected.append(chunk)
if len(collected) >= 3:
break
await streaming.aclose()
assert streaming.is_cancelled
assert streaming.is_completed
assert len(collected) == 3
@pytest.mark.asyncio
async def test_aclose_idempotent(self) -> None:
"""Test that calling aclose() multiple times is safe."""
async def gen() -> AsyncIterator[StreamChunk]:
yield StreamChunk(content="test")
streaming = CrewStreamingOutput(async_iterator=gen())
async for _ in streaming:
pass
await streaming.aclose()
await streaming.aclose()
assert not streaming.is_cancelled
assert streaming.is_completed
@pytest.mark.asyncio
async def test_async_context_manager(self) -> None:
"""Test using streaming output as async context manager."""
async def gen() -> AsyncIterator[StreamChunk]:
yield StreamChunk(content="hello")
yield StreamChunk(content="world")
streaming = CrewStreamingOutput(async_iterator=gen())
collected: list[StreamChunk] = []
async with streaming:
async for chunk in streaming:
collected.append(chunk)
assert not streaming.is_cancelled
assert streaming.is_completed
assert len(collected) == 2
@pytest.mark.asyncio
async def test_async_context_manager_early_exit(self) -> None:
"""Test context manager cleans up on early exit."""
async def gen() -> AsyncIterator[StreamChunk]:
for i in range(100):
await asyncio.sleep(0.01)
yield StreamChunk(content=f"chunk-{i}")
streaming = CrewStreamingOutput(async_iterator=gen())
async with streaming:
async for chunk in streaming:
if chunk.content == "chunk-2":
break
assert streaming.is_cancelled
assert streaming.is_completed
def test_close_cancels_sync_streaming(self) -> None:
"""Test that close() stops sync streaming and marks as cancelled."""
def gen() -> Generator[StreamChunk, None, None]:
for i in range(100):
yield StreamChunk(content=f"chunk-{i}")
streaming = CrewStreamingOutput(sync_iterator=gen())
collected: list[StreamChunk] = []
for chunk in streaming:
collected.append(chunk)
if len(collected) >= 3:
break
streaming.close()
assert streaming.is_cancelled
assert streaming.is_completed
def test_close_idempotent(self) -> None:
"""Test that calling close() multiple times is safe."""
def gen() -> Generator[StreamChunk, None, None]:
yield StreamChunk(content="test")
streaming = CrewStreamingOutput(sync_iterator=gen())
list(streaming)
streaming.close()
streaming.close()
assert not streaming.is_cancelled
assert streaming.is_completed
@pytest.mark.asyncio
async def test_flow_aclose(self) -> None:
"""Test that FlowStreamingOutput aclose() is no-op after normal completion."""
async def gen() -> AsyncIterator[StreamChunk]:
yield StreamChunk(content="flow-chunk")
streaming = FlowStreamingOutput(async_iterator=gen())
async for _ in streaming:
pass
await streaming.aclose()
assert not streaming.is_cancelled
assert streaming.is_completed
@pytest.mark.asyncio
async def test_flow_async_context_manager(self) -> None:
"""Test FlowStreamingOutput as async context manager with full consumption."""
async def gen() -> AsyncIterator[StreamChunk]:
yield StreamChunk(content="flow-chunk")
streaming = FlowStreamingOutput(async_iterator=gen())
async with streaming:
async for _ in streaming:
pass
assert not streaming.is_cancelled
assert streaming.is_completed
def test_flow_close(self) -> None:
"""Test that FlowStreamingOutput close() is no-op after normal completion."""
def gen() -> Generator[StreamChunk, None, None]:
yield StreamChunk(content="flow-chunk")
streaming = FlowStreamingOutput(sync_iterator=gen())
list(streaming)
streaming.close()
assert not streaming.is_cancelled
class TestStreamingImports:
"""Tests for correct imports of streaming types."""

View File

@@ -529,9 +529,6 @@ def test_tool_validate_input_error_event():
mock_task = MagicMock()
mock_tools_handler = MagicMock()
# Mock printer
mock_printer = MagicMock()
# Create test tool
class TestTool(BaseTool):
name: str = "Test Tool"
@@ -551,8 +548,6 @@ def test_tool_validate_input_error_event():
agent=mock_agent,
action=MagicMock(tool="test_tool"),
)
tool_usage._printer = mock_printer
# Mock all parsing attempts to fail
with (
patch("json.loads", side_effect=json.JSONDecodeError("Test Error", "", 0)),

View File

@@ -207,10 +207,10 @@ def test_convert_with_instructions_failure(
mock_create_converter.return_value = mock_converter
result = "Some text to convert"
with patch("crewai.utilities.converter.Printer") as mock_printer:
with patch("crewai.utilities.converter.PRINTER") as mock_printer:
output = convert_with_instructions(result, SimpleModel, False, mock_agent)
assert output == result
mock_printer.return_value.print.assert_called_once()
mock_printer.print.assert_called_once()
# Tests for get_conversion_instructions

View File

@@ -11,7 +11,7 @@ classifiers = ["Private :: Do Not Upload"]
private = true
dependencies = [
"click~=8.1.7",
"toml~=0.10.2",
"tomlkit~=0.13.2",
"openai>=1.83.0,<3",
"python-dotenv~=1.1.1",
"pygithub~=1.59.1",
@@ -25,6 +25,10 @@ release = "crewai_devtools.cli:release"
docs-check = "crewai_devtools.docs_check:docs_check"
devtools = "crewai_devtools.cli:main"
[tool.pytest.ini_options]
testpaths = ["tests"]
addopts = "--noconftest"
[tool.uv]
exclude-newer = "3 days"

View File

@@ -1,3 +1,3 @@
"""CrewAI development tools."""
__version__ = "1.14.0"
__version__ = "1.14.1"

View File

@@ -1,8 +1,8 @@
"""Development tools for version bumping and git automation."""
from collections.abc import Mapping
import os
from pathlib import Path
import re
import subprocess
import sys
import tempfile
@@ -18,6 +18,7 @@ from rich.console import Console
from rich.markdown import Markdown
from rich.panel import Panel
from rich.prompt import Confirm
import tomlkit
from crewai_devtools.docs_check import docs_check
from crewai_devtools.prompts import RELEASE_NOTES_PROMPT, TRANSLATE_RELEASE_NOTES_PROMPT
@@ -169,18 +170,17 @@ def update_pyproject_version(file_path: Path, new_version: str) -> bool:
if not file_path.exists():
return False
content = file_path.read_text()
new_content = re.sub(
r'^(version\s*=\s*")[^"]+(")',
rf"\g<1>{new_version}\2",
content,
count=1,
flags=re.MULTILINE,
)
if new_content != content:
file_path.write_text(new_content)
return True
return False
doc = tomlkit.parse(file_path.read_text())
project = doc.get("project")
if project is None:
return False
old_version = project.get("version")
if old_version is None or old_version == new_version:
return False
project["version"] = new_version
file_path.write_text(tomlkit.dumps(doc))
return True
_DEFAULT_WORKSPACE_PACKAGES: Final[list[str]] = [
@@ -473,6 +473,14 @@ def update_changelog(
return True
def _is_crewai_dep(spec: str) -> bool:
"""Return True if *spec* is a ``crewai`` or ``crewai[...]`` dependency."""
if not spec.startswith("crewai"):
return False
rest = spec[6:] # after "crewai"
return len(rest) > 0 and rest[0] in ("[", "=", ">", "<", "~", "!")
def _pin_crewai_deps(content: str, version: str) -> str:
"""Replace crewai dependency version pins in a pyproject.toml string.
@@ -486,16 +494,30 @@ def _pin_crewai_deps(content: str, version: str) -> str:
Returns:
Transformed content.
"""
return re.sub(
r'"crewai(\[tools\])?(==|>=)[^"]*"',
lambda m: f'"crewai{(m.group(1) or "")!s}=={version}"',
content,
)
doc = tomlkit.parse(content)
for key in ("dependencies", "optional-dependencies"):
deps = doc.get("project", {}).get(key)
if deps is None:
continue
# optional-dependencies is a table of lists; dependencies is a list
dep_lists = deps.values() if isinstance(deps, Mapping) else [deps]
for dep_list in dep_lists:
for i, dep in enumerate(dep_list):
s = str(dep)
if not _is_crewai_dep(s) or ("==" not in s and ">=" not in s):
continue
extras = s[6 : s.index("]") + 1] if "[" in s[6:7] else ""
dep_list[i] = f"crewai{extras}=={version}"
return tomlkit.dumps(doc)
def update_template_dependencies(templates_dir: Path, new_version: str) -> list[Path]:
"""Update crewai dependency versions in CLI template pyproject.toml files.
Uses simple string replacement instead of TOML parsing because
template files contain Jinja placeholders (``{{folder_name}}``)
that are not valid TOML.
Args:
templates_dir: Path to the CLI templates directory.
new_version: New version string.
@@ -503,10 +525,13 @@ def update_template_dependencies(templates_dir: Path, new_version: str) -> list[
Returns:
List of paths that were updated.
"""
import re
pattern = re.compile(r"(crewai(?:\[[\w,]+\])?)(?:==|>=)[^\s\"']+")
updated = []
for pyproject in templates_dir.rglob("pyproject.toml"):
content = pyproject.read_text()
new_content = _pin_crewai_deps(content, new_version)
new_content = pattern.sub(rf"\1=={new_version}", content)
if new_content != content:
pyproject.write_text(new_content)
updated.append(pyproject)
@@ -1049,6 +1074,11 @@ _ENTERPRISE_EXTRA_PACKAGES: Final[tuple[str, ...]] = tuple(
for p in os.getenv("ENTERPRISE_EXTRA_PACKAGES", "").split(",")
if p.strip()
)
_ENTERPRISE_WORKFLOW_PATHS: Final[tuple[str, ...]] = tuple(
p.strip()
for p in os.getenv("ENTERPRISE_WORKFLOW_PATHS", "").split(",")
if p.strip()
)
def _update_enterprise_crewai_dep(pyproject_path: Path, version: str) -> bool:
@@ -1072,6 +1102,86 @@ def _update_enterprise_crewai_dep(pyproject_path: Path, version: str) -> bool:
return False
def _update_enterprise_workflows(repo_dir: Path, version: str) -> list[Path]:
"""Update crewai version pins in enterprise CI workflow files.
Applies ``_repin_crewai_install`` line-by-line on the raw file so
only version numbers change and all formatting is preserved.
Args:
repo_dir: Root of the cloned enterprise repo.
version: New crewai version string.
Returns:
List of workflow paths that were modified.
"""
updated: list[Path] = []
for rel_path in _ENTERPRISE_WORKFLOW_PATHS:
workflow = repo_dir / rel_path
if not workflow.exists():
continue
raw = workflow.read_text()
lines = raw.splitlines(keepends=True)
changed = False
for i, line in enumerate(lines):
if "crewai[" not in line:
continue
new_line = _repin_crewai_install(line, version)
if new_line != line:
lines[i] = new_line
changed = True
if changed:
new_raw = "".join(lines)
else:
new_raw = raw
if new_raw != raw:
workflow.write_text(new_raw)
updated.append(workflow)
return updated
def _repin_crewai_install(run_value: str, version: str) -> str:
"""Rewrite ``crewai[extras]==old`` pins in a shell command string.
Splits on the known ``crewai[`` prefix and reconstructs the pin
with the new version, avoiding regex.
Args:
run_value: The ``run:`` string from a workflow step.
version: New version to pin to.
Returns:
The updated string.
"""
result: list[str] = []
remainder = run_value
marker = "crewai["
while marker in remainder:
before, _, after = remainder.partition(marker)
result.append(before)
# after looks like: a2a]==1.14.0" ...
bracket_end = after.index("]")
extras = after[:bracket_end]
rest = after[bracket_end + 1 :]
if rest.startswith("=="):
# Find end of version — next quote or whitespace
ver_start = 2 # len("==")
ver_end = ver_start
while ver_end < len(rest) and rest[ver_end] not in ('"', "'", " ", "\n"):
ver_end += 1
result.append(f"crewai[{extras}]=={version}")
remainder = rest[ver_end:]
else:
result.append(f"crewai[{extras}]")
remainder = rest
result.append(remainder)
return "".join(result)
_DEPLOYMENT_TEST_REPO: Final[str] = "crewAIInc/crew_deployment_test"
_PYPI_POLL_INTERVAL: Final[int] = 15
@@ -1099,11 +1209,7 @@ def _update_deployment_test_repo(version: str, is_prerelease: bool) -> None:
pyproject = repo_dir / "pyproject.toml"
content = pyproject.read_text()
new_content = re.sub(
r'"crewai\[tools\]==[^"]+"',
f'"crewai[tools]=={version}"',
content,
)
new_content = _pin_crewai_deps(content, version)
if new_content == content:
console.print(
"[yellow]Warning:[/yellow] No crewai[tools] pin found to update"
@@ -1262,6 +1368,12 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
f"[green]✓[/green] Updated crewai[tools] dep in {enterprise_dep_path}"
)
# --- update crewai pins in CI workflows ---
for wf in _update_enterprise_workflows(repo_dir, version):
console.print(
f"[green]✓[/green] Updated crewai pin in {wf.relative_to(repo_dir)}"
)
_wait_for_pypi("crewai", version)
console.print("\nSyncing workspace...")

View File

View File

@@ -0,0 +1,274 @@
"""Tests for TOML-based version and dependency update functions."""
from pathlib import Path
from textwrap import dedent
from crewai_devtools.cli import (
_pin_crewai_deps,
_repin_crewai_install,
update_pyproject_version,
update_template_dependencies,
)
# --- update_pyproject_version ---
class TestUpdatePyprojectVersion:
def test_updates_version(self, tmp_path: Path) -> None:
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(
dedent("""\
[project]
name = "my-pkg"
version = "1.0.0"
""")
)
assert update_pyproject_version(pyproject, "2.0.0") is True
assert 'version = "2.0.0"' in pyproject.read_text()
def test_returns_false_when_already_current(self, tmp_path: Path) -> None:
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(
dedent("""\
[project]
name = "my-pkg"
version = "1.0.0"
""")
)
assert update_pyproject_version(pyproject, "1.0.0") is False
def test_returns_false_when_no_project_section(self, tmp_path: Path) -> None:
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text("[tool.ruff]\nline-length = 88\n")
assert update_pyproject_version(pyproject, "1.0.0") is False
def test_returns_false_when_version_is_dynamic(self, tmp_path: Path) -> None:
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(
dedent("""\
[project]
name = "my-pkg"
dynamic = ["version"]
""")
)
assert update_pyproject_version(pyproject, "1.0.0") is False
assert 'version = "1.0.0"' not in pyproject.read_text()
def test_returns_false_for_missing_file(self, tmp_path: Path) -> None:
assert update_pyproject_version(tmp_path / "nope.toml", "1.0.0") is False
def test_preserves_comments_and_formatting(self, tmp_path: Path) -> None:
content = dedent("""\
# This is important
[project]
name = "my-pkg"
version = "1.0.0" # current version
description = "A package"
""")
pyproject = tmp_path / "pyproject.toml"
pyproject.write_text(content)
update_pyproject_version(pyproject, "2.0.0")
result = pyproject.read_text()
assert "# This is important" in result
assert 'description = "A package"' in result
# --- _pin_crewai_deps ---
class TestPinCrewaiDeps:
def test_pins_exact_version(self) -> None:
content = dedent("""\
[project]
dependencies = [
"crewai==1.0.0",
]
""")
result = _pin_crewai_deps(content, "2.0.0")
assert '"crewai==2.0.0"' in result
def test_pins_minimum_version(self) -> None:
content = dedent("""\
[project]
dependencies = [
"crewai>=1.0.0",
]
""")
result = _pin_crewai_deps(content, "2.0.0")
assert '"crewai==2.0.0"' in result
assert ">=" not in result
def test_pins_with_tools_extra(self) -> None:
content = dedent("""\
[project]
dependencies = [
"crewai[tools]==1.0.0",
]
""")
result = _pin_crewai_deps(content, "2.0.0")
assert '"crewai[tools]==2.0.0"' in result
def test_leaves_unrelated_deps_alone(self) -> None:
content = dedent("""\
[project]
dependencies = [
"requests>=2.0",
"crewai==1.0.0",
"click~=8.1",
]
""")
result = _pin_crewai_deps(content, "2.0.0")
assert '"requests>=2.0"' in result
assert '"click~=8.1"' in result
def test_handles_optional_dependencies(self) -> None:
content = dedent("""\
[project]
dependencies = []
[project.optional-dependencies]
tools = [
"crewai[tools]>=1.0.0",
]
""")
result = _pin_crewai_deps(content, "3.0.0")
assert '"crewai[tools]==3.0.0"' in result
def test_handles_multiple_crewai_entries(self) -> None:
content = dedent("""\
[project]
dependencies = [
"crewai==1.0.0",
"crewai[tools]==1.0.0",
]
""")
result = _pin_crewai_deps(content, "2.0.0")
assert '"crewai==2.0.0"' in result
assert '"crewai[tools]==2.0.0"' in result
def test_preserves_arbitrary_extras(self) -> None:
content = dedent("""\
[project]
dependencies = [
"crewai[a2a]==1.0.0",
]
""")
result = _pin_crewai_deps(content, "2.0.0")
assert '"crewai[a2a]==2.0.0"' in result
def test_no_deps_returns_unchanged(self) -> None:
content = dedent("""\
[project]
name = "empty"
""")
result = _pin_crewai_deps(content, "2.0.0")
assert "empty" in result
def test_skips_crewai_without_version_specifier(self) -> None:
content = dedent("""\
[project]
dependencies = [
"crewai-tools~=1.0",
]
""")
result = _pin_crewai_deps(content, "2.0.0")
assert '"crewai-tools~=1.0"' in result
def test_skips_crewai_extras_without_pin(self) -> None:
content = dedent("""\
[project]
dependencies = [
"crewai[tools]",
]
""")
result = _pin_crewai_deps(content, "2.0.0")
assert '"crewai[tools]"' in result
assert "==" not in result
# --- _repin_crewai_install ---
class TestRepinCrewaiInstall:
    """Tests for _repin_crewai_install rewriting of shell install commands."""

    def test_repins_a2a_extra(self) -> None:
        before = 'uv pip install "crewai[a2a]==1.14.0"'
        after = 'uv pip install "crewai[a2a]==2.0.0"'
        assert _repin_crewai_install(before, "2.0.0") == after

    def test_repins_tools_extra(self) -> None:
        before = 'uv pip install "crewai[tools]==1.0.0"'
        after = 'uv pip install "crewai[tools]==3.0.0"'
        assert _repin_crewai_install(before, "3.0.0") == after

    def test_leaves_unrelated_commands_alone(self) -> None:
        unrelated = "uv pip install requests"
        assert _repin_crewai_install(unrelated, "2.0.0") == unrelated

    def test_handles_multiple_pins(self) -> None:
        # Both pinned requirements in a single command line get repinned.
        before = 'pip install "crewai[a2a]==1.0.0" "crewai[tools]==1.0.0"'
        after = 'pip install "crewai[a2a]==2.0.0" "crewai[tools]==2.0.0"'
        assert _repin_crewai_install(before, "2.0.0") == after

    def test_preserves_surrounding_text(self) -> None:
        # Only the pinned requirement changes; the rest of the shell
        # pipeline is reproduced verbatim.
        before = 'echo hello && uv pip install "crewai[a2a]==1.14.0" && echo done'
        after = 'echo hello && uv pip install "crewai[a2a]==2.0.0" && echo done'
        assert _repin_crewai_install(before, "2.0.0") == after

    def test_no_version_specifier_unchanged(self) -> None:
        # A range specifier (>=) is not an exact pin, so it is left alone.
        unpinned = 'pip install "crewai[tools]>=1.0"'
        assert _repin_crewai_install(unpinned, "2.0.0") == unpinned
# --- update_template_dependencies ---
class TestUpdateTemplateDependencies:
    """Tests for update_template_dependencies over CLI template pyproject files."""

    def test_updates_jinja_template(self, tmp_path: Path) -> None:
        """Template pyproject.toml files with Jinja placeholders should not break."""
        template = tmp_path / "crew" / "pyproject.toml"
        template.parent.mkdir()
        template.write_text(
            dedent("""\
                [project]
                name = "{{folder_name}}"
                version = "0.1.0"
                dependencies = [
                "crewai[tools]==1.14.0"
                ]
                [project.scripts]
                {{folder_name}} = "{{folder_name}}.main:run"
            """)
        )
        changed = update_template_dependencies(tmp_path, "2.0.0")
        assert len(changed) == 1
        rewritten = template.read_text()
        assert '"crewai[tools]==2.0.0"' in rewritten
        # The Jinja placeholder must survive the rewrite untouched.
        assert "{{folder_name}}" in rewritten

    def test_updates_bare_crewai(self, tmp_path: Path) -> None:
        template = tmp_path / "pyproject.toml"
        template.write_text('dependencies = [\n "crewai==1.0.0"\n]\n')
        changed = update_template_dependencies(tmp_path, "3.0.0")
        assert len(changed) == 1
        assert '"crewai==3.0.0"' in template.read_text()

    def test_skips_unrelated_deps(self, tmp_path: Path) -> None:
        template = tmp_path / "pyproject.toml"
        template.write_text('dependencies = [\n "requests>=2.0"\n]\n')
        changed = update_template_dependencies(tmp_path, "2.0.0")
        # No crewai requirement present, so no file is reported as updated.
        assert len(changed) == 0
        assert '"requests>=2.0"' in template.read_text()

View File

@@ -107,6 +107,7 @@ ignore-decorators = ["typing.overload"]
"lib/crewai/tests/**/*.py" = ["S101", "RET504", "S105", "S106"] # Allow assert statements, unnecessary assignments, and hardcoded passwords in tests
"lib/crewai-tools/tests/**/*.py" = ["S101", "RET504", "S105", "S106", "RUF012", "N818", "E402", "RUF043", "S110", "B017"] # Allow various test-specific patterns
"lib/crewai-files/tests/**/*.py" = ["S101", "RET504", "S105", "S106", "B017", "F841"] # Allow assert statements and blind exception assertions in tests
"lib/devtools/tests/**/*.py" = ["S101"]
[tool.mypy]
@@ -166,12 +167,14 @@ exclude-newer = "3 days"
# onnxruntime 1.24+ dropped Python 3.10 wheels; cap it so qdrant[fastembed] resolves on 3.10.
# fastembed 0.7.x and docling 2.63 cap pillow<12; the removed APIs don't affect them.
# langchain-core <1.2.11 has SSRF via image_url token counting (CVE-2026-26013).
# transformers 4.57.6 has CVE-2026-1839; force 5.4+ (docling 2.84 allows huggingface-hub>=1).
override-dependencies = [
"rich>=13.7.1",
"onnxruntime<1.24; python_version < '3.11'",
"pillow>=12.1.1",
"langchain-core>=1.2.11,<2",
"urllib3>=2.6.3",
"transformers>=5.4.0; python_version >= '3.10'",
]
[tool.uv.workspace]

52
uv.lock generated
View File

@@ -13,7 +13,7 @@ resolution-markers = [
]
[options]
exclude-newer = "2026-04-04T15:11:41.651093Z"
exclude-newer = "2026-04-05T11:09:48.9111Z"
exclude-newer-span = "P3D"
[manifest]
@@ -28,6 +28,7 @@ overrides = [
{ name = "onnxruntime", marker = "python_full_version < '3.11'", specifier = "<1.24" },
{ name = "pillow", specifier = ">=12.1.1" },
{ name = "rich", specifier = ">=13.7.1" },
{ name = "transformers", marker = "python_full_version >= '3.10'", specifier = ">=5.4.0" },
{ name = "urllib3", specifier = ">=2.6.3" },
]
@@ -1307,7 +1308,7 @@ requires-dist = [
{ name = "click", specifier = "~=8.1.7" },
{ name = "crewai-files", marker = "extra == 'file-processing'", editable = "lib/crewai-files" },
{ name = "crewai-tools", marker = "extra == 'tools'", editable = "lib/crewai-tools" },
{ name = "docling", marker = "extra == 'docling'", specifier = "~=2.75.0" },
{ name = "docling", marker = "extra == 'docling'", specifier = "~=2.84.0" },
{ name = "google-genai", marker = "extra == 'google-genai'", specifier = "~=1.65.0" },
{ name = "httpx", specifier = "~=0.28.1" },
{ name = "httpx-auth", marker = "extra == 'a2a'", specifier = "~=0.23.1" },
@@ -1357,7 +1358,7 @@ dependencies = [
{ name = "pygithub" },
{ name = "python-dotenv" },
{ name = "rich" },
{ name = "toml" },
{ name = "tomlkit" },
]
[package.metadata]
@@ -1367,7 +1368,7 @@ requires-dist = [
{ name = "pygithub", specifier = "~=1.59.1" },
{ name = "python-dotenv", specifier = "~=1.1.1" },
{ name = "rich", specifier = ">=13.9.4" },
{ name = "toml", specifier = "~=0.10.2" },
{ name = "tomlkit", specifier = "~=0.13.2" },
]
[[package]]
@@ -1820,7 +1821,7 @@ wheels = [
[[package]]
name = "docling"
version = "2.75.0"
version = "2.84.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "accelerate" },
@@ -1851,12 +1852,14 @@ dependencies = [
{ name = "rtree" },
{ name = "scipy", version = "1.15.3", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
{ name = "scipy", version = "1.17.1", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
{ name = "torch" },
{ name = "torchvision" },
{ name = "tqdm" },
{ name = "typer" },
]
sdist = { url = "https://files.pythonhosted.org/packages/77/0b/8ea363fd3c8bb4facb8d3c37aebfe7ad5265fecc1c6bd40f979d1f6179ba/docling-2.75.0.tar.gz", hash = "sha256:1b0a77766e201e5e2d118e236c006f3814afcea2e13726fb3c7389d666a56622", size = 364929, upload-time = "2026-02-24T20:18:04.896Z" }
sdist = { url = "https://files.pythonhosted.org/packages/6f/1f/85560d7ba90a20f46c65396b45990fad34b7c95da23ca6e547456631d0e6/docling-2.84.0.tar.gz", hash = "sha256:007b0bad3c0ec45dc91af6083cbe1f0a93ddef1686304f466e8a168a1fb1dccb", size = 425470, upload-time = "2026-04-01T18:36:31.377Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b8/85/5c6885547ce5cde33af43201e3b2b04cf2360e6854abc07485f54b8d265d/docling-2.75.0-py3-none-any.whl", hash = "sha256:6e156f0326edb6471fc076e978ac64f902f54aac0da13cf89df456013e377bcc", size = 396243, upload-time = "2026-02-24T20:18:03.57Z" },
{ url = "https://files.pythonhosted.org/packages/22/e1/054e6ddf45e5760d51053b93b1a4f8be1568882b50c5ceeb88e6adaa6918/docling-2.84.0-py3-none-any.whl", hash = "sha256:ee431e5bb20cbebdd957f6173918f133d769340462814f3479df3446743d240e", size = 451391, upload-time = "2026-04-01T18:36:29.379Z" },
]
[[package]]
@@ -2735,21 +2738,22 @@ wheels = [
[[package]]
name = "huggingface-hub"
version = "0.36.2"
version = "1.9.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "filelock" },
{ name = "fsspec" },
{ name = "hf-xet", marker = "platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" },
{ name = "hf-xet", marker = "platform_machine == 'AMD64' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'arm64' or platform_machine == 'x86_64'" },
{ name = "httpx" },
{ name = "packaging" },
{ name = "pyyaml" },
{ name = "requests" },
{ name = "tqdm" },
{ name = "typer" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/7c/b7/8cb61d2eece5fb05a83271da168186721c450eb74e3c31f7ef3169fa475b/huggingface_hub-0.36.2.tar.gz", hash = "sha256:1934304d2fb224f8afa3b87007d58501acfda9215b334eed53072dd5e815ff7a", size = 649782, upload-time = "2026-02-06T09:24:13.098Z" }
sdist = { url = "https://files.pythonhosted.org/packages/88/bb/62c7aa86f63a05e2f9b96642fdef9b94526a23979820b09f5455deff4983/huggingface_hub-1.9.0.tar.gz", hash = "sha256:0ea5be7a56135c91797cae6ad726e38eaeb6eb4b77cefff5c9d38ba0ecf874f7", size = 750326, upload-time = "2026-04-03T08:35:55.888Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/a8/af/48ac8483240de756d2438c380746e7130d1c6f75802ef22f3c6d49982787/huggingface_hub-0.36.2-py3-none-any.whl", hash = "sha256:48f0c8eac16145dfce371e9d2d7772854a4f591bcb56c9cf548accf531d54270", size = 566395, upload-time = "2026-02-06T09:24:11.133Z" },
{ url = "https://files.pythonhosted.org/packages/73/37/0d15d16150e1829f3e90962c99f28257f6de9e526a680b4c6f5acdb54fd2/huggingface_hub-1.9.0-py3-none-any.whl", hash = "sha256:2999328c058d39fd19ab748dd09bd4da2fbaa4f4c1ddea823eab103051e14a1f", size = 637355, upload-time = "2026-04-03T08:35:53.897Z" },
]
[[package]]
@@ -8033,15 +8037,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/59/8c/b1c87148aa15e099243ec9f0cf9d0e970cc2234c3257d558c25a2c5304e6/tokenizers-0.22.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f01a9c019878532f98927d2bacb79bbb404b43d3437455522a00a30718cdedb5", size = 3373542, upload-time = "2026-01-05T10:40:52.803Z" },
]
[[package]]
name = "toml"
version = "0.10.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253, upload-time = "2020-11-01T01:40:22.204Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588, upload-time = "2020-11-01T01:40:20.672Z" },
]
[[package]]
name = "tomli"
version = "2.0.2"
@@ -8062,11 +8057,11 @@ wheels = [
[[package]]
name = "tomlkit"
version = "0.14.0"
version = "0.13.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/c3/af/14b24e41977adb296d6bd1fb59402cf7d60ce364f90c890bd2ec65c43b5a/tomlkit-0.14.0.tar.gz", hash = "sha256:cf00efca415dbd57575befb1f6634c4f42d2d87dbba376128adb42c121b87064", size = 187167, upload-time = "2026-01-13T01:14:53.304Z" }
sdist = { url = "https://files.pythonhosted.org/packages/cc/18/0bbf3884e9eaa38819ebe46a7bd25dcd56b67434402b66a58c4b8e552575/tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1", size = 185207, upload-time = "2025-06-05T07:13:44.947Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b5/11/87d6d29fb5d237229d67973a6c9e06e048f01cf4994dee194ab0ea841814/tomlkit-0.14.0-py3-none-any.whl", hash = "sha256:592064ed85b40fa213469f81ac584f67a4f2992509a7c3ea2d632208623a3680", size = 39310, upload-time = "2026-01-13T01:14:51.965Z" },
{ url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" },
]
[[package]]
@@ -8172,24 +8167,23 @@ wheels = [
[[package]]
name = "transformers"
version = "4.57.6"
version = "5.5.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "filelock" },
{ name = "huggingface-hub" },
{ name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" },
{ name = "numpy", version = "2.4.4", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version >= '3.11'" },
{ name = "packaging" },
{ name = "pyyaml" },
{ name = "regex" },
{ name = "requests" },
{ name = "safetensors" },
{ name = "tokenizers" },
{ name = "tqdm" },
{ name = "typer" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c4/35/67252acc1b929dc88b6602e8c4a982e64f31e733b804c14bc24b47da35e6/transformers-4.57.6.tar.gz", hash = "sha256:55e44126ece9dc0a291521b7e5492b572e6ef2766338a610b9ab5afbb70689d3", size = 10134912, upload-time = "2026-01-16T10:38:39.284Z" }
sdist = { url = "https://files.pythonhosted.org/packages/ff/9d/fb46e729b461985f41a5740167688b924a4019141e5c164bea77548d3d9e/transformers-5.5.0.tar.gz", hash = "sha256:c8db656cf51c600cd8c75f06b20ef85c72e8b8ff9abc880c5d3e8bc70e0ddcbd", size = 8237745, upload-time = "2026-04-02T16:13:08.113Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/03/b8/e484ef633af3887baeeb4b6ad12743363af7cce68ae51e938e00aaa0529d/transformers-4.57.6-py3-none-any.whl", hash = "sha256:4c9e9de11333ddfe5114bc872c9f370509198acf0b87a832a0ab9458e2bd0550", size = 11993498, upload-time = "2026-01-16T10:38:31.289Z" },
{ url = "https://files.pythonhosted.org/packages/e7/28/35f7411ff80a3640c1f4fc907dcbb6a65061ebb82f66950e38bfc9f7f740/transformers-5.5.0-py3-none-any.whl", hash = "sha256:821a9ff0961abbb29eb1eb686d78df1c85929fdf213a3fe49dc6bd94f9efa944", size = 10245591, upload-time = "2026-04-02T16:13:03.462Z" },
]
[[package]]