mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-04-15 15:32:40 +00:00
Compare commits
1 Commits
1.14.2a5
...
docs/oss-3
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2b46d0113c |
@@ -4,76 +4,6 @@ description: "تحديثات المنتج والتحسينات وإصلاحات
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="15 أبريل 2026">
|
||||
## v1.14.2a5
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a5)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### الوثائق
|
||||
- تحديث سجل التغييرات والإصدار لـ v1.14.2a4
|
||||
|
||||
## المساهمون
|
||||
|
||||
@greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="15 أبريل 2026">
|
||||
## v1.14.2a4
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a4)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### الميزات
|
||||
- إضافة تلميحات استئناف إلى إصدار أدوات المطورين عند الفشل
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- إصلاح توجيه وضع الصرامة إلى واجهة برمجة تطبيقات Bedrock Converse
|
||||
- إصلاح إصدار pytest إلى 9.0.3 لثغرة الأمان GHSA-6w46-j5rx-g56g
|
||||
- رفع الحد الأدنى لـ OpenAI إلى >=2.0.0
|
||||
|
||||
### الوثائق
|
||||
- تحديث سجل التغييرات والإصدار لـ v1.14.2a3
|
||||
|
||||
## المساهمون
|
||||
|
||||
@greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="13 أبريل 2026">
|
||||
## v1.14.2a3
|
||||
|
||||
[عرض الإصدار على GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a3)
|
||||
|
||||
## ما الذي تغير
|
||||
|
||||
### الميزات
|
||||
- إضافة واجهة سطر الأوامر للتحقق من النشر
|
||||
- تحسين سهولة استخدام تهيئة LLM
|
||||
|
||||
### إصلاحات الأخطاء
|
||||
- تجاوز pypdf و uv إلى إصدارات مصححة لـ CVE-2026-40260 و GHSA-pjjw-68hj-v9mw
|
||||
- ترقية requests إلى >=2.33.0 لمعالجة ثغرة ملف مؤقت CVE
|
||||
- الحفاظ على معلمات استدعاء أداة Bedrock من خلال إزالة القيمة الافتراضية من نوع truthy
|
||||
- تنظيف مخططات الأدوات لوضع صارم
|
||||
- إصلاح اختبار تسلسل تضمين MemoryRecord
|
||||
|
||||
### الوثائق
|
||||
- تنظيف لغة A2A الخاصة بالمؤسسات
|
||||
- إضافة وثائق ميزات A2A الخاصة بالمؤسسات
|
||||
- تحديث وثائق A2A الخاصة بالمصادر المفتوحة
|
||||
- تحديث سجل التغييرات والإصدار لـ v1.14.2a2
|
||||
|
||||
## المساهمون
|
||||
|
||||
@Yanhu007, @greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="10 أبريل 2026">
|
||||
## v1.14.2a2
|
||||
|
||||
|
||||
@@ -392,8 +392,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -866,8 +865,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1340,8 +1338,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -1814,8 +1811,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -2287,8 +2283,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -2759,8 +2754,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -3231,8 +3225,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -3705,8 +3698,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -4177,8 +4169,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -4652,8 +4643,7 @@
|
||||
"en/enterprise/features/marketplace",
|
||||
"en/enterprise/features/agent-repositories",
|
||||
"en/enterprise/features/tools-and-integrations",
|
||||
"en/enterprise/features/pii-trace-redactions",
|
||||
"en/enterprise/features/a2a"
|
||||
"en/enterprise/features/pii-trace-redactions"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
||||
@@ -4,76 +4,6 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="Apr 15, 2026">
|
||||
## v1.14.2a5
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a5)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Documentation
|
||||
- Update changelog and version for v1.14.2a4
|
||||
|
||||
## Contributors
|
||||
|
||||
@greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 15, 2026">
|
||||
## v1.14.2a4
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a4)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Features
|
||||
- Add resume hints to devtools release on failure
|
||||
|
||||
### Bug Fixes
|
||||
- Fix strict mode forwarding to Bedrock Converse API
|
||||
- Fix pytest version to 9.0.3 for security vulnerability GHSA-6w46-j5rx-g56g
|
||||
- Bump OpenAI lower bound to >=2.0.0
|
||||
|
||||
### Documentation
|
||||
- Update changelog and version for v1.14.2a3
|
||||
|
||||
## Contributors
|
||||
|
||||
@greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 13, 2026">
|
||||
## v1.14.2a3
|
||||
|
||||
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a3)
|
||||
|
||||
## What's Changed
|
||||
|
||||
### Features
|
||||
- Add deploy validation CLI
|
||||
- Improve LLM initialization ergonomics
|
||||
|
||||
### Bug Fixes
|
||||
- Override pypdf and uv to patched versions for CVE-2026-40260 and GHSA-pjjw-68hj-v9mw
|
||||
- Upgrade requests to >=2.33.0 for CVE temp file vulnerability
|
||||
- Preserve Bedrock tool call arguments by removing truthy default
|
||||
- Sanitize tool schemas for strict mode
|
||||
- Deflake MemoryRecord embedding serialization test
|
||||
|
||||
### Documentation
|
||||
- Clean up enterprise A2A language
|
||||
- Add enterprise A2A feature documentation
|
||||
- Update OSS A2A documentation
|
||||
- Update changelog and version for v1.14.2a2
|
||||
|
||||
## Contributors
|
||||
|
||||
@Yanhu007, @greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="Apr 10, 2026">
|
||||
## v1.14.2a2
|
||||
|
||||
|
||||
@@ -33,7 +33,14 @@ A crew in crewAI represents a collaborative group of agents working together to
|
||||
| **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
|
||||
| **Planning LLM** *(optional)* | `planning_llm` | The language model used by the AgentPlanner in a planning process. |
|
||||
| **Knowledge Sources** _(optional)_ | `knowledge_sources` | Knowledge sources available at the crew level, accessible to all the agents. |
|
||||
| **Stream** _(optional)_ | `stream` | Enable streaming output to receive real-time updates during crew execution. Returns a `CrewStreamingOutput` object that can be iterated for chunks. Defaults to `False`. |
|
||||
| **Stream** _(optional)_ | `stream` | Enable streaming output to receive real-time updates during crew execution. Returns a `CrewStreamingOutput` object that can be iterated for chunks. Defaults to `False`. |
|
||||
| **Chat LLM** _(optional)_ | `chat_llm` | The language model used to orchestrate `crewai chat` CLI interactions with the crew. Accepts a model name string or `LLM` instance. Defaults to `None`. |
|
||||
| **Before Kickoff Callbacks** _(optional)_ | `before_kickoff_callbacks` | A list of callable functions executed **before** the crew starts. Each callback receives and can modify the inputs dict. Distinct from the `@before_kickoff` decorator. Defaults to `[]`. |
|
||||
| **After Kickoff Callbacks** _(optional)_ | `after_kickoff_callbacks` | A list of callable functions executed **after** the crew finishes. Each callback receives and can modify the `CrewOutput`. Distinct from the `@after_kickoff` decorator. Defaults to `[]`. |
|
||||
| **Tracing** _(optional)_ | `tracing` | Controls OpenTelemetry tracing for the crew. `True` = always enable, `False` = always disable, `None` = inherit from environment / user settings. Defaults to `None`. |
|
||||
| **Skills** _(optional)_ | `skills` | A list of `Path` objects (skill search directories) or pre-loaded `Skill` objects applied to all agents in the crew. Defaults to `None`. |
|
||||
| **Security Config** _(optional)_ | `security_config` | A `SecurityConfig` instance managing crew fingerprinting and identity. Defaults to `SecurityConfig()`. |
|
||||
| **Checkpoint** _(optional)_ | `checkpoint` | Enables automatic checkpointing. Pass `True` for sensible defaults, a `CheckpointConfig` for full control, `False` to opt out, or `None` to inherit. See the [Checkpointing](#checkpointing) section below. Defaults to `None`. |
|
||||
|
||||
<Tip>
|
||||
**Crew Max RPM**: The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents' `max_rpm` settings if you set it.
|
||||
@@ -271,6 +278,72 @@ crew = Crew(output_log_file = file_name.json) # Logs will be saved as file_name
|
||||
|
||||
|
||||
|
||||
## Checkpointing
|
||||
|
||||
Checkpointing lets a crew automatically save its state after key events (e.g. task completion) so that long-running or interrupted runs can be resumed exactly where they left off without re-executing completed tasks.
|
||||
|
||||
### Quick Start
|
||||
|
||||
Pass `checkpoint=True` to enable checkpointing with sensible defaults (saves to `.checkpoints/` after every task):
|
||||
|
||||
```python Code
|
||||
from crewai import Crew, Process
|
||||
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
tasks=[research_task, write_task],
|
||||
process=Process.sequential,
|
||||
checkpoint=True, # saves to .checkpoints/ after every task
|
||||
)
|
||||
|
||||
crew.kickoff(inputs={"topic": "AI trends"})
|
||||
```
|
||||
|
||||
### Full Control with `CheckpointConfig`
|
||||
|
||||
Use `CheckpointConfig` for fine-grained control over location, trigger events, storage backend, and retention:
|
||||
|
||||
```python Code
|
||||
from crewai import Crew, Process
|
||||
from crewai.state.checkpoint_config import CheckpointConfig
|
||||
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
tasks=[research_task, write_task],
|
||||
process=Process.sequential,
|
||||
checkpoint=CheckpointConfig(
|
||||
location="./.checkpoints", # directory for JSON files (default)
|
||||
on_events=["task_completed"], # trigger after each task (default)
|
||||
max_checkpoints=5, # keep only the 5 most recent checkpoints
|
||||
),
|
||||
)
|
||||
|
||||
crew.kickoff(inputs={"topic": "AI trends"})
|
||||
```
|
||||
|
||||
### Resuming from a Checkpoint
|
||||
|
||||
Use `Crew.from_checkpoint()` to restore a crew from a saved checkpoint file, then call `kickoff()` to resume:
|
||||
|
||||
```python Code
|
||||
# Resume from the most recent checkpoint
|
||||
crew = Crew.from_checkpoint(".checkpoints/latest.json")
|
||||
crew.kickoff()
|
||||
```
|
||||
|
||||
<Note>
|
||||
When restoring from a checkpoint, `checkpoint_inputs`, `checkpoint_train`, and `checkpoint_kickoff_event_id` are automatically reconstructed — you do not need to set these manually.
|
||||
</Note>
|
||||
|
||||
### `CheckpointConfig` Attributes
|
||||
|
||||
| Attribute | Type | Default | Description |
|
||||
| :----------------- | :------------------------------------- | :------------------- | :-------------------------------------------------------------------------------------------- |
|
||||
| `location` | `str` | `"./.checkpoints"` | Storage destination. For `JsonProvider` this is a directory path; for `SqliteProvider` a database file path. |
|
||||
| `on_events` | `list[str]` | `["task_completed"]` | Event types that trigger a checkpoint write. Use `["*"]` to checkpoint on every event. |
|
||||
| `provider` | `JsonProvider \| SqliteProvider` | `JsonProvider()` | Storage backend. Defaults to `JsonProvider` (plain JSON files). |
|
||||
| `max_checkpoints` | `int \| None` | `None` | Maximum checkpoints to keep. Oldest are pruned after each write. `None` keeps all. |
|
||||
|
||||
## Memory Utilization
|
||||
|
||||
Crews can utilize memory (short-term, long-term, and entity memory) to enhance their execution and learning over time. This feature allows crews to store and recall execution memories, aiding in decision-making and task execution strategies.
|
||||
|
||||
@@ -1,227 +0,0 @@
|
||||
---
|
||||
title: A2A on AMP
|
||||
description: Production-grade Agent-to-Agent communication with distributed state and multi-scheme authentication
|
||||
icon: "network-wired"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
<Warning>
|
||||
A2A server agents on AMP are in early release. APIs may change in future versions.
|
||||
</Warning>
|
||||
|
||||
## Overview
|
||||
|
||||
CrewAI AMP extends the open-source [A2A protocol implementation](/en/learn/a2a-agent-delegation) with production infrastructure for deploying distributed agents at scale. AMP supports A2A protocol versions 0.2 and 0.3. When you deploy a crew or agent with A2A server configuration to AMP, the platform automatically provisions distributed state management, authentication, multi-transport endpoints, and lifecycle management.
|
||||
|
||||
<Note>
|
||||
For A2A protocol fundamentals, client/server configuration, and authentication schemes, see the [A2A Agent Delegation](/en/learn/a2a-agent-delegation) documentation. This page covers what AMP adds on top of the open-source implementation.
|
||||
</Note>
|
||||
|
||||
### Usage
|
||||
|
||||
Add `A2AServerConfig` to any agent in your crew and deploy to AMP. The platform detects agents with server configuration and automatically registers A2A endpoints, generates agent cards, and provisions the infrastructure described below.
|
||||
|
||||
```python
|
||||
from crewai import Agent, Crew, Task
|
||||
from crewai.a2a import A2AServerConfig
|
||||
from crewai.a2a.auth import EnterpriseTokenAuth
|
||||
|
||||
agent = Agent(
|
||||
role="Data Analyst",
|
||||
goal="Analyze datasets and provide insights",
|
||||
backstory="Expert data scientist with statistical analysis skills",
|
||||
llm="gpt-4o",
|
||||
a2a=A2AServerConfig(
|
||||
auth=EnterpriseTokenAuth()
|
||||
)
|
||||
)
|
||||
|
||||
task = Task(
|
||||
description="Analyze the provided dataset",
|
||||
expected_output="Statistical summary with key insights",
|
||||
agent=agent
|
||||
)
|
||||
|
||||
crew = Crew(agents=[agent], tasks=[task])
|
||||
```
|
||||
|
||||
After [deploying to AMP](/en/enterprise/guides/deploy-to-amp), the platform registers two levels of A2A endpoints:
|
||||
|
||||
- **Crew-level**: an aggregate agent card at `/.well-known/agent-card.json` where each agent with `A2AServerConfig` is listed as a skill, with a JSON-RPC endpoint at `/a2a`
|
||||
- **Per-agent**: isolated agent cards and JSON-RPC endpoints mounted at `/a2a/agents/{role}/`, each with its own tenancy
|
||||
|
||||
Clients can interact with the crew as a whole or target a specific agent directly. To route a request to a specific agent through the crew-level endpoint, include `"target_agent"` in the message metadata with the agent's slugified role name (e.g., `"data-analyst"` for an agent with role `"Data Analyst"`). If no `target_agent` is provided, the request is handled by the first agent in the crew.
|
||||
|
||||
See [A2A Agent Delegation](/en/learn/a2a-agent-delegation#server-configuration-options) for the full list of `A2AServerConfig` options.
|
||||
|
||||
<Warning>
|
||||
Per the A2A protocol, agent cards are publicly accessible to enable discovery. This includes both the crew-level card at `/.well-known/agent-card.json` and per-agent cards at `/a2a/agents/{role}/.well-known/agent-card.json`. Do not include sensitive information in agent names, descriptions, or skill definitions.
|
||||
</Warning>
|
||||
|
||||
### File Inputs and Structured Output
|
||||
|
||||
A2A on AMP supports passing files and requesting structured output in both directions. Clients can send files as `FilePart`s and request structured responses by embedding a JSON schema in the message. Server agents receive files as `input_files` on the task, and return structured data as `DataPart`s when a schema is provided. See [File Inputs and Structured Output](/en/learn/a2a-agent-delegation#file-inputs-and-structured-output) for details.
|
||||
|
||||
### What AMP Adds
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card title="Distributed State" icon="database">
|
||||
Persistent task, context, and result storage
|
||||
</Card>
|
||||
<Card title="Enterprise Authentication" icon="shield-halved">
|
||||
OIDC, OAuth2, mTLS, and Enterprise token validation beyond simple bearer tokens
|
||||
</Card>
|
||||
<Card title="gRPC Transport" icon="bolt">
|
||||
Full gRPC server with TLS and authentication
|
||||
</Card>
|
||||
<Card title="Context Lifecycle" icon="clock-rotate-left">
|
||||
Automatic idle detection, expiration, and cleanup of long-running conversations
|
||||
</Card>
|
||||
<Card title="Signed Webhooks" icon="signature">
|
||||
HMAC-SHA256 signed push notifications with replay protection
|
||||
</Card>
|
||||
<Card title="Multi-Transport" icon="arrows-split-up-and-left">
|
||||
REST, JSON-RPC, and gRPC endpoints served simultaneously from a single deployment
|
||||
</Card>
|
||||
</CardGroup>
|
||||
|
||||
---
|
||||
|
||||
## Distributed State Management
|
||||
|
||||
In the open-source implementation, task and context state lives in memory on a single process. AMP replaces this with persistent, distributed stores.
|
||||
|
||||
### Storage Layers
|
||||
|
||||
| Store | Purpose |
|
||||
|---|---|
|
||||
| **Task Store** | Persists A2A task state and metadata |
|
||||
| **Context Store** | Tracks conversation context, creation time, last activity, and associated tasks |
|
||||
| **Result Store** | Caches task results for retrieval |
|
||||
| **Push Config Store** | Manages webhook subscriptions per task |
|
||||
|
||||
Multiple A2A deployments are automatically isolated from each other, preventing data collisions when sharing infrastructure.
|
||||
|
||||
---
|
||||
|
||||
## Enterprise Authentication
|
||||
|
||||
AMP supports six authentication schemes for incoming A2A requests, configurable per deployment. Authentication works across both HTTP and gRPC transports.
|
||||
|
||||
| Scheme | Description | Use Case |
|
||||
|---|---|---|
|
||||
| **SimpleTokenAuth** | Static bearer token from `AUTH_TOKEN` env var | Development, simple deployments |
|
||||
| **EnterpriseTokenAuth** | Token verification via CrewAI PlusAPI with integration token claims | AMP-to-AMP agent communication |
|
||||
| **OIDCAuth** | OpenID Connect JWT validation with JWKS endpoint caching | Enterprise SSO integration |
|
||||
| **OAuth2ServerAuth** | OAuth2 with configurable scopes | Fine-grained access control |
|
||||
| **APIKeyServerAuth** | API key validation via header or query parameter | Third-party integrations |
|
||||
| **MTLSServerAuth** | Mutual TLS certificate-based authentication | Zero-trust environments |
|
||||
|
||||
The configured auth scheme automatically populates the agent card's `securitySchemes` and `security` fields. Clients discover authentication requirements by fetching the agent card before making requests.
|
||||
|
||||
---
|
||||
|
||||
## Extended Agent Cards
|
||||
|
||||
AMP supports role-based skill visibility through extended agent cards. Unauthenticated users see the standard agent card with public skills. Authenticated users receive an extended card with additional capabilities.
|
||||
|
||||
This enables patterns like:
|
||||
- Public agents that expose basic skills to anyone, with advanced skills available to authenticated clients
|
||||
- Internal agents that advertise different capabilities based on the caller's identity
|
||||
|
||||
---
|
||||
|
||||
## gRPC Transport
|
||||
|
||||
If enabled, AMP provides full gRPC support alongside the default JSON-RPC transport.
|
||||
|
||||
- **TLS termination** with configurable certificate and key paths
|
||||
- **gRPC reflection** for debugging with tools like `grpcurl`
|
||||
- **Authentication** using the same schemes available for HTTP
|
||||
- **Extension validation** ensuring clients support required protocol extensions
|
||||
- **Version negotiation** across A2A protocol versions 0.2 and 0.3
|
||||
|
||||
For deployments exposing multiple agents, AMP automatically allocates per-agent gRPC ports and coordinates TLS, startup, and shutdown across all servers.
|
||||
|
||||
---
|
||||
|
||||
## Context Lifecycle Management
|
||||
|
||||
AMP tracks the lifecycle of A2A conversation contexts and automatically manages cleanup.
|
||||
|
||||
### Lifecycle States
|
||||
|
||||
| State | Condition | Action |
|
||||
|---|---|---|
|
||||
| **Active** | Context has recent activity | None |
|
||||
| **Idle** | No activity for a configured period | Marked idle, event emitted |
|
||||
| **Expired** | Context exceeds its maximum lifetime | Marked expired, associated tasks cleaned up, event emitted |
|
||||
|
||||
A background cleanup task runs hourly to scan for idle and expired contexts. All state transitions emit CrewAI events that integrate with the platform's observability features.
|
||||
|
||||
---
|
||||
|
||||
## Signed Push Notifications
|
||||
|
||||
When an A2A agent sends push notifications to a client webhook, AMP signs each request with HMAC-SHA256 to ensure integrity and prevent tampering.
|
||||
|
||||
### Signature Headers
|
||||
|
||||
| Header | Purpose |
|
||||
|---|---|
|
||||
| `X-A2A-Signature` | HMAC-SHA256 signature in `sha256={hex_digest}` format |
|
||||
| `X-A2A-Signature-Timestamp` | Unix timestamp bound to the signature |
|
||||
| `X-A2A-Notification-Token` | Optional notification auth token |
|
||||
|
||||
### Security Properties
|
||||
|
||||
- **Integrity**: payload cannot be modified without invalidating the signature
|
||||
- **Replay protection**: signatures are timestamp-bound with a configurable tolerance window
|
||||
- **Retry with backoff**: failed deliveries retry with exponential backoff
|
||||
|
||||
---
|
||||
|
||||
## Distributed Event Streaming
|
||||
|
||||
In the open-source implementation, SSE streaming works within a single process. AMP propagates SSE events across instances so that clients receive updates even when the instance holding the streaming connection differs from the instance executing the task.
|
||||
|
||||
---
|
||||
|
||||
## Multi-Transport Endpoints
|
||||
|
||||
AMP serves REST and JSON-RPC by default. gRPC is available as an additional transport if enabled.
|
||||
|
||||
| Transport | Path Convention | Description |
|
||||
|---|---|---|
|
||||
| **REST** | `/v1/message:send`, `/v1/message:stream`, `/v1/tasks` | Google API conventions |
|
||||
| **JSON-RPC** | Standard A2A JSON-RPC endpoint | Default A2A protocol transport |
|
||||
| **gRPC** | Per-agent port allocation | Optional, high-performance binary protocol |
|
||||
|
||||
All active transports share the same authentication, version negotiation, and extension validation. Agent cards are generated from agent and crew metadata — roles, goals, and tools become skills and descriptions — and automatically include interfaces for each active transport. They can also be manually configured via `A2AServerConfig`.
|
||||
|
||||
---
|
||||
|
||||
## Version and Extension Negotiation
|
||||
|
||||
AMP validates A2A protocol versions and extensions at the transport layer.
|
||||
|
||||
### Version Negotiation
|
||||
|
||||
- Clients send the `A2A-Version` header with their preferred version
|
||||
- AMP validates against supported versions (0.2, 0.3) and falls back to 0.3 if unspecified
|
||||
- The negotiated version is returned in the response headers
|
||||
|
||||
### Extension Validation
|
||||
|
||||
- Clients declare supported extensions via the `X-A2A-Extensions` header
|
||||
- AMP validates that clients support all extensions the agent requires
|
||||
- Requests from clients missing required extensions receive an `UnsupportedExtensionError`
|
||||
|
||||
---
|
||||
|
||||
## Next Steps
|
||||
|
||||
- [A2A Agent Delegation](/en/learn/a2a-agent-delegation) — A2A protocol fundamentals and configuration
|
||||
- [A2UI](/en/learn/a2ui) — Interactive UI rendering over A2A
|
||||
- [Deploy to AMP](/en/enterprise/guides/deploy-to-amp) — General deployment guide
|
||||
- [Webhook Streaming](/en/enterprise/features/webhook-streaming) — Event streaming for deployed automations
|
||||
@@ -7,10 +7,6 @@ mode: "wide"
|
||||
|
||||
## A2A Agent Delegation
|
||||
|
||||
<Info>
|
||||
Deploying A2A agents to production? See [A2A on AMP](/en/enterprise/features/a2a) for distributed state, enterprise authentication, gRPC transport, and horizontal scaling.
|
||||
</Info>
|
||||
|
||||
CrewAI treats [A2A protocol](https://a2a-protocol.org/latest/) as a first-class delegation primitive, enabling agents to delegate tasks, request information, and collaborate with remote agents, as well as act as A2A-compliant server agents.
|
||||
In client mode, agents autonomously choose between local execution and remote delegation based on task requirements.
|
||||
|
||||
@@ -100,28 +96,24 @@ The `A2AClientConfig` class accepts the following parameters:
|
||||
Update mechanism for receiving task status. Options: `StreamingConfig`, `PollingConfig`, or `PushNotificationConfig`.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="transport_protocol" type="Literal['JSONRPC', 'GRPC', 'HTTP+JSON']" default="JSONRPC">
|
||||
Transport protocol for A2A communication. Options: `JSONRPC` (default), `GRPC`, or `HTTP+JSON`.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="accepted_output_modes" type="list[str]" default='["application/json"]'>
|
||||
Media types the client can accept in responses.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="supported_transports" type="list[str]" default='["JSONRPC"]'>
|
||||
Ordered list of transport protocols the client supports.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="use_client_preference" type="bool" default="False">
|
||||
Whether to prioritize client transport preferences over server.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="extensions" type="list[str]" default="[]">
|
||||
A2A protocol extension URIs the client supports.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="client_extensions" type="list[A2AExtension]" default="[]">
|
||||
Client-side processing hooks for tool injection, prompt augmentation, and response modification.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="transport" type="ClientTransportConfig" default="ClientTransportConfig()">
|
||||
Transport configuration including preferred transport, supported transports for negotiation, and protocol-specific settings (gRPC message sizes, keepalive, etc.).
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="transport_protocol" type="Literal['JSONRPC', 'GRPC', 'HTTP+JSON']" default="None">
|
||||
**Deprecated**: Use `transport=ClientTransportConfig(preferred=...)` instead.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="supported_transports" type="list[str]" default="None">
|
||||
**Deprecated**: Use `transport=ClientTransportConfig(supported=...)` instead.
|
||||
Extension URIs the client supports.
|
||||
</ParamField>
|
||||
|
||||
## Authentication
|
||||
@@ -413,7 +405,11 @@ agent = Agent(
|
||||
Preferred endpoint URL. If set, overrides the URL passed to `to_agent_card()`.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="protocol_version" type="str" default="0.3.0">
|
||||
<ParamField path="preferred_transport" type="Literal['JSONRPC', 'GRPC', 'HTTP+JSON']" default="JSONRPC">
|
||||
Transport protocol for the preferred endpoint.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="protocol_version" type="str" default="0.3">
|
||||
A2A protocol version this agent supports.
|
||||
</ParamField>
|
||||
|
||||
@@ -445,36 +441,8 @@ agent = Agent(
|
||||
Whether agent provides extended card to authenticated users.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="extended_skills" type="list[AgentSkill]" default="[]">
|
||||
Additional skills visible only to authenticated users in the extended agent card.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="signing_config" type="AgentCardSigningConfig" default="None">
|
||||
Configuration for signing the AgentCard with JWS. Supports RS256, ES256, PS256, and related algorithms.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="server_extensions" type="list[ServerExtension]" default="[]">
|
||||
Server-side A2A protocol extensions with `on_request`/`on_response` hooks that modify agent behavior.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="push_notifications" type="ServerPushNotificationConfig" default="None">
|
||||
Configuration for outgoing push notifications, including HMAC-SHA256 signing secret.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="transport" type="ServerTransportConfig" default="ServerTransportConfig()">
|
||||
Transport configuration including preferred transport, gRPC server settings, JSON-RPC paths, and HTTP+JSON settings.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="auth" type="ServerAuthScheme" default="None">
|
||||
Authentication scheme for incoming A2A requests. Defaults to `SimpleTokenAuth` using the `AUTH_TOKEN` environment variable.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="preferred_transport" type="Literal['JSONRPC', 'GRPC', 'HTTP+JSON']" default="None">
|
||||
**Deprecated**: Use `transport=ServerTransportConfig(preferred=...)` instead.
|
||||
</ParamField>
|
||||
|
||||
<ParamField path="signatures" type="list[AgentCardSignature]" default="None">
|
||||
**Deprecated**: Use `signing_config=AgentCardSigningConfig(...)` instead.
|
||||
<ParamField path="signatures" type="list[AgentCardSignature]" default="[]">
|
||||
JSON Web Signatures for the AgentCard.
|
||||
</ParamField>
|
||||
|
||||
### Combined Client and Server
|
||||
@@ -500,14 +468,6 @@ agent = Agent(
|
||||
)
|
||||
```
|
||||
|
||||
### File Inputs and Structured Output
|
||||
|
||||
A2A supports passing files and requesting structured output in both directions.
|
||||
|
||||
**Client side**: When delegating to a remote A2A agent, files from the task's `input_files` are sent as `FilePart`s in the outgoing message. If `response_model` is set on the `A2AClientConfig`, the Pydantic model's JSON schema is embedded in the message metadata, requesting structured output from the remote agent.
|
||||
|
||||
**Server side**: Incoming `FilePart`s are extracted and passed to the agent's task as `input_files`. If the client included a JSON schema, the server creates a response model from it and applies it to the task. When the agent returns structured data, the response is sent back as a `DataPart` rather than plain text.
|
||||
|
||||
## Best Practices
|
||||
|
||||
<CardGroup cols={2}>
|
||||
|
||||
@@ -4,76 +4,6 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="2026년 4월 15일">
|
||||
## v1.14.2a5
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a5)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 문서
|
||||
- v1.14.2a4의 변경 로그 및 버전 업데이트
|
||||
|
||||
## 기여자
|
||||
|
||||
@greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 15일">
|
||||
## v1.14.2a4
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a4)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 기능
|
||||
- 실패 시 devtools 릴리스에 이력서 힌트 추가
|
||||
|
||||
### 버그 수정
|
||||
- Bedrock Converse API로의 엄격 모드 포워딩 수정
|
||||
- 보안 취약점 GHSA-6w46-j5rx-g56g에 대해 pytest 버전을 9.0.3으로 수정
|
||||
- OpenAI 하한을 >=2.0.0으로 상향 조정
|
||||
|
||||
### 문서
|
||||
- v1.14.2a3에 대한 변경 로그 및 버전 업데이트
|
||||
|
||||
## 기여자
|
||||
|
||||
@greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 13일">
|
||||
## v1.14.2a3
|
||||
|
||||
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a3)
|
||||
|
||||
## 변경 사항
|
||||
|
||||
### 기능
|
||||
- 배포 검증 CLI 추가
|
||||
- LLM 초기화 사용성 개선
|
||||
|
||||
### 버그 수정
|
||||
- CVE-2026-40260 및 GHSA-pjjw-68hj-v9mw에 대한 패치된 버전으로 pypdf 및 uv 재정의
|
||||
- CVE 임시 파일 취약점에 대해 requests를 >=2.33.0으로 업그레이드
|
||||
- 진리값 기본값을 제거하여 Bedrock 도구 호출 인수 보존
|
||||
- 엄격 모드를 위한 도구 스키마 정리
|
||||
- MemoryRecord 임베딩 직렬화 테스트의 불안정성 제거
|
||||
|
||||
### 문서
|
||||
- 기업 A2A 언어 정리
|
||||
- 기업 A2A 기능 문서 추가
|
||||
- OSS A2A 문서 업데이트
|
||||
- v1.14.2a2에 대한 변경 로그 및 버전 업데이트
|
||||
|
||||
## 기여자
|
||||
|
||||
@Yanhu007, @greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="2026년 4월 10일">
|
||||
## v1.14.2a2
|
||||
|
||||
|
||||
@@ -4,76 +4,6 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
|
||||
icon: "clock"
|
||||
mode: "wide"
|
||||
---
|
||||
<Update label="15 abr 2026">
|
||||
## v1.14.2a5
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a5)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Documentação
|
||||
- Atualizar changelog e versão para v1.14.2a4
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="15 abr 2026">
|
||||
## v1.14.2a4
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a4)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Recursos
|
||||
- Adicionar dicas de retomar ao release do devtools em caso de falha
|
||||
|
||||
### Correções de Bugs
|
||||
- Corrigir o encaminhamento do modo estrito para a API Bedrock Converse
|
||||
- Corrigir a versão do pytest para 9.0.3 devido à vulnerabilidade de segurança GHSA-6w46-j5rx-g56g
|
||||
- Aumentar o limite inferior do OpenAI para >=2.0.0
|
||||
|
||||
### Documentação
|
||||
- Atualizar o changelog e a versão para v1.14.2a3
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="13 abr 2026">
|
||||
## v1.14.2a3
|
||||
|
||||
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.14.2a3)
|
||||
|
||||
## O que Mudou
|
||||
|
||||
### Recursos
|
||||
- Adicionar CLI de validação de deploy
|
||||
- Melhorar a ergonomia de inicialização do LLM
|
||||
|
||||
### Correções de Bugs
|
||||
- Substituir pypdf e uv por versões corrigidas para CVE-2026-40260 e GHSA-pjjw-68hj-v9mw
|
||||
- Atualizar requests para >=2.33.0 devido à vulnerabilidade de arquivo temporário CVE
|
||||
- Preservar os argumentos de chamada da ferramenta Bedrock removendo o padrão truthy
|
||||
- Sanitizar esquemas de ferramentas para modo estrito
|
||||
- Remover flakiness do teste de serialização de embedding MemoryRecord
|
||||
|
||||
### Documentação
|
||||
- Limpar a linguagem do A2A empresarial
|
||||
- Adicionar documentação de recursos do A2A empresarial
|
||||
- Atualizar documentação do A2A OSS
|
||||
- Atualizar changelog e versão para v1.14.2a2
|
||||
|
||||
## Contribuidores
|
||||
|
||||
@Yanhu007, @greysonlalonde
|
||||
|
||||
</Update>
|
||||
|
||||
<Update label="10 abr 2026">
|
||||
## v1.14.2a2
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ authors = [
|
||||
requires-python = ">=3.10, <3.14"
|
||||
dependencies = [
|
||||
"Pillow~=12.1.1",
|
||||
"pypdf~=6.10.0",
|
||||
"pypdf~=6.9.1",
|
||||
"python-magic>=0.4.27",
|
||||
"aiocache~=0.12.3",
|
||||
"aiofiles~=24.1.0",
|
||||
|
||||
@@ -152,4 +152,4 @@ __all__ = [
|
||||
"wrap_file_source",
|
||||
]
|
||||
|
||||
__version__ = "1.14.2a5"
|
||||
__version__ = "1.14.2a2"
|
||||
|
||||
@@ -9,8 +9,8 @@ authors = [
|
||||
requires-python = ">=3.10, <3.14"
|
||||
dependencies = [
|
||||
"pytube~=15.0.0",
|
||||
"requests>=2.33.0,<3",
|
||||
"crewai==1.14.2a5",
|
||||
"requests~=2.32.5",
|
||||
"crewai==1.14.2a2",
|
||||
"tiktoken~=0.8.0",
|
||||
"beautifulsoup4~=4.13.4",
|
||||
"python-docx~=1.2.0",
|
||||
|
||||
@@ -305,4 +305,4 @@ __all__ = [
|
||||
"ZapierActionTools",
|
||||
]
|
||||
|
||||
__version__ = "1.14.2a5"
|
||||
__version__ = "1.14.2a2"
|
||||
|
||||
@@ -10,7 +10,7 @@ requires-python = ">=3.10, <3.14"
|
||||
dependencies = [
|
||||
# Core Dependencies
|
||||
"pydantic~=2.11.9",
|
||||
"openai>=2.0.0,<3",
|
||||
"openai>=1.83.0,<3",
|
||||
"instructor>=1.3.3",
|
||||
# Text Processing
|
||||
"pdfplumber~=0.11.4",
|
||||
@@ -40,7 +40,7 @@ dependencies = [
|
||||
"pydantic-settings~=2.10.1",
|
||||
"httpx~=0.28.1",
|
||||
"mcp~=1.26.0",
|
||||
"uv~=0.11.6",
|
||||
"uv~=0.9.13",
|
||||
"aiosqlite~=0.21.0",
|
||||
"pyyaml~=6.0",
|
||||
"aiofiles~=24.1.0",
|
||||
@@ -55,7 +55,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
|
||||
|
||||
[project.optional-dependencies]
|
||||
tools = [
|
||||
"crewai-tools==1.14.2a5",
|
||||
"crewai-tools==1.14.2a2",
|
||||
]
|
||||
embeddings = [
|
||||
"tiktoken~=0.8.0"
|
||||
|
||||
@@ -46,7 +46,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
|
||||
|
||||
_suppress_pydantic_deprecation_warnings()
|
||||
|
||||
__version__ = "1.14.2a5"
|
||||
__version__ = "1.14.2a2"
|
||||
_telemetry_submitted = False
|
||||
|
||||
|
||||
|
||||
@@ -98,6 +98,7 @@ class A2AErrorCode(IntEnum):
|
||||
"""The specified artifact was not found."""
|
||||
|
||||
|
||||
# Error code to default message mapping
|
||||
ERROR_MESSAGES: dict[int, str] = {
|
||||
A2AErrorCode.JSON_PARSE_ERROR: "Parse error",
|
||||
A2AErrorCode.INVALID_REQUEST: "Invalid Request",
|
||||
|
||||
@@ -63,21 +63,25 @@ class A2AExtension(Protocol):
|
||||
Example:
|
||||
class MyExtension:
|
||||
def inject_tools(self, agent: Agent) -> None:
|
||||
# Add custom tools to the agent
|
||||
pass
|
||||
|
||||
def extract_state_from_history(
|
||||
self, conversation_history: Sequence[Message]
|
||||
) -> ConversationState | None:
|
||||
# Extract state from conversation
|
||||
return None
|
||||
|
||||
def augment_prompt(
|
||||
self, base_prompt: str, conversation_state: ConversationState | None
|
||||
) -> str:
|
||||
# Add custom instructions
|
||||
return base_prompt
|
||||
|
||||
def process_response(
|
||||
self, agent_response: Any, conversation_state: ConversationState | None
|
||||
) -> Any:
|
||||
# Modify response if needed
|
||||
return agent_response
|
||||
"""
|
||||
|
||||
|
||||
@@ -77,6 +77,7 @@ def extract_a2a_agent_ids_from_config(
|
||||
else:
|
||||
configs = a2a_config
|
||||
|
||||
# Filter to only client configs (those with endpoint)
|
||||
client_configs: list[A2AClientConfigTypes] = [
|
||||
config for config in configs if isinstance(config, (A2AConfig, A2AClientConfig))
|
||||
]
|
||||
|
||||
@@ -1341,6 +1341,7 @@ class Agent(BaseAgent):
|
||||
|
||||
raw_tools: list[BaseTool] = self.tools or []
|
||||
|
||||
# Inject memory tools for standalone kickoff (crew path handles its own)
|
||||
agent_memory = getattr(self, "memory", None)
|
||||
if agent_memory is not None:
|
||||
from crewai.tools.memory_tools import create_memory_tools
|
||||
@@ -1398,6 +1399,7 @@ class Agent(BaseAgent):
|
||||
if input_files:
|
||||
all_files.update(input_files)
|
||||
|
||||
# Inject memory context for standalone kickoff (recall before execution)
|
||||
if agent_memory is not None:
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
@@ -1483,6 +1485,8 @@ class Agent(BaseAgent):
|
||||
Note:
|
||||
For explicit async usage outside of Flow, use kickoff_async() directly.
|
||||
"""
|
||||
# Magic auto-async: if inside event loop (e.g., inside a Flow),
|
||||
# return coroutine for Flow to await
|
||||
if is_inside_event_loop():
|
||||
return self.kickoff_async(messages, response_format, input_files)
|
||||
|
||||
@@ -1633,7 +1637,7 @@ class Agent(BaseAgent):
|
||||
if isinstance(conversion_result, BaseModel):
|
||||
formatted_result = conversion_result
|
||||
except ConverterError:
|
||||
pass
|
||||
pass # Keep raw output if conversion fails
|
||||
else:
|
||||
raw_output = str(output) if not isinstance(output, str) else output
|
||||
|
||||
@@ -1715,6 +1719,7 @@ class Agent(BaseAgent):
|
||||
elif callable(self.guardrail):
|
||||
guardrail_callable = self.guardrail
|
||||
else:
|
||||
# Should not happen if called from kickoff with guardrail check
|
||||
return output
|
||||
|
||||
guardrail_result = process_guardrail(
|
||||
|
||||
@@ -41,6 +41,7 @@ class PlanningConfig(BaseModel):
|
||||
from crewai import Agent
|
||||
from crewai.agent.planning_config import PlanningConfig
|
||||
|
||||
# Simple usage — fast, linear execution (default)
|
||||
agent = Agent(
|
||||
role="Researcher",
|
||||
goal="Research topics",
|
||||
@@ -48,6 +49,7 @@ class PlanningConfig(BaseModel):
|
||||
planning_config=PlanningConfig(),
|
||||
)
|
||||
|
||||
# Balanced — replan only when steps fail
|
||||
agent = Agent(
|
||||
role="Researcher",
|
||||
goal="Research topics",
|
||||
@@ -57,6 +59,7 @@ class PlanningConfig(BaseModel):
|
||||
),
|
||||
)
|
||||
|
||||
# Full adaptive planning with refinement and replanning
|
||||
agent = Agent(
|
||||
role="Researcher",
|
||||
goal="Research topics",
|
||||
@@ -66,7 +69,7 @@ class PlanningConfig(BaseModel):
|
||||
max_attempts=3,
|
||||
max_steps=10,
|
||||
plan_prompt="Create a focused plan for: {description}",
|
||||
llm="gpt-4o-mini",
|
||||
llm="gpt-4o-mini", # Use cheaper model for planning
|
||||
),
|
||||
)
|
||||
```
|
||||
|
||||
@@ -39,6 +39,7 @@ def handle_reasoning(agent: Agent, task: Task) -> None:
|
||||
agent: The agent performing the task.
|
||||
task: The task to execute.
|
||||
"""
|
||||
# Check if planning is enabled using the planning_enabled property
|
||||
if not getattr(agent, "planning_enabled", False):
|
||||
return
|
||||
|
||||
|
||||
@@ -99,10 +99,12 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):
|
||||
Returns:
|
||||
Tool execution result.
|
||||
"""
|
||||
# Get the parameter name from the schema
|
||||
param_name: str = next(
|
||||
iter(tool.args_schema.model_json_schema()["properties"].keys())
|
||||
)
|
||||
|
||||
# Handle different argument types
|
||||
args_dict: dict[str, Any]
|
||||
if isinstance(arguments, dict):
|
||||
args_dict = arguments
|
||||
@@ -114,13 +116,16 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):
|
||||
else:
|
||||
args_dict = {param_name: str(arguments)}
|
||||
|
||||
# Run the tool with the processed arguments
|
||||
output: Any | Awaitable[Any] = tool._run(**args_dict)
|
||||
|
||||
# Await if the tool returned a coroutine
|
||||
if inspect.isawaitable(output):
|
||||
result: Any = await output
|
||||
else:
|
||||
result = output
|
||||
|
||||
# Ensure the result is JSON serializable
|
||||
if isinstance(result, (dict, list, str, int, float, bool, type(None))):
|
||||
return result
|
||||
return str(result)
|
||||
|
||||
@@ -383,6 +383,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
if isinstance(tool, BaseTool):
|
||||
processed_tools.append(tool)
|
||||
elif all(hasattr(tool, attr) for attr in required_attrs):
|
||||
# Tool has the required attributes, create a Tool instance
|
||||
processed_tools.append(Tool.from_langchain(tool))
|
||||
else:
|
||||
raise ValueError(
|
||||
@@ -447,12 +448,14 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_and_set_attributes(self) -> Self:
|
||||
# Validate required fields
|
||||
for field in ["role", "goal", "backstory"]:
|
||||
if getattr(self, field) is None:
|
||||
raise ValueError(
|
||||
f"{field} must be provided either directly or through config"
|
||||
)
|
||||
|
||||
# Set private attributes
|
||||
self._logger = Logger(verbose=self.verbose)
|
||||
if self.max_rpm and not self._rpm_controller:
|
||||
self._rpm_controller = RPMController(
|
||||
@@ -461,6 +464,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
if not self._token_process:
|
||||
self._token_process = TokenProcess()
|
||||
|
||||
# Initialize security_config if not provided
|
||||
if self.security_config is None:
|
||||
self.security_config = SecurityConfig()
|
||||
|
||||
@@ -562,11 +566,14 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
"actions",
|
||||
}
|
||||
|
||||
# Copy llm
|
||||
existing_llm = shallow_copy(self.llm)
|
||||
copied_knowledge = shallow_copy(self.knowledge)
|
||||
copied_knowledge_storage = shallow_copy(self.knowledge_storage)
|
||||
# Properly copy knowledge sources if they exist
|
||||
existing_knowledge_sources = None
|
||||
if self.knowledge_sources:
|
||||
# Create a shared storage instance for all knowledge sources
|
||||
shared_storage = (
|
||||
self.knowledge_sources[0].storage if self.knowledge_sources else None
|
||||
)
|
||||
@@ -578,6 +585,7 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
if hasattr(source, "model_copy")
|
||||
else shallow_copy(source)
|
||||
)
|
||||
# Ensure all copied sources use the same storage instance
|
||||
copied_source.storage = shared_storage
|
||||
existing_knowledge_sources.append(copied_source)
|
||||
|
||||
|
||||
@@ -4,6 +4,8 @@ import re
|
||||
from typing import Final
|
||||
|
||||
|
||||
# crewai.agents.parser constants
|
||||
|
||||
FINAL_ANSWER_ACTION: Final[str] = "Final Answer:"
|
||||
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE: Final[str] = (
|
||||
"I did it wrong. Invalid Format: I missed the 'Action:' after 'Thought:'. I will do right next, and don't use a tool I have already used.\n"
|
||||
|
||||
@@ -296,6 +296,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
Returns:
|
||||
Final answer from the agent.
|
||||
"""
|
||||
# Check if model supports native function calling
|
||||
use_native_tools = (
|
||||
hasattr(self.llm, "supports_function_calling")
|
||||
and callable(getattr(self.llm, "supports_function_calling", None))
|
||||
@@ -306,6 +307,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
if use_native_tools:
|
||||
return self._invoke_loop_native_tools()
|
||||
|
||||
# Fall back to ReAct text-based pattern
|
||||
return self._invoke_loop_react()
|
||||
|
||||
def _invoke_loop_react(self) -> AgentFinish:
|
||||
@@ -345,6 +347,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
executor_context=self,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
# breakpoint()
|
||||
if self.response_model is not None:
|
||||
try:
|
||||
if isinstance(answer, BaseModel):
|
||||
@@ -362,6 +365,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
text=answer,
|
||||
)
|
||||
except ValidationError:
|
||||
# If validation fails, convert BaseModel to JSON string for parsing
|
||||
answer_str = (
|
||||
answer.model_dump_json()
|
||||
if isinstance(answer, BaseModel)
|
||||
@@ -371,12 +375,14 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
answer_str, self.use_stop_words
|
||||
) # type: ignore[assignment]
|
||||
else:
|
||||
# When no response_model, answer should be a string
|
||||
answer_str = str(answer) if not isinstance(answer, str) else answer
|
||||
formatted_answer = process_llm_response(
|
||||
answer_str, self.use_stop_words
|
||||
) # type: ignore[assignment]
|
||||
|
||||
if isinstance(formatted_answer, AgentAction):
|
||||
# Extract agent fingerprint if available
|
||||
fingerprint_context = {}
|
||||
if (
|
||||
self.agent
|
||||
@@ -420,6 +426,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
|
||||
except Exception as e:
|
||||
if e.__class__.__module__.startswith("litellm"):
|
||||
# Do not retry on litellm errors
|
||||
raise e
|
||||
if is_context_length_exceeded(e):
|
||||
handle_context_length(
|
||||
@@ -436,6 +443,10 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
finally:
|
||||
self.iterations += 1
|
||||
|
||||
# During the invoke loop, formatted_answer alternates between AgentAction
|
||||
# (when the agent is using tools) and eventually becomes AgentFinish
|
||||
# (when the agent reaches a final answer). This check confirms we've
|
||||
# reached a final answer and helps type checking understand this transition.
|
||||
if not isinstance(formatted_answer, AgentFinish):
|
||||
raise RuntimeError(
|
||||
"Agent execution ended without reaching a final answer. "
|
||||
@@ -454,7 +465,9 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
Returns:
|
||||
Final answer from the agent.
|
||||
"""
|
||||
# Convert tools to OpenAI schema format
|
||||
if not self.original_tools:
|
||||
# No tools available, fall back to simple LLM call
|
||||
return self._invoke_loop_native_no_tools()
|
||||
|
||||
openai_tools, available_functions, self._tool_name_mapping = (
|
||||
@@ -477,6 +490,10 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
|
||||
enforce_rpm_limit(self.request_within_rpm_limit)
|
||||
|
||||
# Call LLM with native tools
|
||||
# Pass available_functions=None so the LLM returns tool_calls
|
||||
# without executing them. The executor handles tool execution
|
||||
# via _handle_native_tool_calls to properly manage message history.
|
||||
answer = get_llm_response(
|
||||
llm=cast("BaseLLM", self.llm),
|
||||
messages=self.messages,
|
||||
@@ -491,26 +508,32 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
|
||||
# Check if the response is a list of tool calls
|
||||
if (
|
||||
isinstance(answer, list)
|
||||
and answer
|
||||
and self._is_tool_call_list(answer)
|
||||
):
|
||||
# Handle tool calls - execute tools and add results to messages
|
||||
tool_finish = self._handle_native_tool_calls(
|
||||
answer, available_functions
|
||||
)
|
||||
# If tool has result_as_answer=True, return immediately
|
||||
if tool_finish is not None:
|
||||
return tool_finish
|
||||
# Continue loop to let LLM analyze results and decide next steps
|
||||
continue
|
||||
|
||||
# Text or other response - handle as potential final answer
|
||||
if isinstance(answer, str):
|
||||
# Text response - this is the final answer
|
||||
formatted_answer = AgentFinish(
|
||||
thought="",
|
||||
output=answer,
|
||||
text=answer,
|
||||
)
|
||||
self._invoke_step_callback(formatted_answer)
|
||||
self._append_message(answer)
|
||||
self._append_message(answer) # Save final answer to messages
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
@@ -526,13 +549,14 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
# Unexpected response type, treat as final answer
|
||||
formatted_answer = AgentFinish(
|
||||
thought="",
|
||||
output=str(answer),
|
||||
text=str(answer),
|
||||
)
|
||||
self._invoke_step_callback(formatted_answer)
|
||||
self._append_message(str(answer))
|
||||
self._append_message(str(answer)) # Save final answer to messages
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
@@ -603,10 +627,12 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
if not response:
|
||||
return False
|
||||
first_item = response[0]
|
||||
# OpenAI-style
|
||||
if hasattr(first_item, "function") or (
|
||||
isinstance(first_item, dict) and "function" in first_item
|
||||
):
|
||||
return True
|
||||
# Anthropic-style (object with attributes)
|
||||
if (
|
||||
hasattr(first_item, "type")
|
||||
and getattr(first_item, "type", None) == "tool_use"
|
||||
@@ -614,12 +640,14 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
return True
|
||||
if hasattr(first_item, "name") and hasattr(first_item, "input"):
|
||||
return True
|
||||
# Bedrock-style (dict with name and input keys)
|
||||
if (
|
||||
isinstance(first_item, dict)
|
||||
and "name" in first_item
|
||||
and "input" in first_item
|
||||
):
|
||||
return True
|
||||
# Gemini-style
|
||||
if hasattr(first_item, "function_call") and first_item.function_call:
|
||||
return True
|
||||
return False
|
||||
@@ -678,6 +706,8 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
for _, func_name, _ in parsed_calls
|
||||
)
|
||||
|
||||
# Preserve historical sequential behavior for result_as_answer batches.
|
||||
# Also avoid threading around usage counters for max_usage_count tools.
|
||||
if has_result_as_answer_in_batch or has_max_usage_count_in_batch:
|
||||
logger.debug(
|
||||
"Skipping parallel native execution because batch includes result_as_answer or max_usage_count tool"
|
||||
@@ -743,6 +773,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
self.messages.append(reasoning_message)
|
||||
return None
|
||||
|
||||
# Sequential behavior: process only first tool call, then force reflection.
|
||||
call_id, func_name, func_args = parsed_calls[0]
|
||||
self._append_assistant_tool_calls_message([(call_id, func_name, func_args)])
|
||||
|
||||
@@ -796,7 +827,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
func_name = sanitize_tool_name(
|
||||
func_info.get("name", "") or tool_call.get("name", "")
|
||||
)
|
||||
func_args = func_info.get("arguments") or tool_call.get("input", {})
|
||||
func_args = func_info.get("arguments", "{}") or tool_call.get("input", {})
|
||||
return call_id, func_name, func_args
|
||||
return None
|
||||
|
||||
@@ -1171,6 +1202,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
text=answer,
|
||||
)
|
||||
except ValidationError:
|
||||
# If validation fails, convert BaseModel to JSON string for parsing
|
||||
answer_str = (
|
||||
answer.model_dump_json()
|
||||
if isinstance(answer, BaseModel)
|
||||
@@ -1180,6 +1212,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
answer_str, self.use_stop_words
|
||||
) # type: ignore[assignment]
|
||||
else:
|
||||
# When no response_model, answer should be a string
|
||||
answer_str = str(answer) if not isinstance(answer, str) else answer
|
||||
formatted_answer = process_llm_response(
|
||||
answer_str, self.use_stop_words
|
||||
@@ -1286,6 +1319,10 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
|
||||
enforce_rpm_limit(self.request_within_rpm_limit)
|
||||
|
||||
# Call LLM with native tools
|
||||
# Pass available_functions=None so the LLM returns tool_calls
|
||||
# without executing them. The executor handles tool execution
|
||||
# via _handle_native_tool_calls to properly manage message history.
|
||||
answer = await aget_llm_response(
|
||||
llm=cast("BaseLLM", self.llm),
|
||||
messages=self.messages,
|
||||
@@ -1299,26 +1336,32 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
executor_context=self,
|
||||
verbose=self.agent.verbose,
|
||||
)
|
||||
# Check if the response is a list of tool calls
|
||||
if (
|
||||
isinstance(answer, list)
|
||||
and answer
|
||||
and self._is_tool_call_list(answer)
|
||||
):
|
||||
# Handle tool calls - execute tools and add results to messages
|
||||
tool_finish = self._handle_native_tool_calls(
|
||||
answer, available_functions
|
||||
)
|
||||
# If tool has result_as_answer=True, return immediately
|
||||
if tool_finish is not None:
|
||||
return tool_finish
|
||||
# Continue loop to let LLM analyze results and decide next steps
|
||||
continue
|
||||
|
||||
# Text or other response - handle as potential final answer
|
||||
if isinstance(answer, str):
|
||||
# Text response - this is the final answer
|
||||
formatted_answer = AgentFinish(
|
||||
thought="",
|
||||
output=answer,
|
||||
text=answer,
|
||||
)
|
||||
await self._ainvoke_step_callback(formatted_answer)
|
||||
self._append_message(answer)
|
||||
self._append_message(answer) # Save final answer to messages
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
@@ -1334,13 +1377,14 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
# Unexpected response type, treat as final answer
|
||||
formatted_answer = AgentFinish(
|
||||
thought="",
|
||||
output=str(answer),
|
||||
text=str(answer),
|
||||
)
|
||||
await self._ainvoke_step_callback(formatted_answer)
|
||||
self._append_message(str(answer))
|
||||
self._append_message(str(answer)) # Save final answer to messages
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
@@ -1411,6 +1455,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
Returns:
|
||||
Updated action or final answer.
|
||||
"""
|
||||
# Special case for add_image_tool
|
||||
add_image_tool = I18N_DEFAULT.tools("add_image")
|
||||
if (
|
||||
isinstance(add_image_tool, dict)
|
||||
@@ -1530,14 +1575,17 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
|
||||
training_data = training_handler.load() or {}
|
||||
|
||||
# Initialize or retrieve agent's training data
|
||||
agent_training_data = training_data.get(agent_id, {})
|
||||
|
||||
if human_feedback is not None:
|
||||
# Save initial output and human feedback
|
||||
agent_training_data[train_iteration] = {
|
||||
"initial_output": result.output,
|
||||
"human_feedback": human_feedback,
|
||||
}
|
||||
else:
|
||||
# Save improved output
|
||||
if train_iteration in agent_training_data:
|
||||
agent_training_data[train_iteration]["improved_output"] = result.output
|
||||
else:
|
||||
@@ -1551,6 +1599,7 @@ class CrewAgentExecutor(BaseAgentExecutor):
|
||||
)
|
||||
return
|
||||
|
||||
# Update the training data and save
|
||||
training_data[agent_id] = agent_training_data
|
||||
training_handler.save(training_data)
|
||||
|
||||
|
||||
@@ -94,8 +94,11 @@ def parse(text: str) -> AgentAction | AgentFinish:
|
||||
|
||||
if includes_answer:
|
||||
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
|
||||
# Check whether the final answer ends with triple backticks.
|
||||
if final_answer.endswith("```"):
|
||||
# Count occurrences of triple backticks in the final answer.
|
||||
count = final_answer.count("```")
|
||||
# If count is odd then it's an unmatched trailing set; remove it.
|
||||
if count % 2 != 0:
|
||||
final_answer = final_answer[:-3].rstrip()
|
||||
return AgentFinish(thought=thought, output=final_answer, text=text)
|
||||
@@ -143,6 +146,7 @@ def _extract_thought(text: str) -> str:
|
||||
if thought_index == -1:
|
||||
return ""
|
||||
thought = text[:thought_index].strip()
|
||||
# Remove any triple backticks from the thought string
|
||||
return thought.replace("```", "").strip()
|
||||
|
||||
|
||||
@@ -167,9 +171,18 @@ def _safe_repair_json(tool_input: str) -> str:
|
||||
Returns:
|
||||
The repaired JSON string or original if repair fails.
|
||||
"""
|
||||
# Skip repair if the input starts and ends with square brackets
|
||||
# Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]').
|
||||
# These are typically valid JSON arrays or strings that do not require repair. Attempting to repair such inputs
|
||||
# might lead to unintended alterations, such as wrapping the entire input in additional layers or modifying
|
||||
# the structure in a way that changes its meaning. By skipping the repair for inputs that start and end with
|
||||
# square brackets, we preserve the integrity of these valid JSON structures and avoid unnecessary modifications.
|
||||
if tool_input.startswith("[") and tool_input.endswith("]"):
|
||||
return tool_input
|
||||
|
||||
# Before repair, handle common LLM issues:
|
||||
# 1. Replace """ with " to avoid JSON parser errors
|
||||
|
||||
tool_input = tool_input.replace('"""', '"')
|
||||
|
||||
result = repair_json(tool_input)
|
||||
|
||||
@@ -83,6 +83,10 @@ class PlannerObserver:
|
||||
return create_llm(config.llm)
|
||||
return self.agent.llm
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public API
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def observe(
|
||||
self,
|
||||
completed_step: TodoItem,
|
||||
@@ -178,6 +182,9 @@ class PlannerObserver:
|
||||
),
|
||||
)
|
||||
|
||||
# Don't force a full replan — the step may have succeeded even if the
|
||||
# observer LLM failed to parse the result. Defaulting to "continue" is
|
||||
# far less disruptive than wiping the entire plan on every observer error.
|
||||
return StepObservation(
|
||||
step_completed_successfully=True,
|
||||
key_information_learned="",
|
||||
@@ -214,6 +221,10 @@ class PlannerObserver:
|
||||
|
||||
return remaining_todos
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal: Message building
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _build_observation_messages(
|
||||
self,
|
||||
completed_step: TodoItem,
|
||||
@@ -228,11 +239,15 @@ class PlannerObserver:
|
||||
task_desc = self.task.description or ""
|
||||
task_goal = self.task.expected_output or ""
|
||||
elif self.kickoff_input:
|
||||
# Standalone kickoff path — no Task object, but we have the raw input.
|
||||
# Extract just the ## Task section so the observer sees the actual goal,
|
||||
# not the full enriched instruction with env/tools/verification noise.
|
||||
task_desc = extract_task_section(self.kickoff_input)
|
||||
task_goal = "Complete the task successfully"
|
||||
|
||||
system_prompt = I18N_DEFAULT.retrieve("planning", "observation_system_prompt")
|
||||
|
||||
# Build context of what's been done
|
||||
completed_summary = ""
|
||||
if all_completed:
|
||||
completed_lines = []
|
||||
@@ -246,6 +261,7 @@ class PlannerObserver:
|
||||
completed_lines
|
||||
)
|
||||
|
||||
# Build remaining plan
|
||||
remaining_summary = ""
|
||||
if remaining_todos:
|
||||
remaining_lines = [
|
||||
@@ -290,14 +306,17 @@ class PlannerObserver:
|
||||
if isinstance(response, StepObservation):
|
||||
return response
|
||||
|
||||
# JSON string path — most common miss before this fix
|
||||
if isinstance(response, str):
|
||||
text = response.strip()
|
||||
try:
|
||||
return StepObservation.model_validate_json(text)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
# Some LLMs wrap the JSON in markdown fences
|
||||
if text.startswith("```"):
|
||||
lines = text.split("\n")
|
||||
# Strip first and last lines (``` markers)
|
||||
inner = "\n".join(
|
||||
lines[1:-1] if lines[-1].strip() == "```" else lines[1:]
|
||||
)
|
||||
@@ -306,12 +325,14 @@ class PlannerObserver:
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
# Dict path
|
||||
if isinstance(response, dict):
|
||||
try:
|
||||
return StepObservation.model_validate(response)
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
# Last resort — log what we got so it's diagnosable
|
||||
logger.warning(
|
||||
"Could not parse observation response (type=%s). "
|
||||
"Falling back to default failure observation. Preview: %.200s",
|
||||
|
||||
@@ -108,6 +108,7 @@ class StepExecutor:
|
||||
self.request_within_rpm_limit = request_within_rpm_limit
|
||||
self.callbacks = callbacks or []
|
||||
|
||||
# Native tool support — set up once
|
||||
self._use_native_tools = check_native_tool_support(
|
||||
self.llm, self.original_tools
|
||||
)
|
||||
@@ -120,6 +121,10 @@ class StepExecutor:
|
||||
_,
|
||||
) = setup_native_tools(self.original_tools)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public API
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def execute(
|
||||
self,
|
||||
todo: TodoItem,
|
||||
@@ -185,6 +190,10 @@ class StepExecutor:
|
||||
execution_time=elapsed,
|
||||
)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal: Message building
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _build_isolated_messages(
|
||||
self, todo: TodoItem, context: StepExecutionContext
|
||||
) -> list[LLMMessage]:
|
||||
@@ -228,6 +237,10 @@ class StepExecutor:
|
||||
"""Build the user prompt for this specific step."""
|
||||
parts: list[str] = []
|
||||
|
||||
# Include overall task context so the executor knows the full goal and
|
||||
# required output format/location — critical for knowing WHAT to produce.
|
||||
# We extract only the task body (not tool instructions or verification
|
||||
# sections) to avoid duplicating directives already in the system prompt.
|
||||
if context.task_description:
|
||||
task_section = extract_task_section(context.task_description)
|
||||
if task_section:
|
||||
@@ -254,6 +267,7 @@ class StepExecutor:
|
||||
)
|
||||
)
|
||||
|
||||
# Include dependency results (final results only, no traces)
|
||||
if context.dependency_results:
|
||||
parts.append(
|
||||
I18N_DEFAULT.retrieve("planning", "step_executor_context_header")
|
||||
@@ -269,6 +283,10 @@ class StepExecutor:
|
||||
|
||||
return "\n".join(parts)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal: Multi-turn execution loop
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
def _execute_text_parsed(
|
||||
self,
|
||||
messages: list[LLMMessage],
|
||||
@@ -288,6 +306,7 @@ class StepExecutor:
|
||||
last_tool_result = ""
|
||||
|
||||
for _ in range(max_step_iterations):
|
||||
# Check step timeout
|
||||
if step_timeout and start_time:
|
||||
elapsed = time.monotonic() - start_time
|
||||
if elapsed >= step_timeout:
|
||||
@@ -312,12 +331,17 @@ class StepExecutor:
|
||||
tool_calls_made.append(formatted.tool)
|
||||
tool_result = self._execute_text_tool_with_events(formatted)
|
||||
last_tool_result = tool_result
|
||||
# Append the assistant's reasoning + action, then the observation.
|
||||
# _build_observation_message handles vision sentinels so the LLM
|
||||
# receives an image content block instead of raw base64 text.
|
||||
messages.append({"role": "assistant", "content": answer_str})
|
||||
messages.append(self._build_observation_message(tool_result))
|
||||
continue
|
||||
|
||||
# Raw text response with no Final Answer marker — treat as done
|
||||
return answer_str
|
||||
|
||||
# Max iterations reached — return the last tool result we accumulated
|
||||
return last_tool_result
|
||||
|
||||
def _execute_text_tool_with_events(self, formatted: AgentAction) -> str:
|
||||
@@ -405,6 +429,10 @@ class StepExecutor:
|
||||
return {"input": stripped_input}
|
||||
return {"input": str(tool_input)}
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal: Vision support
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
@staticmethod
|
||||
def _parse_vision_sentinel(raw: str) -> tuple[str, str] | None:
|
||||
"""Parse a VISION_IMAGE sentinel into (media_type, base64_data), or None."""
|
||||
@@ -489,6 +517,7 @@ class StepExecutor:
|
||||
accumulated_results: list[str] = []
|
||||
|
||||
for _ in range(max_step_iterations):
|
||||
# Check step timeout
|
||||
if step_timeout and start_time:
|
||||
elapsed = time.monotonic() - start_time
|
||||
if elapsed >= step_timeout:
|
||||
@@ -512,14 +541,19 @@ class StepExecutor:
|
||||
return answer.model_dump_json()
|
||||
|
||||
if isinstance(answer, list) and answer and is_tool_call_list(answer):
|
||||
# _execute_native_tool_calls appends assistant + tool messages
|
||||
# to `messages` as a side-effect, so the next LLM call will
|
||||
# see the full conversation history including tool outputs.
|
||||
result = self._execute_native_tool_calls(
|
||||
answer, messages, tool_calls_made
|
||||
)
|
||||
accumulated_results.append(result)
|
||||
continue
|
||||
|
||||
# Text answer → LLM decided the step is done
|
||||
return str(answer)
|
||||
|
||||
# Max iterations reached — return everything we accumulated
|
||||
return "\n".join(filter(None, accumulated_results))
|
||||
|
||||
def _execute_native_tool_calls(
|
||||
@@ -565,6 +599,9 @@ class StepExecutor:
|
||||
parsed = self._parse_vision_sentinel(raw_content)
|
||||
if parsed:
|
||||
media_type, b64_data = parsed
|
||||
# Replace the sentinel with a standard image_url content block.
|
||||
# Each provider's _format_messages handles conversion to
|
||||
# its native format (e.g. Anthropic image blocks).
|
||||
modified: LLMMessage = cast(
|
||||
LLMMessage, dict(call_result.tool_message)
|
||||
)
|
||||
|
||||
@@ -392,15 +392,10 @@ def deploy() -> None:
|
||||
|
||||
@deploy.command(name="create")
|
||||
@click.option("-y", "--yes", is_flag=True, help="Skip the confirmation prompt")
|
||||
@click.option(
|
||||
"--skip-validate",
|
||||
is_flag=True,
|
||||
help="Skip the pre-deploy validation checks.",
|
||||
)
|
||||
def deploy_create(yes: bool, skip_validate: bool) -> None:
|
||||
def deploy_create(yes: bool) -> None:
|
||||
"""Create a Crew deployment."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.create_crew(yes, skip_validate=skip_validate)
|
||||
deploy_cmd.create_crew(yes)
|
||||
|
||||
|
||||
@deploy.command(name="list")
|
||||
@@ -412,28 +407,10 @@ def deploy_list() -> None:
|
||||
|
||||
@deploy.command(name="push")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
@click.option(
|
||||
"--skip-validate",
|
||||
is_flag=True,
|
||||
help="Skip the pre-deploy validation checks.",
|
||||
)
|
||||
def deploy_push(uuid: str | None, skip_validate: bool) -> None:
|
||||
def deploy_push(uuid: str | None) -> None:
|
||||
"""Deploy the Crew."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.deploy(uuid=uuid, skip_validate=skip_validate)
|
||||
|
||||
|
||||
@deploy.command(name="validate")
|
||||
def deploy_validate() -> None:
|
||||
"""Validate the current project against common deployment failures.
|
||||
|
||||
Runs the same pre-deploy checks that `crewai deploy create` and
|
||||
`crewai deploy push` run automatically, without contacting the platform.
|
||||
Exits non-zero if any blocking issues are found.
|
||||
"""
|
||||
from crewai.cli.deploy.validate import run_validate_command
|
||||
|
||||
run_validate_command()
|
||||
deploy_cmd.deploy(uuid=uuid)
|
||||
|
||||
|
||||
@deploy.command(name="status")
|
||||
|
||||
@@ -4,35 +4,12 @@ from rich.console import Console
|
||||
|
||||
from crewai.cli import git
|
||||
from crewai.cli.command import BaseCommand, PlusAPIMixin
|
||||
from crewai.cli.deploy.validate import validate_project
|
||||
from crewai.cli.utils import fetch_and_json_env_file, get_project_name
|
||||
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
def _run_predeploy_validation(skip_validate: bool) -> bool:
|
||||
"""Run pre-deploy validation unless skipped.
|
||||
|
||||
Returns True if deployment should proceed, False if it should abort.
|
||||
"""
|
||||
if skip_validate:
|
||||
console.print(
|
||||
"[yellow]Skipping pre-deploy validation (--skip-validate).[/yellow]"
|
||||
)
|
||||
return True
|
||||
|
||||
console.print("Running pre-deploy validation...", style="bold blue")
|
||||
validator = validate_project()
|
||||
if not validator.ok:
|
||||
console.print(
|
||||
"\n[bold red]Pre-deploy validation failed. "
|
||||
"Fix the issues above or re-run with --skip-validate.[/bold red]"
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class DeployCommand(BaseCommand, PlusAPIMixin):
|
||||
"""
|
||||
A class to handle deployment-related operations for CrewAI projects.
|
||||
@@ -83,16 +60,13 @@ class DeployCommand(BaseCommand, PlusAPIMixin):
|
||||
f"{log_message['timestamp']} - {log_message['level']}: {log_message['message']}"
|
||||
)
|
||||
|
||||
def deploy(self, uuid: str | None = None, skip_validate: bool = False) -> None:
|
||||
def deploy(self, uuid: str | None = None) -> None:
|
||||
"""
|
||||
Deploy a crew using either UUID or project name.
|
||||
|
||||
Args:
|
||||
uuid (Optional[str]): The UUID of the crew to deploy.
|
||||
skip_validate (bool): Skip pre-deploy validation checks.
|
||||
"""
|
||||
if not _run_predeploy_validation(skip_validate):
|
||||
return
|
||||
self._telemetry.start_deployment_span(uuid)
|
||||
console.print("Starting deployment...", style="bold blue")
|
||||
if uuid:
|
||||
@@ -106,16 +80,10 @@ class DeployCommand(BaseCommand, PlusAPIMixin):
|
||||
self._validate_response(response)
|
||||
self._display_deployment_info(response.json())
|
||||
|
||||
def create_crew(self, confirm: bool = False, skip_validate: bool = False) -> None:
|
||||
def create_crew(self, confirm: bool = False) -> None:
|
||||
"""
|
||||
Create a new crew deployment.
|
||||
|
||||
Args:
|
||||
confirm (bool): Whether to skip the interactive confirmation prompt.
|
||||
skip_validate (bool): Skip pre-deploy validation checks.
|
||||
"""
|
||||
if not _run_predeploy_validation(skip_validate):
|
||||
return
|
||||
self._telemetry.create_crew_deployment_span()
|
||||
console.print("Creating deployment...", style="bold blue")
|
||||
env_vars = fetch_and_json_env_file()
|
||||
|
||||
@@ -1,845 +0,0 @@
|
||||
"""Pre-deploy validation for CrewAI projects.
|
||||
|
||||
Catches locally what a deploy would reject at build or runtime so users
|
||||
don't burn deployment attempts on fixable project-structure problems.
|
||||
|
||||
Each check is grouped into one of:
|
||||
- ERROR: will block a deployment; validator exits non-zero.
|
||||
- WARNING: may still deploy but is almost always a deployment bug; printed
|
||||
but does not block.
|
||||
|
||||
The individual checks mirror the categories observed in production
|
||||
deployment-failure logs:
|
||||
|
||||
1. pyproject.toml present with ``[project].name``
|
||||
2. lockfile (``uv.lock`` or ``poetry.lock``) present and not stale
|
||||
3. package directory at ``src/<package>/`` exists (no empty name, no egg-info)
|
||||
4. standard crew files: ``crew.py``, ``config/agents.yaml``, ``config/tasks.yaml``
|
||||
5. flow entrypoint: ``main.py`` with a Flow subclass
|
||||
6. hatch wheel target resolves (packages = [...] or default dir matches name)
|
||||
7. crew/flow module imports cleanly (catches ``@CrewBase not found``,
|
||||
``No Flow subclass found``, provider import errors)
|
||||
8. environment variables referenced in code vs ``.env`` / deployment env
|
||||
9. installed crewai vs lockfile pin (catches missing-attribute failures from
|
||||
stale pins)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
from typing import Any
|
||||
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.utils import parse_toml
|
||||
|
||||
|
||||
console = Console()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Severity(str, Enum):
|
||||
"""Severity of a validation finding."""
|
||||
|
||||
ERROR = "error"
|
||||
WARNING = "warning"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ValidationResult:
|
||||
"""A single finding from a validation check.
|
||||
|
||||
Attributes:
|
||||
severity: whether this blocks deploy or is advisory.
|
||||
code: stable short identifier, used in tests and docs
|
||||
(e.g. ``missing_pyproject``, ``stale_lockfile``).
|
||||
title: one-line summary shown to the user.
|
||||
detail: optional multi-line explanation.
|
||||
hint: optional remediation suggestion.
|
||||
"""
|
||||
|
||||
severity: Severity
|
||||
code: str
|
||||
title: str
|
||||
detail: str = ""
|
||||
hint: str = ""
|
||||
|
||||
|
||||
# Maps known provider env var names → label used in hint messages.
|
||||
_KNOWN_API_KEY_HINTS: dict[str, str] = {
|
||||
"OPENAI_API_KEY": "OpenAI",
|
||||
"ANTHROPIC_API_KEY": "Anthropic",
|
||||
"GOOGLE_API_KEY": "Google",
|
||||
"GEMINI_API_KEY": "Gemini",
|
||||
"AZURE_OPENAI_API_KEY": "Azure OpenAI",
|
||||
"AZURE_API_KEY": "Azure",
|
||||
"AWS_ACCESS_KEY_ID": "AWS",
|
||||
"AWS_SECRET_ACCESS_KEY": "AWS",
|
||||
"COHERE_API_KEY": "Cohere",
|
||||
"GROQ_API_KEY": "Groq",
|
||||
"MISTRAL_API_KEY": "Mistral",
|
||||
"TAVILY_API_KEY": "Tavily",
|
||||
"SERPER_API_KEY": "Serper",
|
||||
"SERPLY_API_KEY": "Serply",
|
||||
"PERPLEXITY_API_KEY": "Perplexity",
|
||||
"DEEPSEEK_API_KEY": "DeepSeek",
|
||||
"OPENROUTER_API_KEY": "OpenRouter",
|
||||
"FIRECRAWL_API_KEY": "Firecrawl",
|
||||
"EXA_API_KEY": "Exa",
|
||||
"BROWSERBASE_API_KEY": "Browserbase",
|
||||
}
|
||||
|
||||
|
||||
def normalize_package_name(project_name: str) -> str:
|
||||
"""Normalize a pyproject project.name into a Python package directory name.
|
||||
|
||||
Mirrors the rules in ``crewai.cli.create_crew.create_crew`` so the
|
||||
validator agrees with the scaffolder about where ``src/<pkg>/`` should
|
||||
live.
|
||||
"""
|
||||
folder = project_name.replace(" ", "_").replace("-", "_").lower()
|
||||
return re.sub(r"[^a-zA-Z0-9_]", "", folder)
|
||||
|
||||
|
||||
class DeployValidator:
|
||||
"""Runs the full pre-deploy validation suite against a project directory."""
|
||||
|
||||
def __init__(self, project_root: Path | None = None) -> None:
|
||||
self.project_root: Path = (project_root or Path.cwd()).resolve()
|
||||
self.results: list[ValidationResult] = []
|
||||
self._pyproject: dict[str, Any] | None = None
|
||||
self._project_name: str | None = None
|
||||
self._package_name: str | None = None
|
||||
self._package_dir: Path | None = None
|
||||
self._is_flow: bool = False
|
||||
|
||||
def _add(
|
||||
self,
|
||||
severity: Severity,
|
||||
code: str,
|
||||
title: str,
|
||||
detail: str = "",
|
||||
hint: str = "",
|
||||
) -> None:
|
||||
self.results.append(
|
||||
ValidationResult(
|
||||
severity=severity,
|
||||
code=code,
|
||||
title=title,
|
||||
detail=detail,
|
||||
hint=hint,
|
||||
)
|
||||
)
|
||||
|
||||
@property
|
||||
def errors(self) -> list[ValidationResult]:
|
||||
return [r for r in self.results if r.severity is Severity.ERROR]
|
||||
|
||||
@property
|
||||
def warnings(self) -> list[ValidationResult]:
|
||||
return [r for r in self.results if r.severity is Severity.WARNING]
|
||||
|
||||
@property
|
||||
def ok(self) -> bool:
|
||||
return not self.errors
|
||||
|
||||
def run(self) -> list[ValidationResult]:
|
||||
"""Run all checks. Later checks are skipped when earlier ones make
|
||||
them impossible (e.g. no pyproject.toml → no lockfile check)."""
|
||||
if not self._check_pyproject():
|
||||
return self.results
|
||||
|
||||
self._check_lockfile()
|
||||
|
||||
if not self._check_package_dir():
|
||||
self._check_hatch_wheel_target()
|
||||
return self.results
|
||||
|
||||
if self._is_flow:
|
||||
self._check_flow_entrypoint()
|
||||
else:
|
||||
self._check_crew_entrypoint()
|
||||
self._check_config_yamls()
|
||||
|
||||
self._check_hatch_wheel_target()
|
||||
self._check_module_imports()
|
||||
self._check_env_vars()
|
||||
self._check_version_vs_lockfile()
|
||||
|
||||
return self.results
|
||||
|
||||
def _check_pyproject(self) -> bool:
|
||||
pyproject_path = self.project_root / "pyproject.toml"
|
||||
if not pyproject_path.exists():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_pyproject",
|
||||
"Cannot find pyproject.toml",
|
||||
detail=(
|
||||
f"Expected pyproject.toml at {pyproject_path}. "
|
||||
"CrewAI projects must be installable Python packages."
|
||||
),
|
||||
hint="Run `crewai create crew <name>` to scaffold a valid project layout.",
|
||||
)
|
||||
return False
|
||||
|
||||
try:
|
||||
self._pyproject = parse_toml(pyproject_path.read_text())
|
||||
except Exception as e:
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"invalid_pyproject",
|
||||
"pyproject.toml is not valid TOML",
|
||||
detail=str(e),
|
||||
)
|
||||
return False
|
||||
|
||||
project = self._pyproject.get("project") or {}
|
||||
name = project.get("name")
|
||||
if not isinstance(name, str) or not name.strip():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_project_name",
|
||||
"pyproject.toml is missing [project].name",
|
||||
detail=(
|
||||
"Without a project name the platform cannot resolve your "
|
||||
"package directory (this produces errors like "
|
||||
"'Cannot find src//crew.py')."
|
||||
),
|
||||
hint='Set a `name = "..."` field under `[project]` in pyproject.toml.',
|
||||
)
|
||||
return False
|
||||
|
||||
self._project_name = name
|
||||
self._package_name = normalize_package_name(name)
|
||||
self._is_flow = (self._pyproject.get("tool") or {}).get("crewai", {}).get(
|
||||
"type"
|
||||
) == "flow"
|
||||
return True
|
||||
|
||||
def _check_lockfile(self) -> None:
|
||||
uv_lock = self.project_root / "uv.lock"
|
||||
poetry_lock = self.project_root / "poetry.lock"
|
||||
pyproject = self.project_root / "pyproject.toml"
|
||||
|
||||
if not uv_lock.exists() and not poetry_lock.exists():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_lockfile",
|
||||
"Expected to find at least one of these files: uv.lock or poetry.lock",
|
||||
hint=(
|
||||
"Run `uv lock` (recommended) or `poetry lock` in your project "
|
||||
"directory, commit the lockfile, then redeploy."
|
||||
),
|
||||
)
|
||||
return
|
||||
|
||||
lockfile = uv_lock if uv_lock.exists() else poetry_lock
|
||||
try:
|
||||
if lockfile.stat().st_mtime < pyproject.stat().st_mtime:
|
||||
self._add(
|
||||
Severity.WARNING,
|
||||
"stale_lockfile",
|
||||
f"{lockfile.name} is older than pyproject.toml",
|
||||
detail=(
|
||||
"Your lockfile may not reflect recent dependency changes. "
|
||||
"The platform resolves from the lockfile, so deployed "
|
||||
"dependencies may differ from local."
|
||||
),
|
||||
hint="Run `uv lock` (or `poetry lock`) and commit the result.",
|
||||
)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def _check_package_dir(self) -> bool:
|
||||
if self._package_name is None:
|
||||
return False
|
||||
|
||||
src_dir = self.project_root / "src"
|
||||
if not src_dir.is_dir():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_src_dir",
|
||||
"Missing src/ directory",
|
||||
detail=(
|
||||
"CrewAI deployments expect a src-layout project: "
|
||||
f"src/{self._package_name}/crew.py (or main.py for flows)."
|
||||
),
|
||||
hint="Run `crewai create crew <name>` to see the expected layout.",
|
||||
)
|
||||
return False
|
||||
|
||||
package_dir = src_dir / self._package_name
|
||||
if not package_dir.is_dir():
|
||||
siblings = [
|
||||
p.name
|
||||
for p in src_dir.iterdir()
|
||||
if p.is_dir() and not p.name.endswith(".egg-info")
|
||||
]
|
||||
egg_info = [
|
||||
p.name for p in src_dir.iterdir() if p.name.endswith(".egg-info")
|
||||
]
|
||||
|
||||
hint_parts = [
|
||||
f'Create src/{self._package_name}/ to match [project].name = "{self._project_name}".'
|
||||
]
|
||||
if siblings:
|
||||
hint_parts.append(
|
||||
f"Found other package directories: {', '.join(siblings)}. "
|
||||
f"Either rename one to '{self._package_name}' or update [project].name."
|
||||
)
|
||||
if egg_info:
|
||||
hint_parts.append(
|
||||
f"Delete stale build artifacts: {', '.join(egg_info)} "
|
||||
"(these confuse the platform's package discovery)."
|
||||
)
|
||||
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_package_dir",
|
||||
f"Cannot find src/{self._package_name}/",
|
||||
detail=(
|
||||
"The platform looks for your crew source under "
|
||||
"src/<package_name>/, derived from [project].name."
|
||||
),
|
||||
hint=" ".join(hint_parts),
|
||||
)
|
||||
return False
|
||||
|
||||
for p in src_dir.iterdir():
|
||||
if p.name.endswith(".egg-info"):
|
||||
self._add(
|
||||
Severity.WARNING,
|
||||
"stale_egg_info",
|
||||
f"Stale build artifact in src/: {p.name}",
|
||||
detail=(
|
||||
".egg-info directories can be mistaken for your package "
|
||||
"and cause 'Cannot find src/<name>.egg-info/crew.py' errors."
|
||||
),
|
||||
hint=f"Delete {p} and add `*.egg-info/` to .gitignore.",
|
||||
)
|
||||
|
||||
self._package_dir = package_dir
|
||||
return True
|
||||
|
||||
def _check_crew_entrypoint(self) -> None:
|
||||
if self._package_dir is None:
|
||||
return
|
||||
crew_py = self._package_dir / "crew.py"
|
||||
if not crew_py.is_file():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_crew_py",
|
||||
f"Cannot find {crew_py.relative_to(self.project_root)}",
|
||||
detail=(
|
||||
"Standard crew projects must define a Crew class decorated "
|
||||
"with @CrewBase inside crew.py."
|
||||
),
|
||||
hint=(
|
||||
"Create crew.py with an @CrewBase-annotated class, or set "
|
||||
'`[tool.crewai] type = "flow"` in pyproject.toml if this is a flow.'
|
||||
),
|
||||
)
|
||||
|
||||
def _check_config_yamls(self) -> None:
|
||||
if self._package_dir is None:
|
||||
return
|
||||
config_dir = self._package_dir / "config"
|
||||
if not config_dir.is_dir():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_config_dir",
|
||||
f"Cannot find {config_dir.relative_to(self.project_root)}",
|
||||
hint="Create a config/ directory with agents.yaml and tasks.yaml.",
|
||||
)
|
||||
return
|
||||
|
||||
for yaml_name in ("agents.yaml", "tasks.yaml"):
|
||||
yaml_path = config_dir / yaml_name
|
||||
if not yaml_path.is_file():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
f"missing_{yaml_name.replace('.', '_')}",
|
||||
f"Cannot find {yaml_path.relative_to(self.project_root)}",
|
||||
detail=(
|
||||
"CrewAI loads agent and task config from these files; "
|
||||
"missing them causes empty-config warnings and runtime crashes."
|
||||
),
|
||||
)
|
||||
|
||||
def _check_flow_entrypoint(self) -> None:
|
||||
if self._package_dir is None:
|
||||
return
|
||||
main_py = self._package_dir / "main.py"
|
||||
if not main_py.is_file():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_flow_main",
|
||||
f"Cannot find {main_py.relative_to(self.project_root)}",
|
||||
detail=(
|
||||
"Flow projects must define a Flow subclass in main.py. "
|
||||
'This project has `[tool.crewai] type = "flow"` set.'
|
||||
),
|
||||
hint="Create main.py with a `class MyFlow(Flow[...])`.",
|
||||
)
|
||||
|
||||
def _check_hatch_wheel_target(self) -> None:
|
||||
if not self._pyproject:
|
||||
return
|
||||
|
||||
build_system = self._pyproject.get("build-system") or {}
|
||||
backend = build_system.get("build-backend", "")
|
||||
if "hatchling" not in backend:
|
||||
return
|
||||
|
||||
hatch_wheel = (
|
||||
(self._pyproject.get("tool") or {})
|
||||
.get("hatch", {})
|
||||
.get("build", {})
|
||||
.get("targets", {})
|
||||
.get("wheel", {})
|
||||
)
|
||||
if hatch_wheel.get("packages") or hatch_wheel.get("only-include"):
|
||||
return
|
||||
|
||||
if self._package_dir and self._package_dir.is_dir():
|
||||
return
|
||||
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"hatch_wheel_target_missing",
|
||||
"Hatchling cannot determine which files to ship",
|
||||
detail=(
|
||||
"Your pyproject uses hatchling but has no "
|
||||
"[tool.hatch.build.targets.wheel] configuration and no "
|
||||
"directory matching your project name."
|
||||
),
|
||||
hint=(
|
||||
"Add:\n"
|
||||
" [tool.hatch.build.targets.wheel]\n"
|
||||
f' packages = ["src/{self._package_name}"]'
|
||||
),
|
||||
)
|
||||
|
||||
def _check_module_imports(self) -> None:
|
||||
"""Import the user's crew/flow via `uv run` so the check sees the same
|
||||
package versions as `crewai run` would. Result is reported as JSON on
|
||||
the subprocess's stdout."""
|
||||
script = (
|
||||
"import json, sys, traceback, os\n"
|
||||
"os.chdir(sys.argv[1])\n"
|
||||
"try:\n"
|
||||
" from crewai.cli.utils import get_crews, get_flows\n"
|
||||
" is_flow = sys.argv[2] == 'flow'\n"
|
||||
" if is_flow:\n"
|
||||
" instances = get_flows()\n"
|
||||
" kind = 'flow'\n"
|
||||
" else:\n"
|
||||
" instances = get_crews()\n"
|
||||
" kind = 'crew'\n"
|
||||
" print(json.dumps({'ok': True, 'kind': kind, 'count': len(instances)}))\n"
|
||||
"except BaseException as e:\n"
|
||||
" print(json.dumps({\n"
|
||||
" 'ok': False,\n"
|
||||
" 'error_type': type(e).__name__,\n"
|
||||
" 'error': str(e),\n"
|
||||
" 'traceback': traceback.format_exc(),\n"
|
||||
" }))\n"
|
||||
)
|
||||
|
||||
uv_path = shutil.which("uv")
|
||||
if uv_path is None:
|
||||
self._add(
|
||||
Severity.WARNING,
|
||||
"uv_not_found",
|
||||
"Skipping import check: `uv` not installed",
|
||||
hint="Install uv: https://docs.astral.sh/uv/",
|
||||
)
|
||||
return
|
||||
|
||||
try:
|
||||
proc = subprocess.run( # noqa: S603 - args constructed from trusted inputs
|
||||
[
|
||||
uv_path,
|
||||
"run",
|
||||
"python",
|
||||
"-c",
|
||||
script,
|
||||
str(self.project_root),
|
||||
"flow" if self._is_flow else "crew",
|
||||
],
|
||||
cwd=self.project_root,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=120,
|
||||
check=False,
|
||||
)
|
||||
except subprocess.TimeoutExpired:
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"import_timeout",
|
||||
"Importing your crew/flow module timed out after 120s",
|
||||
detail=(
|
||||
"User code may be making network calls or doing heavy work "
|
||||
"at import time. Move that work into agent methods."
|
||||
),
|
||||
)
|
||||
return
|
||||
|
||||
# The payload is the last JSON object on stdout; user code may print
|
||||
# other lines before it.
|
||||
payload: dict[str, Any] | None = None
|
||||
for line in reversed(proc.stdout.splitlines()):
|
||||
line = line.strip()
|
||||
if line.startswith("{") and line.endswith("}"):
|
||||
try:
|
||||
payload = json.loads(line)
|
||||
break
|
||||
except json.JSONDecodeError:
|
||||
continue
|
||||
|
||||
if payload is None:
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"import_failed",
|
||||
"Could not import your crew/flow module",
|
||||
detail=(proc.stderr or proc.stdout or "").strip()[:1500],
|
||||
hint="Run `crewai run` locally first to reproduce the error.",
|
||||
)
|
||||
return
|
||||
|
||||
if payload.get("ok"):
|
||||
if payload.get("count", 0) == 0:
|
||||
kind = payload.get("kind", "crew")
|
||||
if kind == "flow":
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"no_flow_subclass",
|
||||
"No Flow subclass found in the module",
|
||||
hint=(
|
||||
"main.py must define a class extending "
|
||||
"`crewai.flow.Flow`, instantiable with no arguments."
|
||||
),
|
||||
)
|
||||
else:
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"no_crewbase_class",
|
||||
"Crew class annotated with @CrewBase not found",
|
||||
hint=(
|
||||
"Decorate your crew class with @CrewBase from "
|
||||
"crewai.project (see `crewai create crew` template)."
|
||||
),
|
||||
)
|
||||
return
|
||||
|
||||
err_msg = str(payload.get("error", ""))
|
||||
err_type = str(payload.get("error_type", "Exception"))
|
||||
tb = str(payload.get("traceback", ""))
|
||||
self._classify_import_error(err_type, err_msg, tb)
|
||||
|
||||
def _classify_import_error(self, err_type: str, err_msg: str, tb: str) -> None:
|
||||
"""Turn a raw import-time exception into a user-actionable finding."""
|
||||
# Must be checked before the generic "native provider" branch below:
|
||||
# the extras-missing message contains the same phrase. Providers
|
||||
# format the install command as plain text (`to install: uv add
|
||||
# "crewai[extra]"`); also tolerate backtick-delimited variants.
|
||||
m = re.search(
|
||||
r"(?P<pkg>[A-Za-z0-9_ -]+?)\s+native provider not available"
|
||||
r".*?to install:\s*`?(?P<cmd>uv add [\"']crewai\[[^\]]+\][\"'])`?",
|
||||
err_msg,
|
||||
)
|
||||
if m:
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"missing_provider_extra",
|
||||
f"{m.group('pkg').strip()} provider extra not installed",
|
||||
hint=f"Run: {m.group('cmd')}",
|
||||
)
|
||||
return
|
||||
|
||||
# crewai.llm.LLM.__new__ wraps provider init errors as
|
||||
# ImportError("Error importing native provider: ...").
|
||||
if "Error importing native provider" in err_msg or "native provider" in err_msg:
|
||||
missing_key = self._extract_missing_api_key(err_msg)
|
||||
if missing_key:
|
||||
provider = _KNOWN_API_KEY_HINTS.get(missing_key, missing_key)
|
||||
self._add(
|
||||
Severity.WARNING,
|
||||
"llm_init_missing_key",
|
||||
f"LLM is constructed at import time but {missing_key} is not set",
|
||||
detail=(
|
||||
f"Your crew instantiates a {provider} LLM during module "
|
||||
"load (e.g. in a class field default or @crew method). "
|
||||
f"The {provider} provider currently requires {missing_key} "
|
||||
"at construction time, so this will fail on the platform "
|
||||
"unless the key is set in your deployment environment."
|
||||
),
|
||||
hint=(
|
||||
f"Add {missing_key} to your deployment's Environment "
|
||||
"Variables before deploying, or move LLM construction "
|
||||
"inside agent methods so it runs lazily."
|
||||
),
|
||||
)
|
||||
return
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"llm_provider_init_failed",
|
||||
"LLM native provider failed to initialize",
|
||||
detail=err_msg,
|
||||
hint=(
|
||||
"Check your LLM(model=...) configuration and provider-specific "
|
||||
"extras (e.g. `uv add 'crewai[azure-ai-inference]'` for Azure)."
|
||||
),
|
||||
)
|
||||
return
|
||||
|
||||
if err_type == "KeyError":
|
||||
key = err_msg.strip("'\"")
|
||||
if key in _KNOWN_API_KEY_HINTS or key.endswith("_API_KEY"):
|
||||
self._add(
|
||||
Severity.WARNING,
|
||||
"env_var_read_at_import",
|
||||
f"{key} is read at import time via os.environ[...]",
|
||||
detail=(
|
||||
"Using os.environ[...] (rather than os.getenv(...)) "
|
||||
"at module scope crashes the build if the key isn't set."
|
||||
),
|
||||
hint=(
|
||||
f"Either add {key} as a deployment env var, or switch "
|
||||
"to os.getenv() and move the access inside agent methods."
|
||||
),
|
||||
)
|
||||
return
|
||||
|
||||
if "Crew class annotated with @CrewBase not found" in err_msg:
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"no_crewbase_class",
|
||||
"Crew class annotated with @CrewBase not found",
|
||||
detail=err_msg,
|
||||
)
|
||||
return
|
||||
if "No Flow subclass found" in err_msg:
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"no_flow_subclass",
|
||||
"No Flow subclass found in the module",
|
||||
detail=err_msg,
|
||||
)
|
||||
return
|
||||
|
||||
if (
|
||||
err_type == "AttributeError"
|
||||
and "has no attribute '_load_response_format'" in err_msg
|
||||
):
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"stale_crewai_pin",
|
||||
"Your lockfile pins a crewai version missing `_load_response_format`",
|
||||
detail=err_msg,
|
||||
hint=(
|
||||
"Run `uv lock --upgrade-package crewai` (or `poetry update crewai`) "
|
||||
"to pin a newer release."
|
||||
),
|
||||
)
|
||||
return
|
||||
|
||||
if "pydantic" in tb.lower() or "validation error" in err_msg.lower():
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"pydantic_validation_error",
|
||||
"Pydantic validation failed while loading your crew",
|
||||
detail=err_msg[:800],
|
||||
hint=(
|
||||
"Check agent/task configuration fields. `crewai run` locally "
|
||||
"will show the full traceback."
|
||||
),
|
||||
)
|
||||
return
|
||||
|
||||
self._add(
|
||||
Severity.ERROR,
|
||||
"import_failed",
|
||||
f"Importing your crew failed: {err_type}",
|
||||
detail=err_msg[:800],
|
||||
hint="Run `crewai run` locally to see the full traceback.",
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _extract_missing_api_key(err_msg: str) -> str | None:
|
||||
"""Pull 'FOO_API_KEY' out of '... FOO_API_KEY is required ...'."""
|
||||
m = re.search(r"([A-Z][A-Z0-9_]*_API_KEY)\s+is required", err_msg)
|
||||
if m:
|
||||
return m.group(1)
|
||||
m = re.search(r"['\"]([A-Z][A-Z0-9_]*_API_KEY)['\"]", err_msg)
|
||||
if m:
|
||||
return m.group(1)
|
||||
return None
|
||||
|
||||
def _check_env_vars(self) -> None:
|
||||
"""Warn about env vars referenced in user code but missing locally.
|
||||
Best-effort only — the platform sets vars server-side, so we never error.
|
||||
"""
|
||||
if not self._package_dir:
|
||||
return
|
||||
|
||||
referenced: set[str] = set()
|
||||
pattern = re.compile(
|
||||
r"""(?x)
|
||||
(?:os\.environ\s*(?:\[\s*|\.get\s*\(\s*)
|
||||
|os\.getenv\s*\(\s*
|
||||
|getenv\s*\(\s*)
|
||||
['"]([A-Z][A-Z0-9_]*)['"]
|
||||
"""
|
||||
)
|
||||
|
||||
for path in self._package_dir.rglob("*.py"):
|
||||
try:
|
||||
text = path.read_text(encoding="utf-8", errors="ignore")
|
||||
except OSError:
|
||||
continue
|
||||
referenced.update(pattern.findall(text))
|
||||
|
||||
for path in self._package_dir.rglob("*.yaml"):
|
||||
try:
|
||||
text = path.read_text(encoding="utf-8", errors="ignore")
|
||||
except OSError:
|
||||
continue
|
||||
referenced.update(re.findall(r"\$\{?([A-Z][A-Z0-9_]+)\}?", text))
|
||||
|
||||
env_file = self.project_root / ".env"
|
||||
env_keys: set[str] = set()
|
||||
if env_file.exists():
|
||||
for line in env_file.read_text(errors="ignore").splitlines():
|
||||
line = line.strip()
|
||||
if not line or line.startswith("#") or "=" not in line:
|
||||
continue
|
||||
env_keys.add(line.split("=", 1)[0].strip())
|
||||
|
||||
missing_known: list[str] = sorted(
|
||||
var
|
||||
for var in referenced
|
||||
if var in _KNOWN_API_KEY_HINTS
|
||||
and var not in env_keys
|
||||
and var not in os.environ
|
||||
)
|
||||
if missing_known:
|
||||
self._add(
|
||||
Severity.WARNING,
|
||||
"env_vars_not_in_dotenv",
|
||||
f"{len(missing_known)} referenced API key(s) not in .env",
|
||||
detail=(
|
||||
"These env vars are referenced in your source but not set "
|
||||
f"locally: {', '.join(missing_known)}. Deploys will fail "
|
||||
"unless they are added to the deployment's Environment "
|
||||
"Variables in the CrewAI dashboard."
|
||||
),
|
||||
)
|
||||
|
||||
def _check_version_vs_lockfile(self) -> None:
|
||||
"""Warn when the lockfile pins a crewai release older than 1.13.0,
|
||||
which is where ``_load_response_format`` was introduced.
|
||||
"""
|
||||
uv_lock = self.project_root / "uv.lock"
|
||||
poetry_lock = self.project_root / "poetry.lock"
|
||||
lockfile = (
|
||||
uv_lock
|
||||
if uv_lock.exists()
|
||||
else poetry_lock
|
||||
if poetry_lock.exists()
|
||||
else None
|
||||
)
|
||||
if lockfile is None:
|
||||
return
|
||||
|
||||
try:
|
||||
text = lockfile.read_text(errors="ignore")
|
||||
except OSError:
|
||||
return
|
||||
|
||||
m = re.search(
|
||||
r'name\s*=\s*"crewai"\s*\nversion\s*=\s*"([^"]+)"',
|
||||
text,
|
||||
)
|
||||
if not m:
|
||||
return
|
||||
locked = m.group(1)
|
||||
|
||||
try:
|
||||
from packaging.version import Version
|
||||
|
||||
if Version(locked) < Version("1.13.0"):
|
||||
self._add(
|
||||
Severity.WARNING,
|
||||
"old_crewai_pin",
|
||||
f"Lockfile pins crewai=={locked} (older than 1.13.0)",
|
||||
detail=(
|
||||
"Older pinned versions are missing API surface the "
|
||||
"platform builder expects (e.g. `_load_response_format`)."
|
||||
),
|
||||
hint="Run `uv lock --upgrade-package crewai` and redeploy.",
|
||||
)
|
||||
except Exception as e:
|
||||
logger.debug("Could not parse crewai pin from lockfile: %s", e)
|
||||
|
||||
|
||||
def render_report(results: list[ValidationResult]) -> None:
|
||||
"""Pretty-print results to the shared rich console."""
|
||||
if not results:
|
||||
console.print("[bold green]Pre-deploy validation passed.[/bold green]")
|
||||
return
|
||||
|
||||
errors = [r for r in results if r.severity is Severity.ERROR]
|
||||
warnings = [r for r in results if r.severity is Severity.WARNING]
|
||||
|
||||
for result in errors:
|
||||
console.print(f"[bold red]ERROR[/bold red] [{result.code}] {result.title}")
|
||||
if result.detail:
|
||||
console.print(f" {result.detail}")
|
||||
if result.hint:
|
||||
console.print(f" [dim]hint:[/dim] {result.hint}")
|
||||
|
||||
for result in warnings:
|
||||
console.print(
|
||||
f"[bold yellow]WARNING[/bold yellow] [{result.code}] {result.title}"
|
||||
)
|
||||
if result.detail:
|
||||
console.print(f" {result.detail}")
|
||||
if result.hint:
|
||||
console.print(f" [dim]hint:[/dim] {result.hint}")
|
||||
|
||||
summary_parts: list[str] = []
|
||||
if errors:
|
||||
summary_parts.append(f"[bold red]{len(errors)} error(s)[/bold red]")
|
||||
if warnings:
|
||||
summary_parts.append(f"[bold yellow]{len(warnings)} warning(s)[/bold yellow]")
|
||||
console.print(f"\n{' / '.join(summary_parts)}")
|
||||
|
||||
|
||||
def validate_project(project_root: Path | None = None) -> DeployValidator:
|
||||
"""Entrypoint: run validation, render results, return the validator.
|
||||
|
||||
The caller inspects ``validator.ok`` to decide whether to proceed with a
|
||||
deploy.
|
||||
"""
|
||||
validator = DeployValidator(project_root=project_root)
|
||||
validator.run()
|
||||
render_report(validator.results)
|
||||
return validator
|
||||
|
||||
|
||||
def run_validate_command() -> None:
|
||||
"""Implementation of `crewai deploy validate`."""
|
||||
validator = validate_project()
|
||||
if not validator.ok:
|
||||
sys.exit(1)
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.14.2a5"
|
||||
"crewai[tools]==1.14.2a2"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.14.2a5"
|
||||
"crewai[tools]==1.14.2a2"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<3.14"
|
||||
dependencies = [
|
||||
"crewai[tools]==1.14.2a5"
|
||||
"crewai[tools]==1.14.2a2"
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
|
||||
@@ -16,6 +16,7 @@ from typing import (
|
||||
get_origin,
|
||||
)
|
||||
import uuid
|
||||
import warnings
|
||||
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
@@ -25,7 +26,7 @@ from pydantic import (
|
||||
field_validator,
|
||||
model_validator,
|
||||
)
|
||||
from typing_extensions import Self, deprecated
|
||||
from typing_extensions import Self
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
@@ -172,12 +173,9 @@ def _kickoff_with_a2a_support(
|
||||
)
|
||||
|
||||
|
||||
@deprecated(
|
||||
"LiteAgent is deprecated and will be removed in v2.0.0.",
|
||||
category=FutureWarning,
|
||||
)
|
||||
class LiteAgent(FlowTrackable, BaseModel):
|
||||
"""A lightweight agent that can process messages and use tools.
|
||||
"""
|
||||
A lightweight agent that can process messages and use tools.
|
||||
|
||||
.. deprecated::
|
||||
LiteAgent is deprecated and will be removed in a future version.
|
||||
@@ -280,6 +278,18 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
)
|
||||
_memory: Any = PrivateAttr(default=None)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def emit_deprecation_warning(self) -> Self:
|
||||
"""Emit deprecation warning for LiteAgent usage."""
|
||||
warnings.warn(
|
||||
"LiteAgent is deprecated and will be removed in a future version. "
|
||||
"Use Agent().kickoff(messages) instead, which provides the same "
|
||||
"functionality with additional features like memory and knowledge support.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def setup_llm(self) -> Self:
|
||||
"""Set up the LLM and other components after initialization."""
|
||||
|
||||
@@ -51,7 +51,6 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
)
|
||||
from crewai.utilities.logger_utils import suppress_warnings
|
||||
from crewai.utilities.string_utils import sanitize_tool_name
|
||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||
|
||||
|
||||
try:
|
||||
@@ -76,13 +75,8 @@ try:
|
||||
from litellm.types.utils import (
|
||||
ChatCompletionDeltaToolCall,
|
||||
Choices,
|
||||
Delta as LiteLLMDelta,
|
||||
Function,
|
||||
Message,
|
||||
ModelResponse,
|
||||
ModelResponseBase,
|
||||
ModelResponseStream,
|
||||
StreamingChoices as LiteLLMStreamingChoices,
|
||||
)
|
||||
from litellm.utils import supports_response_schema
|
||||
|
||||
@@ -91,11 +85,6 @@ except ImportError:
|
||||
LITELLM_AVAILABLE = False
|
||||
litellm = None # type: ignore[assignment]
|
||||
Choices = None # type: ignore[assignment, misc]
|
||||
LiteLLMDelta = None # type: ignore[assignment, misc]
|
||||
Message = None # type: ignore[assignment, misc]
|
||||
ModelResponseBase = None # type: ignore[assignment, misc]
|
||||
ModelResponseStream = None # type: ignore[assignment, misc]
|
||||
LiteLLMStreamingChoices = None # type: ignore[assignment, misc]
|
||||
get_supported_openai_params = None # type: ignore[assignment]
|
||||
ChatCompletionDeltaToolCall = None # type: ignore[assignment, misc]
|
||||
Function = None # type: ignore[assignment, misc]
|
||||
@@ -720,7 +709,7 @@ class LLM(BaseLLM):
|
||||
chunk_content = None
|
||||
response_id = None
|
||||
|
||||
if isinstance(chunk, ModelResponseBase):
|
||||
if hasattr(chunk, "id"):
|
||||
response_id = chunk.id
|
||||
|
||||
# Safely extract content from various chunk formats
|
||||
@@ -729,16 +718,18 @@ class LLM(BaseLLM):
|
||||
choices = None
|
||||
if isinstance(chunk, dict) and "choices" in chunk:
|
||||
choices = chunk["choices"]
|
||||
elif isinstance(chunk, ModelResponseStream):
|
||||
choices = chunk.choices
|
||||
elif hasattr(chunk, "choices"):
|
||||
# Check if choices is not a type but an actual attribute with value
|
||||
if not isinstance(chunk.choices, type):
|
||||
choices = chunk.choices
|
||||
|
||||
# Try to extract usage information if available
|
||||
# NOTE: usage is a pydantic extra field on ModelResponseBase,
|
||||
# so it must be accessed via model_extra.
|
||||
if isinstance(chunk, dict) and "usage" in chunk:
|
||||
usage_info = chunk["usage"]
|
||||
elif isinstance(chunk, ModelResponseBase) and chunk.model_extra:
|
||||
usage_info = chunk.model_extra.get("usage") or usage_info
|
||||
elif hasattr(chunk, "usage"):
|
||||
# Check if usage is not a type but an actual attribute with value
|
||||
if not isinstance(chunk.usage, type):
|
||||
usage_info = chunk.usage
|
||||
|
||||
if choices and len(choices) > 0:
|
||||
choice = choices[0]
|
||||
@@ -747,7 +738,7 @@ class LLM(BaseLLM):
|
||||
delta = None
|
||||
if isinstance(choice, dict) and "delta" in choice:
|
||||
delta = choice["delta"]
|
||||
elif isinstance(choice, LiteLLMStreamingChoices):
|
||||
elif hasattr(choice, "delta"):
|
||||
delta = choice.delta
|
||||
|
||||
# Extract content from delta
|
||||
@@ -757,7 +748,7 @@ class LLM(BaseLLM):
|
||||
if "content" in delta and delta["content"] is not None:
|
||||
chunk_content = delta["content"]
|
||||
# Handle object format
|
||||
elif isinstance(delta, LiteLLMDelta):
|
||||
elif hasattr(delta, "content"):
|
||||
chunk_content = delta.content
|
||||
|
||||
# Handle case where content might be None or empty
|
||||
@@ -830,8 +821,9 @@ class LLM(BaseLLM):
|
||||
choices = None
|
||||
if isinstance(last_chunk, dict) and "choices" in last_chunk:
|
||||
choices = last_chunk["choices"]
|
||||
elif isinstance(last_chunk, ModelResponseStream):
|
||||
choices = last_chunk.choices
|
||||
elif hasattr(last_chunk, "choices"):
|
||||
if not isinstance(last_chunk.choices, type):
|
||||
choices = last_chunk.choices
|
||||
|
||||
if choices and len(choices) > 0:
|
||||
choice = choices[0]
|
||||
@@ -840,14 +832,14 @@ class LLM(BaseLLM):
|
||||
message = None
|
||||
if isinstance(choice, dict) and "message" in choice:
|
||||
message = choice["message"]
|
||||
elif isinstance(choice, Choices):
|
||||
elif hasattr(choice, "message"):
|
||||
message = choice.message
|
||||
|
||||
if message:
|
||||
content = None
|
||||
if isinstance(message, dict) and "content" in message:
|
||||
content = message["content"]
|
||||
elif isinstance(message, Message):
|
||||
elif hasattr(message, "content"):
|
||||
content = message.content
|
||||
|
||||
if content:
|
||||
@@ -874,23 +866,24 @@ class LLM(BaseLLM):
|
||||
choices = None
|
||||
if isinstance(last_chunk, dict) and "choices" in last_chunk:
|
||||
choices = last_chunk["choices"]
|
||||
elif isinstance(last_chunk, ModelResponseStream):
|
||||
choices = last_chunk.choices
|
||||
elif hasattr(last_chunk, "choices"):
|
||||
if not isinstance(last_chunk.choices, type):
|
||||
choices = last_chunk.choices
|
||||
|
||||
if choices and len(choices) > 0:
|
||||
choice = choices[0]
|
||||
|
||||
delta = None
|
||||
if isinstance(choice, dict) and "delta" in choice:
|
||||
delta = choice["delta"]
|
||||
elif isinstance(choice, LiteLLMStreamingChoices):
|
||||
delta = choice.delta
|
||||
message = None
|
||||
if isinstance(choice, dict) and "message" in choice:
|
||||
message = choice["message"]
|
||||
elif hasattr(choice, "message"):
|
||||
message = choice.message
|
||||
|
||||
if delta:
|
||||
if isinstance(delta, dict) and "tool_calls" in delta:
|
||||
tool_calls = delta["tool_calls"]
|
||||
elif isinstance(delta, LiteLLMDelta):
|
||||
tool_calls = delta.tool_calls
|
||||
if message:
|
||||
if isinstance(message, dict) and "tool_calls" in message:
|
||||
tool_calls = message["tool_calls"]
|
||||
elif hasattr(message, "tool_calls"):
|
||||
tool_calls = message.tool_calls
|
||||
except Exception as e:
|
||||
logging.debug(f"Error checking for tool calls: {e}")
|
||||
|
||||
@@ -1044,7 +1037,7 @@ class LLM(BaseLLM):
|
||||
"""
|
||||
if callbacks and len(callbacks) > 0:
|
||||
for callback in callbacks:
|
||||
if isinstance(callback, TokenCalcHandler):
|
||||
if hasattr(callback, "log_success_event"):
|
||||
# Use the usage_info we've been tracking
|
||||
if not usage_info:
|
||||
# Try to get usage from the last chunk if we haven't already
|
||||
@@ -1055,14 +1048,9 @@ class LLM(BaseLLM):
|
||||
and "usage" in last_chunk
|
||||
):
|
||||
usage_info = last_chunk["usage"]
|
||||
elif (
|
||||
isinstance(last_chunk, ModelResponseBase)
|
||||
and last_chunk.model_extra
|
||||
):
|
||||
usage_info = (
|
||||
last_chunk.model_extra.get("usage")
|
||||
or usage_info
|
||||
)
|
||||
elif hasattr(last_chunk, "usage"):
|
||||
if not isinstance(last_chunk.usage, type):
|
||||
usage_info = last_chunk.usage
|
||||
except Exception as e:
|
||||
logging.debug(f"Error extracting usage info: {e}")
|
||||
|
||||
@@ -1135,10 +1123,13 @@ class LLM(BaseLLM):
|
||||
params["response_model"] = response_model
|
||||
response = litellm.completion(**params)
|
||||
|
||||
if isinstance(response, ModelResponseBase) and response.model_extra:
|
||||
usage_info = response.model_extra.get("usage")
|
||||
if usage_info:
|
||||
self._track_token_usage_internal(usage_info)
|
||||
if (
|
||||
hasattr(response, "usage")
|
||||
and not isinstance(response.usage, type)
|
||||
and response.usage
|
||||
):
|
||||
usage_info = response.usage
|
||||
self._track_token_usage_internal(usage_info)
|
||||
|
||||
except LLMContextLengthExceededError:
|
||||
# Re-raise our own context length error
|
||||
@@ -1150,11 +1141,7 @@ class LLM(BaseLLM):
|
||||
raise LLMContextLengthExceededError(error_msg) from e
|
||||
raise
|
||||
|
||||
response_usage = self._usage_to_dict(
|
||||
response.model_extra.get("usage")
|
||||
if isinstance(response, ModelResponseBase) and response.model_extra
|
||||
else None
|
||||
)
|
||||
response_usage = self._usage_to_dict(getattr(response, "usage", None))
|
||||
|
||||
# --- 2) Handle structured output response (when response_model is provided)
|
||||
if response_model is not None:
|
||||
@@ -1179,13 +1166,8 @@ class LLM(BaseLLM):
|
||||
# --- 3) Handle callbacks with usage info
|
||||
if callbacks and len(callbacks) > 0:
|
||||
for callback in callbacks:
|
||||
if isinstance(callback, TokenCalcHandler):
|
||||
usage_info = (
|
||||
response.model_extra.get("usage")
|
||||
if isinstance(response, ModelResponseBase)
|
||||
and response.model_extra
|
||||
else None
|
||||
)
|
||||
if hasattr(callback, "log_success_event"):
|
||||
usage_info = getattr(response, "usage", None)
|
||||
if usage_info:
|
||||
callback.log_success_event(
|
||||
kwargs=params,
|
||||
@@ -1194,7 +1176,7 @@ class LLM(BaseLLM):
|
||||
end_time=0,
|
||||
)
|
||||
# --- 4) Check for tool calls
|
||||
tool_calls = response_message.tool_calls or []
|
||||
tool_calls = getattr(response_message, "tool_calls", [])
|
||||
|
||||
# --- 5) If no tool calls or no available functions, return the text response directly as long as there is a text response
|
||||
if (not tool_calls or not available_functions) and text_response:
|
||||
@@ -1287,10 +1269,13 @@ class LLM(BaseLLM):
|
||||
params["response_model"] = response_model
|
||||
response = await litellm.acompletion(**params)
|
||||
|
||||
if isinstance(response, ModelResponseBase) and response.model_extra:
|
||||
usage_info = response.model_extra.get("usage")
|
||||
if usage_info:
|
||||
self._track_token_usage_internal(usage_info)
|
||||
if (
|
||||
hasattr(response, "usage")
|
||||
and not isinstance(response.usage, type)
|
||||
and response.usage
|
||||
):
|
||||
usage_info = response.usage
|
||||
self._track_token_usage_internal(usage_info)
|
||||
|
||||
except LLMContextLengthExceededError:
|
||||
# Re-raise our own context length error
|
||||
@@ -1302,11 +1287,7 @@ class LLM(BaseLLM):
|
||||
raise LLMContextLengthExceededError(error_msg) from e
|
||||
raise
|
||||
|
||||
response_usage = self._usage_to_dict(
|
||||
response.model_extra.get("usage")
|
||||
if isinstance(response, ModelResponseBase) and response.model_extra
|
||||
else None
|
||||
)
|
||||
response_usage = self._usage_to_dict(getattr(response, "usage", None))
|
||||
|
||||
if response_model is not None:
|
||||
if isinstance(response, BaseModel):
|
||||
@@ -1328,13 +1309,8 @@ class LLM(BaseLLM):
|
||||
|
||||
if callbacks and len(callbacks) > 0:
|
||||
for callback in callbacks:
|
||||
if isinstance(callback, TokenCalcHandler):
|
||||
usage_info = (
|
||||
response.model_extra.get("usage")
|
||||
if isinstance(response, ModelResponseBase)
|
||||
and response.model_extra
|
||||
else None
|
||||
)
|
||||
if hasattr(callback, "log_success_event"):
|
||||
usage_info = getattr(response, "usage", None)
|
||||
if usage_info:
|
||||
callback.log_success_event(
|
||||
kwargs=params,
|
||||
@@ -1343,7 +1319,7 @@ class LLM(BaseLLM):
|
||||
end_time=0,
|
||||
)
|
||||
|
||||
tool_calls = response_message.tool_calls or []
|
||||
tool_calls = getattr(response_message, "tool_calls", [])
|
||||
|
||||
if (not tool_calls or not available_functions) and text_response:
|
||||
self._handle_emit_call_events(
|
||||
@@ -1418,19 +1394,18 @@ class LLM(BaseLLM):
|
||||
async for chunk in await litellm.acompletion(**params):
|
||||
chunk_count += 1
|
||||
chunk_content = None
|
||||
response_id = chunk.id if isinstance(chunk, ModelResponseBase) else None
|
||||
response_id = chunk.id if hasattr(chunk, "id") else None
|
||||
|
||||
try:
|
||||
choices = None
|
||||
if isinstance(chunk, dict) and "choices" in chunk:
|
||||
choices = chunk["choices"]
|
||||
elif isinstance(chunk, ModelResponseStream):
|
||||
choices = chunk.choices
|
||||
elif hasattr(chunk, "choices"):
|
||||
if not isinstance(chunk.choices, type):
|
||||
choices = chunk.choices
|
||||
|
||||
if isinstance(chunk, ModelResponseBase) and chunk.model_extra:
|
||||
chunk_usage = chunk.model_extra.get("usage")
|
||||
if chunk_usage is not None:
|
||||
usage_info = chunk_usage
|
||||
if hasattr(chunk, "usage") and chunk.usage is not None:
|
||||
usage_info = chunk.usage
|
||||
|
||||
if choices and len(choices) > 0:
|
||||
first_choice = choices[0]
|
||||
@@ -1438,19 +1413,19 @@ class LLM(BaseLLM):
|
||||
|
||||
if isinstance(first_choice, dict):
|
||||
delta = first_choice.get("delta", {})
|
||||
elif isinstance(first_choice, LiteLLMStreamingChoices):
|
||||
elif hasattr(first_choice, "delta"):
|
||||
delta = first_choice.delta
|
||||
|
||||
if delta:
|
||||
if isinstance(delta, dict):
|
||||
chunk_content = delta.get("content")
|
||||
elif isinstance(delta, LiteLLMDelta):
|
||||
elif hasattr(delta, "content"):
|
||||
chunk_content = delta.content
|
||||
|
||||
tool_calls: list[ChatCompletionDeltaToolCall] | None = None
|
||||
if isinstance(delta, dict):
|
||||
tool_calls = delta.get("tool_calls")
|
||||
elif isinstance(delta, LiteLLMDelta):
|
||||
elif hasattr(delta, "tool_calls"):
|
||||
tool_calls = delta.tool_calls
|
||||
|
||||
if tool_calls:
|
||||
@@ -1486,7 +1461,7 @@ class LLM(BaseLLM):
|
||||
|
||||
if callbacks and len(callbacks) > 0 and usage_info:
|
||||
for callback in callbacks:
|
||||
if isinstance(callback, TokenCalcHandler):
|
||||
if hasattr(callback, "log_success_event"):
|
||||
callback.log_success_event(
|
||||
kwargs=params,
|
||||
response_obj={"usage": usage_info},
|
||||
@@ -1945,7 +1920,7 @@ class LLM(BaseLLM):
|
||||
return None
|
||||
if isinstance(usage, dict):
|
||||
return usage
|
||||
if isinstance(usage, BaseModel):
|
||||
if hasattr(usage, "model_dump"):
|
||||
result: dict[str, Any] = usage.model_dump()
|
||||
return result
|
||||
if hasattr(usage, "__dict__"):
|
||||
@@ -2009,7 +1984,7 @@ class LLM(BaseLLM):
|
||||
)
|
||||
return messages
|
||||
|
||||
provider = self.provider or self.model
|
||||
provider = getattr(self, "provider", None) or self.model
|
||||
|
||||
for msg in messages:
|
||||
files = msg.get("files")
|
||||
@@ -2060,7 +2035,7 @@ class LLM(BaseLLM):
|
||||
)
|
||||
return messages
|
||||
|
||||
provider = self.provider or self.model
|
||||
provider = getattr(self, "provider", None) or self.model
|
||||
|
||||
for msg in messages:
|
||||
files = msg.get("files")
|
||||
|
||||
@@ -11,14 +11,10 @@ from crewai.events.types.llm_events import LLMCallType
|
||||
from crewai.llms.base_llm import BaseLLM, JsonResponseFormat, llm_call_context
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
from crewai.utilities.agent_utils import is_context_length_exceeded
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededError,
|
||||
)
|
||||
from crewai.utilities.pydantic_schema_utils import (
|
||||
sanitize_tool_params_for_anthropic_strict,
|
||||
)
|
||||
from crewai.utilities.types import LLMMessage
|
||||
|
||||
|
||||
@@ -193,41 +189,16 @@ class AnthropicCompletion(BaseLLM):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def _init_clients(self) -> AnthropicCompletion:
|
||||
"""Eagerly build clients when the API key is available, otherwise
|
||||
defer so ``LLM(model="anthropic/...")`` can be constructed at module
|
||||
import time even before deployment env vars are set.
|
||||
"""
|
||||
try:
|
||||
self._client = self._build_sync_client()
|
||||
self._async_client = self._build_async_client()
|
||||
except ValueError:
|
||||
pass
|
||||
return self
|
||||
self._client = Anthropic(**self._get_client_params())
|
||||
|
||||
def _build_sync_client(self) -> Any:
|
||||
return Anthropic(**self._get_client_params())
|
||||
|
||||
def _build_async_client(self) -> Any:
|
||||
# Skip the sync httpx.Client that `_get_client_params` would
|
||||
# otherwise construct under `interceptor`; we attach an async one
|
||||
# below and would leak the sync one if both were built.
|
||||
async_client_params = self._get_client_params(include_http_client=False)
|
||||
async_client_params = self._get_client_params()
|
||||
if self.interceptor:
|
||||
async_transport = AsyncHTTPTransport(interceptor=self.interceptor)
|
||||
async_client_params["http_client"] = httpx.AsyncClient(
|
||||
transport=async_transport
|
||||
)
|
||||
return AsyncAnthropic(**async_client_params)
|
||||
async_http_client = httpx.AsyncClient(transport=async_transport)
|
||||
async_client_params["http_client"] = async_http_client
|
||||
|
||||
def _get_sync_client(self) -> Any:
|
||||
if self._client is None:
|
||||
self._client = self._build_sync_client()
|
||||
return self._client
|
||||
|
||||
def _get_async_client(self) -> Any:
|
||||
if self._async_client is None:
|
||||
self._async_client = self._build_async_client()
|
||||
return self._async_client
|
||||
self._async_client = AsyncAnthropic(**async_client_params)
|
||||
return self
|
||||
|
||||
def to_config_dict(self) -> dict[str, Any]:
|
||||
"""Extend base config with Anthropic-specific fields."""
|
||||
@@ -242,15 +213,8 @@ class AnthropicCompletion(BaseLLM):
|
||||
config["timeout"] = self.timeout
|
||||
return config
|
||||
|
||||
def _get_client_params(self, include_http_client: bool = True) -> dict[str, Any]:
|
||||
"""Get client parameters.
|
||||
|
||||
Args:
|
||||
include_http_client: When True (default) and an interceptor is
|
||||
set, attach a sync ``httpx.Client``. The async builder
|
||||
passes ``False`` so it can attach its own async client
|
||||
without leaking a sync one.
|
||||
"""
|
||||
def _get_client_params(self) -> dict[str, Any]:
|
||||
"""Get client parameters."""
|
||||
|
||||
if self.api_key is None:
|
||||
self.api_key = os.getenv("ANTHROPIC_API_KEY")
|
||||
@@ -264,7 +228,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
"max_retries": self.max_retries,
|
||||
}
|
||||
|
||||
if include_http_client and self.interceptor:
|
||||
if self.interceptor:
|
||||
transport = HTTPTransport(interceptor=self.interceptor)
|
||||
http_client = httpx.Client(transport=transport)
|
||||
client_params["http_client"] = http_client # type: ignore[assignment]
|
||||
@@ -509,8 +473,10 @@ class AnthropicCompletion(BaseLLM):
|
||||
continue
|
||||
|
||||
try:
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
|
||||
name, description, parameters = safe_tool_conversion(tool, "Anthropic")
|
||||
except (KeyError, ValueError) as e:
|
||||
except (ImportError, KeyError, ValueError) as e:
|
||||
logging.error(f"Error converting tool to Anthropic format: {e}")
|
||||
raise e
|
||||
|
||||
@@ -519,15 +485,8 @@ class AnthropicCompletion(BaseLLM):
|
||||
"description": description,
|
||||
}
|
||||
|
||||
func_info = tool.get("function", {})
|
||||
strict_enabled = bool(func_info.get("strict"))
|
||||
|
||||
if parameters and isinstance(parameters, dict):
|
||||
anthropic_tool["input_schema"] = (
|
||||
sanitize_tool_params_for_anthropic_strict(parameters)
|
||||
if strict_enabled
|
||||
else parameters
|
||||
)
|
||||
anthropic_tool["input_schema"] = parameters
|
||||
else:
|
||||
anthropic_tool["input_schema"] = {
|
||||
"type": "object",
|
||||
@@ -535,7 +494,8 @@ class AnthropicCompletion(BaseLLM):
|
||||
"required": [],
|
||||
}
|
||||
|
||||
if strict_enabled:
|
||||
func_info = tool.get("function", {})
|
||||
if func_info.get("strict"):
|
||||
anthropic_tool["strict"] = True
|
||||
|
||||
anthropic_tools.append(anthropic_tool)
|
||||
@@ -830,11 +790,11 @@ class AnthropicCompletion(BaseLLM):
|
||||
try:
|
||||
if betas:
|
||||
params["betas"] = betas
|
||||
response = self._get_sync_client().beta.messages.create(
|
||||
response = self._client.beta.messages.create(
|
||||
**params, extra_body=extra_body
|
||||
)
|
||||
else:
|
||||
response = self._get_sync_client().messages.create(**params)
|
||||
response = self._client.messages.create(**params)
|
||||
|
||||
except Exception as e:
|
||||
if is_context_length_exceeded(e):
|
||||
@@ -982,11 +942,9 @@ class AnthropicCompletion(BaseLLM):
|
||||
current_tool_calls: dict[int, dict[str, Any]] = {}
|
||||
|
||||
stream_context = (
|
||||
self._get_sync_client().beta.messages.stream(
|
||||
**stream_params, extra_body=extra_body
|
||||
)
|
||||
self._client.beta.messages.stream(**stream_params, extra_body=extra_body)
|
||||
if betas
|
||||
else self._get_sync_client().messages.stream(**stream_params)
|
||||
else self._client.messages.stream(**stream_params)
|
||||
)
|
||||
with stream_context as stream:
|
||||
response_id = None
|
||||
@@ -1265,9 +1223,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
|
||||
try:
|
||||
# Send tool results back to Claude for final response
|
||||
final_response: Message = self._get_sync_client().messages.create(
|
||||
**follow_up_params
|
||||
)
|
||||
final_response: Message = self._client.messages.create(**follow_up_params)
|
||||
|
||||
# Track token usage for follow-up call
|
||||
follow_up_usage = self._extract_anthropic_token_usage(final_response)
|
||||
@@ -1363,11 +1319,11 @@ class AnthropicCompletion(BaseLLM):
|
||||
try:
|
||||
if betas:
|
||||
params["betas"] = betas
|
||||
response = await self._get_async_client().beta.messages.create(
|
||||
response = await self._async_client.beta.messages.create(
|
||||
**params, extra_body=extra_body
|
||||
)
|
||||
else:
|
||||
response = await self._get_async_client().messages.create(**params)
|
||||
response = await self._async_client.messages.create(**params)
|
||||
|
||||
except Exception as e:
|
||||
if is_context_length_exceeded(e):
|
||||
@@ -1501,11 +1457,11 @@ class AnthropicCompletion(BaseLLM):
|
||||
current_tool_calls: dict[int, dict[str, Any]] = {}
|
||||
|
||||
stream_context = (
|
||||
self._get_async_client().beta.messages.stream(
|
||||
self._async_client.beta.messages.stream(
|
||||
**stream_params, extra_body=extra_body
|
||||
)
|
||||
if betas
|
||||
else self._get_async_client().messages.stream(**stream_params)
|
||||
else self._async_client.messages.stream(**stream_params)
|
||||
)
|
||||
async with stream_context as stream:
|
||||
response_id = None
|
||||
@@ -1670,7 +1626,7 @@ class AnthropicCompletion(BaseLLM):
|
||||
]
|
||||
|
||||
try:
|
||||
final_response: Message = await self._get_async_client().messages.create(
|
||||
final_response: Message = await self._async_client.messages.create(
|
||||
**follow_up_params
|
||||
)
|
||||
|
||||
@@ -1798,8 +1754,8 @@ class AnthropicCompletion(BaseLLM):
|
||||
from crewai_files.uploaders.anthropic import AnthropicFileUploader
|
||||
|
||||
return AnthropicFileUploader(
|
||||
client=self._get_sync_client(),
|
||||
async_client=self._get_async_client(),
|
||||
client=self._client,
|
||||
async_client=self._async_client,
|
||||
)
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
@@ -116,100 +116,43 @@ class AzureCompletion(BaseLLM):
|
||||
data.get("api_version") or os.getenv("AZURE_API_VERSION") or "2024-06-01"
|
||||
)
|
||||
|
||||
# Credentials and endpoint are validated lazily in `_init_clients`
|
||||
# so the LLM can be constructed before deployment env vars are set.
|
||||
model = data.get("model", "")
|
||||
if data["endpoint"]:
|
||||
data["endpoint"] = AzureCompletion._validate_and_fix_endpoint(
|
||||
data["endpoint"], model
|
||||
if not data["api_key"]:
|
||||
raise ValueError(
|
||||
"Azure API key is required. Set AZURE_API_KEY environment variable or pass api_key parameter."
|
||||
)
|
||||
data["is_azure_openai_endpoint"] = AzureCompletion._is_azure_openai_endpoint(
|
||||
data["endpoint"]
|
||||
if not data["endpoint"]:
|
||||
raise ValueError(
|
||||
"Azure endpoint is required. Set AZURE_ENDPOINT environment variable or pass endpoint parameter."
|
||||
)
|
||||
|
||||
model = data.get("model", "")
|
||||
data["endpoint"] = AzureCompletion._validate_and_fix_endpoint(
|
||||
data["endpoint"], model
|
||||
)
|
||||
data["is_openai_model"] = any(
|
||||
prefix in model.lower() for prefix in ["gpt-", "o1-", "text-"]
|
||||
)
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def _is_azure_openai_endpoint(endpoint: str | None) -> bool:
|
||||
if not endpoint:
|
||||
return False
|
||||
hostname = urlparse(endpoint).hostname or ""
|
||||
return (
|
||||
parsed = urlparse(data["endpoint"])
|
||||
hostname = parsed.hostname or ""
|
||||
data["is_azure_openai_endpoint"] = (
|
||||
hostname == "openai.azure.com" or hostname.endswith(".openai.azure.com")
|
||||
) and "/openai/deployments/" in endpoint
|
||||
) and "/openai/deployments/" in data["endpoint"]
|
||||
return data
|
||||
|
||||
@model_validator(mode="after")
|
||||
def _init_clients(self) -> AzureCompletion:
|
||||
"""Eagerly build clients when credentials are available, otherwise
|
||||
defer so ``LLM(model="azure/...")`` can be constructed at module
|
||||
import time even before deployment env vars are set.
|
||||
"""
|
||||
try:
|
||||
self._client = self._build_sync_client()
|
||||
self._async_client = self._build_async_client()
|
||||
except ValueError:
|
||||
pass
|
||||
return self
|
||||
|
||||
def _build_sync_client(self) -> Any:
|
||||
return ChatCompletionsClient(**self._make_client_kwargs())
|
||||
|
||||
def _build_async_client(self) -> Any:
|
||||
return AsyncChatCompletionsClient(**self._make_client_kwargs())
|
||||
|
||||
def _make_client_kwargs(self) -> dict[str, Any]:
|
||||
# Re-read env vars so that a deferred build can pick up credentials
|
||||
# that weren't set at instantiation time (e.g. LLM constructed at
|
||||
# module import before deployment env vars were injected).
|
||||
if not self.api_key:
|
||||
self.api_key = os.getenv("AZURE_API_KEY")
|
||||
if not self.endpoint:
|
||||
endpoint = (
|
||||
os.getenv("AZURE_ENDPOINT")
|
||||
or os.getenv("AZURE_OPENAI_ENDPOINT")
|
||||
or os.getenv("AZURE_API_BASE")
|
||||
)
|
||||
if endpoint:
|
||||
self.endpoint = AzureCompletion._validate_and_fix_endpoint(
|
||||
endpoint, self.model
|
||||
)
|
||||
# Recompute the routing flag now that the endpoint is known —
|
||||
# _prepare_completion_params uses it to decide whether to
|
||||
# include `model` in the request body (Azure OpenAI endpoints
|
||||
# embed the deployment name in the URL and reject it).
|
||||
self.is_azure_openai_endpoint = (
|
||||
AzureCompletion._is_azure_openai_endpoint(self.endpoint)
|
||||
)
|
||||
|
||||
if not self.api_key:
|
||||
raise ValueError(
|
||||
"Azure API key is required. Set AZURE_API_KEY environment "
|
||||
"variable or pass api_key parameter."
|
||||
)
|
||||
if not self.endpoint:
|
||||
raise ValueError(
|
||||
"Azure endpoint is required. Set AZURE_ENDPOINT environment "
|
||||
"variable or pass endpoint parameter."
|
||||
)
|
||||
raise ValueError("Azure API key is required.")
|
||||
client_kwargs: dict[str, Any] = {
|
||||
"endpoint": self.endpoint,
|
||||
"credential": AzureKeyCredential(self.api_key),
|
||||
}
|
||||
if self.api_version:
|
||||
client_kwargs["api_version"] = self.api_version
|
||||
return client_kwargs
|
||||
|
||||
def _get_sync_client(self) -> Any:
|
||||
if self._client is None:
|
||||
self._client = self._build_sync_client()
|
||||
return self._client
|
||||
|
||||
def _get_async_client(self) -> Any:
|
||||
if self._async_client is None:
|
||||
self._async_client = self._build_async_client()
|
||||
return self._async_client
|
||||
self._client = ChatCompletionsClient(**client_kwargs)
|
||||
self._async_client = AsyncChatCompletionsClient(**client_kwargs)
|
||||
return self
|
||||
|
||||
def to_config_dict(self) -> dict[str, Any]:
|
||||
"""Extend base config with Azure-specific fields."""
|
||||
@@ -770,7 +713,8 @@ class AzureCompletion(BaseLLM):
|
||||
) -> str | Any:
|
||||
"""Handle non-streaming chat completion."""
|
||||
try:
|
||||
response: ChatCompletions = self._get_sync_client().complete(**params)
|
||||
# Cast params to Any to avoid type checking issues with TypedDict unpacking
|
||||
response: ChatCompletions = self._client.complete(**params)
|
||||
return self._process_completion_response(
|
||||
response=response,
|
||||
params=params,
|
||||
@@ -969,7 +913,7 @@ class AzureCompletion(BaseLLM):
|
||||
tool_calls: dict[int, dict[str, Any]] = {}
|
||||
|
||||
usage_data: dict[str, Any] | None = None
|
||||
for update in self._get_sync_client().complete(**params):
|
||||
for update in self._client.complete(**params):
|
||||
if isinstance(update, StreamingChatCompletionsUpdate):
|
||||
if update.usage:
|
||||
usage = update.usage
|
||||
@@ -1009,9 +953,8 @@ class AzureCompletion(BaseLLM):
|
||||
) -> str | Any:
|
||||
"""Handle non-streaming chat completion asynchronously."""
|
||||
try:
|
||||
response: ChatCompletions = await self._get_async_client().complete(
|
||||
**params
|
||||
)
|
||||
# Cast params to Any to avoid type checking issues with TypedDict unpacking
|
||||
response: ChatCompletions = await self._async_client.complete(**params)
|
||||
return self._process_completion_response(
|
||||
response=response,
|
||||
params=params,
|
||||
@@ -1037,7 +980,7 @@ class AzureCompletion(BaseLLM):
|
||||
|
||||
usage_data: dict[str, Any] | None = None
|
||||
|
||||
stream = await self._get_async_client().complete(**params)
|
||||
stream = await self._async_client.complete(**params)
|
||||
async for update in stream:
|
||||
if isinstance(update, StreamingChatCompletionsUpdate):
|
||||
if hasattr(update, "usage") and update.usage:
|
||||
@@ -1160,12 +1103,9 @@ class AzureCompletion(BaseLLM):
|
||||
"""Close the async client and clean up resources.
|
||||
|
||||
This ensures proper cleanup of the underlying aiohttp session
|
||||
to avoid unclosed connector warnings. Accesses the cached client
|
||||
directly rather than going through `_get_async_client` so a
|
||||
cleanup on an uninitialized LLM is a harmless no-op rather than
|
||||
a credential-required error.
|
||||
to avoid unclosed connector warnings.
|
||||
"""
|
||||
if self._async_client is not None and hasattr(self._async_client, "close"):
|
||||
if hasattr(self._async_client, "close"):
|
||||
await self._async_client.close()
|
||||
|
||||
async def __aenter__(self) -> Self:
|
||||
|
||||
@@ -12,7 +12,6 @@ from typing_extensions import Required
|
||||
|
||||
from crewai.events.types.llm_events import LLMCallType
|
||||
from crewai.llms.base_llm import BaseLLM, llm_call_context
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
from crewai.utilities.agent_utils import is_context_length_exceeded
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededError,
|
||||
@@ -170,6 +169,7 @@ class ToolSpec(TypedDict, total=False):
|
||||
name: Required[str]
|
||||
description: Required[str]
|
||||
inputSchema: ToolInputSchema
|
||||
strict: bool
|
||||
|
||||
|
||||
class ConverseToolTypeDef(TypedDict):
|
||||
@@ -303,22 +303,6 @@ class BedrockCompletion(BaseLLM):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def _init_clients(self) -> BedrockCompletion:
|
||||
"""Eagerly build the sync client when AWS credentials resolve,
|
||||
otherwise defer so ``LLM(model="bedrock/...")`` can be constructed
|
||||
at module import time even before deployment env vars are set.
|
||||
|
||||
Only credential/SDK errors are caught — programming errors like
|
||||
``TypeError`` or ``AttributeError`` propagate so real bugs aren't
|
||||
silently swallowed.
|
||||
"""
|
||||
try:
|
||||
self._client = self._build_sync_client()
|
||||
except (BotoCoreError, ClientError, ValueError) as e:
|
||||
logging.debug("Deferring Bedrock client construction: %s", e)
|
||||
self._async_exit_stack = AsyncExitStack() if AIOBOTOCORE_AVAILABLE else None
|
||||
return self
|
||||
|
||||
def _build_sync_client(self) -> Any:
|
||||
config = Config(
|
||||
read_timeout=300,
|
||||
retries={"max_attempts": 3, "mode": "adaptive"},
|
||||
@@ -330,17 +314,9 @@ class BedrockCompletion(BaseLLM):
|
||||
aws_session_token=self.aws_session_token,
|
||||
region_name=self.region_name,
|
||||
)
|
||||
return session.client("bedrock-runtime", config=config)
|
||||
|
||||
def _get_sync_client(self) -> Any:
|
||||
if self._client is None:
|
||||
self._client = self._build_sync_client()
|
||||
return self._client
|
||||
|
||||
def _get_async_client(self) -> Any:
|
||||
"""Async client is set up separately by ``_ensure_async_client``
|
||||
using ``aiobotocore`` inside an exit stack."""
|
||||
return self._async_client
|
||||
self._client = session.client("bedrock-runtime", config=config)
|
||||
self._async_exit_stack = AsyncExitStack() if AIOBOTOCORE_AVAILABLE else None
|
||||
return self
|
||||
|
||||
def to_config_dict(self) -> dict[str, Any]:
|
||||
"""Extend base config with Bedrock-specific fields."""
|
||||
@@ -680,7 +656,7 @@ class BedrockCompletion(BaseLLM):
|
||||
raise ValueError(f"Invalid message format at index {i}")
|
||||
|
||||
# Call Bedrock Converse API with proper error handling
|
||||
response = self._get_sync_client().converse(
|
||||
response = self._client.converse(
|
||||
modelId=self.model_id,
|
||||
messages=cast(
|
||||
"Sequence[MessageTypeDef | MessageOutputTypeDef]",
|
||||
@@ -969,7 +945,7 @@ class BedrockCompletion(BaseLLM):
|
||||
usage_data: dict[str, Any] | None = None
|
||||
|
||||
try:
|
||||
response = self._get_sync_client().converse_stream(
|
||||
response = self._client.converse_stream(
|
||||
modelId=self.model_id,
|
||||
messages=cast(
|
||||
"Sequence[MessageTypeDef | MessageOutputTypeDef]",
|
||||
@@ -1973,6 +1949,8 @@ class BedrockCompletion(BaseLLM):
|
||||
tools: list[dict[str, Any]],
|
||||
) -> list[ConverseToolTypeDef]:
|
||||
"""Convert CrewAI tools to Converse API format following AWS specification."""
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
|
||||
converse_tools: list[ConverseToolTypeDef] = []
|
||||
|
||||
for tool in tools:
|
||||
@@ -1988,6 +1966,10 @@ class BedrockCompletion(BaseLLM):
|
||||
input_schema: ToolInputSchema = {"json": parameters}
|
||||
tool_spec["inputSchema"] = input_schema
|
||||
|
||||
func_info = tool.get("function", {})
|
||||
if func_info.get("strict"):
|
||||
tool_spec["strict"] = True
|
||||
|
||||
converse_tool: ConverseToolTypeDef = {"toolSpec": tool_spec}
|
||||
|
||||
converse_tools.append(converse_tool)
|
||||
|
||||
@@ -118,33 +118,9 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def _init_client(self) -> GeminiCompletion:
|
||||
"""Eagerly build the client when credentials resolve, otherwise defer
|
||||
so ``LLM(model="gemini/...")`` can be constructed at module import time
|
||||
even before deployment env vars are set.
|
||||
"""
|
||||
try:
|
||||
self._client = self._initialize_client(self.use_vertexai)
|
||||
except ValueError:
|
||||
pass
|
||||
self._client = self._initialize_client(self.use_vertexai)
|
||||
return self
|
||||
|
||||
def _get_sync_client(self) -> Any:
|
||||
if self._client is None:
|
||||
# Re-read env vars so a deferred build can pick up credentials
|
||||
# that weren't set at instantiation time.
|
||||
if not self.api_key:
|
||||
self.api_key = os.getenv("GOOGLE_API_KEY") or os.getenv(
|
||||
"GEMINI_API_KEY"
|
||||
)
|
||||
if not self.project:
|
||||
self.project = os.getenv("GOOGLE_CLOUD_PROJECT")
|
||||
self._client = self._initialize_client(self.use_vertexai)
|
||||
return self._client
|
||||
|
||||
def _get_async_client(self) -> Any:
|
||||
"""Gemini uses a single client for both sync and async calls."""
|
||||
return self._get_sync_client()
|
||||
|
||||
def to_config_dict(self) -> dict[str, Any]:
|
||||
"""Extend base config with Gemini/Vertex-specific fields."""
|
||||
config = super().to_config_dict()
|
||||
@@ -252,7 +228,6 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
if (
|
||||
hasattr(self, "client")
|
||||
and self._client is not None
|
||||
and hasattr(self._client, "vertexai")
|
||||
and self._client.vertexai
|
||||
):
|
||||
@@ -1137,7 +1112,7 @@ class GeminiCompletion(BaseLLM):
|
||||
try:
|
||||
# The API accepts list[Content] but mypy is overly strict about variance
|
||||
contents_for_api: Any = contents
|
||||
response = self._get_sync_client().models.generate_content(
|
||||
response = self._client.models.generate_content(
|
||||
model=self.model,
|
||||
contents=contents_for_api,
|
||||
config=config,
|
||||
@@ -1178,7 +1153,7 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
# The API accepts list[Content] but mypy is overly strict about variance
|
||||
contents_for_api: Any = contents
|
||||
for chunk in self._get_sync_client().models.generate_content_stream(
|
||||
for chunk in self._client.models.generate_content_stream(
|
||||
model=self.model,
|
||||
contents=contents_for_api,
|
||||
config=config,
|
||||
@@ -1216,7 +1191,7 @@ class GeminiCompletion(BaseLLM):
|
||||
try:
|
||||
# The API accepts list[Content] but mypy is overly strict about variance
|
||||
contents_for_api: Any = contents
|
||||
response = await self._get_async_client().aio.models.generate_content(
|
||||
response = await self._client.aio.models.generate_content(
|
||||
model=self.model,
|
||||
contents=contents_for_api,
|
||||
config=config,
|
||||
@@ -1257,7 +1232,7 @@ class GeminiCompletion(BaseLLM):
|
||||
|
||||
# The API accepts list[Content] but mypy is overly strict about variance
|
||||
contents_for_api: Any = contents
|
||||
stream = await self._get_async_client().aio.models.generate_content_stream(
|
||||
stream = await self._client.aio.models.generate_content_stream(
|
||||
model=self.model,
|
||||
contents=contents_for_api,
|
||||
config=config,
|
||||
@@ -1464,6 +1439,6 @@ class GeminiCompletion(BaseLLM):
|
||||
try:
|
||||
from crewai_files.uploaders.gemini import GeminiFileUploader
|
||||
|
||||
return GeminiFileUploader(client=self._get_sync_client())
|
||||
return GeminiFileUploader(client=self._client)
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
@@ -32,15 +32,11 @@ from crewai.events.types.llm_events import LLMCallType
|
||||
from crewai.llms.base_llm import BaseLLM, JsonResponseFormat, llm_call_context
|
||||
from crewai.llms.hooks.base import BaseInterceptor
|
||||
from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
from crewai.utilities.agent_utils import is_context_length_exceeded
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededError,
|
||||
)
|
||||
from crewai.utilities.pydantic_schema_utils import (
|
||||
generate_model_description,
|
||||
sanitize_tool_params_for_openai_strict,
|
||||
)
|
||||
from crewai.utilities.pydantic_schema_utils import generate_model_description
|
||||
from crewai.utilities.types import LLMMessage
|
||||
|
||||
|
||||
@@ -257,40 +253,22 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def _init_clients(self) -> OpenAICompletion:
|
||||
"""Eagerly build clients when the API key is available, otherwise
|
||||
defer so ``LLM(model="openai/...")`` can be constructed at module
|
||||
import time even before deployment env vars are set.
|
||||
"""
|
||||
try:
|
||||
self._client = self._build_sync_client()
|
||||
self._async_client = self._build_async_client()
|
||||
except ValueError:
|
||||
pass
|
||||
return self
|
||||
|
||||
def _build_sync_client(self) -> Any:
|
||||
client_config = self._get_client_params()
|
||||
if self.interceptor:
|
||||
transport = HTTPTransport(interceptor=self.interceptor)
|
||||
client_config["http_client"] = httpx.Client(transport=transport)
|
||||
return OpenAI(**client_config)
|
||||
http_client = httpx.Client(transport=transport)
|
||||
client_config["http_client"] = http_client
|
||||
|
||||
def _build_async_client(self) -> Any:
|
||||
client_config = self._get_client_params()
|
||||
self._client = OpenAI(**client_config)
|
||||
|
||||
async_client_config = self._get_client_params()
|
||||
if self.interceptor:
|
||||
transport = AsyncHTTPTransport(interceptor=self.interceptor)
|
||||
client_config["http_client"] = httpx.AsyncClient(transport=transport)
|
||||
return AsyncOpenAI(**client_config)
|
||||
async_transport = AsyncHTTPTransport(interceptor=self.interceptor)
|
||||
async_http_client = httpx.AsyncClient(transport=async_transport)
|
||||
async_client_config["http_client"] = async_http_client
|
||||
|
||||
def _get_sync_client(self) -> Any:
|
||||
if self._client is None:
|
||||
self._client = self._build_sync_client()
|
||||
return self._client
|
||||
|
||||
def _get_async_client(self) -> Any:
|
||||
if self._async_client is None:
|
||||
self._async_client = self._build_async_client()
|
||||
return self._async_client
|
||||
self._async_client = AsyncOpenAI(**async_client_config)
|
||||
return self
|
||||
|
||||
@property
|
||||
def last_response_id(self) -> str | None:
|
||||
@@ -786,6 +764,8 @@ class OpenAICompletion(BaseLLM):
|
||||
"function": {"name": "...", "description": "...", "parameters": {...}}
|
||||
}
|
||||
"""
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
|
||||
responses_tools = []
|
||||
|
||||
for tool in tools:
|
||||
@@ -817,7 +797,7 @@ class OpenAICompletion(BaseLLM):
|
||||
) -> str | ResponsesAPIResult | Any:
|
||||
"""Handle non-streaming Responses API call."""
|
||||
try:
|
||||
response: Response = self._get_sync_client().responses.create(**params)
|
||||
response: Response = self._client.responses.create(**params)
|
||||
|
||||
# Track response ID for auto-chaining
|
||||
if self.auto_chain and response.id:
|
||||
@@ -953,9 +933,7 @@ class OpenAICompletion(BaseLLM):
|
||||
) -> str | ResponsesAPIResult | Any:
|
||||
"""Handle async non-streaming Responses API call."""
|
||||
try:
|
||||
response: Response = await self._get_async_client().responses.create(
|
||||
**params
|
||||
)
|
||||
response: Response = await self._async_client.responses.create(**params)
|
||||
|
||||
# Track response ID for auto-chaining
|
||||
if self.auto_chain and response.id:
|
||||
@@ -1091,7 +1069,7 @@ class OpenAICompletion(BaseLLM):
|
||||
final_response: Response | None = None
|
||||
usage: dict[str, Any] | None = None
|
||||
|
||||
stream = self._get_sync_client().responses.create(**params)
|
||||
stream = self._client.responses.create(**params)
|
||||
response_id_stream = None
|
||||
|
||||
for event in stream:
|
||||
@@ -1219,7 +1197,7 @@ class OpenAICompletion(BaseLLM):
|
||||
final_response: Response | None = None
|
||||
usage: dict[str, Any] | None = None
|
||||
|
||||
stream = await self._get_async_client().responses.create(**params)
|
||||
stream = await self._async_client.responses.create(**params)
|
||||
response_id_stream = None
|
||||
|
||||
async for event in stream:
|
||||
@@ -1570,6 +1548,11 @@ class OpenAICompletion(BaseLLM):
|
||||
self, tools: list[dict[str, BaseTool]]
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Convert CrewAI tool format to OpenAI function calling format."""
|
||||
from crewai.llms.providers.utils.common import safe_tool_conversion
|
||||
from crewai.utilities.pydantic_schema_utils import (
|
||||
force_additional_properties_false,
|
||||
)
|
||||
|
||||
openai_tools = []
|
||||
|
||||
for tool in tools:
|
||||
@@ -1588,9 +1571,8 @@ class OpenAICompletion(BaseLLM):
|
||||
params_dict = (
|
||||
parameters if isinstance(parameters, dict) else dict(parameters)
|
||||
)
|
||||
openai_tool["function"]["parameters"] = (
|
||||
sanitize_tool_params_for_openai_strict(params_dict)
|
||||
)
|
||||
params_dict = force_additional_properties_false(params_dict)
|
||||
openai_tool["function"]["parameters"] = params_dict
|
||||
|
||||
openai_tools.append(openai_tool)
|
||||
return openai_tools
|
||||
@@ -1609,7 +1591,7 @@ class OpenAICompletion(BaseLLM):
|
||||
parse_params = {
|
||||
k: v for k, v in params.items() if k != "response_format"
|
||||
}
|
||||
parsed_response = self._get_sync_client().beta.chat.completions.parse(
|
||||
parsed_response = self._client.beta.chat.completions.parse(
|
||||
**parse_params,
|
||||
response_format=response_model,
|
||||
)
|
||||
@@ -1633,9 +1615,7 @@ class OpenAICompletion(BaseLLM):
|
||||
)
|
||||
return parsed_object
|
||||
|
||||
response: ChatCompletion = self._get_sync_client().chat.completions.create(
|
||||
**params
|
||||
)
|
||||
response: ChatCompletion = self._client.chat.completions.create(**params)
|
||||
|
||||
usage = self._extract_openai_token_usage(response)
|
||||
|
||||
@@ -1862,7 +1842,7 @@ class OpenAICompletion(BaseLLM):
|
||||
}
|
||||
|
||||
stream: ChatCompletionStream[BaseModel]
|
||||
with self._get_sync_client().beta.chat.completions.stream(
|
||||
with self._client.beta.chat.completions.stream(
|
||||
**parse_params, response_format=response_model
|
||||
) as stream:
|
||||
for chunk in stream:
|
||||
@@ -1899,7 +1879,7 @@ class OpenAICompletion(BaseLLM):
|
||||
return ""
|
||||
|
||||
completion_stream: Stream[ChatCompletionChunk] = (
|
||||
self._get_sync_client().chat.completions.create(**params)
|
||||
self._client.chat.completions.create(**params)
|
||||
)
|
||||
|
||||
usage_data: dict[str, Any] | None = None
|
||||
@@ -1996,11 +1976,9 @@ class OpenAICompletion(BaseLLM):
|
||||
parse_params = {
|
||||
k: v for k, v in params.items() if k != "response_format"
|
||||
}
|
||||
parsed_response = (
|
||||
await self._get_async_client().beta.chat.completions.parse(
|
||||
**parse_params,
|
||||
response_format=response_model,
|
||||
)
|
||||
parsed_response = await self._async_client.beta.chat.completions.parse(
|
||||
**parse_params,
|
||||
response_format=response_model,
|
||||
)
|
||||
math_reasoning = parsed_response.choices[0].message
|
||||
|
||||
@@ -2022,8 +2000,8 @@ class OpenAICompletion(BaseLLM):
|
||||
)
|
||||
return parsed_object
|
||||
|
||||
response: ChatCompletion = (
|
||||
await self._get_async_client().chat.completions.create(**params)
|
||||
response: ChatCompletion = await self._async_client.chat.completions.create(
|
||||
**params
|
||||
)
|
||||
|
||||
usage = self._extract_openai_token_usage(response)
|
||||
@@ -2149,7 +2127,7 @@ class OpenAICompletion(BaseLLM):
|
||||
if response_model:
|
||||
completion_stream: AsyncIterator[
|
||||
ChatCompletionChunk
|
||||
] = await self._get_async_client().chat.completions.create(**params)
|
||||
] = await self._async_client.chat.completions.create(**params)
|
||||
|
||||
accumulated_content = ""
|
||||
usage_data: dict[str, Any] | None = None
|
||||
@@ -2205,7 +2183,7 @@ class OpenAICompletion(BaseLLM):
|
||||
|
||||
stream: AsyncIterator[
|
||||
ChatCompletionChunk
|
||||
] = await self._get_async_client().chat.completions.create(**params)
|
||||
] = await self._async_client.chat.completions.create(**params)
|
||||
|
||||
usage_data = None
|
||||
|
||||
@@ -2401,8 +2379,8 @@ class OpenAICompletion(BaseLLM):
|
||||
from crewai_files.uploaders.openai import OpenAIFileUploader
|
||||
|
||||
return OpenAIFileUploader(
|
||||
client=self._get_sync_client(),
|
||||
async_client=self._get_async_client(),
|
||||
client=self._client,
|
||||
async_client=self._async_client,
|
||||
)
|
||||
except ImportError:
|
||||
return None
|
||||
|
||||
@@ -45,7 +45,6 @@ from crewai.events.types.task_events import (
|
||||
TaskStartedEvent,
|
||||
)
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.llms.providers.openai.completion import OpenAICompletion
|
||||
from crewai.security import Fingerprint, SecurityConfig
|
||||
from crewai.tasks.output_format import OutputFormat
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
@@ -302,14 +301,12 @@ class Task(BaseModel):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_required_fields(self) -> Self:
|
||||
if self.description is None:
|
||||
raise ValueError(
|
||||
"description must be provided either directly or through config"
|
||||
)
|
||||
if self.expected_output is None:
|
||||
raise ValueError(
|
||||
"expected_output must be provided either directly or through config"
|
||||
)
|
||||
required_fields = ["description", "expected_output"]
|
||||
for field in required_fields:
|
||||
if getattr(self, field) is None:
|
||||
raise ValueError(
|
||||
f"{field} must be provided either directly or through config"
|
||||
)
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
@@ -841,8 +838,8 @@ class Task(BaseModel):
|
||||
should_inject = self.allow_crewai_trigger_context
|
||||
|
||||
if should_inject and self.agent:
|
||||
crew = self.agent.crew
|
||||
if crew and not isinstance(crew, str) and crew._inputs:
|
||||
crew = getattr(self.agent, "crew", None)
|
||||
if crew and hasattr(crew, "_inputs") and crew._inputs:
|
||||
trigger_payload = crew._inputs.get("crewai_trigger_payload")
|
||||
if trigger_payload is not None:
|
||||
description += f"\n\nTrigger Payload: {trigger_payload}"
|
||||
@@ -855,12 +852,11 @@ class Task(BaseModel):
|
||||
isinstance(self.agent.llm, BaseLLM)
|
||||
and self.agent.llm.supports_multimodal()
|
||||
):
|
||||
provider: str = self.agent.llm.provider or self.agent.llm.model
|
||||
api: str | None = (
|
||||
self.agent.llm.api
|
||||
if isinstance(self.agent.llm, OpenAICompletion)
|
||||
else None
|
||||
provider: str = str(
|
||||
getattr(self.agent.llm, "provider", None)
|
||||
or getattr(self.agent.llm, "model", "openai")
|
||||
)
|
||||
api: str | None = getattr(self.agent.llm, "api", None)
|
||||
supported_types = get_supported_content_types(provider, api)
|
||||
|
||||
def is_auto_injected(content_type: str) -> bool:
|
||||
|
||||
@@ -19,7 +19,7 @@ from collections.abc import Callable
|
||||
from copy import deepcopy
|
||||
import datetime
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Annotated, Any, Final, Literal, TypedDict, Union, cast
|
||||
from typing import TYPE_CHECKING, Annotated, Any, Final, Literal, TypedDict, Union
|
||||
import uuid
|
||||
|
||||
import jsonref # type: ignore[import-untyped]
|
||||
@@ -417,119 +417,6 @@ def strip_null_from_types(schema: dict[str, Any]) -> dict[str, Any]:
|
||||
return schema
|
||||
|
||||
|
||||
_STRICT_METADATA_KEYS: Final[tuple[str, ...]] = (
|
||||
"title",
|
||||
"default",
|
||||
"examples",
|
||||
"example",
|
||||
"$comment",
|
||||
"readOnly",
|
||||
"writeOnly",
|
||||
"deprecated",
|
||||
)
|
||||
|
||||
_CLAUDE_STRICT_UNSUPPORTED: Final[tuple[str, ...]] = (
|
||||
"minimum",
|
||||
"maximum",
|
||||
"exclusiveMinimum",
|
||||
"exclusiveMaximum",
|
||||
"multipleOf",
|
||||
"minLength",
|
||||
"maxLength",
|
||||
"pattern",
|
||||
"minItems",
|
||||
"maxItems",
|
||||
"uniqueItems",
|
||||
"minContains",
|
||||
"maxContains",
|
||||
"minProperties",
|
||||
"maxProperties",
|
||||
"patternProperties",
|
||||
"propertyNames",
|
||||
"dependentRequired",
|
||||
"dependentSchemas",
|
||||
)
|
||||
|
||||
|
||||
def _strip_keys_recursive(d: Any, keys: tuple[str, ...]) -> Any:
|
||||
"""Recursively delete a fixed set of keys from a schema."""
|
||||
if isinstance(d, dict):
|
||||
for key in keys:
|
||||
d.pop(key, None)
|
||||
for v in d.values():
|
||||
_strip_keys_recursive(v, keys)
|
||||
elif isinstance(d, list):
|
||||
for i in d:
|
||||
_strip_keys_recursive(i, keys)
|
||||
return d
|
||||
|
||||
|
||||
def lift_top_level_anyof(schema: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Unwrap a top-level anyOf/oneOf/allOf wrapping a single object variant.
|
||||
|
||||
Anthropic's strict ``input_schema`` rejects top-level union keywords. When
|
||||
exactly one variant is an object schema, lift it so the root is a plain
|
||||
object; otherwise leave the schema alone.
|
||||
"""
|
||||
for key in ("anyOf", "oneOf", "allOf"):
|
||||
variants = schema.get(key)
|
||||
if not isinstance(variants, list):
|
||||
continue
|
||||
object_variants = [
|
||||
v for v in variants if isinstance(v, dict) and v.get("type") == "object"
|
||||
]
|
||||
if len(object_variants) == 1:
|
||||
lifted = deepcopy(object_variants[0])
|
||||
schema.pop(key)
|
||||
schema.update(lifted)
|
||||
break
|
||||
return schema
|
||||
|
||||
|
||||
def _common_strict_pipeline(params: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Shared strict sanitization: inline refs, close objects, require all properties."""
|
||||
sanitized = resolve_refs(deepcopy(params))
|
||||
sanitized.pop("$defs", None)
|
||||
sanitized = convert_oneof_to_anyof(sanitized)
|
||||
sanitized = ensure_type_in_schemas(sanitized)
|
||||
sanitized = force_additional_properties_false(sanitized)
|
||||
sanitized = ensure_all_properties_required(sanitized)
|
||||
return cast(dict[str, Any], _strip_keys_recursive(sanitized, _STRICT_METADATA_KEYS))
|
||||
|
||||
|
||||
def sanitize_tool_params_for_openai_strict(
|
||||
params: dict[str, Any],
|
||||
) -> dict[str, Any]:
|
||||
"""Sanitize a JSON schema for OpenAI strict function calling."""
|
||||
if not isinstance(params, dict):
|
||||
return params
|
||||
return cast(
|
||||
dict[str, Any], strip_unsupported_formats(_common_strict_pipeline(params))
|
||||
)
|
||||
|
||||
|
||||
def sanitize_tool_params_for_anthropic_strict(
|
||||
params: dict[str, Any],
|
||||
) -> dict[str, Any]:
|
||||
"""Sanitize a JSON schema for Anthropic strict tool use."""
|
||||
if not isinstance(params, dict):
|
||||
return params
|
||||
sanitized = lift_top_level_anyof(_common_strict_pipeline(params))
|
||||
sanitized = _strip_keys_recursive(sanitized, _CLAUDE_STRICT_UNSUPPORTED)
|
||||
return cast(dict[str, Any], strip_unsupported_formats(sanitized))
|
||||
|
||||
|
||||
def sanitize_tool_params_for_bedrock_strict(
|
||||
params: dict[str, Any],
|
||||
) -> dict[str, Any]:
|
||||
"""Sanitize a JSON schema for Bedrock Converse strict tool use.
|
||||
|
||||
Bedrock Converse uses the same grammar compiler as the underlying Claude
|
||||
model, so the constraints match Anthropic's.
|
||||
"""
|
||||
return sanitize_tool_params_for_anthropic_strict(params)
|
||||
|
||||
|
||||
def generate_model_description(
|
||||
model: type[BaseModel],
|
||||
*,
|
||||
|
||||
@@ -1051,7 +1051,7 @@ def test_lite_agent_verbose_false_suppresses_printer_output():
|
||||
successful_requests=1,
|
||||
)
|
||||
|
||||
with pytest.warns(FutureWarning):
|
||||
with pytest.warns(DeprecationWarning):
|
||||
agent = LiteAgent(
|
||||
role="Test Agent",
|
||||
goal="Test goal",
|
||||
|
||||
@@ -125,7 +125,7 @@ class TestDeployCommand(unittest.TestCase):
|
||||
mock_response.json.return_value = {"uuid": "test-uuid"}
|
||||
self.mock_client.deploy_by_uuid.return_value = mock_response
|
||||
|
||||
self.deploy_command.deploy(uuid="test-uuid", skip_validate=True)
|
||||
self.deploy_command.deploy(uuid="test-uuid")
|
||||
|
||||
self.mock_client.deploy_by_uuid.assert_called_once_with("test-uuid")
|
||||
mock_display.assert_called_once_with({"uuid": "test-uuid"})
|
||||
@@ -137,7 +137,7 @@ class TestDeployCommand(unittest.TestCase):
|
||||
mock_response.json.return_value = {"uuid": "test-uuid"}
|
||||
self.mock_client.deploy_by_name.return_value = mock_response
|
||||
|
||||
self.deploy_command.deploy(skip_validate=True)
|
||||
self.deploy_command.deploy()
|
||||
|
||||
self.mock_client.deploy_by_name.assert_called_once_with("test_project")
|
||||
mock_display.assert_called_once_with({"uuid": "test-uuid"})
|
||||
@@ -156,7 +156,7 @@ class TestDeployCommand(unittest.TestCase):
|
||||
self.mock_client.create_crew.return_value = mock_response
|
||||
|
||||
with patch("sys.stdout", new=StringIO()) as fake_out:
|
||||
self.deploy_command.create_crew(skip_validate=True)
|
||||
self.deploy_command.create_crew()
|
||||
self.assertIn("Deployment created successfully!", fake_out.getvalue())
|
||||
self.assertIn("new-uuid", fake_out.getvalue())
|
||||
|
||||
|
||||
@@ -1,430 +0,0 @@
|
||||
"""Tests for `crewai.cli.deploy.validate`.
|
||||
|
||||
The fixtures here correspond 1:1 to the deployment-failure patterns observed
|
||||
in the #crewai-deployment-failures Slack channel that motivated this work.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from textwrap import dedent
|
||||
from typing import Iterable
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.cli.deploy.validate import (
|
||||
DeployValidator,
|
||||
Severity,
|
||||
normalize_package_name,
|
||||
)
|
||||
|
||||
|
||||
def _make_pyproject(
|
||||
name: str = "my_crew",
|
||||
dependencies: Iterable[str] = ("crewai>=1.14.0",),
|
||||
*,
|
||||
hatchling: bool = False,
|
||||
flow: bool = False,
|
||||
extra: str = "",
|
||||
) -> str:
|
||||
deps = ", ".join(f'"{d}"' for d in dependencies)
|
||||
lines = [
|
||||
"[project]",
|
||||
f'name = "{name}"',
|
||||
'version = "0.1.0"',
|
||||
f"dependencies = [{deps}]",
|
||||
]
|
||||
if hatchling:
|
||||
lines += [
|
||||
"",
|
||||
"[build-system]",
|
||||
'requires = ["hatchling"]',
|
||||
'build-backend = "hatchling.build"',
|
||||
]
|
||||
if flow:
|
||||
lines += ["", "[tool.crewai]", 'type = "flow"']
|
||||
if extra:
|
||||
lines += ["", extra]
|
||||
return "\n".join(lines) + "\n"
|
||||
|
||||
|
||||
def _scaffold_standard_crew(
|
||||
root: Path,
|
||||
*,
|
||||
name: str = "my_crew",
|
||||
include_crew_py: bool = True,
|
||||
include_agents_yaml: bool = True,
|
||||
include_tasks_yaml: bool = True,
|
||||
include_lockfile: bool = True,
|
||||
pyproject: str | None = None,
|
||||
) -> Path:
|
||||
(root / "pyproject.toml").write_text(pyproject or _make_pyproject(name=name))
|
||||
if include_lockfile:
|
||||
(root / "uv.lock").write_text("# dummy uv lockfile\n")
|
||||
|
||||
pkg_dir = root / "src" / normalize_package_name(name)
|
||||
pkg_dir.mkdir(parents=True)
|
||||
(pkg_dir / "__init__.py").write_text("")
|
||||
|
||||
if include_crew_py:
|
||||
(pkg_dir / "crew.py").write_text(
|
||||
dedent(
|
||||
"""
|
||||
from crewai.project import CrewBase, crew
|
||||
|
||||
@CrewBase
|
||||
class MyCrew:
|
||||
agents_config = "config/agents.yaml"
|
||||
tasks_config = "config/tasks.yaml"
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
from crewai import Crew
|
||||
return Crew(agents=[], tasks=[])
|
||||
"""
|
||||
).strip()
|
||||
+ "\n"
|
||||
)
|
||||
|
||||
config_dir = pkg_dir / "config"
|
||||
config_dir.mkdir()
|
||||
if include_agents_yaml:
|
||||
(config_dir / "agents.yaml").write_text("{}\n")
|
||||
if include_tasks_yaml:
|
||||
(config_dir / "tasks.yaml").write_text("{}\n")
|
||||
|
||||
return pkg_dir
|
||||
|
||||
|
||||
def _codes(validator: DeployValidator) -> set[str]:
|
||||
return {r.code for r in validator.results}
|
||||
|
||||
|
||||
def _run_without_import_check(root: Path) -> DeployValidator:
|
||||
"""Run validation with the subprocess-based import check stubbed out;
|
||||
the classifier is exercised directly in its own tests below."""
|
||||
with patch.object(DeployValidator, "_check_module_imports", lambda self: None):
|
||||
v = DeployValidator(project_root=root)
|
||||
v.run()
|
||||
return v
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"project_name, expected",
|
||||
[
|
||||
("my-crew", "my_crew"),
|
||||
("My Cool-Project", "my_cool_project"),
|
||||
("crew123", "crew123"),
|
||||
("crew.name!with$chars", "crewnamewithchars"),
|
||||
],
|
||||
)
|
||||
def test_normalize_package_name(project_name: str, expected: str) -> None:
|
||||
assert normalize_package_name(project_name) == expected
|
||||
|
||||
|
||||
def test_valid_standard_crew_project_passes(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert v.ok, f"expected clean run, got {v.results}"
|
||||
|
||||
|
||||
def test_missing_pyproject_errors(tmp_path: Path) -> None:
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_pyproject" in _codes(v)
|
||||
assert not v.ok
|
||||
|
||||
|
||||
def test_invalid_pyproject_errors(tmp_path: Path) -> None:
|
||||
(tmp_path / "pyproject.toml").write_text("this is not valid toml ====\n")
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "invalid_pyproject" in _codes(v)
|
||||
|
||||
|
||||
def test_missing_project_name_errors(tmp_path: Path) -> None:
|
||||
(tmp_path / "pyproject.toml").write_text(
|
||||
'[project]\nversion = "0.1.0"\ndependencies = ["crewai>=1.14.0"]\n'
|
||||
)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_project_name" in _codes(v)
|
||||
|
||||
|
||||
def test_missing_lockfile_errors(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path, include_lockfile=False)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_lockfile" in _codes(v)
|
||||
|
||||
|
||||
def test_poetry_lock_is_accepted(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path, include_lockfile=False)
|
||||
(tmp_path / "poetry.lock").write_text("# poetry lockfile\n")
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_lockfile" not in _codes(v)
|
||||
|
||||
|
||||
def test_stale_lockfile_warns(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path)
|
||||
# Make lockfile older than pyproject.
|
||||
lock = tmp_path / "uv.lock"
|
||||
pyproject = tmp_path / "pyproject.toml"
|
||||
old_time = pyproject.stat().st_mtime - 60
|
||||
import os
|
||||
|
||||
os.utime(lock, (old_time, old_time))
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "stale_lockfile" in _codes(v)
|
||||
# Stale is a warning, so the run can still be ok (no errors).
|
||||
assert v.ok
|
||||
|
||||
|
||||
def test_missing_package_dir_errors(tmp_path: Path) -> None:
|
||||
# pyproject says name=my_crew but we only create src/other_pkg/
|
||||
(tmp_path / "pyproject.toml").write_text(_make_pyproject(name="my_crew"))
|
||||
(tmp_path / "uv.lock").write_text("")
|
||||
(tmp_path / "src" / "other_pkg").mkdir(parents=True)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
codes = _codes(v)
|
||||
assert "missing_package_dir" in codes
|
||||
finding = next(r for r in v.results if r.code == "missing_package_dir")
|
||||
assert "other_pkg" in finding.hint
|
||||
|
||||
|
||||
def test_egg_info_only_errors_with_targeted_hint(tmp_path: Path) -> None:
|
||||
"""Regression for the case where only src/<name>.egg-info/ exists."""
|
||||
(tmp_path / "pyproject.toml").write_text(_make_pyproject(name="odoo_pm_agents"))
|
||||
(tmp_path / "uv.lock").write_text("")
|
||||
(tmp_path / "src" / "odoo_pm_agents.egg-info").mkdir(parents=True)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
finding = next(r for r in v.results if r.code == "missing_package_dir")
|
||||
assert "egg-info" in finding.hint
|
||||
|
||||
|
||||
def test_stale_egg_info_sibling_warns(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path)
|
||||
(tmp_path / "src" / "my_crew.egg-info").mkdir()
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "stale_egg_info" in _codes(v)
|
||||
|
||||
|
||||
def test_missing_crew_py_errors(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path, include_crew_py=False)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_crew_py" in _codes(v)
|
||||
|
||||
|
||||
def test_missing_agents_yaml_errors(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path, include_agents_yaml=False)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_agents_yaml" in _codes(v)
|
||||
|
||||
|
||||
def test_missing_tasks_yaml_errors(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path, include_tasks_yaml=False)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_tasks_yaml" in _codes(v)
|
||||
|
||||
|
||||
def test_flow_project_requires_main_py(tmp_path: Path) -> None:
|
||||
(tmp_path / "pyproject.toml").write_text(
|
||||
_make_pyproject(name="my_flow", flow=True)
|
||||
)
|
||||
(tmp_path / "uv.lock").write_text("")
|
||||
(tmp_path / "src" / "my_flow").mkdir(parents=True)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_flow_main" in _codes(v)
|
||||
|
||||
|
||||
def test_flow_project_with_main_py_passes(tmp_path: Path) -> None:
|
||||
(tmp_path / "pyproject.toml").write_text(
|
||||
_make_pyproject(name="my_flow", flow=True)
|
||||
)
|
||||
(tmp_path / "uv.lock").write_text("")
|
||||
pkg = tmp_path / "src" / "my_flow"
|
||||
pkg.mkdir(parents=True)
|
||||
(pkg / "main.py").write_text("# flow entrypoint\n")
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "missing_flow_main" not in _codes(v)
|
||||
|
||||
|
||||
def test_hatchling_without_wheel_config_passes_when_pkg_dir_matches(
|
||||
tmp_path: Path,
|
||||
) -> None:
|
||||
_scaffold_standard_crew(
|
||||
tmp_path, pyproject=_make_pyproject(name="my_crew", hatchling=True)
|
||||
)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
# src/my_crew/ exists, so hatch default should find it — no wheel error.
|
||||
assert "hatch_wheel_target_missing" not in _codes(v)
|
||||
|
||||
|
||||
def test_hatchling_with_explicit_wheel_config_passes(tmp_path: Path) -> None:
|
||||
extra = (
|
||||
"[tool.hatch.build.targets.wheel]\n"
|
||||
'packages = ["src/my_crew"]'
|
||||
)
|
||||
_scaffold_standard_crew(
|
||||
tmp_path,
|
||||
pyproject=_make_pyproject(name="my_crew", hatchling=True, extra=extra),
|
||||
)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "hatch_wheel_target_missing" not in _codes(v)
|
||||
|
||||
|
||||
def test_classify_missing_openai_key_is_warning(tmp_path: Path) -> None:
|
||||
v = DeployValidator(project_root=tmp_path)
|
||||
v._classify_import_error(
|
||||
"ImportError",
|
||||
"Error importing native provider: 1 validation error for OpenAICompletion\n"
|
||||
" Value error, OPENAI_API_KEY is required",
|
||||
tb="",
|
||||
)
|
||||
assert len(v.results) == 1
|
||||
result = v.results[0]
|
||||
assert result.code == "llm_init_missing_key"
|
||||
assert result.severity is Severity.WARNING
|
||||
assert "OPENAI_API_KEY" in result.title
|
||||
|
||||
|
||||
def test_classify_azure_extra_missing_is_error(tmp_path: Path) -> None:
|
||||
"""The real message raised by the Azure provider module uses plain
|
||||
double quotes around the install command (no backticks). Match the
|
||||
exact string that ships in the provider source so this test actually
|
||||
guards the regex used in production."""
|
||||
v = DeployValidator(project_root=tmp_path)
|
||||
v._classify_import_error(
|
||||
"ImportError",
|
||||
'Azure AI Inference native provider not available, to install: uv add "crewai[azure-ai-inference]"',
|
||||
tb="",
|
||||
)
|
||||
assert "missing_provider_extra" in _codes(v)
|
||||
finding = next(r for r in v.results if r.code == "missing_provider_extra")
|
||||
assert finding.title.startswith("Azure AI Inference")
|
||||
assert 'uv add "crewai[azure-ai-inference]"' in finding.hint
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"pkg_label, install_cmd",
|
||||
[
|
||||
("Anthropic", 'uv add "crewai[anthropic]"'),
|
||||
("AWS Bedrock", 'uv add "crewai[bedrock]"'),
|
||||
("Google Gen AI", 'uv add "crewai[google-genai]"'),
|
||||
],
|
||||
)
|
||||
def test_classify_missing_provider_extra_matches_real_messages(
|
||||
tmp_path: Path, pkg_label: str, install_cmd: str
|
||||
) -> None:
|
||||
"""Regression for the four provider error strings verbatim."""
|
||||
v = DeployValidator(project_root=tmp_path)
|
||||
v._classify_import_error(
|
||||
"ImportError",
|
||||
f"{pkg_label} native provider not available, to install: {install_cmd}",
|
||||
tb="",
|
||||
)
|
||||
assert "missing_provider_extra" in _codes(v)
|
||||
finding = next(r for r in v.results if r.code == "missing_provider_extra")
|
||||
assert install_cmd in finding.hint
|
||||
|
||||
|
||||
def test_classify_keyerror_at_import_is_warning(tmp_path: Path) -> None:
|
||||
"""Regression for `KeyError: 'SERPLY_API_KEY'` raised at import time."""
|
||||
v = DeployValidator(project_root=tmp_path)
|
||||
v._classify_import_error("KeyError", "'SERPLY_API_KEY'", tb="")
|
||||
codes = _codes(v)
|
||||
assert "env_var_read_at_import" in codes
|
||||
|
||||
|
||||
def test_classify_no_crewbase_class_is_error(tmp_path: Path) -> None:
|
||||
v = DeployValidator(project_root=tmp_path)
|
||||
v._classify_import_error(
|
||||
"ValueError",
|
||||
"Crew class annotated with @CrewBase not found.",
|
||||
tb="",
|
||||
)
|
||||
assert "no_crewbase_class" in _codes(v)
|
||||
|
||||
|
||||
def test_classify_no_flow_subclass_is_error(tmp_path: Path) -> None:
|
||||
v = DeployValidator(project_root=tmp_path)
|
||||
v._classify_import_error("ValueError", "No Flow subclass found in the module.", tb="")
|
||||
assert "no_flow_subclass" in _codes(v)
|
||||
|
||||
|
||||
def test_classify_stale_crewai_pin_attribute_error(tmp_path: Path) -> None:
|
||||
"""Regression for a stale crewai pin missing `_load_response_format`."""
|
||||
v = DeployValidator(project_root=tmp_path)
|
||||
v._classify_import_error(
|
||||
"AttributeError",
|
||||
"'EmploymentServiceDecisionSupportSystemCrew' object has no attribute '_load_response_format'",
|
||||
tb="",
|
||||
)
|
||||
assert "stale_crewai_pin" in _codes(v)
|
||||
|
||||
|
||||
def test_classify_unknown_error_is_fallback(tmp_path: Path) -> None:
|
||||
v = DeployValidator(project_root=tmp_path)
|
||||
v._classify_import_error("RuntimeError", "something weird happened", tb="")
|
||||
assert "import_failed" in _codes(v)
|
||||
|
||||
|
||||
def test_env_var_referenced_but_missing_warns(tmp_path: Path) -> None:
|
||||
pkg = _scaffold_standard_crew(tmp_path)
|
||||
(pkg / "tools.py").write_text(
|
||||
'import os\nkey = os.getenv("TAVILY_API_KEY")\n'
|
||||
)
|
||||
import os
|
||||
|
||||
# Make sure the test doesn't inherit the key from the host environment.
|
||||
with patch.dict(os.environ, {}, clear=False):
|
||||
os.environ.pop("TAVILY_API_KEY", None)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
codes = _codes(v)
|
||||
assert "env_vars_not_in_dotenv" in codes
|
||||
|
||||
|
||||
def test_env_var_in_dotenv_does_not_warn(tmp_path: Path) -> None:
|
||||
pkg = _scaffold_standard_crew(tmp_path)
|
||||
(pkg / "tools.py").write_text(
|
||||
'import os\nkey = os.getenv("TAVILY_API_KEY")\n'
|
||||
)
|
||||
(tmp_path / ".env").write_text("TAVILY_API_KEY=abc\n")
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "env_vars_not_in_dotenv" not in _codes(v)
|
||||
|
||||
|
||||
def test_old_crewai_pin_in_uv_lock_warns(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path)
|
||||
(tmp_path / "uv.lock").write_text(
|
||||
'name = "crewai"\nversion = "1.10.0"\nsource = { registry = "..." }\n'
|
||||
)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "old_crewai_pin" in _codes(v)
|
||||
|
||||
|
||||
def test_modern_crewai_pin_does_not_warn(tmp_path: Path) -> None:
|
||||
_scaffold_standard_crew(tmp_path)
|
||||
(tmp_path / "uv.lock").write_text(
|
||||
'name = "crewai"\nversion = "1.14.1"\nsource = { registry = "..." }\n'
|
||||
)
|
||||
v = _run_without_import_check(tmp_path)
|
||||
assert "old_crewai_pin" not in _codes(v)
|
||||
|
||||
|
||||
def test_create_crew_aborts_on_validation_error(tmp_path: Path) -> None:
|
||||
"""`crewai deploy create` must not contact the API when validation fails."""
|
||||
from unittest.mock import MagicMock, patch as mock_patch
|
||||
|
||||
from crewai.cli.deploy.main import DeployCommand
|
||||
|
||||
with (
|
||||
mock_patch("crewai.cli.command.get_auth_token", return_value="tok"),
|
||||
mock_patch("crewai.cli.deploy.main.get_project_name", return_value="p"),
|
||||
mock_patch("crewai.cli.command.PlusAPI") as mock_api,
|
||||
mock_patch(
|
||||
"crewai.cli.deploy.main.validate_project"
|
||||
) as mock_validate,
|
||||
):
|
||||
mock_validate.return_value = MagicMock(ok=False)
|
||||
cmd = DeployCommand()
|
||||
cmd.create_crew()
|
||||
assert not cmd.plus_api_client.create_crew.called
|
||||
del mock_api # silence unused-var lint
|
||||
@@ -367,7 +367,7 @@ def test_deploy_push(command, runner):
|
||||
result = runner.invoke(deploy_push, ["-u", uuid])
|
||||
|
||||
assert result.exit_code == 0
|
||||
mock_deploy.deploy.assert_called_once_with(uuid=uuid, skip_validate=False)
|
||||
mock_deploy.deploy.assert_called_once_with(uuid=uuid)
|
||||
|
||||
|
||||
@mock.patch("crewai.cli.cli.DeployCommand")
|
||||
@@ -376,7 +376,7 @@ def test_deploy_push_no_uuid(command, runner):
|
||||
result = runner.invoke(deploy_push)
|
||||
|
||||
assert result.exit_code == 0
|
||||
mock_deploy.deploy.assert_called_once_with(uuid=None, skip_validate=False)
|
||||
mock_deploy.deploy.assert_called_once_with(uuid=None)
|
||||
|
||||
|
||||
@mock.patch("crewai.cli.cli.DeployCommand")
|
||||
|
||||
@@ -2,7 +2,6 @@ import os
|
||||
import sys
|
||||
import types
|
||||
from unittest.mock import patch, MagicMock, Mock
|
||||
from urllib.parse import urlparse
|
||||
import pytest
|
||||
|
||||
from crewai.llm import LLM
|
||||
@@ -379,72 +378,23 @@ def test_azure_completion_with_tools():
|
||||
|
||||
|
||||
def test_azure_raises_error_when_endpoint_missing():
|
||||
"""Credentials are validated lazily: construction succeeds, first
|
||||
client build raises the descriptive error."""
|
||||
"""Test that AzureCompletion raises ValueError when endpoint is missing"""
|
||||
from crewai.llms.providers.azure.completion import AzureCompletion
|
||||
|
||||
# Clear environment variables
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
llm = AzureCompletion(model="gpt-4", api_key="test-key")
|
||||
with pytest.raises(ValueError, match="Azure endpoint is required"):
|
||||
llm._get_sync_client()
|
||||
AzureCompletion(model="gpt-4", api_key="test-key")
|
||||
|
||||
|
||||
def test_azure_raises_error_when_api_key_missing():
|
||||
"""Credentials are validated lazily: construction succeeds, first
|
||||
client build raises the descriptive error."""
|
||||
"""Test that AzureCompletion raises ValueError when API key is missing"""
|
||||
from crewai.llms.providers.azure.completion import AzureCompletion
|
||||
|
||||
# Clear environment variables
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
llm = AzureCompletion(
|
||||
model="gpt-4", endpoint="https://test.openai.azure.com"
|
||||
)
|
||||
with pytest.raises(ValueError, match="Azure API key is required"):
|
||||
llm._get_sync_client()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_azure_aclose_is_noop_when_uninitialized():
|
||||
"""`aclose` (and `async with`) on an uninstantiated-client LLM must be
|
||||
a harmless no-op, not force lazy construction that then raises for
|
||||
missing credentials."""
|
||||
from crewai.llms.providers.azure.completion import AzureCompletion
|
||||
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
llm = AzureCompletion(model="gpt-4")
|
||||
assert llm._async_client is None
|
||||
await llm.aclose()
|
||||
async with llm:
|
||||
pass
|
||||
|
||||
|
||||
def test_azure_lazy_build_reads_env_vars_set_after_construction():
|
||||
"""When `LLM(model="azure/...")` is constructed before env vars are set,
|
||||
the lazy client builder must re-read `AZURE_API_KEY` / `AZURE_ENDPOINT`
|
||||
so the LLM actually works once credentials become available, and the
|
||||
`is_azure_openai_endpoint` routing flag must be recomputed off the
|
||||
newly-resolved endpoint."""
|
||||
from crewai.llms.providers.azure.completion import AzureCompletion
|
||||
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
llm = AzureCompletion(model="gpt-4")
|
||||
assert llm.api_key is None
|
||||
assert llm.endpoint is None
|
||||
assert llm.is_azure_openai_endpoint is False
|
||||
|
||||
with patch.dict(
|
||||
os.environ,
|
||||
{
|
||||
"AZURE_API_KEY": "late-key",
|
||||
"AZURE_ENDPOINT": "https://test.openai.azure.com/openai/deployments/gpt-4",
|
||||
},
|
||||
clear=True,
|
||||
):
|
||||
client = llm._get_sync_client()
|
||||
assert client is not None
|
||||
assert llm.api_key == "late-key"
|
||||
assert llm.endpoint is not None
|
||||
assert urlparse(llm.endpoint).hostname == "test.openai.azure.com"
|
||||
assert llm.is_azure_openai_endpoint is True
|
||||
AzureCompletion(model="gpt-4", endpoint="https://test.openai.azure.com")
|
||||
|
||||
|
||||
def test_azure_endpoint_configuration():
|
||||
|
||||
@@ -64,23 +64,6 @@ def test_gemini_completion_module_is_imported():
|
||||
assert hasattr(completion_mod, 'GeminiCompletion')
|
||||
|
||||
|
||||
def test_gemini_lazy_build_reads_env_vars_set_after_construction():
|
||||
"""When `LLM(model="gemini/...")` is constructed before env vars are set,
|
||||
the lazy client builder must re-read `GOOGLE_API_KEY` / `GEMINI_API_KEY`
|
||||
so the LLM works once credentials become available."""
|
||||
from crewai.llms.providers.gemini.completion import GeminiCompletion
|
||||
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
llm = GeminiCompletion(model="gemini-1.5-pro")
|
||||
assert llm.api_key is None
|
||||
assert llm._client is None
|
||||
|
||||
with patch.dict(os.environ, {"GEMINI_API_KEY": "late-key"}, clear=True):
|
||||
client = llm._get_sync_client()
|
||||
assert client is not None
|
||||
assert llm.api_key == "late-key"
|
||||
|
||||
|
||||
def test_native_gemini_raises_error_when_initialization_fails():
|
||||
"""
|
||||
Test that LLM raises ImportError when native Gemini completion fails.
|
||||
|
||||
@@ -119,12 +119,10 @@ def test_create_llm_with_invalid_type() -> None:
|
||||
|
||||
|
||||
def test_create_llm_openai_missing_api_key() -> None:
|
||||
"""Credentials are validated lazily: `create_llm` succeeds, and the
|
||||
descriptive error only surfaces when the client is actually built."""
|
||||
"""Test that create_llm raises error when OpenAI API key is missing"""
|
||||
with patch.dict(os.environ, {}, clear=True):
|
||||
llm = create_llm(llm_value="gpt-4o")
|
||||
with pytest.raises((ValueError, ImportError)) as exc_info:
|
||||
llm._get_sync_client()
|
||||
create_llm(llm_value="gpt-4o")
|
||||
|
||||
error_message = str(exc_info.value).lower()
|
||||
assert "openai_api_key" in error_message or "api_key" in error_message
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
"""CrewAI development tools."""
|
||||
|
||||
__version__ = "1.14.2a5"
|
||||
__version__ = "1.14.2a2"
|
||||
|
||||
@@ -29,33 +29,6 @@ load_dotenv()
|
||||
console = Console()
|
||||
|
||||
|
||||
def _resume_hint(message: str) -> None:
|
||||
"""Print a boxed resume hint after a failure."""
|
||||
console.print()
|
||||
console.print(
|
||||
Panel(
|
||||
message,
|
||||
title="[bold yellow]How to resume[/bold yellow]",
|
||||
border_style="yellow",
|
||||
padding=(1, 2),
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def _print_release_error(e: BaseException) -> None:
|
||||
"""Print a release error with stderr if available."""
|
||||
if isinstance(e, KeyboardInterrupt):
|
||||
raise
|
||||
if isinstance(e, SystemExit):
|
||||
return
|
||||
if isinstance(e, subprocess.CalledProcessError):
|
||||
console.print(f"[red]Error running command:[/red] {e}")
|
||||
if e.stderr:
|
||||
console.print(e.stderr)
|
||||
else:
|
||||
console.print(f"[red]Error:[/red] {e}")
|
||||
|
||||
|
||||
def run_command(cmd: list[str], cwd: Path | None = None) -> str:
|
||||
"""Run a shell command and return output.
|
||||
|
||||
@@ -291,9 +264,11 @@ def add_docs_version(docs_json_path: Path, version: str) -> bool:
|
||||
if not versions:
|
||||
continue
|
||||
|
||||
# Skip if this version already exists for this language
|
||||
if any(v.get("version") == version_label for v in versions):
|
||||
continue
|
||||
|
||||
# Find the current default and copy its tabs
|
||||
default_version = next(
|
||||
(v for v in versions if v.get("default")),
|
||||
versions[0],
|
||||
@@ -305,7 +280,10 @@ def add_docs_version(docs_json_path: Path, version: str) -> bool:
|
||||
"tabs": default_version.get("tabs", []),
|
||||
}
|
||||
|
||||
# Remove default flag from old default
|
||||
default_version.pop("default", None)
|
||||
|
||||
# Insert new version at the beginning
|
||||
versions.insert(0, new_version)
|
||||
updated = True
|
||||
|
||||
@@ -499,7 +477,7 @@ def _is_crewai_dep(spec: str) -> bool:
|
||||
"""Return True if *spec* is a ``crewai`` or ``crewai[...]`` dependency."""
|
||||
if not spec.startswith("crewai"):
|
||||
return False
|
||||
rest = spec[6:]
|
||||
rest = spec[6:] # after "crewai"
|
||||
return len(rest) > 0 and rest[0] in ("[", "=", ">", "<", "~", "!")
|
||||
|
||||
|
||||
@@ -521,6 +499,7 @@ def _pin_crewai_deps(content: str, version: str) -> str:
|
||||
deps = doc.get("project", {}).get(key)
|
||||
if deps is None:
|
||||
continue
|
||||
# optional-dependencies is a table of lists; dependencies is a list
|
||||
dep_lists = deps.values() if isinstance(deps, Mapping) else [deps]
|
||||
for dep_list in dep_lists:
|
||||
for i, dep in enumerate(dep_list):
|
||||
@@ -659,6 +638,7 @@ def get_github_contributors(commit_range: str) -> list[str]:
|
||||
List of GitHub usernames sorted alphabetically.
|
||||
"""
|
||||
try:
|
||||
# Get GitHub token from gh CLI
|
||||
try:
|
||||
gh_token = run_command(["gh", "auth", "token"])
|
||||
except subprocess.CalledProcessError:
|
||||
@@ -700,6 +680,11 @@ def get_github_contributors(commit_range: str) -> list[str]:
|
||||
return []
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Shared workflow helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _poll_pr_until_merged(
|
||||
branch_name: str, label: str, repo: str | None = None
|
||||
) -> None:
|
||||
@@ -779,6 +764,7 @@ def _update_all_versions(
|
||||
"[yellow]Warning:[/yellow] No __version__ attributes found to update"
|
||||
)
|
||||
|
||||
# Update CLI template pyproject.toml files
|
||||
templates_dir = lib_dir / "crewai" / "src" / "crewai" / "cli" / "templates"
|
||||
if templates_dir.exists():
|
||||
if dry_run:
|
||||
@@ -1177,11 +1163,13 @@ def _repin_crewai_install(run_value: str, version: str) -> str:
|
||||
while marker in remainder:
|
||||
before, _, after = remainder.partition(marker)
|
||||
result.append(before)
|
||||
# after looks like: a2a]==1.14.0" ...
|
||||
bracket_end = after.index("]")
|
||||
extras = after[:bracket_end]
|
||||
rest = after[bracket_end + 1 :]
|
||||
if rest.startswith("=="):
|
||||
ver_start = 2
|
||||
# Find end of version — next quote or whitespace
|
||||
ver_start = 2 # len("==")
|
||||
ver_end = ver_start
|
||||
while ver_end < len(rest) and rest[ver_end] not in ('"', "'", " ", "\n"):
|
||||
ver_end += 1
|
||||
@@ -1343,6 +1331,7 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
|
||||
run_command(["gh", "repo", "clone", enterprise_repo, str(repo_dir)])
|
||||
console.print(f"[green]✓[/green] Cloned {enterprise_repo}")
|
||||
|
||||
# --- bump versions ---
|
||||
for rel_dir in _ENTERPRISE_VERSION_DIRS:
|
||||
pkg_dir = repo_dir / rel_dir
|
||||
if not pkg_dir.exists():
|
||||
@@ -1372,12 +1361,14 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
|
||||
f"{pyproject.relative_to(repo_dir)}"
|
||||
)
|
||||
|
||||
# --- update crewai[tools] pin ---
|
||||
enterprise_pyproject = repo_dir / enterprise_dep_path
|
||||
if _update_enterprise_crewai_dep(enterprise_pyproject, version):
|
||||
console.print(
|
||||
f"[green]✓[/green] Updated crewai[tools] dep in {enterprise_dep_path}"
|
||||
)
|
||||
|
||||
# --- update crewai pins in CI workflows ---
|
||||
for wf in _update_enterprise_workflows(repo_dir, version):
|
||||
console.print(
|
||||
f"[green]✓[/green] Updated crewai pin in {wf.relative_to(repo_dir)}"
|
||||
@@ -1417,6 +1408,7 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
|
||||
time.sleep(_PYPI_POLL_INTERVAL)
|
||||
console.print("[green]✓[/green] Workspace synced")
|
||||
|
||||
# --- branch, commit, push, PR ---
|
||||
branch_name = f"feat/bump-version-{version}"
|
||||
run_command(["git", "checkout", "-b", branch_name], cwd=repo_dir)
|
||||
run_command(["git", "add", "."], cwd=repo_dir)
|
||||
@@ -1450,6 +1442,7 @@ def _release_enterprise(version: str, is_prerelease: bool, dry_run: bool) -> Non
|
||||
|
||||
_poll_pr_until_merged(branch_name, "enterprise bump PR", repo=enterprise_repo)
|
||||
|
||||
# --- tag and release ---
|
||||
run_command(["git", "checkout", "main"], cwd=repo_dir)
|
||||
run_command(["git", "pull"], cwd=repo_dir)
|
||||
|
||||
@@ -1491,6 +1484,7 @@ def _trigger_pypi_publish(tag_name: str, wait: bool = False) -> None:
|
||||
tag_name: The release tag to publish.
|
||||
wait: Block until the workflow run completes.
|
||||
"""
|
||||
# Capture the latest run ID before triggering so we can detect the new one
|
||||
prev_run_id = ""
|
||||
if wait:
|
||||
try:
|
||||
@@ -1565,6 +1559,11 @@ def _trigger_pypi_publish(tag_name: str, wait: bool = False) -> None:
|
||||
console.print("[green]✓[/green] PyPI publish workflow completed")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CLI commands
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@click.group()
|
||||
def cli() -> None:
|
||||
"""Development tools for version bumping and git automation."""
|
||||
@@ -1832,80 +1831,62 @@ def release(
|
||||
skip_enterprise: Skip the enterprise release phase.
|
||||
skip_to_enterprise: Skip phases 1 & 2, run only the enterprise release phase.
|
||||
"""
|
||||
flags: list[str] = []
|
||||
if no_edit:
|
||||
flags.append("--no-edit")
|
||||
if skip_enterprise:
|
||||
flags.append("--skip-enterprise")
|
||||
flag_suffix = (" " + " ".join(flags)) if flags else ""
|
||||
enterprise_hint = (
|
||||
""
|
||||
if skip_enterprise
|
||||
else f"\n\nThen release enterprise:\n\n"
|
||||
f" devtools release {version} --skip-to-enterprise"
|
||||
)
|
||||
|
||||
check_gh_installed()
|
||||
|
||||
if skip_enterprise and skip_to_enterprise:
|
||||
console.print(
|
||||
"[red]Error:[/red] Cannot use both --skip-enterprise "
|
||||
"and --skip-to-enterprise"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
if not skip_enterprise or skip_to_enterprise:
|
||||
missing: list[str] = []
|
||||
if not _ENTERPRISE_REPO:
|
||||
missing.append("ENTERPRISE_REPO")
|
||||
if not _ENTERPRISE_VERSION_DIRS:
|
||||
missing.append("ENTERPRISE_VERSION_DIRS")
|
||||
if not _ENTERPRISE_CREWAI_DEP_PATH:
|
||||
missing.append("ENTERPRISE_CREWAI_DEP_PATH")
|
||||
if missing:
|
||||
console.print(
|
||||
f"[red]Error:[/red] Missing required environment variable(s): "
|
||||
f"{', '.join(missing)}\n"
|
||||
f"Set them or pass --skip-enterprise to skip the enterprise release."
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
cwd = Path.cwd()
|
||||
lib_dir = cwd / "lib"
|
||||
|
||||
is_prerelease = _is_prerelease(version)
|
||||
|
||||
if skip_to_enterprise:
|
||||
try:
|
||||
_release_enterprise(version, is_prerelease, dry_run)
|
||||
except BaseException as e:
|
||||
_print_release_error(e)
|
||||
_resume_hint(
|
||||
f"Fix the issue, then re-run:\n\n"
|
||||
f" devtools release {version} --skip-to-enterprise"
|
||||
)
|
||||
sys.exit(1)
|
||||
console.print(
|
||||
f"\n[green]✓[/green] Enterprise release [bold]{version}[/bold] complete!"
|
||||
)
|
||||
return
|
||||
|
||||
if not dry_run:
|
||||
console.print("Checking git status...")
|
||||
check_git_clean()
|
||||
console.print("[green]✓[/green] Working directory is clean")
|
||||
else:
|
||||
console.print("[dim][DRY RUN][/dim] Would check git status")
|
||||
|
||||
packages = get_packages(lib_dir)
|
||||
|
||||
console.print(f"\nFound {len(packages)} package(s) to update:")
|
||||
for pkg in packages:
|
||||
console.print(f" - {pkg.name}")
|
||||
|
||||
console.print(f"\n[bold cyan]Phase 1: Bumping versions to {version}[/bold cyan]")
|
||||
|
||||
try:
|
||||
check_gh_installed()
|
||||
|
||||
if skip_enterprise and skip_to_enterprise:
|
||||
console.print(
|
||||
"[red]Error:[/red] Cannot use both --skip-enterprise "
|
||||
"and --skip-to-enterprise"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
if not skip_enterprise or skip_to_enterprise:
|
||||
missing: list[str] = []
|
||||
if not _ENTERPRISE_REPO:
|
||||
missing.append("ENTERPRISE_REPO")
|
||||
if not _ENTERPRISE_VERSION_DIRS:
|
||||
missing.append("ENTERPRISE_VERSION_DIRS")
|
||||
if not _ENTERPRISE_CREWAI_DEP_PATH:
|
||||
missing.append("ENTERPRISE_CREWAI_DEP_PATH")
|
||||
if missing:
|
||||
console.print(
|
||||
f"[red]Error:[/red] Missing required environment variable(s): "
|
||||
f"{', '.join(missing)}\n"
|
||||
f"Set them or pass --skip-enterprise to skip the enterprise release."
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
cwd = Path.cwd()
|
||||
lib_dir = cwd / "lib"
|
||||
|
||||
is_prerelease = _is_prerelease(version)
|
||||
|
||||
if skip_to_enterprise:
|
||||
_release_enterprise(version, is_prerelease, dry_run)
|
||||
console.print(
|
||||
f"\n[green]✓[/green] Enterprise release [bold]{version}[/bold] complete!"
|
||||
)
|
||||
return
|
||||
|
||||
if not dry_run:
|
||||
console.print("Checking git status...")
|
||||
check_git_clean()
|
||||
console.print("[green]✓[/green] Working directory is clean")
|
||||
else:
|
||||
console.print("[dim][DRY RUN][/dim] Would check git status")
|
||||
|
||||
packages = get_packages(lib_dir)
|
||||
|
||||
console.print(f"\nFound {len(packages)} package(s) to update:")
|
||||
for pkg in packages:
|
||||
console.print(f" - {pkg.name}")
|
||||
|
||||
# --- Phase 1: Bump versions ---
|
||||
console.print(
|
||||
f"\n[bold cyan]Phase 1: Bumping versions to {version}[/bold cyan]"
|
||||
)
|
||||
|
||||
_update_all_versions(cwd, lib_dir, version, packages, dry_run)
|
||||
|
||||
branch_name = f"feat/bump-version-{version}"
|
||||
@@ -1949,17 +1930,12 @@ def release(
|
||||
console.print(
|
||||
"[dim][DRY RUN][/dim] Would push branch, create PR, and wait for merge"
|
||||
)
|
||||
except BaseException as e:
|
||||
_print_release_error(e)
|
||||
_resume_hint(
|
||||
f"Phase 1 failed. Fix the issue, then re-run:\n\n"
|
||||
f" devtools release {version}{flag_suffix}"
|
||||
|
||||
# --- Phase 2: Tag and release ---
|
||||
console.print(
|
||||
f"\n[bold cyan]Phase 2: Tagging and releasing {version}[/bold cyan]"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
console.print(f"\n[bold cyan]Phase 2: Tagging and releasing {version}[/bold cyan]")
|
||||
|
||||
try:
|
||||
tag_name = version
|
||||
|
||||
if not dry_run:
|
||||
@@ -1986,57 +1962,22 @@ def release(
|
||||
|
||||
if not dry_run:
|
||||
_create_tag_and_release(tag_name, release_notes, is_prerelease)
|
||||
except BaseException as e:
|
||||
_print_release_error(e)
|
||||
_resume_hint(
|
||||
"Phase 2 failed before PyPI publish. The bump PR is already merged.\n"
|
||||
"Fix the issue, then resume with:\n\n"
|
||||
" devtools tag"
|
||||
f"\n\nAfter tagging, publish to PyPI and update deployment test:\n\n"
|
||||
f" gh workflow run publish.yml -f release_tag={version}"
|
||||
f"{enterprise_hint}"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
if not dry_run:
|
||||
_trigger_pypi_publish(tag_name, wait=True)
|
||||
except BaseException as e:
|
||||
_print_release_error(e)
|
||||
_resume_hint(
|
||||
f"Phase 2 failed at PyPI publish. Tag and GitHub release already exist.\n"
|
||||
f"Retry PyPI publish manually:\n\n"
|
||||
f" gh workflow run publish.yml -f release_tag={version}"
|
||||
f"{enterprise_hint}"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
if not dry_run:
|
||||
_update_deployment_test_repo(version, is_prerelease)
|
||||
except BaseException as e:
|
||||
_print_release_error(e)
|
||||
_resume_hint(
|
||||
f"Phase 2 failed updating deployment test repo. "
|
||||
f"Tag, release, and PyPI are done.\n"
|
||||
f"Fix the issue and update {_DEPLOYMENT_TEST_REPO} manually."
|
||||
f"{enterprise_hint}"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
if not skip_enterprise:
|
||||
try:
|
||||
if not skip_enterprise:
|
||||
_release_enterprise(version, is_prerelease, dry_run)
|
||||
except BaseException as e:
|
||||
_print_release_error(e)
|
||||
_resume_hint(
|
||||
f"Phase 3 (enterprise) failed. Phases 1 & 2 completed successfully.\n"
|
||||
f"Fix the issue, then resume:\n\n"
|
||||
f" devtools release {version} --skip-to-enterprise"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
console.print(f"\n[green]✓[/green] Release [bold]{version}[/bold] complete!")
|
||||
console.print(f"\n[green]✓[/green] Release [bold]{version}[/bold] complete!")
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
console.print(f"[red]Error running command:[/red] {e}")
|
||||
if e.stderr:
|
||||
console.print(e.stderr)
|
||||
sys.exit(1)
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error:[/red] {e}")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
cli.add_command(bump)
|
||||
|
||||
@@ -12,7 +12,7 @@ dev = [
|
||||
"mypy==1.19.1",
|
||||
"pre-commit==4.5.1",
|
||||
"bandit==1.9.2",
|
||||
"pytest==9.0.3",
|
||||
"pytest==8.4.2",
|
||||
"pytest-asyncio==1.3.0",
|
||||
"pytest-subprocess==1.5.3",
|
||||
"vcrpy==7.0.0", # pinned, less versions break pytest-recording
|
||||
@@ -20,7 +20,7 @@ dev = [
|
||||
"pytest-randomly==4.0.1",
|
||||
"pytest-timeout==2.4.0",
|
||||
"pytest-xdist==3.8.0",
|
||||
"pytest-split==0.11.0",
|
||||
"pytest-split==0.10.0",
|
||||
"types-requests~=2.31.0.6",
|
||||
"types-pyyaml==6.0.*",
|
||||
"types-regex==2026.1.15.*",
|
||||
@@ -162,7 +162,7 @@ info = "Commits must follow Conventional Commits 1.0.0."
|
||||
|
||||
|
||||
[tool.uv]
|
||||
exclude-newer = "3 days"
|
||||
exclude-newer = "2026-04-10" # pinned for CVE-2026-39892; restore to "3 days" after 2026-04-11
|
||||
|
||||
# composio-core pins rich<14 but textual requires rich>=14.
|
||||
# onnxruntime 1.24+ dropped Python 3.10 wheels; cap it so qdrant[fastembed] resolves on 3.10.
|
||||
@@ -170,8 +170,6 @@ exclude-newer = "3 days"
|
||||
# langchain-core <1.2.28 has GHSA-926x-3r5x-gfhw (incomplete f-string validation).
|
||||
# transformers 4.57.6 has CVE-2026-1839; force 5.4+ (docling 2.84 allows huggingface-hub>=1).
|
||||
# cryptography 46.0.6 has CVE-2026-39892; force 46.0.7+.
|
||||
# pypdf <6.10.0 has CVE-2026-40260; force 6.10.0+.
|
||||
# uv <0.11.6 has GHSA-pjjw-68hj-v9mw; force 0.11.6+.
|
||||
override-dependencies = [
|
||||
"rich>=13.7.1",
|
||||
"onnxruntime<1.24; python_version < '3.11'",
|
||||
@@ -180,8 +178,6 @@ override-dependencies = [
|
||||
"urllib3>=2.6.3",
|
||||
"transformers>=5.4.0; python_version >= '3.10'",
|
||||
"cryptography>=46.0.7",
|
||||
"pypdf>=6.10.0,<7",
|
||||
"uv>=0.11.6,<1",
|
||||
]
|
||||
|
||||
[tool.uv.workspace]
|
||||
|
||||
81
uv.lock
generated
81
uv.lock
generated
@@ -13,8 +13,7 @@ resolution-markers = [
|
||||
]
|
||||
|
||||
[options]
|
||||
exclude-newer = "2026-04-10T18:30:59.748668Z"
|
||||
exclude-newer-span = "P3D"
|
||||
exclude-newer = "2026-04-10T16:00:00Z"
|
||||
|
||||
[manifest]
|
||||
members = [
|
||||
@@ -28,11 +27,9 @@ overrides = [
|
||||
{ name = "langchain-core", specifier = ">=1.2.28,<2" },
|
||||
{ name = "onnxruntime", marker = "python_full_version < '3.11'", specifier = "<1.24" },
|
||||
{ name = "pillow", specifier = ">=12.1.1" },
|
||||
{ name = "pypdf", specifier = ">=6.10.0,<7" },
|
||||
{ name = "rich", specifier = ">=13.7.1" },
|
||||
{ name = "transformers", marker = "python_full_version >= '3.10'", specifier = ">=5.4.0" },
|
||||
{ name = "urllib3", specifier = ">=2.6.3" },
|
||||
{ name = "uv", specifier = ">=0.11.6,<1" },
|
||||
]
|
||||
|
||||
[manifest.dependency-groups]
|
||||
@@ -43,11 +40,11 @@ dev = [
|
||||
{ name = "mypy", specifier = "==1.19.1" },
|
||||
{ name = "pip-audit", specifier = "==2.9.0" },
|
||||
{ name = "pre-commit", specifier = "==4.5.1" },
|
||||
{ name = "pytest", specifier = "==9.0.3" },
|
||||
{ name = "pytest", specifier = "==8.4.2" },
|
||||
{ name = "pytest-asyncio", specifier = "==1.3.0" },
|
||||
{ name = "pytest-randomly", specifier = "==4.0.1" },
|
||||
{ name = "pytest-recording", specifier = "==0.13.4" },
|
||||
{ name = "pytest-split", specifier = "==0.11.0" },
|
||||
{ name = "pytest-split", specifier = "==0.10.0" },
|
||||
{ name = "pytest-subprocess", specifier = "==1.5.3" },
|
||||
{ name = "pytest-timeout", specifier = "==2.4.0" },
|
||||
{ name = "pytest-xdist", specifier = "==3.8.0" },
|
||||
@@ -1355,7 +1352,7 @@ requires-dist = [
|
||||
{ name = "litellm", marker = "extra == 'litellm'", specifier = "~=1.83.0" },
|
||||
{ name = "mcp", specifier = "~=1.26.0" },
|
||||
{ name = "mem0ai", marker = "extra == 'mem0'", specifier = "~=0.1.94" },
|
||||
{ name = "openai", specifier = ">=2.0.0,<3" },
|
||||
{ name = "openai", specifier = ">=1.83.0,<3" },
|
||||
{ name = "openpyxl", specifier = "~=3.1.5" },
|
||||
{ name = "openpyxl", marker = "extra == 'openpyxl'", specifier = "~=3.1.5" },
|
||||
{ name = "opentelemetry-api", specifier = "~=1.34.0" },
|
||||
@@ -1377,7 +1374,7 @@ requires-dist = [
|
||||
{ name = "tokenizers", specifier = ">=0.21,<1" },
|
||||
{ name = "tomli", specifier = "~=2.0.2" },
|
||||
{ name = "tomli-w", specifier = "~=1.1.0" },
|
||||
{ name = "uv", specifier = "~=0.11.6" },
|
||||
{ name = "uv", specifier = "~=0.9.13" },
|
||||
{ name = "voyageai", marker = "extra == 'voyageai'", specifier = "~=0.3.5" },
|
||||
]
|
||||
provides-extras = ["a2a", "anthropic", "aws", "azure-ai-inference", "bedrock", "docling", "embeddings", "file-processing", "google-genai", "litellm", "mem0", "openpyxl", "pandas", "qdrant", "qdrant-edge", "tools", "voyageai", "watson"]
|
||||
@@ -1423,7 +1420,7 @@ requires-dist = [
|
||||
{ name = "aiofiles", specifier = "~=24.1.0" },
|
||||
{ name = "av", specifier = "~=13.0.0" },
|
||||
{ name = "pillow", specifier = "~=12.1.1" },
|
||||
{ name = "pypdf", specifier = "~=6.10.0" },
|
||||
{ name = "pypdf", specifier = "~=6.9.1" },
|
||||
{ name = "python-magic", specifier = ">=0.4.27" },
|
||||
{ name = "tinytag", specifier = "~=2.2.1" },
|
||||
]
|
||||
@@ -1595,7 +1592,7 @@ requires-dist = [
|
||||
{ name = "python-docx", marker = "extra == 'rag'", specifier = ">=1.1.0" },
|
||||
{ name = "pytube", specifier = "~=15.0.0" },
|
||||
{ name = "qdrant-client", marker = "extra == 'qdrant-client'", specifier = ">=1.12.1" },
|
||||
{ name = "requests", specifier = ">=2.33.0,<3" },
|
||||
{ name = "requests", specifier = "~=2.32.5" },
|
||||
{ name = "scrapegraph-py", marker = "extra == 'scrapegraph-py'", specifier = ">=1.9.0" },
|
||||
{ name = "scrapfly-sdk", marker = "extra == 'scrapfly-sdk'", specifier = ">=0.8.19" },
|
||||
{ name = "selenium", marker = "extra == 'selenium'", specifier = ">=4.27.1" },
|
||||
@@ -6727,14 +6724,14 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "pypdf"
|
||||
version = "6.10.0"
|
||||
version = "6.9.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "typing-extensions", marker = "python_full_version < '3.11'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/b8/9f/ca96abf18683ca12602065e4ed2bec9050b672c87d317f1079abc7b6d993/pypdf-6.10.0.tar.gz", hash = "sha256:4c5a48ba258c37024ec2505f7e8fd858525f5502784a2e1c8d415604af29f6ef", size = 5314833, upload-time = "2026-04-10T09:34:57.102Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/31/83/691bdb309306232362503083cb15777491045dd54f45393a317dc7d8082f/pypdf-6.9.2.tar.gz", hash = "sha256:7f850faf2b0d4ab936582c05da32c52214c2b089d61a316627b5bfb5b0dab46c", size = 5311837, upload-time = "2026-03-23T14:53:27.983Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/55/f2/7ebe366f633f30a6ad105f650f44f24f98cb1335c4157d21ae47138b3482/pypdf-6.10.0-py3-none-any.whl", hash = "sha256:90005e959e1596c6e6c84c8b0ad383285b3e17011751cedd17f2ce8fcdfc86de", size = 334459, upload-time = "2026-04-10T09:34:54.966Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/7e/c85f41243086a8fe5d1baeba527cb26a1918158a565932b41e0f7c0b32e9/pypdf-6.9.2-py3-none-any.whl", hash = "sha256:662cf29bcb419a36a1365232449624ab40b7c2d0cfc28e54f42eeecd1fd7e844", size = 333744, upload-time = "2026-03-23T14:53:26.573Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6817,7 +6814,7 @@ sdist = { url = "https://files.pythonhosted.org/packages/12/a0/d0638470df605ce26
|
||||
|
||||
[[package]]
|
||||
name = "pytest"
|
||||
version = "9.0.3"
|
||||
version = "8.4.2"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "colorama", marker = "sys_platform == 'win32'" },
|
||||
@@ -6828,9 +6825,9 @@ dependencies = [
|
||||
{ name = "pygments" },
|
||||
{ name = "tomli", marker = "python_full_version < '3.11'" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/7d/0d/549bd94f1a0a402dc8cf64563a117c0f3765662e2e668477624baeec44d5/pytest-9.0.3.tar.gz", hash = "sha256:b86ada508af81d19edeb213c681b1d48246c1a91d304c6c81a427674c17eb91c", size = 1572165, upload-time = "2026-04-07T17:16:18.027Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/a3/5c/00a0e072241553e1a7496d638deababa67c5058571567b92a7eaa258397c/pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01", size = 1519618, upload-time = "2025-09-04T14:34:22.711Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d4/24/a372aaf5c9b7208e7112038812994107bc65a84cd00e0354a88c2c77a617/pytest-9.0.3-py3-none-any.whl", hash = "sha256:2c5efc453d45394fdd706ade797c0a81091eccd1d6e4bccfcd476e2b8e0ab5d9", size = 375249, upload-time = "2026-04-07T17:16:16.13Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6874,14 +6871,14 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "pytest-split"
|
||||
version = "0.11.0"
|
||||
version = "0.10.0"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "pytest" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/2f/16/8af4c5f2ceb3640bb1f78dfdf5c184556b10dfe9369feaaad7ff1c13f329/pytest_split-0.11.0.tar.gz", hash = "sha256:8ebdb29cc72cc962e8eb1ec07db1eeb98ab25e215ed8e3216f6b9fc7ce0ec2b5", size = 13421, upload-time = "2026-02-03T09:14:31.469Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/46/d7/e30ba44adf83f15aee3f636daea54efadf735769edc0f0a7d98163f61038/pytest_split-0.10.0.tar.gz", hash = "sha256:adf80ba9fef7be89500d571e705b4f963dfa05038edf35e4925817e6b34ea66f", size = 13903, upload-time = "2024-10-16T15:45:19.783Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/a1/d4423657caaa8be9b31e491592b49cebdcfd434d3e74512ce71f6ec39905/pytest_split-0.11.0-py3-none-any.whl", hash = "sha256:899d7c0f5730da91e2daf283860eb73b503259cb416851a65599368849c7f382", size = 11911, upload-time = "2026-02-03T09:14:33.708Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/a7/cad88e9c1109a5c2a320d608daa32e5ee008ccbc766310f54b1cd6b3d69c/pytest_split-0.10.0-py3-none-any.whl", hash = "sha256:466096b086a7147bcd423c6e6c2e57fc62af1c5ea2e256b4ed50fc030fc3dddc", size = 11961, upload-time = "2024-10-16T15:45:18.289Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -7372,7 +7369,7 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "requests"
|
||||
version = "2.33.1"
|
||||
version = "2.32.5"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
dependencies = [
|
||||
{ name = "certifi" },
|
||||
@@ -7380,9 +7377,9 @@ dependencies = [
|
||||
{ name = "idna" },
|
||||
{ name = "urllib3" },
|
||||
]
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/5f/a4/98b9c7c6428a668bf7e42ebb7c79d576a1c3c1e3ae2d47e674b468388871/requests-2.33.1.tar.gz", hash = "sha256:18817f8c57c6263968bc123d237e3b8b08ac046f5456bd1e307ee8f4250d3517", size = 134120, upload-time = "2026-03-30T16:09:15.531Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/d7/8e/7540e8a2036f79a125c1d2ebadf69ed7901608859186c856fa0388ef4197/requests-2.33.1-py3-none-any.whl", hash = "sha256:4e6d1ef462f3626a1f0a0a9c42dd93c63bad33f9f1c1937509b8c5c8718ab56a", size = 64947, upload-time = "2026-03-30T16:09:13.83Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -8979,28 +8976,28 @@ wheels = [
|
||||
|
||||
[[package]]
|
||||
name = "uv"
|
||||
version = "0.11.6"
|
||||
version = "0.9.30"
|
||||
source = { registry = "https://pypi.org/simple" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/dd/f3/8aceeab67ea69805293ab290e7ca8cc1b61a064d28b8a35c76d8eba063dd/uv-0.11.6.tar.gz", hash = "sha256:e3b21b7e80024c95ff339fcd147ac6fc3dd98d3613c9d45d3a1f4fd1057f127b", size = 4073298, upload-time = "2026-04-09T12:09:01.738Z" }
|
||||
sdist = { url = "https://files.pythonhosted.org/packages/4e/a0/63cea38fe839fb89592728b91928ee6d15705f1376a7940fee5bbc77fea0/uv-0.9.30.tar.gz", hash = "sha256:03ebd4b22769e0a8d825fa09d038e31cbab5d3d48edf755971cb0cec7920ab95", size = 3846526, upload-time = "2026-02-04T21:45:37.58Z" }
|
||||
wheels = [
|
||||
{ url = "https://files.pythonhosted.org/packages/1f/fe/4b61a3d5ad9d02e8a4405026ccd43593d7044598e0fa47d892d4dafe44c9/uv-0.11.6-py3-none-linux_armv6l.whl", hash = "sha256:ada04dcf89ddea5b69d27ac9cdc5ef575a82f90a209a1392e930de504b2321d6", size = 23780079, upload-time = "2026-04-09T12:08:56.609Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/52/db/d27519a9e1a5ffee9d71af1a811ad0e19ce7ab9ae815453bef39dd479389/uv-0.11.6-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5be013888420f96879c6e0d3081e7bcf51b539b034a01777041934457dfbedf3", size = 23214721, upload-time = "2026-04-09T12:09:32.228Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a6/8f/4399fa8b882bd7e0efffc829f73ab24d117d490a93e6bc7104a50282b854/uv-0.11.6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:ffa5dc1cbb52bdce3b8447e83d1601a57ad4da6b523d77d4b47366db8b1ceb18", size = 21750109, upload-time = "2026-04-09T12:09:24.357Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/32/07/5a12944c31c3dda253632da7a363edddb869ed47839d4d92a2dc5f546c93/uv-0.11.6-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:bfb107b4dade1d2c9e572992b06992d51dd5f2136eb8ceee9e62dd124289e825", size = 23551146, upload-time = "2026-04-09T12:09:10.439Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/79/5b/2ec8b0af80acd1016ed596baf205ddc77b19ece288473b01926c4a9cf6db/uv-0.11.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl", hash = "sha256:9e2fe7ce12161d8016b7deb1eaad7905a76ff7afec13383333ca75e0c4b5425d", size = 23331192, upload-time = "2026-04-09T12:09:34.792Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/62/7d/eea35935f2112b21c296a3e42645f3e4b1aa8bcd34dcf13345fbd55134b7/uv-0.11.6-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7ed9c6f70c25e8dfeedddf4eddaf14d353f5e6b0eb43da9a14d3a1033d51d915", size = 23337686, upload-time = "2026-04-09T12:09:18.522Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/21/47/2584f5ab618f6ebe9bdefb2f765f2ca8540e9d739667606a916b35449eec/uv-0.11.6-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d68a013e609cebf82077cbeeb0809ed5e205257814273bfd31e02fc0353bbfc2", size = 25008139, upload-time = "2026-04-09T12:09:03.983Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/95/81/497ae5c1d36355b56b97dc59f550c7e89d0291c163a3f203c6f341dff195/uv-0.11.6-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93f736dddca03dae732c6fdea177328d3bc4bf137c75248f3d433c57416a4311", size = 25712458, upload-time = "2026-04-09T12:09:07.598Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3c/1c/74083238e4fab2672b63575b9008f1ea418b02a714bcfcf017f4f6a309b6/uv-0.11.6-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e96a66abe53fced0e3389008b8d2eff8278cfa8bb545d75631ae8ceb9c929aba", size = 24915507, upload-time = "2026-04-09T12:08:50.892Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/5a/ee/e14fe10ba455a823ed18233f12de6699a601890905420b5c504abf115116/uv-0.11.6-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b096311b2743b228df911a19532b3f18fa420bf9530547aecd6a8e04bbfaccd", size = 24971011, upload-time = "2026-04-09T12:08:54.016Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3c/a1/7b9c83eaadf98e343317ff6384a7227a4855afd02cdaf9696bcc71ee6155/uv-0.11.6-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:904d537b4a6e798015b4a64ff5622023bd4601b43b6cd1e5f423d63471f5e948", size = 23640234, upload-time = "2026-04-09T12:09:15.735Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/d6/51/75ccdd23e76ff1703b70eb82881cd5b4d2a954c9679f8ef7e0136ef2cfab/uv-0.11.6-py3-none-manylinux_2_31_riscv64.musllinux_1_1_riscv64.whl", hash = "sha256:4ed8150c26b5e319381d75ae2ce6aba1e9c65888f4850f4e3b3fa839953c90a5", size = 24452664, upload-time = "2026-04-09T12:09:26.875Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/4d/86/ace80fe47d8d48b5e3b5aee0b6eb1a49deaacc2313782870250b3faa36f5/uv-0.11.6-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:1c9218c8d4ac35ca6e617fb0951cc0ab2d907c91a6aea2617de0a5494cf162c0", size = 24494599, upload-time = "2026-04-09T12:09:37.368Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/05/2d/4b642669b56648194f026de79bc992cbfc3ac2318b0a8d435f3c284934e8/uv-0.11.6-py3-none-musllinux_1_1_i686.whl", hash = "sha256:9e211c83cc890c569b86a4183fcf5f8b6f0c7adc33a839b699a98d30f1310d3a", size = 24159150, upload-time = "2026-04-09T12:09:13.17Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/ae/24/7eecd76fe983a74fed1fc700a14882e70c4e857f1d562a9f2303d4286c12/uv-0.11.6-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:d2a1d2089afdf117ad19a4c1dd36b8189c00ae1ad4135d3bfbfced82342595cf", size = 25164324, upload-time = "2026-04-09T12:08:59.56Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/27/e0/bbd4ba7c2e5067bbba617d87d306ec146889edaeeaa2081d3e122178ca08/uv-0.11.6-py3-none-win32.whl", hash = "sha256:6e8344f38fa29f85dcfd3e62dc35a700d2448f8e90381077ef393438dcd5012e", size = 22865693, upload-time = "2026-04-09T12:09:21.415Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a5/33/1983ce113c538a856f2d620d16e39691962ecceef091a84086c5785e32e5/uv-0.11.6-py3-none-win_amd64.whl", hash = "sha256:a28bea69c1186303d1200f155c7a28c449f8a4431e458fcf89360cc7ef546e40", size = 25371258, upload-time = "2026-04-09T12:09:40.52Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/35/01/be0873f44b9c9bc250fcbf263367fcfc1f59feab996355bcb6b52fff080d/uv-0.11.6-py3-none-win_arm64.whl", hash = "sha256:a78f6d64b9950e24061bc7ec7f15ff8089ad7f5a976e7b65fcadce58fe02f613", size = 23869585, upload-time = "2026-04-09T12:09:29.425Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a3/3c/71be72f125f0035348b415468559cc3b335ec219376d17a3d242d2bd9b23/uv-0.9.30-py3-none-linux_armv6l.whl", hash = "sha256:a5467dddae1cd5f4e093f433c0f0d9a0df679b92696273485ec91bbb5a8620e6", size = 21927585, upload-time = "2026-02-04T21:46:14.935Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0f/fd/8070b5423a77d4058d14e48a970aa075762bbff4c812dda3bb3171543e44/uv-0.9.30-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6ec38ae29aa83a37c6e50331707eac8ecc90cf2b356d60ea6382a94de14973be", size = 21050392, upload-time = "2026-02-04T21:45:55.649Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/42/5f/3ccc9415ef62969ed01829572338ea7bdf4c5cf1ffb9edc1f8cb91b571f3/uv-0.9.30-py3-none-macosx_11_0_arm64.whl", hash = "sha256:777ecd117cf1d8d6bb07de8c9b7f6c5f3e802415b926cf059d3423699732eb8c", size = 19817085, upload-time = "2026-02-04T21:45:40.881Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/8b/3f/76b44e2a224f4c4a8816fc92686ef6d4c2656bc5fc9d4f673816162c994d/uv-0.9.30-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.musllinux_1_1_aarch64.whl", hash = "sha256:93049ba3c41fa2cc38b467cb78ef61b2ddedca34b6be924a5481d7750c8111c6", size = 21620537, upload-time = "2026-02-04T21:45:47.846Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/60/2a/50f7e8c6d532af8dd327f77bdc75ce4652322ac34f5e29f79a8e04ea3cc8/uv-0.9.30-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.musllinux_1_1_armv7l.whl", hash = "sha256:f295604fee71224ebe2685a0f1f4ff7a45c77211a60bd57133a4a02056d7c775", size = 21550855, upload-time = "2026-02-04T21:46:26.269Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/0e/10/f823d4af1125fae559194b356757dc7d4a8ac79d10d11db32c2d4c9e2f63/uv-0.9.30-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2faf84e1f3b6fc347a34c07f1291d11acf000b0dd537a61d541020f22b17ccd9", size = 21516576, upload-time = "2026-02-04T21:46:03.494Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/91/f3/64b02db11f38226ed34458c7fbdb6f16b6d4fd951de24c3e51acf02b30f8/uv-0.9.30-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0b3b3700ecf64a09a07fd04d10ec35f0973ec15595d38bbafaa0318252f7e31f", size = 22718097, upload-time = "2026-02-04T21:45:51.875Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/28/21/a48d1872260f04a68bb5177b0f62ddef62ab892d544ed1922f2d19fd2b00/uv-0.9.30-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b176fc2937937dd81820445cb7e7e2e3cd1009a003c512f55fa0ae10064c8a38", size = 24107844, upload-time = "2026-02-04T21:46:19.032Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/1c/c6/d7e5559bfe1ab7a215a7ad49c58c8a5701728f2473f7f436ef00b4664e88/uv-0.9.30-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:180e8070b8c438b9a3fb3fde8a37b365f85c3c06e17090f555dc68fdebd73333", size = 23685378, upload-time = "2026-02-04T21:46:07.166Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/a8/bf/b937bbd50d14c6286e353fd4c7bdc09b75f6b3a26bd4e2f3357e99891f28/uv-0.9.30-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4125a9aa2a751e1589728f6365cfe204d1be41499148ead44b6180b7df576f27", size = 22848471, upload-time = "2026-02-04T21:45:18.728Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/6a/57/12a67c569e69b71508ad669adad266221f0b1d374be88eaf60109f551354/uv-0.9.30-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4366dd740ac9ad3ec50a58868a955b032493bb7d7e6ed368289e6ced8bbc70f3", size = 22774258, upload-time = "2026-02-04T21:46:10.798Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/b8/a26cc64685dddb9fb13f14c3dc1b12009f800083405f854f84eb8c86b494/uv-0.9.30-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:33e50f208e01a0c20b3c5f87d453356a5cbcfd68f19e47a28b274cd45618881c", size = 21699573, upload-time = "2026-02-04T21:45:44.365Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/c8/59/995af0c5f0740f8acb30468e720269e720352df1d204e82c2d52d9a8c586/uv-0.9.30-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:5e7a6fa7a3549ce893cf91fe4b06629e3e594fc1dca0a6050aba2ea08722e964", size = 22460799, upload-time = "2026-02-04T21:45:26.658Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/bb/0b/6affe815ecbaebf38b35d6230fbed2f44708c67d5dd5720f81f2ec8f96ff/uv-0.9.30-py3-none-musllinux_1_1_i686.whl", hash = "sha256:62d7e408d41e392b55ffa4cf9b07f7bbd8b04e0929258a42e19716c221ac0590", size = 22001777, upload-time = "2026-02-04T21:45:34.656Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f3/b6/47a515171c891b0d29f8e90c8a1c0e233e4813c95a011799605cfe04c74c/uv-0.9.30-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:6dc65c24f5b9cdc78300fa6631368d3106e260bbffa66fb1e831a318374da2df", size = 22968416, upload-time = "2026-02-04T21:45:22.863Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/3d/3a/c1df8615385138bb7c43342586431ca32b77466c5fb086ac0ed14ab6ca28/uv-0.9.30-py3-none-win32.whl", hash = "sha256:74e94c65d578657db94a753d41763d0364e5468ec0d368fb9ac8ddab0fb6e21f", size = 20889232, upload-time = "2026-02-04T21:46:22.617Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/f2/a8/e8761c8414a880d70223723946576069e042765475f73b4436d78b865dba/uv-0.9.30-py3-none-win_amd64.whl", hash = "sha256:88a2190810684830a1ba4bb1cf8fb06b0308988a1589559404259d295260891c", size = 23432208, upload-time = "2026-02-04T21:45:30.85Z" },
|
||||
{ url = "https://files.pythonhosted.org/packages/49/e8/6f2ebab941ec559f97110bbbae1279cd0333d6bc352b55f6fa3fefb020d9/uv-0.9.30-py3-none-win_arm64.whl", hash = "sha256:7fde83a5b5ea027315223c33c30a1ab2f2186910b933d091a1b7652da879e230", size = 21887273, upload-time = "2026-02-04T21:45:59.787Z" },
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
Reference in New Issue
Block a user