Compare commits

...

10 Commits

Author SHA1 Message Date
lorenzejay
b8eb7dd294 fix import 2025-12-21 22:38:24 -08:00
lorenzejay
f39379ddd5 refactor: replace AgentExecutionErrorEvent with TaskFailedEvent for LLM call handling
- Updated Agent class to emit TaskFailedEvent instead of AgentExecutionErrorEvent when LLM calls are blocked.
- Removed unnecessary LLMCallBlockedError handling from CrewAgentExecutor.
- Enhanced test cases to ensure proper exception handling for blocked LLM calls.
- Improved code clarity and consistency in event handling across agent execution.
2025-12-21 22:05:11 -08:00
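
A minimal sketch of the crew-level behavior this commit describes, mirroring the updated test further down in this compare (imports follow that test file): when a before_llm_call hook returns False during task execution, the agent emits a TaskFailedEvent and the LLMCallBlockedError propagates out of kickoff().

```python
import pytest

from crewai import Agent, Crew, Task
from crewai.hooks import LLMCallBlockedError, LLMCallHookContext
from crewai.hooks.llm_hooks import register_before_llm_call_hook, unregister_before_llm_call_hook


def blocking_hook(context: LLMCallHookContext) -> bool:
    return False  # block every LLM call


register_before_llm_call_hook(blocking_hook)
try:
    agent = Agent(role="Test Agent", goal="Answer questions", backstory="You are a test agent")
    task = Task(description="Say hello", expected_output="A greeting", agent=agent)
    with pytest.raises(LLMCallBlockedError):
        # A TaskFailedEvent is emitted before the exception is re-raised.
        Crew(agents=[agent], tasks=[task]).kickoff()
finally:
    unregister_before_llm_call_hook(blocking_hook)
```
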
lorenzejay
05c42791c9 feat: implement LLMCallBlockedError handling in LLM and Agent classes
- Introduced LLMCallBlockedError to manage blocked LLM calls from before_llm_call hooks.
- Updated LLM class to raise LLMCallBlockedError instead of returning a boolean.
- Enhanced Agent class to emit events and handle LLMCallBlockedError during task execution.
- Added error handling in CrewAgentExecutor and agent utilities to gracefully manage blocked calls.
- Updated tests to verify behavior when LLM calls are blocked.
2025-12-21 21:48:26 -08:00
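
The direct-call path, again following the new tests in this compare: LLM.call() now raises LLMCallBlockedError when a before_llm_call hook returns False, instead of the caller raising a plain ValueError after a boolean return.

```python
from crewai.hooks import LLMCallBlockedError, LLMCallHookContext
from crewai.hooks.llm_hooks import register_before_llm_call_hook, unregister_before_llm_call_hook
from crewai.llm import LLM


def blocking_hook(context: LLMCallHookContext) -> bool:
    return False  # ask the framework to block this call


register_before_llm_call_hook(blocking_hook)
try:
    llm = LLM(model="gpt-4o-mini")
    try:
        llm.call([{"role": "user", "content": "Say hello"}])
    except LLMCallBlockedError as e:
        print(f"Blocked before any provider request was made: {e}")
finally:
    unregister_before_llm_call_hook(blocking_hook)
```
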
Heitor Carvalho
be70a04153 fix: correct error fetching for workos login polling (#4124)
2025-12-19 20:00:26 -03:00
Greyson LaLonde
0c359f4df8 feat: bump versions to 1.7.2
2025-12-19 15:47:00 -05:00
Lucas Gomide
fe288dbe73 Resolving some connection issues (#4129)
* fix: use CREWAI_PLUS_URL env var in precedence over PlusAPI configured value

* feat: bypass TLS certificate verification when calling platform

* test: fix test
2025-12-19 10:15:20 -05:00
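
A small sketch of the new base-URL precedence (see the PlusAPI diff and the added test below): the CREWAI_PLUS_URL environment variable now wins over the configured enterprise_base_url, which in turn wins over the built-in default.

```python
import os

from crewai.cli.plus_api import PlusAPI

# Env var takes precedence over the Settings value and the default URL.
os.environ["CREWAI_PLUS_URL"] = "https://custom-url-from-env.com"
api = PlusAPI("test_key")
assert api.base_url == "https://custom-url-from-env.com"
```
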
Heitor Carvalho
dc63bc2319 chore: remove CREWAI_BASE_URL and fetch url from settings instead
2025-12-18 15:41:38 -03:00
Greyson LaLonde
8d0effafec chore: add commitizen pre-commit hook
2025-12-17 15:49:24 -05:00
Greyson LaLonde
1cdbe79b34 chore: add deployment action, trigger for releases 2025-12-17 08:40:14 -05:00
Lorenze Jay
84328d9311 fixed api-reference/status docs page (#4109)
2025-12-16 15:31:30 -08:00
43 changed files with 714 additions and 269 deletions

View File

@@ -1,9 +1,14 @@
name: Publish to PyPI
on:
release:
types: [ published ]
repository_dispatch:
types: [deployment-tests-passed]
workflow_dispatch:
inputs:
release_tag:
description: 'Release tag to publish'
required: false
type: string
jobs:
build:
@@ -12,7 +17,21 @@ jobs:
permissions:
contents: read
steps:
- name: Determine release tag
id: release
run: |
# Priority: workflow_dispatch input > repository_dispatch payload > default branch
if [ -n "${{ inputs.release_tag }}" ]; then
echo "tag=${{ inputs.release_tag }}" >> $GITHUB_OUTPUT
elif [ -n "${{ github.event.client_payload.release_tag }}" ]; then
echo "tag=${{ github.event.client_payload.release_tag }}" >> $GITHUB_OUTPUT
else
echo "tag=" >> $GITHUB_OUTPUT
fi
- uses: actions/checkout@v4
with:
ref: ${{ steps.release.outputs.tag || github.ref }}
- name: Set up Python
uses: actions/setup-python@v5
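
For reference, the repository_dispatch event this workflow now listens for would be sent roughly as below. This is a hypothetical sketch using the standard GitHub REST API: OWNER/REPO and the token are placeholders; only the event_type and client_payload keys are taken from the workflows in this compare.

```python
import requests

response = requests.post(
    "https://api.github.com/repos/OWNER/REPO/dispatches",
    headers={
        "Authorization": "Bearer <github-token>",
        "Accept": "application/vnd.github+json",
    },
    json={
        "event_type": "deployment-tests-passed",
        "client_payload": {"release_tag": "v1.7.2"},
    },
    timeout=30,
)
response.raise_for_status()  # GitHub answers 204 No Content on success
```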

View File

@@ -0,0 +1,18 @@
name: Trigger Deployment Tests
on:
release:
types: [published]
jobs:
trigger:
name: Trigger deployment tests
runs-on: ubuntu-latest
steps:
- name: Trigger deployment tests
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.CREWAI_DEPLOYMENTS_PAT }}
repository: ${{ secrets.CREWAI_DEPLOYMENTS_REPOSITORY }}
event-type: crewai-release
client-payload: '{"release_tag": "${{ github.event.release.tag_name }}", "release_name": "${{ github.event.release.name }}"}'

View File

@@ -24,4 +24,10 @@ repos:
rev: 0.9.3
hooks:
- id: uv-lock
- repo: https://github.com/commitizen-tools/commitizen
rev: v4.10.1
hooks:
- id: commitizen
- id: commitizen-branch
stages: [ pre-push ]

View File

@@ -16,16 +16,17 @@ Welcome to the CrewAI AOP API reference. This API allows you to programmatically
Navigate to your crew's detail page in the CrewAI AOP dashboard and copy your Bearer Token from the Status tab.
</Step>
<Step title="Discover Required Inputs">
Use the `GET /inputs` endpoint to see what parameters your crew expects.
</Step>
<Step title="Discover Required Inputs">
Use the `GET /inputs` endpoint to see what parameters your crew expects.
</Step>
<Step title="Start a Crew Execution">
Call `POST /kickoff` with your inputs to start the crew execution and receive a `kickoff_id`.
</Step>
<Step title="Start a Crew Execution">
Call `POST /kickoff` with your inputs to start the crew execution and receive
a `kickoff_id`.
</Step>
<Step title="Monitor Progress">
Use `GET /status/{kickoff_id}` to check execution status and retrieve results.
Use `GET /{kickoff_id}/status` to check execution status and retrieve results.
</Step>
</Steps>
@@ -40,13 +41,14 @@ curl -H "Authorization: Bearer YOUR_CREW_TOKEN" \
### Token Types
| Token Type | Scope | Use Case |
|:-----------|:--------|:----------|
| **Bearer Token** | Organization-level access | Full crew operations, ideal for server-to-server integration |
| **User Bearer Token** | User-scoped access | Limited permissions, suitable for user-specific operations |
| Token Type | Scope | Use Case |
| :-------------------- | :------------------------ | :----------------------------------------------------------- |
| **Bearer Token** | Organization-level access | Full crew operations, ideal for server-to-server integration |
| **User Bearer Token** | User-scoped access | Limited permissions, suitable for user-specific operations |
<Tip>
You can find both token types in the Status tab of your crew's detail page in the CrewAI AOP dashboard.
You can find both token types in the Status tab of your crew's detail page in
the CrewAI AOP dashboard.
</Tip>
## Base URL
@@ -63,29 +65,33 @@ Replace `your-crew-name` with your actual crew's URL from the dashboard.
1. **Discovery**: Call `GET /inputs` to understand what your crew needs
2. **Execution**: Submit inputs via `POST /kickoff` to start processing
3. **Monitoring**: Poll `GET /status/{kickoff_id}` until completion
3. **Monitoring**: Poll `GET /{kickoff_id}/status` until completion
4. **Results**: Extract the final output from the completed response
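
An illustrative end-to-end client for that flow. The crew URL, token, and input names are placeholders taken from the spec's examples, the request body shape follows those examples, and the polling step uses the renamed `GET /{kickoff_id}/status` path.

```python
import time

import requests

BASE_URL = "https://your-actual-crew-name.crewai.com"
HEADERS = {"Authorization": "Bearer YOUR_CREW_TOKEN"}

# 1. Discovery
required = requests.get(f"{BASE_URL}/inputs", headers=HEADERS, timeout=30).json()
print("Required inputs:", required.get("inputs"))

# 2. Execution (input names are placeholders from the spec examples)
kickoff = requests.post(
    f"{BASE_URL}/kickoff",
    headers=HEADERS,
    json={"inputs": {"budget": "1000 USD", "interests": "hiking", "duration": "5 days", "age": "35"}},
    timeout=30,
).json()
kickoff_id = kickoff["kickoff_id"]

# 3. Monitoring - terminal states assumed from the ExecutionCompleted/ExecutionError schema names
while True:
    status = requests.get(f"{BASE_URL}/{kickoff_id}/status", headers=HEADERS, timeout=30).json()
    if status.get("status") in ("completed", "error"):
        break
    time.sleep(5)

# 4. Results
print(status)
```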
## Error Handling
The API uses standard HTTP status codes:
| Code | Meaning |
|------|:--------|
| `200` | Success |
| `400` | Bad Request - Invalid input format |
| `401` | Unauthorized - Invalid bearer token |
| `404` | Not Found - Resource doesn't exist |
| Code | Meaning |
| ----- | :----------------------------------------- |
| `200` | Success |
| `400` | Bad Request - Invalid input format |
| `401` | Unauthorized - Invalid bearer token |
| `404` | Not Found - Resource doesn't exist |
| `422` | Validation Error - Missing required inputs |
| `500` | Server Error - Contact support |
| `500` | Server Error - Contact support |
## Interactive Testing
<Info>
**Why no "Send" button?** Since each CrewAI AOP user has their own unique crew URL, we use **reference mode** instead of an interactive playground to avoid confusion. This shows you exactly what the requests should look like without non-functional send buttons.
**Why no "Send" button?** Since each CrewAI AOP user has their own unique crew
URL, we use **reference mode** instead of an interactive playground to avoid
confusion. This shows you exactly what the requests should look like without
non-functional send buttons.
</Info>
Each endpoint page shows you:
- ✅ **Exact request format** with all parameters
- ✅ **Response examples** for success and error cases
- ✅ **Code samples** in multiple languages (cURL, Python, JavaScript, etc.)
@@ -103,6 +109,7 @@ Each endpoint page shows you:
</CardGroup>
**Example workflow:**
1. **Copy this cURL example** from any endpoint page
2. **Replace `your-actual-crew-name.crewai.com`** with your real crew URL
3. **Replace the Bearer token** with your real token from the dashboard
@@ -111,10 +118,18 @@ Each endpoint page shows you:
## Need Help?
<CardGroup cols={2}>
<Card title="Enterprise Support" icon="headset" href="mailto:support@crewai.com">
<Card
title="Enterprise Support"
icon="headset"
href="mailto:support@crewai.com"
>
Get help with API integration and troubleshooting
</Card>
<Card title="Enterprise Dashboard" icon="chart-line" href="https://app.crewai.com">
<Card
title="Enterprise Dashboard"
icon="chart-line"
href="https://app.crewai.com"
>
Manage your crews and view execution logs
</Card>
</CardGroup>

View File

@@ -1,8 +1,6 @@
---
title: "GET /status/{kickoff_id}"
title: "GET /{kickoff_id}/status"
description: "Get execution status"
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
openapi: "/enterprise-api.en.yaml GET /{kickoff_id}/status"
mode: "wide"
---

View File

@@ -35,7 +35,7 @@ info:
1. **Discover inputs** using `GET /inputs`
2. **Start execution** using `POST /kickoff`
3. **Monitor progress** using `GET /status/{kickoff_id}`
3. **Monitor progress** using `GET /{kickoff_id}/status`
version: 1.0.0
contact:
name: CrewAI Support
@@ -63,7 +63,7 @@ paths:
Use this endpoint to discover what inputs you need to provide when starting a crew execution.
operationId: getRequiredInputs
responses:
'200':
"200":
description: Successfully retrieved required inputs
content:
application/json:
@@ -84,13 +84,21 @@ paths:
outreach_crew:
summary: Outreach crew inputs
value:
inputs: ["name", "title", "company", "industry", "our_product", "linkedin_url"]
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
$ref: '#/components/responses/NotFoundError'
'500':
$ref: '#/components/responses/ServerError'
inputs:
[
"name",
"title",
"company",
"industry",
"our_product",
"linkedin_url",
]
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
$ref: "#/components/responses/NotFoundError"
"500":
$ref: "#/components/responses/ServerError"
/kickoff:
post:
@@ -170,7 +178,7 @@ paths:
taskWebhookUrl: "https://api.example.com/webhooks/task"
crewWebhookUrl: "https://api.example.com/webhooks/crew"
responses:
'200':
"200":
description: Crew execution started successfully
content:
application/json:
@@ -182,24 +190,24 @@ paths:
format: uuid
description: Unique identifier for tracking this execution
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
'400':
"400":
description: Invalid request body or missing required inputs
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
'401':
$ref: '#/components/responses/UnauthorizedError'
'422':
$ref: "#/components/schemas/Error"
"401":
$ref: "#/components/responses/UnauthorizedError"
"422":
description: Validation error - ensure all required inputs are provided
content:
application/json:
schema:
$ref: '#/components/schemas/ValidationError'
'500':
$ref: '#/components/responses/ServerError'
$ref: "#/components/schemas/ValidationError"
"500":
$ref: "#/components/responses/ServerError"
/status/{kickoff_id}:
/{kickoff_id}/status:
get:
summary: Get Execution Status
description: |
@@ -222,15 +230,15 @@ paths:
format: uuid
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
responses:
'200':
"200":
description: Successfully retrieved execution status
content:
application/json:
schema:
oneOf:
- $ref: '#/components/schemas/ExecutionRunning'
- $ref: '#/components/schemas/ExecutionCompleted'
- $ref: '#/components/schemas/ExecutionError'
- $ref: "#/components/schemas/ExecutionRunning"
- $ref: "#/components/schemas/ExecutionCompleted"
- $ref: "#/components/schemas/ExecutionError"
examples:
running:
summary: Execution in progress
@@ -262,19 +270,19 @@ paths:
status: "error"
error: "Task execution failed: Invalid API key for external service"
execution_time: 23.1
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
description: Kickoff ID not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Execution not found"
message: "No execution found with ID: abcd1234-5678-90ef-ghij-klmnopqrstuv"
'500':
$ref: '#/components/responses/ServerError'
"500":
$ref: "#/components/responses/ServerError"
/resume:
post:
@@ -354,7 +362,7 @@ paths:
taskWebhookUrl: "https://api.example.com/webhooks/task"
crewWebhookUrl: "https://api.example.com/webhooks/crew"
responses:
'200':
"200":
description: Execution resumed successfully
content:
application/json:
@@ -381,28 +389,28 @@ paths:
value:
status: "retrying"
message: "Task will be retried with your feedback"
'400':
"400":
description: Invalid request body or execution not in pending state
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Invalid Request"
message: "Execution is not in pending human input state"
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
description: Execution ID or Task ID not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Not Found"
message: "Execution ID not found"
'500':
$ref: '#/components/responses/ServerError'
"500":
$ref: "#/components/responses/ServerError"
components:
securitySchemes:
@@ -458,7 +466,7 @@ components:
tasks:
type: array
items:
$ref: '#/components/schemas/TaskResult'
$ref: "#/components/schemas/TaskResult"
execution_time:
type: number
description: Total execution time in seconds
@@ -536,7 +544,7 @@ components:
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Unauthorized"
message: "Invalid or missing bearer token"
@@ -546,7 +554,7 @@ components:
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Not Found"
message: "The requested resource was not found"
@@ -556,7 +564,7 @@ components:
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Internal Server Error"
message: "An unexpected error occurred"

View File

@@ -35,7 +35,7 @@ info:
1. **Discover inputs** using `GET /inputs`
2. **Start execution** using `POST /kickoff`
3. **Monitor progress** using `GET /status/{kickoff_id}`
3. **Monitor progress** using `GET /{kickoff_id}/status`
version: 1.0.0
contact:
name: CrewAI Support
@@ -63,7 +63,7 @@ paths:
Use this endpoint to discover what inputs you need to provide when starting a crew execution.
operationId: getRequiredInputs
responses:
'200':
"200":
description: Successfully retrieved required inputs
content:
application/json:
@@ -84,13 +84,21 @@ paths:
outreach_crew:
summary: Outreach crew inputs
value:
inputs: ["name", "title", "company", "industry", "our_product", "linkedin_url"]
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
$ref: '#/components/responses/NotFoundError'
'500':
$ref: '#/components/responses/ServerError'
inputs:
[
"name",
"title",
"company",
"industry",
"our_product",
"linkedin_url",
]
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
$ref: "#/components/responses/NotFoundError"
"500":
$ref: "#/components/responses/ServerError"
/kickoff:
post:
@@ -170,7 +178,7 @@ paths:
taskWebhookUrl: "https://api.example.com/webhooks/task"
crewWebhookUrl: "https://api.example.com/webhooks/crew"
responses:
'200':
"200":
description: Crew execution started successfully
content:
application/json:
@@ -182,24 +190,24 @@ paths:
format: uuid
description: Unique identifier for tracking this execution
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
'400':
"400":
description: Invalid request body or missing required inputs
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
'401':
$ref: '#/components/responses/UnauthorizedError'
'422':
$ref: "#/components/schemas/Error"
"401":
$ref: "#/components/responses/UnauthorizedError"
"422":
description: Validation error - ensure all required inputs are provided
content:
application/json:
schema:
$ref: '#/components/schemas/ValidationError'
'500':
$ref: '#/components/responses/ServerError'
$ref: "#/components/schemas/ValidationError"
"500":
$ref: "#/components/responses/ServerError"
/status/{kickoff_id}:
/{kickoff_id}/status:
get:
summary: Get Execution Status
description: |
@@ -222,15 +230,15 @@ paths:
format: uuid
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
responses:
'200':
"200":
description: Successfully retrieved execution status
content:
application/json:
schema:
oneOf:
- $ref: '#/components/schemas/ExecutionRunning'
- $ref: '#/components/schemas/ExecutionCompleted'
- $ref: '#/components/schemas/ExecutionError'
- $ref: "#/components/schemas/ExecutionRunning"
- $ref: "#/components/schemas/ExecutionCompleted"
- $ref: "#/components/schemas/ExecutionError"
examples:
running:
summary: Execution in progress
@@ -262,19 +270,19 @@ paths:
status: "error"
error: "Task execution failed: Invalid API key for external service"
execution_time: 23.1
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
description: Kickoff ID not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Execution not found"
message: "No execution found with ID: abcd1234-5678-90ef-ghij-klmnopqrstuv"
'500':
$ref: '#/components/responses/ServerError'
"500":
$ref: "#/components/responses/ServerError"
/resume:
post:
@@ -354,7 +362,7 @@ paths:
taskWebhookUrl: "https://api.example.com/webhooks/task"
crewWebhookUrl: "https://api.example.com/webhooks/crew"
responses:
'200':
"200":
description: Execution resumed successfully
content:
application/json:
@@ -381,28 +389,28 @@ paths:
value:
status: "retrying"
message: "Task will be retried with your feedback"
'400':
"400":
description: Invalid request body or execution not in pending state
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Invalid Request"
message: "Execution is not in pending human input state"
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
description: Execution ID or Task ID not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Not Found"
message: "Execution ID not found"
'500':
$ref: '#/components/responses/ServerError'
"500":
$ref: "#/components/responses/ServerError"
components:
securitySchemes:
@@ -458,7 +466,7 @@ components:
tasks:
type: array
items:
$ref: '#/components/schemas/TaskResult'
$ref: "#/components/schemas/TaskResult"
execution_time:
type: number
description: Total execution time in seconds
@@ -536,7 +544,7 @@ components:
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Unauthorized"
message: "Invalid or missing bearer token"
@@ -546,7 +554,7 @@ components:
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Not Found"
message: "The requested resource was not found"
@@ -556,7 +564,7 @@ components:
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Internal Server Error"
message: "An unexpected error occurred"

View File

@@ -84,7 +84,7 @@ paths:
'500':
$ref: '#/components/responses/ServerError'
/status/{kickoff_id}:
/{kickoff_id}/status:
get:
summary: 실행 상태 조회
description: |

View File

@@ -35,7 +35,7 @@ info:
1. **Descubra os inputs** usando `GET /inputs`
2. **Inicie a execução** usando `POST /kickoff`
3. **Monitore o progresso** usando `GET /status/{kickoff_id}`
3. **Monitore o progresso** usando `GET /{kickoff_id}/status`
version: 1.0.0
contact:
name: CrewAI Suporte
@@ -56,7 +56,7 @@ paths:
Retorna a lista de parâmetros de entrada que sua crew espera.
operationId: getRequiredInputs
responses:
'200':
"200":
description: Inputs requeridos obtidos com sucesso
content:
application/json:
@@ -69,12 +69,12 @@ paths:
type: string
description: Nomes dos parâmetros de entrada
example: ["budget", "interests", "duration", "age"]
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
$ref: '#/components/responses/NotFoundError'
'500':
$ref: '#/components/responses/ServerError'
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
$ref: "#/components/responses/NotFoundError"
"500":
$ref: "#/components/responses/ServerError"
/kickoff:
post:
@@ -104,7 +104,7 @@ paths:
age: "35"
responses:
'200':
"200":
description: Execução iniciada com sucesso
content:
application/json:
@@ -115,12 +115,12 @@ paths:
type: string
format: uuid
example: "abcd1234-5678-90ef-ghij-klmnopqrstuv"
'401':
$ref: '#/components/responses/UnauthorizedError'
'500':
$ref: '#/components/responses/ServerError'
"401":
$ref: "#/components/responses/UnauthorizedError"
"500":
$ref: "#/components/responses/ServerError"
/status/{kickoff_id}:
/{kickoff_id}/status:
get:
summary: Obter Status da Execução
description: |
@@ -136,25 +136,25 @@ paths:
type: string
format: uuid
responses:
'200':
"200":
description: Status recuperado com sucesso
content:
application/json:
schema:
oneOf:
- $ref: '#/components/schemas/ExecutionRunning'
- $ref: '#/components/schemas/ExecutionCompleted'
- $ref: '#/components/schemas/ExecutionError'
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
- $ref: "#/components/schemas/ExecutionRunning"
- $ref: "#/components/schemas/ExecutionCompleted"
- $ref: "#/components/schemas/ExecutionError"
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
description: Kickoff ID não encontrado
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
'500':
$ref: '#/components/responses/ServerError'
$ref: "#/components/schemas/Error"
"500":
$ref: "#/components/responses/ServerError"
/resume:
post:
@@ -234,7 +234,7 @@ paths:
taskWebhookUrl: "https://api.example.com/webhooks/task"
crewWebhookUrl: "https://api.example.com/webhooks/crew"
responses:
'200':
"200":
description: Execution resumed successfully
content:
application/json:
@@ -261,28 +261,28 @@ paths:
value:
status: "retrying"
message: "Task will be retried with your feedback"
'400':
"400":
description: Invalid request body or execution not in pending state
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Invalid Request"
message: "Execution is not in pending human input state"
'401':
$ref: '#/components/responses/UnauthorizedError'
'404':
"401":
$ref: "#/components/responses/UnauthorizedError"
"404":
description: Execution ID or Task ID not found
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
example:
error: "Not Found"
message: "Execution ID not found"
'500':
$ref: '#/components/responses/ServerError'
"500":
$ref: "#/components/responses/ServerError"
components:
securitySchemes:
@@ -324,7 +324,7 @@ components:
tasks:
type: array
items:
$ref: '#/components/schemas/TaskResult'
$ref: "#/components/schemas/TaskResult"
execution_time:
type: number
@@ -380,16 +380,16 @@ components:
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
NotFoundError:
description: Recurso não encontrado
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"
ServerError:
description: Erro interno do servidor
content:
application/json:
schema:
$ref: '#/components/schemas/Error'
$ref: "#/components/schemas/Error"

View File

@@ -16,16 +16,17 @@ CrewAI 엔터프라이즈 API 참고 자료에 오신 것을 환영합니다.
CrewAI AOP 대시보드에서 자신의 crew 상세 페이지로 이동하여 Status 탭에서 Bearer Token을 복사하세요.
</Step>
<Step title="필수 입력값 확인하기">
`GET /inputs` 엔드포인트를 사용하여 crew가 기대하는 파라미터를 확인하세요.
</Step>
<Step title="필수 입력값 확인하기">
`GET /inputs` 엔드포인트를 사용하여 crew가 기대하는 파라미터를 확인하세요.
</Step>
<Step title="Crew 실행 시작하기">
입력값과 함께 `POST /kickoff`를 호출하여 crew 실행을 시작하고 `kickoff_id`를 받으세요.
</Step>
<Step title="Crew 실행 시작하기">
입력값과 함께 `POST /kickoff`를 호출하여 crew 실행을 시작하고 `kickoff_id`를
받으세요.
</Step>
<Step title="진행 상황 모니터링">
`GET /status/{kickoff_id}`를 사용하여 실행 상태를 확인하고 결과를 조회하세요.
`GET /{kickoff_id}/status`를 사용하여 실행 상태를 확인하고 결과를 조회하세요.
</Step>
</Steps>
@@ -40,13 +41,14 @@ curl -H "Authorization: Bearer YOUR_CREW_TOKEN" \
### 토큰 유형
| 토큰 유형 | 범위 | 사용 사례 |
|:-----------|:--------|:----------|
| **Bearer Token** | 조직 단위 접근 | 전체 crew 운영, 서버 간 통합에 이상적 |
| **User Bearer Token** | 사용자 범위 접근 | 제한된 권한, 사용자별 작업에 적합 |
| 토큰 유형 | 범위 | 사용 사례 |
| :-------------------- | :--------------- | :------------------------------------ |
| **Bearer Token** | 조직 단위 접근 | 전체 crew 운영, 서버 간 통합에 이상적 |
| **User Bearer Token** | 사용자 범위 접근 | 제한된 권한, 사용자별 작업에 적합 |
<Tip>
두 토큰 유형 모두 CrewAI AOP 대시보드의 crew 상세 페이지 Status 탭에서 확인할 수 있습니다.
두 토큰 유형 모두 CrewAI AOP 대시보드의 crew 상세 페이지 Status 탭에서 확인할
수 있습니다.
</Tip>
## 기본 URL
@@ -63,29 +65,33 @@ https://your-crew-name.crewai.com
1. **탐색**: `GET /inputs`를 호출하여 crew가 필요한 것을 파악합니다.
2. **실행**: `POST /kickoff`를 통해 입력값을 제출하여 처리를 시작합니다.
3. **모니터링**: 완료될 때까지 `GET /status/{kickoff_id}`를 주기적으로 조회합니다.
3. **모니터링**: 완료될 때까지 `GET /{kickoff_id}/status`를 주기적으로 조회합니다.
4. **결과**: 완료된 응답에서 최종 출력을 추출합니다.
## 오류 처리
API는 표준 HTTP 상태 코드를 사용합니다:
| 코드 | 의미 |
|------|:--------|
| `200` | 성공 |
| `400` | 잘못된 요청 - 잘못된 입력 형식 |
| `401` | 인증 실패 - 잘못된 베어러 토큰 |
| 코드 | 의미 |
| ----- | :------------------------------------ |
| `200` | 성공 |
| `400` | 잘못된 요청 - 잘못된 입력 형식 |
| `401` | 인증 실패 - 잘못된 베어러 토큰 |
| `404` | 찾을 수 없음 - 리소스가 존재하지 않음 |
| `422` | 유효성 검사 오류 - 필수 입력 누락 |
| `500` | 서버 오류 - 지원팀에 문의하십시오 |
| `422` | 유효성 검사 오류 - 필수 입력 누락 |
| `500` | 서버 오류 - 지원팀에 문의하십시오 |
## 인터랙티브 테스트
<Info>
**왜 "전송" 버튼이 없나요?** 각 CrewAI AOP 사용자는 고유한 crew URL을 가지므로, 혼동을 피하기 위해 인터랙티브 플레이그라운드 대신 **참조 모드**를 사용합니다. 이를 통해 비작동 전송 버튼 없이 요청이 어떻게 생겼는지 정확히 보여줍니다.
**왜 "전송" 버튼이 없나요?** 각 CrewAI AOP 사용자는 고유한 crew URL을
가지므로, 혼동을 피하기 위해 인터랙티브 플레이그라운드 대신 **참조 모드**를
사용합니다. 이를 통해 비작동 전송 버튼 없이 요청이 어떻게 생겼는지 정확히
보여줍니다.
</Info>
각 엔드포인트 페이지에서는 다음을 확인할 수 있습니다:
- ✅ 모든 파라미터가 포함된 **정확한 요청 형식**
- ✅ 성공 및 오류 사례에 대한 **응답 예시**
- ✅ 여러 언어(cURL, Python, JavaScript 등)로 제공되는 **코드 샘플**
@@ -103,6 +109,7 @@ API는 표준 HTTP 상태 코드를 사용합니다:
</CardGroup>
**예시 작업 흐름:**
1. **cURL 예제를 복사**합니다 (엔드포인트 페이지에서)
2. **`your-actual-crew-name.crewai.com`**을(를) 실제 crew URL로 교체합니다
3. **Bearer 토큰을** 대시보드에서 복사한 실제 토큰으로 교체합니다
@@ -111,10 +118,18 @@ API는 표준 HTTP 상태 코드를 사용합니다:
## 도움이 필요하신가요?
<CardGroup cols={2}>
<Card title="Enterprise Support" icon="headset" href="mailto:support@crewai.com">
<Card
title="Enterprise Support"
icon="headset"
href="mailto:support@crewai.com"
>
API 통합 및 문제 해결에 대한 지원을 받으세요
</Card>
<Card title="Enterprise Dashboard" icon="chart-line" href="https://app.crewai.com">
<Card
title="Enterprise Dashboard"
icon="chart-line"
href="https://app.crewai.com"
>
crew를 관리하고 실행 로그를 확인하세요
</Card>
</CardGroup>

View File

@@ -1,8 +1,6 @@
---
title: "GET /status/{kickoff_id}"
title: "GET /{kickoff_id}/status"
description: "실행 상태 조회"
openapi: "/enterprise-api.ko.yaml GET /status/{kickoff_id}"
openapi: "/enterprise-api.ko.yaml GET /{kickoff_id}/status"
mode: "wide"
---

View File

@@ -16,16 +16,17 @@ Bem-vindo à referência da API do CrewAI AOP. Esta API permite que você intera
Navegue até a página de detalhes do seu crew no painel do CrewAI AOP e copie seu Bearer Token na aba Status.
</Step>
<Step title="Descubra os Inputs Necessários">
Use o endpoint `GET /inputs` para ver quais parâmetros seu crew espera.
</Step>
<Step title="Descubra os Inputs Necessários">
Use o endpoint `GET /inputs` para ver quais parâmetros seu crew espera.
</Step>
<Step title="Inicie uma Execução de Crew">
Chame `POST /kickoff` com seus inputs para iniciar a execução do crew e receber um `kickoff_id`.
</Step>
<Step title="Inicie uma Execução de Crew">
Chame `POST /kickoff` com seus inputs para iniciar a execução do crew e
receber um `kickoff_id`.
</Step>
<Step title="Monitore o Progresso">
Use `GET /status/{kickoff_id}` para checar o status da execução e recuperar os resultados.
Use `GET /{kickoff_id}/status` para checar o status da execução e recuperar os resultados.
</Step>
</Steps>
@@ -40,13 +41,14 @@ curl -H "Authorization: Bearer YOUR_CREW_TOKEN" \
### Tipos de Token
| Tipo de Token | Escopo | Caso de Uso |
|:--------------------|:------------------------|:---------------------------------------------------------|
| **Bearer Token** | Acesso em nível de organização | Operações completas de crew, ideal para integração server-to-server |
| **User Bearer Token** | Acesso com escopo de usuário | Permissões limitadas, adequado para operações específicas de usuário |
| Tipo de Token | Escopo | Caso de Uso |
| :-------------------- | :----------------------------- | :------------------------------------------------------------------- |
| **Bearer Token** | Acesso em nível de organização | Operações completas de crew, ideal para integração server-to-server |
| **User Bearer Token** | Acesso com escopo de usuário | Permissões limitadas, adequado para operações específicas de usuário |
<Tip>
Você pode encontrar ambos os tipos de token na aba Status da página de detalhes do seu crew no painel do CrewAI AOP.
Você pode encontrar ambos os tipos de token na aba Status da página de
detalhes do seu crew no painel do CrewAI AOP.
</Tip>
## URL Base
@@ -63,29 +65,33 @@ Substitua `your-crew-name` pela URL real do seu crew no painel.
1. **Descoberta**: Chame `GET /inputs` para entender o que seu crew precisa
2. **Execução**: Envie os inputs via `POST /kickoff` para iniciar o processamento
3. **Monitoramento**: Faça polling em `GET /status/{kickoff_id}` até a conclusão
3. **Monitoramento**: Faça polling em `GET /{kickoff_id}/status` até a conclusão
4. **Resultados**: Extraia o output final da resposta concluída
## Tratamento de Erros
A API utiliza códigos de status HTTP padrão:
| Código | Significado |
|--------|:--------------------------------------|
| `200` | Sucesso |
| `400` | Requisição Inválida - Formato de input inválido |
| `401` | Não Autorizado - Bearer token inválido |
| `404` | Não Encontrado - Recurso não existe |
| Código | Significado |
| ------ | :----------------------------------------------- |
| `200` | Sucesso |
| `400` | Requisição Inválida - Formato de input inválido |
| `401` | Não Autorizado - Bearer token inválido |
| `404` | Não Encontrado - Recurso não existe |
| `422` | Erro de Validação - Inputs obrigatórios ausentes |
| `500` | Erro no Servidor - Contate o suporte |
| `500` | Erro no Servidor - Contate o suporte |
## Testes Interativos
<Info>
**Por que não há botão "Enviar"?** Como cada usuário do CrewAI AOP possui sua própria URL de crew, utilizamos o **modo referência** em vez de um playground interativo para evitar confusão. Isso mostra exatamente como as requisições devem ser feitas, sem botões de envio não funcionais.
**Por que não há botão "Enviar"?** Como cada usuário do CrewAI AOP possui sua
própria URL de crew, utilizamos o **modo referência** em vez de um playground
interativo para evitar confusão. Isso mostra exatamente como as requisições
devem ser feitas, sem botões de envio não funcionais.
</Info>
Cada página de endpoint mostra para você:
- ✅ **Formato exato da requisição** com todos os parâmetros
- ✅ **Exemplos de resposta** para casos de sucesso e erro
- ✅ **Exemplos de código** em várias linguagens (cURL, Python, JavaScript, etc.)
@@ -103,6 +109,7 @@ Cada página de endpoint mostra para você:
</CardGroup>
**Exemplo de fluxo:**
1. **Copie este exemplo cURL** de qualquer página de endpoint
2. **Substitua `your-actual-crew-name.crewai.com`** pela URL real do seu crew
3. **Substitua o Bearer token** pelo seu token real do painel
@@ -111,10 +118,18 @@ Cada página de endpoint mostra para você:
## Precisa de Ajuda?
<CardGroup cols={2}>
<Card title="Suporte Enterprise" icon="headset" href="mailto:support@crewai.com">
<Card
title="Suporte Enterprise"
icon="headset"
href="mailto:support@crewai.com"
>
Obtenha ajuda com integração da API e resolução de problemas
</Card>
<Card title="Painel Enterprise" icon="chart-line" href="https://app.crewai.com">
<Card
title="Painel Enterprise"
icon="chart-line"
href="https://app.crewai.com"
>
Gerencie seus crews e visualize logs de execução
</Card>
</CardGroup>

View File

@@ -1,8 +1,6 @@
---
title: "GET /status/{kickoff_id}"
title: "GET /{kickoff_id}/status"
description: "Obter o status da execução"
openapi: "/enterprise-api.pt-BR.yaml GET /status/{kickoff_id}"
openapi: "/enterprise-api.pt-BR.yaml GET /{kickoff_id}/status"
mode: "wide"
---

View File

@@ -12,7 +12,7 @@ dependencies = [
"pytube~=15.0.0",
"requests~=2.32.5",
"docker~=7.1.0",
"crewai==1.7.1",
"crewai==1.7.2",
"lancedb~=0.5.4",
"tiktoken~=0.8.0",
"beautifulsoup4~=4.13.4",

View File

@@ -291,4 +291,4 @@ __all__ = [
"ZapierActionTools",
]
__version__ = "1.7.1"
__version__ = "1.7.2"

View File

@@ -1,5 +1,5 @@
"""Crewai Enterprise Tools."""
import os
import json
import re
from typing import Any, Optional, Union, cast, get_origin
@@ -432,7 +432,11 @@ class CrewAIPlatformActionTool(BaseTool):
payload = cleaned_kwargs
response = requests.post(
url=api_url, headers=headers, json=payload, timeout=60
url=api_url,
headers=headers,
json=payload,
timeout=60,
verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true",
)
data = response.json()
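
The same `verify` expression is added to the platform tool builder in the next file. A tiny sketch of the check both call sites now pass to requests (the helper name here is purely illustrative): TLS verification stays on unless CREWAI_FACTORY is set, case-insensitively, to "true".

```python
import os


def platform_tls_verify() -> bool:
    # Hypothetical helper mirroring the inline expression used in both diffs.
    return os.environ.get("CREWAI_FACTORY", "false").lower() != "true"


assert platform_tls_verify() is True   # default: certificates are verified
os.environ["CREWAI_FACTORY"] = "TRUE"
assert platform_tls_verify() is False  # factory mode: verification disabled
```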

View File

@@ -1,5 +1,5 @@
from typing import Any
import os
from crewai.tools import BaseTool
import requests
@@ -37,6 +37,7 @@ class CrewaiPlatformToolBuilder:
headers=headers,
timeout=30,
params={"apps": ",".join(self._apps)},
verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true",
)
response.raise_for_status()
except Exception:

View File

@@ -1,4 +1,6 @@
from typing import Union, get_args, get_origin
from unittest.mock import patch, Mock
import os
from crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool import (
CrewAIPlatformActionTool,
@@ -249,3 +251,109 @@ class TestSchemaProcessing:
result_type = tool._process_schema_type(test_schema, "TestFieldAllOfMixed")
assert result_type is str
class TestCrewAIPlatformActionToolVerify:
"""Test suite for SSL verification behavior based on CREWAI_FACTORY environment variable"""
def setup_method(self):
self.action_schema = {
"function": {
"name": "test_action",
"parameters": {
"properties": {
"test_param": {
"type": "string",
"description": "Test parameter"
}
},
"required": []
}
}
}
def create_test_tool(self):
return CrewAIPlatformActionTool(
description="Test action tool",
action_name="test_action",
action_schema=self.action_schema
)
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}, clear=True)
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
def test_run_with_ssl_verification_default(self, mock_post):
"""Test that _run uses SSL verification by default when CREWAI_FACTORY is not set"""
mock_response = Mock()
mock_response.ok = True
mock_response.json.return_value = {"result": "success"}
mock_post.return_value = mock_response
tool = self.create_test_tool()
tool._run(test_param="test_value")
mock_post.assert_called_once()
call_args = mock_post.call_args
assert call_args.kwargs["verify"] is True
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "false"}, clear=True)
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
def test_run_with_ssl_verification_factory_false(self, mock_post):
"""Test that _run uses SSL verification when CREWAI_FACTORY is 'false'"""
mock_response = Mock()
mock_response.ok = True
mock_response.json.return_value = {"result": "success"}
mock_post.return_value = mock_response
tool = self.create_test_tool()
tool._run(test_param="test_value")
mock_post.assert_called_once()
call_args = mock_post.call_args
assert call_args.kwargs["verify"] is True
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "FALSE"}, clear=True)
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
def test_run_with_ssl_verification_factory_false_uppercase(self, mock_post):
"""Test that _run uses SSL verification when CREWAI_FACTORY is 'FALSE' (case-insensitive)"""
mock_response = Mock()
mock_response.ok = True
mock_response.json.return_value = {"result": "success"}
mock_post.return_value = mock_response
tool = self.create_test_tool()
tool._run(test_param="test_value")
mock_post.assert_called_once()
call_args = mock_post.call_args
assert call_args.kwargs["verify"] is True
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "true"}, clear=True)
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
def test_run_without_ssl_verification_factory_true(self, mock_post):
"""Test that _run disables SSL verification when CREWAI_FACTORY is 'true'"""
mock_response = Mock()
mock_response.ok = True
mock_response.json.return_value = {"result": "success"}
mock_post.return_value = mock_response
tool = self.create_test_tool()
tool._run(test_param="test_value")
mock_post.assert_called_once()
call_args = mock_post.call_args
assert call_args.kwargs["verify"] is False
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "TRUE"}, clear=True)
@patch("crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool.requests.post")
def test_run_without_ssl_verification_factory_true_uppercase(self, mock_post):
"""Test that _run disables SSL verification when CREWAI_FACTORY is 'TRUE' (case-insensitive)"""
mock_response = Mock()
mock_response.ok = True
mock_response.json.return_value = {"result": "success"}
mock_post.return_value = mock_response
tool = self.create_test_tool()
tool._run(test_param="test_value")
mock_post.assert_called_once()
call_args = mock_post.call_args
assert call_args.kwargs["verify"] is False

View File

@@ -258,3 +258,98 @@ class TestCrewaiPlatformToolBuilder(unittest.TestCase):
assert "simple_string" in description_text
assert "nested_object" in description_text
assert "array_prop" in description_text
class TestCrewaiPlatformToolBuilderVerify(unittest.TestCase):
"""Test suite for SSL verification behavior in CrewaiPlatformToolBuilder"""
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token"}, clear=True)
@patch(
"crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
)
def test_fetch_actions_with_ssl_verification_default(self, mock_get):
"""Test that _fetch_actions uses SSL verification by default when CREWAI_FACTORY is not set"""
mock_response = Mock()
mock_response.raise_for_status.return_value = None
mock_response.json.return_value = {"actions": {}}
mock_get.return_value = mock_response
builder = CrewaiPlatformToolBuilder(apps=["github"])
builder._fetch_actions()
mock_get.assert_called_once()
call_args = mock_get.call_args
assert call_args.kwargs["verify"] is True
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "false"}, clear=True)
@patch(
"crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
)
def test_fetch_actions_with_ssl_verification_factory_false(self, mock_get):
"""Test that _fetch_actions uses SSL verification when CREWAI_FACTORY is 'false'"""
mock_response = Mock()
mock_response.raise_for_status.return_value = None
mock_response.json.return_value = {"actions": {}}
mock_get.return_value = mock_response
builder = CrewaiPlatformToolBuilder(apps=["github"])
builder._fetch_actions()
mock_get.assert_called_once()
call_args = mock_get.call_args
assert call_args.kwargs["verify"] is True
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "FALSE"}, clear=True)
@patch(
"crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
)
def test_fetch_actions_with_ssl_verification_factory_false_uppercase(self, mock_get):
"""Test that _fetch_actions uses SSL verification when CREWAI_FACTORY is 'FALSE' (case-insensitive)"""
mock_response = Mock()
mock_response.raise_for_status.return_value = None
mock_response.json.return_value = {"actions": {}}
mock_get.return_value = mock_response
builder = CrewaiPlatformToolBuilder(apps=["github"])
builder._fetch_actions()
mock_get.assert_called_once()
call_args = mock_get.call_args
assert call_args.kwargs["verify"] is True
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "true"}, clear=True)
@patch(
"crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
)
def test_fetch_actions_without_ssl_verification_factory_true(self, mock_get):
"""Test that _fetch_actions disables SSL verification when CREWAI_FACTORY is 'true'"""
mock_response = Mock()
mock_response.raise_for_status.return_value = None
mock_response.json.return_value = {"actions": {}}
mock_get.return_value = mock_response
builder = CrewaiPlatformToolBuilder(apps=["github"])
builder._fetch_actions()
mock_get.assert_called_once()
call_args = mock_get.call_args
assert call_args.kwargs["verify"] is False
@patch.dict("os.environ", {"CREWAI_PLATFORM_INTEGRATION_TOKEN": "test_token", "CREWAI_FACTORY": "TRUE"}, clear=True)
@patch(
"crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder.requests.get"
)
def test_fetch_actions_without_ssl_verification_factory_true_uppercase(self, mock_get):
"""Test that _fetch_actions disables SSL verification when CREWAI_FACTORY is 'TRUE' (case-insensitive)"""
mock_response = Mock()
mock_response.raise_for_status.return_value = None
mock_response.json.return_value = {"actions": {}}
mock_get.return_value = mock_response
builder = CrewaiPlatformToolBuilder(apps=["github"])
builder._fetch_actions()
mock_get.assert_called_once()
call_args = mock_get.call_args
assert call_args.kwargs["verify"] is False

View File

@@ -49,7 +49,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
"crewai-tools==1.7.1",
"crewai-tools==1.7.2",
]
embeddings = [
"tiktoken~=0.8.0"

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "1.7.1"
__version__ = "1.7.2"
_telemetry_submitted = False

View File

@@ -44,6 +44,8 @@ from crewai.events.types.memory_events import (
MemoryRetrievalCompletedEvent,
MemoryRetrievalStartedEvent,
)
from crewai.events.types.task_events import TaskFailedEvent
from crewai.hooks import LLMCallBlockedError
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.lite_agent import LiteAgent
@@ -409,6 +411,15 @@ class Agent(BaseAgent):
),
)
raise e
if isinstance(e, LLMCallBlockedError):
crewai_event_bus.emit(
self,
event=TaskFailedEvent( # type: ignore[no-untyped-call]
task=task,
error=str(e),
),
)
raise e
self._times_executed += 1
if self._times_executed > self.max_retry_limit:
crewai_event_bus.emit(
@@ -615,6 +626,15 @@ class Agent(BaseAgent):
),
)
raise e
if isinstance(e, LLMCallBlockedError):
crewai_event_bus.emit(
self,
event=TaskFailedEvent( # type: ignore[no-untyped-call]
task=task,
error=str(e),
),
)
raise e
self._times_executed += 1
if self._times_executed > self.max_retry_limit:
crewai_event_bus.emit(

View File

@@ -34,6 +34,7 @@ from crewai.utilities.agent_utils import (
get_llm_response,
handle_agent_action_core,
handle_context_length,
handle_llm_call_blocked_error,
handle_max_iterations_exceeded,
handle_output_parser_exception,
handle_unknown_error,
@@ -284,7 +285,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
log_error_after=self.log_error_after,
printer=self._printer,
)
except Exception as e:
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors

View File

@@ -149,7 +149,9 @@ class AuthenticationCommand:
return
if token_data["error"] not in ("authorization_pending", "slow_down"):
raise requests.HTTPError(token_data["error_description"])
raise requests.HTTPError(
token_data.get("error_description") or token_data.get("error")
)
time.sleep(device_code_data["interval"])
attempts += 1

View File

@@ -1,6 +1,6 @@
from typing import Any
from urllib.parse import urljoin
import os
import requests
from crewai.cli.config import Settings
@@ -33,9 +33,7 @@ class PlusAPI:
if settings.org_uuid:
self.headers["X-Crewai-Organization-Id"] = settings.org_uuid
self.base_url = (
str(settings.enterprise_base_url) or DEFAULT_CREWAI_ENTERPRISE_URL
)
self.base_url = os.getenv("CREWAI_PLUS_URL") or str(settings.enterprise_base_url) or DEFAULT_CREWAI_ENTERPRISE_URL
def _make_request(
self, method: str, endpoint: str, **kwargs: Any

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.7.1"
"crewai[tools]==1.7.2"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.7.1"
"crewai[tools]==1.7.2"
]
[project.scripts]

View File

@@ -12,6 +12,7 @@ from rich.console import Console
from crewai.cli import git
from crewai.cli.command import BaseCommand, PlusAPIMixin
from crewai.cli.config import Settings
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
from crewai.cli.utils import (
build_env_with_tool_repository_credentials,
extract_available_exports,
@@ -131,10 +132,13 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
self._validate_response(publish_response)
published_handle = publish_response.json()["handle"]
settings = Settings()
base_url = settings.enterprise_base_url or DEFAULT_CREWAI_ENTERPRISE_URL
console.print(
f"Successfully published `{published_handle}` ({project_version}).\n\n"
+ "⚠️ Security checks are running in the background. Your tool will be available once these are complete.\n"
+ f"You can monitor the status or access your tool here:\nhttps://app.crewai.com/crewai_plus/tools/{published_handle}",
+ f"You can monitor the status or access your tool here:\n{base_url}/crewai_plus/tools/{published_handle}",
style="bold green",
)

View File

@@ -9,6 +9,8 @@ from rich.console import Console
from rich.panel import Panel
from crewai.cli.authentication.token import AuthError, get_auth_token
from crewai.cli.config import Settings
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
from crewai.cli.plus_api import PlusAPI
from crewai.cli.version import get_crewai_version
from crewai.events.listeners.tracing.types import TraceEvent
@@ -16,7 +18,6 @@ from crewai.events.listeners.tracing.utils import (
is_tracing_enabled_in_context,
should_auto_collect_first_time_traces,
)
from crewai.utilities.constants import CREWAI_BASE_URL
logger = getLogger(__name__)
@@ -326,10 +327,12 @@ class TraceBatchManager:
if response.status_code == 200:
access_code = response.json().get("access_code", None)
console = Console()
settings = Settings()
base_url = settings.enterprise_base_url or DEFAULT_CREWAI_ENTERPRISE_URL
return_link = (
f"{CREWAI_BASE_URL}/crewai_plus/trace_batches/{self.trace_batch_id}"
f"{base_url}/crewai_plus/trace_batches/{self.trace_batch_id}"
if not self.is_current_batch_ephemeral and access_code is None
else f"{CREWAI_BASE_URL}/crewai_plus/ephemeral_trace_batches/{self.trace_batch_id}?access_code={access_code}"
else f"{base_url}/crewai_plus/ephemeral_trace_batches/{self.trace_batch_id}?access_code={access_code}"
)
if self.is_current_batch_ephemeral:

View File

@@ -7,6 +7,7 @@ from crewai.hooks.decorators import (
before_tool_call,
)
from crewai.hooks.llm_hooks import (
LLMCallBlockedError,
LLMCallHookContext,
clear_after_llm_call_hooks,
clear_all_llm_call_hooks,
@@ -74,6 +75,8 @@ def clear_all_global_hooks() -> dict[str, tuple[int, int]]:
__all__ = [
# Exceptions
"LLMCallBlockedError",
# Context classes
"LLMCallHookContext",
"ToolCallHookContext",

View File

@@ -14,6 +14,14 @@ if TYPE_CHECKING:
from crewai.utilities.types import LLMMessage
class LLMCallBlockedError(Exception):
"""Raised when a before_llm_call hook blocks the LLM call.
This exception is intentionally NOT retried by the agent,
as it represents an intentional block by the hook.
"""
class LLMCallHookContext:
"""Context object passed to LLM call hooks.
@@ -131,6 +139,7 @@ class LLMCallHookContext:
... if response.lower() == "no":
... print("LLM call skipped by user")
"""
# from crewai.events.event_listener import event_listener
printer = Printer()
event_listener.formatter.pause_live_updates()

View File

@@ -1645,8 +1645,7 @@ class LLM(BaseLLM):
msg_role: Literal["assistant"] = "assistant"
message["role"] = msg_role
if not self._invoke_before_llm_call_hooks(messages, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(messages, from_agent)
# --- 5) Set up callbacks if provided
with suppress_warnings():

View File

@@ -591,7 +591,7 @@ class BaseLLM(ABC):
self,
messages: list[LLMMessage],
from_agent: Agent | None = None,
) -> bool:
) -> None:
"""Invoke before_llm_call hooks for direct LLM calls (no agent context).
This method should be called by native provider implementations before
@@ -601,20 +601,19 @@ class BaseLLM(ABC):
messages: The messages being sent to the LLM
from_agent: The agent making the call (None for direct calls)
Returns:
True if LLM call should proceed, False if blocked by hook
Raises:
LLMCallBlockedError: If any hook returns False to block the LLM call.
Example:
>>> # In a native provider's call() method:
>>> if from_agent is None and not self._invoke_before_llm_call_hooks(
... messages, from_agent
... ):
... raise ValueError("LLM call blocked by hook")
>>> if from_agent is None:
... self._invoke_before_llm_call_hooks(messages, from_agent)
"""
# Only invoke hooks for direct calls (no agent context)
if from_agent is not None:
return True
return
from crewai.hooks import LLMCallBlockedError
from crewai.hooks.llm_hooks import (
LLMCallHookContext,
get_before_llm_call_hooks,
@@ -623,7 +622,7 @@ class BaseLLM(ABC):
before_hooks = get_before_llm_call_hooks()
if not before_hooks:
return True
return
hook_context = LLMCallHookContext(
executor=None,
@@ -643,15 +642,17 @@ class BaseLLM(ABC):
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
raise LLMCallBlockedError(
"LLM call blocked by before_llm_call hook"
)
except LLMCallBlockedError:
raise
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
color="yellow",
)
return True
def _invoke_after_llm_call_hooks(
self,
messages: list[LLMMessage],

View File

@@ -5,7 +5,6 @@ import logging
import os
from typing import TYPE_CHECKING, Any, Literal, cast
from anthropic.types import ThinkingBlock
from pydantic import BaseModel
from crewai.events.types.llm_events import LLMCallType
@@ -197,8 +196,7 @@ class AnthropicCompletion(BaseLLM):
messages
)
if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(formatted_messages, from_agent)
# Prepare completion parameters
completion_params = self._prepare_completion_params(

View File

@@ -302,8 +302,7 @@ class AzureCompletion(BaseLLM):
# Format messages for Azure
formatted_messages = self._format_messages_for_azure(messages)
if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(formatted_messages, from_agent)
# Prepare completion parameters
completion_params = self._prepare_completion_params(

View File

@@ -315,10 +315,9 @@ class BedrockCompletion(BaseLLM):
messages
)
if not self._invoke_before_llm_call_hooks(
self._invoke_before_llm_call_hooks(
cast(list[LLMMessage], formatted_messages), from_agent
):
raise ValueError("LLM call blocked by before_llm_call hook")
)
# Prepare request body
body: BedrockConverseRequestBody = {

View File

@@ -250,8 +250,7 @@ class GeminiCompletion(BaseLLM):
messages_for_hooks = self._convert_contents_to_dict(formatted_content)
if not self._invoke_before_llm_call_hooks(messages_for_hooks, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(messages_for_hooks, from_agent)
config = self._prepare_generation_config(
system_instruction, tools, response_model

View File

@@ -190,8 +190,7 @@ class OpenAICompletion(BaseLLM):
formatted_messages = self._format_messages(messages)
if not self._invoke_before_llm_call_hooks(formatted_messages, from_agent):
raise ValueError("LLM call blocked by before_llm_call hook")
self._invoke_before_llm_call_hooks(formatted_messages, from_agent)
completion_params = self._prepare_completion_params(
messages=formatted_messages, tools=tools

View File

@@ -16,6 +16,7 @@ from crewai.agents.parser import (
parse,
)
from crewai.cli.config import Settings
from crewai.hooks import LLMCallBlockedError
from crewai.llms.base_llm import BaseLLM
from crewai.tools import BaseTool as CrewAITool
from crewai.tools.base_tool import BaseTool
@@ -260,8 +261,7 @@ def get_llm_response(
"""
if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer):
raise ValueError("LLM call blocked by before_llm_call hook")
_setup_before_llm_call_hooks(executor_context, printer) # Raises if blocked
messages = executor_context.messages
try:
@@ -314,8 +314,7 @@ async def aget_llm_response(
ValueError: If the response is None or empty.
"""
if executor_context is not None:
if not _setup_before_llm_call_hooks(executor_context, printer):
raise ValueError("LLM call blocked by before_llm_call hook")
_setup_before_llm_call_hooks(executor_context, printer) # Raises if blocked
messages = executor_context.messages
try:
@@ -461,6 +460,18 @@ def handle_output_parser_exception(
return formatted_answer
def handle_llm_call_blocked_error(
e: LLMCallBlockedError,
messages: list[LLMMessage],
) -> AgentFinish:
messages.append({"role": "user", "content": str(e)})
return AgentFinish(
thought="",
output=str(e),
text=str(e),
)
def is_context_length_exceeded(exception: Exception) -> bool:
"""Check if the exception is due to context length exceeding.
@@ -728,15 +739,15 @@ def load_agent_from_repository(from_repository: str) -> dict[str, Any]:
def _setup_before_llm_call_hooks(
executor_context: CrewAgentExecutor | LiteAgent | None, printer: Printer
) -> bool:
) -> None:
"""Setup and invoke before_llm_call hooks for the executor context.
Args:
executor_context: The executor context to setup the hooks for.
printer: Printer instance for error logging.
Returns:
True if LLM execution should proceed, False if blocked by a hook.
Raises:
LLMCallBlockedError: If any hook returns False to block the LLM call.
"""
if executor_context and executor_context.before_llm_call_hooks:
from crewai.hooks.llm_hooks import LLMCallHookContext
@@ -752,7 +763,11 @@ def _setup_before_llm_call_hooks(
content="LLM call blocked by before_llm_call hook",
color="yellow",
)
return False
raise LLMCallBlockedError(
"LLM call blocked by before_llm_call hook"
)
except LLMCallBlockedError:
raise
except Exception as e:
printer.print(
content=f"Error in before_llm_call hook: {e}",
@@ -773,8 +788,6 @@ def _setup_before_llm_call_hooks(
else:
executor_context.messages = []
return True
def _setup_after_llm_call_hooks(
executor_context: CrewAgentExecutor | LiteAgent | None,

View File

@@ -30,4 +30,3 @@ NOT_SPECIFIED: Final[
"allows us to distinguish between 'not passed at all' and 'explicitly passed None' or '[]'.",
]
] = _NotSpecified()
CREWAI_BASE_URL: Final[str] = "https://app.crewai.com"

View File

@@ -1,7 +1,7 @@
import os
import unittest
from unittest.mock import ANY, MagicMock, patch
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
from crewai.cli.plus_api import PlusAPI
@@ -35,7 +35,7 @@ class TestPlusAPI(unittest.TestCase):
):
mock_make_request.assert_called_once_with(
method,
f"{DEFAULT_CREWAI_ENTERPRISE_URL}{endpoint}",
f"{os.getenv('CREWAI_PLUS_URL')}{endpoint}",
headers={
"Authorization": ANY,
"Content-Type": ANY,
@@ -53,7 +53,7 @@ class TestPlusAPI(unittest.TestCase):
):
mock_settings = MagicMock()
mock_settings.org_uuid = self.org_uuid
mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
mock_settings_class.return_value = mock_settings
# re-initialize Client
self.api = PlusAPI(self.api_key)
@@ -84,7 +84,7 @@ class TestPlusAPI(unittest.TestCase):
def test_get_agent_with_org_uuid(self, mock_make_request, mock_settings_class):
mock_settings = MagicMock()
mock_settings.org_uuid = self.org_uuid
mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
mock_settings_class.return_value = mock_settings
# re-initialize Client
self.api = PlusAPI(self.api_key)
@@ -115,7 +115,7 @@ class TestPlusAPI(unittest.TestCase):
def test_get_tool_with_org_uuid(self, mock_make_request, mock_settings_class):
mock_settings = MagicMock()
mock_settings.org_uuid = self.org_uuid
mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
mock_settings_class.return_value = mock_settings
# re-initialize Client
self.api = PlusAPI(self.api_key)
@@ -163,7 +163,7 @@ class TestPlusAPI(unittest.TestCase):
def test_publish_tool_with_org_uuid(self, mock_make_request, mock_settings_class):
mock_settings = MagicMock()
mock_settings.org_uuid = self.org_uuid
mock_settings.enterprise_base_url = DEFAULT_CREWAI_ENTERPRISE_URL
mock_settings.enterprise_base_url = os.getenv('CREWAI_PLUS_URL')
mock_settings_class.return_value = mock_settings
# re-initialize Client
self.api = PlusAPI(self.api_key)
@@ -320,6 +320,7 @@ class TestPlusAPI(unittest.TestCase):
)
@patch("crewai.cli.plus_api.Settings")
@patch.dict(os.environ, {"CREWAI_PLUS_URL": ""})
def test_custom_base_url(self, mock_settings_class):
mock_settings = MagicMock()
mock_settings.enterprise_base_url = "https://custom-url.com/api"
@@ -329,3 +330,11 @@ class TestPlusAPI(unittest.TestCase):
custom_api.base_url,
"https://custom-url.com/api",
)
@patch.dict(os.environ, {"CREWAI_PLUS_URL": "https://custom-url-from-env.com"})
def test_custom_base_url_from_env(self):
custom_api = PlusAPI("test_key")
self.assertEqual(
custom_api.base_url,
"https://custom-url-from-env.com",
)

View File

@@ -4,7 +4,12 @@ from __future__ import annotations
from unittest.mock import Mock
from crewai.hooks import clear_all_llm_call_hooks, unregister_after_llm_call_hook, unregister_before_llm_call_hook
from crewai.hooks import (
LLMCallBlockedError,
clear_all_llm_call_hooks,
unregister_after_llm_call_hook,
unregister_before_llm_call_hook,
)
import pytest
from crewai.hooks.llm_hooks import (
@@ -87,6 +92,86 @@ class TestLLMCallHookContext:
assert new_message in mock_executor.messages
assert len(mock_executor.messages) == 2
def test_before_hook_returning_false_gracefully_finishes(self) -> None:
"""Test that when before_llm_call hook returns False, agent gracefully finishes."""
from crewai import Agent, Crew, Task
hook_called = {"before": False}
def blocking_hook(context: LLMCallHookContext) -> bool:
"""Hook that blocks all LLM calls."""
hook_called["before"] = True
return False
register_before_llm_call_hook(blocking_hook)
try:
agent = Agent(
role="Test Agent",
goal="Answer questions",
backstory="You are a test agent",
verbose=True,
)
task = Task(
description="Say hello",
expected_output="A greeting",
agent=agent,
)
with pytest.raises(LLMCallBlockedError):
crew = Crew(agents=[agent], tasks=[task], verbose=True)
crew.kickoff()
finally:
unregister_before_llm_call_hook(blocking_hook)
def test_direct_llm_call_raises_blocked_error_when_hook_returns_false(self) -> None:
"""Test that direct LLM.call() raises LLMCallBlockedError when hook returns False."""
from crewai.hooks import LLMCallBlockedError
from crewai.llm import LLM
hook_called = {"before": False}
def blocking_hook(context: LLMCallHookContext) -> bool:
"""Hook that blocks all LLM calls."""
hook_called["before"] = True
return False
register_before_llm_call_hook(blocking_hook)
try:
llm = LLM(model="gpt-4o-mini")
with pytest.raises(LLMCallBlockedError) as exc_info:
llm.call([{"role": "user", "content": "Say hello"}])
assert hook_called["before"] is True, "Before hook should have been called"
assert "blocked" in str(exc_info.value).lower()
finally:
unregister_before_llm_call_hook(blocking_hook)
def test_raises_with_llm_call_blocked_exception(self) -> None:
"""Test that the LLM call raises an exception when the hook raises an exception."""
from crewai.hooks import LLMCallBlockedError
from crewai.llm import LLM
def blocking_hook(context: LLMCallHookContext) -> bool:
raise LLMCallBlockedError("llm call blocked")
register_before_llm_call_hook(blocking_hook)
try:
llm = LLM(model="gpt-4o-mini")
with pytest.raises(LLMCallBlockedError) as exc_info:
llm.call([{"role": "user", "content": "Say hello"}])
assert "blocked" in str(exc_info.value).lower()
finally:
unregister_before_llm_call_hook(blocking_hook)
class TestBeforeLLMCallHooks:
"""Test before_llm_call hook registration and execution."""

View File

@@ -1,3 +1,3 @@
"""CrewAI development tools."""
__version__ = "1.7.1"
__version__ = "1.7.2"