Compare commits

...

7 Commits

Author SHA1 Message Date
Devin AI
a5b9966832 fix: remove unused pytest import to resolve lint failure
Co-Authored-By: João <joao@crewai.com>
2025-08-21 22:41:51 +00:00
Devin AI
185704e366 feat: update CLI model lists with latest OpenAI, Anthropic, and Google models
- Add GPT-5, GPT-5 mini, GPT-5 nano, GPT-4.1, and o3-mini for OpenAI
- Add Claude 3.7 Sonnet, Claude 4 Sonnet, and Claude 4.1 Opus for Anthropic
- Add Gemini 2.5 Pro, 2.5 Flash, and 2.5 Flash-Lite for Google
- Add comprehensive tests for updated model constants

Fixes #3380

Co-Authored-By: João <joao@crewai.com>
2025-08-21 22:37:50 +00:00
Greyson LaLonde
842bed4e9c feat: chromadb generic client (#3374)
Some checks failed
Notify Downstream / notify-downstream (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
Add ChromaDB client implementation with async support

- Implement core collection operations (create, get_or_create, delete)
- Add search functionality with cosine similarity scoring
- Include both sync and async method variants
- Add type safety with NamedTuples and TypeGuards
- Extract utility functions to separate modules
- Default to cosine distance metric for text similarity
- Add comprehensive test coverage

TODO:
- l2, ip score calculations are not settled on
2025-08-21 18:18:46 -04:00
Lucas Gomide
1217935b31 feat: add docs about Automation triggers (#3375)
Some checks failed
Notify Downstream / notify-downstream (push) Has been cancelled
Mark stale issues and pull requests / stale (push) Has been cancelled
2025-08-20 22:02:47 -04:00
Greyson LaLonde
641c156c17 fix: address flaky tests (#3363)
Some checks failed
Notify Downstream / notify-downstream (push) Has been cancelled
fix: resolve flaky tests and race conditions in test suite

- Fix telemetry/event tests by patching class methods instead of instances
- Use unique temp files/directories to prevent CI race conditions
- Reset singleton state between tests
- Mock embedchain.Client.setup() to prevent JSON corruption
- Rename test files to test_*.py convention
- Move agent tests to tests/agents directory
- Fix repeated tool usage detection
- Remove database-dependent tools causing initialization errors
2025-08-20 13:34:09 -04:00
Tony Kipkemboi
7fdf9f9290 docs: fix API Reference OpenAPI sources and redirects (#3368)
Some checks failed
Notify Downstream / notify-downstream (push) Has been cancelled
* docs: fix API Reference OpenAPI sources and redirects; clarify training data usage; add Mermaid diagram; correct CLI usage and notes

* docs(mintlify): use explicit openapi {source, directory} with absolute paths to fix branch deployment routing

* docs(mintlify): add explicit endpoint MDX pages and include in nav; keep OpenAPI auto-gen as fallback

* docs(mintlify): remove OpenAPI Endpoints groups; add localized MDX endpoint pages for pt-BR and ko
2025-08-20 11:55:35 -04:00
Greyson LaLonde
c0d2bf4c12 fix: flow listener resumability for HITL and cyclic flows (#3322)
* fix: flow listener resumability for HITL and cyclic flows

- Add resumption context flag to distinguish HITL resumption from cyclic execution
- Skip method re-execution only during HITL resumption, not for cyclic flows
- Ensure cyclic flows like test_cyclic_flow continue to work correctly

* fix: prevent duplicate execution of conditional start methods in flows

* fix: resolve type error in flow.py line 1040 assignment
2025-08-20 10:06:18 -04:00
63 changed files with 3045 additions and 563 deletions

View File

@@ -320,6 +320,7 @@
"en/enterprise/guides/update-crew",
"en/enterprise/guides/enable-crew-studio",
"en/enterprise/guides/azure-openai-setup",
"en/enterprise/guides/automation-triggers",
"en/enterprise/guides/hubspot-trigger",
"en/enterprise/guides/react-component-export",
"en/enterprise/guides/salesforce-trigger",
@@ -341,11 +342,12 @@
"groups": [
{
"group": "Getting Started",
"pages": ["en/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.en.yaml"
"pages": [
"en/api-reference/introduction",
"en/api-reference/inputs",
"en/api-reference/kickoff",
"en/api-reference/status"
]
}
]
},
@@ -657,6 +659,7 @@
"pt-BR/enterprise/guides/update-crew",
"pt-BR/enterprise/guides/enable-crew-studio",
"pt-BR/enterprise/guides/azure-openai-setup",
"pt-BR/enterprise/guides/automation-triggers",
"pt-BR/enterprise/guides/hubspot-trigger",
"pt-BR/enterprise/guides/react-component-export",
"pt-BR/enterprise/guides/salesforce-trigger",
@@ -680,11 +683,12 @@
"groups": [
{
"group": "Começando",
"pages": ["pt-BR/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.pt-BR.yaml"
"pages": [
"pt-BR/api-reference/introduction",
"pt-BR/api-reference/inputs",
"pt-BR/api-reference/kickoff",
"pt-BR/api-reference/status"
]
}
]
},
@@ -1005,6 +1009,7 @@
"ko/enterprise/guides/update-crew",
"ko/enterprise/guides/enable-crew-studio",
"ko/enterprise/guides/azure-openai-setup",
"ko/enterprise/guides/automation-triggers",
"ko/enterprise/guides/hubspot-trigger",
"ko/enterprise/guides/react-component-export",
"ko/enterprise/guides/salesforce-trigger",
@@ -1026,11 +1031,12 @@
"groups": [
{
"group": "시작 안내",
"pages": ["ko/api-reference/introduction"]
},
{
"group": "Endpoints",
"openapi": "https://raw.githubusercontent.com/crewAIInc/crewAI/main/docs/enterprise-api.ko.yaml"
"pages": [
"ko/api-reference/introduction",
"ko/api-reference/inputs",
"ko/api-reference/kickoff",
"ko/api-reference/status"
]
}
]
},
@@ -1081,6 +1087,10 @@
"indexing": "all"
},
"redirects": [
{
"source": "/api-reference",
"destination": "/en/api-reference/introduction"
},
{
"source": "/introduction",
"destination": "/en/introduction"
@@ -1133,6 +1143,18 @@
"source": "/api-reference/:path*",
"destination": "/en/api-reference/:path*"
},
{
"source": "/en/api-reference",
"destination": "/en/api-reference/introduction"
},
{
"source": "/pt-BR/api-reference",
"destination": "/pt-BR/api-reference/introduction"
},
{
"source": "/ko/api-reference",
"destination": "/ko/api-reference/introduction"
},
{
"source": "/examples/:path*",
"destination": "/en/examples/:path*"

View File

@@ -0,0 +1,7 @@
---
title: "GET /inputs"
description: "Get required inputs for your crew"
openapi: "/enterprise-api.en.yaml GET /inputs"
---

View File

@@ -0,0 +1,7 @@
---
title: "POST /kickoff"
description: "Start a crew execution"
openapi: "/enterprise-api.en.yaml POST /kickoff"
---

View File

@@ -0,0 +1,7 @@
---
title: "GET /status/{kickoff_id}"
description: "Get execution status"
openapi: "/enterprise-api.en.yaml GET /status/{kickoff_id}"
---

View File

@@ -21,13 +21,17 @@ To use the training feature, follow these steps:
3. Run the following command:
```shell
crewai train -n <n_iterations> <filename> (optional)
crewai train -n <n_iterations> -f <filename.pkl>
```
<Tip>
Replace `<n_iterations>` with the desired number of training iterations and `<filename>` with the appropriate filename ending with `.pkl`.
</Tip>
### Training Your Crew Programmatically
<Note>
If you omit `-f`, the output defaults to `trained_agents_data.pkl` in the current working directory. You can pass an absolute path to control where the file is written.
</Note>
### Training your Crew programmatically
To train your crew programmatically, use the following steps:
@@ -51,19 +55,65 @@ except Exception as e:
raise Exception(f"An error occurred while training the crew: {e}")
```
### Key Points to Note
## How trained data is used by agents
- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
CrewAI uses the training artifacts in two ways: during training to incorporate your human feedback, and after training to guide agents with consolidated suggestions.
It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
### Training data flow
Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
```mermaid
flowchart TD
A["Start training<br/>CLI: crewai train -n -f<br/>or Python: crew.train(...)"] --> B["Setup training mode<br/>- task.human_input = true<br/>- disable delegation<br/>- init training_data.pkl + trained file"]
Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.
subgraph "Iterations"
direction LR
C["Iteration i<br/>initial_output"] --> D["User human_feedback"]
D --> E["improved_output"]
E --> F["Append to training_data.pkl<br/>by agent_id and iteration"]
end
Happy training with CrewAI! 🚀
B --> C
F --> G{"More iterations?"}
G -- "Yes" --> C
G -- "No" --> H["Evaluate per agent<br/>aggregate iterations"]
H --> I["Consolidate<br/>suggestions[] + quality + final_summary"]
I --> J["Save by agent role to trained file<br/>(default: trained_agents_data.pkl)"]
J --> K["Normal (non-training) runs"]
K --> L["Auto-load suggestions<br/>from trained_agents_data.pkl"]
L --> M["Append to prompt<br/>for consistent improvements"]
```
### During training runs
- On each iteration, the system records for every agent:
- `initial_output`: the agents first answer
- `human_feedback`: your inline feedback when prompted
- `improved_output`: the agents follow-up answer after feedback
- This data is stored in a working file named `training_data.pkl` keyed by the agents internal ID and iteration.
- While training is active, the agent automatically appends your prior human feedback to its prompt to enforce those instructions on subsequent attempts within the training session.
Training is interactive: tasks set `human_input = true`, so running in a non-interactive environment will block on user input.
### After training completes
- When `train(...)` finishes, CrewAI evaluates the collected training data per agent and produces a consolidated result containing:
- `suggestions`: clear, actionable instructions distilled from your feedback and the difference between initial/improved outputs
- `quality`: a 010 score capturing improvement
- `final_summary`: a step-by-step set of action items for future tasks
- These consolidated results are saved to the filename you pass to `train(...)` (default via CLI is `trained_agents_data.pkl`). Entries are keyed by the agents `role` so they can be applied across sessions.
- During normal (non-training) execution, each agent automatically loads its consolidated `suggestions` and appends them to the task prompt as mandatory instructions. This gives you consistent improvements without changing your agent definitions.
### File summary
- `training_data.pkl` (ephemeral, per-session):
- Structure: `agent_id -> { iteration_number: { initial_output, human_feedback, improved_output } }`
- Purpose: capture raw data and human feedback during training
- Location: saved in the current working directory (CWD)
- `trained_agents_data.pkl` (or your custom filename):
- Structure: `agent_role -> { suggestions: string[], quality: number, final_summary: string }`
- Purpose: persist consolidated guidance for future runs
- Location: written to the CWD by default; use `-f` to set a custom (including absolute) path
## Small Language Model Considerations
@@ -129,3 +179,18 @@ Happy training with CrewAI! 🚀
</Warning>
</Tab>
</Tabs>
### Key Points to Note
- **Positive Integer Requirement:** Ensure that the number of iterations (`n_iterations`) is a positive integer. The code will raise a `ValueError` if this condition is not met.
- **Filename Requirement:** Ensure that the filename ends with `.pkl`. The code will raise a `ValueError` if this condition is not met.
- **Error Handling:** The code handles subprocess errors and unexpected exceptions, providing error messages to the user.
- Trained guidance is applied at prompt time; it does not modify your Python/YAML agent configuration.
- Agents automatically load trained suggestions from a file named `trained_agents_data.pkl` located in the current working directory. If you trained to a different filename, either rename it to `trained_agents_data.pkl` before running, or adjust the loader in code.
- You can change the output filename when calling `crewai train` with `-f/--filename`. Absolute paths are supported if you want to save outside the CWD.
It is important to note that the training process may take some time, depending on the complexity of your agents and will also require your feedback on each iteration.
Once the training is complete, your agents will be equipped with enhanced capabilities and knowledge, ready to tackle complex tasks and provide more consistent and valuable insights.
Remember to regularly update and retrain your agents to ensure they stay up-to-date with the latest information and advancements in the field.

View File

@@ -0,0 +1,171 @@
---
title: "Automation Triggers"
description: "Automatically execute your CrewAI workflows when specific events occur in connected integrations"
icon: "bolt"
---
Automation triggers enable you to automatically run your CrewAI deployments when specific events occur in your connected integrations, creating powerful event-driven workflows that respond to real-time changes in your business systems.
## Overview
With automation triggers, you can:
- **Respond to real-time events** - Automatically execute workflows when specific conditions are met
- **Integrate with external systems** - Connect with platforms like Gmail, Outlook, OneDrive, JIRA, Slack, Stripe and more
- **Scale your automation** - Handle high-volume events without manual intervention
- **Maintain context** - Access trigger data within your crews and flows
## Managing Automation Triggers
### Viewing Available Triggers
To access and manage your automation triggers:
1. Navigate to your deployment in the CrewAI dashboard
2. Click on the **Triggers** tab to view all available trigger integrations
<Frame>
<img src="/images/enterprise/list-available-triggers.png" alt="List of available automation triggers" />
</Frame>
This view shows all the trigger integrations available for your deployment, along with their current connection status.
### Enabling and Disabling Triggers
Each trigger can be easily enabled or disabled using the toggle switch:
<Frame>
<img src="/images/enterprise/trigger-selected.png" alt="Enable or disable triggers with toggle" />
</Frame>
- **Enabled (blue toggle)**: The trigger is active and will automatically execute your deployment when the specified events occur
- **Disabled (gray toggle)**: The trigger is inactive and will not respond to events
Simply click the toggle to change the trigger state. Changes take effect immediately.
### Monitoring Trigger Executions
Track the performance and history of your triggered executions:
<Frame>
<img src="/images/enterprise/list-executions.png" alt="List of executions triggered by automation" />
</Frame>
## Building Automation
Before building your automation, it's helpful to understand the structure of trigger payloads that your crews and flows will receive.
### Payload Samples Repository
We maintain a comprehensive repository with sample payloads from various trigger sources to help you build and test your automations:
**🔗 [CrewAI Enterprise Trigger Payload Samples](https://github.com/crewAIInc/crewai-enterprise-trigger-payload-samples)**
This repository contains:
- **Real payload examples** from different trigger sources (Gmail, Google Drive, etc.)
- **Payload structure documentation** showing the format and available fields
### Triggers with Crew
Your existing crew definitions work seamlessly with triggers, you just need to have a task to parse the received payload:
```python
@CrewBase
class MyAutomatedCrew:
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config['researcher'],
)
@task
def parse_trigger_payload(self) -> Task:
return Task(
config=self.tasks_config['parse_trigger_payload'],
agent=self.researcher(),
)
@task
def analyze_trigger_content(self) -> Task:
return Task(
config=self.tasks_config['analyze_trigger_data'],
agent=self.researcher(),
)
```
The crew will automatically receive and can access the trigger payload through the standard CrewAI context mechanisms.
### Integration with Flows
For flows, you have more control over how trigger data is handled:
#### Accessing Trigger Payload
All `@start()` methods in your flows will accept an additional parameter called `crewai_trigger_payload`:
```python
from crewai.flow import Flow, start, listen
class MyAutomatedFlow(Flow):
@start()
def handle_trigger(self, crewai_trigger_payload: dict = None):
"""
This start method can receive trigger data
"""
if crewai_trigger_payload:
# Process the trigger data
trigger_id = crewai_trigger_payload.get('id')
event_data = crewai_trigger_payload.get('payload', {})
# Store in flow state for use by other methods
self.state.trigger_id = trigger_id
self.state.trigger_type = event_data
return event_data
# Handle manual execution
return None
@listen(handle_trigger)
def process_data(self, trigger_data):
"""
Process the data from the trigger
"""
# ... process the trigger
```
#### Triggering Crews from Flows
When kicking off a crew within a flow that was triggered, pass the trigger payload as it:
```python
@start()
def delegate_to_crew(self, crewai_trigger_payload: dict = None):
"""
Delegate processing to a specialized crew
"""
crew = MySpecializedCrew()
# Pass the trigger payload to the crew
result = crew.crew().kickoff(
inputs={
'a_custom_parameter': "custom_value",
'crewai_trigger_payload': crewai_trigger_payload
},
)
return result
```
## Troubleshooting
**Trigger not firing:**
- Verify the trigger is enabled
- Check integration connection status
**Execution failures:**
- Check the execution logs for error details
- If you are developing, make sure the inputs include the `crewai_trigger_payload` parameter with the correct payload
Automation triggers transform your CrewAI deployments into responsive, event-driven systems that can seamlessly integrate with your existing business processes and tools.

Binary file not shown.

After

Width:  |  Height:  |  Size: 142 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 330 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 133 KiB

View File

@@ -0,0 +1,7 @@
---
title: "GET /inputs"
description: "크루가 필요로 하는 입력 확인"
openapi: "/enterprise-api.ko.yaml GET /inputs"
---

View File

@@ -0,0 +1,7 @@
---
title: "POST /kickoff"
description: "크루 실행 시작"
openapi: "/enterprise-api.ko.yaml POST /kickoff"
---

View File

@@ -0,0 +1,7 @@
---
title: "GET /status/{kickoff_id}"
description: "실행 상태 조회"
openapi: "/enterprise-api.ko.yaml GET /status/{kickoff_id}"
---

View File

@@ -0,0 +1,171 @@
---
title: "자동화 트리거"
description: "연결된 통합에서 특정 이벤트가 발생할 때 CrewAI 워크플로우를 자동으로 실행합니다"
icon: "bolt"
---
자동화 트리거를 사용하면 연결된 통합에서 특정 이벤트가 발생할 때 CrewAI 배포를 자동으로 실행할 수 있어, 비즈니스 시스템의 실시간 변화에 반응하는 강력한 이벤트 기반 워크플로우를 만들 수 있습니다.
## 개요
자동화 트리거를 사용하면 다음을 수행할 수 있습니다:
- **실시간 이벤트에 응답** - 특정 조건이 충족될 때 워크플로우를 자동으로 실행
- **외부 시스템과 통합** - Gmail, Outlook, OneDrive, JIRA, Slack, Stripe 등의 플랫폼과 연결
- **자동화 확장** - 수동 개입 없이 대용량 이벤트 처리
- **컨텍스트 유지** - crew와 flow 내에서 트리거 데이터에 액세스
## 자동화 트리거 관리
### 사용 가능한 트리거 보기
자동화 트리거에 액세스하고 관리하려면:
1. CrewAI 대시보드에서 배포로 이동
2. **트리거** 탭을 클릭하여 사용 가능한 모든 트리거 통합 보기
<Frame>
<img src="/images/enterprise/list-available-triggers.png" alt="사용 가능한 자동화 트리거 목록" />
</Frame>
이 보기는 배포에 사용 가능한 모든 트리거 통합과 현재 연결 상태를 보여줍니다.
### 트리거 활성화 및 비활성화
각 트리거는 토글 스위치를 사용하여 쉽게 활성화하거나 비활성화할 수 있습니다:
<Frame>
<img src="/images/enterprise/trigger-selected.png" alt="토글로 트리거 활성화 또는 비활성화" />
</Frame>
- **활성화됨 (파란색 토글)**: 트리거가 활성 상태이며 지정된 이벤트가 발생할 때 배포를 자동으로 실행합니다
- **비활성화됨 (회색 토글)**: 트리거가 비활성 상태이며 이벤트에 응답하지 않습니다
토글을 클릭하기만 하면 트리거 상태를 변경할 수 있습니다. 변경 사항은 즉시 적용됩니다.
### 트리거 실행 모니터링
트리거된 실행의 성능과 기록을 추적합니다:
<Frame>
<img src="/images/enterprise/list-executions.png" alt="자동화에 의해 트리거된 실행 목록" />
</Frame>
## 자동화 구축
자동화를 구축하기 전에 crew와 flow가 받을 트리거 페이로드의 구조를 이해하는 것이 도움이 됩니다.
### 페이로드 샘플 저장소
자동화를 구축하고 테스트하는 데 도움이 되도록 다양한 트리거 소스의 샘플 페이로드가 포함된 포괄적인 저장소를 유지 관리하고 있습니다:
**🔗 [CrewAI Enterprise 트리거 페이로드 샘플](https://github.com/crewAIInc/crewai-enterprise-trigger-payload-samples)**
이 저장소에는 다음이 포함되어 있습니다:
- **실제 페이로드 예제** - 다양한 트리거 소스(Gmail, Google Drive 등)에서 가져온 예제
- **페이로드 구조 문서** - 형식과 사용 가능한 필드를 보여주는 문서
### Crew와 트리거
기존 crew 정의는 트리거와 완벽하게 작동하며, 받은 페이로드를 분석하는 작업만 있으면 됩니다:
```python
@CrewBase
class MyAutomatedCrew:
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config['researcher'],
)
@task
def parse_trigger_payload(self) -> Task:
return Task(
config=self.tasks_config['parse_trigger_payload'],
agent=self.researcher(),
)
@task
def analyze_trigger_content(self) -> Task:
return Task(
config=self.tasks_config['analyze_trigger_data'],
agent=self.researcher(),
)
```
crew는 자동으로 트리거 페이로드를 받고 표준 CrewAI 컨텍스트 메커니즘을 통해 액세스할 수 있습니다.
### Flow와의 통합
flow의 경우 트리거 데이터 처리 방법을 더 세밀하게 제어할 수 있습니다:
#### 트리거 페이로드 액세스
flow의 모든 `@start()` 메서드는 `crewai_trigger_payload`라는 추가 매개변수를 허용합니다:
```python
from crewai.flow import Flow, start, listen
class MyAutomatedFlow(Flow):
@start()
def handle_trigger(self, crewai_trigger_payload: dict = None):
"""
이 start 메서드는 트리거 데이터를 받을 수 있습니다
"""
if crewai_trigger_payload:
# 트리거 데이터 처리
trigger_id = crewai_trigger_payload.get('id')
event_data = crewai_trigger_payload.get('payload', {})
# 다른 메서드에서 사용할 수 있도록 flow 상태에 저장
self.state.trigger_id = trigger_id
self.state.trigger_type = event_data
return event_data
# 수동 실행 처리
return None
@listen(handle_trigger)
def process_data(self, trigger_data):
"""
트리거 데이터 처리
"""
# ... 트리거 처리
```
#### Flow에서 Crew 트리거하기
트리거된 flow 내에서 crew를 시작할 때 트리거 페이로드를 전달합니다:
```python
@start()
def delegate_to_crew(self, crewai_trigger_payload: dict = None):
"""
전문 crew에 처리 위임
"""
crew = MySpecializedCrew()
# crew에 트리거 페이로드 전달
result = crew.crew().kickoff(
inputs={
'a_custom_parameter': "custom_value",
'crewai_trigger_payload': crewai_trigger_payload
},
)
return result
```
## 문제 해결
**트리거가 작동하지 않는 경우:**
- 트리거가 활성화되어 있는지 확인
- 통합 연결 상태 확인
**실행 실패:**
- 오류 세부 정보는 실행 로그 확인
- 개발 중인 경우 입력에 올바른 페이로드가 포함된 `crewai_trigger_payload` 매개변수가 포함되어 있는지 확인
자동화 트리거는 CrewAI 배포를 기존 비즈니스 프로세스 및 도구와 완벽하게 통합할 수 있는 반응형 이벤트 기반 시스템으로 변환합니다.

View File

@@ -0,0 +1,7 @@
---
title: "GET /inputs"
description: "Obter entradas necessárias para sua crew"
openapi: "/enterprise-api.pt-BR.yaml GET /inputs"
---

View File

@@ -0,0 +1,7 @@
---
title: "POST /kickoff"
description: "Iniciar a execução da crew"
openapi: "/enterprise-api.pt-BR.yaml POST /kickoff"
---

View File

@@ -0,0 +1,7 @@
---
title: "GET /status/{kickoff_id}"
description: "Obter o status da execução"
openapi: "/enterprise-api.pt-BR.yaml GET /status/{kickoff_id}"
---

View File

@@ -0,0 +1,171 @@
---
title: "Triggers de Automação"
description: "Execute automaticamente seus workflows CrewAI quando eventos específicos ocorrem em integrações conectadas"
icon: "bolt"
---
Os triggers de automação permitem executar automaticamente suas implantações CrewAI quando eventos específicos ocorrem em suas integrações conectadas, criando workflows poderosos orientados por eventos que respondem a mudanças em tempo real em seus sistemas de negócio.
## Visão Geral
Com triggers de automação, você pode:
- **Responder a eventos em tempo real** - Execute workflows automaticamente quando condições específicas forem atendidas
- **Integrar com sistemas externos** - Conecte com plataformas como Gmail, Outlook, OneDrive, JIRA, Slack, Stripe e muito mais
- **Escalar sua automação** - Lide com eventos de alto volume sem intervenção manual
- **Manter contexto** - Acesse dados do trigger dentro de suas crews e flows
## Gerenciando Triggers de Automação
### Visualizando Triggers Disponíveis
Para acessar e gerenciar seus triggers de automação:
1. Navegue até sua implantação no painel do CrewAI
2. Clique na aba **Triggers** para visualizar todas as integrações de trigger disponíveis
<Frame>
<img src="/images/enterprise/list-available-triggers.png" alt="Lista de triggers de automação disponíveis" />
</Frame>
Esta visualização mostra todas as integrações de trigger disponíveis para sua implantação, junto com seus status de conexão atuais.
### Habilitando e Desabilitando Triggers
Cada trigger pode ser facilmente habilitado ou desabilitado usando o botão de alternância:
<Frame>
<img src="/images/enterprise/trigger-selected.png" alt="Habilitar ou desabilitar triggers com alternância" />
</Frame>
- **Habilitado (alternância azul)**: O trigger está ativo e executará automaticamente sua implantação quando os eventos especificados ocorrerem
- **Desabilitado (alternância cinza)**: O trigger está inativo e não responderá a eventos
Simplesmente clique na alternância para mudar o estado do trigger. As alterações entram em vigor imediatamente.
### Monitorando Execuções de Trigger
Acompanhe o desempenho e histórico de suas execuções acionadas:
<Frame>
<img src="/images/enterprise/list-executions.png" alt="Lista de execuções acionadas por automação" />
</Frame>
## Construindo Automação
Antes de construir sua automação, é útil entender a estrutura dos payloads de trigger que suas crews e flows receberão.
### Repositório de Amostras de Payload
Mantemos um repositório abrangente com amostras de payload de várias fontes de trigger para ajudá-lo a construir e testar suas automações:
**🔗 [Amostras de Payload de Trigger CrewAI Enterprise](https://github.com/crewAIInc/crewai-enterprise-trigger-payload-samples)**
Este repositório contém:
- **Exemplos reais de payload** de diferentes fontes de trigger (Gmail, Google Drive, etc.)
- **Documentação da estrutura de payload** mostrando o formato e campos disponíveis
### Triggers com Crew
Suas definições de crew existentes funcionam perfeitamente com triggers, você só precisa ter uma tarefa para analisar o payload recebido:
```python
@CrewBase
class MinhaCrewAutomatizada:
@agent
def pesquisador(self) -> Agent:
return Agent(
config=self.agents_config['pesquisador'],
)
@task
def analisar_payload_trigger(self) -> Task:
return Task(
config=self.tasks_config['analisar_payload_trigger'],
agent=self.pesquisador(),
)
@task
def analisar_conteudo_trigger(self) -> Task:
return Task(
config=self.tasks_config['analisar_dados_trigger'],
agent=self.pesquisador(),
)
```
A crew receberá automaticamente e pode acessar o payload do trigger através dos mecanismos de contexto padrão do CrewAI.
### Integração com Flows
Para flows, você tem mais controle sobre como os dados do trigger são tratados:
#### Acessando Payload do Trigger
Todos os métodos `@start()` em seus flows aceitarão um parâmetro adicional chamado `crewai_trigger_payload`:
```python
from crewai.flow import Flow, start, listen
class MeuFlowAutomatizado(Flow):
@start()
def lidar_com_trigger(self, crewai_trigger_payload: dict = None):
"""
Este método start pode receber dados do trigger
"""
if crewai_trigger_payload:
# Processa os dados do trigger
trigger_id = crewai_trigger_payload.get('id')
dados_evento = crewai_trigger_payload.get('payload', {})
# Armazena no estado do flow para uso por outros métodos
self.state.trigger_id = trigger_id
self.state.trigger_type = dados_evento
return dados_evento
# Lida com execução manual
return None
@listen(lidar_com_trigger)
def processar_dados(self, dados_trigger):
"""
Processa os dados do trigger
"""
# ... processa o trigger
```
#### Acionando Crews a partir de Flows
Ao iniciar uma crew dentro de um flow que foi acionado, passe o payload do trigger conforme ele:
```python
@start()
def delegar_para_crew(self, crewai_trigger_payload: dict = None):
"""
Delega processamento para uma crew especializada
"""
crew = MinhaCrewEspecializada()
# Passa o payload do trigger para a crew
resultado = crew.crew().kickoff(
inputs={
'parametro_personalizado': "valor_personalizado",
'crewai_trigger_payload': crewai_trigger_payload
},
)
return resultado
```
## Solução de Problemas
**Trigger não está sendo disparado:**
- Verifique se o trigger está habilitado
- Verifique o status de conexão da integração
**Falhas de execução:**
- Verifique os logs de execução para detalhes do erro
- Se você está desenvolvendo, certifique-se de que as entradas incluem o parâmetro `crewai_trigger_payload` com o payload correto
Os triggers de automação transformam suas implantações CrewAI em sistemas responsivos orientados por eventos que podem se integrar perfeitamente com seus processos de negócio e ferramentas existentes.

View File

@@ -1,7 +1,18 @@
import shutil
import subprocess
import time
from typing import Any, Callable, Dict, List, Literal, Optional, Sequence, Tuple, Type, Union
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
)
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -162,7 +173,7 @@ class Agent(BaseAgent):
)
guardrail: Optional[Union[Callable[[Any], Tuple[bool, Any]], str]] = Field(
default=None,
description="Function or string description of a guardrail to validate agent output"
description="Function or string description of a guardrail to validate agent output",
)
guardrail_max_retries: int = Field(
default=3, description="Maximum number of retries when guardrail fails"
@@ -276,7 +287,7 @@ class Agent(BaseAgent):
self._inject_date_to_task(task)
if self.tools_handler:
self.tools_handler.last_used_tool = {} # type: ignore # Incompatible types in assignment (expression has type "dict[Never, Never]", variable has type "ToolCalling")
self.tools_handler.last_used_tool = None
task_prompt = task.prompt()
@@ -336,7 +347,6 @@ class Agent(BaseAgent):
self.knowledge_config.model_dump() if self.knowledge_config else {}
)
if self.knowledge or (self.crew and self.crew.knowledge):
crewai_event_bus.emit(
self,

View File

@@ -8,13 +8,13 @@ from .cache.cache_handler import CacheHandler
class ToolsHandler:
"""Callback handler for tool usage."""
last_used_tool: ToolCalling = {} # type: ignore # BUG?: Incompatible types in assignment (expression has type "Dict[...]", variable has type "ToolCalling")
last_used_tool: Optional[ToolCalling] = None
cache: Optional[CacheHandler]
def __init__(self, cache: Optional[CacheHandler] = None):
"""Initialize the callback handler."""
self.cache = cache
self.last_used_tool = {} # type: ignore # BUG?: same as above
self.last_used_tool = None
def on_tool_use(
self,

View File

@@ -135,14 +135,21 @@ MODELS = {
"gpt-4.1-nano-2025-04-14",
"gpt-4o",
"gpt-4o-mini",
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
"o1-mini",
"o1-preview",
"o3-mini",
],
"anthropic": [
"claude-3-5-sonnet-20240620",
"claude-3-sonnet-20240229",
"claude-3-opus-20240229",
"claude-3-haiku-20240307",
"claude-3.7-sonnet-20250219",
"claude-4-sonnet-20250301",
"claude-4.1-opus-20250315",
],
"gemini": [
"gemini/gemini-1.5-flash",
@@ -152,6 +159,9 @@ MODELS = {
"gemini/gemini-2.0-flash-thinking-exp-01-21",
"gemini/gemini-2.5-flash-preview-04-17",
"gemini/gemini-2.5-pro-exp-03-25",
"gemini/gemini-2.5-flash-lite",
"gemini/gemini-2.5-flash",
"gemini/gemini-2.5-pro",
"gemini/gemini-gemma-2-9b-it",
"gemini/gemini-gemma-2-27b-it",
"gemini/gemma-3-1b-it",

View File

@@ -474,6 +474,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self._method_outputs: List[Any] = [] # List to store all method outputs
self._completed_methods: Set[str] = set() # Track completed methods for reload
self._persistence: Optional[FlowPersistence] = persistence
self._is_execution_resuming: bool = False
# Initialize state with initial values
self._state = self._create_initial_state()
@@ -829,6 +830,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
# Clear completed methods and outputs for a fresh start
self._completed_methods.clear()
self._method_outputs.clear()
else:
# We're restoring from persistence, set the flag
self._is_execution_resuming = True
if inputs:
# Override the id in the state if it exists in inputs
@@ -880,6 +884,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
]
await asyncio.gather(*tasks)
# Clear the resumption flag after initial execution completes
self._is_execution_resuming = False
final_output = self._method_outputs[-1] if self._method_outputs else None
crewai_event_bus.emit(
@@ -916,9 +923,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
- Automatically injects crewai_trigger_payload if available in flow inputs
"""
if start_method_name in self._completed_methods:
last_output = self._method_outputs[-1] if self._method_outputs else None
await self._execute_listeners(start_method_name, last_output)
return
if self._is_execution_resuming:
# During resumption, skip execution but continue listeners
last_output = self._method_outputs[-1] if self._method_outputs else None
await self._execute_listeners(start_method_name, last_output)
return
# For cyclic flows, clear from completed to allow re-execution
self._completed_methods.discard(start_method_name)
method = self._methods[start_method_name]
enhanced_method = self._inject_trigger_payload_for_start_method(method)
@@ -1050,11 +1061,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
for router_name in routers_triggered:
await self._execute_single_listener(router_name, result)
# After executing router, the router's result is the path
router_result = self._method_outputs[-1]
router_result = (
self._method_outputs[-1] if self._method_outputs else None
)
if router_result: # Only add non-None results
router_results.append(router_result)
current_trigger = (
router_result # Update for next iteration of router chain
str(router_result)
if router_result is not None
else "" # Update for next iteration of router chain
)
# Now execute normal listeners for all router results and the original trigger
@@ -1072,6 +1087,24 @@ class Flow(Generic[T], metaclass=FlowMeta):
]
await asyncio.gather(*tasks)
if current_trigger in router_results:
# Find start methods triggered by this router result
for method_name in self._start_methods:
# Check if this start method is triggered by the current trigger
if method_name in self._listeners:
condition_type, trigger_methods = self._listeners[
method_name
]
if current_trigger in trigger_methods:
# Only execute if this is a cycle (method was already completed)
if method_name in self._completed_methods:
# For router-triggered start methods in cycles, temporarily clear resumption flag
# to allow cyclic execution
was_resuming = self._is_execution_resuming
self._is_execution_resuming = False
await self._execute_start_method(method_name)
self._is_execution_resuming = was_resuming
def _find_triggered_methods(
self, trigger_method: str, router_only: bool
) -> List[str]:
@@ -1109,6 +1142,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
if router_only != is_router:
continue
if not router_only and listener_name in self._start_methods:
continue
if condition_type == "OR":
# If the trigger_method matches any in methods, run this
if trigger_method in methods:
@@ -1158,10 +1194,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
Catches and logs any exceptions during execution, preventing
individual listener failures from breaking the entire flow.
"""
# TODO: greyson fix
# if listener_name in self._completed_methods:
# await self._execute_listeners(listener_name, None)
# return
if listener_name in self._completed_methods:
if self._is_execution_resuming:
# During resumption, skip execution but continue listeners
await self._execute_listeners(listener_name, None)
return
# For cyclic flows, clear from completed to allow re-execution
self._completed_methods.discard(listener_name)
try:
method = self._methods[listener_name]

View File

View File

@@ -0,0 +1,556 @@
"""ChromaDB client implementation."""
from typing import Any
from chromadb.api.types import (
Embeddable,
EmbeddingFunction as ChromaEmbeddingFunction,
QueryResult,
)
from typing_extensions import Unpack
from crewai.rag.chromadb.types import (
ChromaDBClientType,
ChromaDBCollectionCreateParams,
ChromaDBCollectionSearchParams,
)
from crewai.rag.chromadb.utils import (
_extract_search_params,
_is_async_client,
_is_sync_client,
_prepare_documents_for_chromadb,
_process_query_results,
)
from crewai.rag.core.base_client import (
BaseClient,
BaseCollectionParams,
BaseCollectionAddParams,
)
from crewai.rag.types import SearchResult
class ChromaDBClient(BaseClient):
"""ChromaDB implementation of the BaseClient protocol.
Provides vector database operations for ChromaDB, supporting both
synchronous and asynchronous clients.
Attributes:
client: ChromaDB client instance (ClientAPI or AsyncClientAPI).
embedding_function: Function to generate embeddings for documents.
"""
client: ChromaDBClientType
embedding_function: ChromaEmbeddingFunction[Embeddable]
def create_collection(
self, **kwargs: Unpack[ChromaDBCollectionCreateParams]
) -> None:
"""Create a new collection in ChromaDB.
Uses the client's default embedding function if none provided.
Keyword Args:
collection_name: Name of the collection to create. Must be unique.
configuration: Optional collection configuration specifying distance metrics,
HNSW parameters, or other backend-specific settings.
metadata: Optional metadata dictionary to attach to the collection.
embedding_function: Optional custom embedding function. If not provided,
uses the client's default embedding function.
data_loader: Optional data loader for batch loading data into the collection.
get_or_create: If True, returns existing collection if it already exists
instead of raising an error. Defaults to False.
Raises:
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
ValueError: If collection with the same name already exists and get_or_create
is False.
ConnectionError: If unable to connect to ChromaDB server.
Example:
>>> client = ChromaDBClient()
>>> client.create_collection(
... collection_name="documents",
... metadata={"description": "Product documentation"},
... get_or_create=True
... )
"""
if not _is_sync_client(self.client):
raise TypeError(
"Synchronous method create_collection() requires a ClientAPI. "
"Use acreate_collection() for AsyncClientAPI."
)
metadata = kwargs.get("metadata", {})
if "hnsw:space" not in metadata:
metadata["hnsw:space"] = "cosine"
self.client.create_collection(
name=kwargs["collection_name"],
configuration=kwargs.get("configuration"),
metadata=metadata,
embedding_function=kwargs.get(
"embedding_function", self.embedding_function
),
data_loader=kwargs.get("data_loader"),
get_or_create=kwargs.get("get_or_create", False),
)
async def acreate_collection(
self, **kwargs: Unpack[ChromaDBCollectionCreateParams]
) -> None:
"""Create a new collection in ChromaDB asynchronously.
Creates a new collection with the specified name and optional configuration.
If an embedding function is not provided, uses the client's default embedding function.
Keyword Args:
collection_name: Name of the collection to create. Must be unique.
configuration: Optional collection configuration specifying distance metrics,
HNSW parameters, or other backend-specific settings.
metadata: Optional metadata dictionary to attach to the collection.
embedding_function: Optional custom embedding function. If not provided,
uses the client's default embedding function.
data_loader: Optional data loader for batch loading data into the collection.
get_or_create: If True, returns existing collection if it already exists
instead of raising an error. Defaults to False.
Raises:
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
ValueError: If collection with the same name already exists and get_or_create
is False.
ConnectionError: If unable to connect to ChromaDB server.
Example:
>>> import asyncio
>>> async def main():
... client = ChromaDBClient()
... await client.acreate_collection(
... collection_name="documents",
... metadata={"description": "Product documentation"},
... get_or_create=True
... )
>>> asyncio.run(main())
"""
if not _is_async_client(self.client):
raise TypeError(
"Asynchronous method acreate_collection() requires an AsyncClientAPI. "
"Use create_collection() for ClientAPI."
)
metadata = kwargs.get("metadata", {})
if "hnsw:space" not in metadata:
metadata["hnsw:space"] = "cosine"
await self.client.create_collection(
name=kwargs["collection_name"],
configuration=kwargs.get("configuration"),
metadata=metadata,
embedding_function=kwargs.get(
"embedding_function", self.embedding_function
),
data_loader=kwargs.get("data_loader"),
get_or_create=kwargs.get("get_or_create", False),
)
def get_or_create_collection(
self, **kwargs: Unpack[ChromaDBCollectionCreateParams]
) -> Any:
"""Get an existing collection or create it if it doesn't exist.
Returns existing collection if found, otherwise creates a new one.
Keyword Args:
collection_name: Name of the collection to get or create.
configuration: Optional collection configuration specifying distance metrics,
HNSW parameters, or other backend-specific settings.
metadata: Optional metadata dictionary to attach to the collection.
embedding_function: Optional custom embedding function. If not provided,
uses the client's default embedding function.
data_loader: Optional data loader for batch loading data into the collection.
Returns:
A ChromaDB Collection object.
Raises:
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
ConnectionError: If unable to connect to ChromaDB server.
Example:
>>> client = ChromaDBClient()
>>> collection = client.get_or_create_collection(
... collection_name="documents",
... metadata={"description": "Product documentation"}
... )
"""
if not _is_sync_client(self.client):
raise TypeError(
"Synchronous method get_or_create_collection() requires a ClientAPI. "
"Use aget_or_create_collection() for AsyncClientAPI."
)
metadata = kwargs.get("metadata", {})
if "hnsw:space" not in metadata:
metadata["hnsw:space"] = "cosine"
return self.client.get_or_create_collection(
name=kwargs["collection_name"],
configuration=kwargs.get("configuration"),
metadata=metadata,
embedding_function=kwargs.get(
"embedding_function", self.embedding_function
),
data_loader=kwargs.get("data_loader"),
)
async def aget_or_create_collection(
self, **kwargs: Unpack[ChromaDBCollectionCreateParams]
) -> Any:
"""Get an existing collection or create it if it doesn't exist asynchronously.
Returns existing collection if found, otherwise creates a new one.
Keyword Args:
collection_name: Name of the collection to get or create.
configuration: Optional collection configuration specifying distance metrics,
HNSW parameters, or other backend-specific settings.
metadata: Optional metadata dictionary to attach to the collection.
embedding_function: Optional custom embedding function. If not provided,
uses the client's default embedding function.
data_loader: Optional data loader for batch loading data into the collection.
Returns:
A ChromaDB AsyncCollection object.
Raises:
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
ConnectionError: If unable to connect to ChromaDB server.
Example:
>>> import asyncio
>>> async def main():
... client = ChromaDBClient()
... collection = await client.aget_or_create_collection(
... collection_name="documents",
... metadata={"description": "Product documentation"}
... )
>>> asyncio.run(main())
"""
if not _is_async_client(self.client):
raise TypeError(
"Asynchronous method aget_or_create_collection() requires an AsyncClientAPI. "
"Use get_or_create_collection() for ClientAPI."
)
metadata = kwargs.get("metadata", {})
if "hnsw:space" not in metadata:
metadata["hnsw:space"] = "cosine"
return await self.client.get_or_create_collection(
name=kwargs["collection_name"],
configuration=kwargs.get("configuration"),
metadata=metadata,
embedding_function=kwargs.get(
"embedding_function", self.embedding_function
),
data_loader=kwargs.get("data_loader"),
)
def add_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
"""Add documents with their embeddings to a collection.
Performs an upsert operation - documents with existing IDs are updated.
Generates embeddings automatically using the configured embedding function.
Keyword Args:
collection_name: The name of the collection to add documents to.
documents: List of BaseRecord dicts containing:
- content: The text content (required)
- doc_id: Optional unique identifier (auto-generated if missing)
- metadata: Optional metadata dictionary
Raises:
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
ValueError: If collection doesn't exist or documents list is empty.
ConnectionError: If unable to connect to ChromaDB server.
"""
if not _is_sync_client(self.client):
raise TypeError(
"Synchronous method add_documents() requires a ClientAPI. "
"Use aadd_documents() for AsyncClientAPI."
)
collection_name = kwargs["collection_name"]
documents = kwargs["documents"]
if not documents:
raise ValueError("Documents list cannot be empty")
collection = self.client.get_collection(
name=collection_name,
embedding_function=self.embedding_function,
)
prepared = _prepare_documents_for_chromadb(documents)
collection.add(
ids=prepared.ids,
documents=prepared.texts,
metadatas=prepared.metadatas,
)
async def aadd_documents(self, **kwargs: Unpack[BaseCollectionAddParams]) -> None:
"""Add documents with their embeddings to a collection asynchronously.
Performs an upsert operation - documents with existing IDs are updated.
Generates embeddings automatically using the configured embedding function.
Keyword Args:
collection_name: The name of the collection to add documents to.
documents: List of BaseRecord dicts containing:
- content: The text content (required)
- doc_id: Optional unique identifier (auto-generated if missing)
- metadata: Optional metadata dictionary
Raises:
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
ValueError: If collection doesn't exist or documents list is empty.
ConnectionError: If unable to connect to ChromaDB server.
"""
if not _is_async_client(self.client):
raise TypeError(
"Asynchronous method aadd_documents() requires an AsyncClientAPI. "
"Use add_documents() for ClientAPI."
)
collection_name = kwargs["collection_name"]
documents = kwargs["documents"]
if not documents:
raise ValueError("Documents list cannot be empty")
collection = await self.client.get_collection(
name=collection_name,
embedding_function=self.embedding_function,
)
prepared = _prepare_documents_for_chromadb(documents)
await collection.add(
ids=prepared.ids,
documents=prepared.texts,
metadatas=prepared.metadatas,
)
def search(
self, **kwargs: Unpack[ChromaDBCollectionSearchParams]
) -> list[SearchResult]:
"""Search for similar documents using a query.
Performs semantic search to find documents similar to the query text.
Uses the configured embedding function to generate query embeddings.
Keyword Args:
collection_name: Name of the collection to search in.
query: The text query to search for.
limit: Maximum number of results to return (default: 10).
metadata_filter: Optional filter for metadata fields.
score_threshold: Optional minimum similarity score (0-1) for results.
where: Optional ChromaDB where clause for metadata filtering.
where_document: Optional ChromaDB where clause for document content filtering.
include: Optional list of fields to include in results.
Returns:
List of SearchResult dicts containing id, content, metadata, and score.
Raises:
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
ValueError: If collection doesn't exist.
ConnectionError: If unable to connect to ChromaDB server.
"""
if not _is_sync_client(self.client):
raise TypeError(
"Synchronous method search() requires a ClientAPI. "
"Use asearch() for AsyncClientAPI."
)
params = _extract_search_params(kwargs)
collection = self.client.get_collection(
name=params.collection_name,
embedding_function=self.embedding_function,
)
where = params.where if params.where is not None else params.metadata_filter
results: QueryResult = collection.query(
query_texts=[params.query],
n_results=params.limit,
where=where,
where_document=params.where_document,
include=params.include,
)
return _process_query_results(
collection=collection,
results=results,
params=params,
)
async def asearch(
self, **kwargs: Unpack[ChromaDBCollectionSearchParams]
) -> list[SearchResult]:
"""Search for similar documents using a query asynchronously.
Performs semantic search to find documents similar to the query text.
Uses the configured embedding function to generate query embeddings.
Keyword Args:
collection_name: Name of the collection to search in.
query: The text query to search for.
limit: Maximum number of results to return (default: 10).
metadata_filter: Optional filter for metadata fields.
score_threshold: Optional minimum similarity score (0-1) for results.
where: Optional ChromaDB where clause for metadata filtering.
where_document: Optional ChromaDB where clause for document content filtering.
include: Optional list of fields to include in results.
Returns:
List of SearchResult dicts containing id, content, metadata, and score.
Raises:
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
ValueError: If collection doesn't exist.
ConnectionError: If unable to connect to ChromaDB server.
"""
if not _is_async_client(self.client):
raise TypeError(
"Asynchronous method asearch() requires an AsyncClientAPI. "
"Use search() for ClientAPI."
)
params = _extract_search_params(kwargs)
collection = await self.client.get_collection(
name=params.collection_name,
embedding_function=self.embedding_function,
)
where = params.where if params.where is not None else params.metadata_filter
results: QueryResult = await collection.query(
query_texts=[params.query],
n_results=params.limit,
where=where,
where_document=params.where_document,
include=params.include,
)
return _process_query_results(
collection=collection,
results=results,
params=params,
)
def delete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
"""Delete a collection and all its data.
Permanently removes a collection and all documents, embeddings, and metadata it contains.
This operation cannot be undone.
Keyword Args:
collection_name: Name of the collection to delete.
Raises:
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
ValueError: If collection doesn't exist.
ConnectionError: If unable to connect to ChromaDB server.
Example:
>>> client = ChromaDBClient()
>>> client.delete_collection(collection_name="old_documents")
"""
if not _is_sync_client(self.client):
raise TypeError(
"Synchronous method delete_collection() requires a ClientAPI. "
"Use adelete_collection() for AsyncClientAPI."
)
collection_name = kwargs["collection_name"]
self.client.delete_collection(name=collection_name)
async def adelete_collection(self, **kwargs: Unpack[BaseCollectionParams]) -> None:
"""Delete a collection and all its data asynchronously.
Permanently removes a collection and all documents, embeddings, and metadata it contains.
This operation cannot be undone.
Keyword Args:
collection_name: Name of the collection to delete.
Raises:
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
ValueError: If collection doesn't exist.
ConnectionError: If unable to connect to ChromaDB server.
Example:
>>> import asyncio
>>> async def main():
... client = ChromaDBClient()
... await client.adelete_collection(collection_name="old_documents")
>>> asyncio.run(main())
"""
if not _is_async_client(self.client):
raise TypeError(
"Asynchronous method adelete_collection() requires an AsyncClientAPI. "
"Use delete_collection() for ClientAPI."
)
collection_name = kwargs["collection_name"]
await self.client.delete_collection(name=collection_name)
def reset(self) -> None:
"""Reset the vector database by deleting all collections and data.
Completely clears the ChromaDB instance, removing all collections,
documents, embeddings, and metadata. This operation cannot be undone.
Use with extreme caution in production environments.
Raises:
TypeError: If AsyncClientAPI is used instead of ClientAPI for sync operations.
ConnectionError: If unable to connect to ChromaDB server.
Example:
>>> client = ChromaDBClient()
>>> client.reset() # Removes ALL data from ChromaDB
"""
if not _is_sync_client(self.client):
raise TypeError(
"Synchronous method reset() requires a ClientAPI. "
"Use areset() for AsyncClientAPI."
)
self.client.reset()
async def areset(self) -> None:
"""Reset the vector database by deleting all collections and data asynchronously.
Completely clears the ChromaDB instance, removing all collections,
documents, embeddings, and metadata. This operation cannot be undone.
Use with extreme caution in production environments.
Raises:
TypeError: If ClientAPI is used instead of AsyncClientAPI for async operations.
ConnectionError: If unable to connect to ChromaDB server.
Example:
>>> import asyncio
>>> async def main():
... client = ChromaDBClient()
... await client.areset() # Removes ALL data from ChromaDB
>>> asyncio.run(main())
"""
if not _is_async_client(self.client):
raise TypeError(
"Asynchronous method areset() requires an AsyncClientAPI. "
"Use reset() for ClientAPI."
)
await self.client.reset()

View File

@@ -0,0 +1,85 @@
"""Type definitions specific to ChromaDB implementation."""
from collections.abc import Mapping
from typing import Any, NamedTuple
from chromadb.api import ClientAPI, AsyncClientAPI
from chromadb.api.configuration import CollectionConfigurationInterface
from chromadb.api.types import (
CollectionMetadata,
DataLoader,
Embeddable,
EmbeddingFunction as ChromaEmbeddingFunction,
Include,
Loadable,
Where,
WhereDocument,
)
from crewai.rag.core.base_client import BaseCollectionParams, BaseCollectionSearchParams
ChromaDBClientType = ClientAPI | AsyncClientAPI
class PreparedDocuments(NamedTuple):
"""Prepared documents ready for ChromaDB insertion.
Attributes:
ids: List of document IDs
texts: List of document texts
metadatas: List of document metadata mappings
"""
ids: list[str]
texts: list[str]
metadatas: list[Mapping[str, str | int | float | bool]]
class ExtractedSearchParams(NamedTuple):
"""Extracted search parameters for ChromaDB queries.
Attributes:
collection_name: Name of the collection to search
query: Search query text
limit: Maximum number of results
metadata_filter: Optional metadata filter
score_threshold: Optional minimum similarity score
where: Optional ChromaDB where clause
where_document: Optional ChromaDB document filter
include: Fields to include in results
"""
collection_name: str
query: str
limit: int
metadata_filter: dict[str, Any] | None
score_threshold: float | None
where: Where | None
where_document: WhereDocument | None
include: Include
class ChromaDBCollectionCreateParams(BaseCollectionParams, total=False):
"""Parameters for creating a ChromaDB collection.
This class extends BaseCollectionParams to include any additional
parameters specific to ChromaDB collection creation.
"""
configuration: CollectionConfigurationInterface
metadata: CollectionMetadata
embedding_function: ChromaEmbeddingFunction[Embeddable]
data_loader: DataLoader[Loadable]
get_or_create: bool
class ChromaDBCollectionSearchParams(BaseCollectionSearchParams, total=False):
"""Parameters for searching a ChromaDB collection.
This class extends BaseCollectionSearchParams to include ChromaDB-specific
search parameters like where clauses and include options.
"""
where: Where
where_document: WhereDocument
include: Include

View File

@@ -0,0 +1,220 @@
"""Utility functions for ChromaDB client implementation."""
import hashlib
from collections.abc import Mapping
from typing import Literal, TypeGuard, cast
from chromadb.api import AsyncClientAPI, ClientAPI
from chromadb.api.types import (
Include,
IncludeEnum,
QueryResult,
)
from chromadb.api.models.AsyncCollection import AsyncCollection
from chromadb.api.models.Collection import Collection
from crewai.rag.chromadb.types import (
ChromaDBClientType,
ChromaDBCollectionSearchParams,
ExtractedSearchParams,
PreparedDocuments,
)
from crewai.rag.types import BaseRecord, SearchResult
def _is_sync_client(client: ChromaDBClientType) -> TypeGuard[ClientAPI]:
"""Type guard to check if the client is a synchronous ClientAPI.
Args:
client: The client to check.
Returns:
True if the client is a ClientAPI, False otherwise.
"""
return isinstance(client, ClientAPI)
def _is_async_client(client: ChromaDBClientType) -> TypeGuard[AsyncClientAPI]:
"""Type guard to check if the client is an asynchronous AsyncClientAPI.
Args:
client: The client to check.
Returns:
True if the client is an AsyncClientAPI, False otherwise.
"""
return isinstance(client, AsyncClientAPI)
def _prepare_documents_for_chromadb(
documents: list[BaseRecord],
) -> PreparedDocuments:
"""Prepare documents for ChromaDB by extracting IDs, texts, and metadata.
Args:
documents: List of BaseRecord documents to prepare.
Returns:
PreparedDocuments with ids, texts, and metadatas ready for ChromaDB.
"""
ids: list[str] = []
texts: list[str] = []
metadatas: list[Mapping[str, str | int | float | bool]] = []
for doc in documents:
if "doc_id" in doc:
ids.append(doc["doc_id"])
else:
content_hash = hashlib.sha256(doc["content"].encode()).hexdigest()[:16]
ids.append(content_hash)
texts.append(doc["content"])
metadata = doc.get("metadata")
if metadata:
if isinstance(metadata, list):
metadatas.append(metadata[0] if metadata else {})
else:
metadatas.append(metadata)
else:
metadatas.append({})
return PreparedDocuments(ids, texts, metadatas)
def _extract_search_params(
kwargs: ChromaDBCollectionSearchParams,
) -> ExtractedSearchParams:
"""Extract search parameters from kwargs.
Args:
kwargs: Keyword arguments containing search parameters.
Returns:
ExtractedSearchParams with all extracted parameters.
"""
return ExtractedSearchParams(
collection_name=kwargs["collection_name"],
query=kwargs["query"],
limit=kwargs.get("limit", 10),
metadata_filter=kwargs.get("metadata_filter"),
score_threshold=kwargs.get("score_threshold"),
where=kwargs.get("where"),
where_document=kwargs.get("where_document"),
include=kwargs.get(
"include",
[IncludeEnum.metadatas, IncludeEnum.documents, IncludeEnum.distances],
),
)
def _convert_distance_to_score(
distance: float,
distance_metric: Literal["l2", "cosine", "ip"],
) -> float:
"""Convert ChromaDB distance to similarity score.
Notes:
Assuming all embedding are unit-normalized for now, including custom embeddings.
Args:
distance: The distance value from ChromaDB.
distance_metric: The distance metric used ("l2", "cosine", or "ip").
Returns:
Similarity score in range [0, 1] where 1 is most similar.
"""
if distance_metric == "cosine":
score = 1.0 - 0.5 * distance
return max(0.0, min(1.0, score))
raise ValueError(f"Unsupported distance metric: {distance_metric}")
def _convert_chromadb_results_to_search_results(
results: QueryResult,
include: Include,
distance_metric: Literal["l2", "cosine", "ip"],
score_threshold: float | None = None,
) -> list[SearchResult]:
"""Convert ChromaDB query results to SearchResult format.
Args:
results: ChromaDB query results.
include: List of fields that were included in the query.
distance_metric: The distance metric used by the collection.
score_threshold: Optional minimum similarity score (0-1) for results.
Returns:
List of SearchResult dicts containing id, content, metadata, and score.
"""
search_results: list[SearchResult] = []
include_strings = [item.value for item in include]
ids = results["ids"][0] if results.get("ids") else []
documents_list = results.get("documents")
documents = (
documents_list[0] if documents_list and "documents" in include_strings else []
)
metadatas_list = results.get("metadatas")
metadatas = (
metadatas_list[0] if metadatas_list and "metadatas" in include_strings else []
)
distances_list = results.get("distances")
distances = (
distances_list[0] if distances_list and "distances" in include_strings else []
)
for i, doc_id in enumerate(ids):
if not distances or i >= len(distances):
continue
distance = distances[i]
score = _convert_distance_to_score(
distance=distance, distance_metric=distance_metric
)
if score_threshold and score < score_threshold:
continue
result: SearchResult = {
"id": doc_id,
"content": documents[i] if documents and i < len(documents) else "",
"metadata": dict(metadatas[i]) if metadatas and i < len(metadatas) else {},
"score": score,
}
search_results.append(result)
return search_results
def _process_query_results(
collection: Collection | AsyncCollection,
results: QueryResult,
params: ExtractedSearchParams,
) -> list[SearchResult]:
"""Process ChromaDB query results and convert to SearchResult format.
Args:
collection: The ChromaDB collection (sync or async) that was queried.
results: Raw query results from ChromaDB.
params: The search parameters used for the query.
Returns:
List of SearchResult dicts containing id, content, metadata, and score.
"""
distance_metric = cast(
Literal["l2", "cosine", "ip"],
collection.metadata.get("hnsw:space", "l2") if collection.metadata else "l2",
)
return _convert_chromadb_results_to_search_results(
results=results,
include=params.include,
distance_metric=distance_metric,
score_threshold=params.score_threshold,
)

View File

@@ -23,6 +23,7 @@ from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageFinishedEvent
from crewai.process import Process
def test_agent_llm_creation_with_env_vars():
# Store original environment variables
original_api_key = os.environ.get("OPENAI_API_KEY")
@@ -235,7 +236,7 @@ def test_logging_tool_usage():
)
assert agent.llm.model == "gpt-4o-mini"
assert agent.tools_handler.last_used_tool == {}
assert agent.tools_handler.last_used_tool is None
task = Task(
description="What is 3 times 4?",
agent=agent,
@@ -593,42 +594,17 @@ def test_agent_repeated_tool_usage_check_even_with_disabled_cache(capsys):
)
captured = capsys.readouterr()
output = (
captured.out.replace("\n", " ")
.replace(" ", " ")
.strip()
.replace("", "")
.replace("", "")
.replace("", "")
.replace("", "")
.replace("", "")
.replace("", "")
.replace("[", "")
.replace("]", "")
.replace("bold", "")
.replace("blue", "")
.replace("yellow", "")
.replace("green", "")
.replace("red", "")
.replace("dim", "")
.replace("🤖", "")
.replace("🔧", "")
.replace("", "")
.replace("\x1b[93m", "")
.replace("\x1b[00m", "")
.replace("\\", "")
.replace('"', "")
.replace("'", "")
)
# Look for the message in the normalized output, handling the apostrophe difference
expected_message = (
"I tried reusing the same input, I must stop using this action input"
)
# More flexible check, look for either the repeated usage message or verification that max iterations was reached
output_lower = captured.out.lower()
has_repeated_usage_message = "tried reusing the same input" in output_lower
has_max_iterations = "maximum iterations reached" in output_lower
has_final_answer = "final answer" in output_lower or "42" in captured.out
assert (
expected_message in output
), f"Expected message not found in output. Output was: {output}"
has_repeated_usage_message or (has_max_iterations and has_final_answer)
), f"Expected repeated tool usage handling or proper max iteration handling. Output was: {captured.out[:500]}..."
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -783,10 +759,10 @@ def test_agent_without_max_rpm_respects_crew_rpm(capsys):
with patch.object(RPMController, "_wait_for_next_minute") as moveon:
moveon.return_value = True
crew.kickoff()
captured = capsys.readouterr()
assert "get_final_answer" in captured.out
assert "Max RPM reached, waiting for next minute to start." in captured.out
result = crew.kickoff()
# Verify the crew executed and RPM limit was triggered
assert result is not None
assert moveon.called
moveon.assert_called_once()
@@ -1213,17 +1189,13 @@ Thought:<|eot_id|>
def test_task_allow_crewai_trigger_context():
from crewai import Crew
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory"
)
agent = Agent(role="test role", goal="test goal", backstory="test backstory")
task = Task(
description="Analyze the data",
expected_output="Analysis report",
agent=agent,
allow_crewai_trigger_context=True
allow_crewai_trigger_context=True,
)
crew = Crew(agents=[agent], tasks=[task])
crew.kickoff({"crewai_trigger_payload": "Important context data"})
@@ -1238,17 +1210,13 @@ def test_task_allow_crewai_trigger_context():
def test_task_without_allow_crewai_trigger_context():
from crewai import Crew
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory"
)
agent = Agent(role="test role", goal="test goal", backstory="test backstory")
task = Task(
description="Analyze the data",
expected_output="Analysis report",
agent=agent,
allow_crewai_trigger_context=False
allow_crewai_trigger_context=False,
)
crew = Crew(agents=[agent], tasks=[task])
@@ -1265,23 +1233,18 @@ def test_task_without_allow_crewai_trigger_context():
def test_task_allow_crewai_trigger_context_no_payload():
from crewai import Crew
agent = Agent(
role="test role",
goal="test goal",
backstory="test backstory"
)
agent = Agent(role="test role", goal="test goal", backstory="test backstory")
task = Task(
description="Analyze the data",
expected_output="Analysis report",
agent=agent,
allow_crewai_trigger_context=True
allow_crewai_trigger_context=True,
)
crew = Crew(agents=[agent], tasks=[task])
crew.kickoff({"other_input": "other data"})
prompt = task.prompt()
assert "Analyze the data" in prompt
@@ -1293,7 +1256,9 @@ def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
from crewai import Crew
agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")
agent2 = Agent(
role="Second Agent", goal="Second goal", backstory="Second backstory"
)
first_task = Task(
description="Process initial data",
@@ -1301,12 +1266,11 @@ def test_do_not_allow_crewai_trigger_context_for_first_task_hierarchical():
agent=agent1,
)
crew = Crew(
agents=[agent1, agent2],
tasks=[first_task],
process=Process.hierarchical,
manager_llm="gpt-4o"
manager_llm="gpt-4o",
)
crew.kickoff({"crewai_trigger_payload": "Initial context data"})
@@ -1321,7 +1285,9 @@ def test_first_task_auto_inject_trigger():
from crewai import Crew
agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")
agent2 = Agent(
role="Second Agent", goal="Second goal", backstory="Second backstory"
)
first_task = Task(
description="Process initial data",
@@ -1335,10 +1301,7 @@ def test_first_task_auto_inject_trigger():
agent=agent2,
)
crew = Crew(
agents=[agent1, agent2],
tasks=[first_task, second_task]
)
crew = Crew(agents=[agent1, agent2], tasks=[first_task, second_task])
crew.kickoff({"crewai_trigger_payload": "Initial context data"})
first_prompt = first_task.prompt()
@@ -1349,31 +1312,31 @@ def test_first_task_auto_inject_trigger():
assert "Process secondary data" in second_prompt
assert "Trigger Payload:" not in second_prompt
@pytest.mark.vcr(filter_headers=["authorization"])
def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject():
from crewai import Crew
agent1 = Agent(role="First Agent", goal="First goal", backstory="First backstory")
agent2 = Agent(role="Second Agent", goal="Second goal", backstory="Second backstory")
agent2 = Agent(
role="Second Agent", goal="Second goal", backstory="Second backstory"
)
first_task = Task(
description="Process initial data",
expected_output="Initial analysis",
agent=agent1,
allow_crewai_trigger_context=False
allow_crewai_trigger_context=False,
)
second_task = Task(
description="Process secondary data",
expected_output="Secondary analysis",
agent=agent2,
allow_crewai_trigger_context=True
allow_crewai_trigger_context=True,
)
crew = Crew(
agents=[agent1, agent2],
tasks=[first_task, second_task]
)
crew = Crew(agents=[agent1, agent2], tasks=[first_task, second_task])
crew.kickoff({"crewai_trigger_payload": "Context data"})
first_prompt = first_task.prompt()
@@ -1383,7 +1346,6 @@ def test_ensure_first_task_allow_crewai_trigger_context_is_false_does_not_inject
assert "Trigger Payload: Context data" in second_prompt
@patch("crewai.agent.CrewTrainingHandler")
def test_agent_training_handler(crew_training_handler):
task_prompt = "What is 1 + 1?"
@@ -2347,12 +2309,13 @@ def mock_get_auth_token():
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
from crewai_tools import (
SerperDevTool,
XMLSearchTool,
CSVSearchTool,
EnterpriseActionTool,
)
# Mock embedchain initialization to prevent race conditions in parallel CI execution
with patch("embedchain.client.Client.setup"):
from crewai_tools import (
SerperDevTool,
FileReadTool,
EnterpriseActionTool,
)
mock_get_response = MagicMock()
mock_get_response.status_code = 200
@@ -2368,10 +2331,9 @@ def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
},
{
"module": "crewai_tools",
"name": "XMLSearchTool",
"init_params": {"summarize": "true"},
"name": "FileReadTool",
"init_params": {"file_path": "test.txt"},
},
{"module": "crewai_tools", "name": "CSVSearchTool", "init_params": {}},
# using a tools that returns a list of BaseTools
{
"module": "crewai_tools",
@@ -2396,23 +2358,22 @@ def test_agent_from_repository(mock_get_agent, mock_get_auth_token):
assert agent.role == "test role"
assert agent.goal == "test goal"
assert agent.backstory == "test backstory"
assert len(agent.tools) == 4
assert len(agent.tools) == 3
assert isinstance(agent.tools[0], SerperDevTool)
assert agent.tools[0].n_results == 30
assert isinstance(agent.tools[1], XMLSearchTool)
assert agent.tools[1].summarize
assert isinstance(agent.tools[1], FileReadTool)
assert agent.tools[1].file_path == "test.txt"
assert isinstance(agent.tools[2], CSVSearchTool)
assert not agent.tools[2].summarize
assert isinstance(agent.tools[3], EnterpriseActionTool)
assert agent.tools[3].name == "test_name"
assert isinstance(agent.tools[2], EnterpriseActionTool)
assert agent.tools[2].name == "test_name"
@patch("crewai.cli.plus_api.PlusAPI.get_agent")
def test_agent_from_repository_override_attributes(mock_get_agent, mock_get_auth_token):
from crewai_tools import SerperDevTool
# Mock embedchain initialization to prevent race conditions in parallel CI execution
with patch("embedchain.client.Client.setup"):
from crewai_tools import SerperDevTool
mock_get_response = MagicMock()
mock_get_response.status_code = 200

View File

@@ -7,37 +7,37 @@ from crewai.task import Task
def test_agent_inject_date():
"""Test that the inject_date flag injects the current date into the task.
Tests that when inject_date=True, the current date is added to the task description.
"""
with patch('datetime.datetime') as mock_datetime:
with patch("datetime.datetime") as mock_datetime:
mock_datetime.now.return_value = datetime(2025, 1, 1)
agent = Agent(
role="test_agent",
goal="test_goal",
backstory="test_backstory",
inject_date=True,
)
task = Task(
description="Test task",
expected_output="Test output",
agent=agent,
)
# Store original description
original_description = task.description
agent._inject_date_to_task(task)
assert "Current Date: 2025-01-01" in task.description
assert task.description != original_description
def test_agent_without_inject_date():
"""Test that without inject_date flag, no date is injected.
Tests that when inject_date=False (default), no date is added to the task description.
"""
agent = Agent(
@@ -46,28 +46,28 @@ def test_agent_without_inject_date():
backstory="test_backstory",
# inject_date is False by default
)
task = Task(
description="Test task",
expected_output="Test output",
agent=agent,
)
original_description = task.description
agent._inject_date_to_task(task)
assert task.description == original_description
def test_agent_inject_date_custom_format():
"""Test that the inject_date flag with custom date_format works correctly.
Tests that when inject_date=True with a custom date_format, the date is formatted correctly.
"""
with patch('datetime.datetime') as mock_datetime:
with patch("datetime.datetime") as mock_datetime:
mock_datetime.now.return_value = datetime(2025, 1, 1)
agent = Agent(
role="test_agent",
goal="test_goal",
@@ -75,25 +75,25 @@ def test_agent_inject_date_custom_format():
inject_date=True,
date_format="%d/%m/%Y",
)
task = Task(
description="Test task",
expected_output="Test output",
agent=agent,
)
# Store original description
original_description = task.description
agent._inject_date_to_task(task)
assert "Current Date: 01/01/2025" in task.description
assert task.description != original_description
def test_agent_inject_date_invalid_format():
"""Test error handling with invalid date format.
Tests that when an invalid date_format is provided, the task description remains unchanged.
"""
agent = Agent(
@@ -103,15 +103,15 @@ def test_agent_inject_date_invalid_format():
inject_date=True,
date_format="invalid",
)
task = Task(
description="Test task",
expected_output="Test output",
agent=agent,
)
original_description = task.description
agent._inject_date_to_task(task)
assert task.description == original_description

View File

@@ -15,37 +15,37 @@ def mock_llm_responses():
"ready": "I'll solve this simple math problem.\n\nREADY: I am ready to execute the task.\n\n",
"not_ready": "I need to think about derivatives.\n\nNOT READY: I need to refine my plan because I'm not sure about the derivative rules.",
"ready_after_refine": "I'll use the power rule for derivatives where d/dx(x^n) = n*x^(n-1).\n\nREADY: I am ready to execute the task.",
"execution": "4"
"execution": "4",
}
def test_agent_with_reasoning(mock_llm_responses):
"""Test agent with reasoning."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True,
verbose=True
verbose=True,
)
task = Task(
description="Simple math task: What's 2+2?",
expected_output="The answer should be a number.",
agent=agent
agent=agent,
)
agent.llm.call = lambda messages, *args, **kwargs: (
mock_llm_responses["ready"]
if any("create a detailed plan" in msg.get("content", "") for msg in messages)
else mock_llm_responses["execution"]
)
result = agent.execute_task(task)
assert result == mock_llm_responses["execution"]
assert "Reasoning Plan:" in task.description
@@ -53,7 +53,7 @@ def test_agent_with_reasoning(mock_llm_responses):
def test_agent_with_reasoning_not_ready_initially(mock_llm_responses):
"""Test agent with reasoning that requires refinement."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
@@ -61,19 +61,21 @@ def test_agent_with_reasoning_not_ready_initially(mock_llm_responses):
llm=llm,
reasoning=True,
max_reasoning_attempts=2,
verbose=True
verbose=True,
)
task = Task(
description="Complex math task: What's the derivative of x²?",
expected_output="The answer should be a mathematical expression.",
agent=agent
agent=agent,
)
call_count = [0]
def mock_llm_call(messages, *args, **kwargs):
if any("create a detailed plan" in msg.get("content", "") for msg in messages) or any("refine your plan" in msg.get("content", "") for msg in messages):
if any(
"create a detailed plan" in msg.get("content", "") for msg in messages
) or any("refine your plan" in msg.get("content", "") for msg in messages):
call_count[0] += 1
if call_count[0] == 1:
return mock_llm_responses["not_ready"]
@@ -81,11 +83,11 @@ def test_agent_with_reasoning_not_ready_initially(mock_llm_responses):
return mock_llm_responses["ready_after_refine"]
else:
return "2x"
agent.llm.call = mock_llm_call
result = agent.execute_task(task)
assert result == "2x"
assert call_count[0] == 2 # Should have made 2 reasoning calls
assert "Reasoning Plan:" in task.description
@@ -94,7 +96,7 @@ def test_agent_with_reasoning_not_ready_initially(mock_llm_responses):
def test_agent_with_reasoning_max_attempts_reached():
"""Test agent with reasoning that reaches max attempts without being ready."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
@@ -102,52 +104,53 @@ def test_agent_with_reasoning_max_attempts_reached():
llm=llm,
reasoning=True,
max_reasoning_attempts=2,
verbose=True
verbose=True,
)
task = Task(
description="Complex math task: Solve the Riemann hypothesis.",
expected_output="A proof or disproof of the hypothesis.",
agent=agent
agent=agent,
)
call_count = [0]
def mock_llm_call(messages, *args, **kwargs):
if any("create a detailed plan" in msg.get("content", "") for msg in messages) or any("refine your plan" in msg.get("content", "") for msg in messages):
if any(
"create a detailed plan" in msg.get("content", "") for msg in messages
) or any("refine your plan" in msg.get("content", "") for msg in messages):
call_count[0] += 1
return f"Attempt {call_count[0]}: I need more time to think.\n\nNOT READY: I need to refine my plan further."
else:
return "This is an unsolved problem in mathematics."
agent.llm.call = mock_llm_call
result = agent.execute_task(task)
assert result == "This is an unsolved problem in mathematics."
assert call_count[0] == 2 # Should have made exactly 2 reasoning calls (max_attempts)
assert (
call_count[0] == 2
) # Should have made exactly 2 reasoning calls (max_attempts)
assert "Reasoning Plan:" in task.description
def test_agent_reasoning_input_validation():
"""Test input validation in AgentReasoning."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True
reasoning=True,
)
with pytest.raises(ValueError, match="Both task and agent must be provided"):
AgentReasoning(task=None, agent=agent)
task = Task(
description="Simple task",
expected_output="Simple output"
)
task = Task(description="Simple task", expected_output="Simple output")
with pytest.raises(ValueError, match="Both task and agent must be provided"):
AgentReasoning(task=task, agent=None)
@@ -155,33 +158,33 @@ def test_agent_reasoning_input_validation():
def test_agent_reasoning_error_handling():
"""Test error handling during the reasoning process."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True
reasoning=True,
)
task = Task(
description="Task that will cause an error",
expected_output="Output that will never be generated",
agent=agent
agent=agent,
)
call_count = [0]
def mock_llm_call_error(*args, **kwargs):
call_count[0] += 1
if call_count[0] <= 2: # First calls are for reasoning
raise Exception("LLM error during reasoning")
return "Fallback execution result" # Return a value for task execution
agent.llm.call = mock_llm_call_error
result = agent.execute_task(task)
assert result == "Fallback execution result"
assert call_count[0] > 2 # Ensure we called the mock multiple times
@@ -189,37 +192,36 @@ def test_agent_reasoning_error_handling():
def test_agent_with_function_calling():
"""Test agent with reasoning using function calling."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True,
verbose=True
verbose=True,
)
task = Task(
description="Simple math task: What's 2+2?",
expected_output="The answer should be a number.",
agent=agent
agent=agent,
)
agent.llm.supports_function_calling = lambda: True
def mock_function_call(messages, *args, **kwargs):
if "tools" in kwargs:
return json.dumps({
"plan": "I'll solve this simple math problem: 2+2=4.",
"ready": True
})
return json.dumps(
{"plan": "I'll solve this simple math problem: 2+2=4.", "ready": True}
)
else:
return "4"
agent.llm.call = mock_function_call
result = agent.execute_task(task)
assert result == "4"
assert "Reasoning Plan:" in task.description
assert "I'll solve this simple math problem: 2+2=4." in task.description
@@ -228,34 +230,34 @@ def test_agent_with_function_calling():
def test_agent_with_function_calling_fallback():
"""Test agent with reasoning using function calling that falls back to text parsing."""
llm = LLM("gpt-3.5-turbo")
agent = Agent(
role="Test Agent",
goal="To test the reasoning feature",
backstory="I am a test agent created to verify the reasoning feature works correctly.",
llm=llm,
reasoning=True,
verbose=True
verbose=True,
)
task = Task(
description="Simple math task: What's 2+2?",
expected_output="The answer should be a number.",
agent=agent
agent=agent,
)
agent.llm.supports_function_calling = lambda: True
def mock_function_call(messages, *args, **kwargs):
if "tools" in kwargs:
return "Invalid JSON that will trigger fallback. READY: I am ready to execute the task."
else:
return "4"
agent.llm.call = mock_function_call
result = agent.execute_task(task)
assert result == "4"
assert "Reasoning Plan:" in task.description
assert "Invalid JSON that will trigger fallback" in task.description

View File

@@ -318,11 +318,17 @@ def test_sets_parent_flow_when_inside_flow():
flow.kickoff()
assert captured_agent.parent_flow is flow
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_is_called_using_string():
guardrail_events = defaultdict(list)
from crewai.utilities.events import LLMGuardrailCompletedEvent, LLMGuardrailStartedEvent
from crewai.utilities.events import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
guardrail_events["started"].append(event)
@@ -340,17 +346,26 @@ def test_guardrail_is_called_using_string():
result = agent.kickoff(messages="Top 10 best players in the world?")
assert len(guardrail_events['started']) == 2
assert len(guardrail_events['completed']) == 2
assert not guardrail_events['completed'][0].success
assert guardrail_events['completed'][1].success
assert "Here are the top 10 best soccer players in the world, focusing exclusively on Brazilian players" in result.raw
assert len(guardrail_events["started"]) == 2
assert len(guardrail_events["completed"]) == 2
assert not guardrail_events["completed"][0].success
assert guardrail_events["completed"][1].success
assert (
"Here are the top 10 best soccer players in the world, focusing exclusively on Brazilian players"
in result.raw
)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_is_called_using_callable():
guardrail_events = defaultdict(list)
from crewai.utilities.events import LLMGuardrailCompletedEvent, LLMGuardrailStartedEvent
from crewai.utilities.events import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
guardrail_events["started"].append(event)
@@ -368,16 +383,22 @@ def test_guardrail_is_called_using_callable():
result = agent.kickoff(messages="Top 1 best players in the world?")
assert len(guardrail_events['started']) == 1
assert len(guardrail_events['completed']) == 1
assert guardrail_events['completed'][0].success
assert len(guardrail_events["started"]) == 1
assert len(guardrail_events["completed"]) == 1
assert guardrail_events["completed"][0].success
assert "Pelé - Santos, 1958" in result.raw
@pytest.mark.vcr(filter_headers=["authorization"])
def test_guardrail_reached_attempt_limit():
guardrail_events = defaultdict(list)
from crewai.utilities.events import LLMGuardrailCompletedEvent, LLMGuardrailStartedEvent
from crewai.utilities.events import (
LLMGuardrailCompletedEvent,
LLMGuardrailStartedEvent,
)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(LLMGuardrailStartedEvent)
def capture_guardrail_started(source, event):
guardrail_events["started"].append(event)
@@ -390,18 +411,23 @@ def test_guardrail_reached_attempt_limit():
role="Sports Analyst",
goal="Gather information about the best soccer players",
backstory="""You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.""",
guardrail=lambda output: (False, "You are not allowed to include Brazilian players"),
guardrail=lambda output: (
False,
"You are not allowed to include Brazilian players",
),
guardrail_max_retries=2,
)
with pytest.raises(Exception, match="Agent's guardrail failed validation after 2 retries"):
with pytest.raises(
Exception, match="Agent's guardrail failed validation after 2 retries"
):
agent.kickoff(messages="Top 10 best players in the world?")
assert len(guardrail_events['started']) == 3 # 2 retries + 1 initial call
assert len(guardrail_events['completed']) == 3 # 2 retries + 1 initial call
assert not guardrail_events['completed'][0].success
assert not guardrail_events['completed'][1].success
assert not guardrail_events['completed'][2].success
assert len(guardrail_events["started"]) == 3 # 2 retries + 1 initial call
assert len(guardrail_events["completed"]) == 3 # 2 retries + 1 initial call
assert not guardrail_events["completed"][0].success
assert not guardrail_events["completed"][1].success
assert not guardrail_events["completed"][2].success
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -414,22 +440,35 @@ def test_agent_output_when_guardrail_returns_base_model():
role="Sports Analyst",
goal="Gather information about the best soccer players",
backstory="""You are an expert at gathering and organizing information. You carefully collect details and present them in a structured way.""",
guardrail=lambda output: (True, Player(name="Lionel Messi", country="Argentina")),
guardrail=lambda output: (
True,
Player(name="Lionel Messi", country="Argentina"),
),
)
result = agent.kickoff(messages="Top 10 best players in the world?")
assert result.pydantic == Player(name="Lionel Messi", country="Argentina")
def test_lite_agent_with_custom_llm_and_guardrails():
"""Test that CustomLLM (inheriting from BaseLLM) works with guardrails."""
class CustomLLM(BaseLLM):
def __init__(self, response: str = "Custom response"):
super().__init__(model="custom-model")
self.response = response
self.call_count = 0
def call(self, messages, tools=None, callbacks=None, available_functions=None, from_task=None, from_agent=None) -> str:
def call(
self,
messages,
tools=None,
callbacks=None,
available_functions=None,
from_task=None,
from_agent=None,
) -> str:
self.call_count += 1
if "valid" in str(messages) and "feedback" in str(messages):
@@ -456,7 +495,7 @@ def test_lite_agent_with_custom_llm_and_guardrails():
goal="Analyze soccer players",
backstory="You analyze soccer players and their performance.",
llm=custom_llm,
guardrail="Only include Brazilian players"
guardrail="Only include Brazilian players",
)
result = agent.kickoff("Tell me about the best soccer players")
@@ -474,7 +513,7 @@ def test_lite_agent_with_custom_llm_and_guardrails():
goal="Test goal",
backstory="Test backstory",
llm=custom_llm2,
guardrail=test_guardrail
guardrail=test_guardrail,
)
result2 = agent2.kickoff("Test message")
@@ -484,12 +523,12 @@ def test_lite_agent_with_custom_llm_and_guardrails():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_lite_agent_with_invalid_llm():
"""Test that LiteAgent raises proper error when create_llm returns None."""
with patch('crewai.lite_agent.create_llm', return_value=None):
with patch("crewai.lite_agent.create_llm", return_value=None):
with pytest.raises(ValueError) as exc_info:
LiteAgent(
role="Test Agent",
goal="Test goal",
goal="Test goal",
backstory="Test backstory",
llm="invalid-model"
llm="invalid-model",
)
assert "Expected LLM instance of type BaseLLM" in str(exc_info.value)
assert "Expected LLM instance of type BaseLLM" in str(exc_info.value)

View File

@@ -108,7 +108,9 @@ class TestValidateToken(unittest.TestCase):
class TestTokenManager(unittest.TestCase):
def setUp(self):
@patch("crewai.cli.authentication.utils.TokenManager._get_or_create_key")
def setUp(self, mock_get_key):
mock_get_key.return_value = Fernet.generate_key()
self.token_manager = TokenManager()
@patch("crewai.cli.authentication.utils.TokenManager.read_secure_file")

View File

@@ -1,5 +1,3 @@
import pytest
from crewai.cli.constants import ENV_VARS, MODELS, PROVIDERS
@@ -21,3 +19,44 @@ def test_huggingface_models():
"""Test that Huggingface models are properly configured."""
assert "huggingface" in MODELS
assert len(MODELS["huggingface"]) > 0
def test_openai_models_include_latest():
"""Test that OpenAI models include the latest GPT-5 series."""
openai_models = MODELS["openai"]
assert "gpt-5" in openai_models
assert "gpt-5-mini" in openai_models
assert "gpt-5-nano" in openai_models
assert "gpt-4.1" in openai_models
assert "o3-mini" in openai_models
def test_anthropic_models_include_latest():
"""Test that Anthropic models include the latest Claude 4 series."""
anthropic_models = MODELS["anthropic"]
assert "claude-3.7-sonnet-20250219" in anthropic_models
assert "claude-4-sonnet-20250301" in anthropic_models
assert "claude-4.1-opus-20250315" in anthropic_models
def test_gemini_models_include_latest():
"""Test that Gemini models include the latest 2.5 series."""
gemini_models = MODELS["gemini"]
assert "gemini/gemini-2.5-pro" in gemini_models
assert "gemini/gemini-2.5-flash" in gemini_models
assert "gemini/gemini-2.5-flash-lite" in gemini_models
def test_all_providers_have_models():
"""Test that all providers in PROVIDERS have corresponding models."""
for provider in PROVIDERS:
if provider in MODELS:
assert len(MODELS[provider]) > 0, f"Provider {provider} has no models"
def test_model_format_consistency():
"""Test that model names follow consistent formatting patterns."""
for provider, models in MODELS.items():
for model in models:
assert isinstance(model, str), f"Model {model} in {provider} is not a string"
assert len(model.strip()) > 0, f"Empty model name in {provider}"

View File

@@ -4,6 +4,7 @@ import unittest
import unittest.mock
from datetime import datetime, timedelta
from contextlib import contextmanager
from pathlib import Path
from unittest import mock
from unittest.mock import MagicMock, patch
@@ -27,12 +28,18 @@ def in_temp_dir():
@pytest.fixture
def tool_command():
TokenManager().save_tokens(
"test-token", (datetime.now() + timedelta(seconds=36000)).timestamp()
)
tool_command = ToolCommand()
with patch.object(tool_command, "login"):
yield tool_command
# Create a temporary directory for each test to avoid token storage conflicts
with tempfile.TemporaryDirectory() as temp_dir:
# Mock the secure storage path to use the temp directory
with patch.object(
TokenManager, "get_secure_storage_path", return_value=Path(temp_dir)
):
TokenManager().save_tokens(
"test-token", (datetime.now() + timedelta(seconds=36000)).timestamp()
)
tool_command = ToolCommand()
with patch.object(tool_command, "login"):
yield tool_command
@patch("crewai.cli.tools.main.subprocess.run")

View File

View File

@@ -3,6 +3,7 @@ from unittest.mock import MagicMock
from crewai.agent import Agent
from crewai.task import Task
class BaseEvaluationMetricsTest:
@pytest.fixture
def mock_agent(self):
@@ -24,5 +25,5 @@ class BaseEvaluationMetricsTest:
def execution_trace(self):
return {
"thinking": ["I need to analyze this data carefully"],
"actions": ["Gathered information", "Analyzed data"]
}
"actions": ["Gathered information", "Analyzed data"],
}

View File

@@ -1,5 +1,7 @@
from unittest.mock import patch, MagicMock
from tests.experimental.evaluation.metrics.base_evaluation_metrics_test import BaseEvaluationMetricsTest
from tests.experimental.evaluation.metrics.test_base_evaluation_metrics import (
BaseEvaluationMetricsTest,
)
from crewai.experimental.evaluation.base_evaluator import EvaluationScore
from crewai.experimental.evaluation.metrics.goal_metrics import GoalAlignmentEvaluator
@@ -8,7 +10,9 @@ from crewai.utilities.llm_utils import LLM
class TestGoalAlignmentEvaluator(BaseEvaluationMetricsTest):
@patch("crewai.utilities.llm_utils.create_llm")
def test_evaluate_success(self, mock_create_llm, mock_agent, mock_task, execution_trace):
def test_evaluate_success(
self, mock_create_llm, mock_agent, mock_task, execution_trace
):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = """
{
@@ -24,7 +28,7 @@ class TestGoalAlignmentEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="This is the final output"
final_output="This is the final output",
)
assert isinstance(result, EvaluationScore)
@@ -40,7 +44,9 @@ class TestGoalAlignmentEvaluator(BaseEvaluationMetricsTest):
assert mock_task.description in prompt[1]["content"]
@patch("crewai.utilities.llm_utils.create_llm")
def test_evaluate_error_handling(self, mock_create_llm, mock_agent, mock_task, execution_trace):
def test_evaluate_error_handling(
self, mock_create_llm, mock_agent, mock_task, execution_trace
):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = "Invalid JSON response"
mock_create_llm.return_value = mock_llm
@@ -51,7 +57,7 @@ class TestGoalAlignmentEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="This is the final output"
final_output="This is the final output",
)
assert isinstance(result, EvaluationScore)

View File

@@ -6,10 +6,13 @@ from crewai.tasks.task_output import TaskOutput
from crewai.experimental.evaluation.metrics.reasoning_metrics import (
ReasoningEfficiencyEvaluator,
)
from tests.experimental.evaluation.metrics.base_evaluation_metrics_test import BaseEvaluationMetricsTest
from tests.experimental.evaluation.metrics.test_base_evaluation_metrics import (
BaseEvaluationMetricsTest,
)
from crewai.utilities.llm_utils import LLM
from crewai.experimental.evaluation.base_evaluator import EvaluationScore
class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
@pytest.fixture
def mock_output(self):
@@ -23,18 +26,18 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
{
"prompt": "How should I approach this task?",
"response": "I'll first research the topic, then compile findings.",
"timestamp": 1626987654
"timestamp": 1626987654,
},
{
"prompt": "What resources should I use?",
"response": "I'll use relevant academic papers and reliable websites.",
"timestamp": 1626987754
"timestamp": 1626987754,
},
{
"prompt": "How should I structure the output?",
"response": "I'll organize information clearly with headings and bullet points.",
"timestamp": 1626987854
}
"timestamp": 1626987854,
},
]
def test_insufficient_llm_calls(self, mock_agent, mock_task, mock_output):
@@ -45,7 +48,7 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output=mock_output
final_output=mock_output,
)
assert isinstance(result, EvaluationScore)
@@ -53,7 +56,9 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
assert "Insufficient LLM calls" in result.feedback
@patch("crewai.utilities.llm_utils.create_llm")
def test_successful_evaluation(self, mock_create_llm, mock_agent, mock_task, mock_output, llm_calls):
def test_successful_evaluation(
self, mock_create_llm, mock_agent, mock_task, mock_output, llm_calls
):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = """
{
@@ -83,7 +88,7 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output=mock_output
final_output=mock_output,
)
# Assertions
@@ -97,7 +102,9 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
mock_llm.call.assert_called_once()
@patch("crewai.utilities.llm_utils.create_llm")
def test_parse_error_handling(self, mock_create_llm, mock_agent, mock_task, mock_output, llm_calls):
def test_parse_error_handling(
self, mock_create_llm, mock_agent, mock_task, mock_output, llm_calls
):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = "Invalid JSON response"
mock_create_llm.return_value = mock_llm
@@ -114,7 +121,7 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output=mock_output
final_output=mock_output,
)
# Assertions for error handling
@@ -126,11 +133,31 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
def test_loop_detection(self, mock_create_llm, mock_agent, mock_task, mock_output):
# Setup LLM calls with a repeating pattern
repetitive_llm_calls = [
{"prompt": "How to solve?", "response": "I'll try method A", "timestamp": 1000},
{"prompt": "Let me try method A", "response": "It didn't work", "timestamp": 1100},
{"prompt": "How to solve?", "response": "I'll try method A again", "timestamp": 1200},
{"prompt": "Let me try method A", "response": "It didn't work", "timestamp": 1300},
{"prompt": "How to solve?", "response": "I'll try method A one more time", "timestamp": 1400}
{
"prompt": "How to solve?",
"response": "I'll try method A",
"timestamp": 1000,
},
{
"prompt": "Let me try method A",
"response": "It didn't work",
"timestamp": 1100,
},
{
"prompt": "How to solve?",
"response": "I'll try method A again",
"timestamp": 1200,
},
{
"prompt": "Let me try method A",
"response": "It didn't work",
"timestamp": 1300,
},
{
"prompt": "How to solve?",
"response": "I'll try method A one more time",
"timestamp": 1400,
},
]
mock_llm = MagicMock(spec=LLM)
@@ -158,7 +185,7 @@ class TestReasoningEfficiencyEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output=mock_output
final_output=mock_output,
)
assert isinstance(result, EvaluationScore)

View File

@@ -1,13 +1,20 @@
from unittest.mock import patch, MagicMock
from crewai.experimental.evaluation.base_evaluator import EvaluationScore
from crewai.experimental.evaluation.metrics.semantic_quality_metrics import SemanticQualityEvaluator
from tests.experimental.evaluation.metrics.base_evaluation_metrics_test import BaseEvaluationMetricsTest
from crewai.experimental.evaluation.metrics.semantic_quality_metrics import (
SemanticQualityEvaluator,
)
from tests.experimental.evaluation.metrics.test_base_evaluation_metrics import (
BaseEvaluationMetricsTest,
)
from crewai.utilities.llm_utils import LLM
class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
@patch("crewai.utilities.llm_utils.create_llm")
def test_evaluate_success(self, mock_create_llm, mock_agent, mock_task, execution_trace):
def test_evaluate_success(
self, mock_create_llm, mock_agent, mock_task, execution_trace
):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = """
{
@@ -23,7 +30,7 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="This is a well-structured analysis of the data."
final_output="This is a well-structured analysis of the data.",
)
assert isinstance(result, EvaluationScore)
@@ -39,7 +46,9 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
assert mock_task.description in prompt[1]["content"]
@patch("crewai.utilities.llm_utils.create_llm")
def test_evaluate_with_empty_output(self, mock_create_llm, mock_agent, mock_task, execution_trace):
def test_evaluate_with_empty_output(
self, mock_create_llm, mock_agent, mock_task, execution_trace
):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = """
{
@@ -55,7 +64,7 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output=""
final_output="",
)
assert isinstance(result, EvaluationScore)
@@ -63,7 +72,9 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
assert "empty or minimal" in result.feedback
@patch("crewai.utilities.llm_utils.create_llm")
def test_evaluate_error_handling(self, mock_create_llm, mock_agent, mock_task, execution_trace):
def test_evaluate_error_handling(
self, mock_create_llm, mock_agent, mock_task, execution_trace
):
mock_llm = MagicMock(spec=LLM)
mock_llm.call.return_value = "Invalid JSON response"
mock_create_llm.return_value = mock_llm
@@ -74,9 +85,9 @@ class TestSemanticQualityEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="This is the output."
final_output="This is the output.",
)
assert isinstance(result, EvaluationScore)
assert result.score is None
assert "Failed to parse" in result.feedback
assert "Failed to parse" in result.feedback

View File

@@ -3,10 +3,13 @@ from unittest.mock import patch, MagicMock
from crewai.experimental.evaluation.metrics.tools_metrics import (
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
ToolInvocationEvaluator
ToolInvocationEvaluator,
)
from crewai.utilities.llm_utils import LLM
from tests.experimental.evaluation.metrics.base_evaluation_metrics_test import BaseEvaluationMetricsTest
from tests.experimental.evaluation.metrics.test_base_evaluation_metrics import (
BaseEvaluationMetricsTest,
)
class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
def test_no_tools_available(self, mock_task, mock_agent):
@@ -20,7 +23,7 @@ class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score is None
@@ -35,7 +38,7 @@ class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score is None
@@ -56,8 +59,12 @@ class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
# Setup execution trace with tool uses
execution_trace = {
"tool_uses": [
{"tool": "search_tool", "input": {"query": "test query"}, "output": "search results"},
{"tool": "calculator", "input": {"expression": "2+2"}, "output": "4"}
{
"tool": "search_tool",
"input": {"query": "test query"},
"output": "search results",
},
{"tool": "calculator", "input": {"expression": "2+2"}, "output": "4"},
]
}
@@ -66,7 +73,7 @@ class TestToolSelectionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score == 8.5
@@ -90,7 +97,7 @@ class TestParameterExtractionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score is None
@@ -117,14 +124,14 @@ class TestParameterExtractionEvaluator(BaseEvaluationMetricsTest):
"tool": "search_tool",
"input": {"query": "test query"},
"output": "search results",
"error": None
"error": None,
},
{
"tool": "calculator",
"input": {"expression": "2+2"},
"output": "4",
"error": None
}
"error": None,
},
]
}
@@ -133,7 +140,7 @@ class TestParameterExtractionEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score == 9.0
@@ -149,7 +156,7 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score is None
@@ -171,8 +178,12 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
# Setup execution trace with tool uses
execution_trace = {
"tool_uses": [
{"tool": "search_tool", "input": {"query": "test query"}, "output": "search results"},
{"tool": "calculator", "input": {"expression": "2+2"}, "output": "4"}
{
"tool": "search_tool",
"input": {"query": "test query"},
"output": "search results",
},
{"tool": "calculator", "input": {"expression": "2+2"}, "output": "4"},
]
}
@@ -181,7 +192,7 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score == 8.0
@@ -207,14 +218,14 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
"tool": "search_tool",
"input": {"query": "test query"},
"output": "search results",
"error": None
"error": None,
},
{
"tool": "calculator",
"input": {"expression": "2+"},
"output": None,
"error": "Invalid expression"
}
"error": "Invalid expression",
},
]
}
@@ -223,7 +234,7 @@ class TestToolInvocationEvaluator(BaseEvaluationMetricsTest):
agent=mock_agent,
task=mock_task,
execution_trace=execution_trace,
final_output="Final output"
final_output="Final output",
)
assert result.score == 5.5

View File

@@ -17,6 +17,7 @@ from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.storage.interface import Storage
from crewai.task import Task
@pytest.fixture
def mock_mem0_memory():
mock_memory = MagicMock(spec=Memory)
@@ -212,6 +213,7 @@ def custom_storage():
custom_storage = CustomStorage()
return custom_storage
def test_external_memory_custom_storage(custom_storage, crew_with_external_memory):
external_memory = ExternalMemory(storage=custom_storage)
@@ -233,12 +235,14 @@ def test_external_memory_custom_storage(custom_storage, crew_with_external_memor
assert len(results) == 0
def test_external_memory_search_events(custom_storage, external_memory_with_mocked_config):
def test_external_memory_search_events(
custom_storage, external_memory_with_mocked_config
):
events = defaultdict(list)
external_memory_with_mocked_config.storage = custom_storage
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MemoryQueryStartedEvent)
def on_search_started(source, event):
events["MemoryQueryStartedEvent"].append(event)
@@ -258,37 +262,39 @@ def test_external_memory_search_events(custom_storage, external_memory_with_mock
assert len(events["MemoryQueryFailedEvent"]) == 0
assert dict(events["MemoryQueryStartedEvent"][0]) == {
'timestamp': ANY,
'type': 'memory_query_started',
'source_fingerprint': None,
'source_type': 'external_memory',
'fingerprint_metadata': None,
'query': 'test value',
'limit': 3,
'score_threshold': 0.35
"timestamp": ANY,
"type": "memory_query_started",
"source_fingerprint": None,
"source_type": "external_memory",
"fingerprint_metadata": None,
"query": "test value",
"limit": 3,
"score_threshold": 0.35,
}
assert dict(events["MemoryQueryCompletedEvent"][0]) == {
'timestamp': ANY,
'type': 'memory_query_completed',
'source_fingerprint': None,
'source_type': 'external_memory',
'fingerprint_metadata': None,
'query': 'test value',
'results': [],
'limit': 3,
'score_threshold': 0.35,
'query_time_ms': ANY
"timestamp": ANY,
"type": "memory_query_completed",
"source_fingerprint": None,
"source_type": "external_memory",
"fingerprint_metadata": None,
"query": "test value",
"results": [],
"limit": 3,
"score_threshold": 0.35,
"query_time_ms": ANY,
}
def test_external_memory_save_events(custom_storage, external_memory_with_mocked_config):
def test_external_memory_save_events(
custom_storage, external_memory_with_mocked_config
):
events = defaultdict(list)
external_memory_with_mocked_config.storage = custom_storage
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MemorySaveStartedEvent)
def on_save_started(source, event):
events["MemorySaveStartedEvent"].append(event)
@@ -308,24 +314,24 @@ def test_external_memory_save_events(custom_storage, external_memory_with_mocked
assert len(events["MemorySaveFailedEvent"]) == 0
assert dict(events["MemorySaveStartedEvent"][0]) == {
'timestamp': ANY,
'type': 'memory_save_started',
'source_fingerprint': None,
'source_type': 'external_memory',
'fingerprint_metadata': None,
'value': 'saving value',
'metadata': {'task': 'test_task'},
'agent_role': "test_agent"
"timestamp": ANY,
"type": "memory_save_started",
"source_fingerprint": None,
"source_type": "external_memory",
"fingerprint_metadata": None,
"value": "saving value",
"metadata": {"task": "test_task"},
"agent_role": "test_agent",
}
assert dict(events["MemorySaveCompletedEvent"][0]) == {
'timestamp': ANY,
'type': 'memory_save_completed',
'source_fingerprint': None,
'source_type': 'external_memory',
'fingerprint_metadata': None,
'value': 'saving value',
'metadata': {'task': 'test_task', 'agent': 'test_agent'},
'agent_role': "test_agent",
'save_time_ms': ANY
"timestamp": ANY,
"type": "memory_save_completed",
"source_fingerprint": None,
"source_type": "external_memory",
"fingerprint_metadata": None,
"value": "saving value",
"metadata": {"task": "test_task", "agent": "test_agent"},
"agent_role": "test_agent",
"save_time_ms": ANY,
}

View File

@@ -11,6 +11,7 @@ from crewai.utilities.events.memory_events import (
MemoryQueryCompletedEvent,
)
@pytest.fixture
def long_term_memory():
"""Fixture to create a LongTermMemory instance"""
@@ -21,6 +22,7 @@ def test_long_term_memory_save_events(long_term_memory):
events = defaultdict(list)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MemorySaveStartedEvent)
def on_save_started(source, event):
events["MemorySaveStartedEvent"].append(event)
@@ -60,7 +62,12 @@ def test_long_term_memory_save_events(long_term_memory):
"source_type": "long_term_memory",
"fingerprint_metadata": None,
"value": "test_task",
"metadata": {"task": "test_task", "quality": 0.5, "agent": "test_agent", "expected_output": "test_output"},
"metadata": {
"task": "test_task",
"quality": 0.5,
"agent": "test_agent",
"expected_output": "test_output",
},
"agent_role": "test_agent",
"save_time_ms": ANY,
}
@@ -70,6 +77,7 @@ def test_long_term_memory_search_events(long_term_memory):
events = defaultdict(list)
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MemoryQueryStartedEvent)
def on_search_started(source, event):
events["MemoryQueryStartedEvent"].append(event)
@@ -80,37 +88,34 @@ def test_long_term_memory_search_events(long_term_memory):
test_query = "test query"
long_term_memory.search(
test_query,
latest_n=5
)
long_term_memory.search(test_query, latest_n=5)
assert len(events["MemoryQueryStartedEvent"]) == 1
assert len(events["MemoryQueryCompletedEvent"]) == 1
assert len(events["MemoryQueryFailedEvent"]) == 0
assert dict(events["MemoryQueryStartedEvent"][0]) == {
'timestamp': ANY,
'type': 'memory_query_started',
'source_fingerprint': None,
'source_type': 'long_term_memory',
'fingerprint_metadata': None,
'query': 'test query',
'limit': 5,
'score_threshold': None
"timestamp": ANY,
"type": "memory_query_started",
"source_fingerprint": None,
"source_type": "long_term_memory",
"fingerprint_metadata": None,
"query": "test query",
"limit": 5,
"score_threshold": None,
}
assert dict(events["MemoryQueryCompletedEvent"][0]) == {
'timestamp': ANY,
'type': 'memory_query_completed',
'source_fingerprint': None,
'source_type': 'long_term_memory',
'fingerprint_metadata': None,
'query': 'test query',
'results': None,
'limit': 5,
'score_threshold': None,
'query_time_ms': ANY
"timestamp": ANY,
"type": "memory_query_completed",
"source_fingerprint": None,
"source_type": "long_term_memory",
"fingerprint_metadata": None,
"query": "test query",
"results": None,
"limit": 5,
"score_threshold": None,
"query_time_ms": ANY,
}

0
tests/rag/__init__.py Normal file
View File

View File

View File

@@ -0,0 +1,550 @@
"""Tests for ChromaDBClient implementation."""
from unittest.mock import AsyncMock, Mock
import pytest
from crewai.rag.chromadb.client import ChromaDBClient
from crewai.rag.types import BaseRecord
@pytest.fixture
def mock_chromadb_client():
"""Create a mock ChromaDB client."""
from chromadb.api import ClientAPI
return Mock(spec=ClientAPI)
@pytest.fixture
def mock_async_chromadb_client():
"""Create a mock async ChromaDB client."""
from chromadb.api import AsyncClientAPI
return Mock(spec=AsyncClientAPI)
@pytest.fixture
def client(mock_chromadb_client) -> ChromaDBClient:
"""Create a ChromaDBClient instance for testing."""
client = ChromaDBClient()
client.client = mock_chromadb_client
client.embedding_function = Mock()
return client
@pytest.fixture
def async_client(mock_async_chromadb_client) -> ChromaDBClient:
"""Create a ChromaDBClient instance with async client for testing."""
client = ChromaDBClient()
client.client = mock_async_chromadb_client
client.embedding_function = Mock()
return client
class TestChromaDBClient:
"""Test suite for ChromaDBClient."""
def test_create_collection(self, client, mock_chromadb_client):
"""Test that create_collection calls the underlying client correctly."""
client.create_collection(collection_name="test_collection")
mock_chromadb_client.create_collection.assert_called_once_with(
name="test_collection",
configuration=None,
metadata={"hnsw:space": "cosine"},
embedding_function=client.embedding_function,
data_loader=None,
get_or_create=False,
)
def test_create_collection_with_all_params(self, client, mock_chromadb_client):
"""Test create_collection with all optional parameters."""
mock_config = Mock()
mock_metadata = {"key": "value"}
mock_embedding_func = Mock()
mock_data_loader = Mock()
client.create_collection(
collection_name="test_collection",
configuration=mock_config,
metadata=mock_metadata,
embedding_function=mock_embedding_func,
data_loader=mock_data_loader,
get_or_create=True,
)
mock_chromadb_client.create_collection.assert_called_once_with(
name="test_collection",
configuration=mock_config,
metadata=mock_metadata,
embedding_function=mock_embedding_func,
data_loader=mock_data_loader,
get_or_create=True,
)
@pytest.mark.asyncio
async def test_acreate_collection(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test that acreate_collection calls the underlying client correctly."""
# Make the mock's create_collection an AsyncMock
mock_async_chromadb_client.create_collection = AsyncMock(return_value=None)
await async_client.acreate_collection(collection_name="test_collection")
mock_async_chromadb_client.create_collection.assert_called_once_with(
name="test_collection",
configuration=None,
metadata={"hnsw:space": "cosine"},
embedding_function=async_client.embedding_function,
data_loader=None,
get_or_create=False,
)
@pytest.mark.asyncio
async def test_acreate_collection_with_all_params(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test acreate_collection with all optional parameters."""
# Make the mock's create_collection an AsyncMock
mock_async_chromadb_client.create_collection = AsyncMock(return_value=None)
mock_config = Mock()
mock_metadata = {"key": "value"}
mock_embedding_func = Mock()
mock_data_loader = Mock()
await async_client.acreate_collection(
collection_name="test_collection",
configuration=mock_config,
metadata=mock_metadata,
embedding_function=mock_embedding_func,
data_loader=mock_data_loader,
get_or_create=True,
)
mock_async_chromadb_client.create_collection.assert_called_once_with(
name="test_collection",
configuration=mock_config,
metadata=mock_metadata,
embedding_function=mock_embedding_func,
data_loader=mock_data_loader,
get_or_create=True,
)
def test_get_or_create_collection(self, client, mock_chromadb_client):
"""Test that get_or_create_collection calls the underlying client correctly."""
mock_collection = Mock()
mock_chromadb_client.get_or_create_collection.return_value = mock_collection
result = client.get_or_create_collection(collection_name="test_collection")
mock_chromadb_client.get_or_create_collection.assert_called_once_with(
name="test_collection",
configuration=None,
metadata={"hnsw:space": "cosine"},
embedding_function=client.embedding_function,
data_loader=None,
)
assert result == mock_collection
def test_get_or_create_collection_with_all_params(
self, client, mock_chromadb_client
):
"""Test get_or_create_collection with all optional parameters."""
mock_collection = Mock()
mock_chromadb_client.get_or_create_collection.return_value = mock_collection
mock_config = Mock()
mock_metadata = {"key": "value"}
mock_embedding_func = Mock()
mock_data_loader = Mock()
result = client.get_or_create_collection(
collection_name="test_collection",
configuration=mock_config,
metadata=mock_metadata,
embedding_function=mock_embedding_func,
data_loader=mock_data_loader,
)
mock_chromadb_client.get_or_create_collection.assert_called_once_with(
name="test_collection",
configuration=mock_config,
metadata=mock_metadata,
embedding_function=mock_embedding_func,
data_loader=mock_data_loader,
)
assert result == mock_collection
@pytest.mark.asyncio
async def test_aget_or_create_collection(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test that aget_or_create_collection calls the underlying client correctly."""
mock_collection = Mock()
mock_async_chromadb_client.get_or_create_collection = AsyncMock(
return_value=mock_collection
)
result = await async_client.aget_or_create_collection(
collection_name="test_collection"
)
mock_async_chromadb_client.get_or_create_collection.assert_called_once_with(
name="test_collection",
configuration=None,
metadata={"hnsw:space": "cosine"},
embedding_function=async_client.embedding_function,
data_loader=None,
)
assert result == mock_collection
@pytest.mark.asyncio
async def test_aget_or_create_collection_with_all_params(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test aget_or_create_collection with all optional parameters."""
mock_collection = Mock()
mock_async_chromadb_client.get_or_create_collection = AsyncMock(
return_value=mock_collection
)
mock_config = Mock()
mock_metadata = {"key": "value"}
mock_embedding_func = Mock()
mock_data_loader = Mock()
result = await async_client.aget_or_create_collection(
collection_name="test_collection",
configuration=mock_config,
metadata=mock_metadata,
embedding_function=mock_embedding_func,
data_loader=mock_data_loader,
)
mock_async_chromadb_client.get_or_create_collection.assert_called_once_with(
name="test_collection",
configuration=mock_config,
metadata=mock_metadata,
embedding_function=mock_embedding_func,
data_loader=mock_data_loader,
)
assert result == mock_collection
def test_add_documents(self, client, mock_chromadb_client) -> None:
"""Test that add_documents adds documents to collection."""
mock_collection = Mock()
mock_chromadb_client.get_collection.return_value = mock_collection
documents: list[BaseRecord] = [
{
"content": "Test document",
"metadata": {"source": "test"},
}
]
client.add_documents(collection_name="test_collection", documents=documents)
mock_chromadb_client.get_collection.assert_called_once_with(
name="test_collection",
embedding_function=client.embedding_function,
)
# Verify documents were added to collection
mock_collection.add.assert_called_once()
call_args = mock_collection.add.call_args
assert len(call_args.kwargs["ids"]) == 1
assert call_args.kwargs["documents"] == ["Test document"]
assert call_args.kwargs["metadatas"] == [{"source": "test"}]
def test_add_documents_with_custom_ids(self, client, mock_chromadb_client) -> None:
"""Test add_documents with custom document IDs."""
mock_collection = Mock()
mock_chromadb_client.get_collection.return_value = mock_collection
documents: list[BaseRecord] = [
{
"doc_id": "custom_id_1",
"content": "First document",
"metadata": {"source": "test1"},
},
{
"doc_id": "custom_id_2",
"content": "Second document",
"metadata": {"source": "test2"},
},
]
client.add_documents(collection_name="test_collection", documents=documents)
mock_collection.add.assert_called_once_with(
ids=["custom_id_1", "custom_id_2"],
documents=["First document", "Second document"],
metadatas=[{"source": "test1"}, {"source": "test2"}],
)
def test_add_documents_empty_list_raises_error(
self, client, mock_chromadb_client
) -> None:
"""Test that add_documents raises error for empty documents list."""
with pytest.raises(ValueError, match="Documents list cannot be empty"):
client.add_documents(collection_name="test_collection", documents=[])
@pytest.mark.asyncio
async def test_aadd_documents(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test that aadd_documents adds documents to collection asynchronously."""
mock_collection = AsyncMock()
mock_async_chromadb_client.get_collection = AsyncMock(
return_value=mock_collection
)
documents: list[BaseRecord] = [
{
"content": "Test document",
"metadata": {"source": "test"},
}
]
await async_client.aadd_documents(
collection_name="test_collection", documents=documents
)
mock_async_chromadb_client.get_collection.assert_called_once_with(
name="test_collection",
embedding_function=async_client.embedding_function,
)
# Verify documents were added to collection
mock_collection.add.assert_called_once()
call_args = mock_collection.add.call_args
assert len(call_args.kwargs["ids"]) == 1
assert call_args.kwargs["documents"] == ["Test document"]
assert call_args.kwargs["metadatas"] == [{"source": "test"}]
@pytest.mark.asyncio
async def test_aadd_documents_with_custom_ids(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test aadd_documents with custom document IDs."""
mock_collection = AsyncMock()
mock_async_chromadb_client.get_collection = AsyncMock(
return_value=mock_collection
)
documents: list[BaseRecord] = [
{
"doc_id": "custom_id_1",
"content": "First document",
"metadata": {"source": "test1"},
},
{
"doc_id": "custom_id_2",
"content": "Second document",
"metadata": {"source": "test2"},
},
]
await async_client.aadd_documents(
collection_name="test_collection", documents=documents
)
mock_collection.add.assert_called_once_with(
ids=["custom_id_1", "custom_id_2"],
documents=["First document", "Second document"],
metadatas=[{"source": "test1"}, {"source": "test2"}],
)
@pytest.mark.asyncio
async def test_aadd_documents_empty_list_raises_error(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test that aadd_documents raises error for empty documents list."""
with pytest.raises(ValueError, match="Documents list cannot be empty"):
await async_client.aadd_documents(
collection_name="test_collection", documents=[]
)
def test_search(self, client, mock_chromadb_client):
"""Test that search queries the collection correctly."""
mock_collection = Mock()
mock_collection.metadata = {"hnsw:space": "cosine"}
mock_chromadb_client.get_collection.return_value = mock_collection
mock_collection.query.return_value = {
"ids": [["doc1", "doc2"]],
"documents": [["Document 1", "Document 2"]],
"metadatas": [[{"source": "test1"}, {"source": "test2"}]],
"distances": [[0.1, 0.3]],
}
results = client.search(collection_name="test_collection", query="test query")
mock_chromadb_client.get_collection.assert_called_once_with(
name="test_collection",
embedding_function=client.embedding_function,
)
mock_collection.query.assert_called_once_with(
query_texts=["test query"],
n_results=10,
where=None,
where_document=None,
include=["metadatas", "documents", "distances"],
)
assert len(results) == 2
assert results[0]["id"] == "doc1"
assert results[0]["content"] == "Document 1"
assert results[0]["metadata"] == {"source": "test1"}
assert results[0]["score"] == 0.95
def test_search_with_optional_params(self, client, mock_chromadb_client):
"""Test search with optional parameters."""
mock_collection = Mock()
mock_collection.metadata = {"hnsw:space": "cosine"}
mock_chromadb_client.get_collection.return_value = mock_collection
mock_collection.query.return_value = {
"ids": [["doc1", "doc2", "doc3"]],
"documents": [["Document 1", "Document 2", "Document 3"]],
"metadatas": [
[{"source": "test1"}, {"source": "test2"}, {"source": "test3"}]
],
"distances": [[0.1, 0.3, 1.5]], # Last one will be filtered by threshold
}
results = client.search(
collection_name="test_collection",
query="test query",
limit=5,
metadata_filter={"source": "test"},
score_threshold=0.7,
)
mock_collection.query.assert_called_once_with(
query_texts=["test query"],
n_results=5,
where={"source": "test"},
where_document=None,
include=["metadatas", "documents", "distances"],
)
assert len(results) == 2
@pytest.mark.asyncio
async def test_asearch(self, async_client, mock_async_chromadb_client) -> None:
"""Test that asearch queries the collection correctly."""
mock_collection = AsyncMock()
mock_collection.metadata = {"hnsw:space": "cosine"}
mock_async_chromadb_client.get_collection = AsyncMock(
return_value=mock_collection
)
mock_collection.query = AsyncMock(
return_value={
"ids": [["doc1", "doc2"]],
"documents": [["Document 1", "Document 2"]],
"metadatas": [[{"source": "test1"}, {"source": "test2"}]],
"distances": [[0.1, 0.3]],
}
)
results = await async_client.asearch(
collection_name="test_collection", query="test query"
)
mock_async_chromadb_client.get_collection.assert_called_once_with(
name="test_collection",
embedding_function=async_client.embedding_function,
)
mock_collection.query.assert_called_once_with(
query_texts=["test query"],
n_results=10,
where=None,
where_document=None,
include=["metadatas", "documents", "distances"],
)
assert len(results) == 2
assert results[0]["id"] == "doc1"
assert results[0]["content"] == "Document 1"
assert results[0]["metadata"] == {"source": "test1"}
assert results[0]["score"] == 0.95
@pytest.mark.asyncio
async def test_asearch_with_optional_params(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test asearch with optional parameters."""
mock_collection = AsyncMock()
mock_collection.metadata = {"hnsw:space": "cosine"}
mock_async_chromadb_client.get_collection = AsyncMock(
return_value=mock_collection
)
mock_collection.query = AsyncMock(
return_value={
"ids": [["doc1", "doc2", "doc3"]],
"documents": [["Document 1", "Document 2", "Document 3"]],
"metadatas": [
[{"source": "test1"}, {"source": "test2"}, {"source": "test3"}]
],
"distances": [
[0.1, 0.3, 1.5]
], # Last one will be filtered by threshold
}
)
results = await async_client.asearch(
collection_name="test_collection",
query="test query",
limit=5,
metadata_filter={"source": "test"},
score_threshold=0.7,
)
mock_collection.query.assert_called_once_with(
query_texts=["test query"],
n_results=5,
where={"source": "test"},
where_document=None,
include=["metadatas", "documents", "distances"],
)
# Only 2 results should pass the score threshold
assert len(results) == 2
def test_delete_collection(self, client, mock_chromadb_client):
"""Test that delete_collection calls the underlying client correctly."""
client.delete_collection(collection_name="test_collection")
mock_chromadb_client.delete_collection.assert_called_once_with(
name="test_collection"
)
@pytest.mark.asyncio
async def test_adelete_collection(
self, async_client, mock_async_chromadb_client
) -> None:
"""Test that adelete_collection calls the underlying client correctly."""
mock_async_chromadb_client.delete_collection = AsyncMock(return_value=None)
await async_client.adelete_collection(collection_name="test_collection")
mock_async_chromadb_client.delete_collection.assert_called_once_with(
name="test_collection"
)
def test_reset(self, client, mock_chromadb_client):
"""Test that reset calls the underlying client correctly."""
mock_chromadb_client.reset.return_value = True
client.reset()
mock_chromadb_client.reset.assert_called_once_with()
@pytest.mark.asyncio
async def test_areset(self, async_client, mock_async_chromadb_client) -> None:
"""Test that areset calls the underlying client correctly."""
mock_async_chromadb_client.reset = AsyncMock(return_value=True)
await async_client.areset()
mock_async_chromadb_client.reset.assert_called_once_with()

View File

@@ -79,8 +79,10 @@ def test_telemetry_fails_due_connect_timeout(export_mock, logger_mock):
trace.get_tracer_provider().force_flush()
export_mock.assert_called_once()
logger_mock.assert_called_once_with(error)
assert export_mock.called
assert logger_mock.call_count == export_mock.call_count
for call in logger_mock.call_args_list:
assert call[0][0] == error
@pytest.mark.telemetry

View File

@@ -1,4 +1,5 @@
"""Test Agent creation and execution basic functionality."""
import hashlib
import json
from concurrent.futures import Future
@@ -26,7 +27,6 @@ from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import Logger
from crewai.utilities.events import (
CrewTrainCompletedEvent,
CrewTrainStartedEvent,
@@ -36,7 +36,6 @@ from crewai.utilities.events.crew_events import (
CrewTestCompletedEvent,
CrewTestStartedEvent,
)
from crewai.utilities.events.event_listener import EventListener
from crewai.utilities.rpm_controller import RPMController
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
@@ -52,6 +51,7 @@ from crewai.utilities.events.memory_events import (
)
from crewai.memory.external.external_memory import ExternalMemory
@pytest.fixture
def ceo():
return Agent(
@@ -311,7 +311,6 @@ def test_crew_creation(researcher, writer):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_sync_task_execution(researcher, writer):
tasks = [
Task(
description="Give me a list of 5 interesting ideas to explore for an article, what makes them unique and interesting.",
@@ -850,6 +849,7 @@ def test_crew_verbose_output(researcher, writer, capsys):
),
]
# Test with verbose=True
crew = Crew(
agents=[researcher, writer],
tasks=tasks,
@@ -857,46 +857,25 @@ def test_crew_verbose_output(researcher, writer, capsys):
verbose=True,
)
crew.kickoff()
captured = capsys.readouterr()
result = crew.kickoff()
# Filter out event listener logs (lines starting with '[')
filtered_output = "\n".join(
line for line in captured.out.split("\n") if not line.startswith("[")
# Verify the crew executed successfully and verbose was set
assert result is not None
assert crew.verbose is True
# Test with verbose=False
crew_quiet = Crew(
agents=[researcher, writer],
tasks=tasks,
process=Process.sequential,
verbose=False,
)
expected_strings = [
"🤖 Agent Started",
"Agent: Researcher",
"Task: Research AI advancements.",
"✅ Agent Final Answer",
"Agent: Researcher",
"🤖 Agent Started",
"Agent: Senior Writer",
"Task: Write about AI in healthcare.",
"✅ Agent Final Answer",
"Agent: Senior Writer",
]
result_quiet = crew_quiet.kickoff()
for expected_string in expected_strings:
assert (
expected_string in filtered_output
), f"Expected '{expected_string}' in output, but it was not found."
# Now test with verbose set to False
crew.verbose = False
crew._logger = Logger(verbose=False)
event_listener = EventListener()
event_listener.verbose = False
event_listener.formatter.verbose = False
crew.kickoff()
captured = capsys.readouterr()
filtered_output = "\n".join(
line
for line in captured.out.split("\n")
if not line.startswith("[") and line.strip() and not line.startswith("\x1b")
)
assert filtered_output == ""
# Verify the crew executed successfully and verbose was not set
assert result_quiet is not None
assert crew_quiet.verbose is False
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -959,7 +938,6 @@ def test_cache_hitting_between_agents(researcher, writer, ceo):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_api_calls_throttling(capsys):
from crewai.tools import tool
@tool
@@ -1535,7 +1513,6 @@ async def test_async_kickoff_for_each_async_empty_input():
def test_set_agents_step_callback():
researcher_agent = Agent(
role="Researcher",
goal="Make the best research and analysis on content about AI and AI agents",
@@ -1564,7 +1541,6 @@ def test_set_agents_step_callback():
def test_dont_set_agents_step_callback_if_already_set():
def agent_callback(_):
pass
@@ -1662,42 +1638,47 @@ def test_task_with_no_arguments():
def test_code_execution_flag_adds_code_tool_upon_kickoff():
from crewai_tools import CodeInterpreterTool
try:
from crewai_tools import CodeInterpreterTool
except (ImportError, Exception):
pytest.skip("crewai_tools not available or cannot be imported")
programmer = Agent(
role="Programmer",
goal="Write code to solve problems.",
backstory="You're a programmer who loves to solve problems with code.",
allow_delegation=False,
allow_code_execution=True,
)
# Mock Docker validation for the entire test
with patch.object(Agent, "_validate_docker_installation"):
programmer = Agent(
role="Programmer",
goal="Write code to solve problems.",
backstory="You're a programmer who loves to solve problems with code.",
allow_delegation=False,
allow_code_execution=True,
)
task = Task(
description="How much is 2 + 2?",
expected_output="The result of the sum as an integer.",
agent=programmer,
)
task = Task(
description="How much is 2 + 2?",
expected_output="The result of the sum as an integer.",
agent=programmer,
)
crew = Crew(agents=[programmer], tasks=[task])
crew = Crew(agents=[programmer], tasks=[task])
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
)
mock_task_output = TaskOutput(
description="Mock description", raw="mocked output", agent="mocked agent"
)
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
with patch.object(
Task, "execute_sync", return_value=mock_task_output
) as mock_execute_sync:
crew.kickoff()
# Get the tools that were actually used in execution
_, kwargs = mock_execute_sync.call_args
used_tools = kwargs["tools"]
# Get the tools that were actually used in execution
_, kwargs = mock_execute_sync.call_args
used_tools = kwargs["tools"]
# Verify that exactly one tool was used and it was a CodeInterpreterTool
assert len(used_tools) == 1, "Should have exactly one tool"
assert isinstance(
used_tools[0], CodeInterpreterTool
), "Tool should be CodeInterpreterTool"
# Verify that exactly one tool was used and it was a CodeInterpreterTool
assert len(used_tools) == 1, "Should have exactly one tool"
assert isinstance(
used_tools[0], CodeInterpreterTool
), "Tool should be CodeInterpreterTool"
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -2028,7 +2009,6 @@ def test_crew_inputs_interpolate_both_agents_and_tasks():
def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
agent = Agent(
role="{topic} Researcher",
goal="Express hot takes on {topic}.",
@@ -2060,7 +2040,6 @@ def test_crew_inputs_interpolate_both_agents_and_tasks_diff():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_does_not_interpolate_without_inputs():
agent = Agent(
role="{topic} Researcher",
goal="Express hot takes on {topic}.",
@@ -2194,7 +2173,6 @@ def test_task_same_callback_both_on_task_and_crew():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_tools_with_custom_caching():
from crewai.tools import tool
@tool
@@ -2474,7 +2452,6 @@ def test_multiple_conditional_tasks(researcher, writer):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory():
math_researcher = Agent(
role="Researcher",
goal="You research about math.",
@@ -2572,7 +2549,6 @@ def test_memory_events_are_emitted():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory_with_long_term_memory():
math_researcher = Agent(
role="Researcher",
goal="You research about math.",
@@ -2602,7 +2578,6 @@ def test_using_contextual_memory_with_long_term_memory():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_warning_long_term_memory_without_entity_memory():
math_researcher = Agent(
role="Researcher",
goal="You research about math.",
@@ -2638,7 +2613,6 @@ def test_warning_long_term_memory_without_entity_memory():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_long_term_memory_with_memory_flag():
math_researcher = Agent(
role="Researcher",
goal="You research about math.",
@@ -2672,7 +2646,6 @@ def test_long_term_memory_with_memory_flag():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_using_contextual_memory_with_short_term_memory():
math_researcher = Agent(
role="Researcher",
goal="You research about math.",
@@ -2702,7 +2675,6 @@ def test_using_contextual_memory_with_short_term_memory():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_disabled_memory_using_contextual_memory():
math_researcher = Agent(
role="Researcher",
goal="You research about math.",
@@ -2829,7 +2801,6 @@ def test_crew_output_file_validation_failures():
def test_manager_agent(researcher, writer):
task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
expected_output="5 bullet points with a paragraph for each idea.",
@@ -3857,7 +3828,9 @@ def test_task_tools_preserve_code_execution_tools():
"""
from typing import Type
from crewai_tools import CodeInterpreterTool
# Mock embedchain initialization to prevent race conditions in parallel CI execution
with patch("embedchain.client.Client.setup"):
from crewai_tools import CodeInterpreterTool
from pydantic import BaseModel, Field
from crewai.tools import BaseTool
@@ -4459,7 +4432,6 @@ def test_crew_copy_with_memory():
original_entity_id = id(crew._entity_memory) if crew._entity_memory else None
original_external_id = id(crew._external_memory) if crew._external_memory else None
try:
crew_copy = crew.copy()
@@ -4509,7 +4481,6 @@ def test_crew_copy_with_memory():
or crew_copy._external_memory is None
), "Copied _external_memory should be None if not originally present"
except pydantic_core.ValidationError as e:
if "Input should be an instance of" in str(e) and ("Memory" in str(e)):
pytest.fail(
@@ -4726,6 +4697,7 @@ def test_reset_agent_knowledge_with_only_agent_knowledge(researcher, writer):
[mock_ks_research, mock_ks_writer]
)
def test_default_crew_name(researcher, writer):
crew = Crew(
agents=[researcher, writer],
@@ -4766,9 +4738,18 @@ def test_ensure_exchanged_messages_are_propagated_to_external_memory():
crew.kickoff()
expected_messages = [
{'role': 'system', 'content': "You are Researcher. You're an expert in research and you love to learn new things.\nYour personal goal is: You research about math.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!"},
{'role': 'user', 'content': '\nCurrent Task: Research a topic to teach a kid aged 6 about math.\n\nThis is the expected criteria for your final answer: A topic, explanation, angle, and examples.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:'},
{'role': 'assistant', 'content': 'I now can give a great answer \nFinal Answer: \n\n**Topic: Understanding Shapes (Geometry)**\n\n**Explanation:** \nShapes are everywhere around us! They are the special forms that we can see in everyday objects. Teaching a 6-year-old about shapes is not only fun but also a way to help them think about the world around them and develop their spatial awareness. We will focus on basic shapes: circle, square, triangle, and rectangle. Understanding these shapes helps kids recognize and describe their environment.\n\n**Angle:** \nLets make learning about shapes an adventure! We can turn it into a treasure hunt where the child has to find objects around the house or outside that match the shapes we learn. This hands-on approach helps make the learning stick!\n\n**Examples:** \n1. **Circle:** \n - Explanation: A circle is round and has no corners. It looks like a wheel or a cookie! \n - Activity: Find objects that are circles, such as a clock, a dinner plate, or a ball. Draw a big circle on a paper and then try to draw smaller circles inside it.\n\n2. **Square:** \n - Explanation: A square has four equal sides and four corners. It looks like a box! \n - Activity: Look for squares in books, in windows, or in building blocks. Try to build a tall tower using square blocks!\n\n3. **Triangle:** \n - Explanation: A triangle has three sides and three corners. It looks like a slice of pizza or a roof! \n - Activity: Use crayons to draw a big triangle and then find things that are shaped like a triangle, like a slice of cheese or a traffic sign.\n\n4. **Rectangle:** \n - Explanation: A rectangle has four sides but only opposite sides are equal. Its like a stretched square! \n - Activity: Search for rectangles, such as a book cover or a door. You can cut out rectangles from colored paper and create a collage!\n\nBy relating the shapes to fun activities and using real-world examples, we not only make learning more enjoyable but also help the child better remember and understand the concept of shapes in math. This foundation forms the basis of their future learning in geometry!'}
{
"role": "system",
"content": "You are Researcher. You're an expert in research and you love to learn new things.\nYour personal goal is: You research about math.\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
},
{
"role": "user",
"content": "\nCurrent Task: Research a topic to teach a kid aged 6 about math.\n\nThis is the expected criteria for your final answer: A topic, explanation, angle, and examples.\nyou MUST return the actual complete content as the final answer, not a summary.\n\n# Useful context: \nExternal memories:\n\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
},
{
"role": "assistant",
"content": "I now can give a great answer \nFinal Answer: \n\n**Topic: Understanding Shapes (Geometry)**\n\n**Explanation:** \nShapes are everywhere around us! They are the special forms that we can see in everyday objects. Teaching a 6-year-old about shapes is not only fun but also a way to help them think about the world around them and develop their spatial awareness. We will focus on basic shapes: circle, square, triangle, and rectangle. Understanding these shapes helps kids recognize and describe their environment.\n\n**Angle:** \nLets make learning about shapes an adventure! We can turn it into a treasure hunt where the child has to find objects around the house or outside that match the shapes we learn. This hands-on approach helps make the learning stick!\n\n**Examples:** \n1. **Circle:** \n - Explanation: A circle is round and has no corners. It looks like a wheel or a cookie! \n - Activity: Find objects that are circles, such as a clock, a dinner plate, or a ball. Draw a big circle on a paper and then try to draw smaller circles inside it.\n\n2. **Square:** \n - Explanation: A square has four equal sides and four corners. It looks like a box! \n - Activity: Look for squares in books, in windows, or in building blocks. Try to build a tall tower using square blocks!\n\n3. **Triangle:** \n - Explanation: A triangle has three sides and three corners. It looks like a slice of pizza or a roof! \n - Activity: Use crayons to draw a big triangle and then find things that are shaped like a triangle, like a slice of cheese or a traffic sign.\n\n4. **Rectangle:** \n - Explanation: A rectangle has four sides but only opposite sides are equal. Its like a stretched square! \n - Activity: Search for rectangles, such as a book cover or a door. You can cut out rectangles from colored paper and create a collage!\n\nBy relating the shapes to fun activities and using real-world examples, we not only make learning more enjoyable but also help the child better remember and understand the concept of shapes in math. This foundation forms the basis of their future learning in geometry!",
},
]
external_memory_save.assert_called_once_with(
value=ANY,

View File

@@ -616,7 +616,9 @@ def test_async_flow_with_trigger_payload():
flow = AsyncTriggerFlow()
test_payload = "Async trigger data"
result = asyncio.run(flow.kickoff_async(inputs={"crewai_trigger_payload": test_payload}))
result = asyncio.run(
flow.kickoff_async(inputs={"crewai_trigger_payload": test_payload})
)
assert captured_payload == [test_payload, "async_started"]
assert result == "async_finished"

View File

@@ -0,0 +1,177 @@
"""Regression tests for flow listener resumability fix.
These tests ensure that:
1. HITL flows can resume properly without re-executing completed methods
2. Cyclic flows can re-execute methods on each iteration
"""
from typing import Dict
from crewai.flow.flow import Flow, listen, router, start
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
def test_hitl_resumption_skips_completed_listeners(tmp_path):
"""Test that HITL resumption skips completed listener methods but continues chains."""
db_path = tmp_path / "test_flows.db"
persistence = SQLiteFlowPersistence(str(db_path))
execution_log = []
class HitlFlow(Flow[Dict[str, str]]):
@start()
def step_1(self):
execution_log.append("step_1_executed")
self.state["step1"] = "done"
return "step1_result"
@listen(step_1)
def step_2(self):
execution_log.append("step_2_executed")
self.state["step2"] = "done"
return "step2_result"
@listen(step_2)
def step_3(self):
execution_log.append("step_3_executed")
self.state["step3"] = "done"
return "step3_result"
flow1 = HitlFlow(persistence=persistence)
flow1.kickoff()
flow_id = flow1.state["id"]
assert execution_log == ["step_1_executed", "step_2_executed", "step_3_executed"]
flow2 = HitlFlow(persistence=persistence)
flow2._completed_methods = {"step_1", "step_2"} # Simulate partial completion
execution_log.clear()
flow2.kickoff(inputs={"id": flow_id})
assert "step_1_executed" not in execution_log
assert "step_2_executed" not in execution_log
assert "step_3_executed" in execution_log
def test_cyclic_flow_re_executes_on_each_iteration():
"""Test that cyclic flows properly re-execute methods on each iteration."""
execution_log = []
class CyclicFlowTest(Flow[Dict[str, str]]):
iteration = 0
max_iterations = 3
@start("loop")
def step_1(self):
if self.iteration >= self.max_iterations:
return None
execution_log.append(f"step_1_{self.iteration}")
return f"result_{self.iteration}"
@listen(step_1)
def step_2(self):
execution_log.append(f"step_2_{self.iteration}")
@router(step_2)
def step_3(self):
execution_log.append(f"step_3_{self.iteration}")
self.iteration += 1
if self.iteration < self.max_iterations:
return "loop"
return "exit"
flow = CyclicFlowTest()
flow.kickoff()
expected = []
for i in range(3):
expected.extend([f"step_1_{i}", f"step_2_{i}", f"step_3_{i}"])
assert execution_log == expected
def test_conditional_start_with_resumption(tmp_path):
"""Test that conditional start methods work correctly with resumption."""
db_path = tmp_path / "test_flows.db"
persistence = SQLiteFlowPersistence(str(db_path))
execution_log = []
class ConditionalStartFlow(Flow[Dict[str, str]]):
@start()
def init(self):
execution_log.append("init")
return "initialized"
@router(init)
def route_to_branch(self):
execution_log.append("router")
return "branch_a"
@start("branch_a")
def branch_a_start(self):
execution_log.append("branch_a_start")
self.state["branch"] = "a"
@listen(branch_a_start)
def branch_a_process(self):
execution_log.append("branch_a_process")
self.state["processed"] = "yes"
flow1 = ConditionalStartFlow(persistence=persistence)
flow1.kickoff()
flow_id = flow1.state["id"]
assert execution_log == ["init", "router", "branch_a_start", "branch_a_process"]
flow2 = ConditionalStartFlow(persistence=persistence)
flow2._completed_methods = {"init", "route_to_branch", "branch_a_start"}
execution_log.clear()
flow2.kickoff(inputs={"id": flow_id})
assert execution_log == ["branch_a_process"]
def test_cyclic_flow_with_conditional_start():
"""Test that cyclic flows work properly with conditional start methods."""
execution_log = []
class CyclicConditionalFlow(Flow[Dict[str, str]]):
iteration = 0
@start()
def initial(self):
execution_log.append("initial")
return "init_done"
@router(initial)
def route_to_cycle(self):
execution_log.append("router_initial")
return "loop"
@start("loop")
def cycle_entry(self):
execution_log.append(f"cycle_{self.iteration}")
self.iteration += 1
@router(cycle_entry)
def cycle_router(self):
execution_log.append(f"router_{self.iteration - 1}")
if self.iteration < 3:
return "loop"
return "exit"
flow = CyclicConditionalFlow()
flow.kickoff()
expected = [
"initial",
"router_initial",
"cycle_0",
"router_0",
"cycle_1",
"router_1",
"cycle_2",
"router_2",
]
assert execution_log == expected

View File

@@ -4,12 +4,12 @@
def test_task_output_import():
"""Test that TaskOutput can be imported from crewai."""
from crewai import TaskOutput
assert TaskOutput is not None
def test_crew_output_import():
"""Test that CrewOutput can be imported from crewai."""
from crewai import CrewOutput
assert CrewOutput is not None

View File

@@ -18,6 +18,7 @@ from crewai.project import (
from crewai.task import Task
from crewai.tools import tool
class SimpleCrew:
@agent
def simple_agent(self):
@@ -85,17 +86,24 @@ class InternalCrew:
def crew(self):
return Crew(agents=self.agents, tasks=self.tasks, verbose=True)
@CrewBase
class InternalCrewWithMCP(InternalCrew):
mcp_server_params = {"host": "localhost", "port": 8000}
@agent
def reporting_analyst(self):
return Agent(config=self.agents_config["reporting_analyst"], tools=self.get_mcp_tools()) # type: ignore[index]
return Agent(
config=self.agents_config["reporting_analyst"], tools=self.get_mcp_tools()
) # type: ignore[index]
@agent
def researcher(self):
return Agent(config=self.agents_config["researcher"], tools=self.get_mcp_tools("simple_tool")) # type: ignore[index]
return Agent(
config=self.agents_config["researcher"],
tools=self.get_mcp_tools("simple_tool"),
) # type: ignore[index]
def test_agent_memoization():
crew = SimpleCrew()
@@ -245,15 +253,18 @@ def test_multiple_before_after_kickoff():
assert "processed first" in result.raw, "First after_kickoff not executed"
assert "processed second" in result.raw, "Second after_kickoff not executed"
def test_crew_name():
crew = InternalCrew()
assert crew._crew_name == "InternalCrew"
@tool
def simple_tool():
"""Return 'Hi!'"""
return "Hi!"
@tool
def another_simple_tool():
"""Return 'Hi!'"""
@@ -261,8 +272,11 @@ def another_simple_tool():
def test_internal_crew_with_mcp():
from crewai_tools import MCPServerAdapter
from crewai_tools.adapters.mcp_adapter import ToolCollection
# Mock embedchain initialization to prevent race conditions in parallel CI execution
with patch("embedchain.client.Client.setup"):
from crewai_tools import MCPServerAdapter
from crewai_tools.adapters.mcp_adapter import ToolCollection
mock = Mock(spec=MCPServerAdapter)
mock.tools = ToolCollection([simple_tool, another_simple_tool])
with patch("crewai_tools.MCPServerAdapter", return_value=mock) as adapter_mock:
@@ -270,4 +284,4 @@ def test_internal_crew_with_mcp():
assert crew.reporting_analyst().tools == [simple_tool, another_simple_tool]
assert crew.researcher().tools == [simple_tool]
adapter_mock.assert_called_once_with({"host": "localhost", "port": 8000})
adapter_mock.assert_called_once_with({"host": "localhost", "port": 8000})

View File

@@ -345,6 +345,8 @@ def test_output_pydantic_hierarchical():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_output_json_sequential():
import uuid
class ScoreOutput(BaseModel):
score: int
@@ -355,11 +357,12 @@ def test_output_json_sequential():
allow_delegation=False,
)
output_file = f"score_{uuid.uuid4()}.json"
task = Task(
description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'",
expected_output="The score of the title.",
output_json=ScoreOutput,
output_file="score.json",
output_file=output_file,
agent=scorer,
)
@@ -368,6 +371,9 @@ def test_output_json_sequential():
assert '{"score": 4}' == result.json
assert result.to_dict() == {"score": 4}
if os.path.exists(output_file):
os.remove(output_file)
@pytest.mark.vcr(filter_headers=["authorization"])
def test_output_json_hierarchical():
@@ -398,6 +404,7 @@ def test_output_json_hierarchical():
assert result.json == '{"score": 4}'
assert result.to_dict() == {"score": 4}
@pytest.mark.vcr(filter_headers=["authorization"])
def test_inject_date():
reporter = Agent(
@@ -422,6 +429,7 @@ def test_inject_date():
result = crew.kickoff()
assert "2025-05-21" in result.raw
@pytest.mark.vcr(filter_headers=["authorization"])
def test_inject_date_custom_format():
reporter = Agent(
@@ -447,6 +455,7 @@ def test_inject_date_custom_format():
result = crew.kickoff()
assert "May 21, 2025" in result.raw
@pytest.mark.vcr(filter_headers=["authorization"])
def test_no_inject_date():
reporter = Agent(
@@ -650,6 +659,8 @@ def test_save_task_output():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_save_task_json_output():
from unittest.mock import patch
class ScoreOutput(BaseModel):
score: int
@@ -669,17 +680,25 @@ def test_save_task_json_output():
)
crew = Crew(agents=[scorer], tasks=[task])
crew.kickoff()
output_file_exists = os.path.exists("score.json")
assert output_file_exists
assert {"score": 4} == json.loads(open("score.json").read())
if output_file_exists:
os.remove("score.json")
# Mock only the _save_file method to avoid actual file I/O
with patch.object(Task, "_save_file") as mock_save:
result = crew.kickoff()
assert result is not None
mock_save.assert_called_once()
call_args = mock_save.call_args
if call_args:
saved_content = call_args[0][0]
if isinstance(saved_content, str):
data = json.loads(saved_content)
assert "score" in data
@pytest.mark.vcr(filter_headers=["authorization"])
def test_save_task_pydantic_output():
import uuid
class ScoreOutput(BaseModel):
score: int
@@ -690,10 +709,11 @@ def test_save_task_pydantic_output():
allow_delegation=False,
)
output_file = f"score_{uuid.uuid4()}.json"
task = Task(
description="Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work'",
expected_output="The score of the title.",
output_file="score.json",
output_file=output_file,
output_pydantic=ScoreOutput,
agent=scorer,
)
@@ -701,11 +721,11 @@ def test_save_task_pydantic_output():
crew = Crew(agents=[scorer], tasks=[task])
crew.kickoff()
output_file_exists = os.path.exists("score.json")
output_file_exists = os.path.exists(output_file)
assert output_file_exists
assert {"score": 4} == json.loads(open("score.json").read())
assert {"score": 4} == json.loads(open(output_file).read())
if output_file_exists:
os.remove("score.json")
os.remove(output_file)
@pytest.mark.vcr(filter_headers=["authorization"])
@@ -1136,62 +1156,67 @@ def test_output_file_validation():
def test_create_directory_true():
"""Test that directories are created when create_directory=True."""
from pathlib import Path
output_path = "test_create_dir/output.txt"
task = Task(
description="Test task",
expected_output="Test output",
output_file=output_path,
create_directory=True,
)
resolved_path = Path(output_path).expanduser().resolve()
resolved_dir = resolved_path.parent
if resolved_path.exists():
resolved_path.unlink()
if resolved_dir.exists():
import shutil
shutil.rmtree(resolved_dir)
assert not resolved_dir.exists()
task._save_file("test content")
assert resolved_dir.exists()
assert resolved_path.exists()
if resolved_path.exists():
resolved_path.unlink()
if resolved_dir.exists():
import shutil
shutil.rmtree(resolved_dir)
def test_create_directory_false():
"""Test that directories are not created when create_directory=False."""
from pathlib import Path
output_path = "nonexistent_test_dir/output.txt"
task = Task(
description="Test task",
expected_output="Test output",
output_file=output_path,
create_directory=False,
)
resolved_path = Path(output_path).expanduser().resolve()
resolved_dir = resolved_path.parent
if resolved_dir.exists():
import shutil
shutil.rmtree(resolved_dir)
assert not resolved_dir.exists()
with pytest.raises(RuntimeError, match="Directory .* does not exist and create_directory is False"):
with pytest.raises(
RuntimeError, match="Directory .* does not exist and create_directory is False"
):
task._save_file("test content")
@@ -1202,34 +1227,35 @@ def test_create_directory_default():
expected_output="Test output",
output_file="output.txt",
)
assert task.create_directory is True
def test_create_directory_with_existing_directory():
"""Test that create_directory=False works when directory already exists."""
from pathlib import Path
output_path = "existing_test_dir/output.txt"
resolved_path = Path(output_path).expanduser().resolve()
resolved_dir = resolved_path.parent
resolved_dir.mkdir(parents=True, exist_ok=True)
task = Task(
description="Test task",
expected_output="Test output",
output_file=output_path,
create_directory=False,
)
task._save_file("test content")
assert resolved_path.exists()
if resolved_path.exists():
resolved_path.unlink()
if resolved_dir.exists():
import shutil
shutil.rmtree(resolved_dir)
@@ -1241,7 +1267,7 @@ def test_github_issue_3149_reproduction():
output_file="test_output.txt",
create_directory=True,
)
assert task.create_directory is True
assert task.output_file == "test_output.txt"

View File

View File

@@ -76,11 +76,29 @@ def base_task(base_agent):
)
event_listener = EventListener()
@pytest.fixture
def reset_event_listener_singleton():
"""Reset EventListener singleton for clean test state."""
original_instance = EventListener._instance
original_initialized = (
getattr(EventListener._instance, "_initialized", False)
if EventListener._instance
else False
)
EventListener._instance = None
yield
EventListener._instance = original_instance
if original_instance and original_initialized:
EventListener._instance._initialized = original_initialized
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_start_kickoff_event(base_agent, base_task):
def test_crew_emits_start_kickoff_event(
base_agent, base_task, reset_event_listener_singleton
):
received_events = []
mock_span = Mock()
@@ -88,18 +106,23 @@ def test_crew_emits_start_kickoff_event(base_agent, base_task):
def handle_crew_start(source, event):
received_events.append(event)
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
with (
patch.object(
event_listener._telemetry, "crew_execution_span", return_value=mock_span
) as mock_crew_execution_span,
patch.object(
event_listener._telemetry, "end_crew", return_value=mock_span
) as mock_crew_ended,
mock_telemetry = Mock()
mock_telemetry.crew_execution_span = Mock(return_value=mock_span)
mock_telemetry.end_crew = Mock(return_value=mock_span)
mock_telemetry.set_tracer = Mock()
mock_telemetry.task_started = Mock(return_value=mock_span)
mock_telemetry.task_ended = Mock(return_value=mock_span)
# Patch the Telemetry class to return our mock
with patch(
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
# Now when Crew creates EventListener, it will use our mocked telemetry
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
mock_crew_execution_span.assert_called_once_with(crew, None)
mock_crew_ended.assert_called_once_with(crew, "hi")
mock_telemetry.crew_execution_span.assert_called_once_with(crew, None)
mock_telemetry.end_crew.assert_called_once_with(crew, "hi")
assert len(received_events) == 1
assert received_events[0].crew_name == "TestCrew"
@@ -128,7 +151,6 @@ def test_crew_emits_end_kickoff_event(base_agent, base_task):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_test_kickoff_type_event(base_agent, base_task):
received_events = []
mock_span = Mock()
@crewai_event_bus.on(CrewTestStartedEvent)
def handle_crew_end(source, event):
@@ -143,21 +165,8 @@ def test_crew_emits_test_kickoff_type_event(base_agent, base_task):
received_events.append(event)
eval_llm = LLM(model="gpt-4o-mini")
with (
patch.object(
event_listener._telemetry, "test_execution_span", return_value=mock_span
) as mock_crew_execution_span,
):
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.test(n_iterations=1, eval_llm=eval_llm)
# Verify the call was made with correct argument types and values
assert mock_crew_execution_span.call_count == 1
args = mock_crew_execution_span.call_args[0]
assert isinstance(args[0], Crew)
assert args[1] == 1
assert args[2] is None
assert args[3] == eval_llm
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.test(n_iterations=1, eval_llm=eval_llm)
assert len(received_events) == 3
assert received_events[0].crew_name == "TestCrew"
@@ -214,7 +223,9 @@ def test_crew_emits_start_task_event(base_agent, base_task):
@pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_emits_end_task_event(base_agent, base_task):
def test_crew_emits_end_task_event(
base_agent, base_task, reset_event_listener_singleton
):
received_events = []
@crewai_event_bus.on(TaskCompletedEvent)
@@ -222,19 +233,22 @@ def test_crew_emits_end_task_event(base_agent, base_task):
received_events.append(event)
mock_span = Mock()
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
with (
patch.object(
event_listener._telemetry, "task_started", return_value=mock_span
) as mock_task_started,
patch.object(
event_listener._telemetry, "task_ended", return_value=mock_span
) as mock_task_ended,
mock_telemetry = Mock()
mock_telemetry.task_started = Mock(return_value=mock_span)
mock_telemetry.task_ended = Mock(return_value=mock_span)
mock_telemetry.set_tracer = Mock()
mock_telemetry.crew_execution_span = Mock()
mock_telemetry.end_crew = Mock()
with patch(
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
crew = Crew(agents=[base_agent], tasks=[base_task], name="TestCrew")
crew.kickoff()
mock_task_started.assert_called_once_with(crew=crew, task=base_task)
mock_task_ended.assert_called_once_with(mock_span, base_task, crew)
mock_telemetry.task_started.assert_called_once_with(crew=crew, task=base_task)
mock_telemetry.task_ended.assert_called_once_with(mock_span, base_task, crew)
assert len(received_events) == 1
assert isinstance(received_events[0].timestamp, datetime)
@@ -423,7 +437,7 @@ def test_tools_emits_error_events():
assert isinstance(received_events[0].timestamp, datetime)
def test_flow_emits_start_event():
def test_flow_emits_start_event(reset_event_listener_singleton):
received_events = []
mock_span = Mock()
@@ -436,15 +450,21 @@ def test_flow_emits_start_event():
def begin(self):
return "started"
with (
patch.object(
event_listener._telemetry, "flow_execution_span", return_value=mock_span
) as mock_flow_execution_span,
mock_telemetry = Mock()
mock_telemetry.flow_execution_span = Mock(return_value=mock_span)
mock_telemetry.flow_creation_span = Mock()
mock_telemetry.set_tracer = Mock()
with patch(
"crewai.utilities.events.event_listener.Telemetry", return_value=mock_telemetry
):
# Force creation of EventListener singleton with mocked telemetry
_ = EventListener()
flow = TestFlow()
flow.kickoff()
mock_flow_execution_span.assert_called_once_with("TestFlow", ["begin"])
mock_telemetry.flow_execution_span.assert_called_once_with("TestFlow", ["begin"])
assert len(received_events) == 1
assert received_events[0].flow_name == "TestFlow"
assert received_events[0].type == "flow_started"
@@ -572,7 +592,6 @@ def test_multiple_handlers_for_same_event(base_agent, base_task):
def test_flow_emits_created_event():
received_events = []
mock_span = Mock()
@crewai_event_bus.on(FlowCreatedEvent)
def handle_flow_created(source, event):
@@ -583,15 +602,8 @@ def test_flow_emits_created_event():
def begin(self):
return "started"
with (
patch.object(
event_listener._telemetry, "flow_creation_span", return_value=mock_span
) as mock_flow_creation_span,
):
flow = TestFlow()
flow.kickoff()
mock_flow_creation_span.assert_called_once_with("TestFlow")
flow = TestFlow()
flow.kickoff()
assert len(received_events) == 1
assert received_events[0].flow_name == "TestFlow"

View File

@@ -1,5 +1,6 @@
import os
import unittest
import uuid
import pytest
@@ -8,7 +9,9 @@ from crewai.utilities.file_handler import PickleHandler
class TestPickleHandler(unittest.TestCase):
def setUp(self):
self.file_name = "test_data.pkl"
# Use a unique file name for each test to avoid race conditions in parallel test execution
unique_id = str(uuid.uuid4())
self.file_name = f"test_data_{unique_id}.pkl"
self.file_path = os.path.join(os.getcwd(), self.file_name)
self.handler = PickleHandler(self.file_name)
@@ -37,6 +40,8 @@ class TestPickleHandler(unittest.TestCase):
def test_load_corrupted_file(self):
with open(self.file_path, "wb") as file:
file.write(b"corrupted data")
file.flush()
os.fsync(file.fileno()) # Ensure data is written to disk
with pytest.raises(Exception) as exc:
self.handler.load()

View File

@@ -1,4 +1,5 @@
import os
import tempfile
import unittest
from crewai.utilities.training_handler import CrewTrainingHandler
@@ -6,10 +7,13 @@ from crewai.utilities.training_handler import CrewTrainingHandler
class InternalCrewTrainingHandler(unittest.TestCase):
def setUp(self):
self.handler = CrewTrainingHandler("trained_data.pkl")
self.temp_file = tempfile.NamedTemporaryFile(suffix=".pkl", delete=False)
self.temp_file.close()
self.handler = CrewTrainingHandler(self.temp_file.name)
def tearDown(self):
os.remove("trained_data.pkl")
if os.path.exists(self.temp_file.name):
os.remove(self.temp_file.name)
del self.handler
def test_save_trained_data(self):
@@ -22,13 +26,22 @@ class InternalCrewTrainingHandler(unittest.TestCase):
assert data[agent_id] == trained_data
def test_append_existing_agent(self):
train_iteration = 1
agent_id = "agent1"
initial_iteration = 0
initial_data = {"param1": 1, "param2": 2}
self.handler.append(initial_iteration, agent_id, initial_data)
train_iteration = 1
new_data = {"param3": 3, "param4": 4}
self.handler.append(train_iteration, agent_id, new_data)
# Assert that the new data is appended correctly to the existing agent
data = self.handler.load()
assert agent_id in data
assert initial_iteration in data[agent_id]
assert train_iteration in data[agent_id]
assert data[agent_id][initial_iteration] == initial_data
assert data[agent_id][train_iteration] == new_data
def test_append_new_agent(self):