Mirror of https://github.com/crewAIInc/crewAI.git (synced 2025-12-16 04:18:35 +00:00)

Compare commits: 17 commits, 0.175.0 ... devin/1756
Commits (17):

- bf2b0c5864
- 3257d2757f
- 045da4f030
- 3619d4dc50
- 3a54cc859a
- f0def350a4
- f4f32b5f7f
- 49a5ae0e16
- d31ffdbb90
- 4555ada91e
- 92d71f7f06
- dada9f140f
- 878c1a649a
- 1b1a8fdbf4
- 2633b33afc
- e4c4b81e63
- ec1eff02a8
.github/workflows/linter.yml (vendored, 17 lines changed)

@@ -15,8 +15,19 @@ jobs:
       - name: Fetch Target Branch
         run: git fetch origin $TARGET_BRANCH --depth=1

-      - name: Install Ruff
-        run: pip install ruff
+      - name: Install uv
+        uses: astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+          cache-dependency-glob: |
+            **/pyproject.toml
+            **/uv.lock
+
+      - name: Set up Python
+        run: uv python install 3.11
+
+      - name: Install dependencies
+        run: uv sync --dev --no-install-project

       - name: Get Changed Python Files
         id: changed-files
@@ -33,4 +44,4 @@ jobs:
           echo "${{ steps.changed-files.outputs.files }}" \
             | tr ' ' '\n' \
             | grep -v 'src/crewai/cli/templates/' \
-            | xargs -I{} ruff check "{}"
+            | xargs -I{} uv run ruff check "{}"
.github/workflows/security-checker.yml (vendored, 16 lines changed)

@@ -10,14 +10,20 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4

-      - name: Set up Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11.9"
+      - name: Install uv
+        uses: astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+          cache-dependency-glob: |
+            **/pyproject.toml
+            **/uv.lock
+
+      - name: Set up Python
+        run: uv python install 3.11

       - name: Install dependencies
-        run: pip install bandit
+        run: uv sync --dev --no-install-project

       - name: Run Bandit
-        run: bandit -c pyproject.toml -r src/ -ll
+        run: uv run bandit -c pyproject.toml -r src/ -ll
.github/workflows/tests.yml (vendored, 2 lines changed)

@@ -24,7 +24,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Install uv
-        uses: astral-sh/setup-uv@v3
+        uses: astral-sh/setup-uv@v6
         with:
           enable-cache: true
           cache-dependency-glob: |
.github/workflows/type-checker.yml (vendored, 75 lines changed)

@@ -6,21 +6,78 @@ permissions:
   contents: write

 jobs:
-  type-checker:
+  type-checker-matrix:
+    name: type-checker (${{ matrix.python-version }})
     runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.10", "3.11", "3.12", "3.13"]

     steps:
       - name: Checkout code
         uses: actions/checkout@v4
-
-      - name: Setup Python
-        uses: actions/setup-python@v5
         with:
-          python-version: "3.11.9"
+          fetch-depth: 0 # Fetch all history for proper diff

-      - name: Install Requirements
-        run: |
-          pip install mypy
+      - name: Install uv
+        uses: astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+          cache-dependency-glob: |
+            **/pyproject.toml
+            **/uv.lock

-      - name: Run type checks
-        run: mypy src
+      - name: Set up Python ${{ matrix.python-version }}
+        run: uv python install ${{ matrix.python-version }}
+
+      - name: Install dependencies
+        run: uv sync --dev --no-install-project
+
+      - name: Get changed Python files
+        id: changed-files
+        run: |
+          # Get the list of changed Python files compared to the base branch
+          echo "Fetching changed files..."
+          git diff --name-only --diff-filter=ACMRT origin/${{ github.base_ref }}...HEAD -- '*.py' > changed_files.txt
+
+          # Filter for files in src/ and tests/ directories
+          grep -E "^(src/|tests/)" changed_files.txt > filtered_changed_files.txt || true
+
+          # Check if there are any changed files
+          if [ -s filtered_changed_files.txt ]; then
+            echo "Changed Python files in src/ and tests/:"
+            cat filtered_changed_files.txt
+            echo "has_changes=true" >> $GITHUB_OUTPUT
+            # Convert newlines to spaces for mypy command
+            echo "files=$(cat filtered_changed_files.txt | tr '\n' ' ')" >> $GITHUB_OUTPUT
+          else
+            echo "No Python files changed in src/ or tests/"
+            echo "has_changes=false" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Run type checks on changed files
+        if: steps.changed-files.outputs.has_changes == 'true'
+        run: |
+          echo "Running mypy on changed files with Python ${{ matrix.python-version }}..."
+          uv run mypy ${{ steps.changed-files.outputs.files }}
+
+      - name: No files to check
+        if: steps.changed-files.outputs.has_changes == 'false'
+        run: echo "No Python files in src/ or tests/ were modified - skipping type checks"
+
+  # Summary job to provide single status for branch protection
+  type-checker:
+    name: type-checker
+    runs-on: ubuntu-latest
+    needs: type-checker-matrix
+    if: always()
+    steps:
+      - name: Check matrix results
+        run: |
+          if [ "${{ needs.type-checker-matrix.result }}" == "success" ] || [ "${{ needs.type-checker-matrix.result }}" == "skipped" ]; then
+            echo "✅ All type checks passed"
+          else
+            echo "❌ Type checks failed"
+            exit 1
+          fi
.pre-commit-config.yaml

@@ -1,7 +1,14 @@
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.8.2
+    rev: v0.12.11
     hooks:
       - id: ruff
-        args: ["--fix"]
+        args: ["--config", "pyproject.toml"]
+      - id: ruff-format
+        args: ["--config", "pyproject.toml"]
+
+  - repo: https://github.com/pre-commit/mirrors-mypy
+    rev: v1.17.1
+    hooks:
+      - id: mypy
+        args: ["--config-file", "pyproject.toml"]
Deleted file (standalone ruff configuration):

@@ -1,4 +0,0 @@
-exclude = [
-    "templates",
-    "__init__.py",
-]
README.md (12 lines changed)

@@ -418,10 +418,10 @@ Choose CrewAI to easily build powerful, adaptable, and production-ready AI automations.

 You can test different real life examples of AI crews in the [CrewAI-examples repo](https://github.com/crewAIInc/crewAI-examples?tab=readme-ov-file):

-- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/landing_page_generator)
+- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/landing_page_generator)
 - [Having Human input on the execution](https://docs.crewai.com/how-to/Human-Input-on-Execution)
-- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner)
-- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis)
+- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/trip_planner)
+- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/stock_analysis)

 ### Quick Tutorial

@@ -429,19 +429,19 @@ You can test different real life examples of AI crews in the CrewAI-examples repo:

 ### Write Job Descriptions

-[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/job-posting) or watch a video below:
+[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting) or watch a video below:

 [](https://www.youtube.com/watch?v=u98wEMz-9to "Jobs postings")

 ### Trip Planner

-[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/trip_planner) or watch a video below:
+[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/trip_planner) or watch a video below:

 [](https://www.youtube.com/watch?v=xis7rWp-hjs "Trip Planner")

 ### Stock Analysis

-[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/stock_analysis) or watch a video below:
+[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/stock_analysis) or watch a video below:

 [](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")
docs — CLI reference

@@ -282,7 +282,25 @@ Watch this video tutorial for a step-by-step demonstration of deploying your crew:
   allowfullscreen
 ></iframe>

-### 11. API Keys
+### 12. Login
+
+Authenticate with CrewAI Enterprise using a secure device code flow (no email entry required).
+
+```shell Terminal
+crewai login
+```
+
+What happens:
+- A verification URL and short code are displayed in your terminal
+- Your browser opens to the verification URL
+- Enter/confirm the code to complete authentication
+
+Notes:
+- The OAuth2 provider and domain are configured via `crewai config` (defaults use `login.crewai.com`)
+- After successful login, the CLI also attempts to authenticate to the Tool Repository automatically
+- If you reset your configuration, run `crewai login` again to re-authenticate
+
+### 13. API Keys

 When running the ```crewai create crew``` command, the CLI will show you a list of available LLM providers to choose from, followed by model selection for your chosen provider.

@@ -310,7 +328,7 @@ See the following link for each provider's key name:

 * [LiteLLM Providers](https://docs.litellm.ai/docs/providers)

-### 12. Configuration Management
+### 14. Configuration Management

 Manage CLI configuration settings for CrewAI.

@@ -385,6 +403,10 @@ Reset all configuration to defaults:
 crewai config reset
 ```

+<Tip>
+After resetting configuration, re-run `crewai login` to authenticate again.
+</Tip>
+
 <Note>
 Configuration settings are stored in `~/.config/crewai/settings.json`. Some settings like organization name and UUID are read-only and managed through authentication and organization commands. Tool repository related settings are hidden and cannot be set directly by users.
 </Note>
docs — Event Listeners

@@ -44,12 +44,12 @@ To create a custom event listener, you need to:

 Here's a simple example of a custom event listener class:

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     CrewKickoffStartedEvent,
     CrewKickoffCompletedEvent,
     AgentExecutionCompletedEvent,
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener

 class MyCustomListener(BaseEventListener):
     def __init__(self):
@@ -146,7 +146,7 @@ my_project/

 ```python
 # my_custom_listener.py
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
 # ... import events ...

 class MyCustomListener(BaseEventListener):
@@ -279,7 +279,7 @@ Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` includes:

 For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:

 ```python
-from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent
+from crewai.events import crewai_event_bus, CrewKickoffStartedEvent

 with crewai_event_bus.scoped_handlers():
     @crewai_event_bus.on(CrewKickoffStartedEvent)
docs — Flows

@@ -97,7 +97,13 @@ The state's unique ID and stored data can be useful for tracking flow executions.

 ### @start()

-The `@start()` decorator is used to mark a method as the starting point of a Flow. When a Flow is started, all the methods decorated with `@start()` are executed in parallel. You can have multiple start methods in a Flow, and they will all be executed when the Flow is started.
+The `@start()` decorator marks entry points for a Flow. You can:
+
+- Declare multiple unconditional starts: `@start()`
+- Gate a start on a prior method or router label: `@start("method_or_label")`
+- Provide a callable condition to control when a start should fire
+
+All satisfied `@start()` methods will execute (often in parallel) when the Flow begins or resumes.

 ### @listen()
docs — Knowledge

@@ -24,6 +24,41 @@ For file-based Knowledge Sources, make sure to place your files in a `knowledge` directory.
 Also, use relative paths from the `knowledge` directory when creating the source.
 </Tip>

+### Vector store (RAG) client configuration
+
+CrewAI exposes a provider-neutral RAG client abstraction for vector stores. The default provider is ChromaDB, and Qdrant is supported as well. You can switch providers using configuration utilities.
+
+Supported today:
+- ChromaDB (default)
+- Qdrant
+
+```python Code
+from crewai.rag.config.utils import set_rag_config, get_rag_client, clear_rag_config
+
+# ChromaDB (default)
+from crewai.rag.chromadb.config import ChromaDBConfig
+set_rag_config(ChromaDBConfig())
+chromadb_client = get_rag_client()
+
+# Qdrant
+from crewai.rag.qdrant.config import QdrantConfig
+set_rag_config(QdrantConfig())
+qdrant_client = get_rag_client()
+
+# Example operations (same API for any provider)
+client = qdrant_client  # or chromadb_client
+client.create_collection(collection_name="docs")
+client.add_documents(
+    collection_name="docs",
+    documents=[{"id": "1", "content": "CrewAI enables collaborative AI agents."}],
+)
+results = client.search(collection_name="docs", query="collaborative agents", limit=3)
+
+clear_rag_config()  # optional reset
+```
+
+This RAG client is separate from Knowledge’s built-in storage. Use it when you need direct vector-store control or custom retrieval pipelines.
+
 ### Basic String Knowledge Example

 ```python Code
@@ -681,11 +716,11 @@ CrewAI emits events during the knowledge retrieval process that you can listen for:

 #### Example: Monitoring Knowledge Retrieval

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     KnowledgeRetrievalStartedEvent,
     KnowledgeRetrievalCompletedEvent,
+    BaseEventListener,
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener

 class KnowledgeMonitorListener(BaseEventListener):
     def setup_listeners(self, crewai_event_bus):
docs — LLMs (streaming)

@@ -733,10 +733,10 @@ CrewAI supports streaming responses from LLMs, allowing your application to receive output in real time.

 CrewAI emits events for each chunk received during streaming:

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     LLMStreamChunkEvent
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener

 class MyCustomListener(BaseEventListener):
     def setup_listeners(self, crewai_event_bus):
@@ -758,8 +758,8 @@ CrewAI supports streaming responses from LLMs, allowing your application to receive output in real time.

 ```python
 from crewai import LLM, Agent, Task, Crew
-from crewai.utilities.events import LLMStreamChunkEvent
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import LLMStreamChunkEvent
+from crewai.events import BaseEventListener

 class MyCustomListener(BaseEventListener):
     def setup_listeners(self, crewai_event_bus):
docs — Memory

@@ -738,6 +738,17 @@ print(f"OpenAI: {openai_time:.2f}s")
 print(f"Ollama: {ollama_time:.2f}s")
 ```

+### Entity Memory batching behavior
+
+Entity Memory supports batching when saving multiple entities at once. When you pass a list of `EntityMemoryItem`, the system:
+
+- Emits a single MemorySaveStartedEvent with `entity_count`
+- Saves each entity internally, collecting any partial errors
+- Emits MemorySaveCompletedEvent with aggregate metadata (saved count, errors)
+- Raises a partial-save exception if some entities failed (includes counts)
+
+This improves performance and observability when writing many entities in one operation.
+
 ## 2. External Memory

 External Memory provides a standalone memory system that operates independently from the crew's built-in memory. This is ideal for specialized memory providers or cross-application memory sharing.
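The batching contract above is visible on the event bus, so it can be monitored without touching Entity Memory internals. A minimal, hedged sketch (assuming the `crewai.events` import path introduced in this changeset; the `entity_count` attribute is taken from the bullet list above):

```python
from crewai.events import (
    BaseEventListener,
    MemorySaveStartedEvent,
    MemorySaveCompletedEvent,
)


class BatchSaveMonitor(BaseEventListener):
    """Logs batched entity saves using the events described above."""

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(MemorySaveStartedEvent)
        def on_save_started(source, event):
            # One started event per batch; entity_count reflects the batch size.
            count = getattr(event, "entity_count", None)
            print(f"Memory save started (entity_count={count})")

        @crewai_event_bus.on(MemorySaveCompletedEvent)
        def on_save_completed(source, event):
            # The completed event carries aggregate metadata (saved count, errors).
            print("Memory save completed")
```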
@@ -1041,8 +1052,8 @@ CrewAI emits the following memory-related events:

 Track memory operation timing to optimize your application:

 ```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
     MemoryQueryCompletedEvent,
     MemorySaveCompletedEvent
 )
@@ -1076,8 +1087,8 @@ memory_monitor = MemoryPerformanceMonitor()

 Log memory operations for debugging and insights:

 ```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
     MemorySaveStartedEvent,
     MemoryQueryStartedEvent,
     MemoryRetrievalCompletedEvent
@@ -1117,8 +1128,8 @@ memory_logger = MemoryLogger()

 Capture and respond to memory errors:

 ```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
     MemorySaveFailedEvent,
     MemoryQueryFailedEvent
 )
@@ -1167,8 +1178,8 @@ error_tracker = MemoryErrorTracker(notify_email="admin@example.com")

 Memory events can be forwarded to analytics and monitoring platforms to track performance metrics, detect anomalies, and visualize memory usage patterns:

 ```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
     MemoryQueryCompletedEvent,
     MemorySaveCompletedEvent
 )
docs — Tasks

@@ -61,6 +61,11 @@ crew = Crew(
 | **Guardrail** _(optional)_ | `guardrail` | `Optional[Callable]` | Function to validate task output before proceeding to next task. |
 | **Guardrail Max Retries** _(optional)_ | `guardrail_max_retries` | `Optional[int]` | Maximum number of retries when guardrail validation fails. Defaults to 3. |

+<Note type="warning" title="Deprecated: max_retries">
+The task attribute `max_retries` is deprecated and will be removed in v1.0.0.
+Use `guardrail_max_retries` instead to control retry attempts when a guardrail fails.
+</Note>
+
 ## Creating Tasks

 There are two ways to create tasks in CrewAI: using **YAML configuration (recommended)** or defining them **directly in code**.

@@ -432,7 +437,7 @@ When a guardrail returns `(False, error)`:
 2. The agent attempts to fix the issue
 3. The process repeats until:
    - The guardrail returns `(True, result)`
-   - Maximum retries are reached
+   - Maximum retries are reached (`guardrail_max_retries`)

 Example with retry handling:
 ```python Code
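A minimal sketch of the replacement attribute in use; the validation function and `research_agent` below are illustrative, not part of the changeset:

```python
import json

from crewai import Task


def must_be_json(output):
    """Guardrail: accept the task output only if it parses as JSON."""
    try:
        return (True, json.loads(output.raw))
    except json.JSONDecodeError as exc:
        return (False, f"Invalid JSON: {exc}")


task = Task(
    description="Summarize the findings as a JSON object.",
    expected_output="A valid JSON string",
    agent=research_agent,      # hypothetical agent defined elsewhere
    guardrail=must_be_json,
    guardrail_max_retries=5,   # replaces the deprecated max_retries
)
```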
docs — Enterprise: Authentication Integrations

@@ -59,7 +59,7 @@ Before using Authentication Integrations, ensure you have:

 3. Click **Connect** on your desired service from the Authentication Integrations section
 4. Complete the OAuth authentication flow
 5. Grant necessary permissions for your use case
-6. Get your Enterprise Token from your [CrewAI Enterprise](https://app.crewai.com) account page - https://app.crewai.com/crewai_plus/settings/account
+6. All set! Get your Enterprise Token from the **Integration** tab in [CrewAI Enterprise](https://app.crewai.com)

 <Frame>
docs — Enterprise: Traces

@@ -141,6 +141,16 @@ Traces are invaluable for troubleshooting issues with your crews:
 </Step>
 </Steps>

+## Performance and batching
+
+CrewAI batches trace uploads to reduce overhead on high-volume runs:
+
+- A TraceBatchManager buffers events and sends them in batches via the Plus API client
+- Reduces network chatter and improves reliability on flaky connections
+- Automatically enabled in the default trace listener; no configuration needed
+
+This yields more stable tracing under load while preserving detailed task/agent telemetry.
+
 <Card title="Need Help?" icon="headset" href="mailto:support@crewai.com">
 Contact our support team for assistance with trace analysis or any other CrewAI Enterprise features.
 </Card>
docs — Enterprise: Automation Triggers

@@ -96,6 +96,13 @@ class MyAutomatedCrew:

 The crew will automatically receive and can access the trigger payload through the standard CrewAI context mechanisms.

+<Note>
+Crew and Flow inputs can include `crewai_trigger_payload`. CrewAI automatically injects this payload:
+- Tasks: appended to the first task's description by default ("Trigger Payload: {crewai_trigger_payload}")
+- Control via `allow_crewai_trigger_context`: set `True` to always inject, `False` to never inject
+- Flows: any `@start()` method that accepts a `crewai_trigger_payload` parameter will receive it
+</Note>
+
 ### Integration with Flows

 For flows, you have more control over how trigger data is handled:
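A hedged sketch of the Flow-side rule from the note above; the class and payload fields are illustrative:

```python
from crewai.flow.flow import Flow, start


class TriggeredFlow(Flow):
    @start()
    def handle_trigger(self, crewai_trigger_payload: dict | None = None):
        # Per the note above, a @start() method that declares this parameter
        # receives the trigger payload automatically; it stays None otherwise.
        if crewai_trigger_payload:
            return crewai_trigger_payload.get("record_id")
        return None
```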
docs — Flows (state)

@@ -348,6 +348,31 @@ class SelectivePersistFlow(Flow):

 ## Advanced State Patterns

+### Conditional starts and resumable execution
+
+Flows support conditional `@start()` and resumable execution for HITL/cyclic scenarios:
+
+```python
+from crewai.flow.flow import Flow, start, listen, and_, or_
+
+class ResumableFlow(Flow):
+    @start()  # unconditional start
+    def init(self):
+        ...
+
+    # Conditional start: run after "init" or external trigger name
+    @start("init")
+    def maybe_begin(self):
+        ...
+
+    @listen(and_(init, maybe_begin))
+    def proceed(self):
+        ...
+```
+
+- Conditional `@start()` accepts a method name, a router label, or a callable condition.
+- During resume, listeners continue from prior checkpoints; cycle/router branches honor resumption flags.
+
 ### State-Based Conditional Logic

 You can use state to implement complex conditional logic in your flows:
docs — Installation

@@ -30,6 +30,12 @@ Watch this video tutorial for a step-by-step demonstration of the installation process:
 If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
 </Note>

+<Note>
+**OpenAI SDK Requirement**
+
+CrewAI 0.175.0 requires `openai >= 1.13.3`. If you manage dependencies yourself, ensure your environment satisfies this constraint to avoid import/runtime issues.
+</Note>
+
 CrewAI uses `uv` as its dependency management and package handling tool. It simplifies project setup and execution, offering a seamless experience.

 If you haven't installed `uv` yet, follow **step 1** to quickly get it set up on your system, else you can skip to **step 2**.
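One hedged way to verify that version constraint in an existing environment, using the standard library plus the widely available `packaging` helper (an assumption, not part of the changeset):

```python
from importlib.metadata import version

from packaging.version import Version

installed = Version(version("openai"))
assert installed >= Version("1.13.3"), (
    f"CrewAI 0.175.0 requires openai >= 1.13.3, found {installed}"
)
```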
Binary image changed (not shown): 54 KiB before, 72 KiB after.
docs — Event Listeners (Korean, translated)

@@ -44,12 +44,12 @@ With Prompt Tracing you can do the following:

 Below is a simple example of a custom event listener class:

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     CrewKickoffStartedEvent,
     CrewKickoffCompletedEvent,
     AgentExecutionCompletedEvent,
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener

 class MyCustomListener(BaseEventListener):
     def __init__(self):
@@ -146,7 +146,7 @@ my_project/

 ```python
 # my_custom_listener.py
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
 # ... import events ...

 class MyCustomListener(BaseEventListener):
@@ -279,7 +279,7 @@ CrewAI provides a variety of events you can listen for:

 For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:

 ```python
-from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent
+from crewai.events import crewai_event_bus, CrewKickoffStartedEvent

 with crewai_event_bus.scoped_handlers():
     @crewai_event_bus.on(CrewKickoffStartedEvent)
@@ -683,11 +683,11 @@ CrewAI emits events during the knowledge retrieval process that you can listen for:

 #### Example: Monitoring Knowledge Retrieval

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     KnowledgeRetrievalStartedEvent,
     KnowledgeRetrievalCompletedEvent,
+    BaseEventListener,
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener

 class KnowledgeMonitorListener(BaseEventListener):
     def setup_listeners(self, crewai_event_bus):
@@ -731,10 +731,10 @@ CrewAI supports streaming responses from LLMs, so your application can receive output in real time.

 CrewAI emits events for each chunk received during streaming:

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     LLMStreamChunkEvent
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener

 class MyCustomListener(BaseEventListener):
     def setup_listeners(self, crewai_event_bus):
@@ -756,8 +756,8 @@ CrewAI supports streaming responses from LLMs, so your application can receive output in real time.

 ```python
 from crewai import LLM, Agent, Task, Crew
-from crewai.utilities.events import LLMStreamChunkEvent
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import LLMStreamChunkEvent
+from crewai.events import BaseEventListener

 class MyCustomListener(BaseEventListener):
     def setup_listeners(self, crewai_event_bus):
docs — Memory (Korean, translated)

@@ -985,8 +985,8 @@ CrewAI emits the following memory-related events:

 Track memory operation timing to optimize your application:

 ```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
     MemoryQueryCompletedEvent,
     MemorySaveCompletedEvent
 )
@@ -1020,8 +1020,8 @@ memory_monitor = MemoryPerformanceMonitor()

 Log memory operations for debugging and insights:

 ```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
     MemorySaveStartedEvent,
     MemoryQueryStartedEvent,
     MemoryRetrievalCompletedEvent
@@ -1061,8 +1061,8 @@ memory_logger = MemoryLogger()

 Capture and respond to memory errors:

 ```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
     MemorySaveFailedEvent,
     MemoryQueryFailedEvent
 )
@@ -1111,8 +1111,8 @@ error_tracker = MemoryErrorTracker(notify_email="admin@example.com")

 Memory events can be forwarded to analytics and monitoring platforms to track performance metrics, detect anomalies, and visualize memory usage patterns:

 ```python
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events import (
+from crewai.events import (
+    BaseEventListener,
     MemoryQueryCompletedEvent,
     MemorySaveCompletedEvent
 )
docs — Enterprise: Authentication Integrations (Korean, translated)

@@ -58,7 +58,7 @@ Before using Authentication Integrations, make sure you have the following ready:

 3. Click the **Connect** button for the desired service in the Authentication Integrations section.
 4. Complete the OAuth authentication flow.
 5. Grant the permissions required for your use case.
-6. Get your Enterprise Token from your [CrewAI Enterprise](https://app.crewai.com) account page - https://app.crewai.com/crewai_plus/settings/account
+6. All set! Get your Enterprise Token from the **Integration** tab in [CrewAI Enterprise](https://app.crewai.com).

 <Frame>

@@ -176,4 +176,4 @@ You can deploy the crew and scope each integration to specific users:

 <Card title="Need help?" icon="headset" href="mailto:support@crewai.com">
 Contact our support team for assistance with integration setup or troubleshooting.
 </Card>
docs — Event Listeners (Portuguese, translated)

@@ -44,12 +44,12 @@ To create a custom event listener, you need to:

 Here is a simple example of a custom event listener class:

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     CrewKickoffStartedEvent,
     CrewKickoffCompletedEvent,
     AgentExecutionCompletedEvent,
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener

 class MeuListenerPersonalizado(BaseEventListener):
     def __init__(self):
@@ -146,7 +146,7 @@ my_project/

 ```python
 # my_custom_listener.py
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener
 # ... import events ...

 class MyCustomListener(BaseEventListener):
@@ -268,7 +268,7 @@ Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` includes:

 For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:

 ```python
-from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent
+from crewai.events import crewai_event_bus, CrewKickoffStartedEvent

 with crewai_event_bus.scoped_handlers():
     @crewai_event_bus.on(CrewKickoffStartedEvent)
@@ -681,11 +681,11 @@ CrewAI emits events during the knowledge retrieval process that you can listen for:

 #### Example: Monitoring Knowledge Retrieval

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     KnowledgeRetrievalStartedEvent,
     KnowledgeRetrievalCompletedEvent,
+    BaseEventListener,
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener

 class KnowledgeMonitorListener(BaseEventListener):
     def setup_listeners(self, crewai_event_bus):
@@ -708,10 +708,10 @@ CrewAI supports streaming responses from LLMs, allowing your application to receive output in real time.

 CrewAI emits events for each chunk received during streaming:

 ```python
-from crewai.utilities.events import (
+from crewai.events import (
     LLMStreamChunkEvent
 )
-from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.events import BaseEventListener

 class MyCustomListener(BaseEventListener):
     def setup_listeners(self, crewai_event_bus):
docs — Enterprise: Authentication Integrations (Portuguese, translated)

@@ -58,7 +58,7 @@ Before using Authentication Integrations, make sure you have:

 3. Click **Connect** on the desired service in the Authentication Integrations section
 4. Complete the OAuth authentication flow
 5. Grant the permissions required for your use case
-6. Get your Enterprise Token from your [CrewAI Enterprise](https://app.crewai.com) account page - https://app.crewai.com/crewai_plus/settings/account
+6. All set! Get your Enterprise Token from the **Integration** tab in [CrewAI Enterprise](https://app.crewai.com)

 <Frame>

@@ -176,4 +176,4 @@ Use the `user_bearer_token` to scope the integration to a specific user:

 <Card title="Need help?" icon="headset" href="mailto:support@crewai.com">
 Contact our support team for assistance with integration setup or troubleshooting.
 </Card>
pyproject.toml

@@ -48,7 +48,7 @@ Documentation = "https://docs.crewai.com"
 Repository = "https://github.com/crewAIInc/crewAI"

 [project.optional-dependencies]
-tools = ["crewai-tools~=0.65.0"]
+tools = ["crewai-tools~=0.69.0"]
 embeddings = [
     "tiktoken~=0.8.0"
 ]

@@ -74,9 +74,10 @@ qdrant = [

 [tool.uv]
 dev-dependencies = [
-    "ruff>=0.8.2",
-    "mypy>=1.10.0",
-    "pre-commit>=3.6.0",
+    "ruff>=0.12.11",
+    "mypy>=1.17.1",
+    "pre-commit>=4.3.0",
+    "bandit>=1.8.6",
     "pillow>=10.2.0",
     "cairosvg>=2.7.1",
     "pytest>=8.0.0",

@@ -88,15 +89,41 @@ dev-dependencies = [
     "pytest-timeout>=2.3.1",
     "pytest-xdist>=3.6.1",
     "pytest-split>=0.9.0",
+    "types-requests==2.32.*",
+    "types-pyyaml==6.0.*",
+    "types-regex==2024.11.6.*",
+    "types-appdirs==1.4.*",
 ]

 [project.scripts]
 crewai = "crewai.cli.cli:crewai"

+[tool.ruff]
+exclude = [
+    "src/crewai/cli/templates",
+]
+fix = true
+
+[tool.ruff.lint]
+select = [
+    "B006",
+    "UP006",
+    "UP007",
+    "UP035",
+    "UP037",
+    "UP004",
+    "UP008",
+    "UP010",
+    "UP018",
+    "UP031",
+    "UP032",
+    "I001",
+    "I002",
+]
+
 [tool.mypy]
 ignore_missing_imports = true
 disable_error_code = 'import-untyped'
-exclude = ["cli/templates"]
+strict = true
+exclude = ["src/crewai/cli/templates"]
+
+[tool.bandit]
+exclude_dirs = ["src/crewai/cli/templates"]
src/crewai/__init__.py

@@ -1,4 +1,30 @@
 import warnings
+from typing import Any
+
+
+def _suppress_pydantic_deprecation_warnings() -> None:
+    """Suppress Pydantic deprecation warnings using targeted monkey patch."""
+    original_warn = warnings.warn
+
+    def filtered_warn(
+        message: Any,
+        category: type | None = None,
+        stacklevel: int = 1,
+        source: Any = None,
+    ) -> Any:
+        if (
+            category
+            and hasattr(category, "__module__")
+            and category.__module__ == "pydantic.warnings"
+        ):
+            return None
+        return original_warn(message, category, stacklevel + 1, source)
+
+    setattr(warnings, "warn", filtered_warn)
+
+
+_suppress_pydantic_deprecation_warnings()
+
 import threading
 import urllib.request

@@ -15,17 +41,10 @@ from crewai.tasks.llm_guardrail import LLMGuardrail
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry.telemetry import Telemetry

-warnings.filterwarnings(
-    "ignore",
-    message="Pydantic serializer warnings:",
-    category=UserWarning,
-    module="pydantic.main",
-)
-
 _telemetry_submitted = False


-def _track_install():
+def _track_install() -> None:
     """Track package installation/first-use via Scarf analytics."""
     global _telemetry_submitted

@@ -36,7 +55,7 @@ def _track_install():
     pixel_url = "https://api.scarf.sh/v2/packages/CrewAI/crewai/docs/00f2dad1-8334-4a39-934e-003b2e1146db"

     req = urllib.request.Request(pixel_url)
-    req.add_header('User-Agent', f'CrewAI-Python/{__version__}')
+    req.add_header("User-Agent", f"CrewAI-Python/{__version__}")

     with urllib.request.urlopen(req, timeout=2):  # nosec B310
         _telemetry_submitted = True

@@ -45,7 +64,7 @@ def _track_install():
         pass


-def _track_install_async():
+def _track_install_async() -> None:
     """Track installation in background thread to avoid blocking imports."""
     if not Telemetry._is_telemetry_disabled():
         thread = threading.Thread(target=_track_install, daemon=True)

@@ -54,7 +73,7 @@ def _track_install_async():
 _track_install_async()

-__version__ = "0.175.0"
+__version__ = "0.177.0"
 __all__ = [
     "Agent",
     "Crew",
src/crewai/agent.py

@@ -4,12 +4,9 @@ import time
 from typing import (
     Any,
     Callable,
-    Dict,
-    List,
     Literal,
     Optional,
     Sequence,
-    Tuple,
     Type,
     Union,
 )
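The recurring pattern in the hunks that follow is PEP 585 builtin generics replacing `typing.Dict`/`List`/`Tuple`, matching the `UP006`/`UP035` ruff rules enabled in pyproject.toml above. A small before/after illustration (the signature is illustrative, not taken from the diff):

```python
from typing import Any, Optional

# Before:
#   from typing import Dict, List
#   def run(tools: Optional[List[Any]], config: Dict[str, Any]) -> List[str]: ...

# After (Python 3.9+): builtin generics, no extra typing imports needed.
def run(tools: Optional[list[Any]], config: dict[str, Any]) -> list[str]:
    return [str(tool) for tool in tools or []]
```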
@@ -38,17 +35,17 @@ from crewai.utilities.agent_utils import (
 )
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
 from crewai.utilities.converter import generate_model_description
-from crewai.utilities.events.agent_events import (
+from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,
 )
-from crewai.utilities.events.crewai_event_bus import crewai_event_bus
-from crewai.utilities.events.memory_events import (
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.memory_events import (
     MemoryRetrievalStartedEvent,
     MemoryRetrievalCompletedEvent,
 )
-from crewai.utilities.events.knowledge_events import (
+from crewai.events.types.knowledge_events import (
     KnowledgeQueryCompletedEvent,
     KnowledgeQueryFailedEvent,
     KnowledgeQueryStartedEvent,

@@ -151,7 +148,7 @@ class Agent(BaseAgent):
         default=None,
         description="Maximum number of reasoning attempts before executing the task. If None, will try until ready.",
     )
-    embedder: Optional[Dict[str, Any]] = Field(
+    embedder: Optional[dict[str, Any]] = Field(
         default=None,
         description="Embedder configuration for the agent.",
     )

@@ -171,7 +168,7 @@ class Agent(BaseAgent):
         default=None,
         description="The Agent's role to be used from your repository.",
     )
-    guardrail: Optional[Union[Callable[[Any], Tuple[bool, Any]], str]] = Field(
+    guardrail: Optional[Union[Callable[[Any], tuple[bool, Any]], str]] = Field(
         default=None,
         description="Function or string description of a guardrail to validate agent output",
     )

@@ -208,7 +205,7 @@ class Agent(BaseAgent):
             self.cache_handler = CacheHandler()
             self.set_cache_handler(self.cache_handler)

-    def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
+    def set_knowledge(self, crew_embedder: Optional[dict[str, Any]] = None):
         try:
             if self.embedder is None and crew_embedder:
                 self.embedder = crew_embedder

@@ -245,7 +242,7 @@ class Agent(BaseAgent):
         self,
         task: Task,
         context: Optional[str] = None,
-        tools: Optional[List[BaseTool]] = None,
+        tools: Optional[list[BaseTool]] = None,
     ) -> str:
         """Execute a task with the agent.

@@ -554,14 +551,14 @@ class Agent(BaseAgent):
         )["output"]

     def create_agent_executor(
-        self, tools: Optional[List[BaseTool]] = None, task=None
+        self, tools: Optional[list[BaseTool]] = None, task=None
     ) -> None:
         """Create an agent executor for the agent.

         Returns:
             An instance of the CrewAgentExecutor class.
         """
-        raw_tools: List[BaseTool] = tools or self.tools or []
+        raw_tools: list[BaseTool] = tools or self.tools or []
         parsed_tools = parse_tools(raw_tools)

         prompt = Prompts(

@@ -603,7 +600,7 @@ class Agent(BaseAgent):
             callbacks=[TokenCalcHandler(self._token_process)],
         )

-    def get_delegation_tools(self, agents: List[BaseAgent]):
+    def get_delegation_tools(self, agents: list[BaseAgent]):
         agent_tools = AgentTools(agents=agents)
         tools = agent_tools.tools()
         return tools

@@ -654,7 +651,7 @@ class Agent(BaseAgent):
         )
         return task_prompt

-    def _render_text_description(self, tools: List[Any]) -> str:
+    def _render_text_description(self, tools: list[Any]) -> str:
         """Render the tool name and description in plain text.

         Output will be in the format of:

@@ -796,7 +793,7 @@ class Agent(BaseAgent):

     def kickoff(
         self,
-        messages: Union[str, List[Dict[str, str]]],
+        messages: Union[str, list[dict[str, str]]],
         response_format: Optional[Type[Any]] = None,
     ) -> LiteAgentOutput:
         """

@@ -836,7 +833,7 @@ class Agent(BaseAgent):

     async def kickoff_async(
         self,
-        messages: Union[str, List[Dict[str, str]]],
+        messages: Union[str, list[dict[str, str]]],
         response_format: Optional[Type[Any]] = None,
     ) -> LiteAgentOutput:
         """
@@ -1,5 +1,5 @@
-from .cache.cache_handler import CacheHandler
-from .parser import CrewAgentParser
-from .tools_handler import ToolsHandler
+from crewai.agents.cache.cache_handler import CacheHandler
+from crewai.agents.parser import parse, AgentAction, AgentFinish, OutputParserException
+from crewai.agents.tools_handler import ToolsHandler

-__all__ = ["CacheHandler", "CrewAgentParser", "ToolsHandler"]
+__all__ = ["CacheHandler", "parse", "AgentAction", "AgentFinish", "OutputParserException", "ToolsHandler"]
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional

 from pydantic import PrivateAttr

@@ -16,16 +16,16 @@ class BaseAgentAdapter(BaseAgent, ABC):
     """

     adapted_structured_output: bool = False
-    _agent_config: Optional[Dict[str, Any]] = PrivateAttr(default=None)
+    _agent_config: Optional[dict[str, Any]] = PrivateAttr(default=None)

     model_config = {"arbitrary_types_allowed": True}

-    def __init__(self, agent_config: Optional[Dict[str, Any]] = None, **kwargs: Any):
+    def __init__(self, agent_config: Optional[dict[str, Any]] = None, **kwargs: Any):
         super().__init__(adapted_agent=True, **kwargs)
         self._agent_config = agent_config

     @abstractmethod
-    def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
+    def configure_tools(self, tools: Optional[list[BaseTool]] = None) -> None:
         """Configure and adapt tools for the specific agent implementation.

         Args:
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Any, List, Optional
+from typing import Any, Optional

 from crewai.tools.base_tool import BaseTool

@@ -12,15 +12,15 @@ class BaseToolAdapter(ABC):
     different frameworks and platforms.
     """

-    original_tools: List[BaseTool]
-    converted_tools: List[Any]
+    original_tools: list[BaseTool]
+    converted_tools: list[Any]

-    def __init__(self, tools: Optional[List[BaseTool]] = None):
+    def __init__(self, tools: Optional[list[BaseTool]] = None):
         self.original_tools = tools or []
         self.converted_tools = []

     @abstractmethod
-    def configure_tools(self, tools: List[BaseTool]) -> None:
+    def configure_tools(self, tools: list[BaseTool]) -> None:
         """Configure and convert tools for the specific implementation.

         Args:

@@ -28,7 +28,7 @@ class BaseToolAdapter(ABC):
         """
         pass

-    def tools(self) -> List[Any]:
+    def tools(self) -> list[Any]:
         """Return all converted tools."""
         return self.converted_tools
@@ -1,4 +1,4 @@
-from typing import Any, AsyncIterable, Dict, List, Optional
+from typing import Any, Optional

 from pydantic import Field, PrivateAttr

@@ -14,15 +14,14 @@ from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.tools.base_tool import BaseTool
 from crewai.utilities import Logger
 from crewai.utilities.converter import Converter
-from crewai.utilities.events import crewai_event_bus
-from crewai.utilities.events.agent_events import (
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,
 )

 try:
     from langchain_core.messages import ToolMessage
     from langgraph.checkpoint.memory import MemorySaver
     from langgraph.prebuilt import create_react_agent

@@ -52,10 +51,10 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
         role: str,
         goal: str,
         backstory: str,
-        tools: Optional[List[BaseTool]] = None,
+        tools: Optional[list[BaseTool]] = None,
         llm: Any = None,
         max_iterations: int = 10,
-        agent_config: Optional[Dict[str, Any]] = None,
+        agent_config: Optional[dict[str, Any]] = None,
         **kwargs,
     ):
         """Initialize the LangGraph agent adapter."""

@@ -82,7 +81,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
         try:
             self._memory = MemorySaver()

-            converted_tools: List[Any] = self._tool_adapter.tools()
+            converted_tools: list[Any] = self._tool_adapter.tools()
             if self._agent_config:
                 self._graph = create_react_agent(
                     model=self.llm,

@@ -125,7 +124,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
         self,
         task: Any,
         context: Optional[str] = None,
-        tools: Optional[List[BaseTool]] = None,
+        tools: Optional[list[BaseTool]] = None,
     ) -> str:
         """Execute a task using the LangGraph workflow."""
         self.create_agent_executor(tools)

@@ -198,11 +197,11 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             )
             raise

-    def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
+    def create_agent_executor(self, tools: Optional[list[BaseTool]] = None) -> None:
         """Configure the LangGraph agent for execution."""
         self.configure_tools(tools)

-    def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
+    def configure_tools(self, tools: Optional[list[BaseTool]] = None) -> None:
         """Configure tools for the LangGraph agent."""
         if tools:
             all_tools = list(self.tools or []) + list(tools or [])

@@ -210,7 +209,7 @@ class LangGraphAgentAdapter(BaseAgentAdapter):
             available_tools = self._tool_adapter.tools()
             self._graph.tools = available_tools

-    def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
+    def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
         """Implement delegation tools support for LangGraph."""
         agent_tools = AgentTools(agents=agents)
         return agent_tools.tools()
@@ -1,5 +1,5 @@
 import inspect
-from typing import Any, List, Optional
+from typing import Any, Optional

 from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
 from crewai.tools.base_tool import BaseTool

@@ -8,11 +8,11 @@ from crewai.tools.base_tool import BaseTool
 class LangGraphToolAdapter(BaseToolAdapter):
     """Adapts CrewAI tools to LangGraph agent tool compatible format"""

-    def __init__(self, tools: Optional[List[BaseTool]] = None):
+    def __init__(self, tools: Optional[list[BaseTool]] = None):
         self.original_tools = tools or []
         self.converted_tools = []

-    def configure_tools(self, tools: List[BaseTool]) -> None:
+    def configure_tools(self, tools: list[BaseTool]) -> None:
         """
         Configure and convert CrewAI tools to LangGraph-compatible format.
         LangGraph expects tools in langchain_core.tools format.

@@ -57,5 +57,5 @@ class LangGraphToolAdapter(BaseToolAdapter):

         self.converted_tools = converted_tools

-    def tools(self) -> List[Any]:
+    def tools(self) -> list[Any]:
         return self.converted_tools or []
@@ -1,4 +1,4 @@
-from typing import Any, List, Optional
+from typing import Any, Optional

 from pydantic import Field, PrivateAttr

@@ -10,8 +10,8 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.tools import BaseTool
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities import Logger
-from crewai.utilities.events import crewai_event_bus
-from crewai.utilities.events.agent_events import (
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,

@@ -44,7 +44,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
     def __init__(
         self,
         model: str = "gpt-4o-mini",
-        tools: Optional[List[BaseTool]] = None,
+        tools: Optional[list[BaseTool]] = None,
         agent_config: Optional[dict] = None,
         **kwargs,
     ):

@@ -85,7 +85,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
         self,
         task: Any,
         context: Optional[str] = None,
-        tools: Optional[List[BaseTool]] = None,
+        tools: Optional[list[BaseTool]] = None,
     ) -> str:
         """Execute a task using the OpenAI Assistant"""
         self._converter_adapter.configure_structured_output(task)

@@ -131,7 +131,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
             )
             raise

-    def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
+    def create_agent_executor(self, tools: Optional[list[BaseTool]] = None) -> None:
         """
         Configure the OpenAI agent for execution.
         While OpenAI handles execution differently through Runner,

@@ -152,7 +152,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):

         self.agent_executor = Runner

-    def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
+    def configure_tools(self, tools: Optional[list[BaseTool]] = None) -> None:
         """Configure tools for the OpenAI Assistant"""
         if tools:
             self._tool_adapter.configure_tools(tools)

@@ -163,7 +163,7 @@ class OpenAIAgentAdapter(BaseAgentAdapter):
         """Process OpenAI Assistant execution result converting any structured output to a string"""
         return self._converter_adapter.post_process_result(result.final_output)

-    def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
+    def get_delegation_tools(self, agents: list[BaseAgent]) -> list[BaseTool]:
         """Implement delegation tools support"""
         agent_tools = AgentTools(agents=agents)
         tools = agent_tools.tools()
@@ -1,5 +1,5 @@
 import inspect
-from typing import Any, List, Optional
+from typing import Any, Optional

 from agents import FunctionTool, Tool

@@ -10,10 +10,10 @@ from crewai.tools import BaseTool
 class OpenAIAgentToolAdapter(BaseToolAdapter):
     """Adapter for OpenAI Assistant tools"""

-    def __init__(self, tools: Optional[List[BaseTool]] = None):
+    def __init__(self, tools: Optional[list[BaseTool]] = None):
         self.original_tools = tools or []

-    def configure_tools(self, tools: List[BaseTool]) -> None:
+    def configure_tools(self, tools: list[BaseTool]) -> None:
         """Configure tools for the OpenAI Assistant"""
         if self.original_tools:
             all_tools = tools + self.original_tools

@@ -23,8 +23,8 @@ class OpenAIAgentToolAdapter(BaseToolAdapter):
         self.converted_tools = self._convert_tools_to_openai_format(all_tools)

     def _convert_tools_to_openai_format(
-        self, tools: Optional[List[BaseTool]]
-    ) -> List[Tool]:
+        self, tools: Optional[list[BaseTool]]
+    ) -> list[Tool]:
         """Convert CrewAI tools to OpenAI Assistant tool format"""
         if not tools:
             return []
@@ -2,7 +2,7 @@ import uuid
from abc import ABC, abstractmethod
from copy import copy as shallow_copy
from hashlib import md5
from typing import Any, Callable, Dict, List, Optional, TypeVar
from typing import Any, Callable, Optional, TypeVar

from pydantic import (
UUID4,
@@ -40,11 +40,11 @@ class BaseAgent(ABC, BaseModel):
goal (str): Objective of the agent.
backstory (str): Backstory of the agent.
cache (bool): Whether the agent should use a cache for tool usage.
config (Optional[Dict[str, Any]]): Configuration for the agent.
config (Optional[dict[str, Any]]): Configuration for the agent.
verbose (bool): Verbose mode for the Agent Execution.
max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
allow_delegation (bool): Allow delegation of tasks to agents.
tools (Optional[List[Any]]): Tools at the agent's disposal.
tools (Optional[list[Any]]): Tools at the agent's disposal.
max_iter (int): Maximum iterations for an agent to execute a task.
agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
llm (Any): Language model that will run the agent.
@@ -59,15 +59,15 @@ class BaseAgent(ABC, BaseModel):

Methods:
execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[BaseTool]] = None) -> str:
execute_task(task: Any, context: Optional[str] = None, tools: Optional[list[BaseTool]] = None) -> str:
Abstract method to execute a task.
create_agent_executor(tools=None) -> None:
Abstract method to create an agent executor.
get_delegation_tools(agents: List["BaseAgent"]):
get_delegation_tools(agents: list["BaseAgent"]):
Abstract method to set the agents task tools for handling delegation and question asking to other agents in crew.
get_output_converter(llm, model, instructions):
Abstract method to get the converter class for the agent to create json/pydantic outputs.
interpolate_inputs(inputs: Dict[str, Any]) -> None:
interpolate_inputs(inputs: dict[str, Any]) -> None:
Interpolate inputs into the agent description and backstory.
set_cache_handler(cache_handler: CacheHandler) -> None:
Set the cache handler for the agent.
@@ -91,7 +91,7 @@ class BaseAgent(ABC, BaseModel):
role: str = Field(description="Role of the agent")
goal: str = Field(description="Objective of the agent")
backstory: str = Field(description="Backstory of the agent")
config: Optional[Dict[str, Any]] = Field(
config: Optional[dict[str, Any]] = Field(
description="Configuration for the agent", default=None, exclude=True
)
cache: bool = Field(
@@ -108,7 +108,7 @@ class BaseAgent(ABC, BaseModel):
default=False,
description="Enable agent to delegate and ask questions among each other.",
)
tools: Optional[List[BaseTool]] = Field(
tools: Optional[list[BaseTool]] = Field(
default_factory=list, description="Tools at agents' disposal"
)
max_iter: int = Field(
@@ -129,7 +129,7 @@ class BaseAgent(ABC, BaseModel):
default_factory=ToolsHandler,
description="An instance of the ToolsHandler class.",
)
tools_results: List[Dict[str, Any]] = Field(
tools_results: list[dict[str, Any]] = Field(
default=[], description="Results of the tools used by the agent."
)
max_tokens: Optional[int] = Field(
@@ -138,7 +138,7 @@ class BaseAgent(ABC, BaseModel):
knowledge: Optional[Knowledge] = Field(
default=None, description="Knowledge for the agent."
)
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
knowledge_sources: Optional[list[BaseKnowledgeSource]] = Field(
default=None,
description="Knowledge sources for the agent.",
)
@@ -150,7 +150,7 @@ class BaseAgent(ABC, BaseModel):
default_factory=SecurityConfig,
description="Security configuration for the agent, including fingerprinting.",
)
callbacks: List[Callable] = Field(
callbacks: list[Callable] = Field(
default=[], description="Callbacks to be used for the agent"
)
adapted_agent: bool = Field(
@@ -168,7 +168,7 @@ class BaseAgent(ABC, BaseModel):

@field_validator("tools")
@classmethod
def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
def validate_tools(cls, tools: list[Any]) -> list[BaseTool]:
"""Validate and process the tools provided to the agent.

This method ensures that each tool is either an instance of BaseTool
@@ -253,7 +253,7 @@ class BaseAgent(ABC, BaseModel):
self,
task: Any,
context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None,
tools: Optional[list[BaseTool]] = None,
) -> str:
pass

@@ -262,7 +262,7 @@ class BaseAgent(ABC, BaseModel):
pass

@abstractmethod
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
def get_delegation_tools(self, agents: list["BaseAgent"]) -> list[BaseTool]:
"""Set the task tools that init BaseAgenTools class."""
pass

@@ -320,7 +320,7 @@ class BaseAgent(ABC, BaseModel):

return copied_agent

def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
def interpolate_inputs(self, inputs: dict[str, Any]) -> None:
"""Interpolate inputs into the agent description and backstory."""
if self._original_role is None:
self._original_role = self.role
@@ -362,5 +362,5 @@ class BaseAgent(ABC, BaseModel):
self._rpm_controller = rpm_controller
self.create_agent_executor()

def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
def set_knowledge(self, crew_embedder: Optional[dict[str, Any]] = None):
pass
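Note: the bulk of these hunks swap `typing.List`/`typing.Dict` for the built-in generics standardized by PEP 585 (usable on Python 3.9+, well within this project's `requires-python = ">=3.10"`). A minimal standalone sketch of the before/after, not taken from the repository itself:

from typing import Optional  # Optional is still imported; List/Dict no longer are


def first_value(items: list[dict[str, int]]) -> Optional[int]:
    """Built-in list/dict are subscriptable at runtime on Python >= 3.9,
    so no typing imports are needed for these container annotations."""
    if not items:
        return None
    return next(iter(items[0].values()), None)


print(first_value([{"a": 1}]))  # 1
print(first_value([]))          # None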
@@ -1,5 +1,5 @@
import time
from typing import TYPE_CHECKING, Dict, List
from typing import TYPE_CHECKING

from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
@@ -7,7 +7,7 @@ from crewai.utilities import I18N
from crewai.utilities.converter import ConverterError
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.printer import Printer
from crewai.utilities.events.event_listener import event_listener
from crewai.events.event_listener import event_listener

if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent
@@ -21,7 +21,7 @@ class CrewAgentExecutorMixin:
task: "Task"
iterations: int
max_iter: int
messages: List[Dict[str, str]]
messages: list[dict[str, str]]
_i18n: I18N
_printer: Printer = Printer()
src/crewai/agents/cache/cache_handler.py
@@ -1,4 +1,4 @@
from typing import Any, Dict, Optional
from typing import Any, Optional

from pydantic import BaseModel, PrivateAttr

@@ -6,7 +6,7 @@ from pydantic import BaseModel, PrivateAttr
class CacheHandler(BaseModel):
"""Callback handler for tool usage."""

_cache: Dict[str, Any] = PrivateAttr(default_factory=dict)
_cache: dict[str, Any] = PrivateAttr(default_factory=dict)

def add(self, tool, input, output):
self._cache[f"{tool}-{input}"] = output
src/crewai/agents/constants.py (new file)
@@ -0,0 +1,27 @@
"""Constants for agent-related modules."""

import re
from typing import Final

# crewai.agents.parser constants

FINAL_ANSWER_ACTION: Final[str] = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE: Final[str] = (
"I did it wrong. Invalid Format: I missed the 'Action:' after 'Thought:'. I will do right next, and don't use a tool I have already used.\n"
)
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE: Final[str] = (
"I did it wrong. Invalid Format: I missed the 'Action Input:' after 'Action:'. I will do right next, and don't use a tool I have already used.\n"
)
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE: Final[str] = (
"I did it wrong. Tried to both perform Action and give a Final Answer at the same time, I must do one or the other"
)
UNABLE_TO_REPAIR_JSON_RESULTS: Final[list[str]] = ['""', "{}"]
ACTION_INPUT_REGEX: Final[re.Pattern[str]] = re.compile(
r"Action\s*\d*\s*:\s*(.*?)\s*Action\s*\d*\s*Input\s*\d*\s*:\s*(.*)", re.DOTALL
)
ACTION_REGEX: Final[re.Pattern[str]] = re.compile(
r"Action\s*\d*\s*:\s*(.*?)", re.DOTALL
)
ACTION_INPUT_ONLY_REGEX: Final[re.Pattern[str]] = re.compile(
r"\s*Action\s*\d*\s*Input\s*\d*\s*:\s*(.*)", re.DOTALL
)
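Note: these regex constants drive the parser rewrite further down. As a quick standalone illustration (the sample LLM output below is invented), `ACTION_INPUT_REGEX` splits a ReAct-style block into a tool name and a tool input:

import re

# Same pattern as ACTION_INPUT_REGEX above; the sample text is made up.
ACTION_INPUT_REGEX = re.compile(
    r"Action\s*\d*\s*:\s*(.*?)\s*Action\s*\d*\s*Input\s*\d*\s*:\s*(.*)", re.DOTALL
)

sample = 'Thought: I should search.\nAction: web_search\nAction Input: {"query": "crewai"}'
match = ACTION_INPUT_REGEX.search(sample)
if match:
    print(match.group(1))  # web_search
    print(match.group(2))  # {"query": "crewai"}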
@@ -1,4 +1,4 @@
from typing import Any, Callable, Dict, List, Optional, Union
from typing import Any, Callable, Optional, Union

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
@@ -30,11 +30,11 @@ from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
from crewai.utilities.logger import Logger
from crewai.utilities.tool_utils import execute_tool_and_check_finality
from crewai.utilities.training_handler import CrewTrainingHandler
from crewai.utilities.events.agent_events import (
from crewai.events.types.logging_events import (
AgentLogsStartedEvent,
AgentLogsExecutionEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus


class CrewAgentExecutor(CrewAgentExecutorMixin):
@@ -48,17 +48,17 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
agent: BaseAgent,
prompt: dict[str, str],
max_iter: int,
tools: List[CrewStructuredTool],
tools: list[CrewStructuredTool],
tools_names: str,
stop_words: List[str],
stop_words: list[str],
tools_description: str,
tools_handler: ToolsHandler,
step_callback: Any = None,
original_tools: List[Any] = [],
original_tools: list[Any] | None = None,
function_calling_llm: Any = None,
respect_context_window: bool = False,
request_within_rpm_limit: Optional[Callable[[], bool]] = None,
callbacks: List[Any] = [],
callbacks: list[Any] | None = None,
):
self._i18n: I18N = I18N()
self.llm: BaseLLM = llm
@@ -70,10 +70,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self.tools_names = tools_names
self.stop = stop_words
self.max_iter = max_iter
self.callbacks = callbacks
self.callbacks = callbacks or []
self._printer: Printer = Printer()
self.tools_handler = tools_handler
self.original_tools = original_tools
self.original_tools = original_tools or []
self.step_callback = step_callback
self.use_stop_words = self.llm.supports_stop_words()
self.tools_description = tools_description
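Note: `original_tools: List[Any] = []` and `callbacks: List[Any] = []` were classic mutable-default-argument bugs: the default list is created once at function definition time and shared across every call. The hunk above moves to `None` sentinels plus `or []`. A minimal standalone sketch of the pitfall and the fix (function names are illustrative):

def buggy(acc: list = []) -> list:
    acc.append(1)    # mutates the single default list created at def-time
    return acc


def fixed(acc: list | None = None) -> list:
    acc = acc or []  # every call without an argument gets a fresh list
    acc.append(1)
    return acc


print(buggy(), buggy())  # [1, 1] [1, 1] -- shared state across calls
print(fixed(), fixed())  # [1] [1]       -- independent lists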
@@ -81,10 +81,10 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self.respect_context_window = respect_context_window
self.request_within_rpm_limit = request_within_rpm_limit
self.ask_for_human_input = False
self.messages: List[Dict[str, str]] = []
self.messages: list[dict[str, str]] = []
self.iterations = 0
self.log_error_after = 3
self.tool_name_to_tool_map: Dict[str, Union[CrewStructuredTool, BaseTool]] = {
self.tool_name_to_tool_map: dict[str, Union[CrewStructuredTool, BaseTool]] = {
tool.name: tool for tool in self.tools
}
existing_stop = self.llm.stop or []
@@ -96,7 +96,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
)
)

def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
def invoke(self, inputs: dict[str, str]) -> dict[str, Any]:
if "system" in self.prompt:
system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)
@@ -122,7 +122,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
handle_unknown_error(self._printer, e)
raise

if self.ask_for_human_input:
formatted_answer = self._handle_human_feedback(formatted_answer)

@@ -156,7 +155,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
from_task=self.task
from_task=self.task,
)
formatted_answer = process_llm_response(answer, self.use_stop_words)

@@ -372,7 +371,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
training_data[agent_id] = agent_training_data
training_handler.save(training_data)

def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
def _format_prompt(self, prompt: str, inputs: dict[str, str]) -> str:
prompt = prompt.replace("{input}", inputs["input"])
prompt = prompt.replace("{tool_names}", inputs["tool_names"])
prompt = prompt.replace("{tools}", inputs["tools"])
@@ -1,50 +1,67 @@
import re
from typing import Any, Optional, Union
"""Agent output parsing module for ReAct-style LLM responses.

This module provides parsing functionality for agent outputs that follow
the ReAct (Reasoning and Acting) format, converting them into structured
AgentAction or AgentFinish objects.
"""

from dataclasses import dataclass

from json_repair import repair_json

from crewai.agents.constants import (
ACTION_INPUT_REGEX,
ACTION_REGEX,
ACTION_INPUT_ONLY_REGEX,
FINAL_ANSWER_ACTION,
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
UNABLE_TO_REPAIR_JSON_RESULTS,
)
from crewai.utilities import I18N

FINAL_ANSWER_ACTION = "Final Answer:"
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action:' after 'Thought:'. I will do right next, and don't use a tool I have already used.\n"
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE = "I did it wrong. Invalid Format: I missed the 'Action Input:' after 'Action:'. I will do right next, and don't use a tool I have already used.\n"
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = "I did it wrong. Tried to both perform Action and give a Final Answer at the same time, I must do one or the other"
_I18N = I18N()


@dataclass
class AgentAction:
"""Represents an action to be taken by an agent."""

thought: str
tool: str
tool_input: str
text: str
result: str

def __init__(self, thought: str, tool: str, tool_input: str, text: str):
self.thought = thought
self.tool = tool
self.tool_input = tool_input
self.text = text
result: str | None = None


@dataclass
class AgentFinish:
"""Represents the final answer from an agent."""

thought: str
output: str
text: str

def __init__(self, thought: str, output: str, text: str):
self.thought = thought
self.output = output
self.text = text


class OutputParserException(Exception):
error: str
"""Exception raised when output parsing fails.

def __init__(self, error: str):
Attributes:
error: The error message.
"""

def __init__(self, error: str) -> None:
"""Initialize OutputParserException.

Args:
error: The error message.
"""
self.error = error
super().__init__(error)


class CrewAgentParser:
"""Parses ReAct-style LLM calls that have a single tool input.
def parse(text: str) -> AgentAction | AgentFinish:
"""Parse agent output text into AgentAction or AgentFinish.

Expects output to be in one of two formats.

@@ -62,108 +79,117 @@ class CrewAgentParser:

Thought: agent thought here
Final Answer: The temperature is 100 degrees

Args:
text: The agent output text to parse.

Returns:
AgentAction or AgentFinish based on the content.

Raises:
OutputParserException: If the text format is invalid.
"""
thought = _extract_thought(text)
includes_answer = FINAL_ANSWER_ACTION in text
action_match = ACTION_INPUT_REGEX.search(text)

_i18n: I18N = I18N()
agent: Any = None
if includes_answer:
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
# Check whether the final answer ends with triple backticks.
if final_answer.endswith("```"):
# Count occurrences of triple backticks in the final answer.
count = final_answer.count("```")
# If count is odd then it's an unmatched trailing set; remove it.
if count % 2 != 0:
final_answer = final_answer[:-3].rstrip()
return AgentFinish(thought=thought, output=final_answer, text=text)

def __init__(self, agent: Optional[Any] = None):
self.agent = agent
elif action_match:
action = action_match.group(1)
clean_action = _clean_action(action)

@staticmethod
def parse_text(text: str) -> Union[AgentAction, AgentFinish]:
"""
Static method to parse text into an AgentAction or AgentFinish without needing to instantiate the class.
action_input = action_match.group(2).strip()

Args:
text: The text to parse.
tool_input = action_input.strip(" ").strip('"')
safe_tool_input = _safe_repair_json(tool_input)

Returns:
Either an AgentAction or AgentFinish based on the parsed content.
"""
parser = CrewAgentParser()
return parser.parse(text)

def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
thought = self._extract_thought(text)
includes_answer = FINAL_ANSWER_ACTION in text
regex = (
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
return AgentAction(
thought=thought, tool=clean_action, tool_input=safe_tool_input, text=text
)
action_match = re.search(regex, text, re.DOTALL)
if includes_answer:
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
# Check whether the final answer ends with triple backticks.
if final_answer.endswith("```"):
# Count occurrences of triple backticks in the final answer.
count = final_answer.count("```")
# If count is odd then it's an unmatched trailing set; remove it.
if count % 2 != 0:
final_answer = final_answer[:-3].rstrip()
return AgentFinish(thought, final_answer, text)

elif action_match:
action = action_match.group(1)
clean_action = self._clean_action(action)
if not ACTION_REGEX.search(text):
raise OutputParserException(
f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{_I18N.slice('final_answer_format')}",
)
elif not ACTION_INPUT_ONLY_REGEX.search(text):
raise OutputParserException(
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
)
else:
err_format = _I18N.slice("format_without_tools")
error = f"{err_format}"
raise OutputParserException(
error,
)

action_input = action_match.group(2).strip()

tool_input = action_input.strip(" ").strip('"')
safe_tool_input = self._safe_repair_json(tool_input)
def _extract_thought(text: str) -> str:
"""Extract the thought portion from the text.

return AgentAction(thought, clean_action, safe_tool_input, text)
Args:
text: The full agent output text.

if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
raise OutputParserException(
f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
)
elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
):
raise OutputParserException(
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
)
else:
format = self._i18n.slice("format_without_tools")
error = f"{format}"
raise OutputParserException(
error,
)
Returns:
The extracted thought string.
"""
thought_index = text.find("\nAction")
if thought_index == -1:
thought_index = text.find("\nFinal Answer")
if thought_index == -1:
return ""
thought = text[:thought_index].strip()
# Remove any triple backticks from the thought string
thought = thought.replace("```", "").strip()
return thought

def _extract_thought(self, text: str) -> str:
thought_index = text.find("\nAction")
if thought_index == -1:
thought_index = text.find("\nFinal Answer")
if thought_index == -1:
return ""
thought = text[:thought_index].strip()
# Remove any triple backticks from the thought string
thought = thought.replace("```", "").strip()
return thought

def _clean_action(self, text: str) -> str:
"""Clean action string by removing non-essential formatting characters."""
return text.strip().strip("*").strip()
def _clean_action(text: str) -> str:
"""Clean action string by removing non-essential formatting characters.

def _safe_repair_json(self, tool_input: str) -> str:
UNABLE_TO_REPAIR_JSON_RESULTS = ['""', "{}"]
Args:
text: The action text to clean.

# Skip repair if the input starts and ends with square brackets
# Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]').
# These are typically valid JSON arrays or strings that do not require repair. Attempting to repair such inputs
# might lead to unintended alterations, such as wrapping the entire input in additional layers or modifying
# the structure in a way that changes its meaning. By skipping the repair for inputs that start and end with
# square brackets, we preserve the integrity of these valid JSON structures and avoid unnecessary modifications.
if tool_input.startswith("[") and tool_input.endswith("]"):
return tool_input
Returns:
The cleaned action string.
"""
return text.strip().strip("*").strip()

# Before repair, handle common LLM issues:
# 1. Replace """ with " to avoid JSON parser errors

tool_input = tool_input.replace('"""', '"')
def _safe_repair_json(tool_input: str) -> str:
"""Safely repair JSON input.

result = repair_json(tool_input)
if result in UNABLE_TO_REPAIR_JSON_RESULTS:
return tool_input
Args:
tool_input: The tool input string to repair.

return str(result)
Returns:
The repaired JSON string or original if repair fails.
"""
# Skip repair if the input starts and ends with square brackets
# Explanation: The JSON parser has issues handling inputs that are enclosed in square brackets ('[]').
# These are typically valid JSON arrays or strings that do not require repair. Attempting to repair such inputs
# might lead to unintended alterations, such as wrapping the entire input in additional layers or modifying
# the structure in a way that changes its meaning. By skipping the repair for inputs that start and end with
# square brackets, we preserve the integrity of these valid JSON structures and avoid unnecessary modifications.
if tool_input.startswith("[") and tool_input.endswith("]"):
return tool_input

# Before repair, handle common LLM issues:
# 1. Replace """ with " to avoid JSON parser errors

tool_input = tool_input.replace('"""', '"')

result = repair_json(tool_input)
if result in UNABLE_TO_REPAIR_JSON_RESULTS:
return tool_input

return str(result)
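Note: after this rewrite, parsing is a module-level `parse` function rather than a `CrewAgentParser` method. A hedged usage sketch follows; the sample LLM outputs are invented, and the import path is an assumption based on the "crewai.agents.parser constants" comment in the new constants module:

# Hypothetical usage of the rewritten parser; sample strings are made up.
from crewai.agents.parser import AgentAction, AgentFinish, parse

finish = parse("Thought: I know the answer.\nFinal Answer: 42")
assert isinstance(finish, AgentFinish)
print(finish.output)  # 42

action = parse(
    'Thought: I need data.\nAction: web_search\nAction Input: {"query": "crewai"}'
)
assert isinstance(action, AgentAction)
print(action.tool, action.tool_input)  # web_search {"query": "crewai"}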
@@ -1,29 +1,41 @@
from typing import Any, Optional, Union
"""Tools handler for managing tool execution and caching."""

from ..tools.cache_tools.cache_tools import CacheTools
from ..tools.tool_calling import InstructorToolCalling, ToolCalling
from .cache.cache_handler import CacheHandler
from crewai.tools.cache_tools.cache_tools import CacheTools
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.agents.cache.cache_handler import CacheHandler


class ToolsHandler:
"""Callback handler for tool usage."""
"""Callback handler for tool usage.

last_used_tool: Optional[ToolCalling] = None
cache: Optional[CacheHandler]
Attributes:
last_used_tool: The most recently used tool calling instance.
cache: Optional cache handler for storing tool outputs.
"""

def __init__(self, cache: Optional[CacheHandler] = None):
"""Initialize the callback handler."""
self.cache = cache
self.last_used_tool = None
def __init__(self, cache: CacheHandler | None = None) -> None:
"""Initialize the callback handler.

Args:
cache: Optional cache handler for storing tool outputs.
"""
self.cache: CacheHandler | None = cache
self.last_used_tool: ToolCalling | InstructorToolCalling | None = None

def on_tool_use(
self,
calling: Union[ToolCalling, InstructorToolCalling],
calling: ToolCalling | InstructorToolCalling,
output: str,
should_cache: bool = True,
) -> Any:
"""Run when tool ends running."""
self.last_used_tool = calling  # type: ignore # BUG?: Incompatible types in assignment (expression has type "Union[ToolCalling, InstructorToolCalling]", variable has type "ToolCalling")
) -> None:
"""Run when tool ends running.

Args:
calling: The tool calling instance.
output: The output from the tool execution.
should_cache: Whether to cache the tool output.
"""
self.last_used_tool = calling
if self.cache and should_cache and calling.tool_name != CacheTools().name:
self.cache.add(
tool=calling.tool_name,
@@ -1,6 +1,6 @@
import time
import webbrowser
from typing import Any, Dict, Optional
from typing import Any, Optional

import requests
from rich.console import Console
@@ -70,7 +70,7 @@ class AuthenticationCommand:

return self._poll_for_token(device_code_data)

def _get_device_code(self) -> Dict[str, Any]:
def _get_device_code(self) -> dict[str, Any]:
"""Get the device code to authenticate the user."""

device_code_payload = {
@@ -86,13 +86,13 @@ class AuthenticationCommand:
response.raise_for_status()
return response.json()

def _display_auth_instructions(self, device_code_data: Dict[str, str]) -> None:
def _display_auth_instructions(self, device_code_data: dict[str, str]) -> None:
"""Display the authentication instructions to the user."""
console.print("1. Navigate to: ", device_code_data["verification_uri_complete"])
console.print("2. Enter the following code: ", device_code_data["user_code"])
webbrowser.open(device_code_data["verification_uri_complete"])

def _poll_for_token(self, device_code_data: Dict[str, Any]) -> None:
def _poll_for_token(self, device_code_data: dict[str, Any]) -> None:
"""Polls the server for the token until it is received, or max attempts are reached."""

token_payload = {
@@ -135,7 +135,7 @@ class AuthenticationCommand:
"Timeout: Failed to get the token. Please try again.", style="bold red"
)

def _validate_and_save_token(self, token_data: Dict[str, Any]) -> None:
def _validate_and_save_token(self, token_data: dict[str, Any]) -> None:
"""Validates the JWT token and saves the token to the token manager."""

jwt_token = token_data["access_token"]
@@ -5,7 +5,7 @@ import sys
import threading
import time
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple
from typing import Any, Optional

import click
import tomli
@@ -157,7 +157,7 @@ def build_system_message(crew_chat_inputs: ChatInputs) -> str:
)


def create_tool_function(crew: Crew, messages: List[Dict[str, str]]) -> Any:
def create_tool_function(crew: Crew, messages: list[dict[str, str]]) -> Any:
"""Creates a wrapper function for running the crew tool with messages."""

def run_crew_tool_with_messages(**kwargs):
@@ -221,9 +221,9 @@ def get_user_input() -> str:
def handle_user_input(
user_input: str,
chat_llm: LLM,
messages: List[Dict[str, str]],
crew_tool_schema: Dict[str, Any],
available_functions: Dict[str, Any],
messages: list[dict[str, str]],
crew_tool_schema: dict[str, Any],
available_functions: dict[str, Any],
) -> None:
if user_input.strip().lower() == "exit":
click.echo("Exiting chat. Goodbye!")
@@ -281,13 +281,13 @@ def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
}


def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
def run_crew_tool(crew: Crew, messages: list[dict[str, str]], **kwargs):
"""
Runs the crew using crew.kickoff(inputs=kwargs) and returns the output.

Args:
crew (Crew): The crew instance to run.
messages (List[Dict[str, str]]): The chat messages up to this point.
messages (list[dict[str, str]]): The chat messages up to this point.
**kwargs: The inputs collected from the user.

Returns:
@@ -314,12 +314,12 @@ def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
sys.exit(1)


def load_crew_and_name() -> Tuple[Crew, str]:
def load_crew_and_name() -> tuple[Crew, str]:
"""
Loads the crew by importing the crew class from the user's project.

Returns:
Tuple[Crew, str]: A tuple containing the Crew instance and the name of the crew.
tuple[Crew, str]: A tuple containing the Crew instance and the name of the crew.
"""
# Get the current working directory
cwd = Path.cwd()
@@ -395,7 +395,7 @@ def generate_crew_chat_inputs(crew: Crew, crew_name: str, chat_llm) -> ChatInput
)


def fetch_required_inputs(crew: Crew) -> Set[str]:
def fetch_required_inputs(crew: Crew) -> set[str]:
"""
Extracts placeholders from the crew's tasks and agents.

@@ -403,10 +403,10 @@ def fetch_required_inputs(crew: Crew) -> Set[str]:
crew (Crew): The crew object.

Returns:
Set[str]: A set of placeholder names.
set[str]: A set of placeholder names.
"""
placeholder_pattern = re.compile(r"\{(.+?)\}")
required_inputs: Set[str] = set()
required_inputs: set[str] = set()

# Scan tasks
for task in crew.tasks:
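Note: the placeholder scan above relies on a non-greedy `{...}` regex. A quick standalone sketch of the same extraction (sample strings invented):

import re

placeholder_pattern = re.compile(r"\{(.+?)\}")

description = "Research {topic} and summarize it for {audience}."
required_inputs = set(placeholder_pattern.findall(description))
print(required_inputs)  # {'topic', 'audience'} (set ordering may vary)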
@@ -1,4 +1,4 @@
from typing import Any, Dict, List, Optional
from typing import Any, Optional

from rich.console import Console

@@ -32,12 +32,12 @@ class DeployCommand(BaseCommand, PlusAPIMixin):
style="bold red",
)

def _display_deployment_info(self, json_response: Dict[str, Any]) -> None:
def _display_deployment_info(self, json_response: dict[str, Any]) -> None:
"""
Display deployment information.

Args:
json_response (Dict[str, Any]): The deployment information to display.
json_response (dict[str, Any]): The deployment information to display.
"""
console.print("Deploying the crew...\n", style="bold blue")
for key, value in json_response.items():
@@ -47,12 +47,12 @@ class DeployCommand(BaseCommand, PlusAPIMixin):
console.print(" or")
console.print(f"crewai deploy status --uuid \"{json_response['uuid']}\"")

def _display_logs(self, log_messages: List[Dict[str, Any]]) -> None:
def _display_logs(self, log_messages: list[dict[str, Any]]) -> None:
"""
Display log messages.

Args:
log_messages (List[Dict[str, Any]]): The log messages to display.
log_messages (list[dict[str, Any]]): The log messages to display.
"""
for log_message in log_messages:
console.print(
@@ -110,13 +110,13 @@ class DeployCommand(BaseCommand, PlusAPIMixin):
self._display_creation_success(response.json())

def _confirm_input(
self, env_vars: Dict[str, str], remote_repo_url: str, confirm: bool
self, env_vars: dict[str, str], remote_repo_url: str, confirm: bool
) -> None:
"""
Confirm input parameters with the user.

Args:
env_vars (Dict[str, str]): Environment variables.
env_vars (dict[str, str]): Environment variables.
remote_repo_url (str): Remote repository URL.
confirm (bool): Whether to confirm input.
"""
@@ -128,18 +128,18 @@ class DeployCommand(BaseCommand, PlusAPIMixin):

def _create_payload(
self,
env_vars: Dict[str, str],
env_vars: dict[str, str],
remote_repo_url: str,
) -> Dict[str, Any]:
) -> dict[str, Any]:
"""
Create the payload for crew creation.

Args:
remote_repo_url (str): Remote repository URL.
env_vars (Dict[str, str]): Environment variables.
env_vars (dict[str, str]): Environment variables.

Returns:
Dict[str, Any]: The payload for crew creation.
dict[str, Any]: The payload for crew creation.
"""
return {
"deploy": {
@@ -149,12 +149,12 @@ class DeployCommand(BaseCommand, PlusAPIMixin):
}
}

def _display_creation_success(self, json_response: Dict[str, Any]) -> None:
def _display_creation_success(self, json_response: dict[str, Any]) -> None:
"""
Display success message after crew creation.

Args:
json_response (Dict[str, Any]): The response containing crew information.
json_response (dict[str, Any]): The response containing crew information.
"""
console.print("Deployment created successfully!\n", style="bold green")
console.print(
@@ -179,12 +179,12 @@ class DeployCommand(BaseCommand, PlusAPIMixin):
else:
self._display_no_crews_message()

def _display_crews(self, crews_data: List[Dict[str, Any]]) -> None:
def _display_crews(self, crews_data: list[dict[str, Any]]) -> None:
"""
Display the list of crews.

Args:
crews_data (List[Dict[str, Any]]): List of crew data to display.
crews_data (list[dict[str, Any]]): List of crew data to display.
"""
for crew_data in crews_data:
console.print(
@@ -217,12 +217,12 @@ class DeployCommand(BaseCommand, PlusAPIMixin):
self._validate_response(response)
self._display_crew_status(response.json())

def _display_crew_status(self, status_data: Dict[str, str]) -> None:
def _display_crew_status(self, status_data: dict[str, str]) -> None:
"""
Display the status of a crew.

Args:
status_data (Dict[str, str]): The status data to display.
status_data (dict[str, str]): The status data to display.
"""
console.print(f"Name:\t {status_data['name']}")
console.print(f"Status:\t {status_data['status']}")
@@ -1,5 +1,5 @@
import requests
from typing import Dict, Any
from typing import Any
from rich.console import Console
from requests.exceptions import RequestException, JSONDecodeError

@@ -32,7 +32,7 @@ class EnterpriseConfigureCommand(BaseCommand):
console.print(f"❌ Failed to configure Enterprise settings: {str(e)}", style="bold red")
raise SystemExit(1)

def _fetch_oauth_config(self, enterprise_url: str) -> Dict[str, Any]:
def _fetch_oauth_config(self, enterprise_url: str) -> dict[str, Any]:
oauth_endpoint = f"{enterprise_url}/auth/parameters"

try:
@@ -64,7 +64,7 @@ class EnterpriseConfigureCommand(BaseCommand):
except Exception as e:
raise ValueError(f"Error fetching OAuth2 configuration: {str(e)}")

def _update_oauth_settings(self, enterprise_url: str, oauth_config: Dict[str, Any]) -> None:
def _update_oauth_settings(self, enterprise_url: str, oauth_config: dict[str, Any]) -> None:
try:
config_mapping = {
'enterprise_base_url': enterprise_url,
@@ -1,4 +1,4 @@
from typing import List, Optional
from typing import Optional
from urllib.parse import urljoin

import requests
@@ -58,7 +58,7 @@ class PlusAPI:
version: str,
description: Optional[str],
encoded_file: str,
available_exports: Optional[List[str]] = None,
available_exports: Optional[list[str]] = None,
):
params = {
"handle": handle,
@@ -1,6 +1,6 @@
import subprocess
from enum import Enum
from typing import List, Optional
from typing import Optional

import click
from packaging import version

@@ -10,8 +10,9 @@ console = Console()
class SettingsCommand(BaseCommand):
"""A class to handle CLI configuration commands."""

def __init__(self, settings_kwargs: dict[str, Any] = {}):
def __init__(self, settings_kwargs: dict[str, Any] | None = None):
super().__init__()
settings_kwargs = settings_kwargs or {}
self.settings = Settings(**settings_kwargs)

def list(self) -> None:
@@ -1,7 +1,10 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List
from typing import TYPE_CHECKING

if TYPE_CHECKING:
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai import Task
# If you want to run a snippet of code before or after the crew starts,
# you can use the @before_kickoff and @after_kickoff decorators
# https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators
@@ -10,8 +13,8 @@ from typing import List
class {{crew_name}}():
"""{{crew_name}} crew"""

agents: List[BaseAgent]
tasks: List[Task]
agents: list["BaseAgent"]
tasks: list["Task"]

# Learn more about YAML configuration files here:
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.175.0,<1.0.0"
"crewai[tools]>=0.177.0,<1.0.0"
]

[project.scripts]
@@ -1,7 +1,6 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List

# If you want to run a snippet of code before or after the crew starts,
# you can use the @before_kickoff and @after_kickoff decorators
@@ -12,8 +11,8 @@ from typing import List
class PoemCrew:
"""Poem Crew"""

agents: List[BaseAgent]
tasks: List[Task]
agents: list[BaseAgent]
tasks: list[Task]

# Learn more about YAML configuration files here:
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.175.0,<1.0.0",
"crewai[tools]>=0.177.0,<1.0.0",
]

[project.scripts]
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.175.0"
"crewai[tools]>=0.177.0"
]

[tool.crewai]

@@ -5,7 +5,7 @@ import sys
from functools import reduce
from inspect import getmro, isclass, isfunction, ismethod
from pathlib import Path
from typing import Any, Dict, List, get_type_hints
from typing import Any, get_type_hints

import click
import tomli
@@ -77,7 +77,7 @@ def get_project_description(


def _get_project_attribute(
pyproject_path: str, keys: List[str], require: bool
pyproject_path: str, keys: list[str], require: bool
) -> Any | None:
"""Get an attribute from the pyproject.toml file."""
attribute = None
@@ -117,7 +117,7 @@ def _get_project_attribute(
return attribute


def _get_nested_value(data: Dict[str, Any], keys: List[str]) -> Any:
def _get_nested_value(data: dict[str, Any], keys: list[str]) -> Any:
return reduce(dict.__getitem__, keys, data)
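Note: `_get_nested_value` folds a key path into successive dict lookups. An equivalent standalone sketch (example data invented):

from functools import reduce

data = {"project": {"name": "demo", "urls": {"repo": "https://example.com"}}}

# reduce(dict.__getitem__, keys, data) == data["project"]["urls"]["repo"]
print(reduce(dict.__getitem__, ["project", "urls", "repo"], data))
# Raises KeyError (rather than returning None) if any key along the path is missing.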
@@ -1,6 +1,7 @@
import asyncio
import json
import re
import threading
import uuid
import warnings
from concurrent.futures import Future
@@ -9,11 +10,7 @@ from hashlib import md5
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Set,
Tuple,
Union,
cast,
)
@@ -59,7 +56,8 @@ from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import NOT_SPECIFIED, TRAINING_DATA_FILE
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.events.crew_events import (
from crewai.events.types.crew_events import (
CrewKickoffCancelledEvent,
CrewKickoffCompletedEvent,
CrewKickoffFailedEvent,
CrewKickoffStartedEvent,
@@ -70,14 +68,14 @@ from crewai.utilities.events.crew_events import (
CrewTrainFailedEvent,
CrewTrainStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.event_listener import EventListener
from crewai.utilities.events.listeners.tracing.trace_listener import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_listener import EventListener
from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)

from crewai.utilities.events.listeners.tracing.utils import (
from crewai.events.listeners.tracing.utils import (
is_tracing_enabled,
)
from crewai.utilities.formatter import (
@@ -130,18 +128,19 @@ class Crew(FlowTrackable, BaseModel):
_external_memory: Optional[InstanceOf[ExternalMemory]] = PrivateAttr()
_train: Optional[bool] = PrivateAttr(default=False)
_train_iteration: Optional[int] = PrivateAttr()
_inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
_inputs: Optional[dict[str, Any]] = PrivateAttr(default=None)
_logging_color: str = PrivateAttr(
default="bold_purple",
)
_task_output_handler: TaskOutputStorageHandler = PrivateAttr(
default_factory=TaskOutputStorageHandler
)
_cancellation_event: threading.Event = PrivateAttr(default_factory=threading.Event)

name: Optional[str] = Field(default="crew")
cache: bool = Field(default=True)
tasks: List[Task] = Field(default_factory=list)
agents: List[BaseAgent] = Field(default_factory=list)
tasks: list[Task] = Field(default_factory=list)
agents: list[BaseAgent] = Field(default_factory=list)
process: Process = Field(default=Process.sequential)
verbose: bool = Field(default=False)
memory: bool = Field(
@@ -181,7 +180,7 @@ class Crew(FlowTrackable, BaseModel):
function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
description="Language model that will run the agent.", default=None
)
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
config: Optional[Union[Json, dict[str, Any]]] = Field(default=None)
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
share_crew: Optional[bool] = Field(default=False)
step_callback: Optional[Any] = Field(
@@ -192,13 +191,13 @@ class Crew(FlowTrackable, BaseModel):
default=None,
description="Callback to be executed after each task for all agents execution.",
)
before_kickoff_callbacks: List[
Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
before_kickoff_callbacks: list[
Callable[[Optional[dict[str, Any]]], Optional[dict[str, Any]]]
] = Field(
default_factory=list,
description="List of callbacks to be executed before crew kickoff. It may be used to adjust inputs before the crew is executed.",
)
after_kickoff_callbacks: List[Callable[[CrewOutput], CrewOutput]] = Field(
after_kickoff_callbacks: list[Callable[[CrewOutput], CrewOutput]] = Field(
default_factory=list,
description="List of callbacks to be executed after crew kickoff. It may be used to adjust the output of the crew.",
)
@@ -222,15 +221,15 @@ class Crew(FlowTrackable, BaseModel):
default=None,
description="Language model that will run the AgentPlanner if planning is True.",
)
task_execution_output_json_files: Optional[List[str]] = Field(
task_execution_output_json_files: Optional[list[str]] = Field(
default=None,
description="List of file paths for task execution JSON files.",
)
execution_logs: List[Dict[str, Any]] = Field(
execution_logs: list[dict[str, Any]] = Field(
default=[],
description="List of execution logs for tasks",
)
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
knowledge_sources: Optional[list[BaseKnowledgeSource]] = Field(
default=None,
description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
)
@@ -267,8 +266,8 @@ class Crew(FlowTrackable, BaseModel):
@field_validator("config", mode="before")
@classmethod
def check_config_type(
cls, v: Union[Json, Dict[str, Any]]
) -> Union[Json, Dict[str, Any]]:
cls, v: Union[Json, dict[str, Any]]
) -> Union[Json, dict[str, Any]]:
"""Validates that the config is a valid type.
Args:
v: The config to be validated.
@@ -502,7 +501,7 @@ class Crew(FlowTrackable, BaseModel):

@property
def key(self) -> str:
source: List[str] = [agent.key for agent in self.agents] + [
source: list[str] = [agent.key for agent in self.agents] + [
task.key for task in self.tasks
]
return md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
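Note: the `key` property hashes the joined agent and task keys into a stable crew fingerprint; `usedforsecurity=False` marks the digest as non-cryptographic. A standalone sketch with made-up keys:

from hashlib import md5

source = ["agent-key-1", "agent-key-2", "task-key-1"]  # invented example keys
key = md5("|".join(source).encode(), usedforsecurity=False).hexdigest()
print(key)  # deterministic for the same agents/tasks, suitable for cache lookups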
@@ -530,7 +529,7 @@ class Crew(FlowTrackable, BaseModel):
self.agents = [Agent(**agent) for agent in self.config["agents"]]
self.tasks = [self._create_task(task) for task in self.config["tasks"]]

def _create_task(self, task_config: Dict[str, Any]) -> Task:
def _create_task(self, task_config: dict[str, Any]) -> Task:
"""Creates a task instance from its configuration.

Args:
@@ -559,9 +558,10 @@ class Crew(FlowTrackable, BaseModel):
CrewTrainingHandler(filename).initialize_file()

def train(
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
self, n_iterations: int, filename: str, inputs: Optional[dict[str, Any]] = None
) -> None:
"""Trains the crew for a given number of iterations."""
inputs = inputs or {}
try:
crewai_event_bus.emit(
self,
@@ -610,8 +610,10 @@ class Crew(FlowTrackable, BaseModel):

def kickoff(
self,
inputs: Optional[Dict[str, Any]] = None,
inputs: Optional[dict[str, Any]] = None,
) -> CrewOutput:
self._reset_cancellation()

ctx = baggage.set_baggage(
"crew_context", CrewContext(id=str(self.id), key=self.key)
)
@@ -681,9 +683,9 @@ class Crew(FlowTrackable, BaseModel):
finally:
detach(token)

def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
def kickoff_for_each(self, inputs: list[dict[str, Any]]) -> list[CrewOutput]:
"""Executes the Crew's workflow for each input in the list and aggregates results."""
results: List[CrewOutput] = []
results: list[CrewOutput] = []

# Initialize the parent crew's usage metrics
total_usage_metrics = UsageMetrics()
@@ -702,11 +704,14 @@ class Crew(FlowTrackable, BaseModel):
self._task_output_handler.reset()
return results

async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = {}) -> CrewOutput:
async def kickoff_async(
self, inputs: Optional[dict[str, Any]] = None
) -> CrewOutput:
"""Asynchronous kickoff method to start the crew execution."""
inputs = inputs or {}
return await asyncio.to_thread(self.kickoff, inputs)

async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[CrewOutput]:
async def kickoff_for_each_async(self, inputs: list[dict]) -> list[CrewOutput]:
crew_copies = [self.copy() for _ in inputs]

async def run_crew(crew, input_data):
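Note: `kickoff_async` offloads the synchronous `kickoff` to a worker thread via `asyncio.to_thread` (stdlib, Python 3.9+) so the event loop is not blocked. A minimal sketch of the same pattern (the class and workload below are invented):

import asyncio
import time


class MiniCrew:
    def kickoff(self, inputs: dict | None = None) -> str:
        time.sleep(0.1)  # stand-in for blocking crew execution
        return f"done with {inputs or {}}"

    async def kickoff_async(self, inputs: dict | None = None) -> str:
        # Runs the blocking method in a thread so the event loop stays free.
        return await asyncio.to_thread(self.kickoff, inputs or {})


print(asyncio.run(MiniCrew().kickoff_async({"topic": "ai"})))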
@@ -803,25 +808,37 @@ class Crew(FlowTrackable, BaseModel):

def _execute_tasks(
self,
tasks: List[Task],
tasks: list[Task],
start_index: Optional[int] = 0,
was_replayed: bool = False,
) -> CrewOutput:
"""Executes tasks sequentially and returns the final output.

Args:
tasks (List[Task]): List of tasks to execute
tasks (list[Task]): List of tasks to execute
manager (Optional[BaseAgent], optional): Manager agent to use for delegation. Defaults to None.

Returns:
CrewOutput: Final output of the crew
"""

task_outputs: List[TaskOutput] = []
futures: List[Tuple[Task, Future[TaskOutput], int]] = []
task_outputs: list[TaskOutput] = []
futures: list[tuple[Task, Future[TaskOutput], int]] = []
last_sync_output: Optional[TaskOutput] = None

for task_index, task in enumerate(tasks):
if self.is_cancelled():
self._logger.log("info", f"Crew execution cancelled after {task_index} tasks", color="yellow")
crewai_event_bus.emit(
self,
CrewKickoffCancelledEvent(
crew_name=self.name,
completed_tasks=task_index,
total_tasks=len(tasks),
),
)
return self._create_crew_output(task_outputs)

if start_index is not None and task_index < start_index:
if task.output:
if task.async_execution:
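Note: the per-task cancellation check above is backed by the `threading.Event` added as `_cancellation_event` earlier in this diff. The cooperative-cancellation pattern in sketch form (names and timings invented; exact output is timing-dependent):

import threading
import time

cancel = threading.Event()


def run_tasks(tasks: list[str]) -> list[str]:
    done: list[str] = []
    for name in tasks:
        if cancel.is_set():       # mirrors the is_cancelled() check per task
            print(f"cancelled after {len(done)} tasks")
            break
        time.sleep(0.05)          # stand-in for task execution
        done.append(name)
    return done


threading.Timer(0.12, cancel.set).start()  # request cancellation from another thread
print(run_tasks(["a", "b", "c", "d", "e"]))  # likely stops after ~2 tasks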
@@ -843,7 +860,7 @@ class Crew(FlowTrackable, BaseModel):
tools_for_task = self._prepare_tools(
agent_to_use,
task,
cast(Union[List[Tool], List[BaseTool]], tools_for_task),
cast(Union[list[Tool], list[BaseTool]], tools_for_task),
)

self._log_task_start(task, agent_to_use.role)
@@ -863,7 +880,7 @@ class Crew(FlowTrackable, BaseModel):
future = task.execute_async(
agent=agent_to_use,
context=context,
tools=cast(List[BaseTool], tools_for_task),
tools=cast(list[BaseTool], tools_for_task),
)
futures.append((task, future, task_index))
else:
@@ -875,7 +892,7 @@ class Crew(FlowTrackable, BaseModel):
task_output = task.execute_sync(
agent=agent_to_use,
context=context,
tools=cast(List[BaseTool], tools_for_task),
tools=cast(list[BaseTool], tools_for_task),
)
task_outputs.append(task_output)
self._process_task_result(task, task_output)
@@ -889,8 +906,8 @@ class Crew(FlowTrackable, BaseModel):
def _handle_conditional_task(
self,
task: ConditionalTask,
task_outputs: List[TaskOutput],
futures: List[Tuple[Task, Future[TaskOutput], int]],
task_outputs: list[TaskOutput],
futures: list[tuple[Task, Future[TaskOutput], int]],
task_index: int,
was_replayed: bool,
) -> Optional[TaskOutput]:
@@ -913,8 +930,8 @@ class Crew(FlowTrackable, BaseModel):
return None

def _prepare_tools(
self, agent: BaseAgent, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, agent: BaseAgent, task: Task, tools: Union[list[Tool], list[BaseTool]]
) -> list[BaseTool]:
# Add delegation tools if agent allows delegation
if hasattr(agent, "allow_delegation") and getattr(
agent, "allow_delegation", False
@@ -943,8 +960,8 @@ class Crew(FlowTrackable, BaseModel):
):
tools = self._add_multimodal_tools(agent, tools)

# Return a List[BaseTool] which is compatible with both Task.execute_sync and Task.execute_async
return cast(List[BaseTool], tools)
# Return a list[BaseTool] which is compatible with both Task.execute_sync and Task.execute_async
return cast(list[BaseTool], tools)

def _get_agent_to_use(self, task: Task) -> Optional[BaseAgent]:
if self.process == Process.hierarchical:
@@ -953,12 +970,12 @@ class Crew(FlowTrackable, BaseModel):

def _merge_tools(
self,
existing_tools: Union[List[Tool], List[BaseTool]],
new_tools: Union[List[Tool], List[BaseTool]],
) -> List[BaseTool]:
existing_tools: Union[list[Tool], list[BaseTool]],
new_tools: Union[list[Tool], list[BaseTool]],
) -> list[BaseTool]:
"""Merge new tools into existing tools list, avoiding duplicates by tool name."""
if not new_tools:
return cast(List[BaseTool], existing_tools)
return cast(list[BaseTool], existing_tools)

# Create mapping of tool names to new tools
new_tool_map = {tool.name: tool for tool in new_tools}
@@ -969,41 +986,41 @@ class Crew(FlowTrackable, BaseModel):
# Add all new tools
tools.extend(new_tools)

return cast(List[BaseTool], tools)
return cast(list[BaseTool], tools)

def _inject_delegation_tools(
self,
tools: Union[List[Tool], List[BaseTool]],
tools: Union[list[Tool], list[BaseTool]],
task_agent: BaseAgent,
agents: List[BaseAgent],
) -> List[BaseTool]:
agents: list[BaseAgent],
) -> list[BaseTool]:
if hasattr(task_agent, "get_delegation_tools"):
delegation_tools = task_agent.get_delegation_tools(agents)
# Cast delegation_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], delegation_tools))
return cast(List[BaseTool], tools)
return self._merge_tools(tools, cast(list[BaseTool], delegation_tools))
return cast(list[BaseTool], tools)

def _add_multimodal_tools(
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, agent: BaseAgent, tools: Union[list[Tool], list[BaseTool]]
) -> list[BaseTool]:
if hasattr(agent, "get_multimodal_tools"):
multimodal_tools = agent.get_multimodal_tools()
# Cast multimodal_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], multimodal_tools))
return cast(List[BaseTool], tools)
return self._merge_tools(tools, cast(list[BaseTool], multimodal_tools))
return cast(list[BaseTool], tools)

def _add_code_execution_tools(
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, agent: BaseAgent, tools: Union[list[Tool], list[BaseTool]]
) -> list[BaseTool]:
if hasattr(agent, "get_code_execution_tools"):
code_tools = agent.get_code_execution_tools()
# Cast code_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], code_tools))
return cast(List[BaseTool], tools)
return self._merge_tools(tools, cast(list[BaseTool], code_tools))
return cast(list[BaseTool], tools)

def _add_delegation_tools(
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, task: Task, tools: Union[list[Tool], list[BaseTool]]
) -> list[BaseTool]:
agents_for_delegation = [agent for agent in self.agents if agent != task.agent]
if len(self.agents) > 1 and len(agents_for_delegation) > 0 and task.agent:
if not tools:
@@ -1011,7 +1028,7 @@ class Crew(FlowTrackable, BaseModel):
tools = self._inject_delegation_tools(
tools, task.agent, agents_for_delegation
)
return cast(List[BaseTool], tools)
return cast(list[BaseTool], tools)

def _log_task_start(self, task: Task, role: str = "None"):
if self.output_log_file:
@@ -1020,8 +1037,8 @@ class Crew(FlowTrackable, BaseModel):
)

def _update_manager_tools(
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
self, task: Task, tools: Union[list[Tool], list[BaseTool]]
) -> list[BaseTool]:
if self.manager_agent:
if task.agent:
tools = self._inject_delegation_tools(tools, task.agent, [task.agent])
@@ -1029,9 +1046,9 @@ class Crew(FlowTrackable, BaseModel):
tools = self._inject_delegation_tools(
tools, self.manager_agent, self.agents
)
return cast(List[BaseTool], tools)
return cast(list[BaseTool], tools)

def _get_context(self, task: Task, task_outputs: List[TaskOutput]) -> str:
def _get_context(self, task: Task, task_outputs: list[TaskOutput]) -> str:
if not task.context:
return ""

@@ -1053,7 +1070,7 @@ class Crew(FlowTrackable, BaseModel):
output=output.raw,
)

def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
def _create_crew_output(self, task_outputs: list[TaskOutput]) -> CrewOutput:
if not task_outputs:
raise ValueError("No task outputs available to create crew output.")

@@ -1084,11 +1101,15 @@ class Crew(FlowTrackable, BaseModel):

def _process_async_tasks(
self,
futures: List[Tuple[Task, Future[TaskOutput], int]],
futures: list[tuple[Task, Future[TaskOutput], int]],
was_replayed: bool = False,
) -> List[TaskOutput]:
task_outputs: List[TaskOutput] = []
) -> list[TaskOutput]:
task_outputs: list[TaskOutput] = []
for future_task, future, task_index in futures:
if self.is_cancelled():
future.cancel()
continue

task_output = future.result()
task_outputs.append(task_output)
self._process_task_result(future_task, task_output)
@@ -1098,7 +1119,7 @@ class Crew(FlowTrackable, BaseModel):
return task_outputs

def _find_task_index(
self, task_id: str, stored_outputs: List[Any]
self, task_id: str, stored_outputs: list[Any]
) -> Optional[int]:
return next(
(
@@ -1110,7 +1131,7 @@ class Crew(FlowTrackable, BaseModel):
)

def replay(
self, task_id: str, inputs: Optional[Dict[str, Any]] = None
self, task_id: str, inputs: Optional[dict[str, Any]] = None
) -> CrewOutput:
stored_outputs = self._task_output_handler.load()
if not stored_outputs:
@@ -1151,15 +1172,15 @@ class Crew(FlowTrackable, BaseModel):
return result

def query_knowledge(
self, query: List[str], results_limit: int = 3, score_threshold: float = 0.35
) -> Union[List[Dict[str, Any]], None]:
self, query: list[str], results_limit: int = 3, score_threshold: float = 0.35
) -> Union[list[dict[str, Any]], None]:
if self.knowledge:
return self.knowledge.query(
query, results_limit=results_limit, score_threshold=score_threshold
)
return None

def fetch_inputs(self) -> Set[str]:
def fetch_inputs(self) -> set[str]:
"""
Gathers placeholders (e.g., {something}) referenced in tasks or agents.
Scans each task's 'description' + 'expected_output', and each agent's
@@ -1168,7 +1189,7 @@ class Crew(FlowTrackable, BaseModel):
Returns a set of all discovered placeholder names.
"""
placeholder_pattern = re.compile(r"\{(.+?)\}")
required_inputs: Set[str] = set()
required_inputs: set[str] = set()

# Scan tasks for inputs
for task in self.tasks:

@@ -1270,7 +1291,7 @@ class Crew(FlowTrackable, BaseModel):
if not task.callback:
task.callback = self.task_callback

def _interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
|
||||
def _interpolate_inputs(self, inputs: dict[str, Any]) -> None:
|
||||
"""Interpolates the inputs in the tasks and agents."""
|
||||
[
|
||||
task.interpolate_inputs_and_add_conversation_history(
|
||||
@@ -1304,7 +1325,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
self,
|
||||
n_iterations: int,
|
||||
eval_llm: Union[str, InstanceOf[BaseLLM]],
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
inputs: Optional[dict[str, Any]] = None,
|
||||
) -> None:
|
||||
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
|
||||
try:
|
||||
@@ -1502,7 +1523,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
},
|
||||
}
|
||||
|
||||
def reset_knowledge(self, knowledges: List[Knowledge]) -> None:
|
||||
def reset_knowledge(self, knowledges: list[Knowledge]) -> None:
|
||||
"""Reset crew and agent knowledge storage."""
|
||||
for ks in knowledges:
|
||||
ks.reset()
|
||||
@@ -1521,3 +1542,16 @@ class Crew(FlowTrackable, BaseModel):
|
||||
and able_to_inject
|
||||
):
|
||||
self.tasks[0].allow_crewai_trigger_context = True
|
||||
|
||||
def cancel(self) -> None:
|
||||
"""Cancel the crew execution. This will stop the crew after the current task completes."""
|
||||
self._cancellation_event.set()
|
||||
self._logger.log("info", "Crew cancellation requested", color="yellow")
|
||||
|
||||
def is_cancelled(self) -> bool:
|
||||
"""Check if the crew execution has been cancelled."""
|
||||
return self._cancellation_event.is_set()
|
||||
|
||||
def _reset_cancellation(self) -> None:
|
||||
"""Reset the cancellation state for reuse of the crew instance."""
|
||||
self._cancellation_event.clear()
|
||||
|
||||
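A minimal usage sketch for the cancellation API added above, assuming an already-configured Crew instance; the `my_crew` name and the threading setup are illustrative, not part of this changeset:

    import threading

    # Run the kickoff on a worker thread so the caller can cancel it.
    worker = threading.Thread(target=my_crew.kickoff)
    worker.start()

    my_crew.cancel()      # sets the internal cancellation event
    worker.join()         # the crew stops after the current task completes
    assert my_crew.is_cancelled()

Because `_process_async_tasks` now checks `is_cancelled()` before collecting each future, pending async tasks are cancelled rather than awaited once cancellation is requested.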
@@ -1,5 +1,5 @@
 import json
-from typing import Any, Dict, Optional
+from typing import Any, Optional

 from pydantic import BaseModel, Field

@@ -15,7 +15,7 @@ class CrewOutput(BaseModel):
     pydantic: Optional[BaseModel] = Field(
         description="Pydantic output of Crew", default=None
     )
-    json_dict: Optional[Dict[str, Any]] = Field(
+    json_dict: Optional[dict[str, Any]] = Field(
         description="JSON dict output of Crew", default=None
     )
     tasks_output: list[TaskOutput] = Field(
@@ -32,7 +32,7 @@ class CrewOutput(BaseModel):

         return json.dumps(self.json_dict)

-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """Convert json_output and pydantic_output to a dictionary."""
         output_dict = {}
         if self.json_dict:
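A minimal usage sketch, assuming a finished run bound to `result` (the variable name is illustrative; the exact merge logic is truncated in this hunk):

    result = my_crew.kickoff()   # returns a CrewOutput
    data = result.to_dict()      # dict built from json_dict and/or the pydantic output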
src/crewai/events/__init__.py (new file, 56 lines)
@@ -0,0 +1,56 @@
+"""CrewAI events system for monitoring and extending agent behavior.
+
+This module provides the event infrastructure that allows users to:
+- Monitor agent, task, and crew execution
+- Track memory operations and performance
+- Build custom logging and analytics
+- Extend CrewAI with custom event handlers
+"""
+
+from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.event_bus import crewai_event_bus
+
+from crewai.events.types.memory_events import (
+    MemoryQueryCompletedEvent,
+    MemorySaveCompletedEvent,
+    MemorySaveStartedEvent,
+    MemoryQueryStartedEvent,
+    MemoryRetrievalCompletedEvent,
+    MemorySaveFailedEvent,
+    MemoryQueryFailedEvent,
+)
+
+from crewai.events.types.knowledge_events import (
+    KnowledgeRetrievalStartedEvent,
+    KnowledgeRetrievalCompletedEvent,
+)
+
+from crewai.events.types.crew_events import (
+    CrewKickoffStartedEvent,
+    CrewKickoffCompletedEvent,
+)
+from crewai.events.types.agent_events import (
+    AgentExecutionCompletedEvent,
+)
+
+from crewai.events.types.llm_events import (
+    LLMStreamChunkEvent,
+)
+
+__all__ = [
+    "BaseEventListener",
+    "crewai_event_bus",
+    "MemoryQueryCompletedEvent",
+    "MemorySaveCompletedEvent",
+    "MemorySaveStartedEvent",
+    "MemoryQueryStartedEvent",
+    "MemoryRetrievalCompletedEvent",
+    "MemorySaveFailedEvent",
+    "MemoryQueryFailedEvent",
+    "KnowledgeRetrievalStartedEvent",
+    "KnowledgeRetrievalCompletedEvent",
+    "CrewKickoffStartedEvent",
+    "CrewKickoffCompletedEvent",
+    "AgentExecutionCompletedEvent",
+    "LLMStreamChunkEvent",
+]
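A minimal sketch of consuming the names re-exported by this new package (the handler body is illustrative):

    from crewai.events import CrewKickoffStartedEvent, crewai_event_bus

    @crewai_event_bus.on(CrewKickoffStartedEvent)
    def on_kickoff(source, event):
        # source is the emitting object; crew_name comes from CrewBaseEvent
        print(f"kickoff started: {event.crew_name}")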
src/crewai/events/base_event_listener.py (new file, 15 lines)
@@ -0,0 +1,15 @@
+from abc import ABC, abstractmethod
+
+from crewai.events.event_bus import CrewAIEventsBus, crewai_event_bus
+
+
+class BaseEventListener(ABC):
+    verbose: bool = False
+
+    def __init__(self):
+        super().__init__()
+        self.setup_listeners(crewai_event_bus)
+
+    @abstractmethod
+    def setup_listeners(self, crewai_event_bus: CrewAIEventsBus):
+        pass
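A minimal sketch of a concrete listener built on this base class (the event choice and print body are illustrative):

    from crewai.events import AgentExecutionCompletedEvent, BaseEventListener

    class PrintListener(BaseEventListener):
        def setup_listeners(self, crewai_event_bus):
            @crewai_event_bus.on(AgentExecutionCompletedEvent)
            def on_agent_done(source, event):
                print(f"agent finished: {event.output}")

    PrintListener()  # __init__ above wires the handlers into the global bus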
@@ -1,5 +1,5 @@
 from datetime import datetime, timezone
-from typing import Any, Dict, Optional
+from typing import Any, Optional
 from pydantic import BaseModel, Field

 from crewai.utilities.serialization import to_serializable
@@ -14,7 +14,7 @@ class BaseEvent(BaseModel):
     source_type: Optional[str] = (
         None  # "agent", "task", "crew", "memory", "entity_memory", "short_term_memory", "long_term_memory", "external_memory"
     )
-    fingerprint_metadata: Optional[Dict[str, Any]] = None  # Any relevant metadata
+    fingerprint_metadata: Optional[dict[str, Any]] = None  # Any relevant metadata

     def to_json(self, exclude: set[str] | None = None):
         """
@@ -28,13 +28,13 @@ class BaseEvent(BaseModel):
         """
         return to_serializable(self, exclude=exclude)

-    def _set_task_params(self, data: Dict[str, Any]):
+    def _set_task_params(self, data: dict[str, Any]):
         if "from_task" in data and (task := data["from_task"]):
             self.task_id = task.id
             self.task_name = task.name or task.description
             self.from_task = None

-    def _set_agent_params(self, data: Dict[str, Any]):
+    def _set_agent_params(self, data: dict[str, Any]):
         task = data.get("from_task", None)
         agent = task.agent if task else data.get("from_agent", None)
src/crewai/events/event_bus.py (new file, 117 lines)
@@ -0,0 +1,117 @@
+from __future__ import annotations
+
+import threading
+from contextlib import contextmanager
+from typing import Any, Callable, TypeVar, cast
+
+from blinker import Signal
+
+from crewai.events.base_events import BaseEvent
+from crewai.events.event_types import EventTypes
+
+EventT = TypeVar("EventT", bound=BaseEvent)
+
+
+class CrewAIEventsBus:
+    """
+    A singleton event bus that uses blinker signals for event handling.
+    Allows both internal (Flow/Crew) and external event handling.
+    """
+
+    _instance = None
+    _lock = threading.Lock()
+
+    def __new__(cls):
+        if cls._instance is None:
+            with cls._lock:
+                if cls._instance is None:  # prevent race condition
+                    cls._instance = super(CrewAIEventsBus, cls).__new__(cls)
+                    cls._instance._initialize()
+        return cls._instance
+
+    def _initialize(self) -> None:
+        """Initialize the event bus internal state"""
+        self._signal = Signal("crewai_event_bus")
+        self._handlers: dict[type[BaseEvent], list[Callable]] = {}
+
+    def on(
+        self, event_type: type[EventT]
+    ) -> Callable[[Callable[[Any, EventT], None]], Callable[[Any, EventT], None]]:
+        """
+        Decorator to register an event handler for a specific event type.
+
+        Usage:
+            @crewai_event_bus.on(AgentExecutionCompletedEvent)
+            def on_agent_execution_completed(
+                source: Any, event: AgentExecutionCompletedEvent
+            ):
+                print(f"👍 Agent '{event.agent}' completed task")
+                print(f"   Output: {event.output}")
+        """
+
+        def decorator(
+            handler: Callable[[Any, EventT], None],
+        ) -> Callable[[Any, EventT], None]:
+            if event_type not in self._handlers:
+                self._handlers[event_type] = []
+            self._handlers[event_type].append(
+                cast(Callable[[Any, EventT], None], handler)
+            )
+            return handler
+
+        return decorator
+
+    def emit(self, source: Any, event: BaseEvent) -> None:
+        """
+        Emit an event to all registered handlers
+
+        Args:
+            source: The object emitting the event
+            event: The event instance to emit
+        """
+        for event_type, handlers in self._handlers.items():
+            if isinstance(event, event_type):
+                for handler in handlers:
+                    try:
+                        handler(source, event)
+                    except Exception as e:
+                        print(
+                            f"[EventBus Error] Handler '{handler.__name__}' failed for event '{event_type.__name__}': {e}"
+                        )
+
+        self._signal.send(source, event=event)
+
+    def register_handler(
+        self, event_type: type[EventTypes], handler: Callable[[Any, EventTypes], None]
+    ) -> None:
+        """Register an event handler for a specific event type"""
+        if event_type not in self._handlers:
+            self._handlers[event_type] = []
+        self._handlers[event_type].append(
+            cast(Callable[[Any, EventTypes], None], handler)
+        )
+
+    @contextmanager
+    def scoped_handlers(self):
+        """
+        Context manager for temporary event handling scope.
+        Useful for testing or temporary event handling.
+
+        Usage:
+            with crewai_event_bus.scoped_handlers():
+                @crewai_event_bus.on(CrewKickoffStarted)
+                def temp_handler(source, event):
+                    print("Temporary handler")
+                # Do stuff...
+            # Handlers are cleared after the context
+        """
+        previous_handlers = self._handlers.copy()
+        self._handlers.clear()
+        try:
+            yield
+        finally:
+            self._handlers = previous_handlers
+
+
+# Global instance
+crewai_event_bus = CrewAIEventsBus()
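A minimal sketch exercising the bus directly; `PingEvent` is illustrative, everything else uses names defined in the file above:

    from crewai.events.base_events import BaseEvent
    from crewai.events.event_bus import crewai_event_bus

    class PingEvent(BaseEvent):  # illustrative event type
        type: str = "ping"

    with crewai_event_bus.scoped_handlers():
        @crewai_event_bus.on(PingEvent)
        def on_ping(source, event):
            print(f"got {event.type!r} from {source!r}")

        crewai_event_bus.emit(source=None, event=PingEvent())  # triggers on_ping
    # handlers registered inside the context are discarded on exit

Note the trade-off in `emit`: handler exceptions are caught and printed so one failing listener cannot break agent execution.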
@@ -1,5 +1,7 @@
+from __future__ import annotations
+
 from io import StringIO
-from typing import Any, Dict
+from typing import Any

 from pydantic import Field, PrivateAttr
 from crewai.llm import LLM
@@ -7,8 +9,8 @@ from crewai.task import Task
 from crewai.telemetry.telemetry import Telemetry
 from crewai.utilities import Logger
 from crewai.utilities.constants import EMITTER_COLOR
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events.knowledge_events import (
+from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.types.knowledge_events import (
     KnowledgeQueryCompletedEvent,
     KnowledgeQueryFailedEvent,
     KnowledgeQueryStartedEvent,
@@ -16,28 +18,30 @@ from crewai.utilities.events.knowledge_events import (
     KnowledgeRetrievalStartedEvent,
     KnowledgeSearchQueryFailedEvent,
 )
-from crewai.utilities.events.llm_events import (
+from crewai.events.types.llm_events import (
     LLMCallCompletedEvent,
     LLMCallFailedEvent,
     LLMCallStartedEvent,
     LLMStreamChunkEvent,
 )
-from crewai.utilities.events.llm_guardrail_events import (
+from crewai.events.types.llm_guardrail_events import (
     LLMGuardrailStartedEvent,
     LLMGuardrailCompletedEvent,
 )
-from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
+from crewai.events.utils.console_formatter import ConsoleFormatter

-from .agent_events import (
+from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionStartedEvent,
-    AgentLogsStartedEvent,
-    AgentLogsExecutionEvent,
     LiteAgentExecutionCompletedEvent,
     LiteAgentExecutionErrorEvent,
     LiteAgentExecutionStartedEvent,
 )
-from .crew_events import (
+from crewai.events.types.logging_events import (
+    AgentLogsStartedEvent,
+    AgentLogsExecutionEvent,
+)
+from crewai.events.types.crew_events import (
     CrewKickoffCompletedEvent,
     CrewKickoffFailedEvent,
     CrewKickoffStartedEvent,
@@ -49,7 +53,7 @@ from .crew_events import (
     CrewTrainFailedEvent,
     CrewTrainStartedEvent,
 )
-from .flow_events import (
+from .types.flow_events import (
     FlowCreatedEvent,
     FlowFinishedEvent,
     FlowStartedEvent,
@@ -57,13 +61,13 @@ from .flow_events import (
     MethodExecutionFinishedEvent,
     MethodExecutionStartedEvent,
 )
-from .task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
-from .tool_usage_events import (
+from .types.task_events import TaskCompletedEvent, TaskFailedEvent, TaskStartedEvent
+from .types.tool_usage_events import (
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
     ToolUsageStartedEvent,
 )
-from .reasoning_events import (
+from .types.reasoning_events import (
     AgentReasoningStartedEvent,
     AgentReasoningCompletedEvent,
     AgentReasoningFailedEvent,
@@ -76,7 +80,7 @@ class EventListener(BaseEventListener):
     _instance = None
     _telemetry: Telemetry = PrivateAttr(default_factory=lambda: Telemetry())
     logger = Logger(verbose=True, default_color=EMITTER_COLOR)
-    execution_spans: Dict[Task, Any] = Field(default_factory=dict)
+    execution_spans: dict[Task, Any] = Field(default_factory=dict)
     next_chunk = 0
     text_stream = StringIO()
     knowledge_retrieval_in_progress = False
@@ -162,7 +166,7 @@ class EventListener(BaseEventListener):
         span = self._telemetry.task_started(crew=source.agent.crew, task=source)
         self.execution_spans[source] = span
         # Pass both task ID and task name (if set)
-        task_name = source.name if hasattr(source, 'name') and source.name else None
+        task_name = source.name if hasattr(source, "name") and source.name else None
         self.formatter.create_task_branch(
             self.formatter.current_crew_tree, source.id, task_name
         )
@@ -176,13 +180,13 @@ class EventListener(BaseEventListener):
         self.execution_spans[source] = None

         # Pass task name if it exists
-        task_name = source.name if hasattr(source, 'name') and source.name else None
+        task_name = source.name if hasattr(source, "name") and source.name else None
         self.formatter.update_task_status(
             self.formatter.current_crew_tree,
             source.id,
             source.agent.role,
             "completed",
-            task_name
+            task_name,
         )

     @crewai_event_bus.on(TaskFailedEvent)
@@ -194,13 +198,13 @@ class EventListener(BaseEventListener):
         self.execution_spans[source] = None

         # Pass task name if it exists
-        task_name = source.name if hasattr(source, 'name') and source.name else None
+        task_name = source.name if hasattr(source, "name") and source.name else None
         self.formatter.update_task_status(
             self.formatter.current_crew_tree,
             source.id,
             source.agent.role,
             "failed",
-            task_name
+            task_name,
         )

     # ----------- AGENT EVENTS -----------
@@ -1,12 +1,12 @@
 from typing import Union

-from .agent_events import (
+from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionErrorEvent,
     AgentExecutionStartedEvent,
     LiteAgentExecutionCompletedEvent,
 )
-from .crew_events import (
+from .types.crew_events import (
     CrewKickoffCompletedEvent,
     CrewKickoffFailedEvent,
     CrewKickoffStartedEvent,
@@ -17,39 +17,39 @@ from .crew_events import (
     CrewTrainFailedEvent,
     CrewTrainStartedEvent,
 )
-from .flow_events import (
+from .types.flow_events import (
     FlowFinishedEvent,
     FlowStartedEvent,
     MethodExecutionFailedEvent,
     MethodExecutionFinishedEvent,
     MethodExecutionStartedEvent,
 )
-from .llm_events import (
+from .types.llm_events import (
     LLMCallCompletedEvent,
     LLMCallFailedEvent,
     LLMCallStartedEvent,
     LLMStreamChunkEvent,
 )
-from .llm_guardrail_events import (
+from .types.llm_guardrail_events import (
     LLMGuardrailCompletedEvent,
     LLMGuardrailStartedEvent,
 )
-from .task_events import (
+from .types.task_events import (
     TaskCompletedEvent,
     TaskFailedEvent,
     TaskStartedEvent,
 )
-from .tool_usage_events import (
+from .types.tool_usage_events import (
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
     ToolUsageStartedEvent,
 )
-from .reasoning_events import (
+from .types.reasoning_events import (
     AgentReasoningStartedEvent,
     AgentReasoningCompletedEvent,
     AgentReasoningFailedEvent,
 )
-from .knowledge_events import (
+from .types.knowledge_events import (
     KnowledgeRetrievalStartedEvent,
     KnowledgeRetrievalCompletedEvent,
     KnowledgeQueryStartedEvent,
@@ -58,7 +58,7 @@ from .knowledge_events import (
     KnowledgeSearchQueryFailedEvent,
 )

-from .memory_events import (
+from .types.memory_events import (
     MemorySaveStartedEvent,
     MemorySaveCompletedEvent,
     MemorySaveFailedEvent,
src/crewai/events/listeners/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
+"""Event listener implementations for CrewAI.
+
+This module contains various event listener implementations
+for handling memory, tracing, and other event-driven functionality.
+"""
@@ -1,5 +1,5 @@
-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events.memory_events import (
+from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.types.memory_events import (
     MemoryRetrievalCompletedEvent,
     MemoryRetrievalStartedEvent,
     MemoryQueryFailedEvent,
@@ -9,8 +9,8 @@ from crewai.utilities.events.memory_events import (
     MemorySaveFailedEvent,
 )

-class MemoryListener(BaseEventListener):
+
+class MemoryListener(BaseEventListener):
     def __init__(self, formatter):
         super().__init__()
         self.formatter = formatter
@@ -19,9 +19,7 @@ class MemoryListener(BaseEventListener):

     def setup_listeners(self, crewai_event_bus):
         @crewai_event_bus.on(MemoryRetrievalStartedEvent)
-        def on_memory_retrieval_started(
-            source, event: MemoryRetrievalStartedEvent
-        ):
+        def on_memory_retrieval_started(source, event: MemoryRetrievalStartedEvent):
             if self.memory_retrieval_in_progress:
                 return

@@ -33,9 +31,7 @@ class MemoryListener(BaseEventListener):
         )

         @crewai_event_bus.on(MemoryRetrievalCompletedEvent)
-        def on_memory_retrieval_completed(
-            source, event: MemoryRetrievalCompletedEvent
-        ):
+        def on_memory_retrieval_completed(source, event: MemoryRetrievalCompletedEvent):
             if not self.memory_retrieval_in_progress:
                 return

@@ -44,7 +40,7 @@ class MemoryListener(BaseEventListener):
             self.formatter.current_agent_branch,
             self.formatter.current_crew_tree,
             event.memory_content,
-            event.retrieval_time_ms
+            event.retrieval_time_ms,
         )

         @crewai_event_bus.on(MemoryQueryCompletedEvent)
@@ -107,4 +103,4 @@ class MemoryListener(BaseEventListener):
                 event.error,
                 event.source_type,
                 self.formatter.current_crew_tree,
-            )
+                )
@@ -1,6 +1,6 @@
 import uuid
 from datetime import datetime, timezone
-from typing import Dict, List, Any, Optional
+from typing import Any, Optional
 from dataclasses import dataclass, field

 from crewai.utilities.constants import CREWAI_BASE_URL
@@ -11,7 +11,7 @@ from crewai.cli.plus_api import PlusAPI
 from rich.console import Console
 from rich.panel import Panel

-from crewai.utilities.events.listeners.tracing.types import TraceEvent
+from crewai.events.listeners.tracing.types import TraceEvent
 from logging import getLogger

 logger = getLogger(__name__)
@@ -23,11 +23,11 @@ class TraceBatch:

     version: str = field(default_factory=get_crewai_version)
     batch_id: str = field(default_factory=lambda: str(uuid.uuid4()))
-    user_context: Dict[str, str] = field(default_factory=dict)
-    execution_metadata: Dict[str, Any] = field(default_factory=dict)
-    events: List[TraceEvent] = field(default_factory=list)
+    user_context: dict[str, str] = field(default_factory=dict)
+    execution_metadata: dict[str, Any] = field(default_factory=dict)
+    events: list[TraceEvent] = field(default_factory=list)

-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return {
             "version": self.version,
             "batch_id": self.batch_id,
@@ -43,8 +43,8 @@ class TraceBatchManager:
     is_current_batch_ephemeral: bool = False
     trace_batch_id: Optional[str] = None
     current_batch: Optional[TraceBatch] = None
-    event_buffer: List[TraceEvent] = []
-    execution_start_times: Dict[str, datetime] = {}
+    event_buffer: list[TraceEvent] = []
+    execution_start_times: dict[str, datetime] = {}
     batch_owner_type: Optional[str] = None
     batch_owner_id: Optional[str] = None

@@ -58,8 +58,8 @@ class TraceBatchManager:

     def initialize_batch(
         self,
-        user_context: Dict[str, str],
-        execution_metadata: Dict[str, Any],
+        user_context: dict[str, str],
+        execution_metadata: dict[str, Any],
         use_ephemeral: bool = False,
     ) -> TraceBatch:
         """Initialize a new trace batch"""
@@ -76,8 +76,8 @@ class TraceBatchManager:

     def _initialize_backend_batch(
         self,
-        user_context: Dict[str, str],
-        execution_metadata: Dict[str, Any],
+        user_context: dict[str, str],
+        execution_metadata: dict[str, Any],
         use_ephemeral: bool = False,
     ):
         """Send batch initialization to backend"""
@@ -150,10 +150,10 @@ class TraceBatchManager:
         """Add event to buffer"""
         self.event_buffer.append(trace_event)

-    def _send_events_to_backend(self):
+    def _send_events_to_backend(self) -> int:
         """Send buffered events to backend with graceful failure handling"""
         if not self.plus_api or not self.trace_batch_id or not self.event_buffer:
-            return
+            return 500

         try:
             payload = {
@@ -173,19 +173,22 @@ class TraceBatchManager:

             if response is None:
                 logger.warning("Failed to send trace events. Events will be lost.")
-                return
+                return 500

             if response.status_code in [200, 201]:
                 self.event_buffer.clear()
+                return 200
             else:
                 logger.warning(
                     f"Failed to send events: {response.status_code}. Events will be lost."
                 )
+                return 500

         except Exception as e:
             logger.warning(
                 f"Error sending events to backend: {str(e)}. Events will be lost."
             )
+            return 500

     def finalize_batch(self) -> Optional[TraceBatch]:
         """Finalize batch and return it for sending"""
@@ -194,7 +197,9 @@ class TraceBatchManager:

         self.current_batch.events = self.event_buffer.copy()
         if self.event_buffer:
-            self._send_events_to_backend()
+            events_sent_to_backend_status = self._send_events_to_backend()
+            if events_sent_to_backend_status == 500:
+                return None
         self._finalize_backend_batch()

         finalized_batch = self.current_batch
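In condensed form, the delivery contract this diff introduces (status values per the code above; surrounding details elided):

    status = self._send_events_to_backend()  # int: 200 on success, 500 on any failure
    if status == 500:
        return None  # finalize_batch() gives up rather than finalizing a batch whose events were lost
    self._finalize_backend_batch()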
@@ -1,10 +1,10 @@
 import os
 import uuid

-from typing import Dict, Any, Optional
+from typing import Any, Optional

-from crewai.utilities.events.base_event_listener import BaseEventListener
-from crewai.utilities.events.agent_events import (
+from crewai.events.base_event_listener import BaseEventListener
+from crewai.events.types.agent_events import (
     AgentExecutionCompletedEvent,
     AgentExecutionStartedEvent,
     LiteAgentExecutionStartedEvent,
@@ -12,34 +12,34 @@ from crewai.utilities.events.agent_events import (
     LiteAgentExecutionErrorEvent,
     AgentExecutionErrorEvent,
 )
-from crewai.utilities.events.listeners.tracing.types import TraceEvent
-from crewai.utilities.events.reasoning_events import (
+from crewai.events.listeners.tracing.types import TraceEvent
+from crewai.events.types.reasoning_events import (
     AgentReasoningStartedEvent,
     AgentReasoningCompletedEvent,
     AgentReasoningFailedEvent,
 )
-from crewai.utilities.events.crew_events import (
+from crewai.events.types.crew_events import (
     CrewKickoffCompletedEvent,
     CrewKickoffFailedEvent,
     CrewKickoffStartedEvent,
 )
-from crewai.utilities.events.task_events import (
+from crewai.events.types.task_events import (
     TaskCompletedEvent,
     TaskFailedEvent,
     TaskStartedEvent,
 )
-from crewai.utilities.events.tool_usage_events import (
+from crewai.events.types.tool_usage_events import (
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
     ToolUsageStartedEvent,
 )
-from crewai.utilities.events.llm_events import (
+from crewai.events.types.llm_events import (
     LLMCallCompletedEvent,
     LLMCallFailedEvent,
     LLMCallStartedEvent,
 )

-from crewai.utilities.events.flow_events import (
+from crewai.events.types.flow_events import (
     FlowCreatedEvent,
     FlowStartedEvent,
     FlowFinishedEvent,
@@ -48,7 +48,7 @@ from crewai.utilities.events.flow_events import (
     MethodExecutionFailedEvent,
     FlowPlotEvent,
 )
-from crewai.utilities.events.llm_guardrail_events import (
+from crewai.events.types.llm_guardrail_events import (
     LLMGuardrailStartedEvent,
     LLMGuardrailCompletedEvent,
 )
@@ -57,7 +57,7 @@ from crewai.utilities.serialization import to_serializable

 from .trace_batch_manager import TraceBatchManager

-from crewai.utilities.events.memory_events import (
+from crewai.events.types.memory_events import (
     MemoryQueryStartedEvent,
     MemoryQueryCompletedEvent,
     MemoryQueryFailedEvent,
@@ -112,7 +112,7 @@ class TraceCollectionListener(BaseEventListener):
         except AuthError:
             return False

-    def _get_user_context(self) -> Dict[str, str]:
+    def _get_user_context(self) -> dict[str, str]:
         """Extract user context for tracing"""
         return {
             "user_id": os.getenv("CREWAI_USER_ID", "anonymous"),
@@ -325,7 +325,7 @@ class TraceCollectionListener(BaseEventListener):
         self._initialize_batch(user_context, execution_metadata)

     def _initialize_batch(
-        self, user_context: Dict[str, str], execution_metadata: Dict[str, Any]
+        self, user_context: dict[str, str], execution_metadata: dict[str, Any]
     ):
         """Initialize trace batch if ephemeral"""
         if not self._check_authenticated():
@@ -371,7 +371,7 @@ class TraceCollectionListener(BaseEventListener):

     def _build_event_data(
         self, event_type: str, event: Any, source: Any
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Build event data"""
         if event_type not in self.complex_events:
             return self._safe_serialize_to_dict(event)
@@ -429,7 +429,7 @@ class TraceCollectionListener(BaseEventListener):
     # TODO: move to utils
     def _safe_serialize_to_dict(
         self, obj, exclude: set[str] | None = None
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """Safely serialize an object to a dictionary for event data."""
         try:
             serialized = to_serializable(obj, exclude)
@@ -1,6 +1,6 @@
 from dataclasses import dataclass, field, asdict
 from datetime import datetime, timezone
-from typing import Dict, Any
+from typing import Any
 import uuid


@@ -13,7 +13,7 @@ class TraceEvent:
         default_factory=lambda: datetime.now(timezone.utc).isoformat()
     )
     type: str = ""
-    event_data: Dict[str, Any] = field(default_factory=dict)
+    event_data: dict[str, Any] = field(default_factory=dict)

-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         return asdict(self)
src/crewai/events/types/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
+"""Event type definitions for CrewAI.
+
+This module contains all event types used throughout the CrewAI system
+for monitoring and extending agent, crew, task, and tool execution.
+"""
@@ -1,13 +1,15 @@
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
+"""Agent-related events moved to break circular dependencies."""
+
+from __future__ import annotations
+
+from typing import Any, Optional, Sequence, Union
+
+from pydantic import model_validator

 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool

-from .base_events import BaseEvent
-
-if TYPE_CHECKING:
-    from crewai.agents.agent_builder.base_agent import BaseAgent
+from crewai.events.base_events import BaseEvent


 class AgentExecutionStartedEvent(BaseEvent):
@@ -21,9 +23,9 @@ class AgentExecutionStartedEvent(BaseEvent):

     model_config = {"arbitrary_types_allowed": True}

-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
+    @model_validator(mode="after")
+    def set_fingerprint_data(self):
+        """Set fingerprint data from the agent if available."""
         if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
             self.source_fingerprint = self.agent.fingerprint.uuid_str
             self.source_type = "agent"
@@ -32,6 +34,7 @@ class AgentExecutionStartedEvent(BaseEvent):
             and self.agent.fingerprint.metadata
         ):
             self.fingerprint_metadata = self.agent.fingerprint.metadata
+        return self


 class AgentExecutionCompletedEvent(BaseEvent):
@@ -42,9 +45,11 @@ class AgentExecutionCompletedEvent(BaseEvent):
     output: str
     type: str = "agent_execution_completed"

-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
+    model_config = {"arbitrary_types_allowed": True}
+
+    @model_validator(mode="after")
+    def set_fingerprint_data(self):
+        """Set fingerprint data from the agent if available."""
         if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
             self.source_fingerprint = self.agent.fingerprint.uuid_str
             self.source_type = "agent"
@@ -53,6 +58,7 @@ class AgentExecutionCompletedEvent(BaseEvent):
             and self.agent.fingerprint.metadata
         ):
             self.fingerprint_metadata = self.agent.fingerprint.metadata
+        return self


 class AgentExecutionErrorEvent(BaseEvent):
@@ -63,9 +69,11 @@ class AgentExecutionErrorEvent(BaseEvent):
     error: str
     type: str = "agent_execution_error"

-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
+    model_config = {"arbitrary_types_allowed": True}
+
+    @model_validator(mode="after")
+    def set_fingerprint_data(self):
+        """Set fingerprint data from the agent if available."""
         if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
             self.source_fingerprint = self.agent.fingerprint.uuid_str
             self.source_type = "agent"
@@ -74,15 +82,16 @@ class AgentExecutionErrorEvent(BaseEvent):
             and self.agent.fingerprint.metadata
         ):
             self.fingerprint_metadata = self.agent.fingerprint.metadata
+        return self


 # New event classes for LiteAgent
 class LiteAgentExecutionStartedEvent(BaseEvent):
     """Event emitted when a LiteAgent starts executing"""

-    agent_info: Dict[str, Any]
+    agent_info: dict[str, Any]
     tools: Optional[Sequence[Union[BaseTool, CrewStructuredTool]]]
-    messages: Union[str, List[Dict[str, str]]]
+    messages: Union[str, list[dict[str, str]]]
     type: str = "lite_agent_execution_started"

     model_config = {"arbitrary_types_allowed": True}
@@ -91,7 +100,7 @@ class LiteAgentExecutionStartedEvent(BaseEvent):
 class LiteAgentExecutionCompletedEvent(BaseEvent):
     """Event emitted when a LiteAgent completes execution"""

-    agent_info: Dict[str, Any]
+    agent_info: dict[str, Any]
     output: str
     type: str = "lite_agent_execution_completed"

@@ -99,31 +108,11 @@ class LiteAgentExecutionCompletedEvent(BaseEvent):
 class LiteAgentExecutionErrorEvent(BaseEvent):
     """Event emitted when a LiteAgent encounters an error during execution"""

-    agent_info: Dict[str, Any]
+    agent_info: dict[str, Any]
     error: str
     type: str = "lite_agent_execution_error"


-# New logging events
-class AgentLogsStartedEvent(BaseEvent):
-    """Event emitted when agent logs should be shown at start"""
-
-    agent_role: str
-    task_description: Optional[str] = None
-    verbose: bool = False
-    type: str = "agent_logs_started"
-
-
-class AgentLogsExecutionEvent(BaseEvent):
-    """Event emitted when agent logs should be shown during execution"""
-
-    agent_role: str
-    formatted_answer: Any
-    verbose: bool = False
-    type: str = "agent_logs_execution"
-
-    model_config = {"arbitrary_types_allowed": True}
-
 # Agent Eval events
 class AgentEvaluationStartedEvent(BaseEvent):
     agent_id: str
@@ -132,6 +121,7 @@ class AgentEvaluationStartedEvent(BaseEvent):
     iteration: int
     type: str = "agent_evaluation_started"

+
 class AgentEvaluationCompletedEvent(BaseEvent):
     agent_id: str
     agent_role: str
@@ -141,6 +131,7 @@ class AgentEvaluationCompletedEvent(BaseEvent):
     score: Any
     type: str = "agent_evaluation_completed"

+
 class AgentEvaluationFailedEvent(BaseEvent):
     agent_id: str
     agent_role: str
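The three execution-event classes above replace custom `__init__` overrides with Pydantic's `@model_validator(mode="after")`. A minimal standalone sketch of that pattern, with an illustrative model that is not from the codebase:

    from pydantic import BaseModel, model_validator

    class Ping(BaseModel):
        value: int
        doubled: int = 0

        @model_validator(mode="after")
        def fill_doubled(self):
            # runs after field validation, like set_fingerprint_data above
            self.doubled = self.value * 2
            return self

    assert Ping(value=21).doubled == 42

The validator must return the instance, which is why each event class in the diff gains a trailing `return self`.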
@@ -1,6 +1,6 @@
-from typing import TYPE_CHECKING, Any, Dict, Optional, Union
+from typing import TYPE_CHECKING, Any, Optional, Union

-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent

 if TYPE_CHECKING:
     from crewai.crew import Crew
@@ -14,7 +14,7 @@ class CrewBaseEvent(BaseEvent):
     crew_name: Optional[str]
     crew: Optional[Crew] = None

-    def __init__(self, **data):
+    def __init__(self, **data: Any) -> None:
         super().__init__(**data)
         self.set_crew_fingerprint()

@@ -28,7 +28,7 @@ class CrewBaseEvent(BaseEvent):
         ):
             self.fingerprint_metadata = self.crew.fingerprint.metadata

-    def to_json(self, exclude: set[str] | None = None):
+    def to_json(self, exclude: set[str] | None = None) -> dict[str, Any]:
         if exclude is None:
             exclude = set()
         exclude.add("crew")
@@ -38,7 +38,7 @@ class CrewBaseEvent(BaseEvent):
 class CrewKickoffStartedEvent(CrewBaseEvent):
     """Event emitted when a crew starts execution"""

-    inputs: Optional[Dict[str, Any]]
+    inputs: Optional[dict[str, Any]]
     type: str = "crew_kickoff_started"


@@ -62,7 +62,7 @@ class CrewTrainStartedEvent(CrewBaseEvent):

     n_iterations: int
     filename: str
-    inputs: Optional[Dict[str, Any]]
+    inputs: Optional[dict[str, Any]]
     type: str = "crew_train_started"


@@ -86,7 +86,7 @@ class CrewTestStartedEvent(CrewBaseEvent):

     n_iterations: int
     eval_llm: Optional[Union[str, Any]]
-    inputs: Optional[Dict[str, Any]]
+    inputs: Optional[dict[str, Any]]
     type: str = "crew_test_started"


@@ -110,3 +110,12 @@ class CrewTestResultEvent(CrewBaseEvent):
     execution_duration: float
     model: str
     type: str = "crew_test_result"
+
+
+class CrewKickoffCancelledEvent(CrewBaseEvent):
+    """Event emitted when a crew execution is cancelled"""
+
+    reason: str = "External cancellation requested"
+    completed_tasks: int = 0
+    total_tasks: int = 0
+    type: str = "crew_kickoff_cancelled"
@@ -1,8 +1,8 @@
-from typing import Any, Dict, Optional, Union
+from typing import Any, Optional, Union

 from pydantic import BaseModel, ConfigDict

-from .base_events import BaseEvent
+from crewai.events.base_events import BaseEvent


 class FlowEvent(BaseEvent):
@@ -16,7 +16,7 @@ class FlowStartedEvent(FlowEvent):
     """Event emitted when a flow starts execution"""

     flow_name: str
-    inputs: Optional[Dict[str, Any]] = None
+    inputs: Optional[dict[str, Any]] = None
     type: str = "flow_started"


@@ -32,8 +32,8 @@ class MethodExecutionStartedEvent(FlowEvent):

     flow_name: str
     method_name: str
-    state: Union[Dict[str, Any], BaseModel]
-    params: Optional[Dict[str, Any]] = None
+    state: Union[dict[str, Any], BaseModel]
+    params: Optional[dict[str, Any]] = None
     type: str = "method_execution_started"


@@ -43,7 +43,7 @@ class MethodExecutionFinishedEvent(FlowEvent):
     flow_name: str
     method_name: str
     result: Any = None
-    state: Union[Dict[str, Any], BaseModel]
+    state: Union[dict[str, Any], BaseModel]
     type: str = "method_execution_finished"

@@ -1,10 +1,6 @@
 from typing import TYPE_CHECKING, Any
+from crewai.events.base_events import BaseEvent

 from crewai.agents.agent_builder.base_agent import BaseAgent
-from crewai.utilities.events.base_events import BaseEvent
-
-if TYPE_CHECKING:
-    from crewai.agents.agent_builder.base_agent import BaseAgent


 class KnowledgeRetrievalStartedEvent(BaseEvent):
@@ -20,7 +16,7 @@ class KnowledgeRetrievalCompletedEvent(BaseEvent):
     query: str
     type: str = "knowledge_search_query_completed"
     agent: BaseAgent
-    retrieved_knowledge: Any
+    retrieved_knowledge: str


 class KnowledgeQueryStartedEvent(BaseEvent):
@@ -1,9 +1,9 @@
 from enum import Enum
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Optional, Union

 from pydantic import BaseModel

-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent


 class LLMEventBase(BaseEvent):
@@ -39,10 +39,10 @@ class LLMCallStartedEvent(LLMEventBase):

     type: str = "llm_call_started"
     model: Optional[str] = None
-    messages: Optional[Union[str, List[Dict[str, Any]]]] = None
-    tools: Optional[List[dict[str, Any]]] = None
-    callbacks: Optional[List[Any]] = None
-    available_functions: Optional[Dict[str, Any]] = None
+    messages: Optional[Union[str, list[dict[str, Any]]]] = None
+    tools: Optional[list[dict[str, Any]]] = None
+    callbacks: Optional[list[Any]] = None
+    available_functions: Optional[dict[str, Any]] = None


 class LLMCallCompletedEvent(LLMEventBase):
@@ -1,7 +1,7 @@
 from inspect import getsource
 from typing import Any, Callable, Optional, Union

-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent


 class LLMGuardrailStartedEvent(BaseEvent):
src/crewai/events/types/logging_events.py (new file, 25 lines)
@@ -0,0 +1,25 @@
+"""Agent logging events that don't reference BaseAgent to avoid circular imports."""
+
+from typing import Any, Optional
+
+from crewai.events.base_events import BaseEvent
+
+
+class AgentLogsStartedEvent(BaseEvent):
+    """Event emitted when agent logs should be shown at start"""
+
+    agent_role: str
+    task_description: Optional[str] = None
+    verbose: bool = False
+    type: str = "agent_logs_started"
+
+
+class AgentLogsExecutionEvent(BaseEvent):
+    """Event emitted when agent logs should be shown during execution"""
+
+    agent_role: str
+    formatted_answer: Any
+    verbose: bool = False
+    type: str = "agent_logs_execution"
+
+    model_config = {"arbitrary_types_allowed": True}
@@ -1,6 +1,6 @@
-from typing import Any, Dict, Optional
+from typing import Any, Optional

-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent


 class MemoryBaseEvent(BaseEvent):
@@ -55,7 +55,7 @@ class MemorySaveStartedEvent(MemoryBaseEvent):

     type: str = "memory_save_started"
     value: Optional[str] = None
-    metadata: Optional[Dict[str, Any]] = None
+    metadata: Optional[dict[str, Any]] = None
     agent_role: Optional[str] = None


@@ -64,7 +64,7 @@ class MemorySaveCompletedEvent(MemoryBaseEvent):

     type: str = "memory_save_completed"
     value: str
-    metadata: Optional[Dict[str, Any]] = None
+    metadata: Optional[dict[str, Any]] = None
     agent_role: Optional[str] = None
     save_time_ms: float

@@ -74,7 +74,7 @@ class MemorySaveFailedEvent(MemoryBaseEvent):

     type: str = "memory_save_failed"
     value: Optional[str] = None
-    metadata: Optional[Dict[str, Any]] = None
+    metadata: Optional[dict[str, Any]] = None
     agent_role: Optional[str] = None
     error: str
@@ -1,4 +1,4 @@
-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent
 from typing import Any, Optional

@@ -1,7 +1,7 @@
 from typing import Any, Optional

 from crewai.tasks.task_output import TaskOutput
-from crewai.utilities.events.base_events import BaseEvent
+from crewai.events.base_events import BaseEvent


 class TaskStartedEvent(BaseEvent):
@@ -1,7 +1,7 @@
 from datetime import datetime
-from typing import Any, Callable, Dict, Optional
+from typing import Any, Callable, Optional

-from .base_events import BaseEvent
+from crewai.events.base_events import BaseEvent


 class ToolUsageEvent(BaseEvent):
@@ -11,7 +11,7 @@ class ToolUsageEvent(BaseEvent):
     agent_role: Optional[str] = None
     agent_id: Optional[str] = None
     tool_name: str
-    tool_args: Dict[str, Any] | str
+    tool_args: dict[str, Any] | str
     tool_class: Optional[str] = None
     run_attempts: int | None = None
     delegations: int | None = None
@@ -81,7 +81,7 @@ class ToolExecutionErrorEvent(BaseEvent):
     error: Any
     type: str = "tool_execution_error"
     tool_name: str
-    tool_args: Dict[str, Any]
+    tool_args: dict[str, Any]
     tool_class: Callable
     agent: Optional[Any] = None

@@ -1,4 +1,4 @@
-from typing import Any, Dict, Optional
+from typing import Any, Optional

 from rich.console import Console
 from rich.panel import Panel
@@ -16,7 +16,7 @@ class ConsoleFormatter:
     current_flow_tree: Optional[Tree] = None
     current_method_branch: Optional[Tree] = None
     current_lite_agent_branch: Optional[Tree] = None
-    tool_usage_counts: Dict[str, int] = {}
+    tool_usage_counts: dict[str, int] = {}
     current_reasoning_branch: Optional[Tree] = None  # Track reasoning status
     _live_paused: bool = False
     current_llm_tool_tree: Optional[Tree] = None
@@ -45,7 +45,7 @@ class ConsoleFormatter:
         title: str,
         name: str,
         status_style: str = "blue",
-        tool_args: Dict[str, Any] | str = "",
+        tool_args: dict[str, Any] | str = "",
         **fields,
     ) -> Text:
         """Create standardized status content with consistent formatting."""
@@ -227,7 +227,7 @@ class ConsoleFormatter:
             return None

         task_content = Text()
-
+
         # Display task name if available, otherwise just the ID
         if task_name:
             task_content.append("📋 Task: ", style="yellow bold")
@@ -235,7 +235,7 @@ class ConsoleFormatter:
             task_content.append(f" (ID: {task_id})", style="yellow dim")
         else:
             task_content.append(f"📋 Task: {task_id}", style="yellow bold")
-
+
         task_content.append("\nStatus: ", style="white")
         task_content.append("Executing Task...", style="yellow dim")

@@ -480,7 +480,7 @@ class ConsoleFormatter:
     def handle_llm_tool_usage_started(
         self,
         tool_name: str,
-        tool_args: Dict[str, Any] | str,
+        tool_args: dict[str, Any] | str,
     ):
         # Create status content for the tool usage
         content = self.create_status_content(
@@ -523,7 +523,7 @@ class ConsoleFormatter:
         agent_branch: Optional[Tree],
         tool_name: str,
         crew_tree: Optional[Tree],
-        tool_args: Dict[str, Any] | str = "",
+        tool_args: dict[str, Any] | str = "",
     ) -> Optional[Tree]:
         """Handle tool usage started event."""
         if not self.verbose:
@@ -938,7 +938,7 @@ class ConsoleFormatter:
         lite_agent_branch: Optional[Tree],
         lite_agent_role: str,
         status: str = "completed",
-        **fields: Dict[str, Any],
+        **fields: dict[str, Any],
     ) -> None:
         """Update lite agent status in the tree."""
         if not self.verbose or lite_agent_branch is None:
@@ -981,7 +981,7 @@ class ConsoleFormatter:
         lite_agent_role: str,
         status: str = "started",
         error: Any = None,
-        **fields: Dict[str, Any],
+        **fields: dict[str, Any],
     ) -> None:
         """Handle lite agent execution events with consistent formatting."""
         if not self.verbose:
@@ -1,18 +1,30 @@
 import threading
 from typing import Any, Optional

-from crewai.experimental.evaluation.base_evaluator import AgentEvaluationResult, AggregationStrategy
+from crewai.experimental.evaluation.base_evaluator import (
+    AgentEvaluationResult,
+    AggregationStrategy,
+)
 from crewai.agent import Agent
 from crewai.task import Task
 from crewai.experimental.evaluation.evaluation_display import EvaluationDisplayFormatter
-from crewai.utilities.events.agent_events import AgentEvaluationStartedEvent, AgentEvaluationCompletedEvent, AgentEvaluationFailedEvent
+from crewai.events.types.agent_events import (
+    AgentEvaluationStartedEvent,
+    AgentEvaluationCompletedEvent,
+    AgentEvaluationFailedEvent,
+)
 from crewai.experimental.evaluation import BaseEvaluator, create_evaluation_callbacks
 from collections.abc import Sequence
-from crewai.utilities.events.crewai_event_bus import crewai_event_bus
-from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
-from crewai.utilities.events.task_events import TaskCompletedEvent
-from crewai.utilities.events.agent_events import LiteAgentExecutionCompletedEvent
-from crewai.experimental.evaluation.base_evaluator import AgentAggregatedEvaluationResult, EvaluationScore, MetricCategory
+from crewai.events.event_bus import crewai_event_bus
+from crewai.events.utils.console_formatter import ConsoleFormatter
+from crewai.events.types.task_events import TaskCompletedEvent
+from crewai.events.types.agent_events import LiteAgentExecutionCompletedEvent
+from crewai.experimental.evaluation.base_evaluator import (
+    AgentAggregatedEvaluationResult,
+    EvaluationScore,
+    MetricCategory,
+)


 class ExecutionState:
     current_agent_id: Optional[str] = None
@@ -24,6 +36,7 @@ class ExecutionState:
         self.iterations_results = {}
         self.agent_evaluators = {}

+
 class AgentEvaluator:
     def __init__(
         self,
@@ -46,27 +59,45 @@ class AgentEvaluator:

     @property
     def _execution_state(self) -> ExecutionState:
-        if not hasattr(self._thread_local, 'execution_state'):
+        if not hasattr(self._thread_local, "execution_state"):
             self._thread_local.execution_state = ExecutionState()
         return self._thread_local.execution_state

     def _subscribe_to_events(self) -> None:
         from typing import cast
-        crewai_event_bus.register_handler(TaskCompletedEvent, cast(Any, self._handle_task_completed))
-        crewai_event_bus.register_handler(LiteAgentExecutionCompletedEvent, cast(Any, self._handle_lite_agent_completed))
+
+        crewai_event_bus.register_handler(
+            TaskCompletedEvent, cast(Any, self._handle_task_completed)
+        )
+        crewai_event_bus.register_handler(
+            LiteAgentExecutionCompletedEvent,
+            cast(Any, self._handle_lite_agent_completed),
+        )

     def _handle_task_completed(self, source: Any, event: TaskCompletedEvent) -> None:
         assert event.task is not None
         agent = event.task.agent
-        if agent and str(getattr(agent, 'id', 'unknown')) in self._execution_state.agent_evaluators:
-            self.emit_evaluation_started_event(agent_role=agent.role, agent_id=str(agent.id), task_id=str(event.task.id))
+        if (
+            agent
+            and str(getattr(agent, "id", "unknown"))
+            in self._execution_state.agent_evaluators
+        ):
+            self.emit_evaluation_started_event(
+                agent_role=agent.role,
+                agent_id=str(agent.id),
+                task_id=str(event.task.id),
+            )

         state = ExecutionState()
         state.current_agent_id = str(agent.id)
         state.current_task_id = str(event.task.id)

-        assert state.current_agent_id is not None and state.current_task_id is not None
-        trace = self.callback.get_trace(state.current_agent_id, state.current_task_id)
+        assert (
+            state.current_agent_id is not None and state.current_task_id is not None
+        )
+        trace = self.callback.get_trace(
+            state.current_agent_id, state.current_task_id
+        )

         if not trace:
             return
@@ -76,19 +107,28 @@ class AgentEvaluator:
             task=event.task,
             execution_trace=trace,
             final_output=event.output,
-            state=state
+            state=state,
         )

         current_iteration = self._execution_state.iteration
         if current_iteration not in self._execution_state.iterations_results:
             self._execution_state.iterations_results[current_iteration] = {}

-        if agent.role not in self._execution_state.iterations_results[current_iteration]:
-            self._execution_state.iterations_results[current_iteration][agent.role] = []
+        if (
+            agent.role
+            not in self._execution_state.iterations_results[current_iteration]
+        ):
+            self._execution_state.iterations_results[current_iteration][
+                agent.role
+            ] = []

-        self._execution_state.iterations_results[current_iteration][agent.role].append(result)
+        self._execution_state.iterations_results[current_iteration][
+            agent.role
+        ].append(result)

-    def _handle_lite_agent_completed(self, source: object, event: LiteAgentExecutionCompletedEvent) -> None:
+    def _handle_lite_agent_completed(
+        self, source: object, event: LiteAgentExecutionCompletedEvent
+    ) -> None:
         agent_info = event.agent_info
         agent_id = str(agent_info["id"])

@@ -106,8 +146,12 @@ class AgentEvaluator:
         if not target_agent:
             return

-        assert state.current_agent_id is not None and state.current_task_id is not None
-        trace = self.callback.get_trace(state.current_agent_id, state.current_task_id)
+        assert (
+            state.current_agent_id is not None and state.current_task_id is not None
+        )
+        trace = self.callback.get_trace(
+            state.current_agent_id, state.current_task_id
+        )

         if not trace:
             return
@@ -116,7 +160,7 @@ class AgentEvaluator:
             agent=target_agent,
             execution_trace=trace,
             final_output=event.output,
-            state=state
+            state=state,
         )

         current_iteration = self._execution_state.iteration
@@ -124,10 +168,17 @@ class AgentEvaluator:
             self._execution_state.iterations_results[current_iteration] = {}

         agent_role = target_agent.role
-        if agent_role not in self._execution_state.iterations_results[current_iteration]:
-            self._execution_state.iterations_results[current_iteration][agent_role] = []
+        if (
+            agent_role
+            not in self._execution_state.iterations_results[current_iteration]
+        ):
+            self._execution_state.iterations_results[current_iteration][
+                agent_role
+            ] = []

-        self._execution_state.iterations_results[current_iteration][agent_role].append(result)
+        self._execution_state.iterations_results[current_iteration][
+            agent_role
+        ].append(result)

     def set_iteration(self, iteration: int) -> None:
         self._execution_state.iteration = iteration
@@ -136,14 +187,26 @@ class AgentEvaluator:
         self._execution_state.iterations_results = {}

     def get_evaluation_results(self) -> dict[str, list[AgentEvaluationResult]]:
-        if self._execution_state.iterations_results and self._execution_state.iteration in self._execution_state.iterations_results:
-            return self._execution_state.iterations_results[self._execution_state.iteration]
+        if (
+            self._execution_state.iterations_results
+            and self._execution_state.iteration
+            in self._execution_state.iterations_results
+        ):
+            return self._execution_state.iterations_results[
+                self._execution_state.iteration
+            ]
         return {}

     def display_results_with_iterations(self) -> None:
-        self.display_formatter.display_summary_results(self._execution_state.iterations_results)
+        self.display_formatter.display_summary_results(
+            self._execution_state.iterations_results
+        )

-    def get_agent_evaluation(self, strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE, include_evaluation_feedback: bool = True) -> dict[str, AgentAggregatedEvaluationResult]:
+    def get_agent_evaluation(
+        self,
+        strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE,
+        include_evaluation_feedback: bool = True,
+    ) -> dict[str, AgentAggregatedEvaluationResult]:
         agent_results = {}
         with crewai_event_bus.scoped_handlers():
             task_results = self.get_evaluation_results()
@@ -157,13 +220,16 @@ class AgentEvaluator:
                 agent_id=agent_id,
                 agent_role=agent_role,
                 results=results,
-                strategy=strategy
strategy=strategy,
|
||||
)
|
||||
|
||||
agent_results[agent_role] = aggregated_result
|
||||
|
||||
|
||||
if self._execution_state.iterations_results and self._execution_state.iteration == max(self._execution_state.iterations_results.keys(), default=0):
|
||||
if (
|
||||
self._execution_state.iterations_results
|
||||
and self._execution_state.iteration
|
||||
== max(self._execution_state.iterations_results.keys(), default=0)
|
||||
):
|
||||
self.display_results_with_iterations()
|
||||
|
||||
if include_evaluation_feedback:
|
||||
@@ -172,7 +238,9 @@ class AgentEvaluator:
|
||||
return agent_results
|
||||
|
||||
def display_evaluation_with_feedback(self) -> None:
|
||||
self.display_formatter.display_evaluation_with_feedback(self._execution_state.iterations_results)
|
||||
self.display_formatter.display_evaluation_with_feedback(
|
||||
self._execution_state.iterations_results
|
||||
)
|
||||
|
||||
def evaluate(
|
||||
self,
|
||||
@@ -184,46 +252,91 @@ class AgentEvaluator:
|
||||
) -> AgentEvaluationResult:
|
||||
result = AgentEvaluationResult(
|
||||
agent_id=state.current_agent_id or str(agent.id),
|
||||
task_id=state.current_task_id or (str(task.id) if task else "unknown_task")
|
||||
task_id=state.current_task_id or (str(task.id) if task else "unknown_task"),
|
||||
)
|
||||
|
||||
assert self.evaluators is not None
|
||||
task_id = str(task.id) if task else None
|
||||
for evaluator in self.evaluators:
|
||||
try:
|
||||
self.emit_evaluation_started_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id)
|
||||
self.emit_evaluation_started_event(
|
||||
agent_role=agent.role, agent_id=str(agent.id), task_id=task_id
|
||||
)
|
||||
score = evaluator.evaluate(
|
||||
agent=agent,
|
||||
task=task,
|
||||
execution_trace=execution_trace,
|
||||
final_output=final_output
|
||||
final_output=final_output,
|
||||
)
|
||||
result.metrics[evaluator.metric_category] = score
|
||||
self.emit_evaluation_completed_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id, metric_category=evaluator.metric_category, score=score)
|
||||
self.emit_evaluation_completed_event(
|
||||
agent_role=agent.role,
|
||||
agent_id=str(agent.id),
|
||||
task_id=task_id,
|
||||
metric_category=evaluator.metric_category,
|
||||
score=score,
|
||||
)
|
||||
except Exception as e:
|
||||
self.emit_evaluation_failed_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id, error=str(e))
|
||||
self.console_formatter.print(f"Error in {evaluator.metric_category.value} evaluator: {str(e)}")
|
||||
self.emit_evaluation_failed_event(
|
||||
agent_role=agent.role,
|
||||
agent_id=str(agent.id),
|
||||
task_id=task_id,
|
||||
error=str(e),
|
||||
)
|
||||
self.console_formatter.print(
|
||||
f"Error in {evaluator.metric_category.value} evaluator: {str(e)}"
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
def emit_evaluation_started_event(self, agent_role: str, agent_id: str, task_id: str | None = None):
|
||||
def emit_evaluation_started_event(
|
||||
self, agent_role: str, agent_id: str, task_id: str | None = None
|
||||
):
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
AgentEvaluationStartedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration)
|
||||
AgentEvaluationStartedEvent(
|
||||
agent_role=agent_role,
|
||||
agent_id=agent_id,
|
||||
task_id=task_id,
|
||||
iteration=self._execution_state.iteration,
|
||||
),
|
||||
)
|
||||
|
||||
def emit_evaluation_completed_event(self, agent_role: str, agent_id: str, task_id: str | None = None, metric_category: MetricCategory | None = None, score: EvaluationScore | None = None):
|
||||
def emit_evaluation_completed_event(
|
||||
self,
|
||||
agent_role: str,
|
||||
agent_id: str,
|
||||
task_id: str | None = None,
|
||||
metric_category: MetricCategory | None = None,
|
||||
score: EvaluationScore | None = None,
|
||||
):
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
AgentEvaluationCompletedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration, metric_category=metric_category, score=score)
|
||||
AgentEvaluationCompletedEvent(
|
||||
agent_role=agent_role,
|
||||
agent_id=agent_id,
|
||||
task_id=task_id,
|
||||
iteration=self._execution_state.iteration,
|
||||
metric_category=metric_category,
|
||||
score=score,
|
||||
),
|
||||
)
|
||||
|
||||
def emit_evaluation_failed_event(self, agent_role: str, agent_id: str, error: str, task_id: str | None = None):
|
||||
def emit_evaluation_failed_event(
|
||||
self, agent_role: str, agent_id: str, error: str, task_id: str | None = None
|
||||
):
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
AgentEvaluationFailedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration, error=error)
|
||||
AgentEvaluationFailedEvent(
|
||||
agent_role=agent_role,
|
||||
agent_id=agent_id,
|
||||
task_id=task_id,
|
||||
iteration=self._execution_state.iteration,
|
||||
error=error,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def create_default_evaluator(agents: list[Agent], llm: None = None):
|
||||
from crewai.experimental.evaluation import (
|
||||
GoalAlignmentEvaluator,
|
||||
@@ -231,7 +344,7 @@ def create_default_evaluator(agents: list[Agent], llm: None = None):
|
||||
ToolSelectionEvaluator,
|
||||
ParameterExtractionEvaluator,
|
||||
ToolInvocationEvaluator,
|
||||
ReasoningEfficiencyEvaluator
|
||||
ReasoningEfficiencyEvaluator,
|
||||
)
|
||||
|
||||
evaluators = [
|
||||
|
||||
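The `_execution_state` property in the hunk above is the load-bearing piece of this refactor: each thread that runs an evaluation lazily gets its own `ExecutionState` through `threading.local`, so concurrent crews do not share iteration counters or partial results. A minimal, self-contained sketch of that pattern (the names here are illustrative, not the exact crewAI API):

import threading
from dataclasses import dataclass, field
from typing import Any, Optional


@dataclass
class LocalState:
    # Mirrors the per-run bookkeeping kept by ExecutionState above.
    iteration: int = 0
    current_agent_id: Optional[str] = None
    iterations_results: dict[int, dict[str, list[Any]]] = field(default_factory=dict)


class Evaluator:
    def __init__(self) -> None:
        self._thread_local = threading.local()

    @property
    def _execution_state(self) -> LocalState:
        # First access on a given thread creates that thread's state.
        if not hasattr(self._thread_local, "state"):
            self._thread_local.state = LocalState()
        return self._thread_local.state

Because the state hangs off `threading.local`, two threads calling `_execution_state` get independent `LocalState` objects without any locking.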
@@ -1,7 +1,7 @@
import abc
import enum
from enum import Enum
from typing import Any, Dict, List, Optional
from typing import Any, Optional

from pydantic import BaseModel, Field

@@ -57,7 +57,7 @@ class BaseEvaluator(abc.ABC):
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
execution_trace: dict[str, Any],
final_output: Any,
task: Task | None = None,
) -> EvaluationScore:
@@ -67,7 +67,7 @@ class BaseEvaluator(abc.ABC):
class AgentEvaluationResult(BaseModel):
agent_id: str = Field(description="ID of the evaluated agent")
task_id: str = Field(description="ID of the task that was executed")
metrics: Dict[MetricCategory, EvaluationScore] = Field(
metrics: dict[MetricCategory, EvaluationScore] = Field(
default_factory=dict,
description="Evaluation scores for each metric category"
)
@@ -97,11 +97,11 @@ class AgentAggregatedEvaluationResult(BaseModel):
default=AggregationStrategy.SIMPLE_AVERAGE,
description="Strategy used for aggregation"
)
metrics: Dict[MetricCategory, EvaluationScore] = Field(
metrics: dict[MetricCategory, EvaluationScore] = Field(
default_factory=dict,
description="Aggregated metrics across all tasks"
)
task_results: List[str] = Field(
task_results: list[str] = Field(
default_factory=list,
description="IDs of tasks included in this aggregation"
)
@@ -122,4 +122,4 @@ class AgentAggregatedEvaluationResult(BaseModel):
detailed_feedback = "\n ".join(score.feedback.split('\n'))
result += f" {detailed_feedback}\n"

return result
return result
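The `Dict[...]`/`List[...]` to `dict[...]`/`list[...]` swap in the model fields above is purely an annotation change; Pydantic resolves builtin generics the same way it resolves the `typing` aliases. A reduced sketch, assuming only pydantic (the real models also carry enum keys and extra fields):

from pydantic import BaseModel, Field


class EvaluationScore(BaseModel):
    score: float | None = None
    feedback: str | None = None


class AggregatedResult(BaseModel):
    # Builtin generics are valid Pydantic field annotations on Python 3.9+.
    metrics: dict[str, EvaluationScore] = Field(default_factory=dict)
    task_results: list[str] = Field(default_factory=list)


result = AggregatedResult(metrics={"goal_alignment": EvaluationScore(score=8.5)})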
@@ -1,20 +1,30 @@
from collections import defaultdict
from typing import Dict, Any, List
from typing import Any
from rich.table import Table
from rich.box import HEAVY_EDGE, ROUNDED
from collections.abc import Sequence
from crewai.experimental.evaluation.base_evaluator import AgentAggregatedEvaluationResult, AggregationStrategy, AgentEvaluationResult, MetricCategory
from crewai.experimental.evaluation.base_evaluator import (
AgentAggregatedEvaluationResult,
AggregationStrategy,
AgentEvaluationResult,
MetricCategory,
)
from crewai.experimental.evaluation import EvaluationScore
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
from crewai.events.utils.console_formatter import ConsoleFormatter
from crewai.utilities.llm_utils import create_llm


class EvaluationDisplayFormatter:
def __init__(self):
self.console_formatter = ConsoleFormatter()

def display_evaluation_with_feedback(self, iterations_results: Dict[int, Dict[str, List[Any]]]):
def display_evaluation_with_feedback(
self, iterations_results: dict[int, dict[str, list[Any]]]
):
if not iterations_results:
self.console_formatter.print("[yellow]No evaluation results to display[/yellow]")
self.console_formatter.print(
"[yellow]No evaluation results to display[/yellow]"
)
return

all_agent_roles: set[str] = set()
@@ -22,7 +32,9 @@ class EvaluationDisplayFormatter:
all_agent_roles.update(iter_results.keys())

for agent_role in sorted(all_agent_roles):
self.console_formatter.print(f"\n[bold cyan]Agent: {agent_role}[/bold cyan]")
self.console_formatter.print(
f"\n[bold cyan]Agent: {agent_role}[/bold cyan]"
)

for iter_num, results in sorted(iterations_results.items()):
if agent_role not in results or not results[agent_role]:
@@ -62,9 +74,7 @@ class EvaluationDisplayFormatter:

table.add_section()
table.add_row(
metric.title(),
score_text,
evaluation_score.feedback or ""
metric.title(), score_text, evaluation_score.feedback or ""
)

if aggregated_result.overall_score is not None:
@@ -82,19 +92,26 @@ class EvaluationDisplayFormatter:
table.add_row(
"Overall Score",
f"[{overall_color}]{overall_score:.1f}[/]",
"Overall agent evaluation score"
"Overall agent evaluation score",
)

self.console_formatter.print(table)

def display_summary_results(self, iterations_results: Dict[int, Dict[str, List[AgentAggregatedEvaluationResult]]]):
def display_summary_results(
self,
iterations_results: dict[int, dict[str, list[AgentAggregatedEvaluationResult]]],
):
if not iterations_results:
self.console_formatter.print("[yellow]No evaluation results to display[/yellow]")
self.console_formatter.print(
"[yellow]No evaluation results to display[/yellow]"
)
return

self.console_formatter.print("\n")

table = Table(title="Agent Performance Scores \n (1-10 Higher is better)", box=HEAVY_EDGE)
table = Table(
title="Agent Performance Scores \n (1-10 Higher is better)", box=HEAVY_EDGE
)

table.add_column("Agent/Metric", style="cyan")

@@ -123,11 +140,14 @@ class EvaluationDisplayFormatter:
agent_id=agent_id,
agent_role=agent_role,
results=agent_results,
strategy=AggregationStrategy.SIMPLE_AVERAGE
strategy=AggregationStrategy.SIMPLE_AVERAGE,
)

valid_scores = [score.score for score in aggregated_result.metrics.values()
if score.score is not None]
valid_scores = [
score.score
for score in aggregated_result.metrics.values()
if score.score is not None
]
if valid_scores:
avg_score = sum(valid_scores) / len(valid_scores)
agent_scores_by_iteration[iter_num] = avg_score
@@ -137,7 +157,9 @@ class EvaluationDisplayFormatter:
if not agent_scores_by_iteration:
continue

avg_across_iterations = sum(agent_scores_by_iteration.values()) / len(agent_scores_by_iteration)
avg_across_iterations = sum(agent_scores_by_iteration.values()) / len(
agent_scores_by_iteration
)

row = [f"[bold]{agent_role}[/bold]"]

@@ -178,9 +200,13 @@ class EvaluationDisplayFormatter:
row = [f" - {metric.title()}"]

for iter_num in sorted(iterations_results.keys()):
if (iter_num in agent_metrics_by_iteration and
metric in agent_metrics_by_iteration[iter_num]):
metric_score = agent_metrics_by_iteration[iter_num][metric].score
if (
iter_num in agent_metrics_by_iteration
and metric in agent_metrics_by_iteration[iter_num]
):
metric_score = agent_metrics_by_iteration[iter_num][
metric
].score
if metric_score is not None:
metric_scores.append(metric_score)
if metric_score >= 8.0:
@@ -225,7 +251,9 @@ class EvaluationDisplayFormatter:
results: Sequence[AgentEvaluationResult],
strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE,
) -> AgentAggregatedEvaluationResult:
metrics_by_category: dict[MetricCategory, list[EvaluationScore]] = defaultdict(list)
metrics_by_category: dict[MetricCategory, list[EvaluationScore]] = defaultdict(
list
)

for result in results:
for metric_name, evaluation_score in result.metrics.items():
@@ -246,19 +274,20 @@ class EvaluationDisplayFormatter:
metric=category.title(),
feedbacks=feedbacks,
scores=[s.score for s in scores],
strategy=strategy
strategy=strategy,
)
else:
feedback_summary = feedbacks[0]

aggregated_metrics[category] = EvaluationScore(
score=avg_score,
feedback=feedback_summary
score=avg_score, feedback=feedback_summary
)

overall_score = None
if aggregated_metrics:
valid_scores = [m.score for m in aggregated_metrics.values() if m.score is not None]
valid_scores = [
m.score for m in aggregated_metrics.values() if m.score is not None
]
if valid_scores:
overall_score = sum(valid_scores) / len(valid_scores)

@@ -268,19 +297,21 @@ class EvaluationDisplayFormatter:
metrics=aggregated_metrics,
overall_score=overall_score,
task_count=len(results),
aggregation_strategy=strategy
aggregation_strategy=strategy,
)

def _summarize_feedbacks(
self,
agent_role: str,
metric: str,
feedbacks: List[str],
scores: List[float | None],
strategy: AggregationStrategy
feedbacks: list[str],
scores: list[float | None],
strategy: AggregationStrategy,
) -> str:
if len(feedbacks) <= 2 and all(len(fb) < 200 for fb in feedbacks):
return "\n\n".join([f"Feedback {i+1}: {fb}" for i, fb in enumerate(feedbacks)])
return "\n\n".join(
[f"Feedback {i+1}: {fb}" for i, fb in enumerate(feedbacks)]
)

try:
llm = create_llm()
@@ -290,20 +321,26 @@ class EvaluationDisplayFormatter:
if len(feedback) > 500:
feedback = feedback[:500] + "..."
score_text = f"{score:.1f}" if score is not None else "N/A"
formatted_feedbacks.append(f"Feedback #{i+1} (Score: {score_text}):\n{feedback}")
formatted_feedbacks.append(
f"Feedback #{i+1} (Score: {score_text}):\n{feedback}"
)

all_feedbacks = "\n\n" + "\n\n---\n\n".join(formatted_feedbacks)

strategy_guidance = ""
if strategy == AggregationStrategy.BEST_PERFORMANCE:
strategy_guidance = "Focus on the highest-scoring aspects and strengths demonstrated."
strategy_guidance = (
"Focus on the highest-scoring aspects and strengths demonstrated."
)
elif strategy == AggregationStrategy.WORST_PERFORMANCE:
strategy_guidance = "Focus on areas that need improvement and common issues across tasks."
else:
strategy_guidance = "Provide a balanced analysis of strengths and weaknesses across all tasks."

prompt = [
{"role": "system", "content": f"""You are an expert evaluator creating a comprehensive summary of agent performance feedback.
{
"role": "system",
"content": f"""You are an expert evaluator creating a comprehensive summary of agent performance feedback.
Your job is to synthesize multiple feedback points about the same metric across different tasks.

Create a concise, insightful summary that captures the key patterns and themes from all feedback.
@@ -315,14 +352,18 @@ class EvaluationDisplayFormatter:
3. Highlighting patterns across tasks
4. 150-250 words in length

The summary should be directly usable as final feedback for the agent's performance on this metric."""},
{"role": "user", "content": f"""I need a synthesized summary of the following feedback for:
The summary should be directly usable as final feedback for the agent's performance on this metric.""",
},
{
"role": "user",
"content": f"""I need a synthesized summary of the following feedback for:

Agent Role: {agent_role}
Metric: {metric.title()}

{all_feedbacks}
"""}
""",
},
]
assert llm is not None
response = llm.call(prompt)
@@ -330,4 +371,6 @@ class EvaluationDisplayFormatter:
return response

except Exception:
return "Synthesized from multiple tasks: " + "\n\n".join([f"- {fb[:500]}..." for fb in feedbacks])
return "Synthesized from multiple tasks: " + "\n\n".join(
[f"- {fb[:500]}..." for fb in feedbacks]
)
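The aggregation hunks above group every `EvaluationScore` under its `MetricCategory` with `defaultdict(list)` and then average the non-None scores per category. The same shape in miniature, with plain strings standing in for the enum categories:

from collections import defaultdict


def aggregate(results: list[dict[str, float | None]]) -> dict[str, float]:
    # Group scores by metric name, then average whatever is not None.
    by_metric: dict[str, list[float]] = defaultdict(list)
    for result in results:
        for metric, score in result.items():
            if score is not None:
                by_metric[metric].append(score)
    return {metric: sum(vals) / len(vals) for metric, vals in by_metric.items()}


print(aggregate([{"accuracy": 8.0, "speed": None}, {"accuracy": 6.0, "speed": 9.0}]))
# {'accuracy': 7.0, 'speed': 9.0}

Skipping None rather than treating it as zero is what keeps a failed evaluator from dragging down an agent's average.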
@@ -1,29 +1,27 @@
from datetime import datetime
from typing import Any, Dict, Optional
from typing import Any, Optional

from collections.abc import Sequence

from crewai.agent import Agent
from crewai.task import Task
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus
from crewai.utilities.events.agent_events import (
from crewai.events.base_event_listener import BaseEventListener
from crewai.events.event_bus import CrewAIEventsBus
from crewai.events.types.agent_events import (
AgentExecutionStartedEvent,
AgentExecutionCompletedEvent,
LiteAgentExecutionStartedEvent,
LiteAgentExecutionCompletedEvent
LiteAgentExecutionCompletedEvent,
)
from crewai.utilities.events.tool_usage_events import (
from crewai.events.types.tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
ToolExecutionErrorEvent,
ToolSelectionErrorEvent,
ToolValidateInputErrorEvent
)
from crewai.utilities.events.llm_events import (
LLMCallStartedEvent,
LLMCallCompletedEvent
ToolValidateInputErrorEvent,
)
from crewai.events.types.llm_events import LLMCallStartedEvent, LLMCallCompletedEvent


class EvaluationTraceCallback(BaseEventListener):
"""Event listener for collecting execution traces for evaluation.
@@ -68,27 +66,49 @@ class EvaluationTraceCallback(BaseEventListener):

@event_bus.on(ToolUsageFinishedEvent)
def on_tool_completed(source, event: ToolUsageFinishedEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.output, success=True)
self.on_tool_use(
event.tool_name, event.tool_args, event.output, success=True
)

@event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="usage_error")
self.on_tool_use(
event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="usage_error",
)

@event_bus.on(ToolExecutionErrorEvent)
def on_tool_execution_error(source, event: ToolExecutionErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="execution_error")
self.on_tool_use(
event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="execution_error",
)

@event_bus.on(ToolSelectionErrorEvent)
def on_tool_selection_error(source, event: ToolSelectionErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="selection_error")
self.on_tool_use(
event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="selection_error",
)

@event_bus.on(ToolValidateInputErrorEvent)
def on_tool_validate_input_error(source, event: ToolValidateInputErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="validation_error")
self.on_tool_use(
event.tool_name,
event.tool_args,
event.error,
success=False,
error_type="validation_error",
)

@event_bus.on(LLMCallStartedEvent)
def on_llm_call_started(source, event: LLMCallStartedEvent):
@@ -99,7 +119,7 @@ class EvaluationTraceCallback(BaseEventListener):
self.on_llm_call_end(event.messages, event.response)

def on_lite_agent_start(self, agent_info: dict[str, Any]):
self.current_agent_id = agent_info['id']
self.current_agent_id = agent_info["id"]
self.current_task_id = "lite_task"

trace_key = f"{self.current_agent_id}_{self.current_task_id}"
@@ -110,7 +130,7 @@ class EvaluationTraceCallback(BaseEventListener):
tool_uses=[],
llm_calls=[],
start_time=datetime.now(),
final_output=None
final_output=None,
)

def _init_trace(self, trace_key: str, **kwargs: Any):
@@ -128,7 +148,7 @@ class EvaluationTraceCallback(BaseEventListener):
tool_uses=[],
llm_calls=[],
start_time=datetime.now(),
final_output=None
final_output=None,
)

def on_agent_finish(self, agent: Agent, task: Task, output: Any):
@@ -151,8 +171,14 @@ class EvaluationTraceCallback(BaseEventListener):

self._reset_current()

def on_tool_use(self, tool_name: str, tool_args: dict[str, Any] | str, result: Any,
success: bool = True, error_type: str | None = None):
def on_tool_use(
self,
tool_name: str,
tool_args: dict[str, Any] | str,
result: Any,
success: bool = True,
error_type: str | None = None,
):
if not self.current_agent_id or not self.current_task_id:
return

@@ -163,7 +189,7 @@ class EvaluationTraceCallback(BaseEventListener):
"args": tool_args,
"result": result,
"success": success,
"timestamp": datetime.now()
"timestamp": datetime.now(),
}

# Add error information if applicable
@@ -173,7 +199,11 @@ class EvaluationTraceCallback(BaseEventListener):

self.traces[trace_key]["tool_uses"].append(tool_use)

def on_llm_call_start(self, messages: str | Sequence[dict[str, Any]] | None, tools: Sequence[dict[str, Any]] | None = None):
def on_llm_call_start(
self,
messages: str | Sequence[dict[str, Any]] | None,
tools: Sequence[dict[str, Any]] | None = None,
):
if not self.current_agent_id or not self.current_task_id:
return

@@ -186,10 +216,12 @@ class EvaluationTraceCallback(BaseEventListener):
"tools": tools,
"start_time": datetime.now(),
"response": None,
"end_time": None
"end_time": None,
}

def on_llm_call_end(self, messages: str | list[dict[str, Any]] | None, response: Any):
def on_llm_call_end(
self, messages: str | list[dict[str, Any]] | None, response: Any
):
if not self.current_agent_id or not self.current_task_id:
return

@@ -213,7 +245,7 @@ class EvaluationTraceCallback(BaseEventListener):
"response": response,
"start_time": start_time,
"end_time": current_time,
"total_tokens": total_tokens
"total_tokens": total_tokens,
}

self.traces[trace_key]["llm_calls"].append(llm_call)
@@ -221,13 +253,13 @@ class EvaluationTraceCallback(BaseEventListener):
if hasattr(self, "current_llm_call"):
self.current_llm_call = {}

def get_trace(self, agent_id: str, task_id: str) -> Optional[Dict[str, Any]]:
def get_trace(self, agent_id: str, task_id: str) -> Optional[dict[str, Any]]:
trace_key = f"{agent_id}_{task_id}"
return self.traces.get(trace_key)


def create_evaluation_callbacks() -> EvaluationTraceCallback:
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.events.event_bus import crewai_event_bus

callback = EvaluationTraceCallback()
callback.setup_listeners(crewai_event_bus)
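Throughout `EvaluationTraceCallback`, traces are stored and fetched under a composite `f"{agent_id}_{task_id}"` key, which is what lets `get_trace` pair an agent's tool and LLM activity with the task being evaluated. A stripped-down sketch of that storage scheme (hypothetical class, standard library only):

from datetime import datetime
from typing import Any, Optional


class TraceStore:
    def __init__(self) -> None:
        self.traces: dict[str, dict[str, Any]] = {}

    def start(self, agent_id: str, task_id: str) -> None:
        # One trace per agent/task pair, keyed exactly like the listener above.
        self.traces[f"{agent_id}_{task_id}"] = {
            "tool_uses": [],
            "llm_calls": [],
            "start_time": datetime.now(),
            "final_output": None,
        }

    def get_trace(self, agent_id: str, task_id: str) -> Optional[dict[str, Any]]:
        return self.traces.get(f"{agent_id}_{task_id}")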
@@ -1,4 +1,4 @@
from typing import Dict, Any
from typing import Any
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
@@ -23,7 +23,7 @@ class ExperimentResultsDisplay:

self.console.print(table)

def comparison_summary(self, comparison: Dict[str, Any], baseline_timestamp: str):
def comparison_summary(self, comparison: dict[str, Any], baseline_timestamp: str):
self.console.print(Panel(f"[bold]Comparison with baseline run from {baseline_timestamp}[/bold]",
expand=False))


@@ -1,4 +1,4 @@
from typing import Any, Dict
from typing import Any

from crewai.agent import Agent
from crewai.task import Task
@@ -14,7 +14,7 @@ class GoalAlignmentEvaluator(BaseEvaluator):
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
execution_trace: dict[str, Any],
final_output: Any,
task: Task | None = None,
) -> EvaluationScore:

@@ -9,7 +9,7 @@ This module provides evaluator implementations for:
import logging
import re
from enum import Enum
from typing import Any, Dict, List, Tuple
from typing import Any
import numpy as np
from collections.abc import Sequence

@@ -36,7 +36,7 @@ class ReasoningEfficiencyEvaluator(BaseEvaluator):
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
execution_trace: dict[str, Any],
final_output: TaskOutput | str,
task: Task | None = None,
) -> EvaluationScore:
@@ -183,7 +183,7 @@ Identify any inefficient reasoning patterns and provide specific suggestions for
raw_response=response
)

def _detect_loops(self, llm_calls: List[Dict]) -> Tuple[bool, List[Dict]]:
def _detect_loops(self, llm_calls: list[dict]) -> tuple[bool, list[dict]]:
loop_details = []

messages = []
@@ -227,7 +227,7 @@ Identify any inefficient reasoning patterns and provide specific suggestions for

return intersection / union if union > 0 else 0.0

def _analyze_reasoning_patterns(self, llm_calls: List[Dict]) -> Dict[str, Any]:
def _analyze_reasoning_patterns(self, llm_calls: list[dict]) -> dict[str, Any]:
call_lengths = []
response_times = []

@@ -331,7 +331,7 @@ Identify any inefficient reasoning patterns and provide specific suggestions for

return np.mean(indicators) if indicators else 0.0

def _get_call_samples(self, llm_calls: List[Dict]) -> str:
def _get_call_samples(self, llm_calls: list[dict]) -> str:
samples = []

if len(llm_calls) <= 6:

@@ -1,4 +1,4 @@
from typing import Any, Dict
from typing import Any

from crewai.agent import Agent
from crewai.task import Task
@@ -14,7 +14,7 @@ class SemanticQualityEvaluator(BaseEvaluator):
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
execution_trace: dict[str, Any],
final_output: Any,
task: Task | None = None,
) -> EvaluationScore:
@@ -65,4 +65,4 @@ Evaluate the semantic quality and reasoning of this output.
score=None,
feedback=f"Failed to parse evaluation. Raw response: {response}",
raw_response=response
)
)

@@ -1,5 +1,5 @@
import json
from typing import Dict, Any
from typing import Any

from crewai.experimental.evaluation.base_evaluator import BaseEvaluator, EvaluationScore, MetricCategory
from crewai.experimental.evaluation.json_parser import extract_json_from_llm_response
@@ -16,7 +16,7 @@ class ToolSelectionEvaluator(BaseEvaluator):
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
execution_trace: dict[str, Any],
final_output: str,
task: Task | None = None,
) -> EvaluationScore:
@@ -132,7 +132,7 @@ class ParameterExtractionEvaluator(BaseEvaluator):
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
execution_trace: dict[str, Any],
final_output: str,
task: Task | None = None,
) -> EvaluationScore:
@@ -274,7 +274,7 @@ class ToolInvocationEvaluator(BaseEvaluator):
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
execution_trace: dict[str, Any],
final_output: str,
task: Task | None = None,
) -> EvaluationScore:
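The evaluator hunks above repeat one mechanical change: PEP 585 builtin generics (`dict`, `list`, `tuple` are subscriptable from Python 3.9) replace `typing.Dict`/`List`/`Tuple`, and PEP 604 unions (`X | None`, Python 3.10 and newer) appear in new signatures instead of `Optional[X]`. Side by side, as an illustration rather than code from the changeset:

from typing import Any, Dict, List, Optional


def evaluate_old(trace: Dict[str, Any], roles: List[str]) -> Optional[float]:
    ...


def evaluate_new(trace: dict[str, Any], roles: list[str]) -> float | None:
    # Same runtime behavior; no typing imports needed for the containers.
    ...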
@@ -5,12 +5,8 @@ import logging
from typing import (
Any,
Callable,
Dict,
Generic,
List,
Optional,
Set,
Type,
TypeVar,
Union,
cast,
@@ -25,8 +21,8 @@ from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.types import FlowExecutionData
from crewai.flow.utils import get_possible_return_constants
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.flow_events import (
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.flow_events import (
FlowCreatedEvent,
FlowFinishedEvent,
FlowPlotEvent,
@@ -35,10 +31,10 @@ from crewai.utilities.events.flow_events import (
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
from crewai.utilities.events.listeners.tracing.trace_listener import (
from crewai.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.utilities.events.listeners.tracing.utils import (
from crewai.events.listeners.tracing.utils import (
is_tracing_enabled,
)
from crewai.utilities.printer import Printer
@@ -57,14 +53,14 @@ class FlowState(BaseModel):

# Type variables with explicit bounds
T = TypeVar(
"T", bound=Union[Dict[str, Any], BaseModel]
"T", bound=Union[dict[str, Any], BaseModel]
) # Generic flow state type parameter
StateT = TypeVar(
"StateT", bound=Union[Dict[str, Any], BaseModel]
"StateT", bound=Union[dict[str, Any], BaseModel]
) # State validation type parameter


def ensure_state_type(state: Any, expected_type: Type[StateT]) -> StateT:
def ensure_state_type(state: Any, expected_type: type[StateT]) -> StateT:
"""Ensure state matches expected type with proper validation.

Args:
@@ -436,19 +432,19 @@ class FlowMeta(type):
class Flow(Generic[T], metaclass=FlowMeta):
"""Base class for all flows.

Type parameter T must be either Dict[str, Any] or a subclass of BaseModel."""
Type parameter T must be either dict[str, Any] or a subclass of BaseModel."""

_printer = Printer()

_start_methods: List[str] = []
_listeners: Dict[str, tuple[str, List[str]]] = {}
_routers: Set[str] = set()
_router_paths: Dict[str, List[str]] = {}
initial_state: Union[Type[T], T, None] = None
_start_methods: list[str] = []
_listeners: dict[str, tuple[str, list[str]]] = {}
_routers: set[str] = set()
_router_paths: dict[str, list[str]] = {}
initial_state: Union[type[T], T, None] = None
name: Optional[str] = None
tracing: Optional[bool] = False

def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:
def __class_getitem__(cls: type["Flow"], item: type[T]) -> type["Flow"]:
class _FlowGeneric(cls): # type: ignore
_initial_state_T = item # type: ignore

@@ -468,11 +464,11 @@ class Flow(Generic[T], metaclass=FlowMeta):
**kwargs: Additional state values to initialize or override
"""
# Initialize basic instance attributes
self._methods: Dict[str, Callable] = {}
self._method_execution_counts: Dict[str, int] = {}
self._pending_and_listeners: Dict[str, Set[str]] = {}
self._method_outputs: List[Any] = [] # List to store all method outputs
self._completed_methods: Set[str] = set() # Track completed methods for reload
self._methods: dict[str, Callable] = {}
self._method_execution_counts: dict[str, int] = {}
self._pending_and_listeners: dict[str, set[str]] = {}
self._method_outputs: list[Any] = [] # List to store all method outputs
self._completed_methods: set[str] = set() # Track completed methods for reload
self._persistence: Optional[FlowPersistence] = persistence
self._is_execution_resuming: bool = False

@@ -600,7 +596,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
return self._state

@property
def method_outputs(self) -> List[Any]:
def method_outputs(self) -> list[Any]:
"""Returns the list of all outputs from executed methods."""
return self._method_outputs

@@ -637,7 +633,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
except (AttributeError, TypeError):
return "" # Safely handle any unexpected attribute access issues

def _initialize_state(self, inputs: Dict[str, Any]) -> None:
def _initialize_state(self, inputs: dict[str, Any]) -> None:
"""Initialize or update flow state with new inputs.

Args:
@@ -691,7 +687,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
else:
raise TypeError("State must be a BaseModel instance or a dictionary.")

def _restore_state(self, stored_state: Dict[str, Any]) -> None:
def _restore_state(self, stored_state: dict[str, Any]) -> None:
"""Restore flow state from persistence.

Args:
@@ -783,7 +779,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
elif hasattr(self._state, field_name):
object.__setattr__(self._state, field_name, value)

def _apply_state_updates(self, updates: Dict[str, Any]) -> None:
def _apply_state_updates(self, updates: dict[str, Any]) -> None:
"""Apply multiple state updates efficiently."""
if isinstance(self._state, dict):
self._state.update(updates)
@@ -792,7 +788,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
if hasattr(self._state, key):
object.__setattr__(self._state, key, value)

def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
def kickoff(self, inputs: Optional[dict[str, Any]] = None) -> Any:
"""
Start the flow execution in a synchronous context.

@@ -805,7 +801,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

return asyncio.run(run_flow())

async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
async def kickoff_async(self, inputs: Optional[dict[str, Any]] = None) -> Any:
"""
Start the flow execution asynchronously.

@@ -934,12 +930,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
method = self._methods[start_method_name]
enhanced_method = self._inject_trigger_payload_for_start_method(method)

result = await self._execute_method(
start_method_name, enhanced_method
)
result = await self._execute_method(start_method_name, enhanced_method)
await self._execute_listeners(start_method_name, result)

def _inject_trigger_payload_for_start_method(self, original_method: Callable) -> Callable:
def _inject_trigger_payload_for_start_method(
self, original_method: Callable
) -> Callable:
def prepare_kwargs(*args, **kwargs):
inputs = baggage.get_baggage("flow_inputs") or {}
trigger_payload = inputs.get("crewai_trigger_payload")
@@ -952,15 +948,17 @@ class Flow(Generic[T], metaclass=FlowMeta):
elif trigger_payload is not None:
self._log_flow_event(
f"Trigger payload available but {original_method.__name__} doesn't accept crewai_trigger_payload parameter",
color="yellow"
color="yellow",
)
return args, kwargs

if asyncio.iscoroutinefunction(original_method):

async def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs)
return await original_method(*args, **kwargs)
else:

def enhanced_method(*args, **kwargs):
args, kwargs = prepare_kwargs(*args, **kwargs)
return original_method(*args, **kwargs)
@@ -1107,7 +1105,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

def _find_triggered_methods(
self, trigger_method: str, router_only: bool
) -> List[str]:
) -> list[str]:
"""
Finds all methods that should be triggered based on conditions.

@@ -1124,7 +1122,7 @@ class Flow(Generic[T], metaclass=FlowMeta):

Returns
-------
List[str]
list[str]
Names of methods that should be triggered.

Notes
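`Flow` stays generic over its state: the `T` and `StateT` TypeVars above are bound to `Union[dict[str, Any], BaseModel]`, so a flow's state may be either a free-form dict or a validated Pydantic model. A compact sketch of declaring such a bound and checking it at runtime (simplified relative to `ensure_state_type`):

from typing import Any, TypeVar, Union

from pydantic import BaseModel

StateT = TypeVar("StateT", bound=Union[dict[str, Any], BaseModel])


def ensure_state(state: Any, expected: type) -> Any:
    # expected must be a runtime class here (dict or a BaseModel subclass);
    # isinstance() rejects subscripted generics like dict[str, Any].
    if not isinstance(state, expected):
        raise TypeError(f"Expected {expected.__name__}, got {type(state).__name__}")
    return state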
@@ -7,7 +7,7 @@ traversal attacks and ensure paths remain within allowed boundaries.

import os
from pathlib import Path
from typing import List, Union
from typing import Union


def safe_path_join(*parts: str, root: Union[str, Path, None] = None) -> str:
@@ -101,7 +101,7 @@ def validate_path_exists(path: Union[str, Path], file_type: str = "file") -> str
raise ValueError(f"Invalid path: {str(e)}")


def list_files(directory: Union[str, Path], pattern: str = "*") -> List[str]:
def list_files(directory: Union[str, Path], pattern: str = "*") -> list[str]:
"""
Safely list files in a directory matching a pattern.

@@ -114,7 +114,7 @@ def list_files(directory: Union[str, Path], pattern: str = "*") -> List[str]:

Returns
-------
List[str]
list[str]
List of matching file paths.

Raises
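The path-utility hunk above only touches annotations, but the functions it passes through exist to block `../` traversal. One common way to implement a `safe_path_join` with that contract (a sketch under the same signature, not necessarily the repository's implementation):

from pathlib import Path
from typing import Union


def safe_path_join(*parts: str, root: Union[str, Path, None] = None) -> str:
    # Resolve the combined path and refuse anything that escapes the root.
    base = (Path(root) if root is not None else Path.cwd()).resolve()
    candidate = base.joinpath(*parts).resolve()
    if not candidate.is_relative_to(base):
        raise ValueError(f"Path escapes allowed root: {candidate}")
    return str(candidate)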
@@ -4,7 +4,7 @@ CrewAI Flow Persistence.
This module provides interfaces and implementations for persisting flow states.
"""

from typing import Any, Dict, TypeVar, Union
from typing import Any, TypeVar, Union

from pydantic import BaseModel

@@ -14,5 +14,5 @@ from crewai.flow.persistence.sqlite import SQLiteFlowPersistence

__all__ = ["FlowPersistence", "persist", "SQLiteFlowPersistence"]

StateType = TypeVar('StateType', bound=Union[Dict[str, Any], BaseModel])
DictStateType = Dict[str, Any]
StateType = TypeVar('StateType', bound=Union[dict[str, Any], BaseModel])
DictStateType = dict[str, Any]
Some files were not shown because too many files have changed in this diff.