mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-04-14 15:02:37 +00:00
Compare commits
23 Commits
devin/1770
...
matcha/opt
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
de8d28909c | ||
|
|
19b9b9da23 | ||
|
|
2327fd04a3 | ||
|
|
0bdc5a093e | ||
|
|
017189db78 | ||
|
|
02d911494f | ||
|
|
8102d0a6ca | ||
|
|
ee374d01de | ||
|
|
9914e51199 | ||
|
|
2dbb83ae31 | ||
|
|
7377e1aa26 | ||
|
|
51754899a2 | ||
|
|
71b4f8402a | ||
|
|
4a4c99d8a2 | ||
|
|
28a6b855a2 | ||
|
|
d09656664d | ||
|
|
49aa29bb41 | ||
|
|
8df499d471 | ||
|
|
84d57c7a24 | ||
|
|
4aedd58829 | ||
|
|
09e9229efc | ||
|
|
18d266c8e7 | ||
|
|
670cdcacaa |
@@ -21,7 +21,6 @@ OPENROUTER_API_KEY=fake-openrouter-key
|
||||
AWS_ACCESS_KEY_ID=fake-aws-access-key
|
||||
AWS_SECRET_ACCESS_KEY=fake-aws-secret-key
|
||||
AWS_DEFAULT_REGION=us-east-1
|
||||
AWS_REGION_NAME=us-east-1
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Azure OpenAI Configuration
|
||||
|
||||
20
.github/workflows/build-uv-cache.yml
vendored
20
.github/workflows/build-uv-cache.yml
vendored
@@ -25,24 +25,12 @@ jobs:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install uv
|
||||
- name: Install uv and populate cache
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "0.8.4"
|
||||
python-version: ${{ matrix.python-version }}
|
||||
enable-cache: false
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies and populate cache
|
||||
run: |
|
||||
echo "Building global UV cache for Python ${{ matrix.python-version }}..."
|
||||
uv sync --all-groups --all-extras --no-install-project
|
||||
echo "Cache populated successfully"
|
||||
|
||||
- name: Save uv caches
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
|
||||
- name: Install dependencies
|
||||
run: uv sync --all-groups --all-extras --frozen --no-install-project
|
||||
|
||||
26
.github/workflows/linter.yml
vendored
26
.github/workflows/linter.yml
vendored
@@ -18,27 +18,15 @@ jobs:
|
||||
- name: Fetch Target Branch
|
||||
run: git fetch origin $TARGET_BRANCH --depth=1
|
||||
|
||||
- name: Restore global uv cache
|
||||
id: cache-restore
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
|
||||
restore-keys: |
|
||||
uv-main-py3.11-
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "0.8.4"
|
||||
python-version: "3.11"
|
||||
enable-cache: false
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync --all-groups --all-extras --no-install-project
|
||||
run: uv sync --all-groups --all-extras --frozen --no-install-project
|
||||
|
||||
- name: Get Changed Python Files
|
||||
id: changed-files
|
||||
@@ -57,13 +45,3 @@ jobs:
|
||||
| grep -v 'src/crewai/cli/templates/' \
|
||||
| grep -v '/tests/' \
|
||||
| xargs -I{} uv run ruff check "{}"
|
||||
|
||||
- name: Save uv caches
|
||||
if: steps.cache-restore.outputs.cache-hit != 'true'
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py3.11-${{ hashFiles('uv.lock') }}
|
||||
|
||||
5
.github/workflows/publish.yml
vendored
5
.github/workflows/publish.yml
vendored
@@ -1,8 +1,6 @@
|
||||
name: Publish to PyPI
|
||||
|
||||
on:
|
||||
repository_dispatch:
|
||||
types: [deployment-tests-passed]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
release_tag:
|
||||
@@ -20,11 +18,8 @@ jobs:
|
||||
- name: Determine release tag
|
||||
id: release
|
||||
run: |
|
||||
# Priority: workflow_dispatch input > repository_dispatch payload > default branch
|
||||
if [ -n "${{ inputs.release_tag }}" ]; then
|
||||
echo "tag=${{ inputs.release_tag }}" >> $GITHUB_OUTPUT
|
||||
elif [ -n "${{ github.event.client_payload.release_tag }}" ]; then
|
||||
echo "tag=${{ github.event.client_payload.release_tag }}" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "tag=" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
99
.github/workflows/tests.yml
vendored
99
.github/workflows/tests.yml
vendored
@@ -1,47 +1,42 @@
|
||||
name: Run Tests
|
||||
|
||||
on: [pull_request]
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches: [main]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: tests-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
tests:
|
||||
name: tests (${{ matrix.python-version }})
|
||||
name: tests (${{ matrix.python-version }}, ${{ matrix.group }}/4)
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
timeout-minutes: 10
|
||||
strategy:
|
||||
fail-fast: true
|
||||
matrix:
|
||||
python-version: ['3.10', '3.11', '3.12', '3.13']
|
||||
group: [1, 2, 3, 4, 5, 6, 7, 8]
|
||||
group: [1, 2, 3, 4]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0 # Fetch all history for proper diff
|
||||
|
||||
- name: Restore global uv cache
|
||||
id: cache-restore
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
|
||||
restore-keys: |
|
||||
uv-main-py${{ matrix.python-version }}-
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "0.8.4"
|
||||
python-version: ${{ matrix.python-version }}
|
||||
enable-cache: false
|
||||
enable-cache: true
|
||||
|
||||
- name: Install the project
|
||||
run: uv sync --all-groups --all-extras
|
||||
run: uv sync --all-groups --all-extras --frozen
|
||||
|
||||
- name: Restore test durations
|
||||
uses: actions/cache/restore@v4
|
||||
@@ -49,52 +44,56 @@ jobs:
|
||||
path: .test_durations_py*
|
||||
key: test-durations-py${{ matrix.python-version }}
|
||||
|
||||
- name: Run tests (group ${{ matrix.group }} of 8)
|
||||
- name: Run tests (group ${{ matrix.group }} of 4)
|
||||
run: |
|
||||
PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
|
||||
DURATION_FILE="../../.test_durations_py${PYTHON_VERSION_SAFE}"
|
||||
|
||||
# Temporarily always skip cached durations to fix test splitting
|
||||
# When durations don't match, pytest-split runs duplicate tests instead of splitting
|
||||
echo "Using even test splitting (duration cache disabled until fix merged)"
|
||||
DURATIONS_ARG=""
|
||||
if [ -f "$DURATION_FILE" ]; then
|
||||
if git diff "origin/${{ github.base_ref }}...HEAD" --name-only 2>/dev/null | grep -q "^lib/.*/tests/.*\.py$"; then
|
||||
echo "::notice::Test files changed — using even splitting"
|
||||
else
|
||||
echo "::notice::Using cached test durations for optimal splitting"
|
||||
DURATIONS_ARG="--durations-path=${DURATION_FILE}"
|
||||
fi
|
||||
else
|
||||
echo "::notice::No cached durations — using even splitting"
|
||||
fi
|
||||
|
||||
# Original logic (disabled temporarily):
|
||||
# if [ ! -f "$DURATION_FILE" ]; then
|
||||
# echo "No cached durations found, tests will be split evenly"
|
||||
# DURATIONS_ARG=""
|
||||
# elif git diff origin/${{ github.base_ref }}...HEAD --name-only 2>/dev/null | grep -q "^tests/.*\.py$"; then
|
||||
# echo "Test files have changed, skipping cached durations to avoid mismatches"
|
||||
# DURATIONS_ARG=""
|
||||
# else
|
||||
# echo "No test changes detected, using cached test durations for optimal splitting"
|
||||
# DURATIONS_ARG="--durations-path=${DURATION_FILE}"
|
||||
# fi
|
||||
|
||||
cd lib/crewai && uv run pytest \
|
||||
cd lib/crewai && uv run --frozen pytest \
|
||||
-vv \
|
||||
--splits 8 \
|
||||
--splits 4 \
|
||||
--group ${{ matrix.group }} \
|
||||
$DURATIONS_ARG \
|
||||
--splitting-algorithm least_duration \
|
||||
--durations=10 \
|
||||
--maxfail=3
|
||||
|
||||
- name: Run tool tests (group ${{ matrix.group }} of 8)
|
||||
- name: Run tool tests (group ${{ matrix.group }} of 4)
|
||||
run: |
|
||||
cd lib/crewai-tools && uv run pytest \
|
||||
cd lib/crewai-tools && uv run --frozen pytest \
|
||||
-vv \
|
||||
--splits 8 \
|
||||
--splits 4 \
|
||||
--group ${{ matrix.group }} \
|
||||
--durations=10 \
|
||||
--maxfail=3
|
||||
|
||||
|
||||
- name: Save uv caches
|
||||
if: steps.cache-restore.outputs.cache-hit != 'true'
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
|
||||
# Gate jobs matching required status checks in branch protection
|
||||
tests-gate:
|
||||
name: tests (${{ matrix.python-version }})
|
||||
needs: [tests]
|
||||
if: always()
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ['3.10', '3.11', '3.12', '3.13']
|
||||
steps:
|
||||
- name: Check test results
|
||||
run: |
|
||||
if [ "${{ needs.tests.result }}" = "success" ]; then
|
||||
echo "All tests passed"
|
||||
else
|
||||
echo "Tests failed: ${{ needs.tests.result }}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
18
.github/workflows/trigger-deployment-tests.yml
vendored
18
.github/workflows/trigger-deployment-tests.yml
vendored
@@ -1,18 +0,0 @@
|
||||
name: Trigger Deployment Tests
|
||||
|
||||
on:
|
||||
release:
|
||||
types: [published]
|
||||
|
||||
jobs:
|
||||
trigger:
|
||||
name: Trigger deployment tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Trigger deployment tests
|
||||
uses: peter-evans/repository-dispatch@v3
|
||||
with:
|
||||
token: ${{ secrets.CREWAI_DEPLOYMENTS_PAT }}
|
||||
repository: ${{ secrets.CREWAI_DEPLOYMENTS_REPOSITORY }}
|
||||
event-type: crewai-release
|
||||
client-payload: '{"release_tag": "${{ github.event.release.tag_name }}", "release_name": "${{ github.event.release.name }}"}'
|
||||
30
.github/workflows/type-checker.yml
vendored
30
.github/workflows/type-checker.yml
vendored
@@ -20,27 +20,15 @@ jobs:
|
||||
with:
|
||||
fetch-depth: 0 # Fetch all history for proper diff
|
||||
|
||||
- name: Restore global uv cache
|
||||
id: cache-restore
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
|
||||
restore-keys: |
|
||||
uv-main-py${{ matrix.python-version }}-
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "0.8.4"
|
||||
python-version: ${{ matrix.python-version }}
|
||||
enable-cache: false
|
||||
enable-cache: true
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync --all-groups --all-extras
|
||||
run: uv sync --all-groups --all-extras --frozen
|
||||
|
||||
- name: Get changed Python files
|
||||
id: changed-files
|
||||
@@ -74,16 +62,6 @@ jobs:
|
||||
if: steps.changed-files.outputs.has_changes == 'false'
|
||||
run: echo "No Python files in src/ were modified - skipping type checks"
|
||||
|
||||
- name: Save uv caches
|
||||
if: steps.cache-restore.outputs.cache-hit != 'true'
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
|
||||
|
||||
# Summary job to provide single status for branch protection
|
||||
type-checker:
|
||||
name: type-checker
|
||||
@@ -94,8 +72,8 @@ jobs:
|
||||
- name: Check matrix results
|
||||
run: |
|
||||
if [ "${{ needs.type-checker-matrix.result }}" == "success" ] || [ "${{ needs.type-checker-matrix.result }}" == "skipped" ]; then
|
||||
echo "✅ All type checks passed"
|
||||
echo "All type checks passed"
|
||||
else
|
||||
echo "❌ Type checks failed"
|
||||
echo "Type checks failed"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
34
.github/workflows/update-test-durations.yml
vendored
34
.github/workflows/update-test-durations.yml
vendored
@@ -5,7 +5,9 @@ on:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'tests/**/*.py'
|
||||
- 'lib/crewai/tests/**/*.py'
|
||||
- 'lib/crewai-tools/tests/**/*.py'
|
||||
- 'lib/crewai-files/tests/**/*.py'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
@@ -20,37 +22,25 @@ jobs:
|
||||
env:
|
||||
OPENAI_API_KEY: fake-api-key
|
||||
PYTHONUNBUFFERED: 1
|
||||
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Restore global uv cache
|
||||
id: cache-restore
|
||||
uses: actions/cache/restore@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
|
||||
restore-keys: |
|
||||
uv-main-py${{ matrix.python-version }}-
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v6
|
||||
with:
|
||||
version: "0.8.4"
|
||||
python-version: ${{ matrix.python-version }}
|
||||
enable-cache: false
|
||||
enable-cache: true
|
||||
|
||||
- name: Install the project
|
||||
run: uv sync --all-groups --all-extras
|
||||
run: uv sync --all-groups --all-extras --frozen
|
||||
|
||||
- name: Run all tests and store durations
|
||||
run: |
|
||||
PYTHON_VERSION_SAFE=$(echo "${{ matrix.python-version }}" | tr '.' '_')
|
||||
uv run pytest --store-durations --durations-path=.test_durations_py${PYTHON_VERSION_SAFE} -n auto
|
||||
uv run --frozen pytest --store-durations --durations-path=.test_durations_py${PYTHON_VERSION_SAFE} -n auto
|
||||
continue-on-error: true
|
||||
|
||||
- name: Save durations to cache
|
||||
@@ -59,13 +49,3 @@ jobs:
|
||||
with:
|
||||
path: .test_durations_py*
|
||||
key: test-durations-py${{ matrix.python-version }}
|
||||
|
||||
- name: Save uv caches
|
||||
if: steps.cache-restore.outputs.cache-hit != 'true'
|
||||
uses: actions/cache/save@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cache/uv
|
||||
~/.local/share/uv
|
||||
.venv
|
||||
key: uv-main-py${{ matrix.python-version }}-${{ hashFiles('uv.lock') }}
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -27,3 +27,6 @@ conceptual_plan.md
|
||||
build_image
|
||||
chromadb-*.lock
|
||||
.claude
|
||||
.crewai/memory
|
||||
blogs/*
|
||||
secrets/*
|
||||
|
||||
@@ -11,7 +11,11 @@ from typing import Any
|
||||
from dotenv import load_dotenv
|
||||
import pytest
|
||||
from vcr.request import Request # type: ignore[import-untyped]
|
||||
import vcr.stubs.httpx_stubs as httpx_stubs # type: ignore[import-untyped]
|
||||
|
||||
try:
|
||||
import vcr.stubs.httpx_stubs as httpx_stubs # type: ignore[import-untyped]
|
||||
except ModuleNotFoundError:
|
||||
import vcr.stubs.httpcore_stubs as httpx_stubs # type: ignore[import-untyped]
|
||||
|
||||
|
||||
env_test_path = Path(__file__).parent / ".env.test"
|
||||
|
||||
2299
docs/docs.json
2299
docs/docs.json
File diff suppressed because it is too large
Load Diff
@@ -975,6 +975,79 @@ result = streaming.result
|
||||
|
||||
Learn more about streaming in the [Streaming Flow Execution](/en/learn/streaming-flow-execution) guide.
|
||||
|
||||
## Memory in Flows
|
||||
|
||||
Every Flow automatically has access to CrewAI's unified [Memory](/concepts/memory) system. You can store, recall, and extract memories directly inside any flow method using three built-in convenience methods.
|
||||
|
||||
### Built-in Methods
|
||||
|
||||
| Method | Description |
|
||||
| :--- | :--- |
|
||||
| `self.remember(content, **kwargs)` | Store content in memory. Accepts optional `scope`, `categories`, `metadata`, `importance`. |
|
||||
| `self.recall(query, **kwargs)` | Retrieve relevant memories. Accepts optional `scope`, `categories`, `limit`, `depth`. |
|
||||
| `self.extract_memories(content)` | Break raw text into discrete, self-contained memory statements. |
|
||||
|
||||
A default `Memory()` instance is created automatically when the Flow initializes. You can also pass a custom one:
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow
|
||||
from crewai import Memory
|
||||
|
||||
custom_memory = Memory(
|
||||
recency_weight=0.5,
|
||||
recency_half_life_days=7,
|
||||
embedder={"provider": "ollama", "config": {"model_name": "mxbai-embed-large"}},
|
||||
)
|
||||
|
||||
flow = MyFlow(memory=custom_memory)
|
||||
```
|
||||
|
||||
### Example: Research and Analyze Flow
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, listen, start
|
||||
|
||||
|
||||
class ResearchAnalysisFlow(Flow):
|
||||
@start()
|
||||
def gather_data(self):
|
||||
# Simulate research findings
|
||||
findings = (
|
||||
"PostgreSQL handles 10k concurrent connections with connection pooling. "
|
||||
"MySQL caps at around 5k. MongoDB scales horizontally but adds complexity."
|
||||
)
|
||||
|
||||
# Extract atomic facts and remember each one
|
||||
memories = self.extract_memories(findings)
|
||||
for mem in memories:
|
||||
self.remember(mem, scope="/research/databases")
|
||||
|
||||
return findings
|
||||
|
||||
@listen(gather_data)
|
||||
def analyze(self, raw_findings):
|
||||
# Recall relevant past research (from this run or previous runs)
|
||||
past = self.recall("database performance and scaling", limit=10, depth="shallow")
|
||||
|
||||
context_lines = [f"- {m.record.content}" for m in past]
|
||||
context = "\n".join(context_lines) if context_lines else "No prior context."
|
||||
|
||||
return {
|
||||
"new_findings": raw_findings,
|
||||
"prior_context": context,
|
||||
"total_memories": len(past),
|
||||
}
|
||||
|
||||
|
||||
flow = ResearchAnalysisFlow()
|
||||
result = flow.kickoff()
|
||||
print(result)
|
||||
```
|
||||
|
||||
Because memory persists across runs (backed by LanceDB on disk), the `analyze` step will recall findings from previous executions too -- enabling flows that learn and accumulate knowledge over time.
|
||||
|
||||
See the [Memory documentation](/concepts/memory) for details on scopes, slices, composite scoring, embedder configuration, and more.
|
||||
|
||||
### Using the CLI
|
||||
|
||||
Starting from version 0.103.0, you can run flows using the `crewai run` command:
|
||||
|
||||
@@ -470,7 +470,7 @@ In this section, you'll find detailed examples that help you select, configure,
|
||||
To get an Express mode API key:
|
||||
- New Google Cloud users: Get an [express mode API key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey)
|
||||
- Existing Google Cloud users: Get a [Google Cloud API key bound to a service account](https://cloud.google.com/docs/authentication/api-keys)
|
||||
|
||||
|
||||
For more details, see the [Vertex AI Express mode documentation](https://docs.cloud.google.com/vertex-ai/generative-ai/docs/start/quickstart?usertype=apikey).
|
||||
</Info>
|
||||
|
||||
@@ -652,6 +652,7 @@ In this section, you'll find detailed examples that help you select, configure,
|
||||
# Optional
|
||||
AWS_SESSION_TOKEN=<your-session-token> # For temporary credentials
|
||||
AWS_DEFAULT_REGION=<your-region> # Defaults to us-east-1
|
||||
AWS_REGION_NAME=<your-region> # Alternative configuration for backwards compatibility with LiteLLM. Defaults to us-east-1
|
||||
```
|
||||
|
||||
**Basic Usage:**
|
||||
@@ -695,6 +696,7 @@ In this section, you'll find detailed examples that help you select, configure,
|
||||
- `AWS_SECRET_ACCESS_KEY`: AWS secret key (required)
|
||||
- `AWS_SESSION_TOKEN`: AWS session token for temporary credentials (optional)
|
||||
- `AWS_DEFAULT_REGION`: AWS region (defaults to `us-east-1`)
|
||||
- `AWS_REGION_NAME`: AWS region (defaults to `us-east-1`). Alternative configuration for backwards compatibility with LiteLLM
|
||||
|
||||
**Features:**
|
||||
- Native tool calling support via Converse API
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -38,22 +38,21 @@ CrewAI Enterprise provides a comprehensive Human-in-the-Loop (HITL) management s
|
||||
Configure human review checkpoints within your Flows using the `@human_feedback` decorator. When execution reaches a review point, the system pauses, notifies the assignee via email, and waits for a response.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
|
||||
|
||||
class ContentApprovalFlow(Flow):
|
||||
@start()
|
||||
def generate_content(self):
|
||||
# AI generates content
|
||||
return "Generated marketing copy for Q1 campaign..."
|
||||
|
||||
@listen(generate_content)
|
||||
@human_feedback(
|
||||
message="Please review this content for brand compliance:",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
)
|
||||
def review_content(self, content):
|
||||
return content
|
||||
@listen(or_("generate_content", "needs_revision"))
|
||||
def review_content(self):
|
||||
return "Marketing copy for review..."
|
||||
|
||||
@listen("approved")
|
||||
def publish_content(self, result: HumanFeedbackResult):
|
||||
@@ -62,10 +61,6 @@ class ContentApprovalFlow(Flow):
|
||||
@listen("rejected")
|
||||
def archive_content(self, result: HumanFeedbackResult):
|
||||
print(f"Content rejected. Reason: {result.feedback}")
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise_content(self, result: HumanFeedbackResult):
|
||||
print(f"Revision requested: {result.feedback}")
|
||||
```
|
||||
|
||||
For complete implementation details, see the [Human Feedback in Flows](/en/learn/human-feedback-in-flows) guide.
|
||||
|
||||
@@ -177,6 +177,11 @@ You need to push your crew to a GitHub repository. If you haven't created a crew
|
||||

|
||||
</Frame>
|
||||
|
||||
<Info>
|
||||
Using private Python packages? You'll need to add your registry credentials here too.
|
||||
See [Private Package Registries](/en/enterprise/guides/private-package-registry) for the required variables.
|
||||
</Info>
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Deploy Your Crew">
|
||||
|
||||
@@ -256,6 +256,12 @@ Before deployment, ensure you have:
|
||||
1. **LLM API keys** ready (OpenAI, Anthropic, Google, etc.)
|
||||
2. **Tool API keys** if using external tools (Serper, etc.)
|
||||
|
||||
<Info>
|
||||
If your project depends on packages from a **private PyPI registry**, you'll also need to configure
|
||||
registry authentication credentials as environment variables. See the
|
||||
[Private Package Registries](/en/enterprise/guides/private-package-registry) guide for details.
|
||||
</Info>
|
||||
|
||||
<Tip>
|
||||
Test your project locally with the same environment variables before deploying
|
||||
to catch configuration issues early.
|
||||
|
||||
263
docs/en/enterprise/guides/private-package-registry.mdx
Normal file
263
docs/en/enterprise/guides/private-package-registry.mdx
Normal file
@@ -0,0 +1,263 @@
|
||||
---
|
||||
title: "Private Package Registries"
|
||||
description: "Install private Python packages from authenticated PyPI registries in CrewAI AMP"
|
||||
icon: "lock"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
<Note>
|
||||
This guide covers how to configure your CrewAI project to install Python packages
|
||||
from private PyPI registries (Azure DevOps Artifacts, GitHub Packages, GitLab, AWS CodeArtifact, etc.)
|
||||
when deploying to CrewAI AMP.
|
||||
</Note>
|
||||
|
||||
## When You Need This
|
||||
|
||||
If your project depends on internal or proprietary Python packages hosted on a private registry
|
||||
rather than the public PyPI, you'll need to:
|
||||
|
||||
1. Tell UV **where** to find the package (an index URL)
|
||||
2. Tell UV **which** packages come from that index (a source mapping)
|
||||
3. Provide **credentials** so UV can authenticate during install
|
||||
|
||||
CrewAI AMP uses [UV](https://docs.astral.sh/uv/) for dependency resolution and installation.
|
||||
UV supports authenticated private registries through `pyproject.toml` configuration combined
|
||||
with environment variables for credentials.
|
||||
|
||||
## Step 1: Configure pyproject.toml
|
||||
|
||||
Three pieces work together in your `pyproject.toml`:
|
||||
|
||||
### 1a. Declare the dependency
|
||||
|
||||
Add the private package to your `[project.dependencies]` like any other dependency:
|
||||
|
||||
```toml
|
||||
[project]
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.100.1,<1.0.0",
|
||||
"my-private-package>=1.2.0",
|
||||
]
|
||||
```
|
||||
|
||||
### 1b. Define the index
|
||||
|
||||
Register your private registry as a named index under `[[tool.uv.index]]`:
|
||||
|
||||
```toml
|
||||
[[tool.uv.index]]
|
||||
name = "my-private-registry"
|
||||
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
|
||||
explicit = true
|
||||
```
|
||||
|
||||
<Info>
|
||||
The `name` field is important — UV uses it to construct the environment variable names
|
||||
for authentication (see [Step 2](#step-2-set-authentication-credentials) below).
|
||||
|
||||
Setting `explicit = true` means UV won't search this index for every package — only the
|
||||
ones you explicitly map to it in `[tool.uv.sources]`. This avoids unnecessary queries
|
||||
against your private registry and protects against dependency confusion attacks.
|
||||
</Info>
|
||||
|
||||
### 1c. Map the package to the index
|
||||
|
||||
Tell UV which packages should be resolved from your private index using `[tool.uv.sources]`:
|
||||
|
||||
```toml
|
||||
[tool.uv.sources]
|
||||
my-private-package = { index = "my-private-registry" }
|
||||
```
|
||||
|
||||
### Complete example
|
||||
|
||||
```toml
|
||||
[project]
|
||||
name = "my-crew-project"
|
||||
version = "0.1.0"
|
||||
requires-python = ">=3.10,<=3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.100.1,<1.0.0",
|
||||
"my-private-package>=1.2.0",
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
type = "crew"
|
||||
|
||||
[[tool.uv.index]]
|
||||
name = "my-private-registry"
|
||||
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
|
||||
explicit = true
|
||||
|
||||
[tool.uv.sources]
|
||||
my-private-package = { index = "my-private-registry" }
|
||||
```
|
||||
|
||||
After updating `pyproject.toml`, regenerate your lock file:
|
||||
|
||||
```bash
|
||||
uv lock
|
||||
```
|
||||
|
||||
<Warning>
|
||||
Always commit the updated `uv.lock` along with your `pyproject.toml` changes.
|
||||
The lock file is required for deployment — see [Prepare for Deployment](/en/enterprise/guides/prepare-for-deployment).
|
||||
</Warning>
|
||||
|
||||
## Step 2: Set Authentication Credentials
|
||||
|
||||
UV authenticates against private indexes using environment variables that follow a naming convention
|
||||
based on the index name you defined in `pyproject.toml`:
|
||||
|
||||
```
|
||||
UV_INDEX_{UPPER_NAME}_USERNAME
|
||||
UV_INDEX_{UPPER_NAME}_PASSWORD
|
||||
```
|
||||
|
||||
Where `{UPPER_NAME}` is your index name converted to **uppercase** with **hyphens replaced by underscores**.
|
||||
|
||||
For example, an index named `my-private-registry` uses:
|
||||
|
||||
| Variable | Value |
|
||||
|----------|-------|
|
||||
| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | Your registry username or token name |
|
||||
| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | Your registry password or token/PAT |
|
||||
|
||||
<Warning>
|
||||
These environment variables **must** be added via the CrewAI AMP **Environment Variables** settings —
|
||||
either globally or at the deployment level. They cannot be set in `.env` files or hardcoded in your project.
|
||||
|
||||
See [Setting Environment Variables in AMP](#setting-environment-variables-in-amp) below.
|
||||
</Warning>
|
||||
|
||||
## Registry Provider Reference
|
||||
|
||||
The table below shows the index URL format and credential values for common registry providers.
|
||||
Replace placeholder values with your actual organization and feed details.
|
||||
|
||||
| Provider | Index URL | Username | Password |
|
||||
|----------|-----------|----------|----------|
|
||||
| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | Any non-empty string (e.g. `token`) | Personal Access Token (PAT) with Packaging Read scope |
|
||||
| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | GitHub username | Personal Access Token (classic) with `read:packages` scope |
|
||||
| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | Project or Personal Access Token with `read_api` scope |
|
||||
| **AWS CodeArtifact** | Use the URL from `aws codeartifact get-repository-endpoint` | `aws` | Token from `aws codeartifact get-authorization-token` |
|
||||
| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Base64-encoded service account key |
|
||||
| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | Username or email | API key or identity token |
|
||||
| **Self-hosted (devpi, Nexus, etc.)** | Your registry's simple API URL | Registry username | Registry password |
|
||||
|
||||
<Tip>
|
||||
For **AWS CodeArtifact**, the authorization token expires periodically.
|
||||
You'll need to refresh the `UV_INDEX_*_PASSWORD` value when it expires.
|
||||
Consider automating this in your CI/CD pipeline.
|
||||
</Tip>
|
||||
|
||||
## Setting Environment Variables in AMP
|
||||
|
||||
Private registry credentials must be configured as environment variables in CrewAI AMP.
|
||||
You have two options:
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Web Interface">
|
||||
1. Log in to [CrewAI AMP](https://app.crewai.com)
|
||||
2. Navigate to your automation
|
||||
3. Open the **Environment Variables** tab
|
||||
4. Add each variable (`UV_INDEX_*_USERNAME` and `UV_INDEX_*_PASSWORD`) with its value
|
||||
|
||||
See the [Deploy to AMP — Set Environment Variables](/en/enterprise/guides/deploy-to-amp#set-environment-variables) step for details.
|
||||
</Tab>
|
||||
<Tab title="CLI Deployment">
|
||||
Add the variables to your local `.env` file before running `crewai deploy create`.
|
||||
The CLI will securely transfer them to the platform:
|
||||
|
||||
```bash
|
||||
# .env
|
||||
OPENAI_API_KEY=sk-...
|
||||
UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
|
||||
UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here
|
||||
```
|
||||
|
||||
```bash
|
||||
crewai deploy create
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
<Warning>
|
||||
**Never** commit credentials to your repository. Use AMP environment variables for all secrets.
|
||||
The `.env` file should be listed in `.gitignore`.
|
||||
</Warning>
|
||||
|
||||
To update credentials on an existing deployment, see [Update Your Crew — Environment Variables](/en/enterprise/guides/update-crew).
|
||||
|
||||
## How It All Fits Together
|
||||
|
||||
When CrewAI AMP builds your automation, the resolution flow works like this:
|
||||
|
||||
<Steps>
|
||||
<Step title="Build starts">
|
||||
AMP pulls your repository and reads `pyproject.toml` and `uv.lock`.
|
||||
</Step>
|
||||
<Step title="UV resolves dependencies">
|
||||
UV reads `[tool.uv.sources]` to determine which index each package should come from.
|
||||
</Step>
|
||||
<Step title="UV authenticates">
|
||||
For each private index, UV looks up `UV_INDEX_{NAME}_USERNAME` and `UV_INDEX_{NAME}_PASSWORD`
|
||||
from the environment variables you configured in AMP.
|
||||
</Step>
|
||||
<Step title="Packages install">
|
||||
UV downloads and installs all packages — both public (from PyPI) and private (from your registry).
|
||||
</Step>
|
||||
<Step title="Automation runs">
|
||||
Your crew or flow starts with all dependencies available.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Authentication Errors During Build
|
||||
|
||||
**Symptom**: Build fails with `401 Unauthorized` or `403 Forbidden` when resolving a private package.
|
||||
|
||||
**Check**:
|
||||
- The `UV_INDEX_*` environment variable names match your index name exactly (uppercased, hyphens → underscores)
|
||||
- Credentials are set in AMP environment variables, not just in a local `.env`
|
||||
- Your token/PAT has the required read permissions for the package feed
|
||||
- The token hasn't expired (especially relevant for AWS CodeArtifact)
|
||||
|
||||
### Package Not Found
|
||||
|
||||
**Symptom**: `No matching distribution found for my-private-package`.
|
||||
|
||||
**Check**:
|
||||
- The index URL in `pyproject.toml` ends with `/simple/`
|
||||
- The `[tool.uv.sources]` entry maps the correct package name to the correct index name
|
||||
- The package is actually published to your private registry
|
||||
- Run `uv lock` locally with the same credentials to verify resolution works
|
||||
|
||||
### Lock File Conflicts
|
||||
|
||||
**Symptom**: `uv lock` fails or produces unexpected results after adding a private index.
|
||||
|
||||
**Solution**: Set the credentials locally and regenerate:
|
||||
|
||||
```bash
|
||||
export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
|
||||
export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat
|
||||
uv lock
|
||||
```
|
||||
|
||||
Then commit the updated `uv.lock`.
|
||||
|
||||
## Related Guides
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="Prepare for Deployment" icon="clipboard-check" href="/en/enterprise/guides/prepare-for-deployment">
|
||||
Verify project structure and dependencies before deploying.
|
||||
</Card>
|
||||
<Card title="Deploy to AMP" icon="rocket" href="/en/enterprise/guides/deploy-to-amp">
|
||||
Deploy your crew or flow and configure environment variables.
|
||||
</Card>
|
||||
<Card title="Update Your Crew" icon="arrows-rotate" href="/en/enterprise/guides/update-crew">
|
||||
Update environment variables and push changes to a running deployment.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -73,6 +73,8 @@ When this flow runs, it will:
|
||||
| `default_outcome` | `str` | No | Outcome to use if no feedback provided. Must be in `emit` |
|
||||
| `metadata` | `dict` | No | Additional data for enterprise integrations |
|
||||
| `provider` | `HumanFeedbackProvider` | No | Custom provider for async/non-blocking feedback. See [Async Human Feedback](#async-human-feedback-non-blocking) |
|
||||
| `learn` | `bool` | No | Enable HITL learning: distill lessons from feedback and pre-review future output. Default `False`. See [Learning from Feedback](#learning-from-feedback) |
|
||||
| `learn_limit` | `int` | No | Max past lessons to recall for pre-review. Default `5` |
|
||||
|
||||
### Basic Usage (No Routing)
|
||||
|
||||
@@ -96,33 +98,43 @@ def handle_feedback(self, result):
|
||||
When you specify `emit`, the decorator becomes a router. The human's free-form feedback is interpreted by an LLM and collapsed into one of the specified outcomes:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
@human_feedback(
|
||||
message="Do you approve this content for publication?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
def review_content(self):
|
||||
return "Draft blog post content here..."
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback
|
||||
|
||||
@listen("approved")
|
||||
def publish(self, result):
|
||||
print(f"Publishing! User said: {result.feedback}")
|
||||
class ReviewFlow(Flow):
|
||||
@start()
|
||||
def generate_content(self):
|
||||
return "Draft blog post content here..."
|
||||
|
||||
@listen("rejected")
|
||||
def discard(self, result):
|
||||
print(f"Discarding. Reason: {result.feedback}")
|
||||
@human_feedback(
|
||||
message="Do you approve this content for publication?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
@listen(or_("generate_content", "needs_revision"))
|
||||
def review_content(self):
|
||||
return "Draft blog post content here..."
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise(self, result):
|
||||
print(f"Revising based on: {result.feedback}")
|
||||
@listen("approved")
|
||||
def publish(self, result):
|
||||
print(f"Publishing! User said: {result.feedback}")
|
||||
|
||||
@listen("rejected")
|
||||
def discard(self, result):
|
||||
print(f"Discarding. Reason: {result.feedback}")
|
||||
```
|
||||
|
||||
When the human says something like "needs more detail", the LLM collapses that to `"needs_revision"`, which triggers `review_content` again via `or_()` — creating a revision loop. The loop continues until the outcome is `"approved"` or `"rejected"`.
|
||||
|
||||
<Tip>
|
||||
The LLM uses structured outputs (function calling) when available to guarantee the response is one of your specified outcomes. This makes routing reliable and predictable.
|
||||
</Tip>
|
||||
|
||||
<Warning>
|
||||
A `@start()` method only runs once at the beginning of the flow. If you need a revision loop, separate the start method from the review method and use `@listen(or_("trigger", "revision_outcome"))` on the review method to enable the self-loop.
|
||||
</Warning>
|
||||
|
||||
## HumanFeedbackResult
|
||||
|
||||
The `HumanFeedbackResult` dataclass contains all information about a human feedback interaction:
|
||||
@@ -186,127 +198,183 @@ Each `HumanFeedbackResult` is appended to `human_feedback_history`, so multiple
|
||||
|
||||
## Complete Example: Content Approval Workflow
|
||||
|
||||
Here's a full example implementing a content review and approval workflow:
|
||||
Here's a full example implementing a content review and approval workflow with a revision loop:
|
||||
|
||||
<CodeGroup>
|
||||
|
||||
```python Code
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class ContentState(BaseModel):
|
||||
topic: str = ""
|
||||
draft: str = ""
|
||||
final_content: str = ""
|
||||
revision_count: int = 0
|
||||
status: str = "pending"
|
||||
|
||||
|
||||
class ContentApprovalFlow(Flow[ContentState]):
|
||||
"""A flow that generates content and gets human approval."""
|
||||
"""A flow that generates content and loops until the human approves."""
|
||||
|
||||
@start()
|
||||
def get_topic(self):
|
||||
self.state.topic = input("What topic should I write about? ")
|
||||
return self.state.topic
|
||||
|
||||
@listen(get_topic)
|
||||
def generate_draft(self, topic):
|
||||
# In real use, this would call an LLM
|
||||
self.state.draft = f"# {topic}\n\nThis is a draft about {topic}..."
|
||||
def generate_draft(self):
|
||||
self.state.draft = "# AI Safety\n\nThis is a draft about AI Safety..."
|
||||
return self.state.draft
|
||||
|
||||
@listen(generate_draft)
|
||||
@human_feedback(
|
||||
message="Please review this draft. Reply 'approved', 'rejected', or provide revision feedback:",
|
||||
message="Please review this draft. Approve, reject, or describe what needs changing:",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
def review_draft(self, draft):
|
||||
return draft
|
||||
@listen(or_("generate_draft", "needs_revision"))
|
||||
def review_draft(self):
|
||||
self.state.revision_count += 1
|
||||
return f"{self.state.draft} (v{self.state.revision_count})"
|
||||
|
||||
@listen("approved")
|
||||
def publish_content(self, result: HumanFeedbackResult):
|
||||
self.state.final_content = result.output
|
||||
print("\n✅ Content approved and published!")
|
||||
print(f"Reviewer comment: {result.feedback}")
|
||||
self.state.status = "published"
|
||||
print(f"Content approved and published! Reviewer said: {result.feedback}")
|
||||
return "published"
|
||||
|
||||
@listen("rejected")
|
||||
def handle_rejection(self, result: HumanFeedbackResult):
|
||||
print("\n❌ Content rejected")
|
||||
print(f"Reason: {result.feedback}")
|
||||
self.state.status = "rejected"
|
||||
print(f"Content rejected. Reason: {result.feedback}")
|
||||
return "rejected"
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise_content(self, result: HumanFeedbackResult):
|
||||
self.state.revision_count += 1
|
||||
print(f"\n📝 Revision #{self.state.revision_count} requested")
|
||||
print(f"Feedback: {result.feedback}")
|
||||
|
||||
# In a real flow, you might loop back to generate_draft
|
||||
# For this example, we just acknowledge
|
||||
return "revision_requested"
|
||||
|
||||
|
||||
# Run the flow
|
||||
flow = ContentApprovalFlow()
|
||||
result = flow.kickoff()
|
||||
print(f"\nFlow completed. Revisions requested: {flow.state.revision_count}")
|
||||
print(f"\nFlow completed. Status: {flow.state.status}, Reviews: {flow.state.revision_count}")
|
||||
```
|
||||
|
||||
```text Output
|
||||
What topic should I write about? AI Safety
|
||||
==================================================
|
||||
OUTPUT FOR REVIEW:
|
||||
==================================================
|
||||
# AI Safety
|
||||
|
||||
This is a draft about AI Safety... (v1)
|
||||
==================================================
|
||||
|
||||
Please review this draft. Approve, reject, or describe what needs changing:
|
||||
(Press Enter to skip, or type your feedback)
|
||||
|
||||
Your feedback: Needs more detail on alignment research
|
||||
|
||||
==================================================
|
||||
OUTPUT FOR REVIEW:
|
||||
==================================================
|
||||
# AI Safety
|
||||
|
||||
This is a draft about AI Safety...
|
||||
This is a draft about AI Safety... (v2)
|
||||
==================================================
|
||||
|
||||
Please review this draft. Reply 'approved', 'rejected', or provide revision feedback:
|
||||
Please review this draft. Approve, reject, or describe what needs changing:
|
||||
(Press Enter to skip, or type your feedback)
|
||||
|
||||
Your feedback: Looks good, approved!
|
||||
|
||||
✅ Content approved and published!
|
||||
Reviewer comment: Looks good, approved!
|
||||
Content approved and published! Reviewer said: Looks good, approved!
|
||||
|
||||
Flow completed. Revisions requested: 0
|
||||
Flow completed. Status: published, Reviews: 2
|
||||
```
|
||||
|
||||
</CodeGroup>
|
||||
|
||||
The key pattern is `@listen(or_("generate_draft", "needs_revision"))` — the review method listens to both the initial trigger and its own revision outcome, creating a self-loop that repeats until the human approves or rejects.
|
||||
|
||||
## Combining with Other Decorators
|
||||
|
||||
The `@human_feedback` decorator works with other flow decorators. Place it as the innermost decorator (closest to the function):
|
||||
The `@human_feedback` decorator works with `@start()`, `@listen()`, and `or_()`. Both decorator orderings work — the framework propagates attributes in both directions — but the recommended patterns are:
|
||||
|
||||
```python Code
|
||||
# Correct: @human_feedback is innermost (closest to the function)
|
||||
# One-shot review at the start of a flow (no self-loop)
|
||||
@start()
|
||||
@human_feedback(message="Review this:")
|
||||
@human_feedback(message="Review this:", emit=["approved", "rejected"], llm="gpt-4o-mini")
|
||||
def my_start_method(self):
|
||||
return "content"
|
||||
|
||||
# Linear review on a listener (no self-loop)
|
||||
@listen(other_method)
|
||||
@human_feedback(message="Review this too:")
|
||||
@human_feedback(message="Review this too:", emit=["good", "bad"], llm="gpt-4o-mini")
|
||||
def my_listener(self, data):
|
||||
return f"processed: {data}"
|
||||
|
||||
# Self-loop: review that can loop back for revisions
|
||||
@human_feedback(message="Approve or revise?", emit=["approved", "revise"], llm="gpt-4o-mini")
|
||||
@listen(or_("upstream_method", "revise"))
|
||||
def review_with_loop(self):
|
||||
return "content for review"
|
||||
```
|
||||
|
||||
<Tip>
|
||||
Place `@human_feedback` as the innermost decorator (last/closest to the function) so it wraps the method directly and can capture the return value before passing to the flow system.
|
||||
</Tip>
|
||||
### Self-loop pattern
|
||||
|
||||
To create a revision loop, the review method must listen to **both** an upstream trigger and its own revision outcome using `or_()`:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
def generate(self):
|
||||
return "initial draft"
|
||||
|
||||
@human_feedback(
|
||||
message="Approve or request changes?",
|
||||
emit=["revise", "approved"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="approved",
|
||||
)
|
||||
@listen(or_("generate", "revise"))
|
||||
def review(self):
|
||||
return "content"
|
||||
|
||||
@listen("approved")
|
||||
def publish(self):
|
||||
return "published"
|
||||
```
|
||||
|
||||
When the outcome is `"revise"`, the flow routes back to `review` (because it listens to `"revise"` via `or_()`). When the outcome is `"approved"`, the flow continues to `publish`. This works because the flow engine exempts routers from the "fire once" rule, allowing them to re-execute on each loop iteration.
|
||||
|
||||
### Chained routers
|
||||
|
||||
A listener triggered by one router's outcome can itself be a router:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
def generate(self):
|
||||
return "draft content"
|
||||
|
||||
@human_feedback(message="First review:", emit=["approved", "rejected"], llm="gpt-4o-mini")
|
||||
@listen("generate")
|
||||
def first_review(self):
|
||||
return "draft content"
|
||||
|
||||
@human_feedback(message="Final review:", emit=["publish", "hold"], llm="gpt-4o-mini")
|
||||
@listen("approved")
|
||||
def final_review(self, prev):
|
||||
return "final content"
|
||||
|
||||
@listen("publish")
|
||||
def on_publish(self, prev):
|
||||
return "published"
|
||||
|
||||
@listen("hold")
|
||||
def on_hold(self, prev):
|
||||
return "held for later"
|
||||
```
|
||||
|
||||
### Limitations
|
||||
|
||||
- **`@start()` methods run once**: A `@start()` method cannot self-loop. If you need a revision cycle, use a separate `@start()` method as the entry point and put the `@human_feedback` on a `@listen()` method.
|
||||
- **No `@start()` + `@listen()` on the same method**: This is a Flow framework constraint. A method is either a start point or a listener, not both.
|
||||
|
||||
## Best Practices
|
||||
|
||||
### 1. Write Clear Request Messages
|
||||
|
||||
The `request` parameter is what the human sees. Make it actionable:
|
||||
The `message` parameter is what the human sees. Make it actionable:
|
||||
|
||||
```python Code
|
||||
# ✅ Good - clear and actionable
|
||||
@@ -514,9 +582,9 @@ class ContentPipeline(Flow):
|
||||
@start()
|
||||
@human_feedback(
|
||||
message="Approve this content for publication?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
emit=["approved", "rejected"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
default_outcome="rejected",
|
||||
provider=SlackNotificationProvider("#content-reviews"),
|
||||
)
|
||||
def generate_content(self):
|
||||
@@ -532,11 +600,6 @@ class ContentPipeline(Flow):
|
||||
print(f"Archived. Reason: {result.feedback}")
|
||||
return {"status": "archived"}
|
||||
|
||||
@listen("needs_revision")
|
||||
def queue_revision(self, result):
|
||||
print(f"Queued for revision: {result.feedback}")
|
||||
return {"status": "revision_needed"}
|
||||
|
||||
|
||||
# Starting the flow (will pause and wait for Slack response)
|
||||
def start_content_pipeline():
|
||||
@@ -576,6 +639,64 @@ If you're using an async web framework (FastAPI, aiohttp, Slack Bolt async mode)
|
||||
5. **Automatic persistence**: State is automatically saved when `HumanFeedbackPending` is raised and uses `SQLiteFlowPersistence` by default
|
||||
6. **Custom persistence**: Pass a custom persistence instance to `from_pending()` if needed
|
||||
|
||||
## Learning from Feedback
|
||||
|
||||
The `learn=True` parameter enables a feedback loop between human reviewers and the memory system. When enabled, the system progressively improves its outputs by learning from past human corrections.
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **After feedback**: The LLM extracts generalizable lessons from the output + feedback and stores them in memory with `source="hitl"`. If the feedback is just approval (e.g. "looks good"), nothing is stored.
|
||||
2. **Before next review**: Past HITL lessons are recalled from memory and applied by the LLM to improve the output before the human sees it.
|
||||
|
||||
Over time, the human sees progressively better pre-reviewed output because each correction informs future reviews.
|
||||
|
||||
### Example
|
||||
|
||||
```python Code
|
||||
class ArticleReviewFlow(Flow):
|
||||
@start()
|
||||
def generate_article(self):
|
||||
return self.crew.kickoff(inputs={"topic": "AI Safety"}).raw
|
||||
|
||||
@human_feedback(
|
||||
message="Review this article draft:",
|
||||
emit=["approved", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
learn=True, # enable HITL learning
|
||||
)
|
||||
@listen(or_("generate_article", "needs_revision"))
|
||||
def review_article(self):
|
||||
return self.last_human_feedback.output if self.last_human_feedback else "article draft"
|
||||
|
||||
@listen("approved")
|
||||
def publish(self):
|
||||
print(f"Publishing: {self.last_human_feedback.output}")
|
||||
```
|
||||
|
||||
**First run**: The human sees the raw output and says "Always include citations for factual claims." The lesson is distilled and stored in memory.
|
||||
|
||||
**Second run**: The system recalls the citation lesson, pre-reviews the output to add citations, then shows the improved version. The human's job shifts from "fix everything" to "catch what the system missed."
|
||||
|
||||
### Configuration
|
||||
|
||||
| Parameter | Default | Description |
|
||||
|-----------|---------|-------------|
|
||||
| `learn` | `False` | Enable HITL learning |
|
||||
| `learn_limit` | `5` | Max past lessons to recall for pre-review |
|
||||
|
||||
### Key Design Decisions
|
||||
|
||||
- **Same LLM for everything**: The `llm` parameter on the decorator is shared by outcome collapsing, lesson distillation, and pre-review. No need to configure multiple models.
|
||||
- **Structured output**: Both distillation and pre-review use function calling with Pydantic models when the LLM supports it, falling back to text parsing otherwise.
|
||||
- **Non-blocking storage**: Lessons are stored via `remember_many()` which runs in a background thread -- the flow continues immediately.
|
||||
- **Graceful degradation**: If the LLM fails during distillation, nothing is stored. If it fails during pre-review, the raw output is shown. Neither failure blocks the flow.
|
||||
- **No scope/categories needed**: When storing lessons, only `source` is passed. The encoding pipeline infers scope, categories, and importance automatically.
|
||||
|
||||
<Note>
|
||||
`learn=True` requires the Flow to have memory available. Flows get memory automatically by default, but if you've disabled it with `_skip_auto_memory`, HITL learning will be silently skipped.
|
||||
</Note>
|
||||
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [Flows Overview](/en/concepts/flows) - Learn about CrewAI Flows
|
||||
@@ -583,3 +704,4 @@ If you're using an async web framework (FastAPI, aiohttp, Slack Bolt async mode)
|
||||
- [Flow Persistence](/en/concepts/flows#persistence) - Persisting flow state
|
||||
- [Routing with @router](/en/concepts/flows#router) - More about conditional routing
|
||||
- [Human Input on Execution](/en/learn/human-input-on-execution) - Task-level human input
|
||||
- [Memory](/en/concepts/memory) - The unified memory system used by HITL learning
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -38,22 +38,21 @@ CrewAI Enterprise는 AI 워크플로우를 협업적인 인간-AI 프로세스
|
||||
`@human_feedback` 데코레이터를 사용하여 Flow 내에 인간 검토 체크포인트를 구성합니다. 실행이 검토 포인트에 도달하면 시스템이 일시 중지되고, 담당자에게 이메일로 알리며, 응답을 기다립니다.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
|
||||
|
||||
class ContentApprovalFlow(Flow):
|
||||
@start()
|
||||
def generate_content(self):
|
||||
# AI가 콘텐츠 생성
|
||||
return "Q1 캠페인용 마케팅 카피 생성..."
|
||||
|
||||
@listen(generate_content)
|
||||
@human_feedback(
|
||||
message="브랜드 준수를 위해 이 콘텐츠를 검토해 주세요:",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
)
|
||||
def review_content(self, content):
|
||||
return content
|
||||
@listen(or_("generate_content", "needs_revision"))
|
||||
def review_content(self):
|
||||
return "검토용 마케팅 카피..."
|
||||
|
||||
@listen("approved")
|
||||
def publish_content(self, result: HumanFeedbackResult):
|
||||
@@ -62,10 +61,6 @@ class ContentApprovalFlow(Flow):
|
||||
@listen("rejected")
|
||||
def archive_content(self, result: HumanFeedbackResult):
|
||||
print(f"콘텐츠 거부됨. 사유: {result.feedback}")
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise_content(self, result: HumanFeedbackResult):
|
||||
print(f"수정 요청: {result.feedback}")
|
||||
```
|
||||
|
||||
완전한 구현 세부 사항은 [Flow에서 인간 피드백](/ko/learn/human-feedback-in-flows) 가이드를 참조하세요.
|
||||
|
||||
@@ -176,6 +176,11 @@ Crew를 GitHub 저장소에 푸시해야 합니다. 아직 Crew를 만들지 않
|
||||

|
||||
</Frame>
|
||||
|
||||
<Info>
|
||||
프라이빗 Python 패키지를 사용하시나요? 여기에 레지스트리 자격 증명도 추가해야 합니다.
|
||||
필요한 변수는 [프라이빗 패키지 레지스트리](/ko/enterprise/guides/private-package-registry)를 참조하세요.
|
||||
</Info>
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Crew 배포하기">
|
||||
|
||||
@@ -256,6 +256,12 @@ Crews와 Flows 모두 `src/project_name/main.py`에 진입점이 있습니다:
|
||||
1. **LLM API 키** (OpenAI, Anthropic, Google 등)
|
||||
2. **도구 API 키** - 외부 도구를 사용하는 경우 (Serper 등)
|
||||
|
||||
<Info>
|
||||
프로젝트가 **프라이빗 PyPI 레지스트리**의 패키지에 의존하는 경우, 레지스트리 인증 자격 증명도
|
||||
환경 변수로 구성해야 합니다. 자세한 내용은
|
||||
[프라이빗 패키지 레지스트리](/ko/enterprise/guides/private-package-registry) 가이드를 참조하세요.
|
||||
</Info>
|
||||
|
||||
<Tip>
|
||||
구성 문제를 조기에 발견하기 위해 배포 전에 동일한 환경 변수로
|
||||
로컬에서 프로젝트를 테스트하세요.
|
||||
|
||||
261
docs/ko/enterprise/guides/private-package-registry.mdx
Normal file
261
docs/ko/enterprise/guides/private-package-registry.mdx
Normal file
@@ -0,0 +1,261 @@
|
||||
---
|
||||
title: "프라이빗 패키지 레지스트리"
|
||||
description: "CrewAI AMP에서 인증된 PyPI 레지스트리의 프라이빗 Python 패키지 설치하기"
|
||||
icon: "lock"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
<Note>
|
||||
이 가이드는 CrewAI AMP에 배포할 때 프라이빗 PyPI 레지스트리(Azure DevOps Artifacts, GitHub Packages,
|
||||
GitLab, AWS CodeArtifact 등)에서 Python 패키지를 설치하도록 CrewAI 프로젝트를 구성하는 방법을 다룹니다.
|
||||
</Note>
|
||||
|
||||
## 이 가이드가 필요한 경우
|
||||
|
||||
프로젝트가 공개 PyPI가 아닌 프라이빗 레지스트리에 호스팅된 내부 또는 독점 Python 패키지에
|
||||
의존하는 경우, 다음을 수행해야 합니다:
|
||||
|
||||
1. UV에 패키지를 **어디서** 찾을지 알려줍니다 (index URL)
|
||||
2. UV에 **어떤** 패키지가 해당 index에서 오는지 알려줍니다 (source 매핑)
|
||||
3. UV가 설치 중에 인증할 수 있도록 **자격 증명**을 제공합니다
|
||||
|
||||
CrewAI AMP는 의존성 해결 및 설치에 [UV](https://docs.astral.sh/uv/)를 사용합니다.
|
||||
UV는 `pyproject.toml` 구성과 자격 증명용 환경 변수를 결합하여 인증된 프라이빗 레지스트리를 지원합니다.
|
||||
|
||||
## 1단계: pyproject.toml 구성
|
||||
|
||||
`pyproject.toml`에서 세 가지 요소가 함께 작동합니다:
|
||||
|
||||
### 1a. 의존성 선언
|
||||
|
||||
프라이빗 패키지를 다른 의존성과 마찬가지로 `[project.dependencies]`에 추가합니다:
|
||||
|
||||
```toml
|
||||
[project]
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.100.1,<1.0.0",
|
||||
"my-private-package>=1.2.0",
|
||||
]
|
||||
```
|
||||
|
||||
### 1b. index 정의
|
||||
|
||||
프라이빗 레지스트리를 `[[tool.uv.index]]` 아래에 명명된 index로 등록합니다:
|
||||
|
||||
```toml
|
||||
[[tool.uv.index]]
|
||||
name = "my-private-registry"
|
||||
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
|
||||
explicit = true
|
||||
```
|
||||
|
||||
<Info>
|
||||
`name` 필드는 중요합니다 — UV는 이를 사용하여 인증을 위한 환경 변수 이름을
|
||||
구성합니다 (아래 [2단계](#2단계-인증-자격-증명-설정)를 참조하세요).
|
||||
|
||||
`explicit = true`를 설정하면 UV가 모든 패키지에 대해 이 index를 검색하지 않습니다 —
|
||||
`[tool.uv.sources]`에서 명시적으로 매핑한 패키지만 검색합니다. 이렇게 하면 프라이빗
|
||||
레지스트리에 대한 불필요한 쿼리를 방지하고 의존성 혼동 공격을 차단할 수 있습니다.
|
||||
</Info>
|
||||
|
||||
### 1c. 패키지를 index에 매핑
|
||||
|
||||
`[tool.uv.sources]`를 사용하여 프라이빗 index에서 해결해야 할 패키지를 UV에 알려줍니다:
|
||||
|
||||
```toml
|
||||
[tool.uv.sources]
|
||||
my-private-package = { index = "my-private-registry" }
|
||||
```
|
||||
|
||||
### 전체 예시
|
||||
|
||||
```toml
|
||||
[project]
|
||||
name = "my-crew-project"
|
||||
version = "0.1.0"
|
||||
requires-python = ">=3.10,<=3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.100.1,<1.0.0",
|
||||
"my-private-package>=1.2.0",
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
type = "crew"
|
||||
|
||||
[[tool.uv.index]]
|
||||
name = "my-private-registry"
|
||||
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
|
||||
explicit = true
|
||||
|
||||
[tool.uv.sources]
|
||||
my-private-package = { index = "my-private-registry" }
|
||||
```
|
||||
|
||||
`pyproject.toml`을 업데이트한 후 lock 파일을 다시 생성합니다:
|
||||
|
||||
```bash
|
||||
uv lock
|
||||
```
|
||||
|
||||
<Warning>
|
||||
업데이트된 `uv.lock`을 항상 `pyproject.toml` 변경 사항과 함께 커밋하세요.
|
||||
lock 파일은 배포에 필수입니다 — [배포 준비하기](/ko/enterprise/guides/prepare-for-deployment)를 참조하세요.
|
||||
</Warning>
|
||||
|
||||
## 2단계: 인증 자격 증명 설정
|
||||
|
||||
UV는 `pyproject.toml`에서 정의한 index 이름을 기반으로 한 명명 규칙을 따르는
|
||||
환경 변수를 사용하여 프라이빗 index에 인증합니다:
|
||||
|
||||
```
|
||||
UV_INDEX_{UPPER_NAME}_USERNAME
|
||||
UV_INDEX_{UPPER_NAME}_PASSWORD
|
||||
```
|
||||
|
||||
여기서 `{UPPER_NAME}`은 index 이름을 **대문자**로 변환하고 **하이픈을 언더스코어로 대체**한 것입니다.
|
||||
|
||||
예를 들어, `my-private-registry`라는 이름의 index는 다음을 사용합니다:
|
||||
|
||||
| 변수 | 값 |
|
||||
|------|-----|
|
||||
| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | 레지스트리 사용자 이름 또는 토큰 이름 |
|
||||
| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | 레지스트리 비밀번호 또는 토큰/PAT |
|
||||
|
||||
<Warning>
|
||||
이 환경 변수는 CrewAI AMP **환경 변수** 설정을 통해 **반드시** 추가해야 합니다 —
|
||||
전역적으로 또는 배포 수준에서. `.env` 파일에 설정하거나 프로젝트에 하드코딩할 수 없습니다.
|
||||
|
||||
아래 [AMP에서 환경 변수 설정](#amp에서-환경-변수-설정)을 참조하세요.
|
||||
</Warning>
|
||||
|
||||
## 레지스트리 제공업체 참조
|
||||
|
||||
아래 표는 일반적인 레지스트리 제공업체의 index URL 형식과 자격 증명 값을 보여줍니다.
|
||||
자리 표시자 값을 실제 조직 및 피드 세부 정보로 대체하세요.
|
||||
|
||||
| 제공업체 | Index URL | 사용자 이름 | 비밀번호 |
|
||||
|---------|-----------|-----------|---------|
|
||||
| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | 비어 있지 않은 임의의 문자열 (예: `token`) | Packaging Read 범위의 Personal Access Token (PAT) |
|
||||
| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | GitHub 사용자 이름 | `read:packages` 범위의 Personal Access Token (classic) |
|
||||
| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | `read_api` 범위의 Project 또는 Personal Access Token |
|
||||
| **AWS CodeArtifact** | `aws codeartifact get-repository-endpoint`의 URL 사용 | `aws` | `aws codeartifact get-authorization-token`의 토큰 |
|
||||
| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Base64로 인코딩된 서비스 계정 키 |
|
||||
| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | 사용자 이름 또는 이메일 | API 키 또는 ID 토큰 |
|
||||
| **자체 호스팅 (devpi, Nexus 등)** | 레지스트리의 simple API URL | 레지스트리 사용자 이름 | 레지스트리 비밀번호 |
|
||||
|
||||
<Tip>
|
||||
**AWS CodeArtifact**의 경우 인증 토큰이 주기적으로 만료됩니다.
|
||||
만료되면 `UV_INDEX_*_PASSWORD` 값을 갱신해야 합니다.
|
||||
CI/CD 파이프라인에서 이를 자동화하는 것을 고려하세요.
|
||||
</Tip>
|
||||
|
||||
## AMP에서 환경 변수 설정
|
||||
|
||||
프라이빗 레지스트리 자격 증명은 CrewAI AMP에서 환경 변수로 구성해야 합니다.
|
||||
두 가지 옵션이 있습니다:
|
||||
|
||||
<Tabs>
|
||||
<Tab title="웹 인터페이스">
|
||||
1. [CrewAI AMP](https://app.crewai.com)에 로그인합니다
|
||||
2. 자동화로 이동합니다
|
||||
3. **Environment Variables** 탭을 엽니다
|
||||
4. 각 변수 (`UV_INDEX_*_USERNAME` 및 `UV_INDEX_*_PASSWORD`)에 값을 추가합니다
|
||||
|
||||
자세한 내용은 [AMP에 배포하기 — 환경 변수 설정하기](/ko/enterprise/guides/deploy-to-amp#환경-변수-설정하기) 단계를 참조하세요.
|
||||
</Tab>
|
||||
<Tab title="CLI 배포">
|
||||
`crewai deploy create`를 실행하기 전에 로컬 `.env` 파일에 변수를 추가합니다.
|
||||
CLI가 이를 안전하게 플랫폼으로 전송합니다:
|
||||
|
||||
```bash
|
||||
# .env
|
||||
OPENAI_API_KEY=sk-...
|
||||
UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
|
||||
UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here
|
||||
```
|
||||
|
||||
```bash
|
||||
crewai deploy create
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
<Warning>
|
||||
자격 증명을 저장소에 **절대** 커밋하지 마세요. 모든 비밀 정보에는 AMP 환경 변수를 사용하세요.
|
||||
`.env` 파일은 `.gitignore`에 포함되어야 합니다.
|
||||
</Warning>
|
||||
|
||||
기존 배포의 자격 증명을 업데이트하려면 [Crew 업데이트하기 — 환경 변수](/ko/enterprise/guides/update-crew)를 참조하세요.
|
||||
|
||||
## 전체 동작 흐름
|
||||
|
||||
CrewAI AMP가 자동화를 빌드할 때, 해결 흐름은 다음과 같이 작동합니다:
|
||||
|
||||
<Steps>
|
||||
<Step title="빌드 시작">
|
||||
AMP가 저장소를 가져오고 `pyproject.toml`과 `uv.lock`을 읽습니다.
|
||||
</Step>
|
||||
<Step title="UV가 의존성 해결">
|
||||
UV가 `[tool.uv.sources]`를 읽어 각 패키지가 어떤 index에서 와야 하는지 결정합니다.
|
||||
</Step>
|
||||
<Step title="UV가 인증">
|
||||
각 프라이빗 index에 대해 UV가 AMP에서 구성한 환경 변수에서
|
||||
`UV_INDEX_{NAME}_USERNAME`과 `UV_INDEX_{NAME}_PASSWORD`를 조회합니다.
|
||||
</Step>
|
||||
<Step title="패키지 설치">
|
||||
UV가 공개(PyPI) 및 프라이빗(레지스트리) 패키지를 모두 다운로드하고 설치합니다.
|
||||
</Step>
|
||||
<Step title="자동화 실행">
|
||||
모든 의존성이 사용 가능한 상태에서 crew 또는 flow가 시작됩니다.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## 문제 해결
|
||||
|
||||
### 빌드 중 인증 오류
|
||||
|
||||
**증상**: 프라이빗 패키지를 해결할 때 `401 Unauthorized` 또는 `403 Forbidden`으로 빌드가 실패합니다.
|
||||
|
||||
**확인사항**:
|
||||
- `UV_INDEX_*` 환경 변수 이름이 index 이름과 정확히 일치하는지 확인합니다 (대문자, 하이픈 -> 언더스코어)
|
||||
- 자격 증명이 로컬 `.env`뿐만 아니라 AMP 환경 변수에 설정되어 있는지 확인합니다
|
||||
- 토큰/PAT에 패키지 피드에 필요한 읽기 권한이 있는지 확인합니다
|
||||
- 토큰이 만료되지 않았는지 확인합니다 (특히 AWS CodeArtifact의 경우)
|
||||
|
||||
### 패키지를 찾을 수 없음
|
||||
|
||||
**증상**: `No matching distribution found for my-private-package`.
|
||||
|
||||
**확인사항**:
|
||||
- `pyproject.toml`의 index URL이 `/simple/`로 끝나는지 확인합니다
|
||||
- `[tool.uv.sources]` 항목이 올바른 패키지 이름을 올바른 index 이름에 매핑하는지 확인합니다
|
||||
- 패키지가 실제로 프라이빗 레지스트리에 게시되어 있는지 확인합니다
|
||||
- 동일한 자격 증명으로 로컬에서 `uv lock`을 실행하여 해결이 작동하는지 확인합니다
|
||||
|
||||
### Lock 파일 충돌
|
||||
|
||||
**증상**: 프라이빗 index를 추가한 후 `uv lock`이 실패하거나 예상치 못한 결과를 생성합니다.
|
||||
|
||||
**해결책**: 로컬에서 자격 증명을 설정하고 다시 생성합니다:
|
||||
|
||||
```bash
|
||||
export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
|
||||
export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat
|
||||
uv lock
|
||||
```
|
||||
|
||||
그런 다음 업데이트된 `uv.lock`을 커밋합니다.
|
||||
|
||||
## 관련 가이드
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="배포 준비하기" icon="clipboard-check" href="/ko/enterprise/guides/prepare-for-deployment">
|
||||
배포 전에 프로젝트 구조와 의존성을 확인합니다.
|
||||
</Card>
|
||||
<Card title="AMP에 배포하기" icon="rocket" href="/ko/enterprise/guides/deploy-to-amp">
|
||||
crew 또는 flow를 배포하고 환경 변수를 구성합니다.
|
||||
</Card>
|
||||
<Card title="Crew 업데이트하기" icon="arrows-rotate" href="/ko/enterprise/guides/update-crew">
|
||||
환경 변수를 업데이트하고 실행 중인 배포에 변경 사항을 푸시합니다.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -73,6 +73,8 @@ flow.kickoff()
|
||||
| `default_outcome` | `str` | 아니오 | 피드백이 제공되지 않을 때 사용할 outcome. `emit`에 있어야 합니다 |
|
||||
| `metadata` | `dict` | 아니오 | 엔터프라이즈 통합을 위한 추가 데이터 |
|
||||
| `provider` | `HumanFeedbackProvider` | 아니오 | 비동기/논블로킹 피드백을 위한 커스텀 프로바이더. [비동기 인간 피드백](#비동기-인간-피드백-논블로킹) 참조 |
|
||||
| `learn` | `bool` | 아니오 | HITL 학습 활성화: 피드백에서 교훈을 추출하고 향후 출력을 사전 검토합니다. 기본값 `False`. [피드백에서 학습하기](#피드백에서-학습하기) 참조 |
|
||||
| `learn_limit` | `int` | 아니오 | 사전 검토를 위해 불러올 최대 과거 교훈 수. 기본값 `5` |
|
||||
|
||||
### 기본 사용법 (라우팅 없음)
|
||||
|
||||
@@ -96,33 +98,43 @@ def handle_feedback(self, result):
|
||||
`emit`을 지정하면, 데코레이터는 라우터가 됩니다. 인간의 자유 형식 피드백이 LLM에 의해 해석되어 지정된 outcome 중 하나로 매핑됩니다:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
@human_feedback(
|
||||
message="이 콘텐츠의 출판을 승인하시겠습니까?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
def review_content(self):
|
||||
return "블로그 게시물 초안 내용..."
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback
|
||||
|
||||
@listen("approved")
|
||||
def publish(self, result):
|
||||
print(f"출판 중! 사용자 의견: {result.feedback}")
|
||||
class ReviewFlow(Flow):
|
||||
@start()
|
||||
def generate_content(self):
|
||||
return "블로그 게시물 초안 내용..."
|
||||
|
||||
@listen("rejected")
|
||||
def discard(self, result):
|
||||
print(f"폐기됨. 이유: {result.feedback}")
|
||||
@human_feedback(
|
||||
message="이 콘텐츠의 출판을 승인하시겠습니까?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
@listen(or_("generate_content", "needs_revision"))
|
||||
def review_content(self):
|
||||
return "블로그 게시물 초안 내용..."
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise(self, result):
|
||||
print(f"다음을 기반으로 수정 중: {result.feedback}")
|
||||
@listen("approved")
|
||||
def publish(self, result):
|
||||
print(f"출판 중! 사용자 의견: {result.feedback}")
|
||||
|
||||
@listen("rejected")
|
||||
def discard(self, result):
|
||||
print(f"폐기됨. 이유: {result.feedback}")
|
||||
```
|
||||
|
||||
사용자가 "더 자세한 내용이 필요합니다"와 같이 말하면, LLM이 이를 `"needs_revision"`으로 매핑하고, `or_()`를 통해 `review_content`가 다시 트리거됩니다 — 수정 루프가 생성됩니다. outcome이 `"approved"` 또는 `"rejected"`가 될 때까지 루프가 계속됩니다.
|
||||
|
||||
<Tip>
|
||||
LLM은 가능한 경우 구조화된 출력(function calling)을 사용하여 응답이 지정된 outcome 중 하나임을 보장합니다. 이로 인해 라우팅이 신뢰할 수 있고 예측 가능해집니다.
|
||||
</Tip>
|
||||
|
||||
<Warning>
|
||||
`@start()` 메서드는 flow 시작 시 한 번만 실행됩니다. 수정 루프가 필요한 경우, start 메서드를 review 메서드와 분리하고 review 메서드에 `@listen(or_("trigger", "revision_outcome"))`를 사용하여 self-loop을 활성화하세요.
|
||||
</Warning>
|
||||
|
||||
## HumanFeedbackResult
|
||||
|
||||
`HumanFeedbackResult` 데이터클래스는 인간 피드백 상호작용에 대한 모든 정보를 포함합니다:
|
||||
@@ -191,116 +203,162 @@ def summarize(self):
|
||||
<CodeGroup>
|
||||
|
||||
```python Code
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class ContentState(BaseModel):
|
||||
topic: str = ""
|
||||
draft: str = ""
|
||||
final_content: str = ""
|
||||
revision_count: int = 0
|
||||
status: str = "pending"
|
||||
|
||||
|
||||
class ContentApprovalFlow(Flow[ContentState]):
|
||||
"""콘텐츠를 생성하고 인간의 승인을 받는 Flow입니다."""
|
||||
"""콘텐츠를 생성하고 승인될 때까지 반복하는 Flow."""
|
||||
|
||||
@start()
|
||||
def get_topic(self):
|
||||
self.state.topic = input("어떤 주제에 대해 글을 쓸까요? ")
|
||||
return self.state.topic
|
||||
|
||||
@listen(get_topic)
|
||||
def generate_draft(self, topic):
|
||||
# 실제 사용에서는 LLM을 호출합니다
|
||||
self.state.draft = f"# {topic}\n\n{topic}에 대한 초안입니다..."
|
||||
def generate_draft(self):
|
||||
self.state.draft = "# AI 안전\n\nAI 안전에 대한 초안..."
|
||||
return self.state.draft
|
||||
|
||||
@listen(generate_draft)
|
||||
@human_feedback(
|
||||
message="이 초안을 검토해 주세요. 'approved', 'rejected'로 답하거나 수정 피드백을 제공해 주세요:",
|
||||
message="이 초안을 검토해 주세요. 승인, 거부 또는 변경이 필요한 사항을 설명해 주세요:",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
def review_draft(self, draft):
|
||||
return draft
|
||||
@listen(or_("generate_draft", "needs_revision"))
|
||||
def review_draft(self):
|
||||
self.state.revision_count += 1
|
||||
return f"{self.state.draft} (v{self.state.revision_count})"
|
||||
|
||||
@listen("approved")
|
||||
def publish_content(self, result: HumanFeedbackResult):
|
||||
self.state.final_content = result.output
|
||||
print("\n✅ 콘텐츠가 승인되어 출판되었습니다!")
|
||||
print(f"검토자 코멘트: {result.feedback}")
|
||||
self.state.status = "published"
|
||||
print(f"콘텐츠 승인 및 게시! 리뷰어 의견: {result.feedback}")
|
||||
return "published"
|
||||
|
||||
@listen("rejected")
|
||||
def handle_rejection(self, result: HumanFeedbackResult):
|
||||
print("\n❌ 콘텐츠가 거부되었습니다")
|
||||
print(f"이유: {result.feedback}")
|
||||
self.state.status = "rejected"
|
||||
print(f"콘텐츠 거부됨. 이유: {result.feedback}")
|
||||
return "rejected"
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise_content(self, result: HumanFeedbackResult):
|
||||
self.state.revision_count += 1
|
||||
print(f"\n📝 수정 #{self.state.revision_count} 요청됨")
|
||||
print(f"피드백: {result.feedback}")
|
||||
|
||||
# 실제 Flow에서는 generate_draft로 돌아갈 수 있습니다
|
||||
# 이 예제에서는 단순히 확인합니다
|
||||
return "revision_requested"
|
||||
|
||||
|
||||
# Flow 실행
|
||||
flow = ContentApprovalFlow()
|
||||
result = flow.kickoff()
|
||||
print(f"\nFlow 완료. 요청된 수정: {flow.state.revision_count}")
|
||||
print(f"\nFlow 완료. 상태: {flow.state.status}, 검토 횟수: {flow.state.revision_count}")
|
||||
```
|
||||
|
||||
```text Output
|
||||
어떤 주제에 대해 글을 쓸까요? AI 안전
|
||||
==================================================
|
||||
OUTPUT FOR REVIEW:
|
||||
==================================================
|
||||
# AI 안전
|
||||
|
||||
AI 안전에 대한 초안... (v1)
|
||||
==================================================
|
||||
|
||||
이 초안을 검토해 주세요. 승인, 거부 또는 변경이 필요한 사항을 설명해 주세요:
|
||||
(Press Enter to skip, or type your feedback)
|
||||
|
||||
Your feedback: 더 자세한 내용이 필요합니다
|
||||
|
||||
==================================================
|
||||
OUTPUT FOR REVIEW:
|
||||
==================================================
|
||||
# AI 안전
|
||||
|
||||
AI 안전에 대한 초안입니다...
|
||||
AI 안전에 대한 초안... (v2)
|
||||
==================================================
|
||||
|
||||
이 초안을 검토해 주세요. 'approved', 'rejected'로 답하거나 수정 피드백을 제공해 주세요:
|
||||
이 초안을 검토해 주세요. 승인, 거부 또는 변경이 필요한 사항을 설명해 주세요:
|
||||
(Press Enter to skip, or type your feedback)
|
||||
|
||||
Your feedback: 좋아 보입니다, 승인!
|
||||
|
||||
✅ 콘텐츠가 승인되어 출판되었습니다!
|
||||
검토자 코멘트: 좋아 보입니다, 승인!
|
||||
콘텐츠 승인 및 게시! 리뷰어 의견: 좋아 보입니다, 승인!
|
||||
|
||||
Flow 완료. 요청된 수정: 0
|
||||
Flow 완료. 상태: published, 검토 횟수: 2
|
||||
```
|
||||
|
||||
</CodeGroup>
|
||||
|
||||
## 다른 데코레이터와 결합하기
|
||||
|
||||
`@human_feedback` 데코레이터는 다른 Flow 데코레이터와 함께 작동합니다. 가장 안쪽 데코레이터(함수에 가장 가까운)로 배치하세요:
|
||||
`@human_feedback` 데코레이터는 `@start()`, `@listen()`, `or_()`와 함께 작동합니다. 데코레이터 순서는 두 가지 모두 동작합니다—프레임워크가 양방향으로 속성을 전파합니다—하지만 권장 패턴은 다음과 같습니다:
|
||||
|
||||
```python Code
|
||||
# 올바름: @human_feedback이 가장 안쪽(함수에 가장 가까움)
|
||||
# Flow 시작 시 일회성 검토 (self-loop 없음)
|
||||
@start()
|
||||
@human_feedback(message="이것을 검토해 주세요:")
|
||||
@human_feedback(message="이것을 검토해 주세요:", emit=["approved", "rejected"], llm="gpt-4o-mini")
|
||||
def my_start_method(self):
|
||||
return "content"
|
||||
|
||||
# 리스너에서 선형 검토 (self-loop 없음)
|
||||
@listen(other_method)
|
||||
@human_feedback(message="이것도 검토해 주세요:")
|
||||
@human_feedback(message="이것도 검토해 주세요:", emit=["good", "bad"], llm="gpt-4o-mini")
|
||||
def my_listener(self, data):
|
||||
return f"processed: {data}"
|
||||
|
||||
# Self-loop: 수정을 위해 반복할 수 있는 검토
|
||||
@human_feedback(message="승인 또는 수정 요청?", emit=["approved", "revise"], llm="gpt-4o-mini")
|
||||
@listen(or_("upstream_method", "revise"))
|
||||
def review_with_loop(self):
|
||||
return "content for review"
|
||||
```
|
||||
|
||||
<Tip>
|
||||
`@human_feedback`를 가장 안쪽 데코레이터(마지막/함수에 가장 가까움)로 배치하여 메서드를 직접 래핑하고 Flow 시스템에 전달하기 전에 반환 값을 캡처할 수 있도록 하세요.
|
||||
</Tip>
|
||||
### Self-loop 패턴
|
||||
|
||||
수정 루프를 만들려면 `or_()`를 사용하여 검토 메서드가 **상위 트리거**와 **자체 수정 outcome**을 모두 리스닝해야 합니다:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
def generate(self):
|
||||
return "initial draft"
|
||||
|
||||
@human_feedback(
|
||||
message="승인하시겠습니까, 아니면 변경을 요청하시겠습니까?",
|
||||
emit=["revise", "approved"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="approved",
|
||||
)
|
||||
@listen(or_("generate", "revise"))
|
||||
def review(self):
|
||||
return "content"
|
||||
|
||||
@listen("approved")
|
||||
def publish(self):
|
||||
return "published"
|
||||
```
|
||||
|
||||
outcome이 `"revise"`이면 flow가 `review`로 다시 라우팅됩니다 (`or_()`를 통해 `"revise"`를 리스닝하기 때문). outcome이 `"approved"`이면 flow가 `publish`로 계속됩니다. flow 엔진이 라우터를 "한 번만 실행" 규칙에서 제외하여 각 루프 반복마다 재실행할 수 있기 때문에 이 패턴이 동작합니다.
|
||||
|
||||
### 체인된 라우터
|
||||
|
||||
한 라우터의 outcome으로 트리거된 리스너가 그 자체로 라우터가 될 수 있습니다:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
@human_feedback(message="첫 번째 검토:", emit=["approved", "rejected"], llm="gpt-4o-mini")
|
||||
def draft(self):
|
||||
return "draft content"
|
||||
|
||||
@listen("approved")
|
||||
@human_feedback(message="최종 검토:", emit=["publish", "revise"], llm="gpt-4o-mini")
|
||||
def final_review(self, prev):
|
||||
return "final content"
|
||||
|
||||
@listen("publish")
|
||||
def on_publish(self, prev):
|
||||
return "published"
|
||||
```
|
||||
|
||||
### 제한 사항
|
||||
|
||||
- **`@start()` 메서드는 한 번만 실행**: `@start()` 메서드는 self-loop할 수 없습니다. 수정 주기가 필요하면 별도의 `@start()` 메서드를 진입점으로 사용하고 `@listen()` 메서드에 `@human_feedback`를 배치하세요.
|
||||
- **동일 메서드에 `@start()` + `@listen()` 불가**: 이는 Flow 프레임워크 제약입니다. 메서드는 시작점이거나 리스너여야 하며, 둘 다일 수 없습니다.
|
||||
|
||||
## 모범 사례
|
||||
|
||||
@@ -514,9 +572,9 @@ class ContentPipeline(Flow):
|
||||
@start()
|
||||
@human_feedback(
|
||||
message="이 콘텐츠의 출판을 승인하시겠습니까?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
emit=["approved", "rejected"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
default_outcome="rejected",
|
||||
provider=SlackNotificationProvider("#content-reviews"),
|
||||
)
|
||||
def generate_content(self):
|
||||
@@ -532,11 +590,6 @@ class ContentPipeline(Flow):
|
||||
print(f"보관됨. 이유: {result.feedback}")
|
||||
return {"status": "archived"}
|
||||
|
||||
@listen("needs_revision")
|
||||
def queue_revision(self, result):
|
||||
print(f"수정 대기열에 추가됨: {result.feedback}")
|
||||
return {"status": "revision_needed"}
|
||||
|
||||
|
||||
# Flow 시작 (Slack 응답을 기다리며 일시 중지)
|
||||
def start_content_pipeline():
|
||||
@@ -576,6 +629,64 @@ async def on_slack_feedback_async(flow_id: str, slack_message: str):
|
||||
5. **자동 영속성**: `HumanFeedbackPending`이 발생하면 상태가 자동으로 저장되며 기본적으로 `SQLiteFlowPersistence` 사용
|
||||
6. **커스텀 영속성**: 필요한 경우 `from_pending()`에 커스텀 영속성 인스턴스 전달
|
||||
|
||||
## 피드백에서 학습하기
|
||||
|
||||
`learn=True` 매개변수는 인간 검토자와 메모리 시스템 간의 피드백 루프를 활성화합니다. 활성화되면 시스템은 과거 인간의 수정 사항에서 학습하여 출력을 점진적으로 개선합니다.
|
||||
|
||||
### 작동 방식
|
||||
|
||||
1. **피드백 후**: LLM이 출력 + 피드백에서 일반화 가능한 교훈을 추출하고 `source="hitl"`로 메모리에 저장합니다. 피드백이 단순한 승인(예: "좋아 보입니다")인 경우 아무것도 저장하지 않습니다.
|
||||
2. **다음 검토 전**: 과거 HITL 교훈을 메모리에서 불러와 LLM이 인간이 보기 전에 출력을 개선하는 데 적용합니다.
|
||||
|
||||
시간이 지남에 따라 각 수정 사항이 향후 검토에 반영되므로 인간은 점진적으로 더 나은 사전 검토된 출력을 보게 됩니다.
|
||||
|
||||
### 예제
|
||||
|
||||
```python Code
|
||||
class ArticleReviewFlow(Flow):
|
||||
@start()
|
||||
def generate_article(self):
|
||||
return self.crew.kickoff(inputs={"topic": "AI Safety"}).raw
|
||||
|
||||
@human_feedback(
|
||||
message="이 글 초안을 검토해 주세요:",
|
||||
emit=["approved", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
learn=True,
|
||||
)
|
||||
@listen(or_("generate_article", "needs_revision"))
|
||||
def review_article(self):
|
||||
return self.last_human_feedback.output if self.last_human_feedback else "article draft"
|
||||
|
||||
@listen("approved")
|
||||
def publish(self):
|
||||
print(f"Publishing: {self.last_human_feedback.output}")
|
||||
```
|
||||
|
||||
**첫 번째 실행**: 인간이 원시 출력을 보고 "사실에 대한 주장에는 항상 인용을 포함하세요."라고 말합니다. 교훈이 추출되어 메모리에 저장됩니다.
|
||||
|
||||
**두 번째 실행**: 시스템이 인용 교훈을 불러와 출력을 사전 검토하여 인용을 추가한 후 개선된 버전을 표시합니다. 인간의 역할이 "모든 것을 수정"에서 "시스템이 놓친 것을 찾기"로 전환됩니다.
|
||||
|
||||
### 구성
|
||||
|
||||
| 매개변수 | 기본값 | 설명 |
|
||||
|-----------|--------|------|
|
||||
| `learn` | `False` | HITL 학습 활성화 |
|
||||
| `learn_limit` | `5` | 사전 검토를 위해 불러올 최대 과거 교훈 수 |
|
||||
|
||||
### 주요 설계 결정
|
||||
|
||||
- **모든 것에 동일한 LLM 사용**: 데코레이터의 `llm` 매개변수는 outcome 매핑, 교훈 추출, 사전 검토에 공유됩니다. 여러 모델을 구성할 필요가 없습니다.
|
||||
- **구조화된 출력**: 추출과 사전 검토 모두 LLM이 지원하는 경우 Pydantic 모델과 함께 function calling을 사용하고, 그렇지 않으면 텍스트 파싱으로 폴백합니다.
|
||||
- **논블로킹 저장**: 교훈은 백그라운드 스레드에서 실행되는 `remember_many()`를 통해 저장됩니다 -- Flow는 즉시 계속됩니다.
|
||||
- **우아한 저하**: 추출 중 LLM이 실패하면 아무것도 저장하지 않습니다. 사전 검토 중 실패하면 원시 출력이 표시됩니다. 어느 쪽의 실패도 Flow를 차단하지 않습니다.
|
||||
- **범위/카테고리 불필요**: 교훈을 저장할 때 `source`만 전달됩니다. 인코딩 파이프라인이 범위, 카테고리, 중요도를 자동으로 추론합니다.
|
||||
|
||||
<Note>
|
||||
`learn=True`는 Flow에 메모리가 사용 가능해야 합니다. Flow는 기본적으로 자동으로 메모리를 얻지만, `_skip_auto_memory`로 비활성화한 경우 HITL 학습은 조용히 건너뜁니다.
|
||||
</Note>
|
||||
|
||||
|
||||
## 관련 문서
|
||||
|
||||
- [Flow 개요](/ko/concepts/flows) - CrewAI Flow에 대해 알아보기
|
||||
@@ -583,3 +694,4 @@ async def on_slack_feedback_async(flow_id: str, slack_message: str):
|
||||
- [Flow 영속성](/ko/concepts/flows#persistence) - Flow 상태 영속화
|
||||
- [@router를 사용한 라우팅](/ko/concepts/flows#router) - 조건부 라우팅에 대해 더 알아보기
|
||||
- [실행 시 인간 입력](/ko/learn/human-input-on-execution) - 태스크 수준 인간 입력
|
||||
- [메모리](/ko/concepts/memory) - HITL 학습에서 사용되는 통합 메모리 시스템
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -38,22 +38,21 @@ O CrewAI Enterprise oferece um sistema abrangente de gerenciamento Human-in-the-
|
||||
Configure checkpoints de revisão humana em seus Flows usando o decorador `@human_feedback`. Quando a execução atinge um ponto de revisão, o sistema pausa, notifica o responsável via email e aguarda uma resposta.
|
||||
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
|
||||
|
||||
class ContentApprovalFlow(Flow):
|
||||
@start()
|
||||
def generate_content(self):
|
||||
# IA gera conteúdo
|
||||
return "Texto de marketing gerado para campanha Q1..."
|
||||
|
||||
@listen(generate_content)
|
||||
@human_feedback(
|
||||
message="Por favor, revise este conteúdo para conformidade com a marca:",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
)
|
||||
def review_content(self, content):
|
||||
return content
|
||||
@listen(or_("generate_content", "needs_revision"))
|
||||
def review_content(self):
|
||||
return "Texto de marketing para revisão..."
|
||||
|
||||
@listen("approved")
|
||||
def publish_content(self, result: HumanFeedbackResult):
|
||||
@@ -62,10 +61,6 @@ class ContentApprovalFlow(Flow):
|
||||
@listen("rejected")
|
||||
def archive_content(self, result: HumanFeedbackResult):
|
||||
print(f"Conteúdo rejeitado. Motivo: {result.feedback}")
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise_content(self, result: HumanFeedbackResult):
|
||||
print(f"Revisão solicitada: {result.feedback}")
|
||||
```
|
||||
|
||||
Para detalhes completos de implementação, consulte o guia [Feedback Humano em Flows](/pt-BR/learn/human-feedback-in-flows).
|
||||
|
||||
@@ -176,6 +176,11 @@ Você precisa enviar seu crew para um repositório do GitHub. Caso ainda não te
|
||||

|
||||
</Frame>
|
||||
|
||||
<Info>
|
||||
Usando pacotes Python privados? Você também precisará adicionar suas credenciais de registro aqui.
|
||||
Consulte [Registros de Pacotes Privados](/pt-BR/enterprise/guides/private-package-registry) para as variáveis necessárias.
|
||||
</Info>
|
||||
|
||||
</Step>
|
||||
|
||||
<Step title="Implante Seu Crew">
|
||||
|
||||
@@ -256,6 +256,12 @@ Antes da implantação, certifique-se de ter:
|
||||
1. **Chaves de API de LLM** prontas (OpenAI, Anthropic, Google, etc.)
|
||||
2. **Chaves de API de ferramentas** se estiver usando ferramentas externas (Serper, etc.)
|
||||
|
||||
<Info>
|
||||
Se seu projeto depende de pacotes de um **registro PyPI privado**, você também precisará configurar
|
||||
credenciais de autenticação do registro como variáveis de ambiente. Consulte o guia
|
||||
[Registros de Pacotes Privados](/pt-BR/enterprise/guides/private-package-registry) para mais detalhes.
|
||||
</Info>
|
||||
|
||||
<Tip>
|
||||
Teste seu projeto localmente com as mesmas variáveis de ambiente antes de implantar
|
||||
para detectar problemas de configuração antecipadamente.
|
||||
|
||||
263
docs/pt-BR/enterprise/guides/private-package-registry.mdx
Normal file
263
docs/pt-BR/enterprise/guides/private-package-registry.mdx
Normal file
@@ -0,0 +1,263 @@
|
||||
---
|
||||
title: "Registros de Pacotes Privados"
|
||||
description: "Instale pacotes Python privados de registros PyPI autenticados no CrewAI AMP"
|
||||
icon: "lock"
|
||||
mode: "wide"
|
||||
---
|
||||
|
||||
<Note>
|
||||
Este guia aborda como configurar seu projeto CrewAI para instalar pacotes Python
|
||||
de registros PyPI privados (Azure DevOps Artifacts, GitHub Packages, GitLab, AWS CodeArtifact, etc.)
|
||||
ao implantar no CrewAI AMP.
|
||||
</Note>
|
||||
|
||||
## Quando Você Precisa Disso
|
||||
|
||||
Se seu projeto depende de pacotes Python internos ou proprietários hospedados em um registro privado
|
||||
em vez do PyPI público, você precisará:
|
||||
|
||||
1. Informar ao UV **onde** encontrar o pacote (uma URL de index)
|
||||
2. Informar ao UV **quais** pacotes vêm desse index (um mapeamento de source)
|
||||
3. Fornecer **credenciais** para que o UV possa autenticar durante a instalação
|
||||
|
||||
O CrewAI AMP usa [UV](https://docs.astral.sh/uv/) para resolução e instalação de dependências.
|
||||
O UV suporta registros privados autenticados por meio da configuração do `pyproject.toml` combinada
|
||||
com variáveis de ambiente para credenciais.
|
||||
|
||||
## Passo 1: Configurar o pyproject.toml
|
||||
|
||||
Três elementos trabalham juntos no seu `pyproject.toml`:
|
||||
|
||||
### 1a. Declarar a dependência
|
||||
|
||||
Adicione o pacote privado ao seu `[project.dependencies]` como qualquer outra dependência:
|
||||
|
||||
```toml
|
||||
[project]
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.100.1,<1.0.0",
|
||||
"my-private-package>=1.2.0",
|
||||
]
|
||||
```
|
||||
|
||||
### 1b. Definir o index
|
||||
|
||||
Registre seu registro privado como um index nomeado em `[[tool.uv.index]]`:
|
||||
|
||||
```toml
|
||||
[[tool.uv.index]]
|
||||
name = "my-private-registry"
|
||||
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
|
||||
explicit = true
|
||||
```
|
||||
|
||||
<Info>
|
||||
O campo `name` é importante — o UV o utiliza para construir os nomes das variáveis de ambiente
|
||||
para autenticação (veja o [Passo 2](#passo-2-configurar-credenciais-de-autenticação) abaixo).
|
||||
|
||||
Definir `explicit = true` significa que o UV não consultará esse index para todos os pacotes — apenas
|
||||
os que você mapear explicitamente em `[tool.uv.sources]`. Isso evita consultas desnecessárias
|
||||
ao seu registro privado e protege contra ataques de confusão de dependências.
|
||||
</Info>
|
||||
|
||||
### 1c. Mapear o pacote para o index
|
||||
|
||||
Informe ao UV quais pacotes devem ser resolvidos a partir do seu index privado usando `[tool.uv.sources]`:
|
||||
|
||||
```toml
|
||||
[tool.uv.sources]
|
||||
my-private-package = { index = "my-private-registry" }
|
||||
```
|
||||
|
||||
### Exemplo completo
|
||||
|
||||
```toml
|
||||
[project]
|
||||
name = "my-crew-project"
|
||||
version = "0.1.0"
|
||||
requires-python = ">=3.10,<=3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.100.1,<1.0.0",
|
||||
"my-private-package>=1.2.0",
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
type = "crew"
|
||||
|
||||
[[tool.uv.index]]
|
||||
name = "my-private-registry"
|
||||
url = "https://pkgs.dev.azure.com/my-org/_packaging/my-feed/pypi/simple/"
|
||||
explicit = true
|
||||
|
||||
[tool.uv.sources]
|
||||
my-private-package = { index = "my-private-registry" }
|
||||
```
|
||||
|
||||
Após atualizar o `pyproject.toml`, regenere seu arquivo lock:
|
||||
|
||||
```bash
|
||||
uv lock
|
||||
```
|
||||
|
||||
<Warning>
|
||||
Sempre faça commit do `uv.lock` atualizado junto com as alterações no `pyproject.toml`.
|
||||
O arquivo lock é obrigatório para implantação — veja [Preparar para Implantação](/pt-BR/enterprise/guides/prepare-for-deployment).
|
||||
</Warning>
|
||||
|
||||
## Passo 2: Configurar Credenciais de Autenticação
|
||||
|
||||
O UV autentica em indexes privados usando variáveis de ambiente que seguem uma convenção de nomenclatura
|
||||
baseada no nome do index que você definiu no `pyproject.toml`:
|
||||
|
||||
```
|
||||
UV_INDEX_{UPPER_NAME}_USERNAME
|
||||
UV_INDEX_{UPPER_NAME}_PASSWORD
|
||||
```
|
||||
|
||||
Onde `{UPPER_NAME}` é o nome do seu index convertido para **maiúsculas** com **hifens substituídos por underscores**.
|
||||
|
||||
Por exemplo, um index chamado `my-private-registry` usa:
|
||||
|
||||
| Variável | Valor |
|
||||
|----------|-------|
|
||||
| `UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME` | Seu nome de usuário ou nome do token do registro |
|
||||
| `UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD` | Sua senha ou token/PAT do registro |
|
||||
|
||||
<Warning>
|
||||
Essas variáveis de ambiente **devem** ser adicionadas pelas configurações de **Variáveis de Ambiente** do CrewAI AMP —
|
||||
globalmente ou no nível da implantação. Elas não podem ser definidas em arquivos `.env` ou codificadas no seu projeto.
|
||||
|
||||
Veja [Configurar Variáveis de Ambiente no AMP](#configurar-variáveis-de-ambiente-no-amp) abaixo.
|
||||
</Warning>
|
||||
|
||||
## Referência de Provedores de Registro
|
||||
|
||||
A tabela abaixo mostra o formato da URL de index e os valores de credenciais para provedores de registro comuns.
|
||||
Substitua os valores de exemplo pelos detalhes reais da sua organização e feed.
|
||||
|
||||
| Provedor | URL do Index | Usuário | Senha |
|
||||
|----------|-------------|---------|-------|
|
||||
| **Azure DevOps Artifacts** | `https://pkgs.dev.azure.com/{org}/_packaging/{feed}/pypi/simple/` | Qualquer string não vazia (ex: `token`) | Personal Access Token (PAT) com escopo Packaging Read |
|
||||
| **GitHub Packages** | `https://pypi.pkg.github.com/{owner}/simple/` | Nome de usuário do GitHub | Personal Access Token (classic) com escopo `read:packages` |
|
||||
| **GitLab Package Registry** | `https://gitlab.com/api/v4/projects/{project_id}/packages/pypi/simple/` | `__token__` | Project ou Personal Access Token com escopo `read_api` |
|
||||
| **AWS CodeArtifact** | Use a URL de `aws codeartifact get-repository-endpoint` | `aws` | Token de `aws codeartifact get-authorization-token` |
|
||||
| **Google Artifact Registry** | `https://{region}-python.pkg.dev/{project}/{repo}/simple/` | `_json_key_base64` | Chave de conta de serviço codificada em Base64 |
|
||||
| **JFrog Artifactory** | `https://{instance}.jfrog.io/artifactory/api/pypi/{repo}/simple/` | Nome de usuário ou email | Chave API ou token de identidade |
|
||||
| **Auto-hospedado (devpi, Nexus, etc.)** | URL da API simple do seu registro | Nome de usuário do registro | Senha do registro |
|
||||
|
||||
<Tip>
|
||||
Para **AWS CodeArtifact**, o token de autorização expira periodicamente.
|
||||
Você precisará atualizar o valor de `UV_INDEX_*_PASSWORD` quando ele expirar.
|
||||
Considere automatizar isso no seu pipeline de CI/CD.
|
||||
</Tip>
|
||||
|
||||
## Configurar Variáveis de Ambiente no AMP
|
||||
|
||||
As credenciais do registro privado devem ser configuradas como variáveis de ambiente no CrewAI AMP.
|
||||
Você tem duas opções:
|
||||
|
||||
<Tabs>
|
||||
<Tab title="Interface Web">
|
||||
1. Faça login no [CrewAI AMP](https://app.crewai.com)
|
||||
2. Navegue até sua automação
|
||||
3. Abra a aba **Environment Variables**
|
||||
4. Adicione cada variável (`UV_INDEX_*_USERNAME` e `UV_INDEX_*_PASSWORD`) com seu valor
|
||||
|
||||
Veja o passo [Deploy para AMP — Definir Variáveis de Ambiente](/pt-BR/enterprise/guides/deploy-to-amp#definir-as-variáveis-de-ambiente) para detalhes.
|
||||
</Tab>
|
||||
<Tab title="Implantação via CLI">
|
||||
Adicione as variáveis ao seu arquivo `.env` local antes de executar `crewai deploy create`.
|
||||
A CLI as transferirá com segurança para a plataforma:
|
||||
|
||||
```bash
|
||||
# .env
|
||||
OPENAI_API_KEY=sk-...
|
||||
UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
|
||||
UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat-here
|
||||
```
|
||||
|
||||
```bash
|
||||
crewai deploy create
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
<Warning>
|
||||
**Nunca** faça commit de credenciais no seu repositório. Use variáveis de ambiente do AMP para todos os segredos.
|
||||
O arquivo `.env` deve estar listado no `.gitignore`.
|
||||
</Warning>
|
||||
|
||||
Para atualizar credenciais em uma implantação existente, veja [Atualizar Seu Crew — Variáveis de Ambiente](/pt-BR/enterprise/guides/update-crew).
|
||||
|
||||
## Como Tudo se Conecta
|
||||
|
||||
Quando o CrewAI AMP faz o build da sua automação, o fluxo de resolução funciona assim:
|
||||
|
||||
<Steps>
|
||||
<Step title="Build inicia">
|
||||
O AMP busca seu repositório e lê o `pyproject.toml` e o `uv.lock`.
|
||||
</Step>
|
||||
<Step title="UV resolve dependências">
|
||||
O UV lê `[tool.uv.sources]` para determinar de qual index cada pacote deve vir.
|
||||
</Step>
|
||||
<Step title="UV autentica">
|
||||
Para cada index privado, o UV busca `UV_INDEX_{NAME}_USERNAME` e `UV_INDEX_{NAME}_PASSWORD`
|
||||
nas variáveis de ambiente que você configurou no AMP.
|
||||
</Step>
|
||||
<Step title="Pacotes são instalados">
|
||||
O UV baixa e instala todos os pacotes — tanto públicos (do PyPI) quanto privados (do seu registro).
|
||||
</Step>
|
||||
<Step title="Automação executa">
|
||||
Seu crew ou flow inicia com todas as dependências disponíveis.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Solução de Problemas
|
||||
|
||||
### Erros de Autenticação Durante o Build
|
||||
|
||||
**Sintoma**: Build falha com `401 Unauthorized` ou `403 Forbidden` ao resolver um pacote privado.
|
||||
|
||||
**Verifique**:
|
||||
- Os nomes das variáveis de ambiente `UV_INDEX_*` correspondem exatamente ao nome do seu index (maiúsculas, hifens -> underscores)
|
||||
- As credenciais estão definidas nas variáveis de ambiente do AMP, não apenas em um `.env` local
|
||||
- Seu token/PAT tem as permissões de leitura necessárias para o feed de pacotes
|
||||
- O token não expirou (especialmente relevante para AWS CodeArtifact)
|
||||
|
||||
### Pacote Não Encontrado
|
||||
|
||||
**Sintoma**: `No matching distribution found for my-private-package`.
|
||||
|
||||
**Verifique**:
|
||||
- A URL do index no `pyproject.toml` termina com `/simple/`
|
||||
- A entrada `[tool.uv.sources]` mapeia o nome correto do pacote para o nome correto do index
|
||||
- O pacote está realmente publicado no seu registro privado
|
||||
- Execute `uv lock` localmente com as mesmas credenciais para verificar se a resolução funciona
|
||||
|
||||
### Conflitos no Arquivo Lock
|
||||
|
||||
**Sintoma**: `uv lock` falha ou produz resultados inesperados após adicionar um index privado.
|
||||
|
||||
**Solução**: Defina as credenciais localmente e regenere:
|
||||
|
||||
```bash
|
||||
export UV_INDEX_MY_PRIVATE_REGISTRY_USERNAME=token
|
||||
export UV_INDEX_MY_PRIVATE_REGISTRY_PASSWORD=your-pat
|
||||
uv lock
|
||||
```
|
||||
|
||||
Em seguida, faça commit do `uv.lock` atualizado.
|
||||
|
||||
## Guias Relacionados
|
||||
|
||||
<CardGroup cols={3}>
|
||||
<Card title="Preparar para Implantação" icon="clipboard-check" href="/pt-BR/enterprise/guides/prepare-for-deployment">
|
||||
Verifique a estrutura do projeto e as dependências antes de implantar.
|
||||
</Card>
|
||||
<Card title="Deploy para AMP" icon="rocket" href="/pt-BR/enterprise/guides/deploy-to-amp">
|
||||
Implante seu crew ou flow e configure variáveis de ambiente.
|
||||
</Card>
|
||||
<Card title="Atualizar Seu Crew" icon="arrows-rotate" href="/pt-BR/enterprise/guides/update-crew">
|
||||
Atualize variáveis de ambiente e envie alterações para uma implantação em execução.
|
||||
</Card>
|
||||
</CardGroup>
|
||||
@@ -73,6 +73,8 @@ Quando este flow é executado, ele irá:
|
||||
| `default_outcome` | `str` | Não | Outcome a usar se nenhum feedback for fornecido. Deve estar em `emit` |
|
||||
| `metadata` | `dict` | Não | Dados adicionais para integrações enterprise |
|
||||
| `provider` | `HumanFeedbackProvider` | Não | Provider customizado para feedback assíncrono/não-bloqueante. Veja [Feedback Humano Assíncrono](#feedback-humano-assíncrono-não-bloqueante) |
|
||||
| `learn` | `bool` | Não | Habilitar aprendizado HITL: destila lições do feedback e pré-revisa saídas futuras. Padrão `False`. Veja [Aprendendo com Feedback](#aprendendo-com-feedback) |
|
||||
| `learn_limit` | `int` | Não | Máximo de lições passadas para recuperar na pré-revisão. Padrão `5` |
|
||||
|
||||
### Uso Básico (Sem Roteamento)
|
||||
|
||||
@@ -96,33 +98,43 @@ def handle_feedback(self, result):
|
||||
Quando você especifica `emit`, o decorador se torna um roteador. O feedback livre do humano é interpretado por um LLM e mapeado para um dos outcomes especificados:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
@human_feedback(
|
||||
message="Você aprova este conteúdo para publicação?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
def review_content(self):
|
||||
return "Rascunho do post do blog aqui..."
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback
|
||||
|
||||
@listen("approved")
|
||||
def publish(self, result):
|
||||
print(f"Publicando! Usuário disse: {result.feedback}")
|
||||
class ReviewFlow(Flow):
|
||||
@start()
|
||||
def generate_content(self):
|
||||
return "Rascunho do post do blog aqui..."
|
||||
|
||||
@listen("rejected")
|
||||
def discard(self, result):
|
||||
print(f"Descartando. Motivo: {result.feedback}")
|
||||
@human_feedback(
|
||||
message="Você aprova este conteúdo para publicação?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
@listen(or_("generate_content", "needs_revision"))
|
||||
def review_content(self):
|
||||
return "Rascunho do post do blog aqui..."
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise(self, result):
|
||||
print(f"Revisando baseado em: {result.feedback}")
|
||||
@listen("approved")
|
||||
def publish(self, result):
|
||||
print(f"Publicando! Usuário disse: {result.feedback}")
|
||||
|
||||
@listen("rejected")
|
||||
def discard(self, result):
|
||||
print(f"Descartando. Motivo: {result.feedback}")
|
||||
```
|
||||
|
||||
Quando o humano diz algo como "precisa de mais detalhes", o LLM mapeia para `"needs_revision"`, que dispara `review_content` novamente via `or_()` — criando um loop de revisão. O loop continua até que o outcome seja `"approved"` ou `"rejected"`.
|
||||
|
||||
<Tip>
|
||||
O LLM usa saídas estruturadas (function calling) quando disponível para garantir que a resposta seja um dos seus outcomes especificados. Isso torna o roteamento confiável e previsível.
|
||||
</Tip>
|
||||
|
||||
<Warning>
|
||||
Um método `@start()` só executa uma vez no início do flow. Se você precisa de um loop de revisão, separe o método start do método de revisão e use `@listen(or_("trigger", "revision_outcome"))` no método de revisão para habilitar o self-loop.
|
||||
</Warning>
|
||||
|
||||
## HumanFeedbackResult
|
||||
|
||||
O dataclass `HumanFeedbackResult` contém todas as informações sobre uma interação de feedback humano:
|
||||
@@ -191,116 +203,162 @@ Aqui está um exemplo completo implementando um fluxo de revisão e aprovação
|
||||
<CodeGroup>
|
||||
|
||||
```python Code
|
||||
from crewai.flow.flow import Flow, start, listen
|
||||
from crewai.flow.flow import Flow, start, listen, or_
|
||||
from crewai.flow.human_feedback import human_feedback, HumanFeedbackResult
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class ContentState(BaseModel):
|
||||
topic: str = ""
|
||||
draft: str = ""
|
||||
final_content: str = ""
|
||||
revision_count: int = 0
|
||||
status: str = "pending"
|
||||
|
||||
|
||||
class ContentApprovalFlow(Flow[ContentState]):
|
||||
"""Um flow que gera conteúdo e obtém aprovação humana."""
|
||||
"""Um flow que gera conteúdo e faz loop até o humano aprovar."""
|
||||
|
||||
@start()
|
||||
def get_topic(self):
|
||||
self.state.topic = input("Sobre qual tópico devo escrever? ")
|
||||
return self.state.topic
|
||||
|
||||
@listen(get_topic)
|
||||
def generate_draft(self, topic):
|
||||
# Em uso real, isso chamaria um LLM
|
||||
self.state.draft = f"# {topic}\n\nEste é um rascunho sobre {topic}..."
|
||||
def generate_draft(self):
|
||||
self.state.draft = "# IA Segura\n\nEste é um rascunho sobre IA Segura..."
|
||||
return self.state.draft
|
||||
|
||||
@listen(generate_draft)
|
||||
@human_feedback(
|
||||
message="Por favor, revise este rascunho. Responda 'approved', 'rejected', ou forneça feedback de revisão:",
|
||||
message="Por favor, revise este rascunho. Aprove, rejeite ou descreva o que precisa mudar:",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
)
|
||||
def review_draft(self, draft):
|
||||
return draft
|
||||
@listen(or_("generate_draft", "needs_revision"))
|
||||
def review_draft(self):
|
||||
self.state.revision_count += 1
|
||||
return f"{self.state.draft} (v{self.state.revision_count})"
|
||||
|
||||
@listen("approved")
|
||||
def publish_content(self, result: HumanFeedbackResult):
|
||||
self.state.final_content = result.output
|
||||
print("\n✅ Conteúdo aprovado e publicado!")
|
||||
print(f"Comentário do revisor: {result.feedback}")
|
||||
self.state.status = "published"
|
||||
print(f"Conteúdo aprovado e publicado! Revisor disse: {result.feedback}")
|
||||
return "published"
|
||||
|
||||
@listen("rejected")
|
||||
def handle_rejection(self, result: HumanFeedbackResult):
|
||||
print("\n❌ Conteúdo rejeitado")
|
||||
print(f"Motivo: {result.feedback}")
|
||||
self.state.status = "rejected"
|
||||
print(f"Conteúdo rejeitado. Motivo: {result.feedback}")
|
||||
return "rejected"
|
||||
|
||||
@listen("needs_revision")
|
||||
def revise_content(self, result: HumanFeedbackResult):
|
||||
self.state.revision_count += 1
|
||||
print(f"\n📝 Revisão #{self.state.revision_count} solicitada")
|
||||
print(f"Feedback: {result.feedback}")
|
||||
|
||||
# Em um flow real, você pode voltar para generate_draft
|
||||
# Para este exemplo, apenas reconhecemos
|
||||
return "revision_requested"
|
||||
|
||||
|
||||
# Executar o flow
|
||||
flow = ContentApprovalFlow()
|
||||
result = flow.kickoff()
|
||||
print(f"\nFlow concluído. Revisões solicitadas: {flow.state.revision_count}")
|
||||
print(f"\nFlow finalizado. Status: {flow.state.status}, Revisões: {flow.state.revision_count}")
|
||||
```
|
||||
|
||||
```text Output
|
||||
Sobre qual tópico devo escrever? Segurança em IA
|
||||
==================================================
|
||||
OUTPUT FOR REVIEW:
|
||||
==================================================
|
||||
# IA Segura
|
||||
|
||||
Este é um rascunho sobre IA Segura... (v1)
|
||||
==================================================
|
||||
|
||||
Por favor, revise este rascunho. Aprove, rejeite ou descreva o que precisa mudar:
|
||||
(Press Enter to skip, or type your feedback)
|
||||
|
||||
Your feedback: Preciso de mais detalhes sobre segurança em IA.
|
||||
|
||||
==================================================
|
||||
OUTPUT FOR REVIEW:
|
||||
==================================================
|
||||
# Segurança em IA
|
||||
# IA Segura
|
||||
|
||||
Este é um rascunho sobre Segurança em IA...
|
||||
Este é um rascunho sobre IA Segura... (v2)
|
||||
==================================================
|
||||
|
||||
Por favor, revise este rascunho. Responda 'approved', 'rejected', ou forneça feedback de revisão:
|
||||
Por favor, revise este rascunho. Aprove, rejeite ou descreva o que precisa mudar:
|
||||
(Press Enter to skip, or type your feedback)
|
||||
|
||||
Your feedback: Parece bom, aprovado!
|
||||
|
||||
✅ Conteúdo aprovado e publicado!
|
||||
Comentário do revisor: Parece bom, aprovado!
|
||||
Conteúdo aprovado e publicado! Revisor disse: Parece bom, aprovado!
|
||||
|
||||
Flow concluído. Revisões solicitadas: 0
|
||||
Flow finalizado. Status: published, Revisões: 2
|
||||
```
|
||||
|
||||
</CodeGroup>
|
||||
|
||||
## Combinando com Outros Decoradores
|
||||
|
||||
O decorador `@human_feedback` funciona com outros decoradores de flow. Coloque-o como o decorador mais interno (mais próximo da função):
|
||||
O decorador `@human_feedback` funciona com `@start()`, `@listen()` e `or_()`. Ambas as ordens de decoradores funcionam — o framework propaga atributos em ambas as direções — mas os padrões recomendados são:
|
||||
|
||||
```python Code
|
||||
# Correto: @human_feedback é o mais interno (mais próximo da função)
|
||||
# Revisão única no início do flow (sem self-loop)
|
||||
@start()
|
||||
@human_feedback(message="Revise isto:")
|
||||
@human_feedback(message="Revise isto:", emit=["approved", "rejected"], llm="gpt-4o-mini")
|
||||
def my_start_method(self):
|
||||
return "content"
|
||||
|
||||
# Revisão linear em um listener (sem self-loop)
|
||||
@listen(other_method)
|
||||
@human_feedback(message="Revise isto também:")
|
||||
@human_feedback(message="Revise isto também:", emit=["good", "bad"], llm="gpt-4o-mini")
|
||||
def my_listener(self, data):
|
||||
return f"processed: {data}"
|
||||
|
||||
# Self-loop: revisão que pode voltar para revisões
|
||||
@human_feedback(message="Aprovar ou revisar?", emit=["approved", "revise"], llm="gpt-4o-mini")
|
||||
@listen(or_("upstream_method", "revise"))
|
||||
def review_with_loop(self):
|
||||
return "content for review"
|
||||
```
|
||||
|
||||
<Tip>
|
||||
Coloque `@human_feedback` como o decorador mais interno (último/mais próximo da função) para que ele envolva o método diretamente e possa capturar o valor de retorno antes de passar para o sistema de flow.
|
||||
</Tip>
|
||||
### Padrão de self-loop
|
||||
|
||||
Para criar um loop de revisão, o método de revisão deve escutar **ambos** um gatilho upstream e seu próprio outcome de revisão usando `or_()`:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
def generate(self):
|
||||
return "initial draft"
|
||||
|
||||
@human_feedback(
|
||||
message="Aprovar ou solicitar alterações?",
|
||||
emit=["revise", "approved"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="approved",
|
||||
)
|
||||
@listen(or_("generate", "revise"))
|
||||
def review(self):
|
||||
return "content"
|
||||
|
||||
@listen("approved")
|
||||
def publish(self):
|
||||
return "published"
|
||||
```
|
||||
|
||||
Quando o outcome é `"revise"`, o flow roteia de volta para `review` (porque ele escuta `"revise"` via `or_()`). Quando o outcome é `"approved"`, o flow continua para `publish`. Isso funciona porque o engine de flow isenta roteadores da regra "fire once", permitindo que eles re-executem em cada iteração do loop.
|
||||
|
||||
### Roteadores encadeados
|
||||
|
||||
Um listener disparado pelo outcome de um roteador pode ser ele mesmo um roteador:
|
||||
|
||||
```python Code
|
||||
@start()
|
||||
@human_feedback(message="Primeira revisão:", emit=["approved", "rejected"], llm="gpt-4o-mini")
|
||||
def draft(self):
|
||||
return "draft content"
|
||||
|
||||
@listen("approved")
|
||||
@human_feedback(message="Revisão final:", emit=["publish", "revise"], llm="gpt-4o-mini")
|
||||
def final_review(self, prev):
|
||||
return "final content"
|
||||
|
||||
@listen("publish")
|
||||
def on_publish(self, prev):
|
||||
return "published"
|
||||
```
|
||||
|
||||
### Limitações
|
||||
|
||||
- **Métodos `@start()` executam uma vez**: Um método `@start()` não pode fazer self-loop. Se você precisa de um ciclo de revisão, use um método `@start()` separado como ponto de entrada e coloque o `@human_feedback` em um método `@listen()`.
|
||||
- **Sem `@start()` + `@listen()` no mesmo método**: Esta é uma restrição do framework de Flow. Um método é ou um ponto de início ou um listener, não ambos.
|
||||
|
||||
## Melhores Práticas
|
||||
|
||||
@@ -514,9 +572,9 @@ class ContentPipeline(Flow):
|
||||
@start()
|
||||
@human_feedback(
|
||||
message="Aprova este conteúdo para publicação?",
|
||||
emit=["approved", "rejected", "needs_revision"],
|
||||
emit=["approved", "rejected"],
|
||||
llm="gpt-4o-mini",
|
||||
default_outcome="needs_revision",
|
||||
default_outcome="rejected",
|
||||
provider=SlackNotificationProvider("#content-reviews"),
|
||||
)
|
||||
def generate_content(self):
|
||||
@@ -532,11 +590,6 @@ class ContentPipeline(Flow):
|
||||
print(f"Arquivado. Motivo: {result.feedback}")
|
||||
return {"status": "archived"}
|
||||
|
||||
@listen("needs_revision")
|
||||
def queue_revision(self, result):
|
||||
print(f"Na fila para revisão: {result.feedback}")
|
||||
return {"status": "revision_needed"}
|
||||
|
||||
|
||||
# Iniciando o flow (vai pausar e aguardar resposta do Slack)
|
||||
def start_content_pipeline():
|
||||
@@ -576,6 +629,64 @@ Se você está usando um framework web assíncrono (FastAPI, aiohttp, Slack Bolt
|
||||
5. **Persistência automática**: O estado é automaticamente salvo quando `HumanFeedbackPending` é lançado e usa `SQLiteFlowPersistence` por padrão
|
||||
6. **Persistência customizada**: Passe uma instância de persistência customizada para `from_pending()` se necessário
|
||||
|
||||
## Aprendendo com Feedback
|
||||
|
||||
O parâmetro `learn=True` habilita um ciclo de feedback entre revisores humanos e o sistema de memória. Quando habilitado, o sistema melhora progressivamente suas saídas aprendendo com correções humanas anteriores.
|
||||
|
||||
### Como Funciona
|
||||
|
||||
1. **Após o feedback**: O LLM extrai lições generalizáveis da saída + feedback e as armazena na memória com `source="hitl"`. Se o feedback for apenas aprovação (ex: "parece bom"), nada é armazenado.
|
||||
2. **Antes da próxima revisão**: Lições HITL passadas são recuperadas da memória e aplicadas pelo LLM para melhorar a saída antes que o humano a veja.
|
||||
|
||||
Com o tempo, o humano vê saídas pré-revisadas progressivamente melhores porque cada correção informa revisões futuras.
|
||||
|
||||
### Exemplo
|
||||
|
||||
```python Code
|
||||
class ArticleReviewFlow(Flow):
|
||||
@start()
|
||||
def generate_article(self):
|
||||
return self.crew.kickoff(inputs={"topic": "AI Safety"}).raw
|
||||
|
||||
@human_feedback(
|
||||
message="Revise este rascunho do artigo:",
|
||||
emit=["approved", "needs_revision"],
|
||||
llm="gpt-4o-mini",
|
||||
learn=True, # enable HITL learning
|
||||
)
|
||||
@listen(or_("generate_article", "needs_revision"))
|
||||
def review_article(self):
|
||||
return self.last_human_feedback.output if self.last_human_feedback else "article draft"
|
||||
|
||||
@listen("approved")
|
||||
def publish(self):
|
||||
print(f"Publishing: {self.last_human_feedback.output}")
|
||||
```
|
||||
|
||||
**Primeira execução**: O humano vê a saída bruta e diz "Sempre inclua citações para afirmações factuais." A lição é destilada e armazenada na memória.
|
||||
|
||||
**Segunda execução**: O sistema recupera a lição sobre citações, pré-revisa a saída para adicionar citações e então mostra a versão melhorada. O trabalho do humano muda de "corrigir tudo" para "identificar o que o sistema deixou passar."
|
||||
|
||||
### Configuração
|
||||
|
||||
| Parâmetro | Padrão | Descrição |
|
||||
|-----------|--------|-----------|
|
||||
| `learn` | `False` | Habilitar aprendizado HITL |
|
||||
| `learn_limit` | `5` | Máximo de lições passadas para recuperar na pré-revisão |
|
||||
|
||||
### Decisões de Design Principais
|
||||
|
||||
- **Mesmo LLM para tudo**: O parâmetro `llm` no decorador é compartilhado pelo mapeamento de outcome, destilação de lições e pré-revisão. Não é necessário configurar múltiplos modelos.
|
||||
- **Saída estruturada**: Tanto a destilação quanto a pré-revisão usam function calling com modelos Pydantic quando o LLM suporta, com fallback para parsing de texto caso contrário.
|
||||
- **Armazenamento não-bloqueante**: Lições são armazenadas via `remember_many()` que executa em uma thread em segundo plano -- o flow continua imediatamente.
|
||||
- **Degradação graciosa**: Se o LLM falhar durante a destilação, nada é armazenado. Se falhar durante a pré-revisão, a saída bruta é mostrada. Nenhuma falha bloqueia o flow.
|
||||
- **Sem escopo/categorias necessários**: Ao armazenar lições, apenas `source` é passado. O pipeline de codificação infere escopo, categorias e importância automaticamente.
|
||||
|
||||
<Note>
|
||||
`learn=True` requer que o Flow tenha memória disponível. Flows obtêm memória automaticamente por padrão, mas se você a desabilitou com `_skip_auto_memory`, o aprendizado HITL será silenciosamente ignorado.
|
||||
</Note>
|
||||
|
||||
|
||||
## Documentação Relacionada
|
||||
|
||||
- [Visão Geral de Flows](/pt-BR/concepts/flows) - Aprenda sobre CrewAI Flows
|
||||
@@ -583,3 +694,4 @@ Se você está usando um framework web assíncrono (FastAPI, aiohttp, Slack Bolt
|
||||
- [Persistência de Flows](/pt-BR/concepts/flows#persistence) - Persistindo estado de flows
|
||||
- [Roteamento com @router](/pt-BR/concepts/flows#router) - Mais sobre roteamento condicional
|
||||
- [Input Humano na Execução](/pt-BR/learn/human-input-on-execution) - Input humano no nível de task
|
||||
- [Memória](/pt-BR/concepts/memory) - O sistema unificado de memória usado pelo aprendizado HITL
|
||||
|
||||
@@ -16,9 +16,9 @@ dependencies = [
|
||||
"pdfplumber~=0.11.4",
|
||||
"regex~=2026.1.15",
|
||||
# Telemetry and Monitoring
|
||||
"opentelemetry-api>=1.34.0,<2",
|
||||
"opentelemetry-sdk>=1.34.0,<2",
|
||||
"opentelemetry-exporter-otlp-proto-http>=1.34.0,<2",
|
||||
"opentelemetry-api~=1.34.0",
|
||||
"opentelemetry-sdk~=1.34.0",
|
||||
"opentelemetry-exporter-otlp-proto-http~=1.34.0",
|
||||
# Data Handling
|
||||
"chromadb~=1.1.0",
|
||||
"tokenizers~=0.20.3",
|
||||
@@ -26,6 +26,8 @@ dependencies = [
|
||||
# Authentication and Security
|
||||
"python-dotenv~=1.1.1",
|
||||
"pyjwt>=2.9.0,<3",
|
||||
# TUI
|
||||
"textual>=7.5.0",
|
||||
# Configuration and Utils
|
||||
"click~=8.1.7",
|
||||
"appdirs~=1.4.4",
|
||||
@@ -36,9 +38,11 @@ dependencies = [
|
||||
"json5~=0.10.0",
|
||||
"portalocker~=2.7.0",
|
||||
"pydantic-settings~=2.10.1",
|
||||
"httpx~=0.28.1",
|
||||
"mcp~=1.26.0",
|
||||
"uv~=0.9.13",
|
||||
"aiosqlite~=0.21.0",
|
||||
"lancedb>=0.4.0",
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
|
||||
@@ -10,6 +10,7 @@ from crewai.flow.flow import Flow
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.memory.unified_memory import Memory
|
||||
from crewai.process import Process
|
||||
from crewai.task import Task
|
||||
from crewai.tasks.llm_guardrail import LLMGuardrail
|
||||
@@ -80,6 +81,7 @@ __all__ = [
|
||||
"Flow",
|
||||
"Knowledge",
|
||||
"LLMGuardrail",
|
||||
"Memory",
|
||||
"Process",
|
||||
"Task",
|
||||
"TaskOutput",
|
||||
|
||||
@@ -71,7 +71,6 @@ from crewai.mcp import (
|
||||
from crewai.mcp.transports.http import HTTPTransport
|
||||
from crewai.mcp.transports.sse import SSETransport
|
||||
from crewai.mcp.transports.stdio import StdioTransport
|
||||
from crewai.memory.contextual.contextual_memory import ContextualMemory
|
||||
from crewai.rag.embeddings.types import EmbedderConfig
|
||||
from crewai.security.fingerprint import Fingerprint
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
@@ -311,19 +310,12 @@ class Agent(BaseAgent):
|
||||
raise ValueError(f"Invalid Knowledge Configuration: {e!s}") from e
|
||||
|
||||
def _is_any_available_memory(self) -> bool:
|
||||
"""Check if any memory is available."""
|
||||
if not self.crew:
|
||||
return False
|
||||
|
||||
memory_attributes = [
|
||||
"memory",
|
||||
"_short_term_memory",
|
||||
"_long_term_memory",
|
||||
"_entity_memory",
|
||||
"_external_memory",
|
||||
]
|
||||
|
||||
return any(getattr(self.crew, attr) for attr in memory_attributes)
|
||||
"""Check if unified memory is available (agent or crew)."""
|
||||
if getattr(self, "memory", None):
|
||||
return True
|
||||
if self.crew and getattr(self.crew, "_memory", None):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _supports_native_tool_calling(self, tools: list[BaseTool]) -> bool:
|
||||
"""Check if the LLM supports native function calling with the given tools.
|
||||
@@ -387,15 +379,16 @@ class Agent(BaseAgent):
|
||||
memory = ""
|
||||
|
||||
try:
|
||||
contextual_memory = ContextualMemory(
|
||||
self.crew._short_term_memory,
|
||||
self.crew._long_term_memory,
|
||||
self.crew._entity_memory,
|
||||
self.crew._external_memory,
|
||||
agent=self,
|
||||
task=task,
|
||||
unified_memory = getattr(self, "memory", None) or (
|
||||
getattr(self.crew, "_memory", None) if self.crew else None
|
||||
)
|
||||
memory = contextual_memory.build_context_for_task(task, context or "")
|
||||
if unified_memory is not None:
|
||||
query = task.description
|
||||
matches = unified_memory.recall(query, limit=10)
|
||||
if matches:
|
||||
memory = "Relevant memories:\n" + "\n".join(
|
||||
f"- {m.record.content}" for m in matches
|
||||
)
|
||||
if memory.strip() != "":
|
||||
task_prompt += self.i18n.slice("memory").format(memory=memory)
|
||||
|
||||
@@ -624,17 +617,16 @@ class Agent(BaseAgent):
|
||||
memory = ""
|
||||
|
||||
try:
|
||||
contextual_memory = ContextualMemory(
|
||||
self.crew._short_term_memory,
|
||||
self.crew._long_term_memory,
|
||||
self.crew._entity_memory,
|
||||
self.crew._external_memory,
|
||||
agent=self,
|
||||
task=task,
|
||||
)
|
||||
memory = await contextual_memory.abuild_context_for_task(
|
||||
task, context or ""
|
||||
unified_memory = getattr(self, "memory", None) or (
|
||||
getattr(self.crew, "_memory", None) if self.crew else None
|
||||
)
|
||||
if unified_memory is not None:
|
||||
query = task.description
|
||||
matches = unified_memory.recall(query, limit=10)
|
||||
if matches:
|
||||
memory = "Relevant memories:\n" + "\n".join(
|
||||
f"- {m.record.content}" for m in matches
|
||||
)
|
||||
if memory.strip() != "":
|
||||
task_prompt += self.i18n.slice("memory").format(memory=memory)
|
||||
|
||||
@@ -1712,6 +1704,18 @@ class Agent(BaseAgent):
|
||||
|
||||
# Prepare tools
|
||||
raw_tools: list[BaseTool] = self.tools or []
|
||||
|
||||
# Inject memory tools for standalone kickoff (crew path handles its own)
|
||||
agent_memory = getattr(self, "memory", None)
|
||||
if agent_memory is not None:
|
||||
from crewai.tools.memory_tools import create_memory_tools
|
||||
|
||||
existing_names = {sanitize_tool_name(t.name) for t in raw_tools}
|
||||
raw_tools.extend(
|
||||
mt for mt in create_memory_tools(agent_memory)
|
||||
if sanitize_tool_name(mt.name) not in existing_names
|
||||
)
|
||||
|
||||
parsed_tools = parse_tools(raw_tools)
|
||||
|
||||
# Build agent_info for backward-compatible event emission
|
||||
@@ -1786,6 +1790,49 @@ class Agent(BaseAgent):
|
||||
if input_files:
|
||||
all_files.update(input_files)
|
||||
|
||||
# Inject memory context for standalone kickoff (recall before execution)
|
||||
if agent_memory is not None:
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryRetrievalStartedEvent(
|
||||
task_id=None,
|
||||
source_type="agent_kickoff",
|
||||
from_agent=self,
|
||||
),
|
||||
)
|
||||
start_time = time.time()
|
||||
matches = agent_memory.recall(formatted_messages, limit=10)
|
||||
memory_block = ""
|
||||
if matches:
|
||||
memory_block = "Relevant memories:\n" + "\n".join(
|
||||
f"- {m.record.content}" for m in matches
|
||||
)
|
||||
if memory_block:
|
||||
formatted_messages += "\n\n" + self.i18n.slice("memory").format(
|
||||
memory=memory_block
|
||||
)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryRetrievalCompletedEvent(
|
||||
task_id=None,
|
||||
memory_content=memory_block,
|
||||
retrieval_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="agent_kickoff",
|
||||
from_agent=self,
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryRetrievalFailedEvent(
|
||||
task_id=None,
|
||||
source_type="agent_kickoff",
|
||||
from_agent=self,
|
||||
error=str(e),
|
||||
),
|
||||
)
|
||||
|
||||
# Build the input dict for the executor
|
||||
inputs: dict[str, Any] = {
|
||||
"input": formatted_messages,
|
||||
@@ -1856,6 +1903,9 @@ class Agent(BaseAgent):
|
||||
response_format=response_format,
|
||||
)
|
||||
|
||||
# Save to memory after execution (passive save)
|
||||
self._save_kickoff_to_memory(messages, output.raw)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionCompletedEvent(
|
||||
@@ -1876,6 +1926,31 @@ class Agent(BaseAgent):
|
||||
)
|
||||
raise
|
||||
|
||||
def _save_kickoff_to_memory(
|
||||
self, messages: str | list[LLMMessage], output_text: str
|
||||
) -> None:
|
||||
"""Save kickoff result to memory. No-op if agent has no memory."""
|
||||
agent_memory = getattr(self, "memory", None)
|
||||
if agent_memory is None:
|
||||
return
|
||||
try:
|
||||
if isinstance(messages, str):
|
||||
input_str = messages
|
||||
else:
|
||||
input_str = "\n".join(
|
||||
str(msg.get("content", "")) for msg in messages if msg.get("content")
|
||||
) or "User request"
|
||||
raw = (
|
||||
f"Input: {input_str}\n"
|
||||
f"Agent: {self.role}\n"
|
||||
f"Result: {output_text}"
|
||||
)
|
||||
extracted = agent_memory.extract_memories(raw)
|
||||
if extracted:
|
||||
agent_memory.remember_many(extracted)
|
||||
except Exception as e:
|
||||
self._logger.log("error", f"Failed to save kickoff result to memory: {e}")
|
||||
|
||||
def _execute_and_build_output(
|
||||
self,
|
||||
executor: AgentExecutor,
|
||||
@@ -2158,6 +2233,9 @@ class Agent(BaseAgent):
|
||||
response_format=response_format,
|
||||
)
|
||||
|
||||
# Save to memory after async execution (passive save)
|
||||
self._save_kickoff_to_memory(messages, output.raw)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LiteAgentExecutionCompletedEvent(
|
||||
|
||||
@@ -199,6 +199,14 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
default=None,
|
||||
description="List of MCP server references. Supports 'https://server.com/path' for external servers and 'crewai-amp:mcp-name' for AMP marketplace. Use '#tool_name' suffix for specific tools.",
|
||||
)
|
||||
memory: Any = Field(
|
||||
default=None,
|
||||
description=(
|
||||
"Enable agent memory. Pass True for default Memory(), "
|
||||
"or a Memory/MemoryScope/MemorySlice instance for custom configuration. "
|
||||
"If not set, falls back to crew memory."
|
||||
),
|
||||
)
|
||||
|
||||
@model_validator(mode="before")
|
||||
@classmethod
|
||||
@@ -329,6 +337,17 @@ class BaseAgent(BaseModel, ABC, metaclass=AgentMeta):
|
||||
self._token_process = TokenProcess()
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def resolve_memory(self) -> Self:
|
||||
"""Resolve memory field: True creates a default Memory(), instance is used as-is."""
|
||||
if self.memory is True:
|
||||
from crewai.memory.unified_memory import Memory
|
||||
|
||||
self.memory = Memory()
|
||||
elif self.memory is False:
|
||||
self.memory = None
|
||||
return self
|
||||
|
||||
@property
|
||||
def key(self) -> str:
|
||||
source = [
|
||||
|
||||
@@ -1,13 +1,8 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from crewai.agents.parser import AgentFinish
|
||||
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
|
||||
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
|
||||
from crewai.utilities.converter import ConverterError
|
||||
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
||||
from crewai.utilities.printer import Printer
|
||||
from crewai.utilities.string_utils import sanitize_tool_name
|
||||
|
||||
@@ -30,110 +25,29 @@ class CrewAgentExecutorMixin:
|
||||
_i18n: I18N
|
||||
_printer: Printer = Printer()
|
||||
|
||||
def _create_short_term_memory(self, output: AgentFinish) -> None:
|
||||
"""Create and save a short-term memory item if conditions are met."""
|
||||
def _save_to_memory(self, output: AgentFinish) -> None:
|
||||
"""Save task result to unified memory (memory or crew._memory)."""
|
||||
memory = getattr(self.agent, "memory", None) or (
|
||||
getattr(self.crew, "_memory", None) if self.crew else None
|
||||
)
|
||||
if memory is None or not self.task:
|
||||
return
|
||||
if (
|
||||
self.crew
|
||||
and self.agent
|
||||
and self.task
|
||||
and f"Action: {sanitize_tool_name('Delegate work to coworker')}"
|
||||
not in output.text
|
||||
f"Action: {sanitize_tool_name('Delegate work to coworker')}"
|
||||
in output.text
|
||||
):
|
||||
try:
|
||||
if (
|
||||
hasattr(self.crew, "_short_term_memory")
|
||||
and self.crew._short_term_memory
|
||||
):
|
||||
self.crew._short_term_memory.save(
|
||||
value=output.text,
|
||||
metadata={
|
||||
"observation": self.task.description,
|
||||
},
|
||||
)
|
||||
except Exception as e:
|
||||
self.agent._logger.log(
|
||||
"error", f"Failed to add to short term memory: {e}"
|
||||
)
|
||||
|
||||
def _create_external_memory(self, output: AgentFinish) -> None:
|
||||
"""Create and save a external-term memory item if conditions are met."""
|
||||
if (
|
||||
self.crew
|
||||
and self.agent
|
||||
and self.task
|
||||
and hasattr(self.crew, "_external_memory")
|
||||
and self.crew._external_memory
|
||||
):
|
||||
try:
|
||||
self.crew._external_memory.save(
|
||||
value=output.text,
|
||||
metadata={
|
||||
"description": self.task.description,
|
||||
"messages": self.messages,
|
||||
},
|
||||
)
|
||||
except Exception as e:
|
||||
self.agent._logger.log(
|
||||
"error", f"Failed to add to external memory: {e}"
|
||||
)
|
||||
|
||||
def _create_long_term_memory(self, output: AgentFinish) -> None:
|
||||
"""Create and save long-term and entity memory items based on evaluation."""
|
||||
if (
|
||||
self.crew
|
||||
and self.crew._long_term_memory
|
||||
and self.crew._entity_memory
|
||||
and self.task
|
||||
and self.agent
|
||||
):
|
||||
try:
|
||||
ltm_agent = TaskEvaluator(self.agent)
|
||||
evaluation = ltm_agent.evaluate(self.task, output.text)
|
||||
|
||||
if isinstance(evaluation, ConverterError):
|
||||
return
|
||||
|
||||
long_term_memory = LongTermMemoryItem(
|
||||
task=self.task.description,
|
||||
agent=self.agent.role,
|
||||
quality=evaluation.quality,
|
||||
datetime=str(time.time()),
|
||||
expected_output=self.task.expected_output,
|
||||
metadata={
|
||||
"suggestions": evaluation.suggestions,
|
||||
"quality": evaluation.quality,
|
||||
},
|
||||
)
|
||||
self.crew._long_term_memory.save(long_term_memory)
|
||||
|
||||
entity_memories = [
|
||||
EntityMemoryItem(
|
||||
name=entity.name,
|
||||
type=entity.type,
|
||||
description=entity.description,
|
||||
relationships="\n".join(
|
||||
[f"- {r}" for r in entity.relationships]
|
||||
),
|
||||
)
|
||||
for entity in evaluation.entities
|
||||
]
|
||||
if entity_memories:
|
||||
self.crew._entity_memory.save(entity_memories)
|
||||
except AttributeError as e:
|
||||
self.agent._logger.log(
|
||||
"error", f"Missing attributes for long term memory: {e}"
|
||||
)
|
||||
except Exception as e:
|
||||
self.agent._logger.log(
|
||||
"error", f"Failed to add to long term memory: {e}"
|
||||
)
|
||||
elif (
|
||||
self.crew
|
||||
and self.crew._long_term_memory
|
||||
and self.crew._entity_memory is None
|
||||
):
|
||||
if self.agent and self.agent.verbose:
|
||||
self._printer.print(
|
||||
content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
|
||||
color="bold_yellow",
|
||||
)
|
||||
return
|
||||
try:
|
||||
raw = (
|
||||
f"Task: {self.task.description}\n"
|
||||
f"Agent: {self.agent.role}\n"
|
||||
f"Expected result: {self.task.expected_output}\n"
|
||||
f"Result: {output.text}"
|
||||
)
|
||||
extracted = memory.extract_memories(raw)
|
||||
if extracted:
|
||||
memory.remember_many(extracted, agent_role=self.agent.role)
|
||||
except Exception as e:
|
||||
self.agent._logger.log(
|
||||
"error", f"Failed to save to memory: {e}"
|
||||
)
|
||||
|
||||
@@ -6,7 +6,10 @@ and memory management.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
import inspect
|
||||
import logging
|
||||
from typing import TYPE_CHECKING, Any, Literal, cast
|
||||
|
||||
@@ -47,6 +50,7 @@ from crewai.utilities.agent_utils import (
|
||||
handle_unknown_error,
|
||||
has_reached_max_iterations,
|
||||
is_context_length_exceeded,
|
||||
parse_tool_call_args,
|
||||
process_llm_response,
|
||||
track_delegation_if_needed,
|
||||
)
|
||||
@@ -234,9 +238,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
if self.ask_for_human_input:
|
||||
formatted_answer = self._handle_human_feedback(formatted_answer)
|
||||
|
||||
self._create_short_term_memory(formatted_answer)
|
||||
self._create_long_term_memory(formatted_answer)
|
||||
self._create_external_memory(formatted_answer)
|
||||
self._save_to_memory(formatted_answer)
|
||||
return {"output": formatted_answer.output}
|
||||
|
||||
def _inject_multimodal_files(self, inputs: dict[str, Any] | None = None) -> None:
|
||||
@@ -687,30 +689,142 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
Returns:
|
||||
AgentFinish if tool has result_as_answer=True, None otherwise.
|
||||
"""
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
from crewai.events import crewai_event_bus
|
||||
from crewai.events.types.tool_usage_events import (
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageFinishedEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
|
||||
if not tool_calls:
|
||||
return None
|
||||
|
||||
# Only process the FIRST tool call for sequential execution with reflection
|
||||
tool_call = tool_calls[0]
|
||||
parsed_calls = [
|
||||
parsed
|
||||
for tool_call in tool_calls
|
||||
if (parsed := self._parse_native_tool_call(tool_call)) is not None
|
||||
]
|
||||
if not parsed_calls:
|
||||
return None
|
||||
|
||||
# Extract tool call info - handle OpenAI-style, Anthropic-style, and Gemini-style
|
||||
original_tools_by_name: dict[str, Any] = {}
|
||||
for tool in self.original_tools or []:
|
||||
original_tools_by_name[sanitize_tool_name(tool.name)] = tool
|
||||
|
||||
if len(parsed_calls) > 1:
|
||||
has_result_as_answer_in_batch = any(
|
||||
bool(
|
||||
original_tools_by_name.get(func_name)
|
||||
and getattr(
|
||||
original_tools_by_name.get(func_name), "result_as_answer", False
|
||||
)
|
||||
)
|
||||
for _, func_name, _ in parsed_calls
|
||||
)
|
||||
has_max_usage_count_in_batch = any(
|
||||
bool(
|
||||
original_tools_by_name.get(func_name)
|
||||
and getattr(
|
||||
original_tools_by_name.get(func_name),
|
||||
"max_usage_count",
|
||||
None,
|
||||
)
|
||||
is not None
|
||||
)
|
||||
for _, func_name, _ in parsed_calls
|
||||
)
|
||||
|
||||
# Preserve historical sequential behavior for result_as_answer batches.
|
||||
# Also avoid threading around usage counters for max_usage_count tools.
|
||||
if has_result_as_answer_in_batch or has_max_usage_count_in_batch:
|
||||
logger.debug(
|
||||
"Skipping parallel native execution because batch includes result_as_answer or max_usage_count tool"
|
||||
)
|
||||
else:
|
||||
execution_plan: list[
|
||||
tuple[str, str, str | dict[str, Any], Any | None]
|
||||
] = []
|
||||
for call_id, func_name, func_args in parsed_calls:
|
||||
original_tool = original_tools_by_name.get(func_name)
|
||||
execution_plan.append(
|
||||
(call_id, func_name, func_args, original_tool)
|
||||
)
|
||||
|
||||
self._append_assistant_tool_calls_message(
|
||||
[
|
||||
(call_id, func_name, func_args)
|
||||
for call_id, func_name, func_args, _ in execution_plan
|
||||
]
|
||||
)
|
||||
|
||||
max_workers = min(8, len(execution_plan))
|
||||
ordered_results: list[dict[str, Any] | None] = [None] * len(
|
||||
execution_plan
|
||||
)
|
||||
with ThreadPoolExecutor(max_workers=max_workers) as pool:
|
||||
futures = {
|
||||
pool.submit(
|
||||
self._execute_single_native_tool_call,
|
||||
call_id=call_id,
|
||||
func_name=func_name,
|
||||
func_args=func_args,
|
||||
available_functions=available_functions,
|
||||
original_tool=original_tool,
|
||||
should_execute=True,
|
||||
): idx
|
||||
for idx, (
|
||||
call_id,
|
||||
func_name,
|
||||
func_args,
|
||||
original_tool,
|
||||
) in enumerate(execution_plan)
|
||||
}
|
||||
for future in as_completed(futures):
|
||||
idx = futures[future]
|
||||
ordered_results[idx] = future.result()
|
||||
|
||||
for execution_result in ordered_results:
|
||||
if not execution_result:
|
||||
continue
|
||||
tool_finish = self._append_tool_result_and_check_finality(
|
||||
execution_result
|
||||
)
|
||||
if tool_finish:
|
||||
return tool_finish
|
||||
|
||||
reasoning_prompt = self._i18n.slice("post_tool_reasoning")
|
||||
reasoning_message: LLMMessage = {
|
||||
"role": "user",
|
||||
"content": reasoning_prompt,
|
||||
}
|
||||
self.messages.append(reasoning_message)
|
||||
return None
|
||||
|
||||
# Sequential behavior: process only first tool call, then force reflection.
|
||||
call_id, func_name, func_args = parsed_calls[0]
|
||||
self._append_assistant_tool_calls_message([(call_id, func_name, func_args)])
|
||||
|
||||
execution_result = self._execute_single_native_tool_call(
|
||||
call_id=call_id,
|
||||
func_name=func_name,
|
||||
func_args=func_args,
|
||||
available_functions=available_functions,
|
||||
original_tool=original_tools_by_name.get(func_name),
|
||||
should_execute=True,
|
||||
)
|
||||
tool_finish = self._append_tool_result_and_check_finality(execution_result)
|
||||
if tool_finish:
|
||||
return tool_finish
|
||||
|
||||
reasoning_prompt = self._i18n.slice("post_tool_reasoning")
|
||||
reasoning_message = {
|
||||
"role": "user",
|
||||
"content": reasoning_prompt,
|
||||
}
|
||||
self.messages.append(reasoning_message)
|
||||
return None
|
||||
|
||||
def _parse_native_tool_call(
|
||||
self, tool_call: Any
|
||||
) -> tuple[str, str, str | dict[str, Any]] | None:
|
||||
if hasattr(tool_call, "function"):
|
||||
# OpenAI-style: has .function.name and .function.arguments
|
||||
call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
|
||||
func_name = sanitize_tool_name(tool_call.function.name)
|
||||
func_args = tool_call.function.arguments
|
||||
elif hasattr(tool_call, "function_call") and tool_call.function_call:
|
||||
# Gemini-style: has .function_call.name and .function_call.args
|
||||
return call_id, func_name, tool_call.function.arguments
|
||||
if hasattr(tool_call, "function_call") and tool_call.function_call:
|
||||
call_id = f"call_{id(tool_call)}"
|
||||
func_name = sanitize_tool_name(tool_call.function_call.name)
|
||||
func_args = (
|
||||
@@ -718,13 +832,12 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
if tool_call.function_call.args
|
||||
else {}
|
||||
)
|
||||
elif hasattr(tool_call, "name") and hasattr(tool_call, "input"):
|
||||
# Anthropic format: has .name and .input (ToolUseBlock)
|
||||
return call_id, func_name, func_args
|
||||
if hasattr(tool_call, "name") and hasattr(tool_call, "input"):
|
||||
call_id = getattr(tool_call, "id", f"call_{id(tool_call)}")
|
||||
func_name = sanitize_tool_name(tool_call.name)
|
||||
func_args = tool_call.input # Already a dict in Anthropic
|
||||
elif isinstance(tool_call, dict):
|
||||
# Support OpenAI "id", Bedrock "toolUseId", or generate one
|
||||
return call_id, func_name, tool_call.input
|
||||
if isinstance(tool_call, dict):
|
||||
call_id = (
|
||||
tool_call.get("id")
|
||||
or tool_call.get("toolUseId")
|
||||
@@ -735,10 +848,15 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
func_info.get("name", "") or tool_call.get("name", "")
|
||||
)
|
||||
func_args = func_info.get("arguments", "{}") or tool_call.get("input", {})
|
||||
else:
|
||||
return None
|
||||
return call_id, func_name, func_args
|
||||
return None
|
||||
|
||||
def _append_assistant_tool_calls_message(
|
||||
self,
|
||||
parsed_calls: list[tuple[str, str, str | dict[str, Any]]],
|
||||
) -> None:
|
||||
import json
|
||||
|
||||
# Append assistant message with single tool call
|
||||
assistant_message: LLMMessage = {
|
||||
"role": "assistant",
|
||||
"content": None,
|
||||
@@ -753,42 +871,54 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
else json.dumps(func_args),
|
||||
},
|
||||
}
|
||||
for call_id, func_name, func_args in parsed_calls
|
||||
],
|
||||
}
|
||||
|
||||
self.messages.append(assistant_message)
|
||||
|
||||
# Parse arguments for the single tool call
|
||||
if isinstance(func_args, str):
|
||||
try:
|
||||
args_dict = json.loads(func_args)
|
||||
except json.JSONDecodeError:
|
||||
args_dict = {}
|
||||
else:
|
||||
args_dict = func_args
|
||||
def _execute_single_native_tool_call(
|
||||
self,
|
||||
*,
|
||||
call_id: str,
|
||||
func_name: str,
|
||||
func_args: str | dict[str, Any],
|
||||
available_functions: dict[str, Callable[..., Any]],
|
||||
original_tool: Any | None = None,
|
||||
should_execute: bool = True,
|
||||
) -> dict[str, Any]:
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown"
|
||||
from crewai.events.types.tool_usage_events import (
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageFinishedEvent,
|
||||
ToolUsageStartedEvent,
|
||||
)
|
||||
|
||||
# Find original tool by matching sanitized name (needed for cache_function and result_as_answer)
|
||||
args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id, original_tool)
|
||||
if parse_error is not None:
|
||||
return parse_error
|
||||
|
||||
original_tool = None
|
||||
for tool in self.original_tools or []:
|
||||
if sanitize_tool_name(tool.name) == func_name:
|
||||
original_tool = tool
|
||||
break
|
||||
if original_tool is None:
|
||||
for tool in self.original_tools or []:
|
||||
if sanitize_tool_name(tool.name) == func_name:
|
||||
original_tool = tool
|
||||
break
|
||||
|
||||
# Check if tool has reached max usage count
|
||||
max_usage_reached = False
|
||||
if original_tool:
|
||||
if (
|
||||
hasattr(original_tool, "max_usage_count")
|
||||
and original_tool.max_usage_count is not None
|
||||
and original_tool.current_usage_count >= original_tool.max_usage_count
|
||||
):
|
||||
max_usage_reached = True
|
||||
if not should_execute and original_tool:
|
||||
max_usage_reached = True
|
||||
elif (
|
||||
should_execute
|
||||
and original_tool
|
||||
and (max_count := getattr(original_tool, "max_usage_count", None))
|
||||
is not None
|
||||
and getattr(original_tool, "current_usage_count", 0) >= max_count
|
||||
):
|
||||
max_usage_reached = True
|
||||
|
||||
# Check cache before executing
|
||||
from_cache = False
|
||||
result: str = "Tool not found"
|
||||
input_str = json.dumps(args_dict) if args_dict else ""
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
cached_result = self.tools_handler.cache.read(
|
||||
@@ -802,7 +932,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
)
|
||||
from_cache = True
|
||||
|
||||
# Emit tool usage started event
|
||||
agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown"
|
||||
started_at = datetime.now()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
@@ -818,14 +948,12 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
|
||||
track_delegation_if_needed(func_name, args_dict, self.task)
|
||||
|
||||
# Find the structured tool for hook context
|
||||
structured_tool: CrewStructuredTool | None = None
|
||||
for structured in self.tools or []:
|
||||
if sanitize_tool_name(structured.name) == func_name:
|
||||
structured_tool = structured
|
||||
break
|
||||
|
||||
# Execute before_tool_call hooks
|
||||
hook_blocked = False
|
||||
before_hook_context = ToolCallHookContext(
|
||||
tool_name=func_name,
|
||||
@@ -849,58 +977,48 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
color="red",
|
||||
)
|
||||
|
||||
# If hook blocked execution, set result and skip tool execution
|
||||
if hook_blocked:
|
||||
result = f"Tool execution blocked by hook. Tool: {func_name}"
|
||||
# Execute the tool (only if not cached, not at max usage, and not blocked by hook)
|
||||
elif not from_cache and not max_usage_reached:
|
||||
result = "Tool not found"
|
||||
if func_name in available_functions:
|
||||
try:
|
||||
tool_func = available_functions[func_name]
|
||||
raw_result = tool_func(**args_dict)
|
||||
|
||||
# Add to cache after successful execution (before string conversion)
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
should_cache = True
|
||||
if (
|
||||
original_tool
|
||||
and hasattr(original_tool, "cache_function")
|
||||
and callable(original_tool.cache_function)
|
||||
):
|
||||
should_cache = original_tool.cache_function(
|
||||
args_dict, raw_result
|
||||
)
|
||||
if should_cache:
|
||||
self.tools_handler.cache.add(
|
||||
tool=func_name, input=input_str, output=raw_result
|
||||
)
|
||||
|
||||
# Convert to string for message
|
||||
result = (
|
||||
str(raw_result)
|
||||
if not isinstance(raw_result, str)
|
||||
else raw_result
|
||||
)
|
||||
except Exception as e:
|
||||
result = f"Error executing tool: {e}"
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageErrorEvent(
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
error=e,
|
||||
),
|
||||
)
|
||||
error_event_emitted = True
|
||||
elif max_usage_reached and original_tool:
|
||||
# Return error message when max usage limit is reached
|
||||
result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
|
||||
elif not from_cache and func_name in available_functions:
|
||||
try:
|
||||
raw_result = available_functions[func_name](**args_dict)
|
||||
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
should_cache = True
|
||||
if (
|
||||
original_tool
|
||||
and hasattr(original_tool, "cache_function")
|
||||
and callable(original_tool.cache_function)
|
||||
):
|
||||
should_cache = original_tool.cache_function(
|
||||
args_dict, raw_result
|
||||
)
|
||||
if should_cache:
|
||||
self.tools_handler.cache.add(
|
||||
tool=func_name, input=input_str, output=raw_result
|
||||
)
|
||||
|
||||
result = (
|
||||
str(raw_result) if not isinstance(raw_result, str) else raw_result
|
||||
)
|
||||
except Exception as e:
|
||||
result = f"Error executing tool: {e}"
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageErrorEvent(
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
error=e,
|
||||
),
|
||||
)
|
||||
error_event_emitted = True
|
||||
|
||||
after_hook_context = ToolCallHookContext(
|
||||
tool_name=func_name,
|
||||
@@ -940,7 +1058,23 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
),
|
||||
)
|
||||
|
||||
# Append tool result message
|
||||
return {
|
||||
"call_id": call_id,
|
||||
"func_name": func_name,
|
||||
"result": result,
|
||||
"from_cache": from_cache,
|
||||
"original_tool": original_tool,
|
||||
}
|
||||
|
||||
def _append_tool_result_and_check_finality(
|
||||
self, execution_result: dict[str, Any]
|
||||
) -> AgentFinish | None:
|
||||
call_id = cast(str, execution_result["call_id"])
|
||||
func_name = cast(str, execution_result["func_name"])
|
||||
result = cast(str, execution_result["result"])
|
||||
from_cache = cast(bool, execution_result["from_cache"])
|
||||
original_tool = execution_result["original_tool"]
|
||||
|
||||
tool_message: LLMMessage = {
|
||||
"role": "tool",
|
||||
"tool_call_id": call_id,
|
||||
@@ -949,7 +1083,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
}
|
||||
self.messages.append(tool_message)
|
||||
|
||||
# Log the tool execution
|
||||
if self.agent and self.agent.verbose:
|
||||
cache_info = " (from cache)" if from_cache else ""
|
||||
self._printer.print(
|
||||
@@ -962,20 +1095,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
and hasattr(original_tool, "result_as_answer")
|
||||
and original_tool.result_as_answer
|
||||
):
|
||||
# Return immediately with tool result as final answer
|
||||
return AgentFinish(
|
||||
thought="Tool result is the final answer",
|
||||
output=result,
|
||||
text=result,
|
||||
)
|
||||
|
||||
# Inject post-tool reasoning prompt to enforce analysis
|
||||
reasoning_prompt = self._i18n.slice("post_tool_reasoning")
|
||||
reasoning_message: LLMMessage = {
|
||||
"role": "user",
|
||||
"content": reasoning_prompt,
|
||||
}
|
||||
self.messages.append(reasoning_message)
|
||||
return None
|
||||
|
||||
async def ainvoke(self, inputs: dict[str, Any]) -> dict[str, Any]:
|
||||
@@ -1011,9 +1135,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
if self.ask_for_human_input:
|
||||
formatted_answer = await self._ahandle_human_feedback(formatted_answer)
|
||||
|
||||
self._create_short_term_memory(formatted_answer)
|
||||
self._create_long_term_memory(formatted_answer)
|
||||
self._create_external_memory(formatted_answer)
|
||||
self._save_to_memory(formatted_answer)
|
||||
return {"output": formatted_answer.output}
|
||||
|
||||
async def _ainvoke_loop(self) -> AgentFinish:
|
||||
@@ -1375,7 +1497,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
formatted_answer: Current agent response.
|
||||
"""
|
||||
if self.step_callback:
|
||||
self.step_callback(formatted_answer)
|
||||
cb_result = self.step_callback(formatted_answer)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
def _append_message(
|
||||
self, text: str, role: Literal["user", "assistant", "system"] = "assistant"
|
||||
|
||||
@@ -2,8 +2,8 @@ import time
|
||||
from typing import TYPE_CHECKING, Any, TypeVar, cast
|
||||
import webbrowser
|
||||
|
||||
import httpx
|
||||
from pydantic import BaseModel, Field
|
||||
import requests
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.utils import validate_jwt_token
|
||||
@@ -98,7 +98,7 @@ class AuthenticationCommand:
|
||||
"scope": " ".join(self.oauth2_provider.get_oauth_scopes()),
|
||||
"audience": self.oauth2_provider.get_audience(),
|
||||
}
|
||||
response = requests.post(
|
||||
response = httpx.post(
|
||||
url=self.oauth2_provider.get_authorize_url(),
|
||||
data=device_code_payload,
|
||||
timeout=20,
|
||||
@@ -130,7 +130,7 @@ class AuthenticationCommand:
|
||||
|
||||
attempts = 0
|
||||
while True and attempts < 10:
|
||||
response = requests.post(
|
||||
response = httpx.post(
|
||||
self.oauth2_provider.get_token_url(), data=token_payload, timeout=30
|
||||
)
|
||||
token_data = response.json()
|
||||
@@ -149,7 +149,7 @@ class AuthenticationCommand:
|
||||
return
|
||||
|
||||
if token_data["error"] not in ("authorization_pending", "slow_down"):
|
||||
raise requests.HTTPError(
|
||||
raise httpx.HTTPError(
|
||||
token_data.get("error_description") or token_data.get("error")
|
||||
)
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
from importlib.metadata import version as get_version
|
||||
import os
|
||||
import subprocess
|
||||
from typing import Any
|
||||
|
||||
import click
|
||||
|
||||
@@ -179,9 +180,19 @@ def log_tasks_outputs() -> None:
|
||||
|
||||
|
||||
@crewai.command()
|
||||
@click.option("-l", "--long", is_flag=True, help="Reset LONG TERM memory")
|
||||
@click.option("-s", "--short", is_flag=True, help="Reset SHORT TERM memory")
|
||||
@click.option("-e", "--entities", is_flag=True, help="Reset ENTITIES memory")
|
||||
@click.option("-m", "--memory", is_flag=True, help="Reset MEMORY")
|
||||
@click.option(
|
||||
"-l", "--long", is_flag=True, hidden=True,
|
||||
help="[Deprecated: use --memory] Reset memory",
|
||||
)
|
||||
@click.option(
|
||||
"-s", "--short", is_flag=True, hidden=True,
|
||||
help="[Deprecated: use --memory] Reset memory",
|
||||
)
|
||||
@click.option(
|
||||
"-e", "--entities", is_flag=True, hidden=True,
|
||||
help="[Deprecated: use --memory] Reset memory",
|
||||
)
|
||||
@click.option("-kn", "--knowledge", is_flag=True, help="Reset KNOWLEDGE storage")
|
||||
@click.option(
|
||||
"-akn", "--agent-knowledge", is_flag=True, help="Reset AGENT KNOWLEDGE storage"
|
||||
@@ -191,6 +202,7 @@ def log_tasks_outputs() -> None:
|
||||
)
|
||||
@click.option("-a", "--all", is_flag=True, help="Reset ALL memories")
|
||||
def reset_memories(
|
||||
memory: bool,
|
||||
long: bool,
|
||||
short: bool,
|
||||
entities: bool,
|
||||
@@ -200,13 +212,22 @@ def reset_memories(
|
||||
all: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Reset the crew memories (long, short, entity, latest_crew_kickoff_ouputs, knowledge, agent_knowledge). This will delete all the data saved.
|
||||
Reset the crew memories (memory, knowledge, agent_knowledge, kickoff_outputs). This will delete all the data saved.
|
||||
"""
|
||||
try:
|
||||
# Treat legacy flags as --memory with a deprecation warning
|
||||
if long or short or entities:
|
||||
legacy_used = [
|
||||
f for f, v in [("--long", long), ("--short", short), ("--entities", entities)] if v
|
||||
]
|
||||
click.echo(
|
||||
f"Warning: {', '.join(legacy_used)} {'is' if len(legacy_used) == 1 else 'are'} "
|
||||
"deprecated. Use --memory (-m) instead. All memory is now unified."
|
||||
)
|
||||
memory = True
|
||||
|
||||
memory_types = [
|
||||
long,
|
||||
short,
|
||||
entities,
|
||||
memory,
|
||||
knowledge,
|
||||
agent_knowledge,
|
||||
kickoff_outputs,
|
||||
@@ -218,12 +239,73 @@ def reset_memories(
|
||||
)
|
||||
return
|
||||
reset_memories_command(
|
||||
long, short, entities, knowledge, agent_knowledge, kickoff_outputs, all
|
||||
memory, knowledge, agent_knowledge, kickoff_outputs, all
|
||||
)
|
||||
except Exception as e:
|
||||
click.echo(f"An error occurred while resetting memories: {e}", err=True)
|
||||
|
||||
|
||||
@crewai.command()
|
||||
@click.option(
|
||||
"--storage-path",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Path to LanceDB memory directory. If omitted, uses ./.crewai/memory.",
|
||||
)
|
||||
@click.option(
|
||||
"--embedder-provider",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Embedder provider for recall queries (e.g. openai, google-vertex, cohere, ollama).",
|
||||
)
|
||||
@click.option(
|
||||
"--embedder-model",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Embedder model name (e.g. text-embedding-3-small, gemini-embedding-001).",
|
||||
)
|
||||
@click.option(
|
||||
"--embedder-config",
|
||||
type=str,
|
||||
default=None,
|
||||
help='Full embedder config as JSON (e.g. \'{"provider": "cohere", "config": {"model_name": "embed-v4.0"}}\').',
|
||||
)
|
||||
def memory(
|
||||
storage_path: str | None,
|
||||
embedder_provider: str | None,
|
||||
embedder_model: str | None,
|
||||
embedder_config: str | None,
|
||||
) -> None:
|
||||
"""Open the Memory TUI to browse scopes and recall memories."""
|
||||
try:
|
||||
from crewai.cli.memory_tui import MemoryTUI
|
||||
except ImportError as exc:
|
||||
click.echo(
|
||||
"Textual is required for the memory TUI but could not be imported. "
|
||||
"Try reinstalling crewai or: pip install textual"
|
||||
)
|
||||
raise SystemExit(1) from exc
|
||||
|
||||
# Build embedder spec from CLI flags.
|
||||
embedder_spec: dict[str, Any] | None = None
|
||||
if embedder_config:
|
||||
import json as _json
|
||||
|
||||
try:
|
||||
embedder_spec = _json.loads(embedder_config)
|
||||
except _json.JSONDecodeError as exc:
|
||||
click.echo(f"Invalid --embedder-config JSON: {exc}")
|
||||
raise SystemExit(1) from exc
|
||||
elif embedder_provider:
|
||||
cfg: dict[str, str] = {}
|
||||
if embedder_model:
|
||||
cfg["model_name"] = embedder_model
|
||||
embedder_spec = {"provider": embedder_provider, "config": cfg}
|
||||
|
||||
app = MemoryTUI(storage_path=storage_path, embedder_config=embedder_spec)
|
||||
app.run()
|
||||
|
||||
|
||||
@crewai.command()
|
||||
@click.option(
|
||||
"-n",
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import requests
|
||||
from requests.exceptions import JSONDecodeError
|
||||
import json
|
||||
|
||||
import httpx
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.token import get_auth_token
|
||||
@@ -30,16 +31,16 @@ class PlusAPIMixin:
|
||||
console.print("Run 'crewai login' to sign up/login.", style="bold green")
|
||||
raise SystemExit from None
|
||||
|
||||
def _validate_response(self, response: requests.Response) -> None:
|
||||
def _validate_response(self, response: httpx.Response) -> None:
|
||||
"""
|
||||
Handle and display error messages from API responses.
|
||||
|
||||
Args:
|
||||
response (requests.Response): The response from the Plus API
|
||||
response (httpx.Response): The response from the Plus API
|
||||
"""
|
||||
try:
|
||||
json_response = response.json()
|
||||
except (JSONDecodeError, ValueError):
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
console.print(
|
||||
"Failed to parse response from Enterprise API failed. Details:",
|
||||
style="bold red",
|
||||
@@ -62,7 +63,7 @@ class PlusAPIMixin:
|
||||
)
|
||||
raise SystemExit
|
||||
|
||||
if not response.ok:
|
||||
if not response.is_success:
|
||||
console.print(
|
||||
"Request to Enterprise API failed. Details:", style="bold red"
|
||||
)
|
||||
|
||||
@@ -69,7 +69,7 @@ ENV_VARS: dict[str, list[dict[str, Any]]] = {
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your AWS Region Name (press Enter to skip)",
|
||||
"key_name": "AWS_REGION_NAME",
|
||||
"key_name": "AWS_DEFAULT_REGION",
|
||||
},
|
||||
],
|
||||
"azure": [
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import json
|
||||
from typing import Any, cast
|
||||
|
||||
import requests
|
||||
from requests.exceptions import JSONDecodeError, RequestException
|
||||
import httpx
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.main import Oauth2Settings, ProviderFactory
|
||||
@@ -47,12 +47,12 @@ class EnterpriseConfigureCommand(BaseCommand):
|
||||
"User-Agent": f"CrewAI-CLI/{get_crewai_version()}",
|
||||
"X-Crewai-Version": get_crewai_version(),
|
||||
}
|
||||
response = requests.get(oauth_endpoint, timeout=30, headers=headers)
|
||||
response = httpx.get(oauth_endpoint, timeout=30, headers=headers)
|
||||
response.raise_for_status()
|
||||
|
||||
try:
|
||||
oauth_config = response.json()
|
||||
except JSONDecodeError as e:
|
||||
except json.JSONDecodeError as e:
|
||||
raise ValueError(f"Invalid JSON response from {oauth_endpoint}") from e
|
||||
|
||||
self._validate_oauth_config(oauth_config)
|
||||
@@ -62,7 +62,7 @@ class EnterpriseConfigureCommand(BaseCommand):
|
||||
)
|
||||
return cast(dict[str, Any], oauth_config)
|
||||
|
||||
except RequestException as e:
|
||||
except httpx.HTTPError as e:
|
||||
raise ValueError(f"Failed to connect to enterprise URL: {e!s}") from e
|
||||
except Exception as e:
|
||||
raise ValueError(f"Error fetching OAuth2 configuration: {e!s}") from e
|
||||
|
||||
398
lib/crewai/src/crewai/cli/memory_tui.py
Normal file
398
lib/crewai/src/crewai/cli/memory_tui.py
Normal file
@@ -0,0 +1,398 @@
|
||||
"""Textual TUI for browsing and recalling unified memory."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import Any
|
||||
|
||||
from textual.app import App, ComposeResult
|
||||
from textual.containers import Horizontal, Vertical
|
||||
from textual.widgets import Footer, Header, Input, OptionList, Static, Tree
|
||||
|
||||
|
||||
# -- CrewAI brand palette --
|
||||
_PRIMARY = "#eb6658" # coral
|
||||
_SECONDARY = "#1F7982" # teal
|
||||
_TERTIARY = "#ffffff" # white
|
||||
|
||||
|
||||
def _format_scope_info(info: Any) -> str:
|
||||
"""Format ScopeInfo with Rich markup."""
|
||||
return (
|
||||
f"[bold {_PRIMARY}]{info.path}[/]\n\n"
|
||||
f"[dim]Records:[/] [bold]{info.record_count}[/]\n"
|
||||
f"[dim]Categories:[/] {', '.join(info.categories) or 'none'}\n"
|
||||
f"[dim]Oldest:[/] {info.oldest_record or '-'}\n"
|
||||
f"[dim]Newest:[/] {info.newest_record or '-'}\n"
|
||||
f"[dim]Children:[/] {', '.join(info.child_scopes) or 'none'}"
|
||||
)
|
||||
|
||||
|
||||
class MemoryTUI(App[None]):
|
||||
"""TUI to browse memory scopes and run recall queries."""
|
||||
|
||||
TITLE = "CrewAI Memory"
|
||||
SUB_TITLE = "Browse scopes and recall memories"
|
||||
|
||||
CSS = f"""
|
||||
Header {{
|
||||
background: {_PRIMARY};
|
||||
color: {_TERTIARY};
|
||||
}}
|
||||
Footer {{
|
||||
background: {_SECONDARY};
|
||||
color: {_TERTIARY};
|
||||
}}
|
||||
Footer > .footer-key--key {{
|
||||
background: {_PRIMARY};
|
||||
color: {_TERTIARY};
|
||||
}}
|
||||
Horizontal {{
|
||||
height: 1fr;
|
||||
}}
|
||||
#scope-tree {{
|
||||
width: 30%;
|
||||
padding: 1 2;
|
||||
background: {_SECONDARY} 8%;
|
||||
border-right: solid {_SECONDARY};
|
||||
}}
|
||||
#scope-tree:focus > .tree--cursor {{
|
||||
background: {_SECONDARY};
|
||||
color: {_TERTIARY};
|
||||
}}
|
||||
#scope-tree > .tree--guides {{
|
||||
color: {_SECONDARY} 50%;
|
||||
}}
|
||||
#scope-tree > .tree--guides-hover {{
|
||||
color: {_PRIMARY};
|
||||
}}
|
||||
#scope-tree > .tree--guides-selected {{
|
||||
color: {_SECONDARY};
|
||||
}}
|
||||
#right-panel {{
|
||||
width: 70%;
|
||||
padding: 0 1;
|
||||
}}
|
||||
#info-panel {{
|
||||
height: 2fr;
|
||||
padding: 1 2;
|
||||
overflow-y: auto;
|
||||
border: round {_SECONDARY};
|
||||
}}
|
||||
#info-panel:focus {{
|
||||
border: round {_PRIMARY};
|
||||
}}
|
||||
#info-panel LoadingIndicator {{
|
||||
color: {_PRIMARY};
|
||||
}}
|
||||
#entry-list {{
|
||||
height: 1fr;
|
||||
border: round {_SECONDARY};
|
||||
padding: 0 1;
|
||||
scrollbar-color: {_PRIMARY};
|
||||
}}
|
||||
#entry-list:focus {{
|
||||
border: round {_PRIMARY};
|
||||
}}
|
||||
#entry-list > .option-list--option-highlighted {{
|
||||
background: {_SECONDARY};
|
||||
color: {_TERTIARY};
|
||||
}}
|
||||
#recall-input {{
|
||||
margin: 0 1 1 1;
|
||||
border: tall {_SECONDARY};
|
||||
}}
|
||||
#recall-input:focus {{
|
||||
border: tall {_PRIMARY};
|
||||
}}
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
storage_path: str | None = None,
|
||||
embedder_config: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self._memory: Any = None
|
||||
self._init_error: str | None = None
|
||||
self._selected_scope: str = "/"
|
||||
self._entries: list[Any] = []
|
||||
self._view_mode: str = "list" # "list" | "recall"
|
||||
self._recall_matches: list[Any] = []
|
||||
self._last_scope_info: Any = None
|
||||
self._custom_embedder = embedder_config is not None
|
||||
try:
|
||||
from crewai.memory.storage.lancedb_storage import LanceDBStorage
|
||||
from crewai.memory.unified_memory import Memory
|
||||
|
||||
storage = LanceDBStorage(path=storage_path) if storage_path else LanceDBStorage()
|
||||
embedder = None
|
||||
if embedder_config is not None:
|
||||
from crewai.rag.embeddings.factory import build_embedder
|
||||
|
||||
embedder = build_embedder(embedder_config)
|
||||
self._memory = Memory(storage=storage, embedder=embedder) if embedder else Memory(storage=storage)
|
||||
except Exception as e:
|
||||
self._init_error = str(e)
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
yield Header(show_clock=False)
|
||||
with Horizontal():
|
||||
yield self._build_scope_tree()
|
||||
initial = (
|
||||
self._init_error
|
||||
if self._init_error
|
||||
else "Select a scope or type a recall query."
|
||||
)
|
||||
with Vertical(id="right-panel"):
|
||||
yield Static(initial, id="info-panel")
|
||||
yield OptionList(id="entry-list")
|
||||
yield Input(
|
||||
placeholder="Type a query and press Enter to recall...",
|
||||
id="recall-input",
|
||||
)
|
||||
yield Footer()
|
||||
|
||||
def on_mount(self) -> None:
|
||||
"""Set initial border titles on mounted widgets."""
|
||||
self.query_one("#info-panel", Static).border_title = "Detail"
|
||||
self.query_one("#entry-list", OptionList).border_title = "Entries"
|
||||
|
||||
def _build_scope_tree(self) -> Tree[str]:
|
||||
tree: Tree[str] = Tree("/", id="scope-tree")
|
||||
if self._memory is None:
|
||||
tree.root.data = "/"
|
||||
tree.root.label = "/ (0 records)"
|
||||
return tree
|
||||
info = self._memory.info("/")
|
||||
tree.root.label = f"/ ({info.record_count} records)"
|
||||
tree.root.data = "/"
|
||||
self._add_children(tree.root, "/", depth=0, max_depth=3)
|
||||
tree.root.expand()
|
||||
return tree
|
||||
|
||||
def _add_children(
|
||||
self,
|
||||
parent_node: Tree.Node[str],
|
||||
path: str,
|
||||
depth: int,
|
||||
max_depth: int,
|
||||
) -> None:
|
||||
if depth >= max_depth or self._memory is None:
|
||||
return
|
||||
info = self._memory.info(path)
|
||||
for child in info.child_scopes:
|
||||
child_info = self._memory.info(child)
|
||||
label = f"{child} ({child_info.record_count})"
|
||||
node = parent_node.add(label, data=child)
|
||||
self._add_children(node, child, depth + 1, max_depth)
|
||||
|
||||
# -- Populating the OptionList -------------------------------------------
|
||||
|
||||
def _populate_entry_list(self) -> None:
|
||||
"""Clear the OptionList and fill it with the current scope's entries."""
|
||||
option_list = self.query_one("#entry-list", OptionList)
|
||||
option_list.clear_options()
|
||||
for record in self._entries:
|
||||
date_str = record.created_at.strftime("%Y-%m-%d")
|
||||
preview = (
|
||||
(record.content[:80] + "…")
|
||||
if len(record.content) > 80
|
||||
else record.content
|
||||
)
|
||||
label = (
|
||||
f"{date_str} "
|
||||
f"[bold]{record.importance:.1f}[/] "
|
||||
f"{preview}"
|
||||
)
|
||||
option_list.add_option(label)
|
||||
|
||||
def _populate_recall_list(self) -> None:
|
||||
"""Clear the OptionList and fill it with the current recall matches."""
|
||||
option_list = self.query_one("#entry-list", OptionList)
|
||||
option_list.clear_options()
|
||||
if not self._recall_matches:
|
||||
return
|
||||
for m in self._recall_matches:
|
||||
preview = (
|
||||
(m.record.content[:80] + "…")
|
||||
if len(m.record.content) > 80
|
||||
else m.record.content
|
||||
)
|
||||
label = (
|
||||
f"[bold]\\[{m.score:.2f}][/] "
|
||||
f"{preview} "
|
||||
f"[dim]scope={m.record.scope}[/]"
|
||||
)
|
||||
option_list.add_option(label)
|
||||
|
||||
# -- Detail rendering ----------------------------------------------------
|
||||
|
||||
def _format_record_detail(self, record: Any, context_line: str = "") -> str:
|
||||
"""Format a full MemoryRecord as Rich markup for the detail view.
|
||||
|
||||
Args:
|
||||
record: A MemoryRecord instance.
|
||||
context_line: Optional header line shown above the fields
|
||||
(e.g. "Entry 3 of 47").
|
||||
|
||||
Returns:
|
||||
A Rich-markup string with all meaningful record fields.
|
||||
"""
|
||||
sep = f"[bold {_PRIMARY}]{'─' * 44}[/]"
|
||||
lines: list[str] = []
|
||||
|
||||
if context_line:
|
||||
lines.append(context_line)
|
||||
lines.append("")
|
||||
|
||||
# -- Fields block --
|
||||
lines.append(f"[dim]ID:[/] {record.id}")
|
||||
lines.append(f"[dim]Scope:[/] [bold]{record.scope}[/]")
|
||||
lines.append(f"[dim]Importance:[/] [bold]{record.importance:.2f}[/]")
|
||||
lines.append(
|
||||
f"[dim]Created:[/] "
|
||||
f"{record.created_at.strftime('%Y-%m-%d %H:%M:%S')}"
|
||||
)
|
||||
lines.append(
|
||||
f"[dim]Last accessed:[/] "
|
||||
f"{record.last_accessed.strftime('%Y-%m-%d %H:%M:%S')}"
|
||||
)
|
||||
lines.append(
|
||||
f"[dim]Categories:[/] "
|
||||
f"{', '.join(record.categories) if record.categories else 'none'}"
|
||||
)
|
||||
lines.append(f"[dim]Source:[/] {record.source or '-'}")
|
||||
lines.append(f"[dim]Private:[/] {'Yes' if record.private else 'No'}")
|
||||
|
||||
# -- Content block --
|
||||
lines.append(f"\n{sep}")
|
||||
lines.append("[bold]Content[/]\n")
|
||||
lines.append(record.content)
|
||||
|
||||
# -- Metadata block --
|
||||
if record.metadata:
|
||||
lines.append(f"\n{sep}")
|
||||
lines.append("[bold]Metadata[/]\n")
|
||||
for k, v in record.metadata.items():
|
||||
lines.append(f"[dim]{k}:[/] {v}")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
# -- Event handlers ------------------------------------------------------
|
||||
|
||||
def on_tree_node_selected(self, event: Tree.NodeSelected[str]) -> None:
|
||||
"""Load entries for the selected scope and populate the OptionList."""
|
||||
path = event.node.data if event.node.data is not None else "/"
|
||||
self._selected_scope = path
|
||||
self._view_mode = "list"
|
||||
panel = self.query_one("#info-panel", Static)
|
||||
if self._memory is None:
|
||||
panel.update(self._init_error or "No memory loaded.")
|
||||
return
|
||||
info = self._memory.info(path)
|
||||
self._last_scope_info = info
|
||||
self._entries = self._memory.list_records(scope=path, limit=200)
|
||||
panel.update(_format_scope_info(info))
|
||||
panel.border_title = "Detail"
|
||||
entry_list = self.query_one("#entry-list", OptionList)
|
||||
entry_list.border_title = f"Entries ({len(self._entries)})"
|
||||
self._populate_entry_list()
|
||||
|
||||
def on_option_list_option_highlighted(
|
||||
self, event: OptionList.OptionHighlighted
|
||||
) -> None:
|
||||
"""Live-update the info panel with the detail of the highlighted entry."""
|
||||
panel = self.query_one("#info-panel", Static)
|
||||
idx = event.option_index
|
||||
|
||||
if self._view_mode == "list":
|
||||
if idx < len(self._entries):
|
||||
record = self._entries[idx]
|
||||
total = len(self._entries)
|
||||
context = (
|
||||
f"[bold {_PRIMARY}]Entry {idx + 1} of {total}[/] "
|
||||
f"[dim]in[/] [bold]{self._selected_scope}[/]"
|
||||
)
|
||||
panel.border_title = f"Entry {idx + 1} of {total}"
|
||||
panel.update(self._format_record_detail(record, context_line=context))
|
||||
|
||||
elif self._view_mode == "recall":
|
||||
if idx < len(self._recall_matches):
|
||||
match = self._recall_matches[idx]
|
||||
total = len(self._recall_matches)
|
||||
panel.border_title = f"Match {idx + 1} of {total}"
|
||||
score_color = _PRIMARY if match.score >= 0.5 else "dim"
|
||||
header_lines: list[str] = [
|
||||
f"[bold {_PRIMARY}]Recall Match {idx + 1} of {total}[/]\n",
|
||||
f"[dim]Score:[/] [{score_color}][bold]{match.score:.2f}[/][/]",
|
||||
(
|
||||
f"[dim]Match reasons:[/] "
|
||||
f"{', '.join(match.match_reasons) if match.match_reasons else '-'}"
|
||||
),
|
||||
(
|
||||
f"[dim]Evidence gaps:[/] "
|
||||
f"{', '.join(match.evidence_gaps) if match.evidence_gaps else 'none'}"
|
||||
),
|
||||
f"\n[bold {_PRIMARY}]{'─' * 44}[/]",
|
||||
]
|
||||
record_detail = self._format_record_detail(match.record)
|
||||
header_lines.append(record_detail)
|
||||
panel.update("\n".join(header_lines))
|
||||
|
||||
def on_input_submitted(self, event: Input.Submitted) -> None:
|
||||
query = event.value.strip()
|
||||
if not query:
|
||||
return
|
||||
if self._memory is None:
|
||||
panel = self.query_one("#info-panel", Static)
|
||||
panel.update(self._init_error or "No memory loaded. Cannot recall.")
|
||||
return
|
||||
self.run_worker(self._do_recall(query), exclusive=True)
|
||||
|
||||
async def _do_recall(self, query: str) -> None:
|
||||
"""Execute a recall query and display results in the OptionList."""
|
||||
panel = self.query_one("#info-panel", Static)
|
||||
panel.loading = True
|
||||
try:
|
||||
scope = (
|
||||
self._selected_scope
|
||||
if self._selected_scope != "/"
|
||||
else None
|
||||
)
|
||||
loop = asyncio.get_event_loop()
|
||||
matches = await loop.run_in_executor(
|
||||
None,
|
||||
lambda: self._memory.recall(
|
||||
query, scope=scope, limit=10, depth="deep"
|
||||
),
|
||||
)
|
||||
self._recall_matches = matches or []
|
||||
self._view_mode = "recall"
|
||||
|
||||
if not self._recall_matches:
|
||||
panel.update("[dim]No memories found.[/]")
|
||||
self.query_one("#entry-list", OptionList).clear_options()
|
||||
return
|
||||
|
||||
info_lines: list[str] = []
|
||||
if not self._custom_embedder:
|
||||
info_lines.append(
|
||||
"[dim italic]Note: Using default OpenAI embedder. "
|
||||
"If memories were created with a different embedder, "
|
||||
"pass --embedder-provider to match.[/]\n"
|
||||
)
|
||||
info_lines.append(
|
||||
f"[bold]Recall Results[/] [dim]"
|
||||
f"({len(self._recall_matches)} matches)[/]\n"
|
||||
f"[dim]Navigate the list below to view details.[/]"
|
||||
)
|
||||
panel.update("\n".join(info_lines))
|
||||
panel.border_title = "Recall Detail"
|
||||
entry_list = self.query_one("#entry-list", OptionList)
|
||||
entry_list.border_title = f"Recall Results ({len(self._recall_matches)})"
|
||||
self._populate_recall_list()
|
||||
except Exception as e:
|
||||
panel.update(f"[bold red]Error:[/] {e}")
|
||||
finally:
|
||||
panel.loading = False
|
||||
@@ -1,4 +1,4 @@
|
||||
from requests import HTTPError
|
||||
from httpx import HTTPStatusError
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
|
||||
@@ -10,11 +10,11 @@ console = Console()
|
||||
|
||||
|
||||
class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
def __init__(self):
|
||||
def __init__(self) -> None:
|
||||
BaseCommand.__init__(self)
|
||||
PlusAPIMixin.__init__(self, telemetry=self._telemetry)
|
||||
|
||||
def list(self):
|
||||
def list(self) -> None:
|
||||
try:
|
||||
response = self.plus_api_client.get_organizations()
|
||||
response.raise_for_status()
|
||||
@@ -33,7 +33,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
table.add_row(org["name"], org["uuid"])
|
||||
|
||||
console.print(table)
|
||||
except HTTPError as e:
|
||||
except HTTPStatusError as e:
|
||||
if e.response.status_code == 401:
|
||||
console.print(
|
||||
"You are not logged in to any organization. Use 'crewai login' to login.",
|
||||
@@ -50,7 +50,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
)
|
||||
raise SystemExit(1) from e
|
||||
|
||||
def switch(self, org_id):
|
||||
def switch(self, org_id: str) -> None:
|
||||
try:
|
||||
response = self.plus_api_client.get_organizations()
|
||||
response.raise_for_status()
|
||||
@@ -72,7 +72,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
f"Successfully switched to {org['name']} ({org['uuid']})",
|
||||
style="bold green",
|
||||
)
|
||||
except HTTPError as e:
|
||||
except HTTPStatusError as e:
|
||||
if e.response.status_code == 401:
|
||||
console.print(
|
||||
"You are not logged in to any organization. Use 'crewai login' to login.",
|
||||
@@ -87,7 +87,7 @@ class OrganizationCommand(BaseCommand, PlusAPIMixin):
|
||||
console.print(f"Failed to switch organization: {e!s}", style="bold red")
|
||||
raise SystemExit(1) from e
|
||||
|
||||
def current(self):
|
||||
def current(self) -> None:
|
||||
settings = Settings()
|
||||
if settings.org_uuid:
|
||||
console.print(
|
||||
|
||||
@@ -3,7 +3,6 @@ from typing import Any
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import httpx
|
||||
import requests
|
||||
|
||||
from crewai.cli.config import Settings
|
||||
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
|
||||
@@ -43,16 +42,16 @@ class PlusAPI:
|
||||
|
||||
def _make_request(
|
||||
self, method: str, endpoint: str, **kwargs: Any
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
url = urljoin(self.base_url, endpoint)
|
||||
session = requests.Session()
|
||||
session.trust_env = False
|
||||
return session.request(method, url, headers=self.headers, **kwargs)
|
||||
verify = kwargs.pop("verify", True)
|
||||
with httpx.Client(trust_env=False, verify=verify) as client:
|
||||
return client.request(method, url, headers=self.headers, **kwargs)
|
||||
|
||||
def login_to_tool_repository(self) -> requests.Response:
|
||||
def login_to_tool_repository(self) -> httpx.Response:
|
||||
return self._make_request("POST", f"{self.TOOLS_RESOURCE}/login")
|
||||
|
||||
def get_tool(self, handle: str) -> requests.Response:
|
||||
def get_tool(self, handle: str) -> httpx.Response:
|
||||
return self._make_request("GET", f"{self.TOOLS_RESOURCE}/{handle}")
|
||||
|
||||
async def get_agent(self, handle: str) -> httpx.Response:
|
||||
@@ -68,7 +67,7 @@ class PlusAPI:
|
||||
description: str | None,
|
||||
encoded_file: str,
|
||||
available_exports: list[dict[str, Any]] | None = None,
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
params = {
|
||||
"handle": handle,
|
||||
"public": is_public,
|
||||
@@ -79,54 +78,52 @@ class PlusAPI:
|
||||
}
|
||||
return self._make_request("POST", f"{self.TOOLS_RESOURCE}", json=params)
|
||||
|
||||
def deploy_by_name(self, project_name: str) -> requests.Response:
|
||||
def deploy_by_name(self, project_name: str) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST", f"{self.CREWS_RESOURCE}/by-name/{project_name}/deploy"
|
||||
)
|
||||
|
||||
def deploy_by_uuid(self, uuid: str) -> requests.Response:
|
||||
def deploy_by_uuid(self, uuid: str) -> httpx.Response:
|
||||
return self._make_request("POST", f"{self.CREWS_RESOURCE}/{uuid}/deploy")
|
||||
|
||||
def crew_status_by_name(self, project_name: str) -> requests.Response:
|
||||
def crew_status_by_name(self, project_name: str) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"GET", f"{self.CREWS_RESOURCE}/by-name/{project_name}/status"
|
||||
)
|
||||
|
||||
def crew_status_by_uuid(self, uuid: str) -> requests.Response:
|
||||
def crew_status_by_uuid(self, uuid: str) -> httpx.Response:
|
||||
return self._make_request("GET", f"{self.CREWS_RESOURCE}/{uuid}/status")
|
||||
|
||||
def crew_by_name(
|
||||
self, project_name: str, log_type: str = "deployment"
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"GET", f"{self.CREWS_RESOURCE}/by-name/{project_name}/logs/{log_type}"
|
||||
)
|
||||
|
||||
def crew_by_uuid(
|
||||
self, uuid: str, log_type: str = "deployment"
|
||||
) -> requests.Response:
|
||||
def crew_by_uuid(self, uuid: str, log_type: str = "deployment") -> httpx.Response:
|
||||
return self._make_request(
|
||||
"GET", f"{self.CREWS_RESOURCE}/{uuid}/logs/{log_type}"
|
||||
)
|
||||
|
||||
def delete_crew_by_name(self, project_name: str) -> requests.Response:
|
||||
def delete_crew_by_name(self, project_name: str) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"DELETE", f"{self.CREWS_RESOURCE}/by-name/{project_name}"
|
||||
)
|
||||
|
||||
def delete_crew_by_uuid(self, uuid: str) -> requests.Response:
|
||||
def delete_crew_by_uuid(self, uuid: str) -> httpx.Response:
|
||||
return self._make_request("DELETE", f"{self.CREWS_RESOURCE}/{uuid}")
|
||||
|
||||
def list_crews(self) -> requests.Response:
|
||||
def list_crews(self) -> httpx.Response:
|
||||
return self._make_request("GET", self.CREWS_RESOURCE)
|
||||
|
||||
def create_crew(self, payload: dict[str, Any]) -> requests.Response:
|
||||
def create_crew(self, payload: dict[str, Any]) -> httpx.Response:
|
||||
return self._make_request("POST", self.CREWS_RESOURCE, json=payload)
|
||||
|
||||
def get_organizations(self) -> requests.Response:
|
||||
def get_organizations(self) -> httpx.Response:
|
||||
return self._make_request("GET", self.ORGANIZATIONS_RESOURCE)
|
||||
|
||||
def initialize_trace_batch(self, payload: dict[str, Any]) -> requests.Response:
|
||||
def initialize_trace_batch(self, payload: dict[str, Any]) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.TRACING_RESOURCE}/batches",
|
||||
@@ -136,7 +133,7 @@ class PlusAPI:
|
||||
|
||||
def initialize_ephemeral_trace_batch(
|
||||
self, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches",
|
||||
@@ -145,7 +142,7 @@ class PlusAPI:
|
||||
|
||||
def send_trace_events(
|
||||
self, trace_batch_id: str, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/events",
|
||||
@@ -155,7 +152,7 @@ class PlusAPI:
|
||||
|
||||
def send_ephemeral_trace_events(
|
||||
self, trace_batch_id: str, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"POST",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/events",
|
||||
@@ -165,7 +162,7 @@ class PlusAPI:
|
||||
|
||||
def finalize_trace_batch(
|
||||
self, trace_batch_id: str, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
|
||||
@@ -175,7 +172,7 @@ class PlusAPI:
|
||||
|
||||
def finalize_ephemeral_trace_batch(
|
||||
self, trace_batch_id: str, payload: dict[str, Any]
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.EPHEMERAL_TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
|
||||
@@ -185,7 +182,7 @@ class PlusAPI:
|
||||
|
||||
def mark_trace_batch_as_failed(
|
||||
self, trace_batch_id: str, error_message: str
|
||||
) -> requests.Response:
|
||||
) -> httpx.Response:
|
||||
return self._make_request(
|
||||
"PATCH",
|
||||
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}",
|
||||
@@ -193,13 +190,11 @@ class PlusAPI:
|
||||
timeout=30,
|
||||
)
|
||||
|
||||
def get_triggers(self) -> requests.Response:
|
||||
def get_triggers(self) -> httpx.Response:
|
||||
"""Get all available triggers from integrations."""
|
||||
return self._make_request("GET", f"{self.INTEGRATIONS_RESOURCE}/apps")
|
||||
|
||||
def get_trigger_payload(
|
||||
self, app_slug: str, trigger_slug: str
|
||||
) -> requests.Response:
|
||||
def get_trigger_payload(self, app_slug: str, trigger_slug: str) -> httpx.Response:
|
||||
"""Get sample payload for a specific trigger."""
|
||||
return self._make_request(
|
||||
"GET", f"{self.INTEGRATIONS_RESOURCE}/{app_slug}/{trigger_slug}/payload"
|
||||
|
||||
@@ -8,7 +8,7 @@ from typing import Any
|
||||
|
||||
import certifi
|
||||
import click
|
||||
import requests
|
||||
import httpx
|
||||
|
||||
from crewai.cli.constants import JSON_URL, MODELS, PROVIDERS
|
||||
|
||||
@@ -165,20 +165,20 @@ def fetch_provider_data(cache_file: Path) -> dict[str, Any] | None:
|
||||
ssl_config = os.environ["SSL_CERT_FILE"] = certifi.where()
|
||||
|
||||
try:
|
||||
response = requests.get(JSON_URL, stream=True, timeout=60, verify=ssl_config)
|
||||
response.raise_for_status()
|
||||
data = download_data(response)
|
||||
with open(cache_file, "w") as f:
|
||||
json.dump(data, f)
|
||||
return data
|
||||
except requests.RequestException as e:
|
||||
with httpx.stream("GET", JSON_URL, timeout=60, verify=ssl_config) as response:
|
||||
response.raise_for_status()
|
||||
data = download_data(response)
|
||||
with open(cache_file, "w") as f:
|
||||
json.dump(data, f)
|
||||
return data
|
||||
except httpx.HTTPError as e:
|
||||
click.secho(f"Error fetching provider data: {e}", fg="red")
|
||||
except json.JSONDecodeError:
|
||||
click.secho("Error parsing provider data. Invalid JSON format.", fg="red")
|
||||
return None
|
||||
|
||||
|
||||
def download_data(response: requests.Response) -> dict[str, Any]:
|
||||
def download_data(response: httpx.Response) -> dict[str, Any]:
|
||||
"""Downloads data from a given HTTP response and returns the JSON content.
|
||||
|
||||
Args:
|
||||
@@ -194,7 +194,7 @@ def download_data(response: requests.Response) -> dict[str, Any]:
|
||||
with click.progressbar(
|
||||
length=total_size, label="Downloading", show_pos=True
|
||||
) as bar:
|
||||
for chunk in response.iter_content(block_size):
|
||||
for chunk in response.iter_bytes(block_size):
|
||||
if chunk:
|
||||
data_chunks.append(chunk)
|
||||
bar.update(len(chunk))
|
||||
|
||||
@@ -2,43 +2,61 @@ import subprocess
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.utils import get_crews
|
||||
from crewai.cli.utils import get_crews, get_flows
|
||||
from crewai.flow import Flow
|
||||
|
||||
|
||||
def _reset_flow_memory(flow: Flow) -> None:
|
||||
"""Reset memory for a single flow instance.
|
||||
|
||||
Handles Memory, MemoryScope (both have .reset()), and MemorySlice
|
||||
(delegates to the underlying ._memory). Silently succeeds when the
|
||||
storage directory does not exist yet (nothing to reset).
|
||||
|
||||
Args:
|
||||
flow: The flow instance whose memory should be reset.
|
||||
"""
|
||||
mem = flow.memory
|
||||
if mem is None:
|
||||
return
|
||||
try:
|
||||
if hasattr(mem, "reset"):
|
||||
mem.reset()
|
||||
elif hasattr(mem, "_memory") and hasattr(mem._memory, "reset"):
|
||||
mem._memory.reset()
|
||||
except (FileNotFoundError, OSError):
|
||||
pass
|
||||
|
||||
|
||||
def reset_memories_command(
|
||||
long,
|
||||
short,
|
||||
entity,
|
||||
knowledge,
|
||||
agent_knowledge,
|
||||
kickoff_outputs,
|
||||
all,
|
||||
memory: bool,
|
||||
knowledge: bool,
|
||||
agent_knowledge: bool,
|
||||
kickoff_outputs: bool,
|
||||
all: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Reset the crew memories.
|
||||
"""Reset the crew and flow memories.
|
||||
|
||||
Args:
|
||||
long (bool): Whether to reset the long-term memory.
|
||||
short (bool): Whether to reset the short-term memory.
|
||||
entity (bool): Whether to reset the entity memory.
|
||||
kickoff_outputs (bool): Whether to reset the latest kickoff task outputs.
|
||||
all (bool): Whether to reset all memories.
|
||||
knowledge (bool): Whether to reset the knowledge.
|
||||
agent_knowledge (bool): Whether to reset the agents knowledge.
|
||||
memory: Whether to reset the unified memory.
|
||||
knowledge: Whether to reset the knowledge.
|
||||
agent_knowledge: Whether to reset the agents knowledge.
|
||||
kickoff_outputs: Whether to reset the latest kickoff task outputs.
|
||||
all: Whether to reset all memories.
|
||||
"""
|
||||
|
||||
try:
|
||||
if not any(
|
||||
[long, short, entity, kickoff_outputs, knowledge, agent_knowledge, all]
|
||||
):
|
||||
if not any([memory, kickoff_outputs, knowledge, agent_knowledge, all]):
|
||||
click.echo(
|
||||
"No memory type specified. Please specify at least one type to reset."
|
||||
)
|
||||
return
|
||||
|
||||
crews = get_crews()
|
||||
if not crews:
|
||||
raise ValueError("No crew found.")
|
||||
flows = get_flows()
|
||||
|
||||
if not crews and not flows:
|
||||
raise ValueError("No crew or flow found.")
|
||||
|
||||
for crew in crews:
|
||||
if all:
|
||||
crew.reset_memories(command_type="all")
|
||||
@@ -46,20 +64,10 @@ def reset_memories_command(
|
||||
f"[Crew ({crew.name if crew.name else crew.id})] Reset memories command has been completed."
|
||||
)
|
||||
continue
|
||||
if long:
|
||||
crew.reset_memories(command_type="long")
|
||||
if memory:
|
||||
crew.reset_memories(command_type="memory")
|
||||
click.echo(
|
||||
f"[Crew ({crew.name if crew.name else crew.id})] Long term memory has been reset."
|
||||
)
|
||||
if short:
|
||||
crew.reset_memories(command_type="short")
|
||||
click.echo(
|
||||
f"[Crew ({crew.name if crew.name else crew.id})] Short term memory has been reset."
|
||||
)
|
||||
if entity:
|
||||
crew.reset_memories(command_type="entity")
|
||||
click.echo(
|
||||
f"[Crew ({crew.name if crew.name else crew.id})] Entity memory has been reset."
|
||||
f"[Crew ({crew.name if crew.name else crew.id})] Memory has been reset."
|
||||
)
|
||||
if kickoff_outputs:
|
||||
crew.reset_memories(command_type="kickoff_outputs")
|
||||
@@ -77,6 +85,20 @@ def reset_memories_command(
|
||||
f"[Crew ({crew.name if crew.name else crew.id})] Agents knowledge has been reset."
|
||||
)
|
||||
|
||||
for flow in flows:
|
||||
flow_name = flow.name or flow.__class__.__name__
|
||||
if all:
|
||||
_reset_flow_memory(flow)
|
||||
click.echo(
|
||||
f"[Flow ({flow_name})] Reset memories command has been completed."
|
||||
)
|
||||
continue
|
||||
if memory:
|
||||
_reset_flow_memory(flow)
|
||||
click.echo(
|
||||
f"[Flow ({flow_name})] Memory has been reset."
|
||||
)
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
click.echo(f"An error occurred while resetting the memories: {e}", err=True)
|
||||
click.echo(e.output, err=True)
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from typing import List
|
||||
# If you want to run a snippet of code before or after the crew starts,
|
||||
# you can use the @before_kickoff and @after_kickoff decorators
|
||||
# https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators
|
||||
@@ -10,8 +9,8 @@ from typing import List
|
||||
class {{crew_name}}():
|
||||
"""{{crew_name}} crew"""
|
||||
|
||||
agents: List[BaseAgent]
|
||||
tasks: List[Task]
|
||||
agents: list[BaseAgent]
|
||||
tasks: list[Task]
|
||||
|
||||
# Learn more about YAML configuration files here:
|
||||
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
from typing import List
|
||||
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
@@ -13,8 +11,8 @@ from crewai.project import CrewBase, agent, crew, task
|
||||
class PoemCrew:
|
||||
"""Poem Crew"""
|
||||
|
||||
agents: List[BaseAgent]
|
||||
tasks: List[Task]
|
||||
agents: list[BaseAgent]
|
||||
tasks: list[Task]
|
||||
|
||||
# Learn more about YAML configuration files here:
|
||||
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
|
||||
|
||||
@@ -386,6 +386,109 @@ def fetch_crews(module_attr: Any) -> list[Crew]:
|
||||
return crew_instances
|
||||
|
||||
|
||||
def get_flow_instance(module_attr: Any) -> Flow | None:
|
||||
"""Check if a module attribute is a user-defined Flow subclass and return an instance.
|
||||
|
||||
Args:
|
||||
module_attr: An attribute from a loaded module.
|
||||
|
||||
Returns:
|
||||
A Flow instance if the attribute is a valid user-defined Flow subclass,
|
||||
None otherwise.
|
||||
"""
|
||||
if (
|
||||
isinstance(module_attr, type)
|
||||
and issubclass(module_attr, Flow)
|
||||
and module_attr is not Flow
|
||||
):
|
||||
try:
|
||||
return module_attr()
|
||||
except Exception:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
_SKIP_DIRS = frozenset(
|
||||
{".venv", "venv", ".git", "__pycache__", "node_modules", ".tox", ".nox"}
|
||||
)
|
||||
|
||||
|
||||
def get_flows(flow_path: str = "main.py") -> list[Flow]:
|
||||
"""Get the flow instances from project files.
|
||||
|
||||
Walks the project directory looking for files matching ``flow_path``
|
||||
(default ``main.py``), loads each module, and extracts Flow subclass
|
||||
instances. Directories that are clearly not user source code (virtual
|
||||
environments, ``.git``, etc.) are pruned to avoid noisy import errors.
|
||||
|
||||
Args:
|
||||
flow_path: Filename to search for (default ``main.py``).
|
||||
|
||||
Returns:
|
||||
A list of discovered Flow instances.
|
||||
"""
|
||||
flow_instances: list[Flow] = []
|
||||
try:
|
||||
current_dir = os.getcwd()
|
||||
if current_dir not in sys.path:
|
||||
sys.path.insert(0, current_dir)
|
||||
|
||||
src_dir = os.path.join(current_dir, "src")
|
||||
if os.path.isdir(src_dir) and src_dir not in sys.path:
|
||||
sys.path.insert(0, src_dir)
|
||||
|
||||
search_paths = [".", "src"] if os.path.isdir("src") else ["."]
|
||||
|
||||
for search_path in search_paths:
|
||||
for root, dirs, files in os.walk(search_path):
|
||||
dirs[:] = [
|
||||
d
|
||||
for d in dirs
|
||||
if d not in _SKIP_DIRS and not d.startswith(".")
|
||||
]
|
||||
if flow_path in files and "cli/templates" not in root:
|
||||
file_os_path = os.path.join(root, flow_path)
|
||||
try:
|
||||
spec = importlib.util.spec_from_file_location(
|
||||
"flow_module", file_os_path
|
||||
)
|
||||
if not spec or not spec.loader:
|
||||
continue
|
||||
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
sys.modules[spec.name] = module
|
||||
|
||||
try:
|
||||
spec.loader.exec_module(module)
|
||||
|
||||
for attr_name in dir(module):
|
||||
module_attr = getattr(module, attr_name)
|
||||
try:
|
||||
if flow_instance := get_flow_instance(
|
||||
module_attr
|
||||
):
|
||||
flow_instances.append(flow_instance)
|
||||
except Exception: # noqa: S112
|
||||
continue
|
||||
|
||||
if flow_instances:
|
||||
break
|
||||
|
||||
except Exception: # noqa: S112
|
||||
continue
|
||||
|
||||
except (ImportError, AttributeError):
|
||||
continue
|
||||
|
||||
if flow_instances:
|
||||
break
|
||||
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
|
||||
return flow_instances
|
||||
|
||||
|
||||
def is_valid_tool(obj: Any) -> bool:
|
||||
from crewai.tools.base_tool import Tool
|
||||
|
||||
|
||||
@@ -83,10 +83,6 @@ from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.llm import LLM
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.memory.entity.entity_memory import EntityMemory
|
||||
from crewai.memory.external.external_memory import ExternalMemory
|
||||
from crewai.memory.long_term.long_term_memory import LongTermMemory
|
||||
from crewai.memory.short_term.short_term_memory import ShortTermMemory
|
||||
from crewai.process import Process
|
||||
from crewai.rag.embeddings.types import EmbedderConfig
|
||||
from crewai.rag.types import SearchResult
|
||||
@@ -174,10 +170,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
_logger: Logger = PrivateAttr()
|
||||
_file_handler: FileHandler = PrivateAttr()
|
||||
_cache_handler: InstanceOf[CacheHandler] = PrivateAttr(default_factory=CacheHandler)
|
||||
_short_term_memory: InstanceOf[ShortTermMemory] | None = PrivateAttr()
|
||||
_long_term_memory: InstanceOf[LongTermMemory] | None = PrivateAttr()
|
||||
_entity_memory: InstanceOf[EntityMemory] | None = PrivateAttr()
|
||||
_external_memory: InstanceOf[ExternalMemory] | None = PrivateAttr()
|
||||
_memory: Any = PrivateAttr(default=None) # Unified Memory | MemoryScope
|
||||
_train: bool | None = PrivateAttr(default=False)
|
||||
_train_iteration: int | None = PrivateAttr()
|
||||
_inputs: dict[str, Any] | None = PrivateAttr(default=None)
|
||||
@@ -195,25 +188,12 @@ class Crew(FlowTrackable, BaseModel):
|
||||
agents: list[BaseAgent] = Field(default_factory=list)
|
||||
process: Process = Field(default=Process.sequential)
|
||||
verbose: bool = Field(default=False)
|
||||
memory: bool = Field(
|
||||
memory: bool | Any = Field(
|
||||
default=False,
|
||||
description="If crew should use memory to store memories of it's execution",
|
||||
)
|
||||
short_term_memory: InstanceOf[ShortTermMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the ShortTermMemory to be used by the Crew",
|
||||
)
|
||||
long_term_memory: InstanceOf[LongTermMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the LongTermMemory to be used by the Crew",
|
||||
)
|
||||
entity_memory: InstanceOf[EntityMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the EntityMemory to be used by the Crew",
|
||||
)
|
||||
external_memory: InstanceOf[ExternalMemory] | None = Field(
|
||||
default=None,
|
||||
description="An Instance of the ExternalMemory to be used by the Crew",
|
||||
description=(
|
||||
"Enable crew memory. Pass True for default Memory(), "
|
||||
"or a Memory/MemoryScope/MemorySlice instance for custom configuration."
|
||||
),
|
||||
)
|
||||
embedder: EmbedderConfig | None = Field(
|
||||
default=None,
|
||||
@@ -372,31 +352,23 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
return self
|
||||
|
||||
def _initialize_default_memories(self) -> None:
|
||||
self._long_term_memory = self._long_term_memory or LongTermMemory()
|
||||
self._short_term_memory = self._short_term_memory or ShortTermMemory(
|
||||
crew=self,
|
||||
embedder_config=self.embedder,
|
||||
)
|
||||
self._entity_memory = self.entity_memory or EntityMemory(
|
||||
crew=self, embedder_config=self.embedder
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def create_crew_memory(self) -> Crew:
|
||||
"""Initialize private memory attributes."""
|
||||
self._external_memory = (
|
||||
# External memory does not support a default value since it was
|
||||
# designed to be managed entirely externally
|
||||
self.external_memory.set_crew(self) if self.external_memory else None
|
||||
)
|
||||
"""Initialize unified memory, respecting crew embedder config."""
|
||||
if self.memory is True:
|
||||
from crewai.memory.unified_memory import Memory
|
||||
|
||||
self._long_term_memory = self.long_term_memory
|
||||
self._short_term_memory = self.short_term_memory
|
||||
self._entity_memory = self.entity_memory
|
||||
embedder = None
|
||||
if self.embedder is not None:
|
||||
from crewai.rag.embeddings.factory import build_embedder
|
||||
|
||||
if self.memory:
|
||||
self._initialize_default_memories()
|
||||
embedder = build_embedder(self.embedder)
|
||||
self._memory = Memory(embedder=embedder)
|
||||
elif self.memory:
|
||||
# User passed a Memory / MemoryScope / MemorySlice instance
|
||||
self._memory = self.memory
|
||||
else:
|
||||
self._memory = None
|
||||
|
||||
return self
|
||||
|
||||
@@ -768,6 +740,9 @@ class Crew(FlowTrackable, BaseModel):
|
||||
)
|
||||
raise
|
||||
finally:
|
||||
# Ensure all background memory saves complete before returning
|
||||
if self._memory is not None and hasattr(self._memory, "drain_writes"):
|
||||
self._memory.drain_writes()
|
||||
clear_files(self.id)
|
||||
detach(token)
|
||||
|
||||
@@ -1323,6 +1298,11 @@ class Crew(FlowTrackable, BaseModel):
|
||||
if agent and (hasattr(agent, "mcps") and getattr(agent, "mcps", None)):
|
||||
tools = self._add_mcp_tools(task, tools)
|
||||
|
||||
# Add memory tools if memory is available (agent or crew level)
|
||||
resolved_memory = getattr(agent, "memory", None) or self._memory
|
||||
if resolved_memory is not None:
|
||||
tools = self._add_memory_tools(tools, resolved_memory)
|
||||
|
||||
files = get_all_files(self.id, task.id)
|
||||
if files:
|
||||
supported_types: list[str] = []
|
||||
@@ -1430,6 +1410,22 @@ class Crew(FlowTrackable, BaseModel):
|
||||
return self._merge_tools(tools, cast(list[BaseTool], code_tools))
|
||||
return tools
|
||||
|
||||
def _add_memory_tools(
|
||||
self, tools: list[BaseTool], memory: Any
|
||||
) -> list[BaseTool]:
|
||||
"""Add recall and remember tools when memory is available.
|
||||
|
||||
Args:
|
||||
tools: Current list of tools.
|
||||
memory: The resolved Memory, MemoryScope, or MemorySlice instance.
|
||||
|
||||
Returns:
|
||||
Updated list with memory tools added.
|
||||
"""
|
||||
from crewai.tools.memory_tools import create_memory_tools
|
||||
|
||||
return self._merge_tools(tools, create_memory_tools(memory))
|
||||
|
||||
def _add_file_tools(
|
||||
self, tools: list[BaseTool], files: dict[str, Any]
|
||||
) -> list[BaseTool]:
|
||||
@@ -1674,10 +1670,7 @@ class Crew(FlowTrackable, BaseModel):
|
||||
"_execution_span",
|
||||
"_file_handler",
|
||||
"_cache_handler",
|
||||
"_short_term_memory",
|
||||
"_long_term_memory",
|
||||
"_entity_memory",
|
||||
"_external_memory",
|
||||
"_memory",
|
||||
"agents",
|
||||
"tasks",
|
||||
"knowledge_sources",
|
||||
@@ -1711,18 +1704,8 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
copied_data = self.model_dump(exclude=exclude)
|
||||
copied_data = {k: v for k, v in copied_data.items() if v is not None}
|
||||
if self.short_term_memory:
|
||||
copied_data["short_term_memory"] = self.short_term_memory.model_copy(
|
||||
deep=True
|
||||
)
|
||||
if self.long_term_memory:
|
||||
copied_data["long_term_memory"] = self.long_term_memory.model_copy(
|
||||
deep=True
|
||||
)
|
||||
if self.entity_memory:
|
||||
copied_data["entity_memory"] = self.entity_memory.model_copy(deep=True)
|
||||
if self.external_memory:
|
||||
copied_data["external_memory"] = self.external_memory.model_copy(deep=True)
|
||||
if getattr(self, "_memory", None):
|
||||
copied_data["memory"] = self._memory
|
||||
|
||||
copied_data.pop("agents", None)
|
||||
copied_data.pop("tasks", None)
|
||||
@@ -1853,23 +1836,24 @@ class Crew(FlowTrackable, BaseModel):
|
||||
|
||||
Args:
|
||||
command_type: Type of memory to reset.
|
||||
Valid options: 'long', 'short', 'entity', 'knowledge', 'agent_knowledge'
|
||||
'kickoff_outputs', or 'all'
|
||||
Valid options: 'memory', 'knowledge', 'agent_knowledge',
|
||||
'kickoff_outputs', or 'all'. Legacy names 'long', 'short',
|
||||
'entity', 'external' are treated as 'memory'.
|
||||
|
||||
Raises:
|
||||
ValueError: If an invalid command type is provided.
|
||||
RuntimeError: If memory reset operation fails.
|
||||
"""
|
||||
legacy_memory = frozenset(["long", "short", "entity", "external"])
|
||||
if command_type in legacy_memory:
|
||||
command_type = "memory"
|
||||
valid_types = frozenset(
|
||||
[
|
||||
"long",
|
||||
"short",
|
||||
"entity",
|
||||
"memory",
|
||||
"knowledge",
|
||||
"agent_knowledge",
|
||||
"kickoff_outputs",
|
||||
"all",
|
||||
"external",
|
||||
]
|
||||
)
|
||||
|
||||
@@ -1975,25 +1959,10 @@ class Crew(FlowTrackable, BaseModel):
|
||||
) + agent_knowledges
|
||||
|
||||
return {
|
||||
"short": {
|
||||
"system": getattr(self, "_short_term_memory", None),
|
||||
"memory": {
|
||||
"system": getattr(self, "_memory", None),
|
||||
"reset": default_reset,
|
||||
"name": "Short Term",
|
||||
},
|
||||
"entity": {
|
||||
"system": getattr(self, "_entity_memory", None),
|
||||
"reset": default_reset,
|
||||
"name": "Entity",
|
||||
},
|
||||
"external": {
|
||||
"system": getattr(self, "_external_memory", None),
|
||||
"reset": default_reset,
|
||||
"name": "External",
|
||||
},
|
||||
"long": {
|
||||
"system": getattr(self, "_long_term_memory", None),
|
||||
"reset": default_reset,
|
||||
"name": "Long Term",
|
||||
"name": "Memory",
|
||||
},
|
||||
"kickoff_outputs": {
|
||||
"system": getattr(self, "_task_output_handler", None),
|
||||
|
||||
@@ -120,6 +120,52 @@ class FlowPlotEvent(FlowEvent):
|
||||
type: str = "flow_plot"
|
||||
|
||||
|
||||
class FlowInputRequestedEvent(FlowEvent):
|
||||
"""Event emitted when a flow requests user input via ``Flow.ask()``.
|
||||
|
||||
This event is emitted before the flow suspends waiting for user input,
|
||||
allowing UI frameworks and observability tools to know when a flow
|
||||
needs user interaction.
|
||||
|
||||
Attributes:
|
||||
flow_name: Name of the flow requesting input.
|
||||
method_name: Name of the flow method that called ``ask()``.
|
||||
message: The question or prompt being shown to the user.
|
||||
metadata: Optional metadata sent with the question (e.g., user ID,
|
||||
channel, session context).
|
||||
"""
|
||||
|
||||
method_name: str
|
||||
message: str
|
||||
metadata: dict[str, Any] | None = None
|
||||
type: str = "flow_input_requested"
|
||||
|
||||
|
||||
class FlowInputReceivedEvent(FlowEvent):
|
||||
"""Event emitted when user input is received after ``Flow.ask()``.
|
||||
|
||||
This event is emitted after the user provides input (or the request
|
||||
times out), allowing UI frameworks and observability tools to track
|
||||
input collection.
|
||||
|
||||
Attributes:
|
||||
flow_name: Name of the flow that received input.
|
||||
method_name: Name of the flow method that called ``ask()``.
|
||||
message: The original question or prompt.
|
||||
response: The user's response, or None if timed out / unavailable.
|
||||
metadata: Optional metadata sent with the question.
|
||||
response_metadata: Optional metadata from the provider about the
|
||||
response (e.g., who responded, thread ID, timestamps).
|
||||
"""
|
||||
|
||||
method_name: str
|
||||
message: str
|
||||
response: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
response_metadata: dict[str, Any] | None = None
|
||||
type: str = "flow_input_received"
|
||||
|
||||
|
||||
class HumanFeedbackRequestedEvent(FlowEvent):
|
||||
"""Event emitted when human feedback is requested.
|
||||
|
||||
|
||||
@@ -170,16 +170,16 @@ To enable tracing, do any one of these:
|
||||
"""Create standardized status content with consistent formatting."""
|
||||
content = Text()
|
||||
content.append(f"{title}\n", style=f"{status_style} bold")
|
||||
content.append("Name: \n", style="white")
|
||||
content.append("Name: ", style="white")
|
||||
content.append(f"{name}\n", style=status_style)
|
||||
|
||||
for label, value in fields.items():
|
||||
content.append(f"{label}: \n", style="white")
|
||||
content.append(f"{label}: ", style="white")
|
||||
content.append(
|
||||
f"{value}\n", style=fields.get(f"{label}_style", status_style)
|
||||
)
|
||||
if tool_args:
|
||||
content.append("Tool Args: \n", style="white")
|
||||
content.append("Tool Args: ", style="white")
|
||||
content.append(f"{tool_args}\n", style=status_style)
|
||||
|
||||
return content
|
||||
@@ -737,6 +737,27 @@ To enable tracing, do any one of these:
|
||||
|
||||
self.print_panel(content, title, style)
|
||||
|
||||
@staticmethod
|
||||
def _simplify_tools_field(fields: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Simplify the tools field to show only tool names instead of full definitions.
|
||||
|
||||
Args:
|
||||
fields: Dictionary of fields that may contain a 'tools' key with
|
||||
full tool objects.
|
||||
|
||||
Returns:
|
||||
The fields dictionary with 'tools' replaced by a comma-separated
|
||||
string of tool names.
|
||||
"""
|
||||
if "tools" in fields:
|
||||
tools = fields["tools"]
|
||||
if tools:
|
||||
tool_names = [getattr(t, "name", str(t)) for t in tools]
|
||||
fields["tools"] = ", ".join(tool_names) if tool_names else "None"
|
||||
else:
|
||||
fields["tools"] = "None"
|
||||
return fields
|
||||
|
||||
def handle_lite_agent_execution(
|
||||
self,
|
||||
lite_agent_role: str,
|
||||
@@ -748,6 +769,8 @@ To enable tracing, do any one of these:
|
||||
if not self.verbose:
|
||||
return
|
||||
|
||||
fields = self._simplify_tools_field(fields)
|
||||
|
||||
if status == "started":
|
||||
self.create_lite_agent_branch(lite_agent_role)
|
||||
if fields:
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable, Coroutine
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from datetime import datetime
|
||||
import inspect
|
||||
import json
|
||||
import threading
|
||||
from typing import TYPE_CHECKING, Any, Literal, cast
|
||||
@@ -63,6 +66,7 @@ from crewai.utilities.agent_utils import (
|
||||
has_reached_max_iterations,
|
||||
is_context_length_exceeded,
|
||||
is_inside_event_loop,
|
||||
parse_tool_call_args,
|
||||
process_llm_response,
|
||||
track_delegation_if_needed,
|
||||
)
|
||||
@@ -668,9 +672,12 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
if not self.state.pending_tool_calls:
|
||||
return "native_tool_completed"
|
||||
|
||||
pending_tool_calls = list(self.state.pending_tool_calls)
|
||||
self.state.pending_tool_calls.clear()
|
||||
|
||||
# Group all tool calls into a single assistant message
|
||||
tool_calls_to_report = []
|
||||
for tool_call in self.state.pending_tool_calls:
|
||||
for tool_call in pending_tool_calls:
|
||||
info = extract_tool_call_info(tool_call)
|
||||
if not info:
|
||||
continue
|
||||
@@ -695,202 +702,86 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
"content": None,
|
||||
"tool_calls": tool_calls_to_report,
|
||||
}
|
||||
if all(
|
||||
type(tc).__qualname__ == "Part" for tc in self.state.pending_tool_calls
|
||||
):
|
||||
assistant_message["raw_tool_call_parts"] = list(
|
||||
self.state.pending_tool_calls
|
||||
)
|
||||
if all(type(tc).__qualname__ == "Part" for tc in pending_tool_calls):
|
||||
assistant_message["raw_tool_call_parts"] = list(pending_tool_calls)
|
||||
self.state.messages.append(assistant_message)
|
||||
|
||||
# Now execute each tool
|
||||
while self.state.pending_tool_calls:
|
||||
tool_call = self.state.pending_tool_calls.pop(0)
|
||||
info = extract_tool_call_info(tool_call)
|
||||
if not info:
|
||||
continue
|
||||
runnable_tool_calls = [
|
||||
tool_call
|
||||
for tool_call in pending_tool_calls
|
||||
if extract_tool_call_info(tool_call) is not None
|
||||
]
|
||||
should_parallelize = self._should_parallelize_native_tool_calls(
|
||||
runnable_tool_calls
|
||||
)
|
||||
|
||||
call_id, func_name, func_args = info
|
||||
|
||||
# Parse arguments
|
||||
if isinstance(func_args, str):
|
||||
try:
|
||||
args_dict = json.loads(func_args)
|
||||
except json.JSONDecodeError:
|
||||
args_dict = {}
|
||||
else:
|
||||
args_dict = func_args
|
||||
|
||||
# Get agent_key for event tracking
|
||||
agent_key = (
|
||||
getattr(self.agent, "key", "unknown") if self.agent else "unknown"
|
||||
)
|
||||
|
||||
# Find original tool by matching sanitized name (needed for cache_function and result_as_answer)
|
||||
original_tool = None
|
||||
for tool in self.original_tools or []:
|
||||
if sanitize_tool_name(tool.name) == func_name:
|
||||
original_tool = tool
|
||||
break
|
||||
|
||||
# Check if tool has reached max usage count
|
||||
max_usage_reached = False
|
||||
if (
|
||||
original_tool
|
||||
and original_tool.max_usage_count is not None
|
||||
and original_tool.current_usage_count >= original_tool.max_usage_count
|
||||
):
|
||||
max_usage_reached = True
|
||||
|
||||
# Check cache before executing
|
||||
from_cache = False
|
||||
input_str = json.dumps(args_dict) if args_dict else ""
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
cached_result = self.tools_handler.cache.read(
|
||||
tool=func_name, input=input_str
|
||||
execution_results: list[dict[str, Any]] = []
|
||||
if should_parallelize:
|
||||
max_workers = min(8, len(runnable_tool_calls))
|
||||
with ThreadPoolExecutor(max_workers=max_workers) as pool:
|
||||
future_to_idx = {
|
||||
pool.submit(self._execute_single_native_tool_call, tool_call): idx
|
||||
for idx, tool_call in enumerate(runnable_tool_calls)
|
||||
}
|
||||
ordered_results: list[dict[str, Any] | None] = [None] * len(
|
||||
runnable_tool_calls
|
||||
)
|
||||
if cached_result is not None:
|
||||
result = (
|
||||
str(cached_result)
|
||||
if not isinstance(cached_result, str)
|
||||
else cached_result
|
||||
)
|
||||
from_cache = True
|
||||
for future in as_completed(future_to_idx):
|
||||
idx = future_to_idx[future]
|
||||
ordered_results[idx] = future.result()
|
||||
execution_results = [
|
||||
result for result in ordered_results if result is not None
|
||||
]
|
||||
else:
|
||||
# Execute sequentially so result_as_answer tools can short-circuit
|
||||
# immediately without running remaining calls.
|
||||
for tool_call in runnable_tool_calls:
|
||||
execution_result = self._execute_single_native_tool_call(tool_call)
|
||||
call_id = cast(str, execution_result["call_id"])
|
||||
func_name = cast(str, execution_result["func_name"])
|
||||
result = cast(str, execution_result["result"])
|
||||
from_cache = cast(bool, execution_result["from_cache"])
|
||||
original_tool = execution_result["original_tool"]
|
||||
|
||||
# Emit tool usage started event
|
||||
started_at = datetime.now()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageStartedEvent(
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
),
|
||||
)
|
||||
error_event_emitted = False
|
||||
tool_message: LLMMessage = {
|
||||
"role": "tool",
|
||||
"tool_call_id": call_id,
|
||||
"name": func_name,
|
||||
"content": result,
|
||||
}
|
||||
self.state.messages.append(tool_message)
|
||||
|
||||
track_delegation_if_needed(func_name, args_dict, self.task)
|
||||
|
||||
structured_tool: CrewStructuredTool | None = None
|
||||
for structured in self.tools or []:
|
||||
if sanitize_tool_name(structured.name) == func_name:
|
||||
structured_tool = structured
|
||||
break
|
||||
|
||||
hook_blocked = False
|
||||
before_hook_context = ToolCallHookContext(
|
||||
tool_name=func_name,
|
||||
tool_input=args_dict,
|
||||
tool=structured_tool, # type: ignore[arg-type]
|
||||
agent=self.agent,
|
||||
task=self.task,
|
||||
crew=self.crew,
|
||||
)
|
||||
before_hooks = get_before_tool_call_hooks()
|
||||
try:
|
||||
for hook in before_hooks:
|
||||
hook_result = hook(before_hook_context)
|
||||
if hook_result is False:
|
||||
hook_blocked = True
|
||||
break
|
||||
except Exception as hook_error:
|
||||
if self.agent.verbose:
|
||||
# Log the tool execution
|
||||
if self.agent and self.agent.verbose:
|
||||
cache_info = " (from cache)" if from_cache else ""
|
||||
self._printer.print(
|
||||
content=f"Error in before_tool_call hook: {hook_error}",
|
||||
color="red",
|
||||
content=f"Tool {func_name} executed with result{cache_info}: {result[:200]}...",
|
||||
color="green",
|
||||
)
|
||||
|
||||
if hook_blocked:
|
||||
result = f"Tool execution blocked by hook. Tool: {func_name}"
|
||||
elif not from_cache and not max_usage_reached:
|
||||
result = "Tool not found"
|
||||
if func_name in self._available_functions:
|
||||
try:
|
||||
tool_func = self._available_functions[func_name]
|
||||
raw_result = tool_func(**args_dict)
|
||||
|
||||
# Add to cache after successful execution (before string conversion)
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
should_cache = True
|
||||
if original_tool:
|
||||
should_cache = original_tool.cache_function(
|
||||
args_dict, raw_result
|
||||
)
|
||||
if should_cache:
|
||||
self.tools_handler.cache.add(
|
||||
tool=func_name, input=input_str, output=raw_result
|
||||
)
|
||||
|
||||
# Convert to string for message
|
||||
result = (
|
||||
str(raw_result)
|
||||
if not isinstance(raw_result, str)
|
||||
else raw_result
|
||||
)
|
||||
except Exception as e:
|
||||
result = f"Error executing tool: {e}"
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
# Emit tool usage error event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageErrorEvent(
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
error=e,
|
||||
),
|
||||
)
|
||||
error_event_emitted = True
|
||||
elif max_usage_reached and original_tool:
|
||||
# Return error message when max usage limit is reached
|
||||
result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
|
||||
|
||||
# Execute after_tool_call hooks (even if blocked, to allow logging/monitoring)
|
||||
after_hook_context = ToolCallHookContext(
|
||||
tool_name=func_name,
|
||||
tool_input=args_dict,
|
||||
tool=structured_tool, # type: ignore[arg-type]
|
||||
agent=self.agent,
|
||||
task=self.task,
|
||||
crew=self.crew,
|
||||
tool_result=result,
|
||||
)
|
||||
after_hooks = get_after_tool_call_hooks()
|
||||
try:
|
||||
for after_hook in after_hooks:
|
||||
after_hook_result = after_hook(after_hook_context)
|
||||
if after_hook_result is not None:
|
||||
result = after_hook_result
|
||||
after_hook_context.tool_result = result
|
||||
except Exception as hook_error:
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"Error in after_tool_call hook: {hook_error}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
if not error_event_emitted:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageFinishedEvent(
|
||||
if (
|
||||
original_tool
|
||||
and hasattr(original_tool, "result_as_answer")
|
||||
and original_tool.result_as_answer
|
||||
):
|
||||
self.state.current_answer = AgentFinish(
|
||||
thought="Tool result is the final answer",
|
||||
output=result,
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
started_at=started_at,
|
||||
finished_at=datetime.now(),
|
||||
),
|
||||
)
|
||||
text=result,
|
||||
)
|
||||
self.state.is_finished = True
|
||||
return "tool_result_is_final"
|
||||
|
||||
# Append tool result message
|
||||
tool_message: LLMMessage = {
|
||||
return "native_tool_completed"
|
||||
|
||||
for execution_result in execution_results:
|
||||
call_id = cast(str, execution_result["call_id"])
|
||||
func_name = cast(str, execution_result["func_name"])
|
||||
result = cast(str, execution_result["result"])
|
||||
from_cache = cast(bool, execution_result["from_cache"])
|
||||
original_tool = execution_result["original_tool"]
|
||||
|
||||
tool_message = {
|
||||
"role": "tool",
|
||||
"tool_call_id": call_id,
|
||||
"name": func_name,
|
||||
@@ -922,6 +813,220 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
|
||||
return "native_tool_completed"
|
||||
|
||||
def _should_parallelize_native_tool_calls(self, tool_calls: list[Any]) -> bool:
|
||||
"""Determine if native tool calls are safe to run in parallel."""
|
||||
if len(tool_calls) <= 1:
|
||||
return False
|
||||
|
||||
for tool_call in tool_calls:
|
||||
info = extract_tool_call_info(tool_call)
|
||||
if not info:
|
||||
continue
|
||||
_, func_name, _ = info
|
||||
|
||||
original_tool = None
|
||||
for tool in self.original_tools or []:
|
||||
if sanitize_tool_name(tool.name) == func_name:
|
||||
original_tool = tool
|
||||
break
|
||||
|
||||
if not original_tool:
|
||||
continue
|
||||
|
||||
if getattr(original_tool, "result_as_answer", False):
|
||||
return False
|
||||
if getattr(original_tool, "max_usage_count", None) is not None:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _execute_single_native_tool_call(self, tool_call: Any) -> dict[str, Any]:
|
||||
"""Execute a single native tool call and return metadata/result."""
|
||||
info = extract_tool_call_info(tool_call)
|
||||
if not info:
|
||||
raise ValueError("Invalid native tool call format")
|
||||
|
||||
call_id, func_name, func_args = info
|
||||
|
||||
# Parse arguments
|
||||
args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id)
|
||||
if parse_error is not None:
|
||||
return parse_error
|
||||
|
||||
# Get agent_key for event tracking
|
||||
agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown"
|
||||
|
||||
# Find original tool by matching sanitized name (needed for cache_function and result_as_answer)
|
||||
original_tool = None
|
||||
for tool in self.original_tools or []:
|
||||
if sanitize_tool_name(tool.name) == func_name:
|
||||
original_tool = tool
|
||||
break
|
||||
|
||||
# Check if tool has reached max usage count
|
||||
max_usage_reached = False
|
||||
if (
|
||||
original_tool
|
||||
and original_tool.max_usage_count is not None
|
||||
and original_tool.current_usage_count >= original_tool.max_usage_count
|
||||
):
|
||||
max_usage_reached = True
|
||||
|
||||
# Check cache before executing
|
||||
from_cache = False
|
||||
input_str = json.dumps(args_dict) if args_dict else ""
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
cached_result = self.tools_handler.cache.read(
|
||||
tool=func_name, input=input_str
|
||||
)
|
||||
if cached_result is not None:
|
||||
result = (
|
||||
str(cached_result)
|
||||
if not isinstance(cached_result, str)
|
||||
else cached_result
|
||||
)
|
||||
from_cache = True
|
||||
|
||||
# Emit tool usage started event
|
||||
started_at = datetime.now()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageStartedEvent(
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
),
|
||||
)
|
||||
error_event_emitted = False
|
||||
|
||||
track_delegation_if_needed(func_name, args_dict, self.task)
|
||||
|
||||
structured_tool: CrewStructuredTool | None = None
|
||||
for structured in self.tools or []:
|
||||
if sanitize_tool_name(structured.name) == func_name:
|
||||
structured_tool = structured
|
||||
break
|
||||
|
||||
hook_blocked = False
|
||||
before_hook_context = ToolCallHookContext(
|
||||
tool_name=func_name,
|
||||
tool_input=args_dict,
|
||||
tool=structured_tool, # type: ignore[arg-type]
|
||||
agent=self.agent,
|
||||
task=self.task,
|
||||
crew=self.crew,
|
||||
)
|
||||
before_hooks = get_before_tool_call_hooks()
|
||||
try:
|
||||
for hook in before_hooks:
|
||||
hook_result = hook(before_hook_context)
|
||||
if hook_result is False:
|
||||
hook_blocked = True
|
||||
break
|
||||
except Exception as hook_error:
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"Error in before_tool_call hook: {hook_error}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
if hook_blocked:
|
||||
result = f"Tool execution blocked by hook. Tool: {func_name}"
|
||||
elif not from_cache and not max_usage_reached:
|
||||
result = "Tool not found"
|
||||
if func_name in self._available_functions:
|
||||
try:
|
||||
tool_func = self._available_functions[func_name]
|
||||
raw_result = tool_func(**args_dict)
|
||||
|
||||
# Add to cache after successful execution (before string conversion)
|
||||
if self.tools_handler and self.tools_handler.cache:
|
||||
should_cache = True
|
||||
if original_tool:
|
||||
should_cache = original_tool.cache_function(
|
||||
args_dict, raw_result
|
||||
)
|
||||
if should_cache:
|
||||
self.tools_handler.cache.add(
|
||||
tool=func_name, input=input_str, output=raw_result
|
||||
)
|
||||
|
||||
# Convert to string for message
|
||||
result = (
|
||||
str(raw_result)
|
||||
if not isinstance(raw_result, str)
|
||||
else raw_result
|
||||
)
|
||||
except Exception as e:
|
||||
result = f"Error executing tool: {e}"
|
||||
if self.task:
|
||||
self.task.increment_tools_errors()
|
||||
# Emit tool usage error event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageErrorEvent(
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
error=e,
|
||||
),
|
||||
)
|
||||
error_event_emitted = True
|
||||
elif max_usage_reached and original_tool:
|
||||
# Return error message when max usage limit is reached
|
||||
result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore."
|
||||
|
||||
# Execute after_tool_call hooks (even if blocked, to allow logging/monitoring)
|
||||
after_hook_context = ToolCallHookContext(
|
||||
tool_name=func_name,
|
||||
tool_input=args_dict,
|
||||
tool=structured_tool, # type: ignore[arg-type]
|
||||
agent=self.agent,
|
||||
task=self.task,
|
||||
crew=self.crew,
|
||||
tool_result=result,
|
||||
)
|
||||
after_hooks = get_after_tool_call_hooks()
|
||||
try:
|
||||
for after_hook in after_hooks:
|
||||
after_hook_result = after_hook(after_hook_context)
|
||||
if after_hook_result is not None:
|
||||
result = after_hook_result
|
||||
after_hook_context.tool_result = result
|
||||
except Exception as hook_error:
|
||||
if self.agent.verbose:
|
||||
self._printer.print(
|
||||
content=f"Error in after_tool_call hook: {hook_error}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
if not error_event_emitted:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageFinishedEvent(
|
||||
output=result,
|
||||
tool_name=func_name,
|
||||
tool_args=args_dict,
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
agent_key=agent_key,
|
||||
started_at=started_at,
|
||||
finished_at=datetime.now(),
|
||||
),
|
||||
)
|
||||
|
||||
return {
|
||||
"call_id": call_id,
|
||||
"func_name": func_name,
|
||||
"result": result,
|
||||
"from_cache": from_cache,
|
||||
"original_tool": original_tool,
|
||||
}
|
||||
|
||||
def _extract_tool_name(self, tool_call: Any) -> str:
|
||||
"""Extract tool name from various tool call formats."""
|
||||
if hasattr(tool_call, "function"):
|
||||
@@ -1106,9 +1211,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
if self.state.ask_for_human_input:
|
||||
formatted_answer = self._handle_human_feedback(formatted_answer)
|
||||
|
||||
self._create_short_term_memory(formatted_answer)
|
||||
self._create_long_term_memory(formatted_answer)
|
||||
self._create_external_memory(formatted_answer)
|
||||
self._save_to_memory(formatted_answer)
|
||||
|
||||
return {"output": formatted_answer.output}
|
||||
|
||||
@@ -1191,9 +1294,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
if self.state.ask_for_human_input:
|
||||
formatted_answer = await self._ahandle_human_feedback(formatted_answer)
|
||||
|
||||
self._create_short_term_memory(formatted_answer)
|
||||
self._create_long_term_memory(formatted_answer)
|
||||
self._create_external_memory(formatted_answer)
|
||||
self._save_to_memory(formatted_answer)
|
||||
|
||||
return {"output": formatted_answer.output}
|
||||
|
||||
@@ -1256,7 +1357,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
|
||||
formatted_answer: Current agent response.
|
||||
"""
|
||||
if self.step_callback:
|
||||
self.step_callback(formatted_answer)
|
||||
cb_result = self.step_callback(formatted_answer)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
def _append_message_to_state(
|
||||
self, text: str, role: Literal["user", "assistant", "system"] = "assistant"
|
||||
|
||||
@@ -7,6 +7,7 @@ from crewai.flow.async_feedback import (
|
||||
from crewai.flow.flow import Flow, and_, listen, or_, router, start
|
||||
from crewai.flow.flow_config import flow_config
|
||||
from crewai.flow.human_feedback import HumanFeedbackResult, human_feedback
|
||||
from crewai.flow.input_provider import InputProvider, InputResponse
|
||||
from crewai.flow.persistence import persist
|
||||
from crewai.flow.visualization import (
|
||||
FlowStructure,
|
||||
@@ -22,6 +23,8 @@ __all__ = [
|
||||
"HumanFeedbackPending",
|
||||
"HumanFeedbackProvider",
|
||||
"HumanFeedbackResult",
|
||||
"InputProvider",
|
||||
"InputResponse",
|
||||
"PendingFeedbackContext",
|
||||
"and_",
|
||||
"build_flow_structure",
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
"""Default provider implementations for human feedback.
|
||||
"""Default provider implementations for human feedback and user input.
|
||||
|
||||
This module provides the ConsoleProvider, which is the default synchronous
|
||||
provider that collects feedback via console input.
|
||||
provider that collects both feedback (for ``@human_feedback``) and user input
|
||||
(for ``Flow.ask()``) via console.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -16,20 +17,23 @@ if TYPE_CHECKING:
|
||||
|
||||
|
||||
class ConsoleProvider:
|
||||
"""Default synchronous console-based feedback provider.
|
||||
"""Default synchronous console-based provider for feedback and input.
|
||||
|
||||
This provider blocks execution and waits for console input from the user.
|
||||
It displays the method output with formatting and prompts for feedback.
|
||||
It serves two purposes:
|
||||
|
||||
- **Feedback** (``request_feedback``): Used by ``@human_feedback`` to
|
||||
display method output and collect review feedback.
|
||||
- **Input** (``request_input``): Used by ``Flow.ask()`` to prompt the
|
||||
user with a question and collect a response.
|
||||
|
||||
This is the default provider used when no custom provider is specified
|
||||
in the @human_feedback decorator.
|
||||
in the ``@human_feedback`` decorator or on the Flow's ``input_provider``.
|
||||
|
||||
Example:
|
||||
Example (feedback):
|
||||
```python
|
||||
from crewai.flow.async_feedback import ConsoleProvider
|
||||
|
||||
|
||||
# Explicitly use console provider
|
||||
@human_feedback(
|
||||
message="Review this:",
|
||||
provider=ConsoleProvider(),
|
||||
@@ -37,9 +41,20 @@ class ConsoleProvider:
|
||||
def my_method(self):
|
||||
return "Content to review"
|
||||
```
|
||||
|
||||
Example (input):
|
||||
```python
|
||||
from crewai.flow import Flow, start
|
||||
|
||||
class MyFlow(Flow):
|
||||
@start()
|
||||
def gather_info(self):
|
||||
topic = self.ask("What topic should we research?")
|
||||
return topic
|
||||
```
|
||||
"""
|
||||
|
||||
def __init__(self, verbose: bool = True):
|
||||
def __init__(self, verbose: bool = True) -> None:
|
||||
"""Initialize the console provider.
|
||||
|
||||
Args:
|
||||
@@ -124,3 +139,55 @@ class ConsoleProvider:
|
||||
finally:
|
||||
# Resume live updates
|
||||
formatter.resume_live_updates()
|
||||
|
||||
def request_input(
|
||||
self,
|
||||
message: str,
|
||||
flow: Flow[Any],
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> str | None:
|
||||
"""Request user input via console (blocking).
|
||||
|
||||
Displays the prompt message with formatting and waits for the user
|
||||
to type their response. Used by ``Flow.ask()``.
|
||||
|
||||
Unlike ``request_feedback``, this method does not display an
|
||||
"OUTPUT FOR REVIEW" panel or emit feedback-specific events (those
|
||||
are handled by ``ask()`` itself).
|
||||
|
||||
Args:
|
||||
message: The question or prompt to display to the user.
|
||||
flow: The Flow instance requesting input.
|
||||
metadata: Optional metadata from the caller. Ignored by the
|
||||
console provider (console has no concept of user routing).
|
||||
|
||||
Returns:
|
||||
The user's input as a stripped string. Returns empty string
|
||||
if user presses Enter without input. Never returns None
|
||||
(console input is always available).
|
||||
"""
|
||||
from crewai.events.event_listener import event_listener
|
||||
|
||||
# Pause live updates during human input
|
||||
formatter = event_listener.formatter
|
||||
formatter.pause_live_updates()
|
||||
|
||||
try:
|
||||
console = formatter.console
|
||||
|
||||
if self.verbose:
|
||||
console.print()
|
||||
console.print(message, style="yellow")
|
||||
console.print()
|
||||
|
||||
response = input(">>> \n").strip()
|
||||
else:
|
||||
response = input(f"{message} ").strip()
|
||||
|
||||
# Add line break after input so formatter output starts clean
|
||||
console.print()
|
||||
|
||||
return response
|
||||
finally:
|
||||
# Resume live updates
|
||||
formatter.resume_live_updates()
|
||||
|
||||
@@ -10,6 +10,7 @@ import asyncio
|
||||
from collections.abc import (
|
||||
Callable,
|
||||
ItemsView,
|
||||
Iterable,
|
||||
Iterator,
|
||||
KeysView,
|
||||
Sequence,
|
||||
@@ -17,6 +18,7 @@ from collections.abc import (
|
||||
)
|
||||
from concurrent.futures import Future
|
||||
import copy
|
||||
import enum
|
||||
import inspect
|
||||
import logging
|
||||
import threading
|
||||
@@ -27,8 +29,10 @@ from typing import (
|
||||
Generic,
|
||||
Literal,
|
||||
ParamSpec,
|
||||
SupportsIndex,
|
||||
TypeVar,
|
||||
cast,
|
||||
overload,
|
||||
)
|
||||
from uuid import uuid4
|
||||
|
||||
@@ -77,7 +81,12 @@ from crewai.flow.flow_wrappers import (
|
||||
StartMethod,
|
||||
)
|
||||
from crewai.flow.persistence.base import FlowPersistence
|
||||
from crewai.flow.types import FlowExecutionData, FlowMethodName, PendingListenerKey
|
||||
from crewai.flow.types import (
|
||||
FlowExecutionData,
|
||||
FlowMethodName,
|
||||
InputHistoryEntry,
|
||||
PendingListenerKey,
|
||||
)
|
||||
from crewai.flow.utils import (
|
||||
_extract_all_methods,
|
||||
_extract_all_methods_recursive,
|
||||
@@ -416,13 +425,17 @@ def and_(*conditions: str | FlowCondition | Callable[..., Any]) -> FlowCondition
|
||||
return {"type": AND_CONDITION, "conditions": processed_conditions}
|
||||
|
||||
|
||||
class LockedListProxy(Generic[T]):
|
||||
class LockedListProxy(list, Generic[T]): # type: ignore[type-arg]
|
||||
"""Thread-safe proxy for list operations.
|
||||
|
||||
Wraps a list and uses a lock for all mutating operations.
|
||||
Subclasses ``list`` so that ``isinstance(proxy, list)`` returns True,
|
||||
which is required by libraries like LanceDB and Pydantic that do strict
|
||||
type checks. All mutations go through the lock; reads delegate to the
|
||||
underlying list.
|
||||
"""
|
||||
|
||||
def __init__(self, lst: list[T], lock: threading.Lock) -> None:
|
||||
super().__init__() # empty builtin list; all access goes through self._list
|
||||
self._list = lst
|
||||
self._lock = lock
|
||||
|
||||
@@ -430,11 +443,11 @@ class LockedListProxy(Generic[T]):
|
||||
with self._lock:
|
||||
self._list.append(item)
|
||||
|
||||
def extend(self, items: list[T]) -> None:
|
||||
def extend(self, items: Iterable[T]) -> None:
|
||||
with self._lock:
|
||||
self._list.extend(items)
|
||||
|
||||
def insert(self, index: int, item: T) -> None:
|
||||
def insert(self, index: SupportsIndex, item: T) -> None:
|
||||
with self._lock:
|
||||
self._list.insert(index, item)
|
||||
|
||||
@@ -442,7 +455,7 @@ class LockedListProxy(Generic[T]):
|
||||
with self._lock:
|
||||
self._list.remove(item)
|
||||
|
||||
def pop(self, index: int = -1) -> T:
|
||||
def pop(self, index: SupportsIndex = -1) -> T:
|
||||
with self._lock:
|
||||
return self._list.pop(index)
|
||||
|
||||
@@ -450,15 +463,23 @@ class LockedListProxy(Generic[T]):
|
||||
with self._lock:
|
||||
self._list.clear()
|
||||
|
||||
def __setitem__(self, index: int, value: T) -> None:
|
||||
@overload
|
||||
def __setitem__(self, index: SupportsIndex, value: T) -> None: ...
|
||||
@overload
|
||||
def __setitem__(self, index: slice, value: Iterable[T]) -> None: ...
|
||||
def __setitem__(self, index: Any, value: Any) -> None:
|
||||
with self._lock:
|
||||
self._list[index] = value
|
||||
|
||||
def __delitem__(self, index: int) -> None:
|
||||
def __delitem__(self, index: SupportsIndex | slice) -> None:
|
||||
with self._lock:
|
||||
del self._list[index]
|
||||
|
||||
def __getitem__(self, index: int) -> T:
|
||||
@overload
|
||||
def __getitem__(self, index: SupportsIndex) -> T: ...
|
||||
@overload
|
||||
def __getitem__(self, index: slice) -> list[T]: ...
|
||||
def __getitem__(self, index: Any) -> Any:
|
||||
return self._list[index]
|
||||
|
||||
def __len__(self) -> int:
|
||||
@@ -476,14 +497,31 @@ class LockedListProxy(Generic[T]):
|
||||
def __bool__(self) -> bool:
|
||||
return bool(self._list)
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
"""Compare based on the underlying list contents."""
|
||||
if isinstance(other, LockedListProxy):
|
||||
# Avoid deadlocks by acquiring locks in a consistent order.
|
||||
first, second = (self, other) if id(self) <= id(other) else (other, self)
|
||||
with first._lock:
|
||||
with second._lock:
|
||||
return first._list == second._list
|
||||
with self._lock:
|
||||
return self._list == other
|
||||
|
||||
class LockedDictProxy(Generic[T]):
|
||||
def __ne__(self, other: object) -> bool:
|
||||
return not self.__eq__(other)
|
||||
|
||||
|
||||
class LockedDictProxy(dict, Generic[T]): # type: ignore[type-arg]
|
||||
"""Thread-safe proxy for dict operations.
|
||||
|
||||
Wraps a dict and uses a lock for all mutating operations.
|
||||
Subclasses ``dict`` so that ``isinstance(proxy, dict)`` returns True,
|
||||
which is required by libraries like Pydantic that do strict type checks.
|
||||
All mutations go through the lock; reads delegate to the underlying dict.
|
||||
"""
|
||||
|
||||
def __init__(self, d: dict[str, T], lock: threading.Lock) -> None:
|
||||
super().__init__() # empty builtin dict; all access goes through self._dict
|
||||
self._dict = d
|
||||
self._lock = lock
|
||||
|
||||
@@ -495,11 +533,11 @@ class LockedDictProxy(Generic[T]):
|
||||
with self._lock:
|
||||
del self._dict[key]
|
||||
|
||||
def pop(self, key: str, *default: T) -> T:
|
||||
def pop(self, key: str, *default: T) -> T: # type: ignore[override]
|
||||
with self._lock:
|
||||
return self._dict.pop(key, *default)
|
||||
|
||||
def update(self, other: dict[str, T]) -> None:
|
||||
def update(self, other: dict[str, T]) -> None: # type: ignore[override]
|
||||
with self._lock:
|
||||
self._dict.update(other)
|
||||
|
||||
@@ -507,7 +545,7 @@ class LockedDictProxy(Generic[T]):
|
||||
with self._lock:
|
||||
self._dict.clear()
|
||||
|
||||
def setdefault(self, key: str, default: T) -> T:
|
||||
def setdefault(self, key: str, default: T) -> T: # type: ignore[override]
|
||||
with self._lock:
|
||||
return self._dict.setdefault(key, default)
|
||||
|
||||
@@ -523,16 +561,16 @@ class LockedDictProxy(Generic[T]):
|
||||
def __contains__(self, key: object) -> bool:
|
||||
return key in self._dict
|
||||
|
||||
def keys(self) -> KeysView[str]:
|
||||
def keys(self) -> KeysView[str]: # type: ignore[override]
|
||||
return self._dict.keys()
|
||||
|
||||
def values(self) -> ValuesView[T]:
|
||||
def values(self) -> ValuesView[T]: # type: ignore[override]
|
||||
return self._dict.values()
|
||||
|
||||
def items(self) -> ItemsView[str, T]:
|
||||
def items(self) -> ItemsView[str, T]: # type: ignore[override]
|
||||
return self._dict.items()
|
||||
|
||||
def get(self, key: str, default: T | None = None) -> T | None:
|
||||
def get(self, key: str, default: T | None = None) -> T | None: # type: ignore[override]
|
||||
return self._dict.get(key, default)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
@@ -541,6 +579,20 @@ class LockedDictProxy(Generic[T]):
|
||||
def __bool__(self) -> bool:
|
||||
return bool(self._dict)
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
"""Compare based on the underlying dict contents."""
|
||||
if isinstance(other, LockedDictProxy):
|
||||
# Avoid deadlocks by acquiring locks in a consistent order.
|
||||
first, second = (self, other) if id(self) <= id(other) else (other, self)
|
||||
with first._lock:
|
||||
with second._lock:
|
||||
return first._dict == second._dict
|
||||
with self._lock:
|
||||
return self._dict == other
|
||||
|
||||
def __ne__(self, other: object) -> bool:
|
||||
return not self.__eq__(other)
|
||||
|
||||
|
||||
class StateProxy(Generic[T]):
|
||||
"""Proxy that provides thread-safe access to flow state.
|
||||
@@ -700,6 +752,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
name: str | None = None
|
||||
tracing: bool | None = None
|
||||
stream: bool = False
|
||||
memory: Any = (
|
||||
None # Memory | MemoryScope | MemorySlice | None; auto-created if not set
|
||||
)
|
||||
input_provider: Any = None # InputProvider | None; per-flow override for self.ask()
|
||||
|
||||
def __class_getitem__(cls: type[Flow[T]], item: type[T]) -> type[Flow[T]]:
|
||||
class _FlowGeneric(cls): # type: ignore
|
||||
@@ -746,6 +802,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
self._pending_feedback_context: PendingFeedbackContext | None = None
|
||||
self.suppress_flow_events: bool = suppress_flow_events
|
||||
|
||||
# User input history (for self.ask())
|
||||
self._input_history: list[InputHistoryEntry] = []
|
||||
|
||||
# Initialize state with initial values
|
||||
self._state = self._create_initial_state()
|
||||
self.tracing = tracing
|
||||
@@ -767,6 +826,14 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
),
|
||||
)
|
||||
|
||||
# Auto-create memory if not provided at class or instance level.
|
||||
# Internal flows (RecallFlow, EncodingFlow) set _skip_auto_memory
|
||||
# to avoid creating a wasteful standalone Memory instance.
|
||||
if self.memory is None and not getattr(self, "_skip_auto_memory", False):
|
||||
from crewai.memory.unified_memory import Memory
|
||||
|
||||
self.memory = Memory()
|
||||
|
||||
# Register all flow-related methods
|
||||
for method_name in dir(self):
|
||||
if not method_name.startswith("_"):
|
||||
@@ -777,6 +844,63 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
method = method.__get__(self, self.__class__)
|
||||
self._methods[method.__name__] = method
|
||||
|
||||
def recall(self, query: str, **kwargs: Any) -> Any:
|
||||
"""Recall relevant memories. Delegates to this flow's memory.
|
||||
|
||||
Args:
|
||||
query: Natural language query.
|
||||
**kwargs: Passed to memory.recall (e.g. scope, categories, limit, depth).
|
||||
|
||||
Returns:
|
||||
Result of memory.recall(query, **kwargs).
|
||||
|
||||
Raises:
|
||||
ValueError: If no memory is configured for this flow.
|
||||
"""
|
||||
if self.memory is None:
|
||||
raise ValueError("No memory configured for this flow")
|
||||
return self.memory.recall(query, **kwargs)
|
||||
|
||||
def remember(self, content: str | list[str], **kwargs: Any) -> Any:
|
||||
"""Store one or more items in memory.
|
||||
|
||||
Pass a single string for synchronous save (returns the MemoryRecord).
|
||||
Pass a list of strings for non-blocking batch save (returns immediately).
|
||||
|
||||
Args:
|
||||
content: Text or list of texts to remember.
|
||||
**kwargs: Passed to memory.remember / remember_many
|
||||
(e.g. scope, categories, metadata, importance).
|
||||
|
||||
Returns:
|
||||
MemoryRecord for single item, empty list for batch (background save).
|
||||
|
||||
Raises:
|
||||
ValueError: If no memory is configured for this flow.
|
||||
"""
|
||||
if self.memory is None:
|
||||
raise ValueError("No memory configured for this flow")
|
||||
if isinstance(content, list):
|
||||
return self.memory.remember_many(content, **kwargs)
|
||||
return self.memory.remember(content, **kwargs)
|
||||
|
||||
def extract_memories(self, content: str) -> list[str]:
|
||||
"""Extract discrete memories from content. Delegates to this flow's memory.
|
||||
|
||||
Args:
|
||||
content: Raw text (e.g. task + result dump).
|
||||
|
||||
Returns:
|
||||
List of short, self-contained memory statements.
|
||||
|
||||
Raises:
|
||||
ValueError: If no memory is configured for this flow.
|
||||
"""
|
||||
if self.memory is None:
|
||||
raise ValueError("No memory configured for this flow")
|
||||
result: list[str] = self.memory.extract_memories(content)
|
||||
return result
|
||||
|
||||
def _mark_or_listener_fired(self, listener_name: FlowMethodName) -> bool:
|
||||
"""Mark an OR listener as fired atomically.
|
||||
|
||||
@@ -1246,8 +1370,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
ValueError: If structured state model lacks 'id' field
|
||||
TypeError: If state is neither BaseModel nor dictionary
|
||||
"""
|
||||
init_state = self.initial_state
|
||||
|
||||
# Handle case where initial_state is None but we have a type parameter
|
||||
if self.initial_state is None and hasattr(self, "_initial_state_t"):
|
||||
if init_state is None and hasattr(self, "_initial_state_t"):
|
||||
state_type = self._initial_state_t
|
||||
if isinstance(state_type, type):
|
||||
if issubclass(state_type, FlowState):
|
||||
@@ -1271,12 +1397,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
return cast(T, {"id": str(uuid4())})
|
||||
|
||||
# Handle case where no initial state is provided
|
||||
if self.initial_state is None:
|
||||
if init_state is None:
|
||||
return cast(T, {"id": str(uuid4())})
|
||||
|
||||
# Handle case where initial_state is a type (class)
|
||||
if isinstance(self.initial_state, type):
|
||||
state_class: type[T] = self.initial_state
|
||||
if isinstance(init_state, type):
|
||||
state_class = init_state
|
||||
if issubclass(state_class, FlowState):
|
||||
return state_class()
|
||||
if issubclass(state_class, BaseModel):
|
||||
@@ -1287,19 +1413,19 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
if not getattr(model_instance, "id", None):
|
||||
object.__setattr__(model_instance, "id", str(uuid4()))
|
||||
return model_instance
|
||||
if self.initial_state is dict:
|
||||
if init_state is dict:
|
||||
return cast(T, {"id": str(uuid4())})
|
||||
|
||||
# Handle dictionary instance case
|
||||
if isinstance(self.initial_state, dict):
|
||||
new_state = dict(self.initial_state) # Copy to avoid mutations
|
||||
if isinstance(init_state, dict):
|
||||
new_state = dict(init_state) # Copy to avoid mutations
|
||||
if "id" not in new_state:
|
||||
new_state["id"] = str(uuid4())
|
||||
return cast(T, new_state)
|
||||
|
||||
# Handle BaseModel instance case
|
||||
if isinstance(self.initial_state, BaseModel):
|
||||
model = cast(BaseModel, self.initial_state)
|
||||
if isinstance(init_state, BaseModel):
|
||||
model = cast(BaseModel, init_state)
|
||||
if not hasattr(model, "id"):
|
||||
raise ValueError("Flow state model must have an 'id' field")
|
||||
|
||||
@@ -1698,8 +1824,13 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
self._pending_and_listeners.clear()
|
||||
self._clear_or_listeners()
|
||||
else:
|
||||
# We're restoring from persistence, set the flag
|
||||
self._is_execution_resuming = True
|
||||
# Only enter resumption mode if there are completed methods to
|
||||
# replay. When _completed_methods is empty (e.g. a pure
|
||||
# state-reload via kickoff(inputs={"id": ...})), the flow
|
||||
# executes from scratch and the flag would incorrectly
|
||||
# suppress cyclic re-execution on the second iteration.
|
||||
if self._completed_methods:
|
||||
self._is_execution_resuming = True
|
||||
|
||||
if inputs:
|
||||
# Override the id in the state if it exists in inputs
|
||||
@@ -1872,6 +2003,9 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
return final_output
|
||||
finally:
|
||||
# Ensure all background memory saves complete before returning
|
||||
if self.memory is not None and hasattr(self.memory, "drain_writes"):
|
||||
self.memory.drain_writes()
|
||||
if request_id_token is not None:
|
||||
current_flow_request_id.reset(request_id_token)
|
||||
if flow_id_token is not None:
|
||||
@@ -2014,15 +2148,24 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
if future:
|
||||
self._event_futures.append(future)
|
||||
|
||||
if asyncio.iscoroutinefunction(method):
|
||||
result = await method(*args, **kwargs)
|
||||
else:
|
||||
# Run sync methods in thread pool for isolation
|
||||
# This allows Agent.kickoff() to work synchronously inside Flow methods
|
||||
import contextvars
|
||||
# Set method name in context so ask() can read it without
|
||||
# stack inspection. Must happen before copy_context() so the
|
||||
# value propagates into the thread pool for sync methods.
|
||||
from crewai.flow.flow_context import current_flow_method_name
|
||||
|
||||
ctx = contextvars.copy_context()
|
||||
result = await asyncio.to_thread(ctx.run, method, *args, **kwargs)
|
||||
method_name_token = current_flow_method_name.set(method_name)
|
||||
try:
|
||||
if asyncio.iscoroutinefunction(method):
|
||||
result = await method(*args, **kwargs)
|
||||
else:
|
||||
# Run sync methods in thread pool for isolation
|
||||
# This allows Agent.kickoff() to work synchronously inside Flow methods
|
||||
import contextvars
|
||||
|
||||
ctx = contextvars.copy_context()
|
||||
result = await asyncio.to_thread(ctx.run, method, *args, **kwargs)
|
||||
finally:
|
||||
current_flow_method_name.reset(method_name_token)
|
||||
|
||||
# Auto-await coroutines returned from sync methods (enables AgentExecutor pattern)
|
||||
if asyncio.iscoroutine(result):
|
||||
@@ -2055,6 +2198,8 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
from crewai.flow.async_feedback.types import HumanFeedbackPending
|
||||
|
||||
if isinstance(e, HumanFeedbackPending):
|
||||
e.context.method_name = method_name
|
||||
|
||||
# Auto-save pending feedback (create default persistence if needed)
|
||||
if self._persistence is None:
|
||||
from crewai.flow.persistence import SQLiteFlowPersistence
|
||||
@@ -2154,14 +2299,23 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
router_name, router_input, current_triggering_event_id
|
||||
)
|
||||
if router_result: # Only add non-None results
|
||||
router_results.append(FlowMethodName(str(router_result)))
|
||||
router_result_str = (
|
||||
router_result.value
|
||||
if isinstance(router_result, enum.Enum)
|
||||
else str(router_result)
|
||||
)
|
||||
router_results.append(FlowMethodName(router_result_str))
|
||||
# If this was a human_feedback router, map the outcome to the feedback
|
||||
if self.last_human_feedback is not None:
|
||||
router_result_to_feedback[str(router_result)] = (
|
||||
router_result_to_feedback[router_result_str] = (
|
||||
self.last_human_feedback
|
||||
)
|
||||
current_trigger = (
|
||||
FlowMethodName(str(router_result))
|
||||
FlowMethodName(
|
||||
router_result.value
|
||||
if isinstance(router_result, enum.Enum)
|
||||
else str(router_result)
|
||||
)
|
||||
if router_result is not None
|
||||
else FlowMethodName("") # Update for next iteration of router chain
|
||||
)
|
||||
@@ -2428,8 +2582,12 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
return (None, None)
|
||||
# For cyclic flows, clear from completed to allow re-execution
|
||||
self._completed_methods.discard(listener_name)
|
||||
# Also clear from fired OR listeners for cyclic flows
|
||||
self._discard_or_listener(listener_name)
|
||||
# Clear ALL fired OR listeners so they can fire again in the new cycle.
|
||||
# This mirrors what _execute_start_method does for start-method cycles.
|
||||
# Only discarding the individual listener is insufficient because
|
||||
# downstream or_() listeners (e.g., method_a listening to
|
||||
# or_(handler_a, handler_b)) would remain suppressed across iterations.
|
||||
self._clear_or_listeners()
|
||||
|
||||
try:
|
||||
method = self._methods[listener_name]
|
||||
@@ -2473,6 +2631,206 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
logger.error(f"Error executing listener {listener_name}: {e}")
|
||||
raise
|
||||
|
||||
# ── User Input (self.ask) ────────────────────────────────────────
|
||||
|
||||
def _resolve_input_provider(self) -> Any:
|
||||
"""Resolve the input provider using the priority chain.
|
||||
|
||||
Resolution order:
|
||||
1. ``self.input_provider`` (per-flow override)
|
||||
2. ``flow_config.input_provider`` (global default)
|
||||
3. ``ConsoleInputProvider()`` (built-in fallback)
|
||||
|
||||
Returns:
|
||||
An object implementing the ``InputProvider`` protocol.
|
||||
"""
|
||||
from crewai.flow.async_feedback.providers import ConsoleProvider
|
||||
from crewai.flow.flow_config import flow_config
|
||||
|
||||
if self.input_provider is not None:
|
||||
return self.input_provider
|
||||
if flow_config.input_provider is not None:
|
||||
return flow_config.input_provider
|
||||
return ConsoleProvider()
|
||||
|
||||
def _checkpoint_state_for_ask(self) -> None:
|
||||
"""Auto-checkpoint flow state before waiting for user input.
|
||||
|
||||
If persistence is configured, saves the current state so that
|
||||
``self.state`` is recoverable even if the process crashes while
|
||||
waiting for input.
|
||||
|
||||
This is best-effort: if persistence is not configured, this is a no-op.
|
||||
"""
|
||||
if self._persistence is None:
|
||||
return
|
||||
try:
|
||||
state_data = (
|
||||
self._state
|
||||
if isinstance(self._state, dict)
|
||||
else self._state.model_dump()
|
||||
)
|
||||
self._persistence.save_state(
|
||||
flow_uuid=self.flow_id,
|
||||
method_name="_ask_checkpoint",
|
||||
state_data=state_data,
|
||||
)
|
||||
except Exception:
|
||||
logger.debug("Failed to checkpoint state before ask()", exc_info=True)
|
||||
|
||||
def ask(
|
||||
self,
|
||||
message: str,
|
||||
timeout: float | None = None,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> str | None:
|
||||
"""Request input from the user during flow execution.
|
||||
|
||||
Blocks the current thread until the user provides input or the
|
||||
timeout expires. Works in both sync and async flow methods (the
|
||||
flow framework runs sync methods in a thread pool via
|
||||
``asyncio.to_thread``, so the event loop stays free).
|
||||
|
||||
Timeout ensures flows always terminate. When timeout expires,
|
||||
``None`` is returned, enabling the pattern::
|
||||
|
||||
while (msg := self.ask("You: ", timeout=300)) is not None:
|
||||
process(msg)
|
||||
|
||||
Before waiting for input, the current ``self.state`` is automatically
|
||||
checkpointed to persistence (if configured) for durability.
|
||||
|
||||
Args:
|
||||
message: The question or prompt to display to the user.
|
||||
timeout: Maximum seconds to wait for input. ``None`` means
|
||||
wait indefinitely. When timeout expires, returns ``None``.
|
||||
Note: timeout is best-effort for the provider call --
|
||||
``ask()`` returns ``None`` promptly, but the underlying
|
||||
``request_input()`` may continue running in a background
|
||||
thread until it completes naturally. Network providers
|
||||
should implement their own internal timeouts.
|
||||
metadata: Optional metadata to send to the input provider,
|
||||
such as user ID, channel, session context. The provider
|
||||
can use this to route the question to the right recipient.
|
||||
|
||||
Returns:
|
||||
The user's input as a string, or ``None`` on timeout, disconnect,
|
||||
or provider error. Empty string ``""`` means the user pressed
|
||||
Enter without typing (intentional empty input).
|
||||
|
||||
Example:
|
||||
```python
|
||||
class MyFlow(Flow):
|
||||
@start()
|
||||
def gather_info(self):
|
||||
topic = self.ask(
|
||||
"What topic should we research?",
|
||||
metadata={"user_id": "u123", "channel": "#research"},
|
||||
)
|
||||
if topic is None:
|
||||
return "No input received"
|
||||
return topic
|
||||
```
|
||||
"""
|
||||
from concurrent.futures import (
|
||||
ThreadPoolExecutor,
|
||||
TimeoutError as FuturesTimeoutError,
|
||||
)
|
||||
from datetime import datetime
|
||||
|
||||
from crewai.events.types.flow_events import (
|
||||
FlowInputReceivedEvent,
|
||||
FlowInputRequestedEvent,
|
||||
)
|
||||
from crewai.flow.flow_context import current_flow_method_name
|
||||
from crewai.flow.input_provider import InputResponse
|
||||
|
||||
method_name = current_flow_method_name.get("unknown")
|
||||
|
||||
# Emit input requested event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowInputRequestedEvent(
|
||||
type="flow_input_requested",
|
||||
flow_name=self.name or self.__class__.__name__,
|
||||
method_name=method_name,
|
||||
message=message,
|
||||
metadata=metadata,
|
||||
),
|
||||
)
|
||||
|
||||
# Auto-checkpoint state before waiting
|
||||
self._checkpoint_state_for_ask()
|
||||
|
||||
provider = self._resolve_input_provider()
|
||||
raw: str | InputResponse | None = None
|
||||
|
||||
try:
|
||||
if timeout is not None:
|
||||
# Manual executor management to avoid shutdown(wait=True)
|
||||
# deadlock when the provider call outlives the timeout.
|
||||
executor = ThreadPoolExecutor(max_workers=1)
|
||||
future = executor.submit(
|
||||
provider.request_input, message, self, metadata
|
||||
)
|
||||
try:
|
||||
raw = future.result(timeout=timeout)
|
||||
except FuturesTimeoutError:
|
||||
future.cancel()
|
||||
raw = None
|
||||
finally:
|
||||
# wait=False so we don't block if the provider is still
|
||||
# running (e.g. input() stuck waiting for user).
|
||||
# cancel_futures=True cleans up any queued-but-not-started tasks.
|
||||
executor.shutdown(wait=False, cancel_futures=True)
|
||||
else:
|
||||
raw = provider.request_input(message, self, metadata=metadata)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except Exception:
|
||||
logger.debug("Input provider error in ask()", exc_info=True)
|
||||
raw = None
|
||||
|
||||
# Normalize provider response: str, InputResponse, or None
|
||||
response: str | None = None
|
||||
response_metadata: dict[str, Any] | None = None
|
||||
|
||||
if isinstance(raw, InputResponse):
|
||||
response = raw.text
|
||||
response_metadata = raw.metadata
|
||||
elif isinstance(raw, str):
|
||||
response = raw
|
||||
else:
|
||||
response = None
|
||||
|
||||
# Record in history
|
||||
self._input_history.append(
|
||||
{
|
||||
"message": message,
|
||||
"response": response,
|
||||
"method_name": method_name,
|
||||
"timestamp": datetime.now(),
|
||||
"metadata": metadata,
|
||||
"response_metadata": response_metadata,
|
||||
}
|
||||
)
|
||||
|
||||
# Emit input received event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowInputReceivedEvent(
|
||||
type="flow_input_received",
|
||||
flow_name=self.name or self.__class__.__name__,
|
||||
method_name=method_name,
|
||||
message=message,
|
||||
response=response,
|
||||
metadata=metadata,
|
||||
response_metadata=response_metadata,
|
||||
),
|
||||
)
|
||||
|
||||
return response
|
||||
|
||||
def _request_human_feedback(
|
||||
self,
|
||||
message: str,
|
||||
|
||||
@@ -11,6 +11,7 @@ from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.flow.async_feedback.types import HumanFeedbackProvider
|
||||
from crewai.flow.input_provider import InputProvider
|
||||
|
||||
|
||||
class FlowConfig:
|
||||
@@ -20,10 +21,15 @@ class FlowConfig:
|
||||
hitl_provider: The human-in-the-loop feedback provider.
|
||||
Defaults to None (uses console input).
|
||||
Can be overridden by deployments at startup.
|
||||
input_provider: The input provider used by ``Flow.ask()``.
|
||||
Defaults to None (uses ``ConsoleProvider``).
|
||||
Can be overridden by
|
||||
deployments at startup.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._hitl_provider: HumanFeedbackProvider | None = None
|
||||
self._input_provider: InputProvider | None = None
|
||||
|
||||
@property
|
||||
def hitl_provider(self) -> Any:
|
||||
@@ -35,6 +41,32 @@ class FlowConfig:
|
||||
"""Set the HITL provider."""
|
||||
self._hitl_provider = provider
|
||||
|
||||
@property
|
||||
def input_provider(self) -> Any:
|
||||
"""Get the configured input provider for ``Flow.ask()``.
|
||||
|
||||
Returns:
|
||||
The configured InputProvider instance, or None if not set
|
||||
(in which case ``ConsoleInputProvider`` is used as default).
|
||||
"""
|
||||
return self._input_provider
|
||||
|
||||
@input_provider.setter
|
||||
def input_provider(self, provider: Any) -> None:
|
||||
"""Set the input provider for ``Flow.ask()``.
|
||||
|
||||
Args:
|
||||
provider: An object implementing the ``InputProvider`` protocol.
|
||||
|
||||
Example:
|
||||
```python
|
||||
from crewai.flow import flow_config
|
||||
|
||||
flow_config.input_provider = WebSocketInputProvider(...)
|
||||
```
|
||||
"""
|
||||
self._input_provider = provider
|
||||
|
||||
|
||||
# Singleton instance
|
||||
flow_config = FlowConfig()
|
||||
|
||||
@@ -14,3 +14,7 @@ current_flow_request_id: contextvars.ContextVar[str | None] = contextvars.Contex
|
||||
current_flow_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
|
||||
"flow_id", default=None
|
||||
)
|
||||
|
||||
current_flow_method_name: contextvars.ContextVar[str] = contextvars.ContextVar(
|
||||
"flow_method_name", default="unknown"
|
||||
)
|
||||
|
||||
@@ -62,6 +62,8 @@ from datetime import datetime
|
||||
from functools import wraps
|
||||
from typing import TYPE_CHECKING, Any, TypeVar
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.flow.flow_wrappers import FlowMethod
|
||||
|
||||
|
||||
@@ -132,10 +134,12 @@ class HumanFeedbackConfig:
|
||||
|
||||
message: str
|
||||
emit: Sequence[str] | None = None
|
||||
llm: str | BaseLLM | None = None
|
||||
llm: str | BaseLLM | None = "gpt-4o-mini"
|
||||
default_outcome: str | None = None
|
||||
metadata: dict[str, Any] | None = None
|
||||
provider: HumanFeedbackProvider | None = None
|
||||
learn: bool = False
|
||||
learn_source: str = "hitl"
|
||||
|
||||
|
||||
class HumanFeedbackMethod(FlowMethod[Any, Any]):
|
||||
@@ -155,13 +159,36 @@ class HumanFeedbackMethod(FlowMethod[Any, Any]):
|
||||
__human_feedback_config__: HumanFeedbackConfig | None = None
|
||||
|
||||
|
||||
class PreReviewResult(BaseModel):
|
||||
"""Structured output from the HITL pre-review LLM call."""
|
||||
|
||||
improved_output: str = Field(
|
||||
description="The improved version of the output with past human feedback lessons applied.",
|
||||
)
|
||||
|
||||
|
||||
class DistilledLessons(BaseModel):
|
||||
"""Structured output from the HITL lesson distillation LLM call."""
|
||||
|
||||
lessons: list[str] = Field(
|
||||
default_factory=list,
|
||||
description=(
|
||||
"Generalizable lessons extracted from the human feedback. "
|
||||
"Each lesson should be a reusable rule or preference. "
|
||||
"Return an empty list if the feedback contains no generalizable guidance."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def human_feedback(
|
||||
message: str,
|
||||
emit: Sequence[str] | None = None,
|
||||
llm: str | BaseLLM | None = None,
|
||||
llm: str | BaseLLM | None = "gpt-4o-mini",
|
||||
default_outcome: str | None = None,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
provider: HumanFeedbackProvider | None = None,
|
||||
learn: bool = False,
|
||||
learn_source: str = "hitl"
|
||||
) -> Callable[[F], F]:
|
||||
"""Decorator for Flow methods that require human feedback.
|
||||
|
||||
@@ -256,7 +283,9 @@ def human_feedback(
|
||||
if not llm:
|
||||
raise ValueError(
|
||||
"llm is required when emit is specified. "
|
||||
"Provide an LLM model string (e.g., 'gpt-4o-mini') or a BaseLLM instance."
|
||||
"Provide an LLM model string (e.g., 'gpt-4o-mini') or a BaseLLM instance. "
|
||||
"See the CrewAI Human-in-the-Loop (HITL) documentation for more information: "
|
||||
"https://docs.crewai.com/en/learn/human-feedback-in-flows"
|
||||
)
|
||||
if default_outcome is not None and default_outcome not in emit:
|
||||
raise ValueError(
|
||||
@@ -269,6 +298,101 @@ def human_feedback(
|
||||
def decorator(func: F) -> F:
|
||||
"""Inner decorator that wraps the function."""
|
||||
|
||||
# -- HITL learning helpers (only used when learn=True) --------
|
||||
|
||||
def _get_hitl_prompt(key: str) -> str:
|
||||
"""Read a HITL prompt from the i18n translations."""
|
||||
from crewai.utilities.i18n import get_i18n
|
||||
|
||||
return get_i18n().slice(key)
|
||||
|
||||
def _resolve_llm_instance() -> Any:
|
||||
"""Resolve the ``llm`` parameter to a BaseLLM instance.
|
||||
|
||||
Uses the SAME model specified in the decorator so pre-review,
|
||||
distillation, and outcome collapsing all share one model.
|
||||
"""
|
||||
if llm is None:
|
||||
from crewai.llm import LLM
|
||||
|
||||
return LLM(model="gpt-4o-mini")
|
||||
if isinstance(llm, str):
|
||||
from crewai.llm import LLM
|
||||
|
||||
return LLM(model=llm)
|
||||
return llm # already a BaseLLM instance
|
||||
|
||||
def _pre_review_with_lessons(
|
||||
flow_instance: Flow[Any], method_output: Any
|
||||
) -> Any:
|
||||
"""Recall past HITL lessons and use LLM to pre-review the output."""
|
||||
try:
|
||||
query = f"human feedback lessons for {func.__name__}: {method_output!s}"
|
||||
matches = flow_instance.memory.recall(
|
||||
query, source=learn_source
|
||||
)
|
||||
if not matches:
|
||||
return method_output
|
||||
|
||||
lessons = "\n".join(f"- {m.record.content}" for m in matches)
|
||||
llm_inst = _resolve_llm_instance()
|
||||
prompt = _get_hitl_prompt("hitl_pre_review_user").format(
|
||||
output=str(method_output),
|
||||
lessons=lessons,
|
||||
)
|
||||
messages = [
|
||||
{"role": "system", "content": _get_hitl_prompt("hitl_pre_review_system")},
|
||||
{"role": "user", "content": prompt},
|
||||
]
|
||||
if getattr(llm_inst, "supports_function_calling", lambda: False)():
|
||||
response = llm_inst.call(messages, response_model=PreReviewResult)
|
||||
if isinstance(response, PreReviewResult):
|
||||
return response.improved_output
|
||||
return PreReviewResult.model_validate(response).improved_output
|
||||
reviewed = llm_inst.call(messages)
|
||||
return reviewed if isinstance(reviewed, str) else str(reviewed)
|
||||
except Exception:
|
||||
return method_output # fallback to raw output on any failure
|
||||
|
||||
def _distill_and_store_lessons(
|
||||
flow_instance: Flow[Any], method_output: Any, raw_feedback: str
|
||||
) -> None:
|
||||
"""Extract generalizable lessons from output + feedback, store in memory."""
|
||||
try:
|
||||
llm_inst = _resolve_llm_instance()
|
||||
prompt = _get_hitl_prompt("hitl_distill_user").format(
|
||||
method_name=func.__name__,
|
||||
output=str(method_output),
|
||||
feedback=raw_feedback,
|
||||
)
|
||||
messages = [
|
||||
{"role": "system", "content": _get_hitl_prompt("hitl_distill_system")},
|
||||
{"role": "user", "content": prompt},
|
||||
]
|
||||
|
||||
lessons: list[str] = []
|
||||
if getattr(llm_inst, "supports_function_calling", lambda: False)():
|
||||
response = llm_inst.call(messages, response_model=DistilledLessons)
|
||||
if isinstance(response, DistilledLessons):
|
||||
lessons = response.lessons
|
||||
else:
|
||||
lessons = DistilledLessons.model_validate(response).lessons
|
||||
else:
|
||||
response = llm_inst.call(messages)
|
||||
if isinstance(response, str):
|
||||
lessons = [
|
||||
line.strip("- ").strip()
|
||||
for line in response.strip().split("\n")
|
||||
if line.strip() and line.strip() != "NONE"
|
||||
]
|
||||
|
||||
if lessons:
|
||||
flow_instance.memory.remember_many(lessons, source=learn_source)
|
||||
except Exception: # noqa: S110
|
||||
pass # non-critical: don't fail the flow because lesson storage failed
|
||||
|
||||
# -- Core feedback helpers ------------------------------------
|
||||
|
||||
def _request_feedback(flow_instance: Flow[Any], method_output: Any) -> str:
|
||||
"""Request feedback using provider or default console."""
|
||||
from crewai.flow.async_feedback.types import PendingFeedbackContext
|
||||
@@ -353,28 +477,40 @@ def human_feedback(
|
||||
# Async wrapper
|
||||
@wraps(func)
|
||||
async def async_wrapper(self: Flow[Any], *args: Any, **kwargs: Any) -> Any:
|
||||
# Execute the original method
|
||||
method_output = await func(self, *args, **kwargs)
|
||||
|
||||
# Request human feedback (may raise HumanFeedbackPending)
|
||||
raw_feedback = _request_feedback(self, method_output)
|
||||
# Pre-review: apply past HITL lessons before human sees it
|
||||
if learn and getattr(self, "memory", None) is not None:
|
||||
method_output = _pre_review_with_lessons(self, method_output)
|
||||
|
||||
# Process and return
|
||||
return _process_feedback(self, method_output, raw_feedback)
|
||||
raw_feedback = _request_feedback(self, method_output)
|
||||
result = _process_feedback(self, method_output, raw_feedback)
|
||||
|
||||
# Distill: extract lessons from output + feedback, store in memory
|
||||
if learn and getattr(self, "memory", None) is not None and raw_feedback.strip():
|
||||
_distill_and_store_lessons(self, method_output, raw_feedback)
|
||||
|
||||
return result
|
||||
|
||||
wrapper: Any = async_wrapper
|
||||
else:
|
||||
# Sync wrapper
|
||||
@wraps(func)
|
||||
def sync_wrapper(self: Flow[Any], *args: Any, **kwargs: Any) -> Any:
|
||||
# Execute the original method
|
||||
method_output = func(self, *args, **kwargs)
|
||||
|
||||
# Request human feedback (may raise HumanFeedbackPending)
|
||||
raw_feedback = _request_feedback(self, method_output)
|
||||
# Pre-review: apply past HITL lessons before human sees it
|
||||
if learn and getattr(self, "memory", None) is not None:
|
||||
method_output = _pre_review_with_lessons(self, method_output)
|
||||
|
||||
# Process and return
|
||||
return _process_feedback(self, method_output, raw_feedback)
|
||||
raw_feedback = _request_feedback(self, method_output)
|
||||
result = _process_feedback(self, method_output, raw_feedback)
|
||||
|
||||
# Distill: extract lessons from output + feedback, store in memory
|
||||
if learn and getattr(self, "memory", None) is not None and raw_feedback.strip():
|
||||
_distill_and_store_lessons(self, method_output, raw_feedback)
|
||||
|
||||
return result
|
||||
|
||||
wrapper = sync_wrapper
|
||||
|
||||
@@ -397,6 +533,8 @@ def human_feedback(
|
||||
default_outcome=default_outcome,
|
||||
metadata=metadata,
|
||||
provider=provider,
|
||||
learn=learn,
|
||||
learn_source=learn_source
|
||||
)
|
||||
wrapper.__is_flow_method__ = True
|
||||
|
||||
|
||||
151
lib/crewai/src/crewai/flow/input_provider.py
Normal file
151
lib/crewai/src/crewai/flow/input_provider.py
Normal file
@@ -0,0 +1,151 @@
|
||||
"""Input provider protocol for Flow.ask().
|
||||
|
||||
This module provides the InputProvider protocol and InputResponse dataclass
|
||||
used by Flow.ask() to request input from users during flow execution.
|
||||
|
||||
The default implementation is ``ConsoleProvider`` (from
|
||||
``crewai.flow.async_feedback.providers``), which serves both feedback
|
||||
and input collection via console.
|
||||
|
||||
Example (default console input):
|
||||
```python
|
||||
from crewai.flow import Flow, start
|
||||
|
||||
|
||||
class MyFlow(Flow):
|
||||
@start()
|
||||
def gather_info(self):
|
||||
topic = self.ask("What topic should we research?")
|
||||
return topic
|
||||
```
|
||||
|
||||
Example (custom provider with metadata):
|
||||
```python
|
||||
from crewai.flow import Flow, start
|
||||
from crewai.flow.input_provider import InputProvider, InputResponse
|
||||
|
||||
|
||||
class SlackProvider:
|
||||
def request_input(self, message, flow, metadata=None):
|
||||
channel = metadata.get("channel", "#general") if metadata else "#general"
|
||||
thread = self.post_question(channel, message)
|
||||
reply = self.wait_for_reply(thread)
|
||||
return InputResponse(
|
||||
text=reply.text,
|
||||
metadata={"responded_by": reply.user_id, "thread_id": thread.id},
|
||||
)
|
||||
|
||||
|
||||
class MyFlow(Flow):
|
||||
input_provider = SlackProvider()
|
||||
|
||||
@start()
|
||||
def gather_info(self):
|
||||
topic = self.ask("What topic?", metadata={"channel": "#research"})
|
||||
return topic
|
||||
```
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.flow.flow import Flow
|
||||
|
||||
|
||||
@dataclass
|
||||
class InputResponse:
|
||||
"""Response from an InputProvider, optionally carrying metadata.
|
||||
|
||||
Simple providers can just return a string from ``request_input()``.
|
||||
Providers that need to send metadata back (e.g., who responded,
|
||||
thread ID, external timestamps) return an ``InputResponse`` instead.
|
||||
|
||||
``ask()`` normalizes both cases -- callers always get ``str | None``.
|
||||
The response metadata is stored in ``_input_history`` and emitted
|
||||
in ``FlowInputReceivedEvent``.
|
||||
|
||||
Attributes:
|
||||
text: The user's input text, or None if unavailable.
|
||||
metadata: Optional metadata from the provider about the response
|
||||
(e.g., who responded, thread ID, timestamps).
|
||||
|
||||
Example:
|
||||
```python
|
||||
class MyProvider:
|
||||
def request_input(self, message, flow, metadata=None):
|
||||
response = get_response_from_external_system(message)
|
||||
return InputResponse(
|
||||
text=response.text,
|
||||
metadata={"responded_by": response.user_id},
|
||||
)
|
||||
```
|
||||
"""
|
||||
|
||||
text: str | None
|
||||
metadata: dict[str, Any] | None = field(default=None)
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class InputProvider(Protocol):
|
||||
"""Protocol for user input collection strategies.
|
||||
|
||||
Implement this protocol to create custom input providers that integrate
|
||||
with external systems like websockets, web UIs, Slack, or custom APIs.
|
||||
|
||||
The default provider is ``ConsoleProvider``, which blocks waiting for
|
||||
console input via Python's built-in ``input()`` function.
|
||||
|
||||
Providers are always synchronous. The flow framework runs sync methods
|
||||
in a thread pool (via ``asyncio.to_thread``), so ``ask()`` never blocks
|
||||
the event loop even inside async flow methods.
|
||||
|
||||
Providers can return either:
|
||||
- ``str | None`` for simple cases (no response metadata)
|
||||
- ``InputResponse`` when they need to send metadata back with the answer
|
||||
|
||||
Example (simple):
|
||||
```python
|
||||
class SimpleProvider:
|
||||
def request_input(self, message: str, flow: Flow) -> str | None:
|
||||
return input(message)
|
||||
```
|
||||
|
||||
Example (with metadata):
|
||||
```python
|
||||
class SlackProvider:
|
||||
def request_input(self, message, flow, metadata=None):
|
||||
channel = metadata.get("channel") if metadata else "#general"
|
||||
reply = self.post_and_wait(channel, message)
|
||||
return InputResponse(
|
||||
text=reply.text,
|
||||
metadata={"responded_by": reply.user_id},
|
||||
)
|
||||
```
|
||||
"""
|
||||
|
||||
def request_input(
|
||||
self,
|
||||
message: str,
|
||||
flow: Flow[Any],
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> str | InputResponse | None:
|
||||
"""Request input from the user.
|
||||
|
||||
Args:
|
||||
message: The question or prompt to display to the user.
|
||||
flow: The Flow instance requesting input. Can be used to
|
||||
access flow state, name, or other context.
|
||||
metadata: Optional metadata from the caller, such as user ID,
|
||||
channel, session context, etc. Providers can use this to
|
||||
route the question to the right recipient.
|
||||
|
||||
Returns:
|
||||
The user's input as a string, an ``InputResponse`` with text
|
||||
and optional response metadata, or None if input is unavailable
|
||||
(e.g., user cancelled, connection dropped).
|
||||
"""
|
||||
...
|
||||
@@ -4,6 +4,7 @@ This module contains TypedDict definitions and type aliases used throughout
|
||||
the Flow system.
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from typing import (
|
||||
Annotated,
|
||||
Any,
|
||||
@@ -101,6 +102,30 @@ class FlowData(TypedDict):
|
||||
flow_methods_attributes: list[FlowMethodData]
|
||||
|
||||
|
||||
class InputHistoryEntry(TypedDict):
|
||||
"""A single entry in the flow's input history from ``self.ask()``.
|
||||
|
||||
Each call to ``Flow.ask()`` appends one entry recording the question,
|
||||
the user's response, which method asked, and any metadata exchanged
|
||||
between the caller and the input provider.
|
||||
|
||||
Attributes:
|
||||
message: The question or prompt that was displayed to the user.
|
||||
response: The user's response, or None on timeout/error.
|
||||
method_name: The flow method that called ``ask()``.
|
||||
timestamp: When the input was received.
|
||||
metadata: Metadata sent with the question (caller to provider).
|
||||
response_metadata: Metadata received with the answer (provider to caller).
|
||||
"""
|
||||
|
||||
message: str
|
||||
response: str | None
|
||||
method_name: str
|
||||
timestamp: datetime
|
||||
metadata: dict[str, Any] | None
|
||||
response_metadata: dict[str, Any] | None
|
||||
|
||||
|
||||
class FlowExecutionData(TypedDict):
|
||||
"""Flow execution data.
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from collections.abc import Callable
|
||||
import time
|
||||
from functools import wraps
|
||||
import inspect
|
||||
import json
|
||||
@@ -48,6 +49,11 @@ from crewai.events.types.agent_events import (
|
||||
LiteAgentExecutionErrorEvent,
|
||||
LiteAgentExecutionStartedEvent,
|
||||
)
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryRetrievalCompletedEvent,
|
||||
MemoryRetrievalFailedEvent,
|
||||
MemoryRetrievalStartedEvent,
|
||||
)
|
||||
from crewai.events.types.logging_events import AgentLogsExecutionEvent
|
||||
from crewai.flow.flow_trackable import FlowTrackable
|
||||
from crewai.hooks.llm_hooks import get_after_llm_call_hooks, get_before_llm_call_hooks
|
||||
@@ -244,6 +250,10 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
description="A2A (Agent-to-Agent) configuration for delegating tasks to remote agents. "
|
||||
"Can be a single A2AConfig/A2AClientConfig/A2AServerConfig, or a list of configurations.",
|
||||
)
|
||||
memory: bool | Any | None = Field(
|
||||
default=None,
|
||||
description="If True, use default Memory(). If Memory/MemoryScope/MemorySlice, use it for recall and remember.",
|
||||
)
|
||||
tools_results: list[dict[str, Any]] = Field(
|
||||
default_factory=list, description="Results of the tools used by the agent."
|
||||
)
|
||||
@@ -266,6 +276,7 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
_after_llm_call_hooks: list[AfterLLMCallHookType] = PrivateAttr(
|
||||
default_factory=get_after_llm_call_hooks
|
||||
)
|
||||
_memory: Any = PrivateAttr(default=None)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def emit_deprecation_warning(self) -> Self:
|
||||
@@ -363,6 +374,19 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def resolve_memory(self) -> Self:
|
||||
"""Resolve memory field to _memory: default Memory() when True, else user instance or None."""
|
||||
if self.memory is True:
|
||||
from crewai.memory.unified_memory import Memory
|
||||
|
||||
object.__setattr__(self, "_memory", Memory())
|
||||
elif self.memory is not None and self.memory is not False:
|
||||
object.__setattr__(self, "_memory", self.memory)
|
||||
else:
|
||||
object.__setattr__(self, "_memory", None)
|
||||
return self
|
||||
|
||||
@field_validator("guardrail", mode="before")
|
||||
@classmethod
|
||||
def validate_guardrail_function(
|
||||
@@ -455,6 +479,19 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
Returns:
|
||||
LiteAgentOutput: The result of the agent execution.
|
||||
"""
|
||||
# Inject memory tools once if memory is configured (mirrors Agent._prepare_kickoff)
|
||||
if self._memory is not None:
|
||||
from crewai.tools.memory_tools import create_memory_tools
|
||||
from crewai.utilities.agent_utils import sanitize_tool_name
|
||||
|
||||
existing_names = {sanitize_tool_name(t.name) for t in self._parsed_tools}
|
||||
memory_tools = [
|
||||
mt for mt in create_memory_tools(self._memory)
|
||||
if sanitize_tool_name(mt.name) not in existing_names
|
||||
]
|
||||
if memory_tools:
|
||||
self._parsed_tools = self._parsed_tools + parse_tools(memory_tools)
|
||||
|
||||
# Create agent info for event emission
|
||||
agent_info = {
|
||||
"id": self.id,
|
||||
@@ -474,6 +511,7 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
self._messages = self._format_messages(
|
||||
messages, response_format=response_format, input_files=input_files
|
||||
)
|
||||
self._inject_memory_context()
|
||||
|
||||
return self._execute_core(
|
||||
agent_info=agent_info, response_format=response_format
|
||||
@@ -496,6 +534,80 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
)
|
||||
raise e
|
||||
|
||||
def _get_last_user_content(self) -> str:
|
||||
"""Get the last user message content from _messages for recall/input."""
|
||||
for msg in reversed(self._messages):
|
||||
if msg.get("role") == "user":
|
||||
content = msg.get("content")
|
||||
return content if isinstance(content, str) else ""
|
||||
return ""
|
||||
|
||||
def _inject_memory_context(self) -> None:
|
||||
"""Recall relevant memories and append to the system message. No-op if _memory is None."""
|
||||
if self._memory is None:
|
||||
return
|
||||
query = self._get_last_user_content()
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryRetrievalStartedEvent(
|
||||
task_id=None,
|
||||
source_type="lite_agent",
|
||||
),
|
||||
)
|
||||
start_time = time.time()
|
||||
memory_block = ""
|
||||
try:
|
||||
matches = self._memory.recall(query, limit=10)
|
||||
if matches:
|
||||
memory_block = "Relevant memories:\n" + "\n".join(
|
||||
f"- {m.record.content}" for m in matches
|
||||
)
|
||||
if memory_block:
|
||||
formatted = self.i18n.slice("memory").format(memory=memory_block)
|
||||
if self._messages and self._messages[0].get("role") == "system":
|
||||
self._messages[0]["content"] = (
|
||||
self._messages[0].get("content", "") + "\n\n" + formatted
|
||||
)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryRetrievalCompletedEvent(
|
||||
task_id=None,
|
||||
memory_content=memory_block,
|
||||
retrieval_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="lite_agent",
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryRetrievalFailedEvent(
|
||||
task_id=None,
|
||||
source_type="lite_agent",
|
||||
error=str(e),
|
||||
),
|
||||
)
|
||||
|
||||
def _save_to_memory(self, output_text: str) -> None:
|
||||
"""Extract discrete memories from the run and remember each. No-op if _memory is None."""
|
||||
if self._memory is None:
|
||||
return
|
||||
input_str = self._get_last_user_content() or "User request"
|
||||
try:
|
||||
raw = (
|
||||
f"Input: {input_str}\n"
|
||||
f"Agent: {self.role}\n"
|
||||
f"Result: {output_text}"
|
||||
)
|
||||
extracted = self._memory.extract_memories(raw)
|
||||
if extracted:
|
||||
self._memory.remember_many(extracted, agent_role=self.role)
|
||||
except Exception as e:
|
||||
if self.verbose:
|
||||
self._printer.print(
|
||||
content=f"Failed to save to memory: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
def _execute_core(
|
||||
self, agent_info: dict[str, Any], response_format: type[BaseModel] | None = None
|
||||
) -> LiteAgentOutput:
|
||||
@@ -511,6 +623,8 @@ class LiteAgent(FlowTrackable, BaseModel):
|
||||
|
||||
# Execute the agent using invoke loop
|
||||
agent_finish = self._invoke_loop()
|
||||
if self._memory is not None:
|
||||
self._save_to_memory(agent_finish.output)
|
||||
formatted_result: BaseModel | None = None
|
||||
|
||||
active_response_format = response_format or self.response_format
|
||||
|
||||
@@ -419,8 +419,22 @@ class LLM(BaseLLM):
|
||||
|
||||
# FALLBACK to LiteLLM
|
||||
if not LITELLM_AVAILABLE:
|
||||
logger.error("LiteLLM is not available, falling back to LiteLLM")
|
||||
raise ImportError("Fallback to LiteLLM is not available") from None
|
||||
native_list = ", ".join(SUPPORTED_NATIVE_PROVIDERS)
|
||||
error_msg = (
|
||||
f"Unable to initialize LLM with model '{model}'. "
|
||||
f"The model did not match any supported native provider "
|
||||
f"({native_list}), and the LiteLLM fallback package is not "
|
||||
f"installed.\n\n"
|
||||
f"To fix this, either:\n"
|
||||
f" 1. Install LiteLLM for broad model support: "
|
||||
f"uv add litellm\n"
|
||||
f"or\n"
|
||||
f"pip install litellm\n\n"
|
||||
f"For more details, see: "
|
||||
f"https://docs.crewai.com/en/learn/llm-connections"
|
||||
)
|
||||
logger.error(error_msg)
|
||||
raise ImportError(error_msg) from None
|
||||
|
||||
instance = object.__new__(cls)
|
||||
super(LLM, instance).__init__(model=model, is_litellm=True, **kwargs)
|
||||
|
||||
@@ -234,7 +234,7 @@ class BedrockCompletion(BaseLLM):
|
||||
aws_access_key_id: str | None = None,
|
||||
aws_secret_access_key: str | None = None,
|
||||
aws_session_token: str | None = None,
|
||||
region_name: str = "us-east-1",
|
||||
region_name: str | None = None,
|
||||
temperature: float | None = None,
|
||||
max_tokens: int | None = None,
|
||||
top_p: float | None = None,
|
||||
@@ -287,15 +287,6 @@ class BedrockCompletion(BaseLLM):
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
# Initialize Bedrock client with proper configuration
|
||||
session = Session(
|
||||
aws_access_key_id=aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID"),
|
||||
aws_secret_access_key=aws_secret_access_key
|
||||
or os.getenv("AWS_SECRET_ACCESS_KEY"),
|
||||
aws_session_token=aws_session_token or os.getenv("AWS_SESSION_TOKEN"),
|
||||
region_name=region_name,
|
||||
)
|
||||
|
||||
# Configure client with timeouts and retries following AWS best practices
|
||||
config = Config(
|
||||
read_timeout=300,
|
||||
@@ -306,8 +297,12 @@ class BedrockCompletion(BaseLLM):
|
||||
tcp_keepalive=True,
|
||||
)
|
||||
|
||||
self.client = session.client("bedrock-runtime", config=config)
|
||||
self.region_name = region_name
|
||||
self.region_name = (
|
||||
region_name
|
||||
or os.getenv("AWS_DEFAULT_REGION")
|
||||
or os.getenv("AWS_REGION_NAME")
|
||||
or "us-east-1"
|
||||
)
|
||||
|
||||
self.aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
|
||||
self.aws_secret_access_key = aws_secret_access_key or os.getenv(
|
||||
@@ -315,6 +310,16 @@ class BedrockCompletion(BaseLLM):
|
||||
)
|
||||
self.aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN")
|
||||
|
||||
# Initialize Bedrock client with proper configuration
|
||||
session = Session(
|
||||
aws_access_key_id=self.aws_access_key_id,
|
||||
aws_secret_access_key=self.aws_secret_access_key,
|
||||
aws_session_token=self.aws_session_token,
|
||||
region_name=self.region_name,
|
||||
)
|
||||
|
||||
self.client = session.client("bedrock-runtime", config=config)
|
||||
|
||||
self._async_exit_stack = AsyncExitStack() if AIOBOTOCORE_AVAILABLE else None
|
||||
self._async_client_initialized = False
|
||||
|
||||
|
||||
@@ -1,13 +1,27 @@
|
||||
from crewai.memory.entity.entity_memory import EntityMemory
|
||||
from crewai.memory.external.external_memory import ExternalMemory
|
||||
from crewai.memory.long_term.long_term_memory import LongTermMemory
|
||||
from crewai.memory.short_term.short_term_memory import ShortTermMemory
|
||||
"""Memory module: unified Memory with LLM analysis and pluggable storage."""
|
||||
|
||||
from crewai.memory.encoding_flow import EncodingFlow
|
||||
from crewai.memory.memory_scope import MemoryScope, MemorySlice
|
||||
from crewai.memory.types import (
|
||||
MemoryMatch,
|
||||
MemoryRecord,
|
||||
ScopeInfo,
|
||||
compute_composite_score,
|
||||
embed_text,
|
||||
embed_texts,
|
||||
)
|
||||
from crewai.memory.unified_memory import Memory
|
||||
|
||||
|
||||
__all__ = [
|
||||
"EntityMemory",
|
||||
"ExternalMemory",
|
||||
"LongTermMemory",
|
||||
"ShortTermMemory",
|
||||
"EncodingFlow",
|
||||
"Memory",
|
||||
"MemoryMatch",
|
||||
"MemoryRecord",
|
||||
"MemoryScope",
|
||||
"MemorySlice",
|
||||
"ScopeInfo",
|
||||
"compute_composite_score",
|
||||
"embed_text",
|
||||
"embed_texts",
|
||||
]
|
||||
|
||||
371
lib/crewai/src/crewai/memory/analyze.py
Normal file
371
lib/crewai/src/crewai/memory/analyze.py
Normal file
@@ -0,0 +1,371 @@
|
||||
"""LLM-powered analysis for memory save and recall."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from crewai.memory.types import MemoryRecord, ScopeInfo
|
||||
from crewai.utilities.i18n import get_i18n
|
||||
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExtractedMetadata(BaseModel):
|
||||
"""Fixed schema for LLM-extracted metadata (OpenAI requires additionalProperties: false)."""
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
entities: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Entities (people, orgs, places) mentioned in the content.",
|
||||
)
|
||||
dates: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Dates or time references in the content.",
|
||||
)
|
||||
topics: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Topics or themes in the content.",
|
||||
)
|
||||
|
||||
|
||||
class MemoryAnalysis(BaseModel):
|
||||
"""LLM output for analyzing content before saving to memory."""
|
||||
|
||||
suggested_scope: str = Field(
|
||||
description="Best matching existing scope or new path (e.g. /company/decisions).",
|
||||
)
|
||||
categories: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Categories for the memory (prefer existing, add new if needed).",
|
||||
)
|
||||
importance: float = Field(
|
||||
default=0.5,
|
||||
ge=0.0,
|
||||
le=1.0,
|
||||
description="Importance score from 0.0 to 1.0.",
|
||||
)
|
||||
extracted_metadata: ExtractedMetadata = Field(
|
||||
default_factory=ExtractedMetadata,
|
||||
description="Entities, dates, topics extracted from the content.",
|
||||
)
|
||||
|
||||
|
||||
class QueryAnalysis(BaseModel):
|
||||
"""LLM output for analyzing a recall query."""
|
||||
|
||||
keywords: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Key entities or keywords for filtering.",
|
||||
)
|
||||
suggested_scopes: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Scope paths to search (subset of available scopes).",
|
||||
)
|
||||
complexity: str = Field(
|
||||
default="simple",
|
||||
description="One of 'simple' (single fact) or 'complex' (aggregation/reasoning).",
|
||||
)
|
||||
recall_queries: list[str] = Field(
|
||||
default_factory=list,
|
||||
description=(
|
||||
"1-3 short, targeted search phrases distilled from the query. "
|
||||
"Each should be a concise question or keyword phrase optimized "
|
||||
"for semantic vector search. If the query is already short and "
|
||||
"focused, return it as a single item."
|
||||
),
|
||||
)
|
||||
time_filter: str | None = Field(
|
||||
default=None,
|
||||
description=(
|
||||
"If the query references a specific time period (e.g. 'last week', "
|
||||
"'yesterday', 'in January'), return an ISO 8601 date string representing "
|
||||
"the earliest date that results should match (e.g. '2026-02-01'). "
|
||||
"Return null if no time constraint is implied."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
class ExtractedMemories(BaseModel):
|
||||
"""LLM output for extracting discrete memories from raw content."""
|
||||
|
||||
memories: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="List of discrete, self-contained memory statements extracted from the content.",
|
||||
)
|
||||
|
||||
|
||||
class ConsolidationAction(BaseModel):
|
||||
"""A single action in a consolidation plan."""
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
action: str = Field(
|
||||
description="One of 'keep', 'update', or 'delete'.",
|
||||
)
|
||||
record_id: str = Field(
|
||||
description="ID of the existing record this action applies to.",
|
||||
)
|
||||
new_content: str | None = Field(
|
||||
default=None,
|
||||
description="Updated content text. Required when action is 'update'.",
|
||||
)
|
||||
reason: str = Field(
|
||||
default="",
|
||||
description="Brief reason for this action.",
|
||||
)
|
||||
|
||||
|
||||
class ConsolidationPlan(BaseModel):
|
||||
"""LLM output for consolidating new content with existing memories."""
|
||||
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
actions: list[ConsolidationAction] = Field(
|
||||
default_factory=list,
|
||||
description="Actions to take on existing records (keep/update/delete).",
|
||||
)
|
||||
insert_new: bool = Field(
|
||||
default=True,
|
||||
description="Whether to also insert the new content as a separate record.",
|
||||
)
|
||||
insert_reason: str = Field(
|
||||
default="",
|
||||
description="Why the new content should or should not be inserted.",
|
||||
)
|
||||
|
||||
|
||||
def _get_prompt(key: str) -> str:
|
||||
"""Retrieve a memory prompt from the i18n translations.
|
||||
|
||||
Args:
|
||||
key: The prompt key under the "memory" section.
|
||||
|
||||
Returns:
|
||||
The prompt string.
|
||||
"""
|
||||
return get_i18n().memory(key)
|
||||
|
||||
|
||||
def extract_memories_from_content(content: str, llm: Any) -> list[str]:
|
||||
"""Use the LLM to extract discrete memory statements from raw content.
|
||||
|
||||
This is a pure helper: it does NOT store anything. Callers should call
|
||||
memory.remember() on each returned string to persist them.
|
||||
|
||||
On LLM failure, returns the full content as a single memory so callers
|
||||
still persist something rather than dropping the output.
|
||||
|
||||
Args:
|
||||
content: Raw text (e.g. task description + result dump).
|
||||
llm: The LLM instance to use.
|
||||
|
||||
Returns:
|
||||
List of short, self-contained memory statements (or [content] on failure).
|
||||
"""
|
||||
if not (content or "").strip():
|
||||
return []
|
||||
user = _get_prompt("extract_memories_user").format(content=content)
|
||||
messages = [
|
||||
{"role": "system", "content": _get_prompt("extract_memories_system")},
|
||||
{"role": "user", "content": user},
|
||||
]
|
||||
try:
|
||||
if getattr(llm, "supports_function_calling", lambda: False)():
|
||||
response = llm.call(messages, response_model=ExtractedMemories)
|
||||
if isinstance(response, ExtractedMemories):
|
||||
return response.memories
|
||||
return ExtractedMemories.model_validate(response).memories
|
||||
response = llm.call(messages)
|
||||
if isinstance(response, ExtractedMemories):
|
||||
return response.memories
|
||||
if isinstance(response, str):
|
||||
data = json.loads(response)
|
||||
return ExtractedMemories.model_validate(data).memories
|
||||
return ExtractedMemories.model_validate(response).memories
|
||||
except Exception as e:
|
||||
_logger.warning(
|
||||
"Memory extraction failed, storing full content as single memory: %s",
|
||||
e,
|
||||
exc_info=False,
|
||||
)
|
||||
return [content]
|
||||
|
||||
|
||||
def analyze_query(
    query: str,
    available_scopes: list[str],
    scope_info: ScopeInfo | None,
    llm: Any,
) -> QueryAnalysis:
    """Use the LLM to analyze a recall query.

    On LLM failure, returns safe defaults so recall degrades to plain vector search.

    Args:
        query: The user's recall query.
        available_scopes: Scope paths that exist in the store.
        scope_info: Optional info about the current scope.
        llm: The LLM instance to use.

    Returns:
        QueryAnalysis with keywords, suggested_scopes, complexity, recall_queries, time_filter.
    """
    scope_desc = (
        f"Current scope has {scope_info.record_count} records, categories: {scope_info.categories}"
        if scope_info
        else ""
    )
    user_prompt = _get_prompt("query_user").format(
        query=query,
        available_scopes=available_scopes or ["/"],
        scope_desc=scope_desc,
    )
    messages = [
        {"role": "system", "content": _get_prompt("query_system")},
        {"role": "user", "content": user_prompt},
    ]
    try:
        if getattr(llm, "supports_function_calling", lambda: False)():
            # Structured-output path: request the schema directly.
            raw = llm.call(messages, response_model=QueryAnalysis)
            if isinstance(raw, QueryAnalysis):
                return raw
            return QueryAnalysis.model_validate(raw)

        # Plain-text path: parse JSON out of a string response if needed.
        raw = llm.call(messages)
        if isinstance(raw, QueryAnalysis):
            return raw
        payload = json.loads(raw) if isinstance(raw, str) else raw
        return QueryAnalysis.model_validate(payload)
    except Exception as exc:
        _logger.warning(
            "Query analysis failed, using defaults (complexity=simple): %s",
            exc,
            exc_info=False,
        )
        # Safe fallback: no keyword filtering, the first few known scopes,
        # and the raw query as the only recall query.
        return QueryAnalysis(
            keywords=[],
            suggested_scopes=(available_scopes or ["/"])[:5],
            complexity="simple",
            recall_queries=[query],
        )
|
||||
|
||||
|
||||
# Fallback analysis used when the LLM call in analyze_for_save() fails.
# NOTE(review): this is a single shared module-level instance — if any caller
# mutates the returned MemoryAnalysis, later fallbacks see the mutation.
# Confirm callers treat it as read-only (or hand out a copy).
_SAVE_DEFAULTS = MemoryAnalysis(
    suggested_scope="/",
    categories=[],
    importance=0.5,
    extracted_metadata=ExtractedMetadata(),
)
|
||||
|
||||
|
||||
def analyze_for_save(
    content: str,
    existing_scopes: list[str],
    existing_categories: list[str],
    llm: Any,
) -> MemoryAnalysis:
    """Infer scope, categories, importance, and metadata for a single memory.

    Uses the small ``MemoryAnalysis`` schema (4 fields) for fast LLM response.
    On failure, returns safe defaults so the memory still gets persisted.

    Args:
        content: The memory content to analyze.
        existing_scopes: Current scope paths in the memory store.
        existing_categories: Current categories in use.
        llm: The LLM instance to use.

    Returns:
        MemoryAnalysis with suggested_scope, categories, importance, extracted_metadata.
    """
    user = _get_prompt("save_user").format(
        content=content,
        existing_scopes=existing_scopes or ["/"],
        existing_categories=existing_categories or [],
    )
    messages = [
        {"role": "system", "content": _get_prompt("save_system")},
        {"role": "user", "content": user},
    ]
    try:
        if getattr(llm, "supports_function_calling", lambda: False)():
            # Structured-output path: request the schema directly.
            response = llm.call(messages, response_model=MemoryAnalysis)
            if isinstance(response, MemoryAnalysis):
                return response
            return MemoryAnalysis.model_validate(response)
        # Plain-text path: parse JSON out of a string response if needed.
        response = llm.call(messages)
        if isinstance(response, MemoryAnalysis):
            return response
        if isinstance(response, str):
            data = json.loads(response)
            return MemoryAnalysis.model_validate(data)
        return MemoryAnalysis.model_validate(response)
    except Exception as e:
        _logger.warning(
            "Memory save analysis failed, using defaults: %s", e, exc_info=False,
        )
        # Return a deep copy rather than the shared _SAVE_DEFAULTS instance:
        # handing out the module-level singleton lets a caller's mutation
        # (e.g. appending to categories) corrupt every later fallback.
        return _SAVE_DEFAULTS.model_copy(deep=True)
|
||||
|
||||
|
||||
# Fallback plan used when the LLM call in analyze_for_consolidation() fails.
# NOTE(review): shared module-level instance — confirm callers never mutate
# the returned plan (e.g. appending to .actions), or later fallbacks leak it.
_CONSOLIDATION_DEFAULT = ConsolidationPlan(actions=[], insert_new=True)
|
||||
|
||||
|
||||
def analyze_for_consolidation(
    new_content: str,
    existing_records: list[MemoryRecord],
    llm: Any,
) -> ConsolidationPlan:
    """Decide insert/update/delete for a single memory against similar existing records.

    Uses the small ``ConsolidationPlan`` schema (3 fields) for fast LLM response.
    On failure, returns a safe default (insert_new=True) so the memory still gets persisted.

    Args:
        new_content: The new content to store.
        existing_records: Existing records that are semantically similar.
        llm: The LLM instance to use.

    Returns:
        ConsolidationPlan with actions per record and whether to insert the new content.
    """
    if not existing_records:
        return ConsolidationPlan(actions=[], insert_new=True)
    # Summarize each candidate record (content truncated to 200 chars) so the
    # prompt stays small regardless of stored content size.
    records_lines: list[str] = []
    for r in existing_records:
        created = r.created_at.isoformat() if r.created_at else ""
        records_lines.append(
            f"- id={r.id} | scope={r.scope} | importance={r.importance:.2f} | created={created}\n"
            f" content: {r.content[:200]}{'...' if len(r.content) > 200 else ''}"
        )
    user = _get_prompt("consolidation_user").format(
        new_content=new_content,
        records_summary="\n\n".join(records_lines),
    )
    messages = [
        {"role": "system", "content": _get_prompt("consolidation_system")},
        {"role": "user", "content": user},
    ]
    try:
        if getattr(llm, "supports_function_calling", lambda: False)():
            # Structured-output path: request the schema directly.
            response = llm.call(messages, response_model=ConsolidationPlan)
            if isinstance(response, ConsolidationPlan):
                return response
            return ConsolidationPlan.model_validate(response)
        # Plain-text path: parse JSON out of a string response if needed.
        response = llm.call(messages)
        if isinstance(response, ConsolidationPlan):
            return response
        if isinstance(response, str):
            data = json.loads(response)
            return ConsolidationPlan.model_validate(data)
        return ConsolidationPlan.model_validate(response)
    except Exception as e:
        _logger.warning(
            "Consolidation analysis failed, defaulting to insert: %s", e, exc_info=False,
        )
        # Build a fresh plan, consistent with the early-return path above.
        # Returning the shared _CONSOLIDATION_DEFAULT instance would alias
        # one mutable ConsolidationPlan across unrelated calls.
        return ConsolidationPlan(actions=[], insert_new=True)
|
||||
@@ -1,254 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from crewai.memory import (
|
||||
EntityMemory,
|
||||
ExternalMemory,
|
||||
LongTermMemory,
|
||||
ShortTermMemory,
|
||||
)
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent import Agent
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class ContextualMemory:
    """Aggregates and retrieves context from multiple memory sources.

    Wraps short-term, long-term, entity, and external memory behind one
    interface that formats each source's search results into labelled
    bullet lists for inclusion in a task prompt.
    """

    def __init__(
        self,
        stm: ShortTermMemory,
        ltm: LongTermMemory,
        em: EntityMemory,
        exm: ExternalMemory,
        agent: Agent | None = None,
        task: Task | None = None,
    ) -> None:
        """Store the memory sources and propagate agent/task onto each.

        Args:
            stm: Short-term memory (may be None when disabled).
            ltm: Long-term memory (may be None when disabled).
            em: Entity memory (may be None when disabled).
            exm: External memory (may be None when disabled).
            agent: Optional agent used to scope memory searches.
            task: Optional task used to scope memory searches.
        """
        self.stm = stm
        self.ltm = ltm
        self.em = em
        self.exm = exm
        self.agent = agent
        self.task = task

        # Propagate the agent/task pair onto every configured memory so each
        # store can scope its searches; skip sources that are disabled (None).
        for memory in (self.stm, self.ltm, self.em, self.exm):
            if memory is not None:
                memory.agent = self.agent
                memory.task = self.task

    @staticmethod
    def _format_bullets(results: list[dict]) -> str:
        """Render search results as '- <content>' bullet lines."""
        return "\n".join(f"- {result['content']}" for result in results)

    def build_context_for_task(self, task: Task, context: str) -> str:
        """Build contextual information for a task synchronously.

        Args:
            task: The task to build context for.
            context: Additional context string.

        Returns:
            Formatted context string from all memory sources.
        """
        query = f"{task.description} {context}".strip()
        if query == "":
            return ""

        context_parts = [
            self._fetch_ltm_context(task.description),
            self._fetch_stm_context(query),
            self._fetch_entity_context(query),
            self._fetch_external_context(query),
        ]
        # filter(None, ...) drops both empty strings and None entries.
        return "\n".join(filter(None, context_parts))

    async def abuild_context_for_task(self, task: Task, context: str) -> str:
        """Build contextual information for a task asynchronously.

        Args:
            task: The task to build context for.
            context: Additional context string.

        Returns:
            Formatted context string from all memory sources.
        """
        query = f"{task.description} {context}".strip()
        if query == "":
            return ""

        # Fetch all contexts concurrently; order matches the sync variant.
        results = await asyncio.gather(
            self._afetch_ltm_context(task.description),
            self._afetch_stm_context(query),
            self._afetch_entity_context(query),
            self._afetch_external_context(query),
        )
        return "\n".join(filter(None, results))

    def _fetch_stm_context(self, query: str) -> str:
        """Fetch recent relevant insights from STM, formatted as bullet points.

        Returns an empty string when STM is disabled or has no results.
        """
        if self.stm is None:
            return ""
        stm_results = self.stm.search(query)
        if not stm_results:
            return ""
        return f"Recent Insights:\n{self._format_bullets(stm_results)}"

    def _fetch_ltm_context(self, task: str) -> str | None:
        """Fetch historical suggestions from LTM for this task description.

        Returns:
            A "Historical Data:" bullet list, None when LTM returned no
            results, or "" when LTM is not configured.
        """
        if self.ltm is None:
            return ""
        ltm_results = self.ltm.search(task, latest_n=2)
        if not ltm_results:
            return None

        # Flatten each result's suggestions, de-duplicating while preserving
        # first-seen order (dict.fromkeys keeps insertion order).
        unique_suggestions = dict.fromkeys(
            suggestion
            for result in ltm_results
            for suggestion in result["metadata"]["suggestions"]
        )
        bullets = "\n".join(f"- {suggestion}" for suggestion in unique_suggestions)
        return f"Historical Data:\n{bullets}"

    def _fetch_entity_context(self, query: str) -> str:
        """Fetch relevant entity information, formatted as bullet points.

        Returns an empty string when entity memory is disabled or empty.
        """
        if self.em is None:
            return ""
        em_results = self.em.search(query)
        if not em_results:
            return ""
        return f"Entities:\n{self._format_bullets(em_results)}"

    def _fetch_external_context(self, query: str) -> str:
        """Fetch relevant information from External Memory.

        Args:
            query: The search query to find relevant information.

        Returns:
            Formatted information as bullet points, or an empty string if none found.
        """
        if self.exm is None:
            return ""
        external_memories = self.exm.search(query)
        if not external_memories:
            return ""
        return f"External memories:\n{self._format_bullets(external_memories)}"

    async def _afetch_stm_context(self, query: str) -> str:
        """Fetch recent relevant insights from STM asynchronously.

        Returns an empty string when STM is disabled or has no results.
        """
        if self.stm is None:
            return ""
        stm_results = await self.stm.asearch(query)
        if not stm_results:
            return ""
        return f"Recent Insights:\n{self._format_bullets(stm_results)}"

    async def _afetch_ltm_context(self, task: str) -> str | None:
        """Fetch historical suggestions from LTM asynchronously.

        Returns:
            A "Historical Data:" bullet list, None when LTM returned no
            results, or "" when LTM is not configured.
        """
        if self.ltm is None:
            return ""
        ltm_results = await self.ltm.asearch(task, latest_n=2)
        if not ltm_results:
            return None

        unique_suggestions = dict.fromkeys(
            suggestion
            for result in ltm_results
            for suggestion in result["metadata"]["suggestions"]
        )
        bullets = "\n".join(f"- {suggestion}" for suggestion in unique_suggestions)
        return f"Historical Data:\n{bullets}"

    async def _afetch_entity_context(self, query: str) -> str:
        """Fetch relevant entity information asynchronously.

        Returns an empty string when entity memory is disabled or empty.
        """
        if self.em is None:
            return ""
        em_results = await self.em.asearch(query)
        if not em_results:
            return ""
        return f"Entities:\n{self._format_bullets(em_results)}"

    async def _afetch_external_context(self, query: str) -> str:
        """Fetch relevant information from External Memory asynchronously.

        Args:
            query: The search query.

        Returns:
            Formatted information as bullet points, or empty string if none found.
        """
        if self.exm is None:
            return ""
        external_memories = await self.exm.asearch(query)
        if not external_memories:
            return ""
        return f"External memories:\n{self._format_bullets(external_memories)}"
|
||||
444
lib/crewai/src/crewai/memory/encoding_flow.py
Normal file
444
lib/crewai/src/crewai/memory/encoding_flow.py
Normal file
@@ -0,0 +1,444 @@
|
||||
"""Batch-native encoding flow: full save pipeline for one or more memories.
|
||||
|
||||
Orchestrates the encoding side of memory in a single Flow with 5 steps:
|
||||
1. Batch embed (ONE embedder call for all items)
|
||||
2. Intra-batch dedup (cosine matrix, drop near-exact duplicates)
|
||||
3. Parallel find similar (concurrent storage searches)
|
||||
4. Parallel analyze (N concurrent LLM calls -- field resolution + consolidation)
|
||||
5. Execute plans (batch re-embed updates + bulk insert)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from concurrent.futures import Future, ThreadPoolExecutor
|
||||
from datetime import datetime
|
||||
import math
|
||||
from typing import Any
|
||||
from uuid import uuid4
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.flow.flow import Flow, listen, start
|
||||
from crewai.memory.analyze import (
|
||||
ConsolidationPlan,
|
||||
MemoryAnalysis,
|
||||
analyze_for_consolidation,
|
||||
analyze_for_save,
|
||||
)
|
||||
from crewai.memory.types import MemoryConfig, MemoryRecord, embed_texts
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# State models
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class ItemState(BaseModel):
    """Per-item tracking within a batch."""

    # Raw text of the memory being encoded.
    content: str = ""
    # Caller-provided fields (None = infer via LLM during parallel_analyze)
    scope: str | None = None
    categories: list[str] | None = None
    metadata: dict[str, Any] | None = None
    importance: float | None = None
    source: str | None = None
    private: bool = False
    # Resolved values (filled by parallel_analyze / _apply_defaults; these
    # are what actually gets written into the stored MemoryRecord)
    resolved_scope: str = "/"
    resolved_categories: list[str] = Field(default_factory=list)
    resolved_metadata: dict[str, Any] = Field(default_factory=dict)
    resolved_importance: float = 0.5
    resolved_source: str | None = None
    resolved_private: bool = False
    # Embedding vector produced by the batch embed step (empty until then)
    embedding: list[float] = Field(default_factory=list)
    # Intra-batch dedup: True when dropped as a near-duplicate of an earlier item
    dropped: bool = False
    # Consolidation: similar existing records found in storage, the top
    # similarity score among them, and the plan the LLM decided for them
    similar_records: list[MemoryRecord] = Field(default_factory=list)
    top_similarity: float = 0.0
    plan: ConsolidationPlan | None = None
    # The record ultimately inserted/updated on behalf of this item (None if none)
    result_record: MemoryRecord | None = None
|
||||
|
||||
|
||||
class EncodingState(BaseModel):
    """Batch-level state for the encoding flow."""

    # Unique id for this flow run.
    id: str = Field(default_factory=lambda: str(uuid4()))
    # All items being encoded in this batch (including later-dropped ones).
    items: list[ItemState] = Field(default_factory=list)
    # Aggregate stats accumulated while executing consolidation plans.
    records_inserted: int = 0
    records_updated: int = 0
    records_deleted: int = 0
    items_dropped_dedup: int = 0
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Flow
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class EncodingFlow(Flow[EncodingState]):
    """Batch-native encoding pipeline for memory.remember() / remember_many().

    Processes N items through 5 sequential steps, maximising parallelism:
    - ONE embedder call for all items
    - N concurrent storage searches
    - N concurrent individual LLM calls (field resolution + consolidation)
    - ONE batch re-embed for updates + ONE bulk storage write
    """

    # NOTE(review): presumably consumed by surrounding Flow machinery to keep
    # this internal flow from re-triggering automatic memory capture on
    # itself -- confirm against the Flow base class.
    _skip_auto_memory: bool = True

    initial_state = EncodingState

    def __init__(
        self,
        storage: Any,
        llm: Any,
        embedder: Any,
        config: MemoryConfig | None = None,
    ) -> None:
        """Initialize the flow.

        Args:
            storage: Store offering search/save/update/delete, list_scopes,
                list_categories, and a reentrant ``write_lock``.
            llm: LLM used for field resolution and consolidation analysis.
            embedder: Embedder passed to ``embed_texts``.
            config: Tuning thresholds/limits; defaults to ``MemoryConfig()``.
        """
        super().__init__(suppress_flow_events=True)
        self._storage = storage
        self._llm = llm
        self._embedder = embedder
        self._config = config or MemoryConfig()

    # ------------------------------------------------------------------
    # Step 1: Batch embed (ONE embedder call)
    # ------------------------------------------------------------------

    @start()
    def batch_embed(self) -> None:
        """Embed all items in a single embedder call."""
        items = list(self.state.items)
        texts = [item.content for item in items]
        embeddings = embed_texts(self._embedder, texts)
        # strict=False: if the embedder returns fewer vectors than texts,
        # trailing items keep their default empty embedding and are then
        # skipped by the dedup/search steps below.
        for item, emb in zip(items, embeddings, strict=False):
            item.embedding = emb

    # ------------------------------------------------------------------
    # Step 2: Intra-batch dedup (cosine similarity matrix)
    # ------------------------------------------------------------------

    @listen(batch_embed)
    def intra_batch_dedup(self) -> None:
        """Drop near-exact duplicates within the batch.

        Each item is compared against every earlier surviving item; the later
        duplicate is dropped, so the first occurrence wins. O(n^2) pairwise
        comparisons over the batch.
        """
        items = list(self.state.items)
        if len(items) <= 1:
            return

        threshold = self._config.batch_dedup_threshold
        n = len(items)
        for j in range(1, n):
            if items[j].dropped or not items[j].embedding:
                continue
            for i in range(j):
                if items[i].dropped or not items[i].embedding:
                    continue
                sim = self._cosine_similarity(items[i].embedding, items[j].embedding)
                if sim >= threshold:
                    items[j].dropped = True
                    self.state.items_dropped_dedup += 1
                    break

    @staticmethod
    def _cosine_similarity(a: list[float], b: list[float]) -> float:
        """Compute cosine similarity between two vectors.

        Returns 0.0 for mismatched lengths, empty vectors, or zero norms.
        """
        if len(a) != len(b) or not a:
            return 0.0
        dot = sum(x * y for x, y in zip(a, b, strict=False))
        norm_a = math.sqrt(sum(x * x for x in a))
        norm_b = math.sqrt(sum(x * x for x in b))
        if norm_a == 0.0 or norm_b == 0.0:
            return 0.0
        return dot / (norm_a * norm_b)

    # ------------------------------------------------------------------
    # Step 3: Parallel find similar (concurrent storage searches)
    # ------------------------------------------------------------------

    @listen(intra_batch_dedup)
    def parallel_find_similar(self) -> None:
        """Search storage for similar records, concurrently for all active items."""
        items = list(self.state.items)
        # Active = not dropped by dedup AND actually has an embedding.
        active = [(i, item) for i, item in enumerate(items) if not item.dropped and item.embedding]

        if not active:
            return

        def _search_one(item: ItemState) -> list[tuple[MemoryRecord, float]]:
            # A scope of "/" (or empty) means search everywhere (no prefix).
            scope_prefix = item.scope if item.scope and item.scope.strip("/") else None
            return self._storage.search(
                item.embedding,
                scope_prefix=scope_prefix,
                categories=None,
                limit=self._config.consolidation_limit,
                min_score=0.0,
            )

        if len(active) == 1:
            # Single item: skip the thread-pool overhead.
            _, item = active[0]
            raw = _search_one(item)
            item.similar_records = [r for r, _ in raw]
            item.top_similarity = float(raw[0][1]) if raw else 0.0
        else:
            with ThreadPoolExecutor(max_workers=min(len(active), 8)) as pool:
                futures = [(i, item, pool.submit(_search_one, item)) for i, item in active]
                for _, item, future in futures:
                    raw = future.result()
                    item.similar_records = [r for r, _ in raw]
                    # Results are assumed best-first; raw[0][1] is the top score.
                    item.top_similarity = float(raw[0][1]) if raw else 0.0

    # ------------------------------------------------------------------
    # Step 4: Parallel analyze (N concurrent LLM calls)
    # ------------------------------------------------------------------

    @listen(parallel_find_similar)
    def parallel_analyze(self) -> None:
        """Field resolution + consolidation via parallel individual LLM calls.

        Classifies each active item into one of four groups:
        - Group A: fields provided + no similar records -> fast insert, 0 LLM calls.
        - Group B: fields provided + similar records above threshold -> 1 consolidation call.
        - Group C: fields missing + no similar records -> 1 field-resolution call.
        - Group D: fields missing + similar records above threshold -> 2 concurrent calls.

        All LLM calls across all items run in parallel via ThreadPoolExecutor.
        """
        items = list(self.state.items)
        threshold = self._config.consolidation_threshold

        # Pre-fetch scope/category info (shared across all field-resolution
        # calls); only hit storage when at least one item needs inference.
        any_needs_fields = any(
            not it.dropped
            and (it.scope is None or it.categories is None or it.importance is None)
            for it in items
        )
        existing_scopes: list[str] = []
        existing_categories: list[str] = []
        if any_needs_fields:
            existing_scopes = self._storage.list_scopes("/") or ["/"]
            existing_categories = list(
                self._storage.list_categories(scope_prefix=None).keys()
            )

        # Classify items and submit LLM calls
        save_futures: dict[int, Future[MemoryAnalysis]] = {}
        consol_futures: dict[int, Future[ConsolidationPlan]] = {}

        pool = ThreadPoolExecutor(max_workers=10)
        try:
            for i, item in enumerate(items):
                if item.dropped:
                    continue

                fields_provided = (
                    item.scope is not None
                    and item.categories is not None
                    and item.importance is not None
                )
                has_similar = item.top_similarity >= threshold

                if fields_provided and not has_similar:
                    # Group A: fast path
                    self._apply_defaults(item)
                    item.plan = ConsolidationPlan(actions=[], insert_new=True)
                elif fields_provided and has_similar:
                    # Group B: consolidation only
                    self._apply_defaults(item)
                    consol_futures[i] = pool.submit(
                        analyze_for_consolidation,
                        item.content, list(item.similar_records), self._llm,
                    )
                elif not fields_provided and not has_similar:
                    # Group C: field resolution only
                    save_futures[i] = pool.submit(
                        analyze_for_save,
                        item.content, existing_scopes, existing_categories, self._llm,
                    )
                else:
                    # Group D: both in parallel
                    save_futures[i] = pool.submit(
                        analyze_for_save,
                        item.content, existing_scopes, existing_categories, self._llm,
                    )
                    consol_futures[i] = pool.submit(
                        analyze_for_consolidation,
                        item.content, list(item.similar_records), self._llm,
                    )

            # Collect field-resolution results. Caller-provided values win
            # over LLM suggestions; LLM-extracted metadata is merged on top
            # of caller metadata (extracted keys override on collision).
            for i, future in save_futures.items():
                analysis = future.result()
                item = items[i]
                item.resolved_scope = item.scope or analysis.suggested_scope or "/"
                item.resolved_categories = (
                    item.categories
                    if item.categories is not None
                    else analysis.categories
                )
                item.resolved_importance = (
                    item.importance
                    if item.importance is not None
                    else analysis.importance
                )
                item.resolved_metadata = dict(
                    item.metadata or {},
                    **(
                        analysis.extracted_metadata.model_dump()
                        if analysis.extracted_metadata
                        else {}
                    ),
                )
                item.resolved_source = item.source
                item.resolved_private = item.private
                # If no consolidation future, it's Group C -> insert
                if i not in consol_futures:
                    item.plan = ConsolidationPlan(actions=[], insert_new=True)

            # Collect consolidation results
            for i, future in consol_futures.items():
                items[i].plan = future.result()
        finally:
            # Every future was already resolved via .result() above, so on the
            # happy path wait=False abandons nothing; it just avoids blocking
            # shutdown if an exception skipped some futures.
            pool.shutdown(wait=False)

    def _apply_defaults(self, item: ItemState) -> None:
        """Apply caller values with config defaults (fast path, no LLM)."""
        item.resolved_scope = item.scope or "/"
        item.resolved_categories = item.categories or []
        item.resolved_metadata = item.metadata or {}
        item.resolved_importance = (
            item.importance
            if item.importance is not None
            else self._config.default_importance
        )
        item.resolved_source = item.source
        item.resolved_private = item.private

    # ------------------------------------------------------------------
    # Step 5: Execute plans (batch re-embed + bulk insert)
    # ------------------------------------------------------------------

    @listen(parallel_analyze)
    def execute_plans(self) -> None:
        """Apply all consolidation plans with batch re-embedding and bulk insert.

        Actions are deduplicated across items before applying: when multiple
        items reference the same existing record (e.g. both want to delete it),
        only the first action is applied. This prevents LanceDB commit
        conflicts from two operations targeting the same record.
        """
        items = list(self.state.items)
        # NOTE(review): naive UTC timestamp; datetime.utcnow() is deprecated
        # since Python 3.12 -- confirm storage expects naive datetimes before
        # switching to datetime.now(timezone.utc).
        now = datetime.utcnow()

        # --- Deduplicate actions across all items ---
        # Multiple items may reference the same existing record (because their
        # similar_records overlap). Collect one action per record_id, first wins.
        # Also build a map from record_id to the original MemoryRecord for updates.
        dedup_deletes: set[str] = set()  # record_ids to delete
        dedup_updates: dict[str, tuple[int, str]] = {}  # record_id -> (item_idx, new_content)
        all_similar: dict[str, MemoryRecord] = {}  # record_id -> MemoryRecord

        for i, item in enumerate(items):
            if item.dropped or item.plan is None:
                continue
            for r in item.similar_records:
                if r.id not in all_similar:
                    all_similar[r.id] = r
            for action in item.plan.actions:
                rid = action.record_id
                # First action on a record wins, whether delete or update.
                if action.action == "delete" and rid not in dedup_deletes and rid not in dedup_updates:
                    dedup_deletes.add(rid)
                elif action.action == "update" and action.new_content and rid not in dedup_deletes and rid not in dedup_updates:
                    dedup_updates[rid] = (i, action.new_content)

        # --- Batch re-embed all update contents in ONE call ---
        update_list = list(dedup_updates.items())  # [(record_id, (item_idx, new_content)), ...]
        update_embeddings: list[list[float]] = []
        if update_list:
            update_contents = [content for _, (_, content) in update_list]
            update_embeddings = embed_texts(self._embedder, update_contents)

        update_emb_map: dict[str, list[float]] = {}
        for (rid, _), emb in zip(update_list, update_embeddings, strict=False):
            update_emb_map[rid] = emb

        # Collect records to insert (outside the lock -- pure data assembly).
        to_insert: list[tuple[int, MemoryRecord]] = []
        for i, item in enumerate(items):
            if item.dropped or item.plan is None:
                continue
            if item.plan.insert_new:
                to_insert.append((i, MemoryRecord(
                    content=item.content,
                    scope=item.resolved_scope,
                    categories=item.resolved_categories,
                    metadata=item.resolved_metadata,
                    importance=item.resolved_importance,
                    embedding=item.embedding if item.embedding else None,
                    source=item.resolved_source,
                    private=item.resolved_private,
                )))

        # All storage mutations (delete + update + insert) under one lock so
        # no other pipeline can interleave and cause version conflicts. The
        # lock is reentrant (RLock) so the individual storage methods can
        # re-acquire it without deadlocking.
        updated_records: dict[str, MemoryRecord] = {}
        with self._storage.write_lock:
            if dedup_deletes:
                self._storage.delete(record_ids=list(dedup_deletes))
                self.state.records_deleted += len(dedup_deletes)

            for rid, (_item_idx, new_content) in dedup_updates.items():
                existing = all_similar.get(rid)
                if existing is not None:
                    new_emb = update_emb_map.get(rid, [])
                    # Preserve the record's identity and provenance; replace
                    # only content, embedding, and last_accessed.
                    updated = MemoryRecord(
                        id=existing.id,
                        content=new_content,
                        scope=existing.scope,
                        categories=existing.categories,
                        metadata=existing.metadata,
                        importance=existing.importance,
                        created_at=existing.created_at,
                        last_accessed=now,
                        embedding=new_emb if new_emb else existing.embedding,
                    )
                    self._storage.update(updated)
                    self.state.records_updated += 1
                    updated_records[rid] = updated

            if to_insert:
                records = [r for _, r in to_insert]
                self._storage.save(records)
                self.state.records_inserted += len(records)
                for idx, record in to_insert:
                    items[idx].result_record = record

        # Set result_record for non-insert items (after lock, using
        # updated_records): prefer the first record this item's plan updated,
        # else fall back to its closest similar record.
        for _i, item in enumerate(items):
            if item.dropped or item.plan is None or item.plan.insert_new:
                continue
            if item.result_record is not None:
                continue
            first_updated = next(
                (
                    updated_records[a.record_id]
                    for a in item.plan.actions
                    if a.action == "update" and a.record_id in updated_records
                ),
                None,
            )
            item.result_record = (
                first_updated
                if first_updated is not None
                else (item.similar_records[0] if item.similar_records else None)
            )
|
||||
@@ -1,404 +0,0 @@
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from pydantic import PrivateAttr
|
||||
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
MemoryQueryStartedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
MemorySaveFailedEvent,
|
||||
MemorySaveStartedEvent,
|
||||
)
|
||||
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
|
||||
from crewai.memory.memory import Memory
|
||||
from crewai.memory.storage.rag_storage import RAGStorage
|
||||
|
||||
|
||||
class EntityMemory(Memory):
|
||||
"""
|
||||
EntityMemory class for managing structured information about entities
|
||||
and their relationships using SQLite storage.
|
||||
Inherits from the Memory class.
|
||||
"""
|
||||
|
||||
_memory_provider: str | None = PrivateAttr()
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
crew: Any = None,
|
||||
embedder_config: Any = None,
|
||||
storage: Any = None,
|
||||
path: str | None = None,
|
||||
) -> None:
|
||||
memory_provider = None
|
||||
if embedder_config and isinstance(embedder_config, dict):
|
||||
memory_provider = embedder_config.get("provider")
|
||||
|
||||
if memory_provider == "mem0":
|
||||
try:
|
||||
from crewai.memory.storage.mem0_storage import Mem0Storage
|
||||
except ImportError as e:
|
||||
raise ImportError(
|
||||
"Mem0 is not installed. Please install it with `pip install mem0ai`."
|
||||
) from e
|
||||
config = (
|
||||
embedder_config.get("config")
|
||||
if embedder_config and isinstance(embedder_config, dict)
|
||||
else None
|
||||
)
|
||||
storage = Mem0Storage(type="short_term", crew=crew, config=config) # type: ignore[no-untyped-call]
|
||||
else:
|
||||
storage = (
|
||||
storage
|
||||
if storage
|
||||
else RAGStorage(
|
||||
type="entities",
|
||||
allow_reset=True,
|
||||
embedder_config=embedder_config,
|
||||
crew=crew,
|
||||
path=path,
|
||||
)
|
||||
)
|
||||
|
||||
super().__init__(storage=storage)
|
||||
self._memory_provider = memory_provider
|
||||
|
||||
def save(
|
||||
self,
|
||||
value: EntityMemoryItem | list[EntityMemoryItem],
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Saves one or more entity items into the SQLite storage.
|
||||
|
||||
Args:
|
||||
value: Single EntityMemoryItem or list of EntityMemoryItems to save.
|
||||
metadata: Optional metadata dict (included for supertype compatibility but not used).
|
||||
|
||||
Notes:
|
||||
The metadata parameter is included to satisfy the supertype signature but is not
|
||||
used - entity metadata is extracted from the EntityMemoryItem objects themselves.
|
||||
"""
|
||||
|
||||
if not value:
|
||||
return
|
||||
|
||||
items = value if isinstance(value, list) else [value]
|
||||
is_batch = len(items) > 1
|
||||
|
||||
metadata = {"entity_count": len(items)} if is_batch else items[0].metadata
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveStartedEvent(
|
||||
metadata=metadata,
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
saved_count = 0
|
||||
errors = []
|
||||
|
||||
def save_single_item(item: EntityMemoryItem) -> tuple[bool, str | None]:
|
||||
"""Save a single item and return success status."""
|
||||
try:
|
||||
if self._memory_provider == "mem0":
|
||||
data = f"""
|
||||
Remember details about the following entity:
|
||||
Name: {item.name}
|
||||
Type: {item.type}
|
||||
Entity Description: {item.description}
|
||||
"""
|
||||
else:
|
||||
data = f"{item.name}({item.type}): {item.description}"
|
||||
|
||||
super(EntityMemory, self).save(data, item.metadata)
|
||||
return True, None
|
||||
except Exception as e:
|
||||
return False, f"{item.name}: {e!s}"
|
||||
|
||||
try:
|
||||
for item in items:
|
||||
success, error = save_single_item(item)
|
||||
if success:
|
||||
saved_count += 1
|
||||
else:
|
||||
errors.append(error)
|
||||
|
||||
if is_batch:
|
||||
emit_value = f"Saved {saved_count} entities"
|
||||
metadata = {"entity_count": saved_count, "errors": errors}
|
||||
else:
|
||||
emit_value = f"{items[0].name}({items[0].type}): {items[0].description}"
|
||||
metadata = items[0].metadata
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveCompletedEvent(
|
||||
value=emit_value,
|
||||
metadata=metadata,
|
||||
save_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
if errors:
|
||||
raise Exception(
|
||||
f"Partial save: {len(errors)} failed out of {len(items)}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
fail_metadata = (
|
||||
{"entity_count": len(items), "saved": saved_count}
|
||||
if is_batch
|
||||
else items[0].metadata
|
||||
)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveFailedEvent(
|
||||
metadata=fail_metadata,
|
||||
error=str(e),
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
def search(
|
||||
self,
|
||||
query: str,
|
||||
limit: int = 5,
|
||||
score_threshold: float = 0.6,
|
||||
) -> list[Any]:
|
||||
"""Search entity memory for relevant entries.
|
||||
|
||||
Args:
|
||||
query: The search query.
|
||||
limit: Maximum number of results to return.
|
||||
score_threshold: Minimum similarity score for results.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries.
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryStartedEvent(
|
||||
query=query,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
results = super().search(
|
||||
query=query, limit=limit, score_threshold=score_threshold
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryCompletedEvent(
|
||||
query=query,
|
||||
results=results,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
query_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
return results
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryFailedEvent(
|
||||
query=query,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
error=str(e),
|
||||
source_type="entity_memory",
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
async def asave(
|
||||
self,
|
||||
value: EntityMemoryItem | list[EntityMemoryItem],
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Save entity items asynchronously.
|
||||
|
||||
Args:
|
||||
value: Single EntityMemoryItem or list of EntityMemoryItems to save.
|
||||
metadata: Optional metadata dict (not used, for signature compatibility).
|
||||
"""
|
||||
if not value:
|
||||
return
|
||||
|
||||
items = value if isinstance(value, list) else [value]
|
||||
is_batch = len(items) > 1
|
||||
|
||||
metadata = {"entity_count": len(items)} if is_batch else items[0].metadata
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveStartedEvent(
|
||||
metadata=metadata,
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
saved_count = 0
|
||||
errors: list[str | None] = []
|
||||
|
||||
async def save_single_item(item: EntityMemoryItem) -> tuple[bool, str | None]:
|
||||
"""Save a single item asynchronously."""
|
||||
try:
|
||||
if self._memory_provider == "mem0":
|
||||
data = f"""
|
||||
Remember details about the following entity:
|
||||
Name: {item.name}
|
||||
Type: {item.type}
|
||||
Entity Description: {item.description}
|
||||
"""
|
||||
else:
|
||||
data = f"{item.name}({item.type}): {item.description}"
|
||||
|
||||
await super(EntityMemory, self).asave(data, item.metadata)
|
||||
return True, None
|
||||
except Exception as e:
|
||||
return False, f"{item.name}: {e!s}"
|
||||
|
||||
try:
|
||||
for item in items:
|
||||
success, error = await save_single_item(item)
|
||||
if success:
|
||||
saved_count += 1
|
||||
else:
|
||||
errors.append(error)
|
||||
|
||||
if is_batch:
|
||||
emit_value = f"Saved {saved_count} entities"
|
||||
metadata = {"entity_count": saved_count, "errors": errors}
|
||||
else:
|
||||
emit_value = f"{items[0].name}({items[0].type}): {items[0].description}"
|
||||
metadata = items[0].metadata
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveCompletedEvent(
|
||||
value=emit_value,
|
||||
metadata=metadata,
|
||||
save_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
if errors:
|
||||
raise Exception(
|
||||
f"Partial save: {len(errors)} failed out of {len(items)}"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
fail_metadata = (
|
||||
{"entity_count": len(items), "saved": saved_count}
|
||||
if is_batch
|
||||
else items[0].metadata
|
||||
)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveFailedEvent(
|
||||
metadata=fail_metadata,
|
||||
error=str(e),
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
async def asearch(
|
||||
self,
|
||||
query: str,
|
||||
limit: int = 5,
|
||||
score_threshold: float = 0.6,
|
||||
) -> list[Any]:
|
||||
"""Search entity memory asynchronously.
|
||||
|
||||
Args:
|
||||
query: The search query.
|
||||
limit: Maximum number of results to return.
|
||||
score_threshold: Minimum similarity score for results.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries.
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryStartedEvent(
|
||||
query=query,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
results = await super().asearch(
|
||||
query=query, limit=limit, score_threshold=score_threshold
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryCompletedEvent(
|
||||
query=query,
|
||||
results=results,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
query_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="entity_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
return results
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryFailedEvent(
|
||||
query=query,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
error=str(e),
|
||||
source_type="entity_memory",
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
def reset(self) -> None:
|
||||
try:
|
||||
self.storage.reset()
|
||||
except Exception as e:
|
||||
raise Exception(
|
||||
f"An error occurred while resetting the entity memory: {e}"
|
||||
) from e
|
||||
@@ -1,12 +0,0 @@
|
||||
class EntityMemoryItem:
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
type: str,
|
||||
description: str,
|
||||
relationships: str,
|
||||
):
|
||||
self.name = name
|
||||
self.type = type
|
||||
self.description = description
|
||||
self.metadata = {"relationships": relationships}
|
||||
@@ -1,301 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
MemoryQueryStartedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
MemorySaveFailedEvent,
|
||||
MemorySaveStartedEvent,
|
||||
)
|
||||
from crewai.memory.external.external_memory_item import ExternalMemoryItem
|
||||
from crewai.memory.memory import Memory
|
||||
from crewai.memory.storage.interface import Storage
|
||||
from crewai.rag.embeddings.types import ProviderSpec
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.memory.storage.mem0_storage import Mem0Storage
|
||||
|
||||
|
||||
class ExternalMemory(Memory):
|
||||
def __init__(self, storage: Storage | None = None, **data: Any):
|
||||
super().__init__(storage=storage, **data)
|
||||
|
||||
@staticmethod
|
||||
def _configure_mem0(crew: Any, config: dict[str, Any]) -> Mem0Storage:
|
||||
from crewai.memory.storage.mem0_storage import Mem0Storage
|
||||
|
||||
return Mem0Storage(type="external", crew=crew, config=config) # type: ignore[no-untyped-call]
|
||||
|
||||
@staticmethod
|
||||
def external_supported_storages() -> dict[str, Any]:
|
||||
return {
|
||||
"mem0": ExternalMemory._configure_mem0,
|
||||
}
|
||||
|
||||
@staticmethod
|
||||
def create_storage(
|
||||
crew: Any, embedder_config: dict[str, Any] | ProviderSpec | None
|
||||
) -> Storage:
|
||||
if not embedder_config:
|
||||
raise ValueError("embedder_config is required")
|
||||
|
||||
if "provider" not in embedder_config:
|
||||
raise ValueError("embedder_config must include a 'provider' key")
|
||||
|
||||
provider = embedder_config["provider"]
|
||||
supported_storages = ExternalMemory.external_supported_storages()
|
||||
if provider not in supported_storages:
|
||||
raise ValueError(f"Provider {provider} not supported")
|
||||
|
||||
storage: Storage = supported_storages[provider](
|
||||
crew, embedder_config.get("config", {})
|
||||
)
|
||||
return storage
|
||||
|
||||
def save(
|
||||
self,
|
||||
value: Any,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Saves a value into the external storage."""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveStartedEvent(
|
||||
value=value,
|
||||
metadata=metadata,
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
item = ExternalMemoryItem(
|
||||
value=value,
|
||||
metadata=metadata,
|
||||
agent=self.agent.role if self.agent else None,
|
||||
)
|
||||
super().save(value=item.value, metadata=item.metadata)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveCompletedEvent(
|
||||
value=value,
|
||||
metadata=metadata,
|
||||
save_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveFailedEvent(
|
||||
value=value,
|
||||
metadata=metadata,
|
||||
error=str(e),
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
def search(
|
||||
self,
|
||||
query: str,
|
||||
limit: int = 5,
|
||||
score_threshold: float = 0.6,
|
||||
) -> list[Any]:
|
||||
"""Search external memory for relevant entries.
|
||||
|
||||
Args:
|
||||
query: The search query.
|
||||
limit: Maximum number of results to return.
|
||||
score_threshold: Minimum similarity score for results.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries.
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryStartedEvent(
|
||||
query=query,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
results = super().search(
|
||||
query=query, limit=limit, score_threshold=score_threshold
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryCompletedEvent(
|
||||
query=query,
|
||||
results=results,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
query_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
return results
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryFailedEvent(
|
||||
query=query,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
error=str(e),
|
||||
source_type="external_memory",
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
async def asave(
|
||||
self,
|
||||
value: Any,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Save a value to external memory asynchronously.
|
||||
|
||||
Args:
|
||||
value: The value to save.
|
||||
metadata: Optional metadata to associate with the value.
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveStartedEvent(
|
||||
value=value,
|
||||
metadata=metadata,
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
item = ExternalMemoryItem(
|
||||
value=value,
|
||||
metadata=metadata,
|
||||
agent=self.agent.role if self.agent else None,
|
||||
)
|
||||
await super().asave(value=item.value, metadata=item.metadata)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveCompletedEvent(
|
||||
value=value,
|
||||
metadata=metadata,
|
||||
save_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveFailedEvent(
|
||||
value=value,
|
||||
metadata=metadata,
|
||||
error=str(e),
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
async def asearch(
|
||||
self,
|
||||
query: str,
|
||||
limit: int = 5,
|
||||
score_threshold: float = 0.6,
|
||||
) -> list[Any]:
|
||||
"""Search external memory asynchronously.
|
||||
|
||||
Args:
|
||||
query: The search query.
|
||||
limit: Maximum number of results to return.
|
||||
score_threshold: Minimum similarity score for results.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries.
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryStartedEvent(
|
||||
query=query,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
results = await super().asearch(
|
||||
query=query, limit=limit, score_threshold=score_threshold
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryCompletedEvent(
|
||||
query=query,
|
||||
results=results,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
query_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="external_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
return results
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryFailedEvent(
|
||||
query=query,
|
||||
limit=limit,
|
||||
score_threshold=score_threshold,
|
||||
error=str(e),
|
||||
source_type="external_memory",
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
def reset(self) -> None:
|
||||
self.storage.reset()
|
||||
|
||||
def set_crew(self, crew: Any) -> ExternalMemory:
|
||||
super().set_crew(crew)
|
||||
|
||||
if not self.storage:
|
||||
self.storage = self.create_storage(crew, self.embedder_config) # type: ignore[arg-type]
|
||||
|
||||
return self
|
||||
@@ -1,13 +0,0 @@
|
||||
from typing import Any
|
||||
|
||||
|
||||
class ExternalMemoryItem:
|
||||
def __init__(
|
||||
self,
|
||||
value: Any,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
agent: str | None = None,
|
||||
):
|
||||
self.value = value
|
||||
self.metadata = metadata
|
||||
self.agent = agent
|
||||
@@ -1,255 +0,0 @@
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
MemoryQueryStartedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
MemorySaveFailedEvent,
|
||||
MemorySaveStartedEvent,
|
||||
)
|
||||
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
|
||||
from crewai.memory.memory import Memory
|
||||
from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage
|
||||
|
||||
|
||||
class LongTermMemory(Memory):
|
||||
"""
|
||||
LongTermMemory class for managing cross runs data related to overall crew's
|
||||
execution and performance.
|
||||
Inherits from the Memory class and utilizes an instance of a class that
|
||||
adheres to the Storage for data storage, specifically working with
|
||||
LongTermMemoryItem instances.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
storage: LTMSQLiteStorage | None = None,
|
||||
path: str | None = None,
|
||||
) -> None:
|
||||
if not storage:
|
||||
storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
|
||||
super().__init__(storage=storage)
|
||||
|
||||
def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveStartedEvent(
|
||||
value=item.task,
|
||||
metadata=item.metadata,
|
||||
agent_role=item.agent,
|
||||
source_type="long_term_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
metadata = item.metadata
|
||||
metadata.update(
|
||||
{"agent": item.agent, "expected_output": item.expected_output}
|
||||
)
|
||||
self.storage.save(
|
||||
task_description=item.task,
|
||||
score=metadata["quality"],
|
||||
metadata=metadata,
|
||||
datetime=item.datetime,
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveCompletedEvent(
|
||||
value=item.task,
|
||||
metadata=item.metadata,
|
||||
agent_role=item.agent,
|
||||
save_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="long_term_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveFailedEvent(
|
||||
value=item.task,
|
||||
metadata=item.metadata,
|
||||
agent_role=item.agent,
|
||||
error=str(e),
|
||||
source_type="long_term_memory",
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
def search( # type: ignore[override]
|
||||
self,
|
||||
task: str,
|
||||
latest_n: int = 3,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Search long-term memory for relevant entries.
|
||||
|
||||
Args:
|
||||
task: The task description to search for.
|
||||
latest_n: Maximum number of results to return.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries.
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryStartedEvent(
|
||||
query=task,
|
||||
limit=latest_n,
|
||||
source_type="long_term_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
results = self.storage.load(task, latest_n)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryCompletedEvent(
|
||||
query=task,
|
||||
results=results,
|
||||
limit=latest_n,
|
||||
query_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="long_term_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
return results or []
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryFailedEvent(
|
||||
query=task,
|
||||
limit=latest_n,
|
||||
error=str(e),
|
||||
source_type="long_term_memory",
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
async def asave(self, item: LongTermMemoryItem) -> None: # type: ignore[override]
|
||||
"""Save an item to long-term memory asynchronously.
|
||||
|
||||
Args:
|
||||
item: The LongTermMemoryItem to save.
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveStartedEvent(
|
||||
value=item.task,
|
||||
metadata=item.metadata,
|
||||
agent_role=item.agent,
|
||||
source_type="long_term_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
metadata = item.metadata
|
||||
metadata.update(
|
||||
{"agent": item.agent, "expected_output": item.expected_output}
|
||||
)
|
||||
await self.storage.asave(
|
||||
task_description=item.task,
|
||||
score=metadata["quality"],
|
||||
metadata=metadata,
|
||||
datetime=item.datetime,
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveCompletedEvent(
|
||||
value=item.task,
|
||||
metadata=item.metadata,
|
||||
agent_role=item.agent,
|
||||
save_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="long_term_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemorySaveFailedEvent(
|
||||
value=item.task,
|
||||
metadata=item.metadata,
|
||||
agent_role=item.agent,
|
||||
error=str(e),
|
||||
source_type="long_term_memory",
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
async def asearch( # type: ignore[override]
|
||||
self,
|
||||
task: str,
|
||||
latest_n: int = 3,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Search long-term memory asynchronously.
|
||||
|
||||
Args:
|
||||
task: The task description to search for.
|
||||
latest_n: Maximum number of results to return.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries.
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryStartedEvent(
|
||||
query=task,
|
||||
limit=latest_n,
|
||||
source_type="long_term_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
start_time = time.time()
|
||||
try:
|
||||
results = await self.storage.aload(task, latest_n)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryCompletedEvent(
|
||||
query=task,
|
||||
results=results,
|
||||
limit=latest_n,
|
||||
query_time_ms=(time.time() - start_time) * 1000,
|
||||
source_type="long_term_memory",
|
||||
from_agent=self.agent,
|
||||
from_task=self.task,
|
||||
),
|
||||
)
|
||||
|
||||
return results or []
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=MemoryQueryFailedEvent(
|
||||
query=task,
|
||||
limit=latest_n,
|
||||
error=str(e),
|
||||
source_type="long_term_memory",
|
||||
),
|
||||
)
|
||||
raise
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Reset long-term memory."""
|
||||
self.storage.reset()
|
||||
@@ -1,19 +0,0 @@
|
||||
from typing import Any
|
||||
|
||||
|
||||
class LongTermMemoryItem:
|
||||
def __init__(
|
||||
self,
|
||||
agent: str,
|
||||
task: str,
|
||||
expected_output: str,
|
||||
datetime: str,
|
||||
quality: int | float | None = None,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
):
|
||||
self.task = task
|
||||
self.agent = agent
|
||||
self.quality = quality
|
||||
self.datetime = datetime
|
||||
self.expected_output = expected_output
|
||||
self.metadata = metadata if metadata is not None else {}
|
||||
@@ -1,121 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.rag.embeddings.types import EmbedderConfig
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agent import Agent
|
||||
from crewai.task import Task
|
||||
|
||||
|
||||
class Memory(BaseModel):
|
||||
"""Base class for memory, supporting agent tags and generic metadata."""
|
||||
|
||||
embedder_config: EmbedderConfig | dict[str, Any] | None = None
|
||||
crew: Any | None = None
|
||||
|
||||
storage: Any
|
||||
_agent: Agent | None = None
|
||||
_task: Task | None = None
|
||||
|
||||
def __init__(self, storage: Any, **data: Any):
|
||||
super().__init__(storage=storage, **data)
|
||||
|
||||
@property
|
||||
def task(self) -> Task | None:
|
||||
"""Get the current task associated with this memory."""
|
||||
return self._task
|
||||
|
||||
@task.setter
|
||||
def task(self, task: Task | None) -> None:
|
||||
"""Set the current task associated with this memory."""
|
||||
self._task = task
|
||||
|
||||
@property
|
||||
def agent(self) -> Agent | None:
|
||||
"""Get the current agent associated with this memory."""
|
||||
return self._agent
|
||||
|
||||
@agent.setter
|
||||
def agent(self, agent: Agent | None) -> None:
|
||||
"""Set the current agent associated with this memory."""
|
||||
self._agent = agent
|
||||
|
||||
def save(
|
||||
self,
|
||||
value: Any,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Save a value to memory.
|
||||
|
||||
Args:
|
||||
value: The value to save.
|
||||
metadata: Optional metadata to associate with the value.
|
||||
"""
|
||||
metadata = metadata or {}
|
||||
self.storage.save(value, metadata)
|
||||
|
||||
async def asave(
|
||||
self,
|
||||
value: Any,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
) -> None:
|
||||
"""Save a value to memory asynchronously.
|
||||
|
||||
Args:
|
||||
value: The value to save.
|
||||
metadata: Optional metadata to associate with the value.
|
||||
"""
|
||||
metadata = metadata or {}
|
||||
await self.storage.asave(value, metadata)
|
||||
|
||||
def search(
|
||||
self,
|
||||
query: str,
|
||||
limit: int = 5,
|
||||
score_threshold: float = 0.6,
|
||||
) -> list[Any]:
|
||||
"""Search memory for relevant entries.
|
||||
|
||||
Args:
|
||||
query: The search query.
|
||||
limit: Maximum number of results to return.
|
||||
score_threshold: Minimum similarity score for results.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries.
|
||||
"""
|
||||
results: list[Any] = self.storage.search(
|
||||
query=query, limit=limit, score_threshold=score_threshold
|
||||
)
|
||||
return results
|
||||
|
||||
async def asearch(
|
||||
self,
|
||||
query: str,
|
||||
limit: int = 5,
|
||||
score_threshold: float = 0.6,
|
||||
) -> list[Any]:
|
||||
"""Search memory for relevant entries asynchronously.
|
||||
|
||||
Args:
|
||||
query: The search query.
|
||||
limit: Maximum number of results to return.
|
||||
score_threshold: Minimum similarity score for results.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries.
|
||||
"""
|
||||
results: list[Any] = await self.storage.asearch(
|
||||
query=query, limit=limit, score_threshold=score_threshold
|
||||
)
|
||||
return results
|
||||
|
||||
def set_crew(self, crew: Any) -> Memory:
|
||||
"""Set the crew for this memory instance."""
|
||||
self.crew = crew
|
||||
return self
|
||||
272
lib/crewai/src/crewai/memory/memory_scope.py
Normal file
272
lib/crewai/src/crewai/memory/memory_scope.py
Normal file
@@ -0,0 +1,272 @@
|
||||
"""Scoped and sliced views over unified Memory."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.memory.unified_memory import Memory
|
||||
|
||||
from crewai.memory.types import (
|
||||
_RECALL_OVERSAMPLE_FACTOR,
|
||||
MemoryMatch,
|
||||
MemoryRecord,
|
||||
ScopeInfo,
|
||||
)
|
||||
|
||||
|
||||
class MemoryScope:
    """A Memory view anchored at a fixed root path; every operation resolves under that root."""

    def __init__(self, memory: Memory, root_path: str) -> None:
        """Create a scoped view over a Memory instance.

        Args:
            memory: Underlying Memory instance all calls delegate to.
            root_path: Anchor path for this scope (e.g. /agent/1).
        """
        self._memory = memory
        # Normalize: strip trailing slashes, ensure a single leading slash;
        # the bare root "/" normalizes to the empty string.
        normalized = root_path.rstrip("/") or ""
        if normalized and not normalized.startswith("/"):
            normalized = "/" + normalized
        self._root = normalized

    def _scope_path(self, scope: str | None) -> str:
        # Translate a caller-relative scope into an absolute path under the root.
        if not scope or scope == "/":
            return self._root or "/"
        relative = scope.rstrip("/")
        if not relative.startswith("/"):
            relative = "/" + relative
        if not self._root:
            return relative
        return self._root.rstrip("/") + relative

    def remember(
        self,
        content: str,
        scope: str | None = "/",
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
        source: str | None = None,
        private: bool = False,
    ) -> MemoryRecord:
        """Store content; the scope argument is interpreted relative to this root."""
        return self._memory.remember(
            content,
            scope=self._scope_path(scope),
            categories=categories,
            metadata=metadata,
            importance=importance,
            source=source,
            private=private,
        )

    def recall(
        self,
        query: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        limit: int = 10,
        depth: str = "deep",
        source: str | None = None,
        include_private: bool = False,
    ) -> list[MemoryMatch]:
        """Search this scope's root path and everything below it."""
        target = self._scope_path(scope) if scope else (self._root or "/")
        return self._memory.recall(
            query,
            scope=target,
            categories=categories,
            limit=limit,
            depth=depth,
            source=source,
            include_private=include_private,
        )

    def extract_memories(self, content: str) -> list[str]:
        """Split content into discrete memories via the underlying Memory."""
        return self._memory.extract_memories(content)

    def forget(
        self,
        scope: str | None = None,
        categories: list[str] | None = None,
        older_than: datetime | None = None,
        metadata_filter: dict[str, Any] | None = None,
        record_ids: list[str] | None = None,
    ) -> int:
        """Delete matching memories under this scope; returns the number removed."""
        target = self._scope_path(scope) if scope else (self._root or "/")
        return self._memory.forget(
            scope=target,
            categories=categories,
            older_than=older_than,
            metadata_filter=metadata_filter,
            record_ids=record_ids,
        )

    def list_scopes(self, path: str = "/") -> list[str]:
        """List child scopes beneath path (resolved against this root)."""
        return self._memory.list_scopes(self._scope_path(path))

    def info(self, path: str = "/") -> ScopeInfo:
        """Describe the scope at path (resolved against this root)."""
        return self._memory.info(self._scope_path(path))

    def tree(self, path: str = "/", max_depth: int = 3) -> str:
        """Render the scope tree below path (resolved against this root)."""
        return self._memory.tree(self._scope_path(path), max_depth=max_depth)

    def list_categories(self, path: str | None = None) -> dict[str, int]:
        """Category counts under path; None means this scope's root."""
        target = self._scope_path(path) if path else (self._root or "/")
        return self._memory.list_categories(target)

    def reset(self, scope: str | None = None) -> None:
        """Clear memories under this scope."""
        target = self._scope_path(scope) if scope else (self._root or "/")
        self._memory.reset(scope=target)

    def subscope(self, path: str) -> MemoryScope:
        """Return a narrower MemoryScope nested under this one."""
        segment = path.strip("/")
        if not segment:
            # Empty path: a fresh scope at the same root.
            return MemoryScope(self._memory, self._root or "/")
        return MemoryScope(self._memory, f"{self._root.rstrip('/')}/{segment}")
|
||||
|
||||
|
||||
class MemorySlice:
    """Multi-scope view: recall fans out across every scope; writes require an explicit scope and are blocked when read-only."""

    def __init__(
        self,
        memory: Memory,
        scopes: list[str],
        categories: list[str] | None = None,
        read_only: bool = True,
    ) -> None:
        """Create a slice over several scopes.

        Args:
            memory: The underlying Memory instance.
            scopes: Scope paths included in this view.
            categories: Default category filter applied during recall.
            read_only: When True, remember() raises PermissionError.
        """
        self._memory = memory
        # Normalize trailing slashes; the bare root stays "/".
        self._scopes = [path.rstrip("/") or "/" for path in scopes]
        self._categories = categories
        self._read_only = read_only

    def remember(
        self,
        content: str,
        scope: str,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
        source: str | None = None,
        private: bool = False,
    ) -> MemoryRecord:
        """Write into an explicit scope; refused when the slice is read-only."""
        if self._read_only:
            raise PermissionError("This MemorySlice is read-only")
        return self._memory.remember(
            content,
            scope=scope,
            categories=categories,
            metadata=metadata,
            importance=importance,
            source=source,
            private=private,
        )

    def recall(
        self,
        query: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        limit: int = 10,
        depth: str = "deep",
        source: str | None = None,
        include_private: bool = False,
    ) -> list[MemoryMatch]:
        """Recall from every slice scope, then merge, dedupe, and rank."""
        effective_categories = categories or self._categories
        pooled: list[MemoryMatch] = []
        for slice_scope in self._scopes:
            # Oversample per scope so the merged ranking still has `limit` good hits.
            pooled.extend(
                self._memory.recall(
                    query,
                    scope=slice_scope,
                    categories=effective_categories,
                    limit=limit * _RECALL_OVERSAMPLE_FACTOR,
                    depth=depth,
                    source=source,
                    include_private=include_private,
                )
            )
        ranked = sorted(pooled, key=lambda match: match.score, reverse=True)
        deduped: list[MemoryMatch] = []
        seen: set[str] = set()
        for match in ranked:
            if match.record.id not in seen:
                seen.add(match.record.id)
                deduped.append(match)
            if len(deduped) >= limit:
                break
        return deduped

    def extract_memories(self, content: str) -> list[str]:
        """Split content into discrete memories via the underlying Memory."""
        return self._memory.extract_memories(content)

    def list_scopes(self, path: str = "/") -> list[str]:
        """Union of child scopes across every slice root, sorted."""
        collected: list[str] = []
        for slice_scope in self._scopes:
            full = f"{slice_scope.rstrip('/')}{path}" if slice_scope != "/" else path
            collected.extend(self._memory.list_scopes(full))
        return sorted(set(collected))

    def info(self, path: str = "/") -> ScopeInfo:
        """Aggregate ScopeInfo across slice scopes (counts summed, date ranges merged)."""
        record_total = 0
        category_union: set[str] = set()
        earliest: datetime | None = None
        latest: datetime | None = None
        child_paths: list[str] = []
        for slice_scope in self._scopes:
            full = f"{slice_scope.rstrip('/')}{path}" if slice_scope != "/" else path
            scoped = self._memory.info(full)
            record_total += scoped.record_count
            category_union.update(scoped.categories)
            if scoped.oldest_record:
                earliest = (
                    scoped.oldest_record
                    if earliest is None
                    else min(earliest, scoped.oldest_record)
                )
            if scoped.newest_record:
                latest = (
                    scoped.newest_record
                    if latest is None
                    else max(latest, scoped.newest_record)
                )
            child_paths.extend(scoped.child_scopes)
        return ScopeInfo(
            path=path,
            record_count=record_total,
            categories=sorted(category_union),
            oldest_record=earliest,
            newest_record=latest,
            child_scopes=sorted(set(child_paths)),
        )

    def list_categories(self, path: str | None = None) -> dict[str, int]:
        """Sum category counts across slice scopes."""
        totals: dict[str, int] = {}
        for slice_scope in self._scopes:
            full = (
                (f"{slice_scope.rstrip('/')}{path}" if slice_scope != "/" else path)
                if path
                else slice_scope
            )
            for category, count in self._memory.list_categories(full).items():
                totals[category] = totals.get(category, 0) + count
        return totals
|
||||
367
lib/crewai/src/crewai/memory/recall_flow.py
Normal file
367
lib/crewai/src/crewai/memory/recall_flow.py
Normal file
@@ -0,0 +1,367 @@
|
||||
"""RLM-inspired intelligent recall flow for memory retrieval.
|
||||
|
||||
Implements adaptive-depth retrieval with:
|
||||
- LLM query distillation into targeted sub-queries
|
||||
- Keyword-driven category filtering
|
||||
- Time-based filtering from temporal hints
|
||||
- Parallel multi-query, multi-scope search
|
||||
- Confidence-based routing with iterative deepening (budget loop)
|
||||
- Evidence gap tracking propagated to results
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
from uuid import uuid4
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.flow.flow import Flow, listen, router, start
|
||||
from crewai.memory.analyze import QueryAnalysis, analyze_query
|
||||
from crewai.memory.types import (
|
||||
_RECALL_OVERSAMPLE_FACTOR,
|
||||
MemoryConfig,
|
||||
MemoryMatch,
|
||||
MemoryRecord,
|
||||
compute_composite_score,
|
||||
embed_texts,
|
||||
)
|
||||
|
||||
|
||||
class RecallState(BaseModel):
    """State for the recall flow.

    Carries the query, its filters, intermediate search artifacts, and the
    final ranked results across the flow's steps.
    """

    # Unique identifier for this flow run.
    id: str = Field(default_factory=lambda: str(uuid4()))
    # Raw recall query supplied by the caller.
    query: str = ""
    # Optional scope path restricting the search.
    scope: str | None = None
    # Caller-supplied category filter.
    categories: list[str] | None = None
    # Categories inferred from LLM keyword analysis of the query.
    inferred_categories: list[str] = Field(default_factory=list)
    # Records created before this cutoff are filtered out of search results.
    time_cutoff: datetime | None = None
    # Optional source filter; records private to this source remain visible.
    source: str | None = None
    # Whether private records from other sources may be returned.
    include_private: bool = False
    # Maximum number of final results.
    limit: int = 10
    # (sub-query text, embedding vector) pairs produced during query analysis.
    query_embeddings: list[tuple[str, list[float]]] = Field(default_factory=list)
    # LLM analysis of the query; None when the short-query fast path was taken.
    query_analysis: QueryAnalysis | None = None
    # Scopes selected for searching.
    candidate_scopes: list[str] = Field(default_factory=list)
    # Per-scope search findings (dicts with "scope", "results", "top_score").
    chunk_findings: list[Any] = Field(default_factory=list)
    # Gaps reported by the LLM during deep exploration.
    evidence_gaps: list[str] = Field(default_factory=list)
    # Best composite score observed so far; drives depth routing.
    confidence: float = 0.0
    # Final deduplicated, ranked matches.
    final_results: list[MemoryMatch] = Field(default_factory=list)
    # Remaining iterative-deepening iterations before the loop must stop.
    exploration_budget: int = 1
|
||||
|
||||
|
||||
class RecallFlow(Flow[RecallState]):
    """RLM-inspired intelligent memory recall flow.

    Analyzes the query via LLM to produce targeted sub-queries and filters,
    embeds each sub-query, searches across candidate scopes in parallel,
    and iteratively deepens exploration when confidence is low.
    """

    # Prevent recall itself from triggering memory capture.
    _skip_auto_memory: bool = True

    initial_state = RecallState

    def __init__(
        self,
        storage: Any,
        llm: Any,
        embedder: Any,
        config: MemoryConfig | None = None,
    ) -> None:
        """Initialize the flow.

        Args:
            storage: Backend exposing search/list_scopes/get_scope_info.
            llm: LLM used for query analysis and deep extraction.
            embedder: Embedding provider for sub-queries.
            config: Optional MemoryConfig; a default instance is used when omitted.
        """
        super().__init__(suppress_flow_events=True)
        self._storage = storage
        self._llm = llm
        self._embedder = embedder
        self._config = config or MemoryConfig()

    # ------------------------------------------------------------------
    # Helpers
    # ------------------------------------------------------------------

    def _merged_categories(self) -> list[str] | None:
        """Merge caller-supplied and LLM-inferred categories."""
        merged = list(
            set((self.state.categories or []) + self.state.inferred_categories)
        )
        # None (not []) tells storage "no category filter".
        return merged or None

    def _do_search(self) -> list[dict[str, Any]]:
        """Run parallel search across (embeddings x scopes) with filters.

        Populates ``state.chunk_findings`` and ``state.confidence``.
        Returns the findings list.
        """
        search_categories = self._merged_categories()

        def _search_one(
            embedding: list[float], scope: str
        ) -> tuple[str, list[tuple[MemoryRecord, float]]]:
            # One storage query per (embedding, scope) pair; filters applied after.
            raw = self._storage.search(
                embedding,
                scope_prefix=scope,
                categories=search_categories,
                limit=self.state.limit * _RECALL_OVERSAMPLE_FACTOR,
                min_score=0.0,
            )
            # Post-filter by time cutoff
            if self.state.time_cutoff and raw:
                raw = [
                    (r, s) for r, s in raw if r.created_at >= self.state.time_cutoff
                ]
            # Privacy filter: private records are visible only to their own source.
            if not self.state.include_private and raw:
                raw = [
                    (r, s) for r, s in raw
                    if not r.private or r.source == self.state.source
                ]
            return scope, raw

        # Build (embedding, scope) task list
        tasks: list[tuple[list[float], str]] = [
            (embedding, scope)
            for _query_text, embedding in self.state.query_embeddings
            for scope in self.state.candidate_scopes
        ]

        findings: list[dict[str, Any]] = []

        if len(tasks) <= 1:
            # Single task: avoid thread-pool overhead.
            for emb, sc in tasks:
                scope, results = _search_one(emb, sc)
                if results:
                    # Composite score of the best hit represents this finding.
                    top_composite, _ = compute_composite_score(
                        results[0][0], results[0][1], self._config
                    )
                    findings.append({
                        "scope": scope,
                        "results": results,
                        "top_score": top_composite,
                    })
        else:
            with ThreadPoolExecutor(max_workers=min(len(tasks), 4)) as pool:
                futures = {
                    pool.submit(_search_one, emb, sc): (emb, sc)
                    for emb, sc in tasks
                }
                for future in as_completed(futures):
                    scope, results = future.result()
                    if results:
                        top_composite, _ = compute_composite_score(
                            results[0][0], results[0][1], self._config
                        )
                        findings.append({
                            "scope": scope,
                            "results": results,
                            "top_score": top_composite,
                        })

        self.state.chunk_findings = findings
        # Confidence = best top_score across all findings (0.0 when empty).
        self.state.confidence = max(
            (f["top_score"] for f in findings), default=0.0
        )
        return findings

    # ------------------------------------------------------------------
    # Flow steps
    # ------------------------------------------------------------------

    @start()
    def analyze_query_step(self) -> QueryAnalysis:
        """Analyze the query, embed distilled sub-queries, extract filters.

        Short queries (below ``query_analysis_threshold`` characters) skip
        the LLM call entirely and embed the raw query directly -- saving
        ~1-3s per recall. Longer queries (e.g. full task descriptions)
        benefit from LLM distillation into targeted sub-queries.

        Sub-queries are embedded in a single batch ``embed_texts()`` call
        rather than sequential ``embed_text()`` calls.
        """
        self.state.exploration_budget = self._config.exploration_budget

        query_len = len(self.state.query)
        skip_llm = query_len < self._config.query_analysis_threshold

        if skip_llm:
            # Short query: skip LLM, embed raw query directly
            analysis = QueryAnalysis(
                keywords=[],
                suggested_scopes=[],
                complexity="simple",
                recall_queries=[self.state.query],
            )
            self.state.query_analysis = analysis
        else:
            # Long query: use LLM to distill sub-queries and extract filters
            available = self._storage.list_scopes(self.state.scope or "/")
            if not available:
                available = ["/"]
            scope_info = (
                self._storage.get_scope_info(self.state.scope or "/")
                if self.state.scope
                else None
            )
            analysis = analyze_query(
                self.state.query,
                available,
                scope_info,
                self._llm,
            )
            self.state.query_analysis = analysis

        # Wire keywords -> category filter
        # (no-op on the fast path, where keywords is always empty)
        if analysis.keywords:
            self.state.inferred_categories = analysis.keywords

        # Parse time_filter into a datetime cutoff
        if analysis.time_filter:
            try:
                self.state.time_cutoff = datetime.fromisoformat(analysis.time_filter)
            except ValueError:
                # Unparseable hint: silently skip time filtering.
                pass

        # Batch-embed all sub-queries in ONE call
        queries = analysis.recall_queries if analysis.recall_queries else [self.state.query]
        queries = queries[:3]
        embeddings = embed_texts(self._embedder, queries)
        pairs: list[tuple[str, list[float]]] = [
            (q, emb) for q, emb in zip(queries, embeddings, strict=False) if emb
        ]
        if not pairs:
            # Fallback: embed the raw query if distilled queries all failed
            fallback_emb = embed_texts(self._embedder, [self.state.query])
            if fallback_emb and fallback_emb[0]:
                pairs = [(self.state.query, fallback_emb[0])]
        self.state.query_embeddings = pairs
        return analysis

    @listen(analyze_query_step)
    def filter_and_chunk(self) -> list[str]:
        """Select candidate scopes based on LLM analysis."""
        analysis = self.state.query_analysis
        scope_prefix = (self.state.scope or "/").rstrip("/") or "/"
        if analysis and analysis.suggested_scopes:
            candidates = [s for s in analysis.suggested_scopes if s]
        else:
            candidates = self._storage.list_scopes(scope_prefix)
            if not candidates:
                info = self._storage.get_scope_info(scope_prefix)
                # NOTE(review): both arms below are identical; the record_count
                # check currently has no effect — confirm intended behavior.
                if info.record_count > 0:
                    candidates = [scope_prefix]
                else:
                    candidates = [scope_prefix]
        # Hard cap to bound the parallel search fan-out.
        self.state.candidate_scopes = candidates[:20]
        return self.state.candidate_scopes

    @listen(filter_and_chunk)
    def search_chunks(self) -> list[Any]:
        """Initial parallel search across (embeddings x scopes) with filters."""
        return self._do_search()

    @router(search_chunks)
    def decide_depth(self) -> str:
        """Route based on confidence, complexity, and remaining budget."""
        analysis = self.state.query_analysis
        # Complex queries get a stricter confidence bar before synthesis.
        if (
            analysis
            and analysis.complexity == "complex"
            and self.state.confidence < self._config.complex_query_threshold
        ):
            if self.state.exploration_budget > 0:
                return "explore_deeper"
        if self.state.confidence >= self._config.confidence_threshold_high:
            return "synthesize"
        if (
            self.state.exploration_budget > 0
            and self.state.confidence < self._config.confidence_threshold_low
        ):
            return "explore_deeper"
        return "synthesize"

    @listen("explore_deeper")
    def recursive_exploration(self) -> list[Any]:
        """Feed top results back to LLM for deeper context extraction.

        Decrements the exploration budget so the loop terminates.
        """
        self.state.exploration_budget -= 1

        enhanced = []
        for finding in self.state.chunk_findings:
            if not finding.get("results"):
                continue
            content_parts = [r[0].content for r in finding["results"][:5]]
            chunk_text = "\n---\n".join(content_parts)
            prompt = (
                f"Query: {self.state.query}\n\n"
                f"Relevant memory excerpts:\n{chunk_text}\n\n"
                "Extract the most relevant information for the query. "
                "If something is missing, say what's missing in one short line."
            )
            try:
                response = self._llm.call([{"role": "user", "content": prompt}])
                # Heuristic gap detection: the prompt asks the LLM to mention "missing".
                if isinstance(response, str) and "missing" in response.lower():
                    self.state.evidence_gaps.append(response[:200])
                enhanced.append({
                    "scope": finding["scope"],
                    "extraction": response,
                    "results": finding["results"],
                })
            except Exception:
                # Extraction is best-effort; keep the raw results on LLM failure.
                enhanced.append({
                    "scope": finding["scope"],
                    "extraction": "",
                    "results": finding["results"],
                })
        self.state.chunk_findings = enhanced
        return enhanced

    @listen(recursive_exploration)
    def re_search(self) -> list[Any]:
        """Re-search after exploration to update confidence for the router loop."""
        return self._do_search()

    @router(re_search)
    def re_decide_depth(self) -> str:
        """Re-evaluate depth after re-search. Same logic as decide_depth."""
        return self.decide_depth()

    @listen("synthesize")
    def synthesize_results(self) -> list[MemoryMatch]:
        """Deduplicate, composite-score, rank, and attach evidence gaps."""
        seen_ids: set[str] = set()
        matches: list[MemoryMatch] = []
        for finding in self.state.chunk_findings:
            # Findings may be raw search dicts or exploration-enhanced dicts.
            if not isinstance(finding, dict):
                continue
            results = finding.get("results", [])
            if not isinstance(results, list):
                continue
            for item in results:
                if isinstance(item, (list, tuple)) and len(item) >= 2:
                    record, score = item[0], item[1]
                else:
                    continue
                if isinstance(record, MemoryRecord) and record.id not in seen_ids:
                    seen_ids.add(record.id)
                    composite, reasons = compute_composite_score(
                        record, float(score), self._config
                    )
                    matches.append(
                        MemoryMatch(
                            record=record,
                            score=composite,
                            match_reasons=reasons,
                        )
                    )
        matches.sort(key=lambda m: m.score, reverse=True)
        self.state.final_results = matches[: self.state.limit]

        # Attach evidence gaps to the first result so callers can inspect them
        if self.state.evidence_gaps and self.state.final_results:
            self.state.final_results[0].evidence_gaps = list(self.state.evidence_gaps)

        return self.state.final_results
|
||||
@@ -1,318 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from pydantic import PrivateAttr
|
||||
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
MemoryQueryStartedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
MemorySaveFailedEvent,
|
||||
MemorySaveStartedEvent,
|
||||
)
|
||||
from crewai.memory.memory import Memory
|
||||
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
|
||||
from crewai.memory.storage.rag_storage import RAGStorage
|
||||
|
||||
|
||||
class ShortTermMemory(Memory):
    """
    ShortTermMemory class for managing transient data related to immediate tasks
    and interactions.
    Inherits from the Memory class and utilizes an instance of a class that
    adheres to the Storage for data storage, specifically working with
    MemoryItem instances.
    """

    # Embedder provider name; "mem0" switches storage and save formatting.
    _memory_provider: str | None = PrivateAttr()

    def __init__(
        self,
        crew: Any = None,
        embedder_config: Any = None,
        storage: Any = None,
        path: str | None = None,
    ) -> None:
        # Read the provider only when the config is a dict.
        memory_provider = None
        if embedder_config and isinstance(embedder_config, dict):
            memory_provider = embedder_config.get("provider")

        if memory_provider == "mem0":
            # Mem0 is an optional dependency; fail with an install hint.
            try:
                from crewai.memory.storage.mem0_storage import Mem0Storage
            except ImportError as e:
                raise ImportError(
                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
                ) from e
            config = (
                embedder_config.get("config")
                if embedder_config and isinstance(embedder_config, dict)
                else None
            )
            storage = Mem0Storage(type="short_term", crew=crew, config=config)  # type: ignore[no-untyped-call]
        else:
            # Use the caller-provided storage, else default to RAG storage.
            storage = (
                storage
                if storage
                else RAGStorage(
                    type="short_term",
                    embedder_config=embedder_config,
                    crew=crew,
                    path=path,
                )
            )
        super().__init__(storage=storage)
        self._memory_provider = memory_provider

    def save(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Save a value to short-term memory, emitting lifecycle events.

        Args:
            value: The value to save.
            metadata: Optional metadata to associate with the value.
        """
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=value,
                metadata=metadata,
                source_type="short_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            item = ShortTermMemoryItem(
                data=value,
                metadata=metadata,
                agent=self.agent.role if self.agent else None,
            )
            if self._memory_provider == "mem0":
                # Mem0 expects an instruction-style prefix on saved insights.
                item.data = (
                    f"Remember the following insights from Agent run: {item.data}"
                )

            super().save(value=item.data, metadata=item.metadata)

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=value,
                    metadata=metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
        except Exception as e:
            # Emit the failure event, then re-raise for the caller.
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=value,
                    metadata=metadata,
                    error=str(e),
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
            raise

    def search(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search short-term memory for relevant entries.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="short_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = self.storage.search(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return list(results)
        except Exception as e:
            # NOTE(review): unlike the started/completed events, this failure
            # event omits from_agent/from_task — confirm whether intentional.
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="short_term_memory",
                ),
            )
            raise

    async def asave(
        self,
        value: Any,
        metadata: dict[str, Any] | None = None,
    ) -> None:
        """Save a value to short-term memory asynchronously.

        Args:
            value: The value to save.
            metadata: Optional metadata to associate with the value.
        """
        crewai_event_bus.emit(
            self,
            event=MemorySaveStartedEvent(
                value=value,
                metadata=metadata,
                source_type="short_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            item = ShortTermMemoryItem(
                data=value,
                metadata=metadata,
                agent=self.agent.role if self.agent else None,
            )
            if self._memory_provider == "mem0":
                # Mem0 expects an instruction-style prefix on saved insights.
                item.data = (
                    f"Remember the following insights from Agent run: {item.data}"
                )

            await super().asave(value=item.data, metadata=item.metadata)

            crewai_event_bus.emit(
                self,
                event=MemorySaveCompletedEvent(
                    value=value,
                    metadata=metadata,
                    save_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
        except Exception as e:
            crewai_event_bus.emit(
                self,
                event=MemorySaveFailedEvent(
                    value=value,
                    metadata=metadata,
                    error=str(e),
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )
            raise

    async def asearch(
        self,
        query: str,
        limit: int = 5,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search short-term memory asynchronously.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching memory entries.
        """
        crewai_event_bus.emit(
            self,
            event=MemoryQueryStartedEvent(
                query=query,
                limit=limit,
                score_threshold=score_threshold,
                source_type="short_term_memory",
                from_agent=self.agent,
                from_task=self.task,
            ),
        )

        start_time = time.time()
        try:
            results = await self.storage.asearch(
                query=query, limit=limit, score_threshold=score_threshold
            )

            crewai_event_bus.emit(
                self,
                event=MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=score_threshold,
                    query_time_ms=(time.time() - start_time) * 1000,
                    source_type="short_term_memory",
                    from_agent=self.agent,
                    from_task=self.task,
                ),
            )

            return list(results)
        except Exception as e:
            # NOTE(review): failure event omits from_agent/from_task here too.
            crewai_event_bus.emit(
                self,
                event=MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=score_threshold,
                    error=str(e),
                    source_type="short_term_memory",
                ),
            )
            raise

    def reset(self) -> None:
        """Clear all short-term memory, wrapping storage errors with context."""
        try:
            self.storage.reset()
        except Exception as e:
            raise Exception(
                f"An error occurred while resetting the short-term memory: {e}"
            ) from e
|
||||
@@ -1,13 +0,0 @@
|
||||
from typing import Any
|
||||
|
||||
|
||||
class ShortTermMemoryItem:
|
||||
def __init__(
|
||||
self,
|
||||
data: Any,
|
||||
agent: str | None = None,
|
||||
metadata: dict[str, Any] | None = None,
|
||||
):
|
||||
self.data = data
|
||||
self.agent = agent
|
||||
self.metadata = metadata if metadata is not None else {}
|
||||
179
lib/crewai/src/crewai/memory/storage/backend.py
Normal file
179
lib/crewai/src/crewai/memory/storage/backend.py
Normal file
@@ -0,0 +1,179 @@
|
||||
"""Storage backend protocol for the unified memory system."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Any, Protocol, runtime_checkable
|
||||
|
||||
from crewai.memory.types import MemoryRecord, ScopeInfo
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class StorageBackend(Protocol):
|
||||
"""Protocol for pluggable memory storage backends."""
|
||||
|
||||
def save(self, records: list[MemoryRecord]) -> None:
|
||||
"""Save memory records to storage.
|
||||
|
||||
Args:
|
||||
records: List of memory records to persist.
|
||||
"""
|
||||
...
|
||||
|
||||
def search(
|
||||
self,
|
||||
query_embedding: list[float],
|
||||
scope_prefix: str | None = None,
|
||||
categories: list[str] | None = None,
|
||||
metadata_filter: dict[str, Any] | None = None,
|
||||
limit: int = 10,
|
||||
min_score: float = 0.0,
|
||||
) -> list[tuple[MemoryRecord, float]]:
|
||||
"""Search for memories by vector similarity with optional filters.
|
||||
|
||||
Args:
|
||||
query_embedding: Embedding vector for the query.
|
||||
scope_prefix: Optional scope path prefix to filter results.
|
||||
categories: Optional list of categories to filter by.
|
||||
metadata_filter: Optional metadata key-value filter.
|
||||
limit: Maximum number of results to return.
|
||||
min_score: Minimum similarity score threshold.
|
||||
|
||||
Returns:
|
||||
List of (MemoryRecord, score) tuples ordered by relevance.
|
||||
"""
|
||||
...
|
||||
|
||||
def delete(
|
||||
self,
|
||||
scope_prefix: str | None = None,
|
||||
categories: list[str] | None = None,
|
||||
record_ids: list[str] | None = None,
|
||||
older_than: datetime | None = None,
|
||||
metadata_filter: dict[str, Any] | None = None,
|
||||
) -> int:
|
||||
"""Delete memories matching the given criteria.
|
||||
|
||||
Args:
|
||||
scope_prefix: Optional scope path prefix.
|
||||
categories: Optional list of categories.
|
||||
record_ids: Optional list of record IDs to delete.
|
||||
older_than: Optional cutoff datetime (delete older records).
|
||||
metadata_filter: Optional metadata key-value filter.
|
||||
|
||||
Returns:
|
||||
Number of records deleted.
|
||||
"""
|
||||
...
|
||||
|
||||
def update(self, record: MemoryRecord) -> None:
|
||||
"""Update an existing record. Replaces the record with the same ID."""
|
||||
...
|
||||
|
||||
def get_record(self, record_id: str) -> MemoryRecord | None:
|
||||
"""Return a single record by ID, or None if not found.
|
||||
|
||||
Args:
|
||||
record_id: The unique ID of the record.
|
||||
|
||||
Returns:
|
||||
The MemoryRecord, or None if no record with that ID exists.
|
||||
"""
|
||||
...
|
||||
|
||||
def list_records(
|
||||
self,
|
||||
scope_prefix: str | None = None,
|
||||
limit: int = 200,
|
||||
offset: int = 0,
|
||||
) -> list[MemoryRecord]:
|
||||
"""List records in a scope, newest first.
|
||||
|
||||
Args:
|
||||
scope_prefix: Optional scope path prefix to filter by.
|
||||
limit: Maximum number of records to return.
|
||||
offset: Number of records to skip (for pagination).
|
||||
|
||||
Returns:
|
||||
List of MemoryRecord, ordered by created_at descending.
|
||||
"""
|
||||
...
|
||||
|
||||
def get_scope_info(self, scope: str) -> ScopeInfo:
|
||||
"""Get information about a scope.
|
||||
|
||||
Args:
|
||||
scope: The scope path.
|
||||
|
||||
Returns:
|
||||
ScopeInfo with record count, categories, date range, child scopes.
|
||||
"""
|
||||
...
|
||||
|
||||
def list_scopes(self, parent: str = "/") -> list[str]:
|
||||
"""List immediate child scopes under a parent path.
|
||||
|
||||
Args:
|
||||
parent: Parent scope path (default root).
|
||||
|
||||
Returns:
|
||||
List of immediate child scope paths.
|
||||
"""
|
||||
...
|
||||
|
||||
def list_categories(self, scope_prefix: str | None = None) -> dict[str, int]:
|
||||
"""List categories and their counts within a scope.
|
||||
|
||||
Args:
|
||||
scope_prefix: Optional scope to limit to (None = global).
|
||||
|
||||
Returns:
|
||||
Mapping of category name to record count.
|
||||
"""
|
||||
...
|
||||
|
||||
def count(self, scope_prefix: str | None = None) -> int:
|
||||
"""Count records in scope (and subscopes).
|
||||
|
||||
Args:
|
||||
scope_prefix: Optional scope path (None = all).
|
||||
|
||||
Returns:
|
||||
Number of records.
|
||||
"""
|
||||
...
|
||||
|
||||
def reset(self, scope_prefix: str | None = None) -> None:
|
||||
"""Reset (delete all) memories in scope.
|
||||
|
||||
Args:
|
||||
scope_prefix: Optional scope path (None = reset all).
|
||||
"""
|
||||
...
|
||||
|
||||
async def asave(self, records: list[MemoryRecord]) -> None:
|
||||
"""Save memory records asynchronously."""
|
||||
...
|
||||
|
||||
async def asearch(
|
||||
self,
|
||||
query_embedding: list[float],
|
||||
scope_prefix: str | None = None,
|
||||
categories: list[str] | None = None,
|
||||
metadata_filter: dict[str, Any] | None = None,
|
||||
limit: int = 10,
|
||||
min_score: float = 0.0,
|
||||
) -> list[tuple[MemoryRecord, float]]:
|
||||
"""Search for memories asynchronously."""
|
||||
...
|
||||
|
||||
async def adelete(
|
||||
self,
|
||||
scope_prefix: str | None = None,
|
||||
categories: list[str] | None = None,
|
||||
record_ids: list[str] | None = None,
|
||||
older_than: datetime | None = None,
|
||||
metadata_filter: dict[str, Any] | None = None,
|
||||
) -> int:
|
||||
"""Delete memories asynchronously."""
|
||||
...
|
||||
@@ -1,16 +0,0 @@
|
||||
from typing import Any
|
||||
|
||||
|
||||
class Storage:
|
||||
"""Abstract base class defining the storage interface"""
|
||||
|
||||
def save(self, value: Any, metadata: dict[str, Any]) -> None:
|
||||
pass
|
||||
|
||||
def search(
|
||||
self, query: str, limit: int, score_threshold: float
|
||||
) -> dict[str, Any] | list[Any]:
|
||||
return {}
|
||||
|
||||
def reset(self) -> None:
|
||||
pass
|
||||
536
lib/crewai/src/crewai/memory/storage/lancedb_storage.py
Normal file
536
lib/crewai/src/crewai/memory/storage/lancedb_storage.py
Normal file
@@ -0,0 +1,536 @@
|
||||
"""LanceDB storage backend for the unified memory system."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
import threading
|
||||
import time
|
||||
from typing import Any, ClassVar
|
||||
|
||||
import lancedb
|
||||
|
||||
from crewai.memory.types import MemoryRecord, ScopeInfo
|
||||
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
# Default embedding vector dimensionality (matches OpenAI text-embedding-3-small).
|
||||
# Used when creating new tables and for zero-vector placeholder scans.
|
||||
# Callers can override via the ``vector_dim`` constructor parameter.
|
||||
DEFAULT_VECTOR_DIM = 1536
|
||||
|
||||
# Safety cap on the number of rows returned by a single scan query.
|
||||
# Prevents unbounded memory use when scanning large tables for scope info,
|
||||
# listing, or deletion. Internal only -- not user-configurable.
|
||||
_SCAN_ROWS_LIMIT = 50_000
|
||||
|
||||
# Retry settings for LanceDB commit conflicts (optimistic concurrency).
|
||||
# Under heavy write load (many concurrent saves), the table version can
|
||||
# advance rapidly. 5 retries with 0.2s base delay (0.2 + 0.4 + 0.8 + 1.6 + 3.2 = 6.2s max)
|
||||
# gives enough headroom to catch up with version advancement.
|
||||
_MAX_RETRIES = 5
|
||||
_RETRY_BASE_DELAY = 0.2 # seconds; doubles on each retry
|
||||
|
||||
|
||||
class LanceDBStorage:
|
||||
"""LanceDB-backed storage for the unified memory system."""
|
||||
|
||||
# Class-level registry: maps resolved database path -> shared write lock.
|
||||
# When multiple Memory instances (e.g. agent + crew) independently create
|
||||
# LanceDBStorage pointing at the same directory, they share one lock so
|
||||
# their writes don't conflict.
|
||||
# Uses RLock (reentrant) so callers can hold the lock for a batch of
|
||||
# operations while the individual methods re-acquire it without deadlocking.
|
||||
_path_locks: ClassVar[dict[str, threading.RLock]] = {}
|
||||
_path_locks_guard: ClassVar[threading.Lock] = threading.Lock()
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
path: str | Path | None = None,
|
||||
table_name: str = "memories",
|
||||
vector_dim: int | None = None,
|
||||
) -> None:
|
||||
"""Initialize LanceDB storage.
|
||||
|
||||
Args:
|
||||
path: Directory path for the LanceDB database. Defaults to
|
||||
``$CREWAI_STORAGE_DIR/memory`` if the env var is set,
|
||||
otherwise ``db_storage_path() / memory`` (platform data dir).
|
||||
table_name: Name of the table for memory records.
|
||||
vector_dim: Dimensionality of the embedding vector. When ``None``
|
||||
(default), the dimension is auto-detected from the existing
|
||||
table schema or from the first saved embedding.
|
||||
"""
|
||||
if path is None:
|
||||
storage_dir = os.environ.get("CREWAI_STORAGE_DIR")
|
||||
if storage_dir:
|
||||
path = Path(storage_dir) / "memory"
|
||||
else:
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
|
||||
path = Path(db_storage_path()) / "memory"
|
||||
self._path = Path(path)
|
||||
self._path.mkdir(parents=True, exist_ok=True)
|
||||
self._table_name = table_name
|
||||
self._db = lancedb.connect(str(self._path))
|
||||
|
||||
# Get or create a shared write lock for this database path.
|
||||
resolved = str(self._path.resolve())
|
||||
with LanceDBStorage._path_locks_guard:
|
||||
if resolved not in LanceDBStorage._path_locks:
|
||||
LanceDBStorage._path_locks[resolved] = threading.RLock()
|
||||
self._write_lock = LanceDBStorage._path_locks[resolved]
|
||||
|
||||
# Try to open an existing table and infer dimension from its schema.
|
||||
# If no table exists yet, defer creation until the first save so the
|
||||
# dimension can be auto-detected from the embedder's actual output.
|
||||
try:
|
||||
self._table: lancedb.table.Table | None = self._db.open_table(self._table_name)
|
||||
self._vector_dim: int = self._infer_dim_from_table(self._table)
|
||||
except Exception:
|
||||
self._table = None
|
||||
self._vector_dim = vector_dim or 0 # 0 = not yet known
|
||||
|
||||
# Explicit dim provided: create the table immediately if it doesn't exist.
|
||||
if self._table is None and vector_dim is not None:
|
||||
self._vector_dim = vector_dim
|
||||
self._table = self._create_table(vector_dim)
|
||||
|
||||
@property
|
||||
def write_lock(self) -> threading.RLock:
|
||||
"""The shared reentrant write lock for this database path.
|
||||
|
||||
Callers can acquire this to hold the lock across multiple storage
|
||||
operations (e.g. delete + update + save as one atomic batch).
|
||||
Individual methods also acquire it internally, but since it's
|
||||
reentrant (RLock), the same thread won't deadlock.
|
||||
"""
|
||||
return self._write_lock
|
||||
|
||||
@staticmethod
|
||||
def _infer_dim_from_table(table: lancedb.table.Table) -> int:
|
||||
"""Read vector dimension from an existing table's schema."""
|
||||
schema = table.schema
|
||||
for field in schema:
|
||||
if field.name == "vector":
|
||||
try:
|
||||
return field.type.list_size
|
||||
except Exception:
|
||||
break
|
||||
return DEFAULT_VECTOR_DIM
|
||||
|
||||
def _retry_write(self, op: str, *args: Any, **kwargs: Any) -> Any:
|
||||
"""Execute a table operation with retry on LanceDB commit conflicts.
|
||||
|
||||
Args:
|
||||
op: Method name on the table object (e.g. "add", "delete").
|
||||
*args, **kwargs: Passed to the table method.
|
||||
|
||||
LanceDB uses optimistic concurrency: if two transactions overlap,
|
||||
the second to commit fails with an ``OSError`` containing
|
||||
"Commit conflict". This helper retries with exponential backoff,
|
||||
refreshing the table reference before each retry so the retried
|
||||
call uses the latest committed version (not a stale reference).
|
||||
"""
|
||||
delay = _RETRY_BASE_DELAY
|
||||
for attempt in range(_MAX_RETRIES + 1):
|
||||
try:
|
||||
return getattr(self._table, op)(*args, **kwargs)
|
||||
except OSError as e: # noqa: PERF203
|
||||
if "Commit conflict" not in str(e) or attempt >= _MAX_RETRIES:
|
||||
raise
|
||||
_logger.debug(
|
||||
"LanceDB commit conflict on %s (attempt %d/%d), retrying in %.1fs",
|
||||
op, attempt + 1, _MAX_RETRIES, delay,
|
||||
)
|
||||
# Refresh table to pick up the latest version before retrying.
|
||||
# The next getattr(self._table, op) will use the fresh table.
|
||||
try:
|
||||
self._table = self._db.open_table(self._table_name)
|
||||
except Exception: # noqa: S110
|
||||
pass # table refresh is best-effort
|
||||
time.sleep(delay)
|
||||
delay *= 2
|
||||
return None # unreachable, but satisfies type checker
|
||||
|
||||
def _create_table(self, vector_dim: int) -> lancedb.table.Table:
|
||||
"""Create a new table with the given vector dimension."""
|
||||
placeholder = [
|
||||
{
|
||||
"id": "__schema_placeholder__",
|
||||
"content": "",
|
||||
"scope": "/",
|
||||
"categories_str": "[]",
|
||||
"metadata_str": "{}",
|
||||
"importance": 0.5,
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
"last_accessed": datetime.utcnow().isoformat(),
|
||||
"source": "",
|
||||
"private": False,
|
||||
"vector": [0.0] * vector_dim,
|
||||
}
|
||||
]
|
||||
table = self._db.create_table(self._table_name, placeholder)
|
||||
table.delete("id = '__schema_placeholder__'")
|
||||
return table
|
||||
|
||||
def _ensure_table(self, vector_dim: int | None = None) -> lancedb.table.Table:
|
||||
"""Return the table, creating it lazily if needed.
|
||||
|
||||
Args:
|
||||
vector_dim: Dimension hint (e.g. from the first embedding).
|
||||
Falls back to the stored ``_vector_dim`` or ``DEFAULT_VECTOR_DIM``.
|
||||
"""
|
||||
if self._table is not None:
|
||||
return self._table
|
||||
dim = vector_dim or self._vector_dim or DEFAULT_VECTOR_DIM
|
||||
self._vector_dim = dim
|
||||
self._table = self._create_table(dim)
|
||||
return self._table
|
||||
|
||||
def _record_to_row(self, record: MemoryRecord) -> dict[str, Any]:
|
||||
return {
|
||||
"id": record.id,
|
||||
"content": record.content,
|
||||
"scope": record.scope,
|
||||
"categories_str": json.dumps(record.categories),
|
||||
"metadata_str": json.dumps(record.metadata),
|
||||
"importance": record.importance,
|
||||
"created_at": record.created_at.isoformat(),
|
||||
"last_accessed": record.last_accessed.isoformat(),
|
||||
"source": record.source or "",
|
||||
"private": record.private,
|
||||
"vector": record.embedding if record.embedding else [0.0] * self._vector_dim,
|
||||
}
|
||||
|
||||
def _row_to_record(self, row: dict[str, Any]) -> MemoryRecord:
|
||||
def _parse_dt(val: Any) -> datetime:
|
||||
if val is None:
|
||||
return datetime.utcnow()
|
||||
if isinstance(val, datetime):
|
||||
return val
|
||||
s = str(val)
|
||||
return datetime.fromisoformat(s.replace("Z", "+00:00"))
|
||||
|
||||
return MemoryRecord(
|
||||
id=str(row["id"]),
|
||||
content=str(row["content"]),
|
||||
scope=str(row["scope"]),
|
||||
categories=json.loads(row["categories_str"]) if row.get("categories_str") else [],
|
||||
metadata=json.loads(row["metadata_str"]) if row.get("metadata_str") else {},
|
||||
importance=float(row.get("importance", 0.5)),
|
||||
created_at=_parse_dt(row.get("created_at")),
|
||||
last_accessed=_parse_dt(row.get("last_accessed")),
|
||||
embedding=row.get("vector"),
|
||||
source=row.get("source") or None,
|
||||
private=bool(row.get("private", False)),
|
||||
)
|
||||
|
||||
def save(self, records: list[MemoryRecord]) -> None:
|
||||
if not records:
|
||||
return
|
||||
# Auto-detect dimension from the first real embedding.
|
||||
dim = None
|
||||
for r in records:
|
||||
if r.embedding and len(r.embedding) > 0:
|
||||
dim = len(r.embedding)
|
||||
break
|
||||
with self._write_lock:
|
||||
self._ensure_table(vector_dim=dim)
|
||||
rows = [self._record_to_row(r) for r in records]
|
||||
for r in rows:
|
||||
if r["vector"] is None or len(r["vector"]) != self._vector_dim:
|
||||
r["vector"] = [0.0] * self._vector_dim
|
||||
self._retry_write("add", rows)
|
||||
|
||||
def update(self, record: MemoryRecord) -> None:
|
||||
"""Update a record by ID. Preserves created_at, updates last_accessed."""
|
||||
with self._write_lock:
|
||||
self._ensure_table()
|
||||
safe_id = str(record.id).replace("'", "''")
|
||||
self._retry_write("delete", f"id = '{safe_id}'")
|
||||
row = self._record_to_row(record)
|
||||
if row["vector"] is None or len(row["vector"]) != self._vector_dim:
|
||||
row["vector"] = [0.0] * self._vector_dim
|
||||
self._retry_write("add", [row])
|
||||
|
||||
def touch_records(self, record_ids: list[str]) -> None:
|
||||
"""Update last_accessed to now for the given record IDs.
|
||||
|
||||
Args:
|
||||
record_ids: IDs of records to touch.
|
||||
"""
|
||||
if not record_ids or self._table is None:
|
||||
return
|
||||
with self._write_lock:
|
||||
now = datetime.utcnow().isoformat()
|
||||
for rid in record_ids:
|
||||
safe_id = str(rid).replace("'", "''")
|
||||
rows = (
|
||||
self._table.search([0.0] * self._vector_dim)
|
||||
.where(f"id = '{safe_id}'")
|
||||
.limit(1)
|
||||
.to_list()
|
||||
)
|
||||
if rows:
|
||||
rows[0]["last_accessed"] = now
|
||||
self._retry_write("delete", f"id = '{safe_id}'")
|
||||
self._retry_write("add", [rows[0]])
|
||||
|
||||
def get_record(self, record_id: str) -> MemoryRecord | None:
|
||||
"""Return a single record by ID, or None if not found."""
|
||||
if self._table is None:
|
||||
return None
|
||||
safe_id = str(record_id).replace("'", "''")
|
||||
rows = self._table.search([0.0] * self._vector_dim).where(f"id = '{safe_id}'").limit(1).to_list()
|
||||
if not rows:
|
||||
return None
|
||||
return self._row_to_record(rows[0])
|
||||
|
||||
def search(
|
||||
self,
|
||||
query_embedding: list[float],
|
||||
scope_prefix: str | None = None,
|
||||
categories: list[str] | None = None,
|
||||
metadata_filter: dict[str, Any] | None = None,
|
||||
limit: int = 10,
|
||||
min_score: float = 0.0,
|
||||
) -> list[tuple[MemoryRecord, float]]:
|
||||
if self._table is None:
|
||||
return []
|
||||
query = self._table.search(query_embedding)
|
||||
if scope_prefix is not None and scope_prefix.strip("/"):
|
||||
prefix = scope_prefix.rstrip("/")
|
||||
like_val = prefix + "%"
|
||||
query = query.where(f"scope LIKE '{like_val}'")
|
||||
results = query.limit(limit * 3 if (categories or metadata_filter) else limit).to_list()
|
||||
out: list[tuple[MemoryRecord, float]] = []
|
||||
for row in results:
|
||||
record = self._row_to_record(row)
|
||||
if categories and not any(c in record.categories for c in categories):
|
||||
continue
|
||||
if metadata_filter and not all(record.metadata.get(k) == v for k, v in metadata_filter.items()):
|
||||
continue
|
||||
distance = row.get("_distance", 0.0)
|
||||
score = 1.0 / (1.0 + float(distance)) if distance is not None else 1.0
|
||||
if score >= min_score:
|
||||
out.append((record, score))
|
||||
if len(out) >= limit:
|
||||
break
|
||||
return out[:limit]
|
||||
|
||||
def delete(
|
||||
self,
|
||||
scope_prefix: str | None = None,
|
||||
categories: list[str] | None = None,
|
||||
record_ids: list[str] | None = None,
|
||||
older_than: datetime | None = None,
|
||||
metadata_filter: dict[str, Any] | None = None,
|
||||
) -> int:
|
||||
if self._table is None:
|
||||
return 0
|
||||
with self._write_lock:
|
||||
if record_ids and not (categories or metadata_filter):
|
||||
before = self._table.count_rows()
|
||||
ids_expr = ", ".join(f"'{rid}'" for rid in record_ids)
|
||||
self._retry_write("delete", f"id IN ({ids_expr})")
|
||||
return before - self._table.count_rows()
|
||||
if categories or metadata_filter:
|
||||
rows = self._scan_rows(scope_prefix)
|
||||
to_delete: list[str] = []
|
||||
for row in rows:
|
||||
record = self._row_to_record(row)
|
||||
if categories and not any(c in record.categories for c in categories):
|
||||
continue
|
||||
if metadata_filter and not all(record.metadata.get(k) == v for k, v in metadata_filter.items()):
|
||||
continue
|
||||
if older_than and record.created_at >= older_than:
|
||||
continue
|
||||
to_delete.append(record.id)
|
||||
if not to_delete:
|
||||
return 0
|
||||
before = self._table.count_rows()
|
||||
ids_expr = ", ".join(f"'{rid}'" for rid in to_delete)
|
||||
self._retry_write("delete", f"id IN ({ids_expr})")
|
||||
return before - self._table.count_rows()
|
||||
conditions = []
|
||||
if scope_prefix is not None and scope_prefix.strip("/"):
|
||||
prefix = scope_prefix.rstrip("/")
|
||||
if not prefix.startswith("/"):
|
||||
prefix = "/" + prefix
|
||||
conditions.append(f"scope LIKE '{prefix}%' OR scope = '/'")
|
||||
if older_than is not None:
|
||||
conditions.append(f"created_at < '{older_than.isoformat()}'")
|
||||
if not conditions:
|
||||
before = self._table.count_rows()
|
||||
self._retry_write("delete", "id != ''")
|
||||
return before - self._table.count_rows()
|
||||
where_expr = " AND ".join(conditions)
|
||||
before = self._table.count_rows()
|
||||
self._retry_write("delete", where_expr)
|
||||
return before - self._table.count_rows()
|
||||
|
||||
def _scan_rows(self, scope_prefix: str | None = None, limit: int = _SCAN_ROWS_LIMIT) -> list[dict[str, Any]]:
|
||||
"""Scan rows optionally filtered by scope prefix."""
|
||||
if self._table is None:
|
||||
return []
|
||||
q = self._table.search([0.0] * self._vector_dim)
|
||||
if scope_prefix is not None and scope_prefix.strip("/"):
|
||||
q = q.where(f"scope LIKE '{scope_prefix.rstrip('/')}%'")
|
||||
return q.limit(limit).to_list()
|
||||
|
||||
def list_records(
|
||||
self, scope_prefix: str | None = None, limit: int = 200, offset: int = 0
|
||||
) -> list[MemoryRecord]:
|
||||
"""List records in a scope, newest first.
|
||||
|
||||
Args:
|
||||
scope_prefix: Optional scope path prefix to filter by.
|
||||
limit: Maximum number of records to return.
|
||||
offset: Number of records to skip (for pagination).
|
||||
|
||||
Returns:
|
||||
List of MemoryRecord, ordered by created_at descending.
|
||||
"""
|
||||
rows = self._scan_rows(scope_prefix, limit=limit + offset)
|
||||
records = [self._row_to_record(r) for r in rows]
|
||||
records.sort(key=lambda r: r.created_at, reverse=True)
|
||||
return records[offset : offset + limit]
|
||||
|
||||
def get_scope_info(self, scope: str) -> ScopeInfo:
|
||||
scope = scope.rstrip("/") or "/"
|
||||
prefix = scope if scope != "/" else ""
|
||||
if prefix and not prefix.startswith("/"):
|
||||
prefix = "/" + prefix
|
||||
rows = self._scan_rows(prefix or None)
|
||||
if not rows:
|
||||
return ScopeInfo(
|
||||
path=scope or "/",
|
||||
record_count=0,
|
||||
categories=[],
|
||||
oldest_record=None,
|
||||
newest_record=None,
|
||||
child_scopes=[],
|
||||
)
|
||||
categories_set: set[str] = set()
|
||||
oldest: datetime | None = None
|
||||
newest: datetime | None = None
|
||||
child_prefix = (prefix + "/") if prefix else "/"
|
||||
children: set[str] = set()
|
||||
for row in rows:
|
||||
sc = str(row.get("scope", ""))
|
||||
if child_prefix and sc.startswith(child_prefix):
|
||||
rest = sc[len(child_prefix):]
|
||||
first_component = rest.split("/", 1)[0]
|
||||
if first_component:
|
||||
children.add(child_prefix + first_component)
|
||||
try:
|
||||
cat_str = row.get("categories_str") or "[]"
|
||||
categories_set.update(json.loads(cat_str))
|
||||
except Exception: # noqa: S110
|
||||
pass
|
||||
created = row.get("created_at")
|
||||
if created:
|
||||
dt = datetime.fromisoformat(str(created).replace("Z", "+00:00")) if isinstance(created, str) else created
|
||||
if isinstance(dt, datetime):
|
||||
if oldest is None or dt < oldest:
|
||||
oldest = dt
|
||||
if newest is None or dt > newest:
|
||||
newest = dt
|
||||
return ScopeInfo(
|
||||
path=scope or "/",
|
||||
record_count=len(rows),
|
||||
categories=sorted(categories_set),
|
||||
oldest_record=oldest,
|
||||
newest_record=newest,
|
||||
child_scopes=sorted(children),
|
||||
)
|
||||
|
||||
def list_scopes(self, parent: str = "/") -> list[str]:
|
||||
parent = parent.rstrip("/") or ""
|
||||
prefix = (parent + "/") if parent else "/"
|
||||
rows = self._scan_rows(prefix if prefix != "/" else None)
|
||||
children: set[str] = set()
|
||||
for row in rows:
|
||||
sc = str(row.get("scope", ""))
|
||||
if sc.startswith(prefix) and sc != (prefix.rstrip("/") or "/"):
|
||||
rest = sc[len(prefix):]
|
||||
first_component = rest.split("/", 1)[0]
|
||||
if first_component:
|
||||
children.add(prefix + first_component)
|
||||
return sorted(children)
|
||||
|
||||
def list_categories(self, scope_prefix: str | None = None) -> dict[str, int]:
|
||||
rows = self._scan_rows(scope_prefix)
|
||||
counts: dict[str, int] = {}
|
||||
for row in rows:
|
||||
cat_str = row.get("categories_str") or "[]"
|
||||
try:
|
||||
parsed = json.loads(cat_str)
|
||||
except Exception: # noqa: S112
|
||||
continue
|
||||
for c in parsed:
|
||||
counts[c] = counts.get(c, 0) + 1
|
||||
return counts
|
||||
|
||||
def count(self, scope_prefix: str | None = None) -> int:
|
||||
if self._table is None:
|
||||
return 0
|
||||
if scope_prefix is None or scope_prefix.strip("/") == "":
|
||||
return self._table.count_rows()
|
||||
info = self.get_scope_info(scope_prefix)
|
||||
return info.record_count
|
||||
|
||||
def reset(self, scope_prefix: str | None = None) -> None:
|
||||
if scope_prefix is None or scope_prefix.strip("/") == "":
|
||||
if self._table is not None:
|
||||
self._db.drop_table(self._table_name)
|
||||
self._table = None
|
||||
# Dimension is preserved; table will be recreated on next save.
|
||||
return
|
||||
if self._table is None:
|
||||
return
|
||||
prefix = scope_prefix.rstrip("/")
|
||||
if prefix:
|
||||
self._table.delete(f"scope >= '{prefix}' AND scope < '{prefix}/\uFFFF'")
|
||||
|
||||
async def asave(self, records: list[MemoryRecord]) -> None:
|
||||
self.save(records)
|
||||
|
||||
async def asearch(
|
||||
self,
|
||||
query_embedding: list[float],
|
||||
scope_prefix: str | None = None,
|
||||
categories: list[str] | None = None,
|
||||
metadata_filter: dict[str, Any] | None = None,
|
||||
limit: int = 10,
|
||||
min_score: float = 0.0,
|
||||
) -> list[tuple[MemoryRecord, float]]:
|
||||
return self.search(
|
||||
query_embedding,
|
||||
scope_prefix=scope_prefix,
|
||||
categories=categories,
|
||||
metadata_filter=metadata_filter,
|
||||
limit=limit,
|
||||
min_score=min_score,
|
||||
)
|
||||
|
||||
async def adelete(
|
||||
self,
|
||||
scope_prefix: str | None = None,
|
||||
categories: list[str] | None = None,
|
||||
record_ids: list[str] | None = None,
|
||||
older_than: datetime | None = None,
|
||||
metadata_filter: dict[str, Any] | None = None,
|
||||
) -> int:
|
||||
return self.delete(
|
||||
scope_prefix=scope_prefix,
|
||||
categories=categories,
|
||||
record_ids=record_ids,
|
||||
older_than=older_than,
|
||||
metadata_filter=metadata_filter,
|
||||
)
|
||||
@@ -1,215 +0,0 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
import sqlite3
|
||||
from typing import Any
|
||||
|
||||
import aiosqlite
|
||||
|
||||
from crewai.utilities import Printer
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
|
||||
|
||||
class LTMSQLiteStorage:
|
||||
"""SQLite storage class for long-term memory data."""
|
||||
|
||||
def __init__(self, db_path: str | None = None, verbose: bool = True) -> None:
|
||||
"""Initialize the SQLite storage.
|
||||
|
||||
Args:
|
||||
db_path: Optional path to the database file.
|
||||
verbose: Whether to print error messages.
|
||||
"""
|
||||
if db_path is None:
|
||||
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
|
||||
self.db_path = db_path
|
||||
self._verbose = verbose
|
||||
self._printer: Printer = Printer()
|
||||
Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
|
||||
self._initialize_db()
|
||||
|
||||
def _initialize_db(self) -> None:
|
||||
"""Initialize the SQLite database and create LTM table."""
|
||||
try:
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS long_term_memories (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
task_description TEXT,
|
||||
metadata TEXT,
|
||||
datetime TEXT,
|
||||
score REAL
|
||||
)
|
||||
"""
|
||||
)
|
||||
|
||||
conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred during database initialization: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
def save(
|
||||
self,
|
||||
task_description: str,
|
||||
metadata: dict[str, Any],
|
||||
datetime: str,
|
||||
score: int | float,
|
||||
) -> None:
|
||||
"""Saves data to the LTM table with error handling."""
|
||||
try:
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT INTO long_term_memories (task_description, metadata, datetime, score)
|
||||
VALUES (?, ?, ?, ?)
|
||||
""",
|
||||
(task_description, json.dumps(metadata), datetime, score),
|
||||
)
|
||||
conn.commit()
|
||||
except sqlite3.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
def load(self, task_description: str, latest_n: int) -> list[dict[str, Any]] | None:
|
||||
"""Queries the LTM table by task description with error handling."""
|
||||
try:
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(
|
||||
f"""
|
||||
SELECT metadata, datetime, score
|
||||
FROM long_term_memories
|
||||
WHERE task_description = ?
|
||||
ORDER BY datetime DESC, score ASC
|
||||
LIMIT {latest_n}
|
||||
""", # nosec # noqa: S608
|
||||
(task_description,),
|
||||
)
|
||||
rows = cursor.fetchall()
|
||||
if rows:
|
||||
return [
|
||||
{
|
||||
"metadata": json.loads(row[0]),
|
||||
"datetime": row[1],
|
||||
"score": row[2],
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
|
||||
except sqlite3.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
return None
|
||||
|
||||
def reset(self) -> None:
|
||||
"""Resets the LTM table with error handling."""
|
||||
try:
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("DELETE FROM long_term_memories")
|
||||
conn.commit()
|
||||
|
||||
except sqlite3.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
async def asave(
|
||||
self,
|
||||
task_description: str,
|
||||
metadata: dict[str, Any],
|
||||
datetime: str,
|
||||
score: int | float,
|
||||
) -> None:
|
||||
"""Save data to the LTM table asynchronously.
|
||||
|
||||
Args:
|
||||
task_description: Description of the task.
|
||||
metadata: Metadata associated with the memory.
|
||||
datetime: Timestamp of the memory.
|
||||
score: Quality score of the memory.
|
||||
"""
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as conn:
|
||||
await conn.execute(
|
||||
"""
|
||||
INSERT INTO long_term_memories (task_description, metadata, datetime, score)
|
||||
VALUES (?, ?, ?, ?)
|
||||
""",
|
||||
(task_description, json.dumps(metadata), datetime, score),
|
||||
)
|
||||
await conn.commit()
|
||||
except aiosqlite.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while saving to LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
async def aload(
|
||||
self, task_description: str, latest_n: int
|
||||
) -> list[dict[str, Any]] | None:
|
||||
"""Query the LTM table by task description asynchronously.
|
||||
|
||||
Args:
|
||||
task_description: Description of the task to search for.
|
||||
latest_n: Maximum number of results to return.
|
||||
|
||||
Returns:
|
||||
List of matching memory entries or None if error occurs.
|
||||
"""
|
||||
try:
|
||||
async with aiosqlite.connect(self.db_path) as conn:
|
||||
cursor = await conn.execute(
|
||||
f"""
|
||||
SELECT metadata, datetime, score
|
||||
FROM long_term_memories
|
||||
WHERE task_description = ?
|
||||
ORDER BY datetime DESC, score ASC
|
||||
LIMIT {latest_n}
|
||||
""", # nosec # noqa: S608
|
||||
(task_description,),
|
||||
)
|
||||
rows = await cursor.fetchall()
|
||||
if rows:
|
||||
return [
|
||||
{
|
||||
"metadata": json.loads(row[0]),
|
||||
"datetime": row[1],
|
||||
"score": row[2],
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
except aiosqlite.Error as e:
|
||||
if self._verbose:
|
||||
self._printer.print(
|
||||
content=f"MEMORY ERROR: An error occurred while querying LTM: {e}",
|
||||
color="red",
|
||||
)
|
||||
return None
|
||||
|
||||
async def areset(self) -> None:
    """Delete every row from the LTM table asynchronously."""
    try:
        async with aiosqlite.connect(self.db_path) as conn:
            await conn.execute("DELETE FROM long_term_memories")
            await conn.commit()
    except aiosqlite.Error as e:
        # Surface the failure only when verbose logging is enabled.
        if not self._verbose:
            return
        self._printer.print(
            content=f"MEMORY ERROR: An error occurred while deleting all rows in LTM: {e}",
            color="red",
        )
|
||||
@@ -1,230 +0,0 @@
|
||||
from collections import defaultdict
|
||||
from collections.abc import Iterable
|
||||
import os
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
from mem0 import Memory, MemoryClient # type: ignore[import-untyped,import-not-found]
|
||||
|
||||
from crewai.memory.storage.interface import Storage
|
||||
from crewai.rag.chromadb.utils import _sanitize_collection_name
|
||||
|
||||
|
||||
MAX_AGENT_ID_LENGTH_MEM0 = 255
|
||||
|
||||
|
||||
class Mem0Storage(Storage):
    """
    Extends Storage to handle embedding and searching across entities using Mem0.

    Depending on configuration this backs onto either the hosted Mem0 service
    (``MemoryClient``, when an API key is available) or a local ``Memory``
    instance.
    """

    def __init__(self, type, crew=None, config=None):
        """Initialize Mem0-backed storage.

        Args:
            type: Memory type; one of "short_term", "long_term", "entities",
                or "external".
            crew: Optional crew whose agent roles are used to derive a
                default agent_id.
            config: Optional dict of Mem0 settings (api_key, org_id,
                project_id, user_id, agent_id, run_id, includes, excludes,
                custom_categories, infer, local_mem0_config).
        """
        super().__init__()

        self._validate_type(type)
        self.memory_type = type
        self.crew = crew
        self.config = config or {}

        self._extract_config_values()
        self._initialize_memory()

    def _validate_type(self, type):
        """Raise ValueError if ``type`` is not a supported memory type."""
        supported_types = {"short_term", "long_term", "entities", "external"}
        if type not in supported_types:
            raise ValueError(
                f"Invalid type '{type}' for Mem0Storage. "
                f"Must be one of: {', '.join(supported_types)}"
            )

    def _extract_config_values(self):
        """Cache frequently used config entries as instance attributes."""
        self.mem0_run_id = self.config.get("run_id")
        self.includes = self.config.get("includes")
        self.excludes = self.config.get("excludes")
        self.custom_categories = self.config.get("custom_categories")
        # "infer" defaults to True: let Mem0 extract memories from raw text.
        self.infer = self.config.get("infer", True)

    def _initialize_memory(self):
        """Create the Mem0 client.

        Uses the hosted MemoryClient when an API key is configured (via
        config or the MEM0_API_KEY environment variable); otherwise falls
        back to a local Memory instance, optionally built from
        ``local_mem0_config``.
        """
        api_key = self.config.get("api_key") or os.getenv("MEM0_API_KEY")
        org_id = self.config.get("org_id")
        project_id = self.config.get("project_id")
        local_config = self.config.get("local_mem0_config")

        if api_key:
            # org_id/project_id are only honoured together.
            self.memory = (
                MemoryClient(api_key=api_key, org_id=org_id, project_id=project_id)
                if org_id and project_id
                else MemoryClient(api_key=api_key)
            )
            if self.custom_categories:
                self.memory.update_project(custom_categories=self.custom_categories)
        else:
            self.memory = (
                Memory.from_config(local_config)
                if local_config and len(local_config)
                else Memory()
            )

    def _create_filter_for_search(self):
        """
        Returns:
            dict: A filter dictionary containing AND conditions for querying data.
                - Includes user_id and agent_id (OR-ed) if both are present.
                - Includes user_id if only user_id is present.
                - Includes agent_id if only agent_id is present.
                - Includes run_id if memory_type is 'short_term' and
                  mem0_run_id is present.
        """
        filter = defaultdict(list)

        if self.memory_type == "short_term" and self.mem0_run_id:
            filter["AND"].append({"run_id": self.mem0_run_id})
        else:
            user_id = self.config.get("user_id", "")
            agent_id = self.config.get("agent_id", "")

            # When both identities are configured, match memories attached
            # to either of them (OR), not just the intersection.
            if user_id and agent_id:
                filter["OR"].append({"user_id": user_id})
                filter["OR"].append({"agent_id": agent_id})
            elif user_id:
                filter["AND"].append({"user_id": user_id})
            elif agent_id:
                filter["AND"].append({"agent_id": agent_id})

        return filter

    def save(self, value: Any, metadata: dict[str, Any]) -> None:
        """Save a memory entry to Mem0.

        Args:
            value: Fallback assistant content used when ``metadata`` carries
                no message history.
            metadata: Metadata for the entry; a "messages" key, if present,
                is popped and used to build the conversation payload.
        """
        def _last_content(messages: Iterable[dict[str, Any]], role: str) -> str:
            # Most recent message content for the given role, or "".
            return next(
                (
                    m.get("content", "")
                    for m in reversed(list(messages))
                    if m.get("role") == role
                ),
                "",
            )

        conversations = []
        messages = metadata.pop("messages", None)
        if messages:
            last_user = _last_content(messages, "user")
            last_assistant = _last_content(messages, "assistant")

            if user_msg := self._get_user_message(last_user):
                conversations.append({"role": "user", "content": user_msg})

            if assistant_msg := self._get_assistant_message(last_assistant):
                conversations.append({"role": "assistant", "content": assistant_msg})
        else:
            conversations.append({"role": "assistant", "content": value})

        user_id = self.config.get("user_id", "")

        # Maps this storage's memory_type to the "type" tag stored in Mem0.
        base_metadata = {
            "short_term": "short_term",
            "long_term": "long_term",
            "entities": "entity",
            "external": "external",
        }

        # Shared base params
        params: dict[str, Any] = {
            "metadata": {"type": base_metadata[self.memory_type], **metadata},
            "infer": self.infer,
        }

        # MemoryClient-specific overrides (hosted API only)
        if isinstance(self.memory, MemoryClient):
            params["includes"] = self.includes
            params["excludes"] = self.excludes
            params["output_format"] = "v1.1"
            params["version"] = "v2"

        if self.memory_type == "short_term" and self.mem0_run_id:
            params["run_id"] = self.mem0_run_id

        if user_id:
            params["user_id"] = user_id

        # Fall back to a crew-derived agent name when no agent_id is set.
        if agent_id := self.config.get("agent_id", self._get_agent_name()):
            params["agent_id"] = agent_id

        self.memory.add(conversations, **params)

    def search(
        self, query: str, limit: int = 5, score_threshold: float = 0.6
    ) -> list[Any]:
        """Search Mem0 for memories relevant to ``query``.

        Args:
            query: Search text.
            limit: Maximum number of results to return.
            score_threshold: Minimum relevance score for results.

        Returns:
            List of result dicts; each gains a "content" key mirroring its
            "memory" field for Contextual Memory compatibility.
        """
        params = {
            "query": query,
            "limit": limit,
            "version": "v2",
            "output_format": "v1.1",
        }

        if user_id := self.config.get("user_id", ""):
            params["user_id"] = user_id

        memory_type_map = {
            "short_term": {"type": "short_term"},
            "long_term": {"type": "long_term"},
            "entities": {"type": "entity"},
            "external": {"type": "external"},
        }

        if self.memory_type in memory_type_map:
            params["metadata"] = memory_type_map[self.memory_type]
            if self.memory_type == "short_term":
                params["run_id"] = self.mem0_run_id

        # Filters are always built internally from the configured
        # user/agent/run IDs; callers cannot supply their own.

        params["filters"] = self._create_filter_for_search()
        params["threshold"] = score_threshold

        if isinstance(self.memory, Memory):
            # The local OSS Memory API does not accept these hosted-client
            # params, so strip them before the call.
            del params["metadata"], params["version"], params["output_format"]
            if params.get("run_id"):
                del params["run_id"]

        results = self.memory.search(**params)

        # This makes it compatible for Contextual Memory to retrieve
        for result in results["results"]:
            result["content"] = result["memory"]

        return [r for r in results["results"]]

    def reset(self):
        """Clear all memories from the underlying Mem0 store."""
        if self.memory:
            self.memory.reset()

    def _sanitize_role(self, role: str) -> str:
        """
        Sanitizes agent roles to ensure valid directory names.
        """
        return role.replace("\n", "").replace(" ", "_").replace("/", "_")

    def _get_agent_name(self) -> str:
        """Build a collection-safe identifier from the crew's agent roles."""
        if not self.crew:
            return ""

        agents = self.crew.agents
        agents = [self._sanitize_role(agent.role) for agent in agents]
        agents = "_".join(agents)
        return _sanitize_collection_name(
            name=agents, max_collection_length=MAX_AGENT_ID_LENGTH_MEM0
        )

    def _get_assistant_message(self, text: str) -> str:
        """Return the text after a "Final Answer:" marker, or ``text`` unchanged."""
        marker = "Final Answer:"
        if marker in text:
            return text.split(marker, 1)[1].strip()
        return text

    def _get_user_message(self, text: str) -> str:
        """Return the text after a "User message:" label, or ``text`` unchanged."""
        pattern = r"User message:\s*(.*)"
        match = re.search(pattern, text)
        if match:
            return match.group(1).strip()
        return text
|
||||
@@ -1,315 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import traceback
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
import warnings
|
||||
|
||||
from crewai.rag.chromadb.config import ChromaDBConfig
|
||||
from crewai.rag.chromadb.types import ChromaEmbeddingFunctionWrapper
|
||||
from crewai.rag.config.utils import get_rag_client
|
||||
from crewai.rag.embeddings.factory import build_embedder
|
||||
from crewai.rag.factory import create_client
|
||||
from crewai.rag.storage.base_rag_storage import BaseRAGStorage
|
||||
from crewai.utilities.constants import MAX_FILE_NAME_LENGTH
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.crew import Crew
|
||||
from crewai.rag.core.base_client import BaseClient
|
||||
from crewai.rag.core.base_embeddings_provider import BaseEmbeddingsProvider
|
||||
from crewai.rag.embeddings.types import ProviderSpec
|
||||
from crewai.rag.types import BaseRecord
|
||||
|
||||
|
||||
class RAGStorage(BaseRAGStorage):
    """
    Extends Storage to handle embeddings for memory entries, improving
    search efficiency.

    Entries are kept in a ChromaDB collection named from the memory type
    and (when a crew is given) its sanitized agent roles.
    """

    def __init__(
        self,
        type: str,
        allow_reset: bool = True,
        embedder_config: ProviderSpec | BaseEmbeddingsProvider[Any] | None = None,
        crew: Crew | None = None,
        path: str | None = None,
    ) -> None:
        """Initialize RAG-backed memory storage.

        Args:
            type: Memory type; used in collection and storage-file names.
            allow_reset: Whether resetting this storage is permitted.
            embedder_config: Optional embedder spec/provider. When given, a
                dedicated ChromaDB client is created for this instance;
                otherwise the global RAG client is used lazily.
            crew: Optional crew; its agent roles are folded into names.
            path: Optional persist-directory override for ChromaDB.

        Raises:
            ValueError: If the configured embedder fails its probe call.
        """
        super().__init__(type, allow_reset, embedder_config, crew)
        crew_agents = crew.agents if crew else []
        sanitized_roles = [self._sanitize_role(agent.role) for agent in crew_agents]
        agents_str = "_".join(sanitized_roles)
        self.agents = agents_str
        self.storage_file_name = self._build_storage_file_name(type, agents_str)

        self.type = type
        self._client: BaseClient | None = None

        self.allow_reset = allow_reset
        self.path = path

        # Silence a noisy Pydantic deprecation warning emitted by chromadb.
        warnings.filterwarnings(
            "ignore",
            message=r".*'model_fields'.*is deprecated.*",
            module=r"^chromadb(\.|$)",
        )

        if self.embedder_config:
            embedding_function = build_embedder(self.embedder_config)

            # Probe the embedder once so misconfiguration fails fast here
            # rather than on the first save/search.
            try:
                _ = embedding_function(["test"])
            except Exception as e:
                provider = (
                    self.embedder_config["provider"]
                    if isinstance(self.embedder_config, dict)
                    else self.embedder_config.__class__.__name__.replace(
                        "Provider", ""
                    ).lower()
                )
                raise ValueError(
                    f"Failed to initialize embedder. Please check your configuration or connection.\n"
                    f"Provider: {provider}\n"
                    f"Error: {e}"
                ) from e

            # Optional batch_size may be nested under embedder_config["config"].
            batch_size = None
            if (
                isinstance(self.embedder_config, dict)
                and "config" in self.embedder_config
            ):
                nested_config = self.embedder_config["config"]
                if isinstance(nested_config, dict):
                    batch_size = nested_config.get("batch_size")

            if batch_size is not None:
                config = ChromaDBConfig(
                    embedding_function=cast(
                        ChromaEmbeddingFunctionWrapper, embedding_function
                    ),
                    batch_size=cast(int, batch_size),
                )
            else:
                config = ChromaDBConfig(
                    embedding_function=cast(
                        ChromaEmbeddingFunctionWrapper, embedding_function
                    )
                )

            if self.path:
                config.settings.persist_directory = self.path

            self._client = create_client(config)

    def _get_client(self) -> BaseClient:
        """Get the appropriate client - instance-specific or global."""
        return self._client if self._client else get_rag_client()

    def _sanitize_role(self, role: str) -> str:
        """
        Sanitizes agent roles to ensure valid directory names.
        """
        return role.replace("\n", "").replace(" ", "_").replace("/", "_")

    @staticmethod
    def _build_storage_file_name(type: str, file_name: str) -> str:
        """
        Ensures file name does not exceed max allowed by OS
        """
        base_path = f"{db_storage_path()}/{type}"

        if len(file_name) > MAX_FILE_NAME_LENGTH:
            logging.warning(
                f"Trimming file name from {len(file_name)} to {MAX_FILE_NAME_LENGTH} characters."
            )
            file_name = file_name[:MAX_FILE_NAME_LENGTH]

        return f"{base_path}/{file_name}"

    def save(self, value: Any, metadata: dict[str, Any]) -> None:
        """Save a value to storage.

        Errors are logged rather than raised (memory saves are best-effort).

        Args:
            value: The value to save.
            metadata: Metadata to associate with the value.
        """
        try:
            client = self._get_client()
            # Collection name encodes memory type plus (optionally) agents.
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            client.get_or_create_collection(collection_name=collection_name)

            document: BaseRecord = {"content": value}
            if metadata:
                document["metadata"] = metadata

            # Optional batch_size from nested embedder config.
            batch_size = None
            if (
                self.embedder_config
                and isinstance(self.embedder_config, dict)
                and "config" in self.embedder_config
            ):
                nested_config = self.embedder_config["config"]
                if isinstance(nested_config, dict):
                    batch_size = nested_config.get("batch_size")

            if batch_size is not None:
                client.add_documents(
                    collection_name=collection_name,
                    documents=[document],
                    batch_size=cast(int, batch_size),
                )
            else:
                client.add_documents(
                    collection_name=collection_name, documents=[document]
                )
        except Exception as e:
            logging.error(
                f"Error during {self.type} save: {e!s}\n{traceback.format_exc()}"
            )

    async def asave(self, value: Any, metadata: dict[str, Any]) -> None:
        """Save a value to storage asynchronously.

        Errors are logged rather than raised (memory saves are best-effort).

        Args:
            value: The value to save.
            metadata: Metadata to associate with the value.
        """
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            await client.aget_or_create_collection(collection_name=collection_name)

            document: BaseRecord = {"content": value}
            if metadata:
                document["metadata"] = metadata

            # Optional batch_size from nested embedder config.
            batch_size = None
            if (
                self.embedder_config
                and isinstance(self.embedder_config, dict)
                and "config" in self.embedder_config
            ):
                nested_config = self.embedder_config["config"]
                if isinstance(nested_config, dict):
                    batch_size = nested_config.get("batch_size")

            if batch_size is not None:
                await client.aadd_documents(
                    collection_name=collection_name,
                    documents=[document],
                    batch_size=cast(int, batch_size),
                )
            else:
                await client.aadd_documents(
                    collection_name=collection_name, documents=[document]
                )
        except Exception as e:
            logging.error(
                f"Error during {self.type} async save: {e!s}\n{traceback.format_exc()}"
            )

    def search(
        self,
        query: str,
        limit: int = 5,
        filter: dict[str, Any] | None = None,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search for matching entries in storage.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            filter: Optional metadata filter.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching entries; empty list on error.
        """
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            return client.search(
                collection_name=collection_name,
                query=query,
                limit=limit,
                metadata_filter=filter,
                score_threshold=score_threshold,
            )
        except Exception as e:
            logging.error(
                f"Error during {self.type} search: {e!s}\n{traceback.format_exc()}"
            )
            return []

    async def asearch(
        self,
        query: str,
        limit: int = 5,
        filter: dict[str, Any] | None = None,
        score_threshold: float = 0.6,
    ) -> list[Any]:
        """Search for matching entries in storage asynchronously.

        Args:
            query: The search query.
            limit: Maximum number of results to return.
            filter: Optional metadata filter.
            score_threshold: Minimum similarity score for results.

        Returns:
            List of matching entries; empty list on error.
        """
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            return await client.asearch(
                collection_name=collection_name,
                query=query,
                limit=limit,
                metadata_filter=filter,
                score_threshold=score_threshold,
            )
        except Exception as e:
            logging.error(
                f"Error during {self.type} async search: {e!s}\n{traceback.format_exc()}"
            )
            return []

    def reset(self) -> None:
        """Delete this storage's collection.

        Raises:
            Exception: On any failure other than a readonly database or an
                already-missing collection (both treated as already reset).
        """
        try:
            client = self._get_client()
            collection_name = (
                f"memory_{self.type}_{self.agents}"
                if self.agents
                else f"memory_{self.type}"
            )
            client.delete_collection(collection_name=collection_name)
        except Exception as e:
            if "attempt to write a readonly database" in str(
                e
            ) or "does not exist" in str(e):
                # Ignore readonly database and collection not found errors (already reset)
                pass
            else:
                raise Exception(
                    f"An error occurred while resetting the {self.type} memory: {e}"
                ) from e
|
||||
369
lib/crewai/src/crewai/memory/types.py
Normal file
369
lib/crewai/src/crewai/memory/types.py
Normal file
@@ -0,0 +1,369 @@
|
||||
"""Data types for the unified memory system."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
from uuid import uuid4
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
# When searching the vector store, we ask for more results than the caller
|
||||
# requested so that post-search steps (composite scoring, deduplication,
|
||||
# category filtering) have enough candidates to fill the final result set.
|
||||
# For example, if the caller asks for 10 results and this is 2, we fetch 20
|
||||
# from the vector store and then trim down after scoring.
|
||||
_RECALL_OVERSAMPLE_FACTOR = 2
|
||||
|
||||
|
||||
class MemoryRecord(BaseModel):
    """A single memory entry stored in the memory system.

    Records carry their own scoring inputs (importance, created_at) used by
    compute_composite_score at recall time.
    """

    id: str = Field(
        default_factory=lambda: str(uuid4()),
        description="Unique identifier for the memory record.",
    )
    content: str = Field(description="The textual content of the memory.")
    scope: str = Field(
        default="/",
        description="Hierarchical path organizing the memory (e.g. /company/team/user).",
    )
    categories: list[str] = Field(
        default_factory=list,
        description="Categories or tags for the memory.",
    )
    metadata: dict[str, Any] = Field(
        default_factory=dict,
        description="Arbitrary metadata associated with the memory.",
    )
    importance: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description="Importance score from 0.0 to 1.0, affects retrieval ranking.",
    )
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12 and
    # produces naive timestamps; compute_composite_score subtracts these
    # from utcnow() as well, so both must stay naive-UTC together.
    created_at: datetime = Field(
        default_factory=datetime.utcnow,
        description="When the memory was created.",
    )
    last_accessed: datetime = Field(
        default_factory=datetime.utcnow,
        description="When the memory was last accessed.",
    )
    embedding: list[float] | None = Field(
        default=None,
        description="Vector embedding for semantic search. Computed on save if not provided.",
    )
    source: str | None = Field(
        default=None,
        description=(
            "Origin of this memory (e.g. user ID, session ID). "
            "Used for provenance tracking and privacy filtering."
        ),
    )
    private: bool = Field(
        default=False,
        description=(
            "If True, this memory is only visible to recall requests from the same source, "
            "or when include_private=True is passed."
        ),
    )
|
||||
|
||||
|
||||
class MemoryMatch(BaseModel):
    """A memory record with relevance score from a recall operation.

    Produced by recall; the score is the composite computed by
    compute_composite_score, not the raw vector-search similarity.
    """

    record: MemoryRecord = Field(description="The matched memory record.")
    score: float = Field(
        description="Combined relevance score (semantic, recency, importance).",
    )
    match_reasons: list[str] = Field(
        default_factory=list,
        description="Reasons for the match (e.g. semantic, recency, importance).",
    )
    evidence_gaps: list[str] = Field(
        default_factory=list,
        description="Information the system looked for but could not find.",
    )
|
||||
|
||||
|
||||
class ScopeInfo(BaseModel):
    """Information about a scope in the memory hierarchy.

    A scope is a slash-delimited path (like a directory); this model
    summarizes the records stored at that path.
    """

    path: str = Field(description="The scope path (e.g. /company/engineering).")
    record_count: int = Field(
        default=0,
        description="Number of records in this scope (including subscopes if applicable).",
    )
    categories: list[str] = Field(
        default_factory=list,
        description="Categories used in this scope.",
    )
    oldest_record: datetime | None = Field(
        default=None,
        description="Timestamp of the oldest record in this scope.",
    )
    newest_record: datetime | None = Field(
        default=None,
        description="Timestamp of the newest record in this scope.",
    )
    child_scopes: list[str] = Field(
        default_factory=list,
        description="Immediate child scope paths.",
    )
|
||||
|
||||
|
||||
class MemoryConfig(BaseModel):
    """Internal configuration for memory scoring, consolidation, and recall behavior.

    Users configure these values via ``Memory(...)`` keyword arguments.
    This model is not part of the public API -- it exists so that the config
    can be passed as a single object to RecallFlow, EncodingFlow, and
    compute_composite_score.
    """

    # -- Composite score weights --
    # The recall composite score is:
    #   semantic_weight * similarity + recency_weight * decay + importance_weight * importance
    # These should sum to ~1.0 for intuitive 0-1 scoring (defaults do: 0.5 + 0.3 + 0.2).

    recency_weight: float = Field(
        default=0.3,
        ge=0.0,
        le=1.0,
        description=(
            "Weight for recency in the composite relevance score. "
            "Higher values favor recently created memories over older ones."
        ),
    )
    semantic_weight: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description=(
            "Weight for semantic similarity in the composite relevance score. "
            "Higher values make recall rely more on vector-search closeness."
        ),
    )
    importance_weight: float = Field(
        default=0.2,
        ge=0.0,
        le=1.0,
        description=(
            "Weight for explicit importance in the composite relevance score. "
            "Higher values make high-importance memories surface more often."
        ),
    )
    recency_half_life_days: int = Field(
        default=30,
        ge=1,
        description=(
            "Number of days for the recency score to halve (exponential decay). "
            "Lower values make memories lose relevance faster; higher values "
            "keep old memories relevant longer."
        ),
    )

    # -- Consolidation (on save) --

    consolidation_threshold: float = Field(
        default=0.85,
        ge=0.0,
        le=1.0,
        description=(
            "Semantic similarity above which the consolidation flow is triggered "
            "when saving new content. The LLM then decides whether to merge, "
            "update, or delete overlapping records. Set to 1.0 to disable."
        ),
    )
    consolidation_limit: int = Field(
        default=5,
        ge=1,
        description=(
            "Maximum number of existing records to compare against when checking "
            "for consolidation during a save."
        ),
    )
    batch_dedup_threshold: float = Field(
        default=0.98,
        ge=0.0,
        le=1.0,
        description=(
            "Cosine similarity threshold for dropping near-exact duplicates "
            "within a single remember_many() batch. Only items with similarity "
            ">= this value are dropped. Set very high (0.98) to avoid "
            "discarding useful memories that are merely similar."
        ),
    )

    # -- Save defaults --

    default_importance: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description=(
            "Importance assigned to new memories when no explicit value is given "
            "and the LLM analysis path is skipped (i.e. all fields provided by "
            "the caller)."
        ),
    )

    # -- Recall depth control --
    # The RecallFlow router uses these thresholds to decide between returning
    # results immediately ("synthesize") and doing an extra LLM-driven
    # exploration round ("explore_deeper").

    confidence_threshold_high: float = Field(
        default=0.8,
        ge=0.0,
        le=1.0,
        description=(
            "When recall confidence is at or above this value, results are "
            "returned directly without deeper exploration."
        ),
    )
    confidence_threshold_low: float = Field(
        default=0.5,
        ge=0.0,
        le=1.0,
        description=(
            "When recall confidence is below this value and exploration budget "
            "remains, a deeper LLM-driven exploration round is triggered."
        ),
    )
    complex_query_threshold: float = Field(
        default=0.7,
        ge=0.0,
        le=1.0,
        description=(
            "For queries classified as 'complex' by the LLM, deeper exploration "
            "is triggered when confidence is below this value."
        ),
    )
    exploration_budget: int = Field(
        default=1,
        ge=0,
        description=(
            "Number of LLM-driven exploration rounds allowed during deep recall. "
            "0 means recall always uses direct vector search only; higher values "
            "allow more thorough but slower retrieval."
        ),
    )
    recall_oversample_factor: int = Field(
        default=_RECALL_OVERSAMPLE_FACTOR,
        ge=1,
        description=(
            "When searching the vector store, fetch this many times more results "
            "than the caller requested so that post-search steps (composite "
            "scoring, deduplication, category filtering) have enough candidates "
            "to fill the final result set."
        ),
    )
    query_analysis_threshold: int = Field(
        default=250,
        ge=0,
        description=(
            "Character count threshold for LLM query analysis during deep recall. "
            "Queries shorter than this are embedded directly without an LLM call "
            "to distill sub-queries or infer scopes (saving ~1-3s). Longer queries "
            "(e.g. full task descriptions) benefit from LLM distillation. "
            "Set to 0 to always use LLM analysis."
        ),
    )
|
||||
|
||||
|
||||
def embed_text(embedder: Any, text: str) -> list[float]:
    """Embed one string with *embedder* and normalise the result.

    Args:
        embedder: Callable that accepts a list of strings and returns embeddings.
        text: The text to embed.

    Returns:
        The embedding as a plain list of floats; an empty list when the
        text is blank or the embedder produced nothing.
    """
    if not text or not text.strip():
        return []

    batch = embedder([text])
    if not batch:
        return []

    vector = batch[0]
    # Normalise numpy-style arrays, plain lists, and other sequences.
    if hasattr(vector, "tolist"):
        return vector.tolist()
    if isinstance(vector, list):
        return [float(component) for component in vector]
    return list(vector)
|
||||
|
||||
|
||||
def embed_texts(embedder: Any, texts: list[str]) -> list[list[float]]:
|
||||
"""Embed multiple texts in a single API call.
|
||||
|
||||
The embedder already accepts ``list[str]``, so this just calls it once
|
||||
with the full batch and normalises the output format.
|
||||
|
||||
Args:
|
||||
embedder: Callable that accepts a list of strings and returns embeddings.
|
||||
texts: List of texts to embed.
|
||||
|
||||
Returns:
|
||||
List of embeddings, one per input text. Empty texts produce empty lists.
|
||||
"""
|
||||
if not texts:
|
||||
return []
|
||||
# Filter out empty texts, remembering their positions
|
||||
valid: list[tuple[int, str]] = [
|
||||
(i, t) for i, t in enumerate(texts) if t and t.strip()
|
||||
]
|
||||
if not valid:
|
||||
return [[] for _ in texts]
|
||||
|
||||
result = embedder([t for _, t in valid])
|
||||
embeddings: list[list[float]] = [[] for _ in texts]
|
||||
for (orig_idx, _), emb in zip(valid, result, strict=False):
|
||||
if hasattr(emb, "tolist"):
|
||||
embeddings[orig_idx] = emb.tolist()
|
||||
elif isinstance(emb, list):
|
||||
embeddings[orig_idx] = [float(x) for x in emb]
|
||||
else:
|
||||
embeddings[orig_idx] = list(emb)
|
||||
return embeddings
|
||||
|
||||
|
||||
def compute_composite_score(
    record: MemoryRecord,
    semantic_score: float,
    config: MemoryConfig,
) -> tuple[float, list[str]]:
    """Blend semantic similarity, recency decay, and importance into one score.

    composite = w_semantic * semantic + w_recency * decay + w_importance * importance
    where decay = 0.5 ** (age_days / half_life_days).

    Args:
        record: The memory record (provides created_at and importance).
        semantic_score: Raw semantic similarity from vector search, in [0, 1].
        config: Weights and recency half-life.

    Returns:
        Tuple of (composite_score, match_reasons). "semantic" is always
        present; "recency" is added when decay > 0.5 and "importance" when
        record.importance > 0.5.
    """
    # Age in whole-day units, clamped so future timestamps count as age 0.
    elapsed = (datetime.utcnow() - record.created_at).total_seconds()
    age_in_days = max(elapsed / 86400.0, 0.0)
    recency_decay = 0.5 ** (age_in_days / config.recency_half_life_days)

    weighted = (
        config.semantic_weight * semantic_score
        + config.recency_weight * recency_decay
        + config.importance_weight * record.importance
    )

    match_reasons = ["semantic"]
    if recency_decay > 0.5:
        match_reasons.append("recency")
    if record.importance > 0.5:
        match_reasons.append("importance")

    return weighted, match_reasons
|
||||
838
lib/crewai/src/crewai/memory/unified_memory.py
Normal file
838
lib/crewai/src/crewai/memory/unified_memory.py
Normal file
@@ -0,0 +1,838 @@
|
||||
"""Unified Memory class: single intelligent memory with LLM analysis and pluggable storage."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from concurrent.futures import Future, ThreadPoolExecutor
|
||||
from datetime import datetime
|
||||
import threading
|
||||
import time
|
||||
from typing import Any, Literal
|
||||
|
||||
from crewai.events.event_bus import crewai_event_bus
|
||||
from crewai.events.types.memory_events import (
|
||||
MemoryQueryCompletedEvent,
|
||||
MemoryQueryFailedEvent,
|
||||
MemoryQueryStartedEvent,
|
||||
MemorySaveCompletedEvent,
|
||||
MemorySaveFailedEvent,
|
||||
MemorySaveStartedEvent,
|
||||
)
|
||||
from crewai.llms.base_llm import BaseLLM
|
||||
from crewai.memory.analyze import extract_memories_from_content
|
||||
from crewai.memory.recall_flow import RecallFlow
|
||||
from crewai.memory.storage.backend import StorageBackend
|
||||
from crewai.memory.storage.lancedb_storage import LanceDBStorage
|
||||
from crewai.memory.types import (
|
||||
MemoryConfig,
|
||||
MemoryMatch,
|
||||
MemoryRecord,
|
||||
ScopeInfo,
|
||||
compute_composite_score,
|
||||
embed_text,
|
||||
)
|
||||
|
||||
|
||||
def _default_embedder() -> Any:
    """Return the default embedder (OpenAI provider, empty config) for memory."""
    from crewai.rag.embeddings.factory import build_embedder

    default_spec: dict[str, Any] = {"provider": "openai", "config": {}}
    return build_embedder(default_spec)
|
||||
|
||||
|
||||
class Memory:
    """Unified memory: standalone, LLM-analyzed, with intelligent recall flow.

    Works without agent/crew. Uses LLM to infer scope, categories, importance on save.
    Uses RecallFlow for adaptive-depth recall. Supports scope/slice views and
    pluggable storage (LanceDB default).
    """

    def __init__(
        self,
        llm: BaseLLM | str = "gpt-4o-mini",
        storage: StorageBackend | str = "lancedb",
        embedder: Any = None,
        # -- Scoring weights --
        # These three weights control how recall results are ranked.
        # The composite score is: semantic_weight * similarity + recency_weight * decay + importance_weight * importance.
        # They should sum to ~1.0 for intuitive scoring.
        recency_weight: float = 0.3,
        semantic_weight: float = 0.5,
        importance_weight: float = 0.2,
        # How quickly old memories lose relevance. The recency score halves every
        # N days (exponential decay). Lower = faster forgetting; higher = longer relevance.
        recency_half_life_days: int = 30,
        # -- Consolidation --
        # When remembering new content, if an existing record has similarity >= this
        # threshold, the LLM is asked to merge/update/delete. Set to 1.0 to disable.
        consolidation_threshold: float = 0.85,
        # Max existing records to compare against when checking for consolidation.
        consolidation_limit: int = 5,
        # -- Save defaults --
        # Importance assigned to new memories when no explicit value is given and
        # the LLM analysis path is skipped (all fields provided by the caller).
        default_importance: float = 0.5,
        # -- Recall depth control --
        # These thresholds govern the RecallFlow router that decides between
        # returning results immediately ("synthesize") vs. doing an extra
        # LLM-driven exploration round ("explore_deeper").
        #   confidence >= confidence_threshold_high => always synthesize
        #   confidence < confidence_threshold_low   => explore deeper (if budget > 0)
        #   complex query + confidence < complex_query_threshold => explore deeper
        confidence_threshold_high: float = 0.8,
        confidence_threshold_low: float = 0.5,
        complex_query_threshold: float = 0.7,
        # How many LLM-driven exploration rounds the RecallFlow is allowed to run.
        # 0 = always shallow (vector search only); higher = more thorough but slower.
        exploration_budget: int = 1,
        # Queries shorter than this skip LLM analysis (saving ~1-3s).
        # Longer queries (full task descriptions) benefit from LLM distillation.
        query_analysis_threshold: int = 200,
    ) -> None:
        """Initialize Memory.

        Args:
            llm: LLM for analysis (model name or BaseLLM instance).
            storage: Backend: "lancedb" or a StorageBackend instance.
            embedder: Embedding callable, provider config dict, or None (default OpenAI).
            recency_weight: Weight for recency in the composite relevance score.
            semantic_weight: Weight for semantic similarity in the composite relevance score.
            importance_weight: Weight for importance in the composite relevance score.
            recency_half_life_days: Recency score halves every N days (exponential decay).
            consolidation_threshold: Similarity above which consolidation is triggered on save.
            consolidation_limit: Max existing records to compare during consolidation.
            default_importance: Default importance when not provided or inferred.
            confidence_threshold_high: Recall confidence above which results are returned directly.
            confidence_threshold_low: Recall confidence below which deeper exploration is triggered.
            complex_query_threshold: For complex queries, explore deeper below this confidence.
            exploration_budget: Number of LLM-driven exploration rounds during deep recall.
            query_analysis_threshold: Queries shorter than this skip LLM analysis during deep recall.
        """
        self._config = MemoryConfig(
            recency_weight=recency_weight,
            semantic_weight=semantic_weight,
            importance_weight=importance_weight,
            recency_half_life_days=recency_half_life_days,
            consolidation_threshold=consolidation_threshold,
            consolidation_limit=consolidation_limit,
            default_importance=default_importance,
            confidence_threshold_high=confidence_threshold_high,
            confidence_threshold_low=confidence_threshold_low,
            complex_query_threshold=complex_query_threshold,
            exploration_budget=exploration_budget,
            query_analysis_threshold=query_analysis_threshold,
        )

        # Store raw config for lazy initialization. LLM and embedder are only
        # built on first access so that Memory() never fails at construction
        # time (e.g. when auto-created by Flow without an API key set).
        self._llm_config: BaseLLM | str = llm
        self._llm_instance: BaseLLM | None = None if isinstance(llm, str) else llm
        self._embedder_config: Any = embedder
        # A dict is a provider config and stays lazy; anything else non-None
        # (e.g. a callable) is used as-is.
        self._embedder_instance: Any = (
            embedder if (embedder is not None and not isinstance(embedder, dict)) else None
        )

        # Storage is initialized eagerly (local, no API key needed).
        # A non-"lancedb" string is treated as a filesystem path for LanceDB.
        if storage == "lancedb":
            self._storage = LanceDBStorage()
        elif isinstance(storage, str):
            self._storage = LanceDBStorage(path=storage)
        else:
            self._storage = storage

        # Background save queue. max_workers=1 serializes saves to avoid
        # concurrent storage mutations (two saves finding the same similar
        # record and both trying to update/delete it). Within each save,
        # the parallel LLM calls still run on their own thread pool.
        self._save_pool = ThreadPoolExecutor(
            max_workers=1, thread_name_prefix="memory-save"
        )
        # Futures of in-flight background saves; guarded by _pending_lock.
        self._pending_saves: list[Future[Any]] = []
        self._pending_lock = threading.Lock()

    # Docs link surfaced in error messages raised by the lazy initializers.
    _MEMORY_DOCS_URL = "https://docs.crewai.com/concepts/memory"

    @property
    def _llm(self) -> BaseLLM:
        """Lazy LLM initialization -- only created when first needed."""
        if self._llm_instance is None:
            from crewai.llm import LLM

            try:
                self._llm_instance = LLM(model=self._llm_config)
            except Exception as e:
                # Re-raise with actionable remediation steps instead of a bare
                # provider error.
                raise RuntimeError(
                    f"Memory requires an LLM for analysis but initialization failed: {e}\n\n"
                    "To fix this, do one of the following:\n"
                    ' - Set OPENAI_API_KEY for the default model (gpt-4o-mini)\n'
                    ' - Pass a different model: Memory(llm="anthropic/claude-3-haiku-20240307")\n'
                    ' - Pass any LLM instance: Memory(llm=LLM(model="your-model"))\n'
                    " - To skip LLM analysis, pass all fields explicitly to remember()\n"
                    ' and use depth="shallow" for recall.\n\n'
                    f"Docs: {self._MEMORY_DOCS_URL}"
                ) from e
        return self._llm_instance

    @property
    def _embedder(self) -> Any:
        """Lazy embedder initialization -- only created when first needed."""
        if self._embedder_instance is None:
            try:
                if isinstance(self._embedder_config, dict):
                    from crewai.rag.embeddings.factory import build_embedder

                    self._embedder_instance = build_embedder(self._embedder_config)
                else:
                    # Config was None (callables are captured eagerly in __init__).
                    self._embedder_instance = _default_embedder()
            except Exception as e:
                raise RuntimeError(
                    f"Memory requires an embedder for vector search but initialization failed: {e}\n\n"
                    "To fix this, do one of the following:\n"
                    " - Set OPENAI_API_KEY for the default embedder (text-embedding-3-small)\n"
                    ' - Pass a different embedder: Memory(embedder={{"provider": "google", "config": {{...}}}})\n'
                    " - Pass a callable: Memory(embedder=my_embedding_function)\n\n"
                    f"Docs: {self._MEMORY_DOCS_URL}"
                ) from e
        return self._embedder_instance

    # ------------------------------------------------------------------
    # Background write queue
    # ------------------------------------------------------------------

    def _submit_save(self, fn: Any, *args: Any, **kwargs: Any) -> Future[Any]:
        """Submit a save operation to the background thread pool.

        The future is tracked so that ``drain_writes()`` can wait for it.
        If the pool has been shut down (e.g. after ``close()``), the save
        runs synchronously as a fallback so late saves still succeed.

        Args:
            fn: Callable performing the save.
            *args: Positional arguments forwarded to ``fn``.
            **kwargs: Keyword arguments forwarded to ``fn``.

        Returns:
            The tracked (or already-resolved, on fallback) Future.
        """
        try:
            future: Future[Any] = self._save_pool.submit(fn, *args, **kwargs)
        except RuntimeError:
            # Pool shut down -- run synchronously as fallback
            future = Future()
            try:
                result = fn(*args, **kwargs)
                future.set_result(result)
            except Exception as exc:
                future.set_exception(exc)
            # Already complete: no need to track or attach the done callback.
            return future
        with self._pending_lock:
            self._pending_saves.append(future)
        future.add_done_callback(self._on_save_done)
        return future

    def _on_save_done(self, future: Future[Any]) -> None:
        """Remove a completed future from the pending list and emit failure event if needed.

        This callback must never raise -- it runs from the thread pool's
        internal machinery during process shutdown when executors and the
        event bus may already be closed.
        """
        try:
            with self._pending_lock:
                try:
                    self._pending_saves.remove(future)
                except ValueError:
                    pass  # already removed
            exc = future.exception()
            if exc is not None:
                crewai_event_bus.emit(
                    self,
                    MemorySaveFailedEvent(
                        value="background save",
                        error=str(exc),
                        source_type="unified_memory",
                    ),
                )
        except Exception:  # noqa: S110
            pass  # swallow everything during shutdown

    def drain_writes(self) -> None:
        """Block until all pending background saves have completed.

        Called automatically by ``recall()`` and should be called by the
        crew at shutdown to ensure no saves are lost.
        """
        # Snapshot under the lock; waiting on the futures happens outside
        # the lock so completing saves can deregister themselves.
        with self._pending_lock:
            pending = list(self._pending_saves)
        for future in pending:
            future.result()  # blocks until done; re-raises exceptions

    def close(self) -> None:
        """Drain pending saves and shut down the background thread pool."""
        self.drain_writes()
        self._save_pool.shutdown(wait=True)

    def _encode_batch(
        self,
        contents: list[str],
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
        source: str | None = None,
        private: bool = False,
    ) -> list[MemoryRecord]:
        """Run the batch EncodingFlow for one or more items. No event emission.

        This is the core encoding logic shared by ``remember()`` and
        ``remember_many()``. Events are managed by the calling method.

        Returns:
            Records produced by the flow, excluding dropped items.
        """
        from crewai.memory.encoding_flow import EncodingFlow

        flow = EncodingFlow(
            storage=self._storage,
            llm=self._llm,
            embedder=self._embedder,
            config=self._config,
        )
        # Shared per-call fields (scope, metadata, ...) are fanned out to
        # every item in the batch.
        items_input = [
            {
                "content": c,
                "scope": scope,
                "categories": categories,
                "metadata": metadata,
                "importance": importance,
                "source": source,
                "private": private,
            }
            for c in contents
        ]
        flow.kickoff(inputs={"items": items_input})
        return [
            item.result_record
            for item in flow.state.items
            if not item.dropped and item.result_record is not None
        ]

    def remember(
        self,
        content: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
        source: str | None = None,
        private: bool = False,
        agent_role: str | None = None,
    ) -> MemoryRecord:
        """Store a single item in memory (synchronous).

        Routes through the same serialized save pool as ``remember_many``
        to prevent races, but blocks until the save completes so the caller
        gets the ``MemoryRecord`` back immediately.

        Args:
            content: Text to remember.
            scope: Optional scope path; inferred if None.
            categories: Optional categories; inferred if None.
            metadata: Optional metadata; merged with LLM-extracted if inferred.
            importance: Optional importance 0-1; inferred if None.
            source: Optional provenance identifier (e.g. user ID, session ID).
            private: If True, only visible to recall from the same source.
            agent_role: Optional agent role for event metadata.

        Returns:
            The created MemoryRecord.

        Raises:
            Exception: On save failure (events emitted).
        """
        _source_type = "unified_memory"
        try:
            crewai_event_bus.emit(
                self,
                MemorySaveStartedEvent(
                    value=content,
                    metadata=metadata,
                    source_type=_source_type,
                ),
            )
            start = time.perf_counter()

            # Submit through the save pool for proper serialization,
            # then immediately wait for the result.
            future = self._submit_save(
                self._encode_batch,
                [content], scope, categories, metadata, importance, source, private,
            )
            records = future.result()
            # NOTE(review): returns None when the encoder drops the item,
            # despite the MemoryRecord return annotation -- confirm callers
            # handle a None result.
            record = records[0] if records else None

            elapsed_ms = (time.perf_counter() - start) * 1000
            crewai_event_bus.emit(
                self,
                MemorySaveCompletedEvent(
                    value=content,
                    metadata=metadata or {},
                    agent_role=agent_role,
                    save_time_ms=elapsed_ms,
                    source_type=_source_type,
                ),
            )
            return record
        except Exception as e:
            crewai_event_bus.emit(
                self,
                MemorySaveFailedEvent(
                    value=content,
                    metadata=metadata,
                    error=str(e),
                    source_type=_source_type,
                ),
            )
            raise

    def remember_many(
        self,
        contents: list[str],
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
        source: str | None = None,
        private: bool = False,
        agent_role: str | None = None,
    ) -> list[MemoryRecord]:
        """Store multiple items in memory (non-blocking).

        The encoding pipeline runs in a background thread. This method
        returns immediately so the caller (e.g. agent) is not blocked.
        A ``MemorySaveStartedEvent`` is emitted immediately; the
        ``MemorySaveCompletedEvent`` is emitted when the background
        save finishes.

        Any subsequent ``recall()`` call will automatically wait for
        pending saves to complete before searching (read barrier).

        Args:
            contents: List of text items to remember.
            scope: Optional scope applied to all items.
            categories: Optional categories applied to all items.
            metadata: Optional metadata applied to all items.
            importance: Optional importance applied to all items.
            source: Optional provenance identifier applied to all items.
            private: Privacy flag applied to all items.
            agent_role: Optional agent role for event metadata.

        Returns:
            Empty list (records are not available until the background save completes).
        """
        if not contents:
            return []

        self._submit_save(
            self._background_encode_batch,
            contents, scope, categories, metadata,
            importance, source, private, agent_role,
        )
        return []

    def _background_encode_batch(
        self,
        contents: list[str],
        scope: str | None,
        categories: list[str] | None,
        metadata: dict[str, Any] | None,
        importance: float | None,
        source: str | None,
        private: bool,
        agent_role: str | None,
    ) -> list[MemoryRecord]:
        """Run the encoding pipeline in a background thread with event emission.

        Both started and completed events are emitted here (in the background
        thread) so they pair correctly on the event bus scope stack.

        All ``emit`` calls are wrapped in try/except to handle the case where
        the event bus shuts down before the background save finishes (e.g.
        during process exit).
        """
        try:
            crewai_event_bus.emit(
                self,
                MemorySaveStartedEvent(
                    value=f"{len(contents)} memories (background)",
                    metadata=metadata,
                    source_type="unified_memory",
                ),
            )
        except RuntimeError:
            pass  # event bus shut down during process exit

        try:
            start = time.perf_counter()
            records = self._encode_batch(
                contents, scope, categories, metadata, importance, source, private
            )
            elapsed_ms = (time.perf_counter() - start) * 1000
        except RuntimeError:
            # The encoding pipeline uses asyncio.run() -> to_thread() internally.
            # If the process is shutting down, the default executor is closed and
            # to_thread raises "cannot schedule new futures after shutdown".
            # Silently abandon the save -- the process is exiting anyway.
            return []

        try:
            crewai_event_bus.emit(
                self,
                MemorySaveCompletedEvent(
                    value=f"{len(records)} memories saved",
                    metadata=metadata or {},
                    agent_role=agent_role,
                    save_time_ms=elapsed_ms,
                    source_type="unified_memory",
                ),
            )
        except RuntimeError:
            pass  # event bus shut down during process exit
        return records

    def extract_memories(self, content: str) -> list[str]:
        """Extract discrete memories from a raw content blob using the LLM.

        This is a pure helper -- it does NOT store anything.
        Call remember() on each returned string to persist them.

        Args:
            content: Raw text (e.g. task + result dump).

        Returns:
            List of short, self-contained memory statements.
        """
        return extract_memories_from_content(content, self._llm)

    def recall(
        self,
        query: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        limit: int = 10,
        depth: Literal["shallow", "deep"] = "deep",
        source: str | None = None,
        include_private: bool = False,
    ) -> list[MemoryMatch]:
        """Retrieve relevant memories.

        ``shallow`` embeds the query directly and runs a single vector search.
        ``deep`` (default) uses the RecallFlow: the LLM distills the query into
        targeted sub-queries, selects scopes, searches in parallel, and applies
        confidence-based routing for optional deeper exploration.

        Args:
            query: Natural language query.
            scope: Optional scope prefix to search within.
            categories: Optional category filter.
            limit: Max number of results.
            depth: "shallow" for direct vector search, "deep" for intelligent flow.
            source: Optional provenance filter. Private records are only visible
                when this matches the record's source.
            include_private: If True, all private records are visible regardless of source.

        Returns:
            List of MemoryMatch, ordered by relevance.
        """
        # Read barrier: wait for any pending background saves to finish
        # so that the search sees all persisted records.
        self.drain_writes()

        _source = "unified_memory"
        try:
            crewai_event_bus.emit(
                self,
                MemoryQueryStartedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=None,
                    source_type=_source,
                ),
            )
            start = time.perf_counter()

            if depth == "shallow":
                embedding = embed_text(self._embedder, query)
                if not embedding:
                    # Embedder returned nothing usable -- no search possible.
                    results: list[MemoryMatch] = []
                else:
                    raw = self._storage.search(
                        embedding,
                        scope_prefix=scope,
                        categories=categories,
                        limit=limit,
                        min_score=0.0,
                    )
                    # Privacy filter
                    if not include_private:
                        raw = [
                            (r, s) for r, s in raw
                            if not r.private or r.source == source
                        ]
                    # Re-rank raw similarity with recency and importance.
                    results = []
                    for r, s in raw:
                        composite, reasons = compute_composite_score(
                            r, s, self._config
                        )
                        results.append(
                            MemoryMatch(
                                record=r,
                                score=composite,
                                match_reasons=reasons,
                            )
                        )
                    results.sort(key=lambda m: m.score, reverse=True)
            else:
                flow = RecallFlow(
                    storage=self._storage,
                    llm=self._llm,
                    embedder=self._embedder,
                    config=self._config,
                )
                flow.kickoff(
                    inputs={
                        "query": query,
                        "scope": scope,
                        "categories": categories or [],
                        "limit": limit,
                        "source": source,
                        "include_private": include_private,
                    }
                )
                results = flow.state.final_results

            # Update last_accessed for recalled records
            if results:
                try:
                    # touch_records is optional on the backend interface.
                    touch = getattr(self._storage, "touch_records", None)
                    if touch is not None:
                        touch([m.record.id for m in results])
                except Exception:  # noqa: S110
                    pass  # Non-critical: don't fail recall because of touch

            elapsed_ms = (time.perf_counter() - start) * 1000
            crewai_event_bus.emit(
                self,
                MemoryQueryCompletedEvent(
                    query=query,
                    results=results,
                    limit=limit,
                    score_threshold=None,
                    query_time_ms=elapsed_ms,
                    source_type=_source,
                ),
            )
            return results
        except Exception as e:
            crewai_event_bus.emit(
                self,
                MemoryQueryFailedEvent(
                    query=query,
                    limit=limit,
                    score_threshold=None,
                    error=str(e),
                    source_type=_source,
                ),
            )
            raise

    def forget(
        self,
        scope: str | None = None,
        categories: list[str] | None = None,
        older_than: datetime | None = None,
        metadata_filter: dict[str, Any] | None = None,
        record_ids: list[str] | None = None,
    ) -> int:
        """Delete memories matching criteria.

        Args:
            scope: Optional scope prefix; delete only records under it.
            categories: Optional category filter.
            older_than: Delete only records created before this time.
            metadata_filter: Optional metadata equality filter.
            record_ids: Optional explicit record IDs to delete.

        Returns:
            Number of records deleted.
        """
        return self._storage.delete(
            scope_prefix=scope,
            categories=categories,
            record_ids=record_ids,
            older_than=older_than,
            metadata_filter=metadata_filter,
        )

    def update(
        self,
        record_id: str,
        content: str | None = None,
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
    ) -> MemoryRecord:
        """Update an existing memory record by ID.

        Args:
            record_id: ID of the record to update.
            content: New content; re-embedded if provided.
            scope: New scope path.
            categories: New categories.
            metadata: New metadata.
            importance: New importance score.

        Returns:
            The updated MemoryRecord.

        Raises:
            ValueError: If the record is not found.
        """
        existing = self._storage.get_record(record_id)
        if existing is None:
            raise ValueError(f"Record not found: {record_id}")
        # NOTE(review): datetime.utcnow() is naive and deprecated since
        # Python 3.12 -- presumably consistent with how created_at is set
        # elsewhere; confirm before switching to timezone-aware datetimes.
        now = datetime.utcnow()
        updates: dict[str, Any] = {"last_accessed": now}
        if content is not None:
            updates["content"] = content
            embedding = embed_text(self._embedder, content)
            # Keep the old embedding if embedding the new content failed.
            updates["embedding"] = embedding if embedding else existing.embedding
        if scope is not None:
            updates["scope"] = scope
        if categories is not None:
            updates["categories"] = categories
        if metadata is not None:
            updates["metadata"] = metadata
        if importance is not None:
            updates["importance"] = importance
        updated = existing.model_copy(update=updates)
        self._storage.update(updated)
        return updated

    def scope(self, path: str) -> Any:
        """Return a scoped view of this memory rooted at ``path``."""
        from crewai.memory.memory_scope import MemoryScope

        return MemoryScope(memory=self, root_path=path)

    def slice(
        self,
        scopes: list[str],
        categories: list[str] | None = None,
        read_only: bool = True,
    ) -> Any:
        """Return a multi-scope view (slice) of this memory.

        Args:
            scopes: Scope paths included in the slice.
            categories: Optional category filter applied to the slice.
            read_only: If True, the slice cannot write new records.
        """
        from crewai.memory.memory_scope import MemorySlice

        return MemorySlice(
            memory=self,
            scopes=scopes,
            categories=categories,
            read_only=read_only,
        )

    def list_scopes(self, path: str = "/") -> list[str]:
        """List immediate child scopes under path."""
        return self._storage.list_scopes(path)

    def list_records(
        self, scope: str | None = None, limit: int = 200, offset: int = 0
    ) -> list[MemoryRecord]:
        """List records in a scope, newest first.

        Args:
            scope: Optional scope path prefix to filter by.
            limit: Maximum number of records to return.
            offset: Number of records to skip (for pagination).
        """
        return self._storage.list_records(scope_prefix=scope, limit=limit, offset=offset)

    def info(self, path: str = "/") -> ScopeInfo:
        """Return scope info for path."""
        return self._storage.get_scope_info(path)

    def tree(self, path: str = "/", max_depth: int = 3) -> str:
        """Return a formatted tree of scopes (string).

        Args:
            path: Root scope path to start from.
            max_depth: Maximum recursion depth into child scopes.
        """
        lines: list[str] = []

        def _walk(p: str, depth: int, prefix: str) -> None:
            # Depth-first render; at most 20 children per scope to bound output.
            if depth > max_depth:
                return
            info = self._storage.get_scope_info(p)
            lines.append(f"{prefix}{p or '/'} ({info.record_count} records)")
            for child in info.child_scopes[:20]:
                _walk(child, depth + 1, prefix + " ")

        _walk(path.rstrip("/") or "/", 0, "")
        return "\n".join(lines) if lines else f"{path or '/'} (0 records)"

    def list_categories(self, path: str | None = None) -> dict[str, int]:
        """List categories and counts; path=None means global."""
        return self._storage.list_categories(scope_prefix=path)

    def reset(self, scope: str | None = None) -> None:
        """Reset (delete all) memories in scope. None = all."""
        self._storage.reset(scope_prefix=scope)

    async def aextract_memories(self, content: str) -> list[str]:
        """Async variant of extract_memories."""
        return self.extract_memories(content)

    async def aremember(
        self,
        content: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
        source: str | None = None,
        private: bool = False,
    ) -> MemoryRecord:
        """Async remember: delegates to sync for now."""
        return self.remember(
            content,
            scope=scope,
            categories=categories,
            metadata=metadata,
            importance=importance,
            source=source,
            private=private,
        )

    async def aremember_many(
        self,
        contents: list[str],
        scope: str | None = None,
        categories: list[str] | None = None,
        metadata: dict[str, Any] | None = None,
        importance: float | None = None,
        source: str | None = None,
        private: bool = False,
        agent_role: str | None = None,
    ) -> list[MemoryRecord]:
        """Async remember_many: delegates to sync for now."""
        return self.remember_many(
            contents,
            scope=scope,
            categories=categories,
            metadata=metadata,
            importance=importance,
            source=source,
            private=private,
            agent_role=agent_role,
        )

    async def arecall(
        self,
        query: str,
        scope: str | None = None,
        categories: list[str] | None = None,
        limit: int = 10,
        depth: Literal["shallow", "deep"] = "deep",
        source: str | None = None,
        include_private: bool = False,
    ) -> list[MemoryMatch]:
        """Async recall: delegates to sync for now."""
        return self.recall(
            query,
            scope=scope,
            categories=categories,
            limit=limit,
            depth=depth,
            source=source,
            include_private=include_private,
        )
|
||||
@@ -1,5 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
from concurrent.futures import Future
|
||||
from copy import copy as shallow_copy
|
||||
import datetime
|
||||
@@ -624,11 +625,15 @@ class Task(BaseModel):
|
||||
self.end_time = datetime.datetime.now()
|
||||
|
||||
if self.callback:
|
||||
self.callback(self.output)
|
||||
cb_result = self.callback(self.output)
|
||||
if inspect.isawaitable(cb_result):
|
||||
await cb_result
|
||||
|
||||
crew = self.agent.crew # type: ignore[union-attr]
|
||||
if crew and crew.task_callback and crew.task_callback != self.callback:
|
||||
crew.task_callback(self.output)
|
||||
cb_result = crew.task_callback(self.output)
|
||||
if inspect.isawaitable(cb_result):
|
||||
await cb_result
|
||||
|
||||
if self.output_file:
|
||||
content = (
|
||||
@@ -722,11 +727,15 @@ class Task(BaseModel):
|
||||
self.end_time = datetime.datetime.now()
|
||||
|
||||
if self.callback:
|
||||
self.callback(self.output)
|
||||
cb_result = self.callback(self.output)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
crew = self.agent.crew # type: ignore[union-attr]
|
||||
if crew and crew.task_callback and crew.task_callback != self.callback:
|
||||
crew.task_callback(self.output)
|
||||
cb_result = crew.task_callback(self.output)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
if self.output_file:
|
||||
content = (
|
||||
|
||||
@@ -18,6 +18,7 @@ from pydantic import (
|
||||
BaseModel as PydanticBaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
ValidationError,
|
||||
create_model,
|
||||
field_validator,
|
||||
)
|
||||
@@ -150,14 +151,37 @@ class BaseTool(BaseModel, ABC):
|
||||
|
||||
super().model_post_init(__context)
|
||||
|
||||
def _validate_kwargs(self, kwargs: dict[str, Any]) -> dict[str, Any]:
|
||||
"""Validate keyword arguments against args_schema if present.
|
||||
|
||||
Args:
|
||||
kwargs: The keyword arguments to validate.
|
||||
|
||||
Returns:
|
||||
Validated (and possibly coerced) keyword arguments.
|
||||
|
||||
Raises:
|
||||
ValueError: If validation against args_schema fails.
|
||||
"""
|
||||
if kwargs and self.args_schema is not None and self.args_schema.model_fields:
|
||||
try:
|
||||
validated = self.args_schema.model_validate(kwargs)
|
||||
return validated.model_dump()
|
||||
except Exception as e:
|
||||
raise ValueError(
|
||||
f"Tool '{self.name}' arguments validation failed: {e}"
|
||||
) from e
|
||||
return kwargs
|
||||
|
||||
def run(
|
||||
self,
|
||||
*args: Any,
|
||||
**kwargs: Any,
|
||||
) -> Any:
|
||||
kwargs = self._validate_kwargs(kwargs)
|
||||
|
||||
result = self._run(*args, **kwargs)
|
||||
|
||||
# If _run is async, we safely run it
|
||||
if asyncio.iscoroutine(result):
|
||||
result = asyncio.run(result)
|
||||
|
||||
@@ -179,6 +203,7 @@ class BaseTool(BaseModel, ABC):
|
||||
Returns:
|
||||
The result of the tool execution.
|
||||
"""
|
||||
kwargs = self._validate_kwargs(kwargs)
|
||||
result = await self._arun(*args, **kwargs)
|
||||
self.current_usage_count += 1
|
||||
return result
|
||||
@@ -331,6 +356,8 @@ class Tool(BaseTool, Generic[P, R]):
|
||||
Returns:
|
||||
The result of the tool execution.
|
||||
"""
|
||||
kwargs = self._validate_kwargs(kwargs)
|
||||
|
||||
result = self.func(*args, **kwargs)
|
||||
|
||||
if asyncio.iscoroutine(result):
|
||||
@@ -361,6 +388,7 @@ class Tool(BaseTool, Generic[P, R]):
|
||||
Returns:
|
||||
The result of the tool execution.
|
||||
"""
|
||||
kwargs = self._validate_kwargs(kwargs)
|
||||
result = await self._arun(*args, **kwargs)
|
||||
self.current_usage_count += 1
|
||||
return result
|
||||
|
||||
136
lib/crewai/src/crewai/tools/memory_tools.py
Normal file
136
lib/crewai/src/crewai/tools/memory_tools.py
Normal file
@@ -0,0 +1,136 @@
|
||||
"""Memory tools that give agents active recall and remember capabilities."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.utilities.i18n import get_i18n
|
||||
|
||||
|
||||
class RecallMemorySchema(BaseModel):
|
||||
"""Schema for the recall memory tool."""
|
||||
|
||||
queries: list[str] = Field(
|
||||
...,
|
||||
description=(
|
||||
"One or more search queries. Pass a single item for a focused search, "
|
||||
"or multiple items to search for several things at once."
|
||||
),
|
||||
)
|
||||
scope: str | None = Field(
|
||||
default=None,
|
||||
description="Optional scope to narrow the search (e.g. /project/alpha)",
|
||||
)
|
||||
depth: str = Field(
|
||||
default="shallow",
|
||||
description="'shallow' for fast vector search, 'deep' for LLM-analyzed retrieval",
|
||||
)
|
||||
|
||||
|
||||
class RecallMemoryTool(BaseTool):
|
||||
"""Tool that lets an agent search memory for one or more queries at once."""
|
||||
|
||||
name: str = "Search memory"
|
||||
description: str = ""
|
||||
args_schema: type[BaseModel] = RecallMemorySchema
|
||||
memory: Any = Field(exclude=True)
|
||||
|
||||
def _run(
|
||||
self,
|
||||
queries: list[str] | str,
|
||||
scope: str | None = None,
|
||||
depth: str = "shallow",
|
||||
**kwargs: Any,
|
||||
) -> str:
|
||||
"""Search memory for relevant information.
|
||||
|
||||
Args:
|
||||
queries: One or more search queries (string or list of strings).
|
||||
scope: Optional scope prefix to narrow the search.
|
||||
depth: "shallow" for fast vector search, "deep" for LLM-analyzed retrieval.
|
||||
|
||||
Returns:
|
||||
Formatted string of matching memories, or a message if none found.
|
||||
"""
|
||||
if isinstance(queries, str):
|
||||
queries = [queries]
|
||||
actual_depth = depth if depth in ("shallow", "deep") else "shallow"
|
||||
|
||||
all_lines: list[str] = []
|
||||
seen_ids: set[str] = set()
|
||||
for query in queries:
|
||||
matches = self.memory.recall(query, scope=scope, limit=5, depth=actual_depth)
|
||||
for m in matches:
|
||||
if m.record.id not in seen_ids:
|
||||
seen_ids.add(m.record.id)
|
||||
all_lines.append(f"- (score={m.score:.2f}) {m.record.content}")
|
||||
|
||||
if not all_lines:
|
||||
return "No relevant memories found."
|
||||
return "Found memories:\n" + "\n".join(all_lines)
|
||||
|
||||
|
||||
class RememberSchema(BaseModel):
|
||||
"""Schema for the remember tool."""
|
||||
|
||||
contents: list[str] = Field(
|
||||
...,
|
||||
description=(
|
||||
"One or more facts, decisions, or observations to remember. "
|
||||
"Pass a single item or multiple items at once."
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
class RememberTool(BaseTool):
|
||||
"""Tool that lets an agent save one or more items to memory at once."""
|
||||
|
||||
name: str = "Save to memory"
|
||||
description: str = ""
|
||||
args_schema: type[BaseModel] = RememberSchema
|
||||
memory: Any = Field(exclude=True)
|
||||
|
||||
def _run(self, contents: list[str] | str, **kwargs: Any) -> str:
|
||||
"""Store one or more items in memory. The system infers scope, categories, and importance.
|
||||
|
||||
Args:
|
||||
contents: One or more items to remember (string or list of strings).
|
||||
|
||||
Returns:
|
||||
Confirmation with the number of items saved.
|
||||
"""
|
||||
if isinstance(contents, str):
|
||||
contents = [contents]
|
||||
if len(contents) == 1:
|
||||
record = self.memory.remember(contents[0])
|
||||
return (
|
||||
f"Saved to memory (scope={record.scope}, "
|
||||
f"importance={record.importance:.1f})."
|
||||
)
|
||||
self.memory.remember_many(contents)
|
||||
return f"Saving {len(contents)} items to memory in background."
|
||||
|
||||
|
||||
def create_memory_tools(memory: Any) -> list[BaseTool]:
|
||||
"""Create Recall and Remember tools for the given memory instance.
|
||||
|
||||
Args:
|
||||
memory: A Memory, MemoryScope, or MemorySlice instance.
|
||||
|
||||
Returns:
|
||||
List containing a RecallMemoryTool and a RememberTool.
|
||||
"""
|
||||
i18n = get_i18n()
|
||||
return [
|
||||
RecallMemoryTool(
|
||||
memory=memory,
|
||||
description=i18n.tools("recall_memory"),
|
||||
),
|
||||
RememberTool(
|
||||
memory=memory,
|
||||
description=i18n.tools("save_to_memory"),
|
||||
),
|
||||
]
|
||||
@@ -34,7 +34,11 @@
|
||||
"lite_agent_response_format": "Format your final answer according to the following OpenAPI schema: {response_format}\n\nIMPORTANT: Preserve the original content exactly as-is. Do NOT rewrite, paraphrase, or modify the meaning of the content. Only structure it to match the schema format.\n\nDo not include the OpenAPI schema in the final output. Ensure the final output does not include any code block markers like ```json or ```python.",
|
||||
"knowledge_search_query": "The original query is: {task_prompt}.",
|
||||
"knowledge_search_query_system_prompt": "Your goal is to rewrite the user query so that it is optimized for retrieval from a vector database. Consider how the query will be used to find relevant documents, and aim to make it more specific and context-aware. \n\n Do not include any other text than the rewritten query, especially any preamble or postamble and only add expected output format if its relevant to the rewritten query. \n\n Focus on the key words of the intended task and to retrieve the most relevant information. \n\n There will be some extra context provided that might need to be removed such as expected_output formats structured_outputs and other instructions.",
|
||||
"human_feedback_collapse": "Based on the following human feedback, determine which outcome best matches their intent.\n\nFeedback: {feedback}\n\nPossible outcomes: {outcomes}\n\nRespond with ONLY one of the exact outcome values listed above, nothing else."
|
||||
"human_feedback_collapse": "Based on the following human feedback, determine which outcome best matches their intent.\n\nFeedback: {feedback}\n\nPossible outcomes: {outcomes}\n\nRespond with ONLY one of the exact outcome values listed above, nothing else.",
|
||||
"hitl_pre_review_system": "You are reviewing content before a human sees it. Apply the lessons from past human feedback to improve the output. Preserve the original meaning and structure, but incorporate the corrections and preferences indicated by the lessons.",
|
||||
"hitl_pre_review_user": "Output to review:\n{output}\n\nLessons from past human feedback:\n{lessons}\n\nApply the lessons to improve the output.",
|
||||
"hitl_distill_system": "You extract generalizable lessons from human feedback on system outputs. A lesson should be a reusable rule or preference that applies to future similar outputs -- not a one-time correction specific to this exact content.\n\nExamples of good lessons:\n- Always include source citations when making factual claims\n- Use bullet points instead of long paragraphs for action items\n- Avoid technical jargon when the audience is non-technical\n\nIf the feedback is just approval (e.g. looks good, approved) or contains no generalizable guidance, return an empty list.",
|
||||
"hitl_distill_user": "Method: {method_name}\n\nSystem output:\n{output}\n\nHuman feedback:\n{feedback}\n\nExtract generalizable lessons. Return an empty list if none."
|
||||
},
|
||||
"errors": {
|
||||
"force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",
|
||||
@@ -55,7 +59,19 @@
|
||||
"name": "Add image to content",
|
||||
"description": "See image to understand its content, you can optionally ask a question about the image",
|
||||
"default_action": "Please provide a detailed description of this image, including all visual elements, context, and any notable details you can observe."
|
||||
}
|
||||
},
|
||||
"recall_memory": "Search through the team's shared memory for relevant information. Pass one or more queries to search for multiple things at once. Use this when you need to find facts, decisions, preferences, or past results that may have been stored previously.",
|
||||
"save_to_memory": "Store one or more important facts, decisions, observations, or lessons in memory so they can be recalled later by you or other agents. Pass multiple items at once when you have several things worth remembering."
|
||||
},
|
||||
"memory": {
|
||||
"query_system": "You analyze a query for searching memory.\nGiven the query and available scopes, output:\n1. keywords: Key entities or keywords that can be used to filter by category.\n2. suggested_scopes: Which available scopes are most relevant (empty for all).\n3. complexity: 'simple' or 'complex'.\n4. recall_queries: 1-3 short, targeted search phrases distilled from the query. Each should be a concise phrase optimized for semantic vector search. If the query is already short and focused, return it as-is in a single-item list. For long task descriptions, extract the distinct things worth searching for.\n5. time_filter: If the query references a time period (like 'last week', 'yesterday', 'in January'), return an ISO 8601 date string for the earliest relevant date (e.g. '2026-02-01'). Return null if no time constraint is implied.",
|
||||
"extract_memories_system": "You extract discrete, reusable memory statements from raw content (e.g. a task description and its result).\n\nFor the given content, output a list of memory statements. Each memory must:\n- Be one clear sentence or short statement\n- Be understandable without the original context\n- Capture a decision, fact, outcome, preference, lesson, or observation worth remembering\n- NOT be a vague summary or a restatement of the task description\n- NOT duplicate the same idea in different words\n\nIf there is nothing worth remembering (e.g. empty result, no decisions or facts), return an empty list.\nOutput a JSON object with a single key \"memories\" whose value is a list of strings.",
|
||||
"extract_memories_user": "Content:\n{content}\n\nExtract memory statements as described. Return structured output.",
|
||||
"query_user": "Query: {query}\n\nAvailable scopes: {available_scopes}\n{scope_desc}\n\nReturn the analysis as structured output.",
|
||||
"save_system": "You analyze content to be stored in a hierarchical memory system.\nGiven the content and the existing scopes and categories, output:\n1. suggested_scope: The best matching existing scope path, or a new path if none fit (use / for root).\n2. categories: A list of categories (reuse existing when relevant, add new ones if needed).\n3. importance: A number from 0.0 to 1.0 indicating how significant this memory is.\n4. extracted_metadata: A JSON object with any entities, dates, or topics you can extract.",
|
||||
"save_user": "Content to store:\n{content}\n\nExisting scopes: {existing_scopes}\nExisting categories: {existing_categories}\n\nReturn the analysis as structured output.",
|
||||
"consolidation_system": "You are comparing new content against existing memories to decide how to consolidate them.\n\nFor each existing memory, choose one action:\n- 'keep': The existing memory is still accurate and not redundant with the new content.\n- 'update': The existing memory should be updated with new information. Provide the updated content.\n- 'delete': The existing memory is outdated, superseded, or contradicted by the new content.\n\nAlso decide whether the new content should be inserted as a separate memory:\n- insert_new=true: The new content adds information not fully captured by existing memories (even after updates).\n- insert_new=false: The new content is fully captured by the existing memories (after any updates).\n\nBe conservative: prefer 'keep' when unsure. Only 'update' or 'delete' when there is a clear contradiction, supersession, or redundancy.",
|
||||
"consolidation_user": "New content to consider storing:\n{new_content}\n\nExisting similar memories:\n{records_summary}\n\nReturn the consolidation plan as structured output."
|
||||
},
|
||||
"reasoning": {
|
||||
"initial_plan": "You are {role}, a professional with the following background: {backstory}\n\nYour primary goal is: {goal}\n\nAs {role}, you are creating a strategic plan for a task that requires your expertise and unique perspective.",
|
||||
|
||||
@@ -3,6 +3,7 @@ from __future__ import annotations
|
||||
import asyncio
|
||||
from collections.abc import Callable, Sequence
|
||||
import concurrent.futures
|
||||
import inspect
|
||||
import json
|
||||
import re
|
||||
from typing import TYPE_CHECKING, Any, Final, Literal, TypedDict
|
||||
@@ -501,7 +502,9 @@ def handle_agent_action_core(
|
||||
- TODO: Remove messages parameter and its usage.
|
||||
"""
|
||||
if step_callback:
|
||||
step_callback(tool_result)
|
||||
cb_result = step_callback(tool_result)
|
||||
if inspect.iscoroutine(cb_result):
|
||||
asyncio.run(cb_result)
|
||||
|
||||
formatted_answer.text += f"\nObservation: {tool_result.result}"
|
||||
formatted_answer.result = tool_result.result
|
||||
@@ -1143,6 +1146,36 @@ def extract_tool_call_info(
|
||||
return None
|
||||
|
||||
|
||||
def parse_tool_call_args(
|
||||
func_args: dict[str, Any] | str,
|
||||
func_name: str,
|
||||
call_id: str,
|
||||
original_tool: Any = None,
|
||||
) -> tuple[dict[str, Any], None] | tuple[None, dict[str, Any]]:
|
||||
"""Parse tool call arguments from a JSON string or dict.
|
||||
|
||||
Returns:
|
||||
``(args_dict, None)`` on success, or ``(None, error_result)`` on
|
||||
JSON parse failure where ``error_result`` is a ready-to-return dict
|
||||
with the same shape as ``_execute_single_native_tool_call`` return values.
|
||||
"""
|
||||
if isinstance(func_args, str):
|
||||
try:
|
||||
return json.loads(func_args), None
|
||||
except json.JSONDecodeError as e:
|
||||
return None, {
|
||||
"call_id": call_id,
|
||||
"func_name": func_name,
|
||||
"result": (
|
||||
f"Error: Failed to parse tool arguments as JSON: {e}. "
|
||||
f"Please provide valid JSON arguments for the '{func_name}' tool."
|
||||
),
|
||||
"from_cache": False,
|
||||
"original_tool": original_tool,
|
||||
}
|
||||
return func_args, None
|
||||
|
||||
|
||||
def _setup_before_llm_call_hooks(
|
||||
executor_context: CrewAgentExecutor | AgentExecutor | LiteAgent | None,
|
||||
printer: Printer,
|
||||
|
||||
@@ -86,10 +86,21 @@ class I18N(BaseModel):
|
||||
"""
|
||||
return self.retrieve("tools", tool)
|
||||
|
||||
def memory(self, key: str) -> str:
|
||||
"""Retrieve a memory prompt by key.
|
||||
|
||||
Args:
|
||||
key: The key of the memory prompt to retrieve.
|
||||
|
||||
Returns:
|
||||
The memory prompt as a string.
|
||||
"""
|
||||
return self.retrieve("memory", key)
|
||||
|
||||
def retrieve(
|
||||
self,
|
||||
kind: Literal[
|
||||
"slices", "errors", "tools", "reasoning", "hierarchical_manager_agent"
|
||||
"slices", "errors", "tools", "reasoning", "hierarchical_manager_agent", "memory"
|
||||
],
|
||||
key: str,
|
||||
) -> str:
|
||||
|
||||
@@ -69,7 +69,7 @@ def create_llm(
|
||||
UNACCEPTED_ATTRIBUTES: Final[list[str]] = [
|
||||
"AWS_ACCESS_KEY_ID",
|
||||
"AWS_SECRET_ACCESS_KEY",
|
||||
"AWS_REGION_NAME",
|
||||
"AWS_DEFAULT_REGION",
|
||||
]
|
||||
|
||||
|
||||
@@ -146,7 +146,7 @@ def _llm_via_environment_or_fallback() -> LLM | None:
|
||||
unaccepted_attributes = [
|
||||
"AWS_ACCESS_KEY_ID",
|
||||
"AWS_SECRET_ACCESS_KEY",
|
||||
"AWS_REGION_NAME",
|
||||
"AWS_DEFAULT_REGION",
|
||||
]
|
||||
set_provider = model_name.partition("/")[0] if "/" in model_name else "openai"
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user