Compare commits

..

5 Commits

Author SHA1 Message Date
Joao Moura
5e08e03e43 memory improvements 2026-03-09 20:11:39 -07:00
Joao Moura
88ad3a3ac4 refactor: simplify category handling in RecallFlow
- Updated the _merged_categories method to return only caller-supplied categories, removing the previous merging logic for inferred categories. This change enhances clarity and maintains consistency in category management.
2026-03-04 09:13:44 -08:00
Joao Moura
a3ea6d280a refactor: remove inferred_categories from RecallState and update category merging logic
- Removed the inferred_categories field from RecallState to simplify state management.
- Updated the _merged_categories method to only merge caller-supplied categories, enhancing clarity in category handling.
2026-03-04 09:13:44 -08:00
Joao Moura
9d09a173e6 feat: increase memory recall limit and enhance memory context documentation
- Increased the memory recall limit in the Agent class from 15 to 20.
- Updated the memory context message to clarify the nature of the memories presented and the importance of using the Search memory tool for comprehensive results.
2026-03-04 09:13:44 -08:00
Joao Moura
c06aa4d476 feat: enhance memory recall limits and update documentation
- Increased the memory recall limit in the Agent class from 5 to 15.
- Updated the RecallMemoryTool to allow a recall limit of 20.
- Expanded the documentation for the recall_memory feature to emphasize the importance of multiple queries for comprehensive results.
2026-03-04 09:13:44 -08:00
70 changed files with 1474 additions and 8197 deletions

68
.github/workflows/docs-stale-check.yml vendored Normal file
View File

@@ -0,0 +1,68 @@
# Warns on PRs that touch core source paths without updating EXPANDED_CLAUDE.md.
name: Check EXPANDED_CLAUDE.md freshness

on:
  pull_request:
    paths:
      - "lib/crewai/src/crewai/crew.py"
      - "lib/crewai/src/crewai/task.py"
      - "lib/crewai/src/crewai/llm.py"
      - "lib/crewai/src/crewai/lite_agent.py"
      - "lib/crewai/src/crewai/agent/**"
      - "lib/crewai/src/crewai/agents/**"
      - "lib/crewai/src/crewai/flow/**"
      - "lib/crewai/src/crewai/memory/**"
      - "lib/crewai/src/crewai/tools/**"
      - "lib/crewai/src/crewai/events/**"
      - "lib/crewai/src/crewai/llms/**"
      - "lib/crewai/src/crewai/knowledge/**"
      - "lib/crewai/src/crewai/rag/**"
      - "lib/crewai/src/crewai/security/**"
      - "lib/crewai/src/crewai/a2a/**"
      - "lib/crewai/src/crewai/cli/**"
      - "lib/crewai/src/crewai/project/**"
      - "lib/crewai/src/crewai/translations/**"
      - "lib/crewai-tools/src/**"
      - "lib/crewai-files/src/**"

jobs:
  check-docs:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write  # needed to post the reminder comment
    steps:
      - uses: actions/checkout@v4
        with:
          # Full history so the three-dot diff against the base branch works.
          fetch-depth: 0
      - name: Check if EXPANDED_CLAUDE.md was updated
        id: check
        run: |
          # Escape the dot so the pattern matches the literal filename only
          # (an unescaped '.' would also match e.g. EXPANDED_CLAUDExmd).
          if git diff --name-only "origin/${{ github.base_ref }}...HEAD" | grep -q "^EXPANDED_CLAUDE\.md$"; then
            echo "updated=true" >> "$GITHUB_OUTPUT"
          else
            echo "updated=false" >> "$GITHUB_OUTPUT"
          fi
      - name: Comment on PR
        if: steps.check.outputs.updated == 'false'
        uses: actions/github-script@v7
        with:
          script: |
            // Marker lets us detect an existing comment and avoid duplicates.
            const marker = '<!-- docs-stale-check -->';
            const body = `${marker}\n**Heads up:** This PR changes core source files but \`EXPANDED_CLAUDE.md\` wasn't updated. If the changes affect architecture (new modules, changed APIs, renamed classes), consider running \`/update-docs\` in Claude Code before merging.`;
            const { data: comments } = await github.rest.issues.listComments({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.issue.number,
            });
            const existing = comments.find(c => c.body.includes(marker));
            if (!existing) {
              await github.rest.issues.createComment({
                owner: context.repo.owner,
                repo: context.repo.repo,
                issue_number: context.issue.number,
                body,
              });
            }

View File

@@ -1,127 +0,0 @@
# Builds and publishes nightly (.devYYYYMMDD) packages when there were
# commits in the last 24h, or on manual dispatch.
name: Nightly Canary Release

on:
  schedule:
    - cron: '0 6 * * *'  # daily at 6am UTC
  workflow_dispatch:

jobs:
  check:
    name: Check for new commits
    runs-on: ubuntu-latest
    permissions:
      contents: read
    outputs:
      has_changes: ${{ steps.check.outputs.has_changes }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0  # full history so --since sees recent commits
      - name: Check for commits in last 24h
        id: check
        run: |
          RECENT=$(git log --since="24 hours ago" --oneline | head -1)
          if [ -n "$RECENT" ]; then
            echo "has_changes=true" >> "$GITHUB_OUTPUT"
          else
            echo "has_changes=false" >> "$GITHUB_OUTPUT"
          fi

  build:
    name: Build nightly packages
    needs: check
    if: needs.check.outputs.has_changes == 'true' || github.event_name == 'workflow_dispatch'
    runs-on: ubuntu-latest
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - name: Install uv
        uses: astral-sh/setup-uv@v4
      - name: Stamp nightly versions
        run: |
          DATE=$(date +%Y%m%d)
          for init_file in \
            lib/crewai/src/crewai/__init__.py \
            lib/crewai-tools/src/crewai_tools/__init__.py \
            lib/crewai-files/src/crewai_files/__init__.py; do
          CURRENT=$(python -c "
          import re
          text = open('$init_file').read()
          print(re.search(r'__version__\s*=\s*\"(.*?)\"\s*$', text, re.MULTILINE).group(1))
          ")
            NIGHTLY="${CURRENT}.dev${DATE}"
            sed -i "s/__version__ = .*/__version__ = \"${NIGHTLY}\"/" "$init_file"
            echo "$init_file: $CURRENT -> $NIGHTLY"
            # Record each package's own nightly version; packages may be at
            # different versions, so the cross-package pins below must not
            # reuse whichever NIGHTLY happened to be computed last.
            case "$init_file" in
              lib/crewai/src/*) CREWAI_NIGHTLY="$NIGHTLY" ;;
              lib/crewai-tools/*) TOOLS_NIGHTLY="$NIGHTLY" ;;
            esac
          done
          # Update cross-package dependency pins to the matching nightly versions
          sed -i "s/\"crewai-tools==[^\"]*\"/\"crewai-tools==${TOOLS_NIGHTLY}\"/" lib/crewai/pyproject.toml
          sed -i "s/\"crewai==[^\"]*\"/\"crewai==${CREWAI_NIGHTLY}\"/" lib/crewai-tools/pyproject.toml
          echo "Updated cross-package dependency pins (crewai=${CREWAI_NIGHTLY}, crewai-tools=${TOOLS_NIGHTLY})"
      - name: Build packages
        run: |
          uv build --all-packages
          rm -f dist/.gitignore  # -f: don't fail if uv didn't emit one
      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: dist
          path: dist/

  publish:
    name: Publish nightly to PyPI
    needs: build
    runs-on: ubuntu-latest
    environment:
      name: pypi
      url: https://pypi.org/p/crewai
    permissions:
      id-token: write  # for PyPI trusted publishing
      contents: read
    steps:
      - uses: actions/checkout@v4
      - name: Install uv
        # NOTE(review): build job uses setup-uv@v4 while publish pins @v6 with
        # version 0.8.4 — confirm the mismatch is intentional.
        uses: astral-sh/setup-uv@v6
        with:
          version: "0.8.4"
          python-version: "3.12"
          enable-cache: false
      - name: Download artifacts
        uses: actions/download-artifact@v4
        with:
          name: dist
          path: dist
      - name: Publish to PyPI
        env:
          UV_PUBLISH_TOKEN: ${{ secrets.PYPI_API_TOKEN }}
        run: |
          # Publish each package individually so one failure doesn't block
          # the rest; report an overall failure at the end.
          failed=0
          for package in dist/*; do
            if [[ "$package" == *"crewai_devtools"* ]]; then
              echo "Skipping private package: $package"
              continue
            fi
            echo "Publishing $package"
            if ! uv publish "$package"; then
              echo "Failed to publish $package"
              failed=1
            fi
          done
          if [ $failed -eq 1 ]; then
            echo "Some packages failed to publish"
            exit 1
          fi

92
CLAUDE.md Normal file
View File

@@ -0,0 +1,92 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
CrewAI is a standalone Python framework for orchestrating autonomous AI agents. It provides two complementary paradigms: **Crews** (autonomous agent teams) and **Flows** (event-driven workflows). This is a **UV workspace monorepo**.
## Repository Structure
```
lib/
├── crewai/ # Core framework (agents, tasks, crews, flows, memory, tools, LLMs)
├── crewai-tools/ # Pre-built tool library (70+ tools)
├── crewai-files/ # Multimodal file handling (cache, processing, uploading)
└── devtools/ # Internal dev utilities (version bumping)
```
Source code lives under `lib/<package>/src/` and tests under `lib/<package>/tests/`.
## Common Commands
```bash
# Install dependencies
uv lock && uv sync
# Run all tests (parallel by default via pytest-xdist)
uv run pytest
# Run a single test file
uv run pytest lib/crewai/tests/memory/test_unified_memory.py
# Run a single test
uv run pytest lib/crewai/tests/memory/test_unified_memory.py::test_function_name -x
# Run tests for a specific workspace member
uv run pytest lib/crewai/tests/
uv run pytest lib/crewai-tools/tests/
uv run pytest lib/crewai-files/tests/
# Linting and formatting (Ruff)
uv run ruff check lib/
uv run ruff format lib/
# Type checking (strict mypy)
uv run mypy lib/
# Pre-commit hooks
pre-commit install
pre-commit run --all-files
```
**Pytest defaults** (from pyproject.toml): `--tb=short -n auto --timeout=60 --dist=loadfile --block-network --import-mode=importlib`. Network is blocked in tests; use VCR cassettes for HTTP interactions.
## Deep Dive
For detailed architecture documentation on any subsystem, use `/deep-dive <subsystem>` (e.g. `/deep-dive memory`, `/deep-dive flow`). This pulls the relevant section from **[EXPANDED_CLAUDE.md](./EXPANDED_CLAUDE.md)**, which covers all major components, execution flows, data types, and integration patterns. To regenerate it after major changes, use `/update-docs`.
## Architecture
### Core modules (`lib/crewai/src/crewai/`)
- **`crew.py`** - `Crew` class: orchestrates agents executing tasks (sequential or hierarchical process)
- **`task.py`** - `Task` class: work units with description, expected output, assigned agent, guardrails
- **`agent/core.py`** - `Agent` class: autonomous entity with role/goal/backstory, LLM, tools, memory
- **`flow/flow.py`** - `Flow` class: event-driven workflows using `@start`, `@listen`, `@router` decorators
- **`llm.py`** + **`llms/`** - Provider-agnostic LLM abstraction with per-provider adapters (OpenAI, Gemini, Claude, Bedrock, etc.)
- **`memory/`** - Unified memory system (LanceDB-backed) with vector embeddings, encoding/recall flows, scope-based filtering
- **`tools/`** - Tool ecosystem: `BaseTool`, structured tools, MCP integration, memory tools
- **`events/`** - Central event bus for observability (agent, crew, flow, task, memory events)
- **`knowledge/`** - Knowledge base integration with multiple source types
- **`cli/`** - CLI for project scaffolding, deployment, and interactive crew chat
- **`utilities/`** - Shared helpers (prompt templates, schema utils, LLM utils, i18n, guardrails)
### Key patterns
- **Pydantic models** throughout for validation and type safety
- **Event-driven observability** via `events/event_bus.py` with sync/async handlers
- **Lazy loading** of heavy modules (Memory, EncodingFlow) via `__getattr__`
- **Pluggable storage** backends for memory (LanceDB default)
- **VCR cassettes** for recording/replaying HTTP interactions in tests
- **Translations** in `translations/en.json` for all agent-facing prompts
## Code Standards
- **Python 3.10+**, use modern syntax (`X | Y` unions, `collections.abc`, f-strings)
- **Ruff** for linting and formatting (E501 line length ignored)
- **mypy strict** mode: all functions need type annotations
- **Google-style docstrings**
- **No relative imports** (`ban-relative-imports = "all"` in Ruff config)
- **Commitizen** commit message format enforced via pre-commit
- Tests allow `assert`, unnecessary assignments, and hardcoded passwords (`S101`, `RET504`, `S105`, `S106` suppressed)

482
EXPANDED_CLAUDE.md Normal file
View File

@@ -0,0 +1,482 @@
# EXPANDED_CLAUDE.md
Deep architectural reference for the CrewAI codebase. See [CLAUDE.md](./CLAUDE.md) for quick-start commands and overview.
## Table of Contents
- [1. Execution Flow: Crew.kickoff() to Agent Output](#1-execution-flow)
- [2. Agent System](#2-agent-system)
- [3. Task System](#3-task-system)
- [4. Flow System](#4-flow-system)
- [5. Memory System](#5-memory-system)
- [6. Tool System](#6-tool-system)
- [7. Event System](#7-event-system)
- [8. LLM Abstraction](#8-llm-abstraction)
- [9. crewai-tools Package](#9-crewai-tools-package)
- [10. crewai-files Package](#10-crewai-files-package)
- [11. CLI & Project Scaffolding](#11-cli--project-scaffolding)
- [12. Project Decorators (@CrewBase)](#12-project-decorators)
- [13. Knowledge & RAG](#13-knowledge--rag)
- [14. Security & Fingerprinting](#14-security--fingerprinting)
- [15. Agent-to-Agent (A2A)](#15-agent-to-agent-a2a)
- [16. Translations & i18n](#16-translations--i18n)
---
## 1. Execution Flow
The end-to-end path from `Crew.kickoff()` to final output:
```
Crew.kickoff(inputs)
├── prepare_kickoff() # Validate inputs, store files
├── Determine process type
│ ├── Sequential: _run_sequential_process()
│ └── Hierarchical: _run_hierarchical_process() → _create_manager_agent()
├── _execute_tasks(tasks) # Main loop
│ └── For each task:
│ ├── If ConditionalTask: check condition(previous_output)
│ ├── If async_execution: create asyncio task
│ └── If sync: task.execute_sync(agent, context, tools)
│ └── agent.execute_task(task, context, tools)
│ ├── Memory recall (if enabled)
│ ├── Knowledge retrieval (if enabled)
│ ├── Build prompt with context
│ └── CrewAgentExecutor.invoke()
│ └── Loop until AgentFinish:
│ ├── Native tool calling (if LLM supports)
│ └── OR ReAct text pattern (fallback)
├── Apply guardrails with retries
├── after_kickoff_callbacks()
└── Return CrewOutput
```
**Process types:**
- **Sequential**: Tasks execute in order; each gets context from all prior TaskOutputs
- **Hierarchical**: A manager agent delegates to other agents via delegation tools
**Agent execution loop** (`agents/crew_agent_executor.py`):
- **Native function calling**: LLM returns structured `tool_calls`; executor runs first tool, appends result, loops
- **ReAct text pattern** (fallback): LLM outputs `Thought/Action/Action Input`; executor parses text, runs tool, appends `Observation`
---
## 2. Agent System
**Key files:** `agent/core.py`, `agents/agent_builder/base_agent.py`, `agents/crew_agent_executor.py`
### Agent class (`agent/core.py`)
Extends `BaseAgent`. Core fields:
- `role`, `goal`, `backstory` — define agent identity/prompting
- `llm` — BaseLLM instance (auto-created from string)
- `function_calling_llm` — optional specialized LLM for tool calls
- `tools` — list of BaseTool instances
- `memory` — optional unified Memory instance
- `knowledge_sources` — optional knowledge base
- `max_iter` (default 25), `max_rpm`, `max_retry_limit` (default 2)
- `allow_delegation` — enables delegation tools
- `reasoning` — enables planning before execution
- `guardrail` — validation function for output
- `code_execution_mode` — "safe" (Docker) or "unsafe" (local)
- `apps` — platform integrations (Asana, GitHub, Slack, etc.)
- `mcps` — MCP server configurations
### BaseAgent (`agents/agent_builder/base_agent.py`)
Abstract base with: `id` (UUID4), `agent_executor`, `cache_handler`, `tools_handler`, `security_config`, `i18n`. Defines abstract methods: `execute_task()`, `create_agent_executor()`, `get_delegation_tools()`, `get_platform_tools()`.
### CrewAgentExecutor (`agents/crew_agent_executor.py`)
The agent execution loop. Key attributes: `llm`, `task`, `crew`, `agent`, `prompt`, `tools`, `messages`, `iterations`, `max_iter`, `respect_context_window`. Entry point: `invoke(inputs)` → `_invoke_loop()`.
### LiteAgent (`lite_agent.py`)
Lightweight alternative agent implementation with: event-driven execution, memory integration, LLM hooks, guardrail support, structured output via Converter.
---
## 3. Task System
**Key files:** `task.py`, `tasks/task_output.py`, `tasks/conditional_task.py`
### Task class (`task.py`)
Core fields:
- `description`, `expected_output` — task prompt and LLM guidance
- `agent` — assigned BaseAgent
- `tools` — optional task-specific tools (override agent tools)
- `context` — list of prior Tasks whose output provides context
- `output_file`, `output_pydantic`, `output_json` — output format
- `guardrail` + `guardrail_max_retries` (default 3) — output validation
- `async_execution` — run in background thread
- `human_input` — request human feedback
- `callback` — post-completion callback
### TaskOutput (`tasks/task_output.py`)
Result container: `raw` (text), `pydantic` (model instance), `json_dict`, `agent` (role string), `output_format`, `messages`.
### ConditionalTask (`tasks/conditional_task.py`)
Extends Task with `condition: Callable[[TaskOutput], bool]`. Evaluates against previous output; if False, appends empty TaskOutput and skips. Cannot be first/only task or async.
---
## 4. Flow System
**Key files:** `flow/flow.py`, `flow/flow_wrappers.py`, `flow/persistence/`, `flow/human_feedback.py`
### Flow class (`flow/flow.py`)
Generic `Flow[T]` where T is `dict` or Pydantic `BaseModel` (must have `id` field). Uses `FlowMeta` metaclass to register decorators at class definition.
**Decorator API:**
```python
@start(condition=None) # Entry point (unconditional or conditional)
@listen(condition) # Event handler (fires when condition met)
@router(condition) # Decision point (return value becomes trigger)
@human_feedback(message, emit) # Collect human feedback, optionally route
or_(*conditions) # Fire when ANY condition met
and_(*conditions) # Fire when ALL conditions met
```
**Execution model:**
1. Execute all unconditional `@start` methods in parallel
2. After each method completes: find triggered routers (sequential), then listeners (parallel)
3. Continue chain until no more triggers
**Key rules:**
- Routers are sequential; listeners are parallel
- OR listeners fire once on first trigger; AND listeners wait for all
- State access is thread-safe via `StateProxy` with `_state_lock`
- Cyclic flows: methods cleared from `_completed_methods` to allow re-execution
### Persistence (`flow/persistence/`)
- `FlowPersistence` ABC: `save_state()`, `load_state()`, `save_pending_feedback()`, `load_pending_feedback()`
- `SQLiteFlowPersistence`: stores in `~/.crewai/flows.db`
- Enables resumption via `Flow.from_pending(flow_id, persistence)`
### Human Feedback (`flow/human_feedback.py`)
`@human_feedback` decorator wraps method to collect feedback. With `emit` parameter, acts as router (LLM collapses feedback to outcome). Supports async providers that raise `HumanFeedbackPending` to pause flow. Optional `learn=True` stores lessons in memory.
### Flow Methods
- `kickoff(inputs)` / `akickoff(inputs)` — sync/async execution
- `resume(feedback)` / `resume_async(feedback)` — resume from pause
- `ask(message, timeout)` — request user input (auto-checkpoints state)
- `state` — thread-safe state proxy
- `recall(query)` / `remember(content)` — memory integration
---
## 5. Memory System
**Key files:** `memory/unified_memory.py`, `memory/types.py`, `memory/encoding_flow.py`, `memory/recall_flow.py`, `memory/memory_scope.py`, `memory/analyze.py`, `memory/storage/`
### Memory class (`memory/unified_memory.py`)
Singleton-style with lazy LLM/embedder init. Pluggable storage backend (default LanceDB). Background save queue via ThreadPoolExecutor(max_workers=1).
**Public API:**
- **Write:** `remember(content, scope, categories, importance, ...)`, `remember_many(contents, ...)` (non-blocking batch)
- **Read:** `recall(query, scope, categories, limit, depth="shallow"|"deep")`
- **Manage:** `forget(scope, categories, older_than, ...)`, `update(record_id, ...)`, `drain_writes()`
- **Scoping:** `scope(path)` → `MemoryScope`, `slice(scopes, read_only)` → `MemorySlice`
- **Introspection:** `list_scopes()`, `list_records()`, `list_categories()`, `info()`, `tree()`
**Configuration:**
- Scoring weights: `semantic_weight=0.5`, `recency_weight=0.3`, `importance_weight=0.2`
- `recency_half_life_days=30` — exponential decay
- `consolidation_threshold=0.85` — dedup trigger similarity
- `confidence_threshold_high=0.8`, `confidence_threshold_low=0.5` — recall routing
- `exploration_budget=1` — LLM exploration rounds for deep recall
### Data Types (`memory/types.py`)
- **MemoryRecord**: `id`, `content`, `scope` (hierarchical path like `/company/team`), `categories`, `metadata`, `importance` (0-1), `created_at`, `last_accessed`, `embedding`, `source`, `private`
- **MemoryMatch**: `record`, `score` (composite), `match_reasons`, `evidence_gaps`
- **ScopeInfo**: `path`, `record_count`, `categories`, date range, `child_scopes`
**Composite scoring formula:**
```
score = semantic_weight × similarity + recency_weight × (0.5 ^ (age_days / half_life)) + importance_weight × importance
```
### Encoding Flow (`memory/encoding_flow.py`)
5-step batch pipeline on save:
1. **Batch embed** all items (single API call)
2. **Intra-batch dedup** via cosine similarity matrix (threshold 0.98)
3. **Parallel find similar** records in storage (8 workers)
4. **Parallel analyze** — Groups: A (insert, 0 LLM), B (consolidation, 1 LLM), C (save analysis, 1 LLM), D (both, 2 LLM) — 10 workers
5. **Execute plans** — batch re-embed, atomic storage mutations (delete + update + insert under write lock)
### Recall Flow (`memory/recall_flow.py`)
Adaptive recall pipeline:
1. **Analyze query** — short queries skip LLM; long queries get sub-queries, scope suggestions, complexity classification, time filters
2. **Filter & chunk** candidate scopes (max 20)
3. **Parallel search** across queries × scopes (4 workers), apply filters, compute composite scores
4. **Route** — high confidence → synthesize; low confidence + budget → explore deeper
5. **Recursive exploration** (if deeper) — LLM extracts relevant info + gaps; decrements budget; re-searches
6. **Synthesize** — deduplicate by ID, rank by composite score, return top N
### Storage Backend (`memory/storage/backend.py`)
Protocol interface: `save()`, `update()`, `delete()`, `search()`, `get_record()`, `list_records()`, `get_scope_info()`, `list_scopes()`, `list_categories()`, `count()`, `reset()`, `write_lock` property.
**LanceDB implementation** (`memory/storage/lancedb_storage.py`): auto-detects vector dimensions, class-level shared RLock per DB path, auto-compaction every 100 saves, retry logic for commit conflicts (exponential backoff, 5 retries), oversamples 3x when filters present.
### Scoped Views (`memory/memory_scope.py`)
- **MemoryScope**: wraps Memory with root_path prefix; all operations relative to that root
- **MemorySlice**: multi-scope view; recall searches all scopes in parallel; optional `read_only=True`
---
## 6. Tool System
**Key files:** `tools/base_tool.py`, `tools/structured_tool.py`, `tools/tool_calling.py`, `tools/tool_usage.py`, `tools/memory_tools.py`
### BaseTool (`tools/base_tool.py`)
Abstract Pydantic BaseModel. Key fields: `name`, `description`, `args_schema` (Pydantic model), `result_as_answer`, `max_usage_count`, `cache_function`. Subclasses implement `_run(**kwargs)` and optionally `_arun(**kwargs)`.
**`@tool` decorator:** creates tool from function, auto-infers schema from type hints.
### CrewStructuredTool (`tools/structured_tool.py`)
Wraps functions as structured tools for LLM function calling. `from_function()` factory. Validates inputs before execution, enforces usage limits.
### Tool Execution Flow (`tools/tool_usage.py`)
`ToolUsage` manages selection → validation → execution:
1. Parse tool call from LLM output
2. Select tool (fuzzy matching, 85%+ ratio)
3. Validate arguments against schema
4. Execute with fingerprint metadata
5. Cache results if configured
6. Emit events throughout lifecycle
Retry: max 3 parsing attempts with fallback methods (JSON, JSON5, AST, JSON repair).
### Memory Tools (`tools/memory_tools.py`)
- **RecallMemoryTool**: searches memory with single/multiple queries, returns formatted results with deduplication
- **RememberTool**: stores facts/decisions, infers scope/categories/importance
- **CalculatorTool**: safe arithmetic via AST parser (no `eval()`), supports date differences
### MCP Integration
- **MCPToolWrapper** (`tools/mcp_tool_wrapper.py`): on-demand connections, retry with exponential backoff, timeouts (15s connect, 60s execute)
- **MCPNativeTool** (`tools/mcp_native_tool.py`): reuses persistent MCP sessions, auto-reconnect on event loop changes
---
## 7. Event System
**Key files:** `events/event_bus.py`, `events/event_listener.py`, `events/base_events.py`, `events/types/`
### Event Bus (`events/event_bus.py`)
Singleton `CrewAIEventsBus`. Thread-safe with RWLock. Supports sync handlers (ThreadPoolExecutor, 10 workers) and async handlers (dedicated daemon event loop). Handler dependency injection via `Depends()`.
**Key methods:** `emit(source, event)`, `aemit()`, `flush(timeout=30)`, `register_handler()`, `scoped_handlers()` (context manager for temporary handlers).
### Event Types (`events/types/`)
- **Tool events**: `ToolUsageStartedEvent`, `ToolUsageFinishedEvent`, `ToolUsageErrorEvent`, `ToolValidateInputErrorEvent`, `ToolSelectionErrorEvent`
- **LLM events**: `LLMCallStartedEvent`, `LLMCallCompletedEvent`, `LLMCallFailedEvent`, `LLMStreamChunkEvent`, `LLMThinkingChunkEvent`
- **Agent/Task/Crew events**: lifecycle tracking (started, completed, failed)
- **Flow events**: method execution states, paused, input requested/received
- **Memory events**: retrieval started/completed/failed
- **MCP events**: connection, tool execution
- **A2A events**: agent-to-agent delegation
All events carry: UUID, timestamp, parent/previous chain, fingerprint context.
---
## 8. LLM Abstraction
**Key files:** `llm.py`, `llms/base_llm.py`, `llms/providers/`
### BaseLLM (`llms/base_llm.py`)
Abstract interface: `call(messages, tools, ...)` and `acall(...)`. Provider-specific constants for context windows (1KB–2MB). Emits LLM events. Handles context window management, timeout/auth errors, streaming.
### LLM class (`llm.py`)
High-level wrapper integrating with litellm for multi-provider support. Handles model identification, tool function calling, JSON schema responses, streaming chunk aggregation, multimodal content formatting.
### Providers (`llms/providers/`)
Per-provider adapters: OpenAI, Azure, Gemini, Claude/Anthropic, Bedrock, Watson, etc.
---
## 9. crewai-tools Package
**Location:** `lib/crewai-tools/`
93+ pre-built tools. All inherit from `crewai.tools.BaseTool`.
**Pattern for creating tools:**
```python
class MyToolSchema(BaseModel):
param: str = Field(..., description="...")
class MyTool(BaseTool):
name: str = "My Tool"
description: str = "..."
args_schema: type[BaseModel] = MyToolSchema
def _run(self, param: str) -> str:
return result
```
**Tool categories:**
- **Search/Web**: BraveSearch, Tavily, EXASearch, Serper, Spider, SerpAPI
- **Scraping**: Firecrawl, Jina, Scrapfly, Selenium, Browserbase, Stagehand
- **File search**: PDF, CSV, JSON, XML, MDX, DOCX, TXT search tools
- **Database**: MySQL, Snowflake, SingleStore, MongoDB, Qdrant, Weaviate, Couchbase
- **File I/O**: FileRead, FileWriter, DirectoryRead, DirectorySearch, FileCompressor, OCR, Vision
- **Code**: CodeInterpreter, CodeDocsSearch, NL2SQL, DallE
- **AWS**: Bedrock agent/KB, S3 reader/writer
- **Integrations**: Composio, Zapier, MCP, LlamaIndex, GitHub
- **RAG**: RagTool base with 17 loaders (CSV, Directory, Docs, DOCX, GitHub, JSON, MySQL, Postgres, etc.)
**43+ optional dependency groups** for external services.
---
## 10. crewai-files Package
**Location:** `lib/crewai-files/`
Multimodal file handling for LLM providers.
**Structure:**
- `core/` — File type classes (Image, PDF, Audio, Video, Text), source types (FilePath, FileBytes, FileUrl, FileStream), resolved representations
- `processing/` — FileProcessor validates against per-provider constraints, optional transforms (resize, compress, chunk)
- `uploaders/` — Provider-specific uploaders (Anthropic, OpenAI, Gemini, Bedrock/S3)
- `formatting/` — Format files for provider APIs: `format_multimodal_content()`, `aformat_multimodal_content()`
- `resolution/` — FileResolver decides inline base64 vs upload based on size/provider
- `cache/` — UploadCache tracks uploads by content hash, cleanup utilities
**Provider constraints**: max file sizes, supported formats, image dimensions per provider (Anthropic, OpenAI, Gemini, Bedrock).
---
## 11. CLI & Project Scaffolding
**Key file:** `cli/cli.py` (Click-based)
**Core commands:**
- `crewai create <crew|flow> <name>` — scaffold project
- `crewai run` / `crewai flow kickoff` — execute crew/flow
- `crewai chat` — interactive conversation with crew
- `crewai train [-n N]` / `crewai test [-n N] [-m MODEL]` — training and evaluation
- `crewai replay [-t TASK_ID]` — replay from specific task
**Memory/config:**
- `crewai reset_memories` — reset memory, knowledge, or all
- `crewai memory` — open Memory TUI
- `crewai config list|set|reset` — CLI configuration
**Deployment:**
- `crewai deploy create|list|push|status|logs|remove`
**Tool repository:**
- `crewai tool create|install|publish`
**Flow-specific:**
- `crewai flow kickoff|plot|add-crew`
**Other:** `crewai login`, `crewai org list|switch|current`, `crewai traces enable|disable|status`, `crewai env view`
---
## 12. Project Decorators
**Key files:** `project/crew_base.py`, `project/annotations.py`
### @CrewBase decorator
Applies `CrewBaseMeta` metaclass. Auto-loads YAML configs (`config/agents.yaml`, `config/tasks.yaml`). Registers agent/task factory methods, MCP adapters, lifecycle hooks.
### Method decorators (`project/annotations.py`)
**Component factories** (all memoized):
- `@agent` — agent factory method
- `@task` — task factory method
- `@llm` — LLM provider factory
- `@tool` — tool factory
- `@callback`, `@cache_handler`
**Lifecycle:**
- `@before_kickoff` / `@after_kickoff` — pre/post execution hooks
- `@crew` — main crew entry point (instantiates agents/tasks, manages callbacks)
**Output format:** `@output_json`, `@output_pydantic`
**LLM/Tool hooks** (optional agent/tool filtering):
- `@before_llm_call_hook` / `@after_llm_call_hook`
- `@before_tool_call_hook` / `@after_tool_call_hook`
---
## 13. Knowledge & RAG
**Key files:** `knowledge/knowledge.py`, `rag/`
### Knowledge class
Vector store integration: `query(queries, results_limit, score_threshold)`, `add_sources()`, `reset()`. Async variants available. Used by agents via `knowledge_sources` parameter.
### RAG system (`rag/`)
- **Vector DBs**: ChromaDB, Qdrant (client wrappers, factories, config)
- **Embeddings**: 25+ providers (OpenAI, Cohere, HuggingFace, Jina, Voyage, Ollama, Bedrock, Azure, Vertex, etc.)
- **Core**: `BaseClient`, `BaseEmbeddingsProvider` abstractions
- **Storage**: `BaseRAGStorage` interface
---
## 14. Security & Fingerprinting
**Key files:** `security/security_config.py`, `security/fingerprint.py`
- **SecurityConfig**: manages component fingerprints, serialization
- **Fingerprint**: dual identifiers (human-readable ID + UUID), `uuid5()` with CrewAI namespace for deterministic seeding, metadata support (1-level nesting, 10KB limit), timestamp tracking
- Every event carries fingerprint context for audit trails
---
## 15. Agent-to-Agent (A2A)
**Key files:** `a2a/config.py`, `a2a/`
Protocol for inter-agent communication:
- `A2AClientConfig`, `A2AServerConfig` — configuration
- `AgentCardSigningConfig` — JWS signing (RS256, ES256, PS256)
- `GRPCServerConfig` — gRPC transport with TLS
- Supporting: `auth/`, `updates/` (polling/push/streaming), `extensions/`, `utils/`
---
## 16. Translations & i18n
**Key file:** `translations/en.json`
All agent-facing prompts are externalized. Key sections:
- `slices/` — agent prompting templates (task, memory, role_playing, tools, format, final_answer_format)
- `errors/` — tool execution, validation, format violation, guardrail failure messages
- `tools/` — tool descriptions (delegate_work, ask_question, recall_memory, calculator, save_to_memory)
- `memory/` — query analysis, extraction rules, consolidation logic, temporal reasoning
- HITL prompts — pre-review, lesson distillation
- Lite agent prompts — system prompts with/without tools

File diff suppressed because it is too large Load Diff

View File

@@ -4,71 +4,6 @@ description: "Product updates, improvements, and bug fixes for CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="Mar 11, 2026">
## v1.10.2a1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.2a1)
## What's Changed
### Features
- Add support for tool search, saving tokens, and dynamically injecting appropriate tools during execution for Anthropic.
- Introduce more Brave Search tools.
- Create action for nightly releases.
### Bug Fixes
- Fix LockException under concurrent multi-process execution.
- Resolve issues with grouping parallel tool results in a single user message.
- Address MCP tools resolutions and eliminate all shared mutable connections.
- Update LLM parameter handling in the human_feedback function.
- Add missing list/dict methods to LockedListProxy and LockedDictProxy.
- Propagate contextvars context to parallel tool call threads.
- Bump gitpython dependency to >=3.1.41 to resolve CVE path traversal vulnerability.
### Refactoring
- Refactor memory classes to be serializable.
### Documentation
- Update changelog and version for v1.10.1.
## Contributors
@akaKuruma, @github-actions[bot], @giulio-leone, @greysonlalonde, @joaomdmoura, @jonathansampson, @lorenzejay, @lucasgomide, @mattatcha
</Update>
<Update label="Mar 04, 2026">
## v1.10.1
[View release on GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1)
## What's Changed
### Features
- Upgrade Gemini GenAI
### Bug Fixes
- Adjust executor listener value to avoid recursion
- Group parallel function response parts in a single Content object in Gemini
- Surface thought output from thinking models in Gemini
- Load MCP and platform tools when agent tools are None
- Support Jupyter environments with running event loops in A2A
- Use anonymous ID for ephemeral traces
- Conditionally pass plus header
- Skip signal handler registration in non-main threads for telemetry
- Inject tool errors as observations and resolve name collisions
- Upgrade pypdf from 4.x to 6.7.4 to resolve Dependabot alerts
- Resolve critical and high Dependabot security alerts
### Documentation
- Sync Composio tool documentation across locales
## Contributors
@giulio-leone, @greysonlalonde, @haxzie, @joaomdmoura, @lorenzejay, @mattatcha, @mplachta, @nicoferdi96
</Update>
<Update label="Feb 27, 2026">
## v1.10.1a1

View File

@@ -1,316 +1,97 @@
---
title: Brave Search Tools
description: A suite of tools for querying the Brave Search API — covering web, news, image, and video search.
title: Brave Search
description: The `BraveSearchTool` is designed to search the internet using the Brave Search API.
icon: searchengin
mode: "wide"
---
# Brave Search Tools
# `BraveSearchTool`
## Description
CrewAI offers a family of Brave Search tools, each targeting a specific [Brave Search API](https://brave.com/search/api/) endpoint.
Rather than a single catch-all tool, you can pick exactly the tool that matches the kind of results your agent needs:
| Tool | Endpoint | Use case |
| --- | --- | --- |
| `BraveWebSearchTool` | Web Search | General web results, snippets, and URLs |
| `BraveNewsSearchTool` | News Search | Recent news articles and headlines |
| `BraveImageSearchTool` | Image Search | Image results with dimensions and source URLs |
| `BraveVideoSearchTool` | Video Search | Video results from across the web |
| `BraveLocalPOIsTool` | Local POIs | Find points of interest (e.g., restaurants) |
| `BraveLocalPOIsDescriptionTool` | Local POIs | Retrieve AI-generated location descriptions |
| `BraveLLMContextTool` | LLM Context | Pre-extracted web content optimized for AI agents, LLM grounding, and RAG pipelines. |
All tools share a common base class (`BraveSearchToolBase`) that provides consistent behavior — rate limiting, automatic retries on `429` responses, header and parameter validation, and optional file saving.
<Note>
The older `BraveSearchTool` class is still available for backwards compatibility, but it is considered **legacy** and will not receive the same level of attention going forward. We recommend migrating to the specific tools listed above, which offer richer configuration and a more focused interface.
</Note>
<Note>
While many tools (e.g., _BraveWebSearchTool_, _BraveNewsSearchTool_, _BraveImageSearchTool_, and _BraveVideoSearchTool_) can be used with a free Brave Search API subscription/plan, some parameters (e.g., `enable_snippets`) and tools (e.g., _BraveLocalPOIsTool_ and _BraveLocalPOIsDescriptionTool_) require a paid plan. Consult your subscription plan's capabilities for clarification.
</Note>
This tool is designed to perform web searches using the Brave Search API. It allows you to search the internet with a specified query and retrieve relevant results. The tool supports customizable result counts and country-specific searches.
## Installation
To incorporate this tool into your project, follow the installation instructions below:
```shell
pip install 'crewai[tools]'
```
## Getting Started
## Steps to Get Started
1. **Install the package** — confirm that `crewai[tools]` is installed in your Python environment.
2. **Get an API key** — sign up at [api-dashboard.search.brave.com/login](https://api-dashboard.search.brave.com/login) to generate a key.
3. **Set the environment variable** — store your key as `BRAVE_API_KEY`, or pass it directly via the `api_key` parameter.
To effectively use the `BraveSearchTool`, follow these steps:
## Quick Examples
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a Brave Search API key at https://api.search.brave.com/app/keys (sign in to generate a key).
3. **Environment Configuration**: Store your obtained API key in an environment variable named `BRAVE_API_KEY` to facilitate its use by the tool.
### Web Search
## Example
The following example demonstrates how to initialize the tool and execute a search with a given query:
```python Code
from crewai_tools import BraveWebSearchTool
from crewai_tools import BraveSearchTool
tool = BraveWebSearchTool()
results = tool.run(q="CrewAI agent framework")
# Initialize the tool for internet searching capabilities
tool = BraveSearchTool()
# Execute a search
results = tool.run(search_query="CrewAI agent framework")
print(results)
```
### News Search
## Parameters
The `BraveSearchTool` accepts the following parameters:
- **search_query**: Mandatory. The search query you want to use to search the internet.
- **country**: Optional. Specify the country for the search results. Default is empty string.
- **n_results**: Optional. Number of search results to return. Default is `10`.
- **save_file**: Optional. Whether to save the search results to a file. Default is `False`.
## Example with Parameters
Here is an example demonstrating how to use the tool with additional parameters:
```python Code
from crewai_tools import BraveNewsSearchTool
from crewai_tools import BraveSearchTool
tool = BraveNewsSearchTool()
results = tool.run(q="latest AI breakthroughs")
print(results)
```
### Image Search
```python Code
from crewai_tools import BraveImageSearchTool
tool = BraveImageSearchTool()
results = tool.run(q="northern lights photography")
print(results)
```
### Video Search
```python Code
from crewai_tools import BraveVideoSearchTool
tool = BraveVideoSearchTool()
results = tool.run(q="how to build AI agents")
print(results)
```
### Location POI Descriptions
```python Code
from crewai_tools import (
BraveWebSearchTool,
BraveLocalPOIsDescriptionTool,
# Initialize the tool with custom parameters
tool = BraveSearchTool(
country="US",
n_results=5,
save_file=True
)
web_search = BraveWebSearchTool(raw=True)
poi_details = BraveLocalPOIsDescriptionTool()
results = web_search.run(q="italian restaurants in pensacola, florida")
if "locations" in results:
location_ids = [ loc["id"] for loc in results["locations"]["results"] ]
if location_ids:
descriptions = poi_details.run(ids=location_ids)
print(descriptions)
```
## Common Constructor Parameters
Every Brave Search tool accepts the following parameters at initialization:
| Parameter | Type | Default | Description |
| --- | --- | --- | --- |
| `api_key` | `str \| None` | `None` | Brave API key. Falls back to the `BRAVE_API_KEY` environment variable. |
| `headers` | `dict \| None` | `None` | Additional HTTP headers to send with every request (e.g., `api-version`, geolocation headers). |
| `requests_per_second` | `float` | `1.0` | Maximum request rate. The tool will sleep between calls to stay within this limit. |
| `save_file` | `bool` | `False` | When `True`, each response is written to a timestamped `.txt` file. |
| `raw` | `bool` | `False` | When `True`, the full API JSON response is returned without any refinement. |
| `timeout` | `int` | `30` | HTTP request timeout in seconds. |
| `country` | `str \| None` | `None` | Legacy shorthand for geo-targeting (e.g., `"US"`). Prefer using the `country` query parameter directly. |
| `n_results` | `int` | `10` | Legacy shorthand for result count. Prefer using the `count` query parameter directly. |
<Warning>
The `country` and `n_results` constructor parameters exist for backwards compatibility. They are applied as defaults when the corresponding query parameters (`country`, `count`) are not provided at call time. For new code, we recommend passing `country` and `count` directly as query parameters instead.
</Warning>
## Query Parameters
Each tool validates its query parameters against a Pydantic schema before sending the request.
The parameters vary slightly per endpoint — here is a summary of the most commonly used ones:
### BraveWebSearchTool
| Parameter | Description |
| --- | --- |
| `q` | **(required)** Search query string (max 400 chars). |
| `country` | Two-letter country code for geo-targeting (e.g., `"US"`). |
| `search_lang` | Two-letter language code for results (e.g., `"en"`). |
| `count` | Max number of results to return (1–20). |
| `offset` | Skip the first N pages of results (0–9). |
| `safesearch` | Content filter: `"off"`, `"moderate"`, or `"strict"`. |
| `freshness` | Recency filter: `"pd"` (past day), `"pw"` (past week), `"pm"` (past month), `"py"` (past year), or a date range like `"2025-01-01to2025-06-01"`. |
| `extra_snippets` | Include up to 5 additional text snippets per result. |
| `goggles` | Brave Goggles URL(s) and/or source for custom re-ranking. |
For the complete parameter and header reference, see the [Brave Web Search API documentation](https://api-dashboard.search.brave.com/api-reference/web/search/get).
### BraveNewsSearchTool
| Parameter | Description |
| --- | --- |
| `q` | **(required)** Search query string (max 400 chars). |
| `country` | Two-letter country code for geo-targeting. |
| `search_lang` | Two-letter language code for results. |
| `count` | Max number of results to return (1–50). |
| `offset` | Skip the first N pages of results (0–9). |
| `safesearch` | Content filter: `"off"`, `"moderate"`, or `"strict"`. |
| `freshness` | Recency filter (same options as Web Search). |
| `goggles` | Brave Goggles URL(s) and/or source for custom re-ranking. |
For the complete parameter and header reference, see the [Brave News Search API documentation](https://api-dashboard.search.brave.com/api-reference/news/news_search/get).
### BraveImageSearchTool
| Parameter | Description |
| --- | --- |
| `q` | **(required)** Search query string (max 400 chars). |
| `country` | Two-letter country code for geo-targeting. |
| `search_lang` | Two-letter language code for results. |
| `count` | Max number of results to return (1–200). |
| `safesearch` | Content filter: `"off"` or `"strict"`. |
| `spellcheck` | Attempt to correct spelling errors in the query. |
For the complete parameter and header reference, see the [Brave Image Search API documentation](https://api-dashboard.search.brave.com/api-reference/images/image_search).
### BraveVideoSearchTool
| Parameter | Description |
| --- | --- |
| `q` | **(required)** Search query string (max 400 chars). |
| `country` | Two-letter country code for geo-targeting. |
| `search_lang` | Two-letter language code for results. |
| `count` | Max number of results to return (1–50). |
| `offset` | Skip the first N pages of results (0–9). |
| `safesearch` | Content filter: `"off"`, `"moderate"`, or `"strict"`. |
| `freshness` | Recency filter (same options as Web Search). |
For the complete parameter and header reference, see the [Brave Video Search API documentation](https://api-dashboard.search.brave.com/api-reference/videos/video_search/get).
### BraveLocalPOIsTool
| Parameter | Description |
| --- | --- |
| `ids` | **(required)** A list of unique identifiers for the desired locations. |
| `search_lang` | Two-letter language code for results. |
For the complete parameter and header reference, see [Brave Local POIs API documentation](https://api-dashboard.search.brave.com/api-reference/web/local_pois).
### BraveLocalPOIsDescriptionTool
| Parameter | Description |
| --- | --- |
| `ids` | **(required)** A list of unique identifiers for the desired locations. |
For the complete parameter and header reference, see [Brave POI Descriptions API documentation](https://api-dashboard.search.brave.com/api-reference/web/poi_descriptions).
## Custom Headers
All tools support custom HTTP request headers. The Web Search tool, for example, accepts geolocation headers for location-aware results:
```python Code
from crewai_tools import BraveWebSearchTool
tool = BraveWebSearchTool(
headers={
"x-loc-lat": "37.7749",
"x-loc-long": "-122.4194",
"x-loc-city": "San Francisco",
"x-loc-state": "CA",
"x-loc-country": "US",
}
)
results = tool.run(q="best coffee shops nearby")
```
You can also update headers after initialization using the `set_headers()` method:
```python Code
tool.set_headers({"api-version": "2025-01-01"})
```
## Raw Mode
By default, each tool refines the API response into a concise list of results. If you need the full, unprocessed API response, enable raw mode:
```python Code
from crewai_tools import BraveWebSearchTool
tool = BraveWebSearchTool(raw=True)
full_response = tool.run(q="Brave Search API")
# Execute a search
results = tool.run(search_query="Latest AI developments")
print(results)
```
## Agent Integration Example
Here's how to equip a CrewAI agent with multiple Brave Search tools:
Here's how to integrate the `BraveSearchTool` with a CrewAI agent:
```python Code
from crewai import Agent
from crewai.project import agent
from crewai_tools import BraveWebSearchTool, BraveNewsSearchTool
from crewai_tools import BraveSearchTool
web_search = BraveWebSearchTool()
news_search = BraveNewsSearchTool()
# Initialize the tool
brave_search_tool = BraveSearchTool()
# Define an agent with the BraveSearchTool
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config["researcher"],
tools=[web_search, news_search],
allow_delegation=False,
tools=[brave_search_tool]
)
```
## Advanced Example
Combining multiple parameters for a targeted search:
```python Code
from crewai_tools import BraveWebSearchTool
tool = BraveWebSearchTool(
requests_per_second=0.5, # conservative rate limit
save_file=True,
)
results = tool.run(
q="artificial intelligence news",
country="US",
search_lang="en",
count=5,
freshness="pm", # past month only
extra_snippets=True,
)
print(results)
```
## Migrating from `BraveSearchTool` (Legacy)
If you are currently using `BraveSearchTool`, switching to the new tools is straightforward:
```python Code
# Before (legacy)
from crewai_tools import BraveSearchTool
tool = BraveSearchTool(country="US", n_results=5, save_file=True)
results = tool.run(search_query="AI agents")
# After (recommended)
from crewai_tools import BraveWebSearchTool
tool = BraveWebSearchTool(save_file=True)
results = tool.run(q="AI agents", country="US", count=5)
```
Key differences:
- **Import**: Use `BraveWebSearchTool` (or the news/image/video variant) instead of `BraveSearchTool`.
- **Query parameter**: Use `q` instead of `search_query`. (Both `search_query` and `query` are still accepted for convenience, but `q` is the preferred parameter.)
- **Result count**: Pass `count` as a query parameter instead of `n_results` at init time.
- **Country**: Pass `country` as a query parameter instead of at init time.
- **API key**: Can now be passed directly via `api_key=` in addition to the `BRAVE_API_KEY` environment variable.
- **Rate limiting**: Configurable via `requests_per_second` with automatic retry on `429` responses.
## Conclusion
The Brave Search tool suite gives your CrewAI agents flexible, endpoint-specific access to the Brave Search API. Whether you need web pages, breaking news, images, or videos, there is a dedicated tool with validated parameters and built-in resilience. Pick the tool that fits your use case, and refer to the [Brave Search API documentation](https://brave.com/search/api/) for the full details on available parameters and response formats.
By integrating the `BraveSearchTool` into Python projects, users gain the ability to conduct real-time, relevant searches across the internet directly from their applications. The tool provides a simple interface to the powerful Brave Search API, making it easy to retrieve and process search results programmatically. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward.

View File

@@ -4,71 +4,6 @@ description: "CrewAI의 제품 업데이트, 개선 사항 및 버그 수정"
icon: "clock"
mode: "wide"
---
<Update label="2026년 3월 11일">
## v1.10.2a1
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.10.2a1)
## 변경 사항
### 기능
- Anthropics에 대한 도구 검색 지원 추가, 토큰 저장, 실행 중 적절한 도구를 동적으로 주입하는 기능 추가.
- 더 많은 Brave Search 도구 도입.
- 야간 릴리스를 위한 액션 생성.
### 버그 수정
- 동시 다중 프로세스 실행 중 LockException 수정.
- 단일 사용자 메시지에서 병렬 도구 결과 그룹화 문제 해결.
- MCP 도구 확인(resolution) 문제 해결 및 모든 공유 가변 연결 제거.
- human_feedback 함수에서 LLM 매개변수 처리 업데이트.
- LockedListProxy 및 LockedDictProxy에 누락된 list/dict 메서드 추가.
- 병렬 도구 호출 스레드에 contextvars 컨텍스트 전파.
- CVE 경로 탐색 취약점을 해결하기 위해 gitpython 의존성을 >=3.1.41로 업데이트.
### 리팩토링
- 메모리 클래스를 직렬화 가능하도록 리팩토링.
### 문서
- v1.10.1에 대한 변경 로그 및 버전 업데이트.
## 기여자
@akaKuruma, @github-actions[bot], @giulio-leone, @greysonlalonde, @joaomdmoura, @jonathansampson, @lorenzejay, @lucasgomide, @mattatcha
</Update>
<Update label="2026년 3월 4일">
## v1.10.1
[GitHub 릴리스 보기](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1)
## 변경 사항
### 기능
- Gemini GenAI 업그레이드
### 버그 수정
- 재귀를 피하기 위해 실행기 리스너 값을 조정
- Gemini에서 병렬 함수 응답 부분을 단일 Content 객체로 그룹화
- Gemini에서 사고 모델의 사고 출력을 표시
- 에이전트 도구가 None일 때 MCP 및 플랫폼 도구 로드
- A2A에서 실행 이벤트 루프가 있는 Jupyter 환경 지원
- 일시적인 추적을 위해 익명 ID 사용
- 조건부로 플러스 헤더 전달
- 원격 측정을 위해 메인이 아닌 스레드에서 신호 처리기 등록 건너뛰기
- 도구 오류를 관찰로 주입하고 이름 충돌 해결
- Dependabot 경고를 해결하기 위해 pypdf를 4.x에서 6.7.4로 업그레이드
- 심각 및 높은 Dependabot 보안 경고 해결
### 문서
- Composio 도구 문서를 지역별로 동기화
## 기여자
@giulio-leone, @greysonlalonde, @haxzie, @joaomdmoura, @lorenzejay, @mattatcha, @mplachta, @nicoferdi96
</Update>
<Update label="2026년 2월 27일">
## v1.10.1a1

View File

@@ -4,71 +4,6 @@ description: "Atualizações de produto, melhorias e correções do CrewAI"
icon: "clock"
mode: "wide"
---
<Update label="11 mar 2026">
## v1.10.2a1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.2a1)
## O que mudou
### Recursos
- Adicionar suporte para busca de ferramentas, salvamento de tokens e injeção dinâmica de ferramentas apropriadas durante a execução para Anthropics.
- Introduzir mais ferramentas de Busca Brave.
- Criar ação para lançamentos noturnos.
### Correções de Bugs
- Corrigir LockException durante a execução concorrente de múltiplos processos.
- Resolver problemas com o agrupamento de resultados de ferramentas paralelas em uma única mensagem de usuário.
- Abordar resoluções de ferramentas MCP e eliminar todas as conexões mutáveis compartilhadas.
- Atualizar o manuseio de parâmetros LLM na função human_feedback.
- Adicionar métodos de lista/dicionário ausentes a LockedListProxy e LockedDictProxy.
- Propagar o contexto de contextvars para as threads de chamada de ferramentas paralelas.
- Atualizar a dependência gitpython para >=3.1.41 para resolver a vulnerabilidade de travessia de diretórios CVE.
### Refatoração
- Refatorar classes de memória para serem serializáveis.
### Documentação
- Atualizar o changelog e a versão para v1.10.1.
## Contribuidores
@akaKuruma, @github-actions[bot], @giulio-leone, @greysonlalonde, @joaomdmoura, @jonathansampson, @lorenzejay, @lucasgomide, @mattatcha
</Update>
<Update label="04 mar 2026">
## v1.10.1
[Ver release no GitHub](https://github.com/crewAIInc/crewAI/releases/tag/1.10.1)
## O que mudou
### Recursos
- Atualizar Gemini GenAI
### Correções de Bugs
- Ajustar o valor do listener do executor para evitar recursão
- Agrupar partes da resposta da função paralela em um único objeto Content no Gemini
- Exibir a saída de pensamento dos modelos de pensamento no Gemini
- Carregar ferramentas MCP e da plataforma quando as ferramentas do agente forem None
- Suportar ambientes Jupyter com loops de eventos em A2A
- Usar ID anônimo para rastreamentos efêmeros
- Passar condicionalmente o cabeçalho plus
- Ignorar o registro do manipulador de sinal em threads não principais para telemetria
- Injetar erros de ferramentas como observações e resolver colisões de nomes
- Atualizar pypdf de 4.x para 6.7.4 para resolver alertas do Dependabot
- Resolver alertas de segurança críticos e altos do Dependabot
### Documentação
- Sincronizar a documentação da ferramenta Composio entre locais
## Contribuidores
@giulio-leone, @greysonlalonde, @haxzie, @joaomdmoura, @lorenzejay, @mattatcha, @mplachta, @nicoferdi96
</Update>
<Update label="27 fev 2026">
## v1.10.1a1

View File

@@ -152,4 +152,4 @@ __all__ = [
"wrap_file_source",
]
__version__ = "1.10.2a1"
__version__ = "1.10.1a1"

View File

@@ -11,7 +11,7 @@ dependencies = [
"pytube~=15.0.0",
"requests~=2.32.5",
"docker~=7.1.0",
"crewai==1.10.2a1",
"crewai==1.10.1a1",
"tiktoken~=0.8.0",
"beautifulsoup4~=4.13.4",
"python-docx~=1.2.0",
@@ -108,7 +108,7 @@ stagehand = [
"stagehand>=0.4.1",
]
github = [
"gitpython>=3.1.41,<4",
"gitpython==3.1.38",
"PyGithub==1.59.1",
]
rag = [

View File

@@ -10,18 +10,7 @@ from crewai_tools.aws.s3.writer_tool import S3WriterTool
from crewai_tools.tools.ai_mind_tool.ai_mind_tool import AIMindTool
from crewai_tools.tools.apify_actors_tool.apify_actors_tool import ApifyActorsTool
from crewai_tools.tools.arxiv_paper_tool.arxiv_paper_tool import ArxivPaperTool
from crewai_tools.tools.brave_search_tool.brave_image_tool import BraveImageSearchTool
from crewai_tools.tools.brave_search_tool.brave_llm_context_tool import (
BraveLLMContextTool,
)
from crewai_tools.tools.brave_search_tool.brave_local_pois_tool import (
BraveLocalPOIsDescriptionTool,
BraveLocalPOIsTool,
)
from crewai_tools.tools.brave_search_tool.brave_news_tool import BraveNewsSearchTool
from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool
from crewai_tools.tools.brave_search_tool.brave_video_tool import BraveVideoSearchTool
from crewai_tools.tools.brave_search_tool.brave_web_tool import BraveWebSearchTool
from crewai_tools.tools.brightdata_tool.brightdata_dataset import (
BrightDataDatasetTool,
)
@@ -211,14 +200,7 @@ __all__ = [
"ArxivPaperTool",
"BedrockInvokeAgentTool",
"BedrockKBRetrieverTool",
"BraveImageSearchTool",
"BraveLLMContextTool",
"BraveLocalPOIsDescriptionTool",
"BraveLocalPOIsTool",
"BraveNewsSearchTool",
"BraveSearchTool",
"BraveVideoSearchTool",
"BraveWebSearchTool",
"BrightDataDatasetTool",
"BrightDataSearchTool",
"BrightDataWebUnlockerTool",
@@ -309,4 +291,4 @@ __all__ = [
"ZapierActionTools",
]
__version__ = "1.10.2a1"
__version__ = "1.10.1a1"

View File

@@ -1,18 +1,7 @@
from crewai_tools.tools.ai_mind_tool.ai_mind_tool import AIMindTool
from crewai_tools.tools.apify_actors_tool.apify_actors_tool import ApifyActorsTool
from crewai_tools.tools.arxiv_paper_tool.arxiv_paper_tool import ArxivPaperTool
from crewai_tools.tools.brave_search_tool.brave_image_tool import BraveImageSearchTool
from crewai_tools.tools.brave_search_tool.brave_llm_context_tool import (
BraveLLMContextTool,
)
from crewai_tools.tools.brave_search_tool.brave_local_pois_tool import (
BraveLocalPOIsDescriptionTool,
BraveLocalPOIsTool,
)
from crewai_tools.tools.brave_search_tool.brave_news_tool import BraveNewsSearchTool
from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool
from crewai_tools.tools.brave_search_tool.brave_video_tool import BraveVideoSearchTool
from crewai_tools.tools.brave_search_tool.brave_web_tool import BraveWebSearchTool
from crewai_tools.tools.brightdata_tool import (
BrightDataDatasetTool,
BrightDataSearchTool,
@@ -196,14 +185,7 @@ __all__ = [
"AIMindTool",
"ApifyActorsTool",
"ArxivPaperTool",
"BraveImageSearchTool",
"BraveLLMContextTool",
"BraveLocalPOIsDescriptionTool",
"BraveLocalPOIsTool",
"BraveNewsSearchTool",
"BraveSearchTool",
"BraveVideoSearchTool",
"BraveWebSearchTool",
"BrightDataDatasetTool",
"BrightDataSearchTool",
"BrightDataWebUnlockerTool",

View File

@@ -1,322 +0,0 @@
from __future__ import annotations
from abc import ABC, abstractmethod
from datetime import datetime
import json
import logging
import os
import threading
import time
from typing import Any, ClassVar
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
logger = logging.getLogger(__name__)
# Brave API error codes that indicate non-retryable quota/usage exhaustion.
# Distinguished from per-second rate limiting (RATE_LIMITED): retrying these
# cannot succeed, so _is_retryable treats them as terminal.
_QUOTA_CODES = frozenset({"QUOTA_LIMITED", "USAGE_LIMIT_EXCEEDED"})
def _save_results_to_file(content: str) -> None:
"""Saves the search results to a file."""
filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt"
with open(filename, "w") as file:
file.write(content)
def _parse_error_body(resp: requests.Response) -> dict[str, Any] | None:
"""Extract the structured "error" object from a Brave API error response."""
try:
body = resp.json()
error = body.get("error")
return error if isinstance(error, dict) else None
except (ValueError, KeyError):
return None
def _raise_for_error(resp: requests.Response) -> None:
"""Brave Search API error responses contain helpful JSON payloads"""
status = resp.status_code
try:
body = json.dumps(resp.json())
except (ValueError, KeyError):
body = resp.text[:500]
raise RuntimeError(f"Brave Search API error (HTTP {status}): {body}")
def _is_retryable(resp: requests.Response) -> bool:
"""Return True for transient failures that are worth retrying.
* 429 + RATE_LIMITED — the per-second sliding window is full.
* 5xx — transient server-side errors.
Quota exhaustion (QUOTA_LIMITED, USAGE_LIMIT_EXCEEDED) is
explicitly excluded: retrying will never succeed until the billing
period resets.
"""
if resp.status_code == 429:
error = _parse_error_body(resp) or {}
return error.get("code") not in _QUOTA_CODES
return 500 <= resp.status_code < 600
def _retry_delay(resp: requests.Response, attempt: int) -> float:
"""Compute wait time before the next retry attempt.
Prefers the server-supplied Retry-After header when available;
falls back to exponential backoff (1s, 2s, 4s, ...).
"""
retry_after = resp.headers.get("Retry-After")
if retry_after is not None:
try:
return max(0.0, float(retry_after))
except (ValueError, TypeError):
pass
return float(2**attempt)
class BraveSearchToolBase(BaseTool, ABC):
    """Base class for Brave Search API interactions.

    Provides the shared behavior for all Brave Search tools: API-key
    resolution, header normalization and validation, per-instance rate
    limiting, retries on transient failures, and optional saving of
    results to a file.

    Individual tool subclasses must provide the following:
    - search_url
    - header_schema (pydantic model)
    - args_schema (pydantic model)
    - _refine_request_payload() -> dict[str, Any]
    - _refine_response() -> Any
    """

    # Endpoint URL this tool targets.
    search_url: str
    # When True, return the unprocessed API JSON instead of the refined form.
    raw: bool = False
    # Pydantic schemas validating query parameters and request headers.
    args_schema: type[BaseModel]
    header_schema: type[BaseModel]
    # Tool options (legacy parameters)
    country: str | None = None
    save_file: bool = False
    n_results: int = 10
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="BRAVE_API_KEY",
                description="API key for Brave Search",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        *,
        api_key: str | None = None,
        headers: dict[str, Any] | None = None,
        requests_per_second: float = 1.0,
        save_file: bool = False,
        raw: bool = False,
        timeout: int = 30,
        **kwargs: Any,
    ):
        """Initialize the tool.

        Args:
            api_key: Brave API key; falls back to the BRAVE_API_KEY env var.
            headers: Extra HTTP headers, merged with defaults and validated
                against ``header_schema`` (keys are lower-cased).
            requests_per_second: Per-instance request rate cap; values <= 0
                disable rate limiting.
            save_file: When True, each response is written to a timestamped file.
            raw: When True, skip response refinement and return raw API JSON.
            timeout: HTTP request timeout in seconds.
            **kwargs: Forwarded to ``BaseTool``.

        Raises:
            ValueError: If no API key is available or headers fail validation.
        """
        super().__init__(**kwargs)
        self._api_key = api_key or os.environ.get("BRAVE_API_KEY")
        if not self._api_key:
            raise ValueError("BRAVE_API_KEY environment variable is required")
        self.raw = bool(raw)
        self._timeout = int(timeout)
        self.save_file = bool(save_file)
        self._requests_per_second = float(requests_per_second)
        self._headers = self._build_and_validate_headers(headers or {})
        # Per-instance rate limiting: each instance has its own clock and lock.
        # Total process rate is the sum of limits of instances you create.
        self._last_request_time: float = 0
        self._rate_limit_lock = threading.Lock()

    @property
    def api_key(self) -> str:
        """The resolved Brave API key."""
        return self._api_key

    @property
    def headers(self) -> dict[str, Any]:
        """The normalized, validated headers sent with every request."""
        return self._headers

    def set_headers(self, headers: dict[str, Any]) -> BraveSearchToolBase:
        """Merge additional headers (new keys win) and re-validate.

        Returns self to allow call chaining.
        """
        merged = {**self._headers, **{k.lower(): v for k, v in headers.items()}}
        self._headers = self._build_and_validate_headers(merged)
        return self

    def _build_and_validate_headers(self, headers: dict[str, Any]) -> dict[str, Any]:
        """Lower-case header keys, inject defaults, and validate the result.

        Injects the subscription token and an "accept: application/json"
        header when not already present, then validates the full mapping
        against ``header_schema``.

        Raises:
            ValueError: If the headers fail schema validation.
        """
        normalized = {k.lower(): v for k, v in headers.items()}
        normalized.setdefault("x-subscription-token", self._api_key)
        normalized.setdefault("accept", "application/json")
        try:
            self.header_schema(**normalized)
        except Exception as e:
            raise ValueError(f"Invalid headers: {e}") from e
        return normalized

    def _rate_limit(self) -> None:
        """Enforce minimum interval between requests for this instance. Thread-safe."""
        if self._requests_per_second <= 0:
            return
        min_interval = 1.0 / self._requests_per_second
        with self._rate_limit_lock:
            now = time.time()
            next_allowed = self._last_request_time + min_interval
            if now < next_allowed:
                # Sleep until the window reopens, then re-read the clock.
                time.sleep(next_allowed - now)
                now = time.time()
            self._last_request_time = now

    def _make_request(
        self, params: dict[str, Any], *, _max_retries: int = 3
    ) -> dict[str, Any]:
        """Execute an HTTP GET against the Brave Search API with retry logic.

        Args:
            params: Fully-refined query parameters for the endpoint.
            _max_retries: Maximum number of attempts (including the first).

        Returns:
            The decoded JSON response body.

        Raises:
            RuntimeError: On connection failure, timeout, invalid JSON in a
                successful response, or a non-retryable/exhausted error response.
        """
        last_resp: requests.Response | None = None
        # Retry the request up to _max_retries times
        for attempt in range(_max_retries):
            self._rate_limit()
            # Make the request
            try:
                resp = requests.get(
                    self.search_url,
                    headers=self._headers,
                    params=params,
                    timeout=self._timeout,
                )
            except requests.ConnectionError as exc:
                raise RuntimeError(
                    f"Brave Search API connection failed: {exc}"
                ) from exc
            except requests.Timeout as exc:
                raise RuntimeError(
                    f"Brave Search API request timed out after {self._timeout}s: {exc}"
                ) from exc
            # Log the rate limit headers and request details
            logger.debug(
                "Brave Search API request: %s %s -> %d",
                "GET",
                resp.url,
                resp.status_code,
            )
            # Response was OK, return the JSON body
            if resp.ok:
                try:
                    return resp.json()
                except ValueError as exc:
                    raise RuntimeError(
                        f"Brave Search API returned invalid JSON (HTTP {resp.status_code}): {exc}"
                    ) from exc
            # Response was not OK, but is retryable
            # (e.g., 429 Too Many Requests, 500 Internal Server Error)
            if _is_retryable(resp) and attempt < _max_retries - 1:
                delay = _retry_delay(resp, attempt)
                logger.warning(
                    "Brave Search API returned %d. Retrying in %.1fs (attempt %d/%d)",
                    resp.status_code,
                    delay,
                    attempt + 1,
                    _max_retries,
                )
                time.sleep(delay)
                last_resp = resp
                continue
            # Response was not OK, nor was it retryable
            # (e.g., 422 Unprocessable Entity, 400 Bad Request (OPTION_NOT_IN_PLAN))
            _raise_for_error(resp)
        # All retries exhausted
        _raise_for_error(last_resp or resp)  # type: ignore[possibly-undefined]
        return {}  # unreachable (here to satisfy the type checker and linter)

    def _run(self, q: str | None = None, **params: Any) -> Any:
        """Validate parameters, call the API, and return the (refined) response.

        Args:
            q: Search query. May also arrive in ``params`` as "q", "query",
                or "search_query".
            **params: Endpoint-specific query parameters; keys not present in
                ``args_schema`` are silently dropped before validation.

        Raises:
            ValueError: If the parameters fail schema validation.
            RuntimeError: Propagated from ``_make_request`` on API failure.
        """
        # Allow positional usage: tool.run("latest Brave browser features")
        if q is not None:
            params["q"] = q
        params = self._common_payload_refinement(params)
        # Validate only schema fields
        schema_keys = self.args_schema.model_fields
        payload_in = {k: v for k, v in params.items() if k in schema_keys}
        try:
            validated = self.args_schema(**payload_in)
        except Exception as e:
            raise ValueError(f"Invalid parameters: {e}") from e
        # The subclass may have additional refinements to apply to the payload, such as goggles or other parameters
        payload = self._refine_request_payload(validated.model_dump(exclude_none=True))
        response = self._make_request(payload)
        if not self.raw:
            response = self._refine_response(response)
        if self.save_file:
            _save_results_to_file(json.dumps(response, indent=2))
        return response

    @abstractmethod
    def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]:
        """Subclass must implement: transform validated params dict into API request params."""
        raise NotImplementedError

    @abstractmethod
    def _refine_response(self, response: dict[str, Any]) -> Any:
        """Subclass must implement: transform response dict into a more useful format."""
        raise NotImplementedError

    # Placeholder values an LLM tends to supply for optional fields it did not
    # actually intend to set (see _common_payload_refinement).
    _EMPTY_VALUES: ClassVar[tuple[None, str, str, list[Any]]] = (None, "", "null", [])

    def _common_payload_refinement(self, params: dict[str, Any]) -> dict[str, Any]:
        """Common payload refinement for all tools."""
        # crewAI's schema pipeline (ensure_all_properties_required in
        # pydantic_schema_utils.py) marks every property as required so
        # that OpenAI strict-mode structured outputs work correctly.
        # The side-effect is that the LLM fills in *every* parameter —
        # even truly optional ones — using placeholder values such as
        # None, "", "null", or []. Only optional fields are affected,
        # so we limit the check to those.
        fields = self.args_schema.model_fields
        params = {
            k: v
            for k, v in params.items()
            # Permit custom and required fields, and fields with non-empty values
            if k not in fields or fields[k].is_required() or v not in self._EMPTY_VALUES
        }
        # Make sure params has "q" for query instead of "query" or "search_query"
        query = params.get("query") or params.get("search_query")
        if query is not None and "q" not in params:
            params["q"] = query
        params.pop("query", None)
        params.pop("search_query", None)
        # If "count" was not explicitly provided, use n_results
        # (only when the schema actually supports a "count" field)
        if "count" in self.args_schema.model_fields:
            if "count" not in params and self.n_results is not None:
                params["count"] = self.n_results
        # If "country" was not explicitly provided, but self.country is set, use it
        # (only when the schema actually supports a "country" field)
        if "country" in self.args_schema.model_fields:
            if "country" not in params and self.country is not None:
                params["country"] = self.country
        return params

View File

@@ -1,42 +0,0 @@
from typing import Any
from pydantic import BaseModel
from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase
from crewai_tools.tools.brave_search_tool.schemas import (
ImageSearchHeaders,
ImageSearchParams,
)
class BraveImageSearchTool(BraveSearchToolBase):
    """A tool that performs image searches using the Brave Search API."""

    name: str = "Brave Image Search"
    args_schema: type[BaseModel] = ImageSearchParams
    header_schema: type[BaseModel] = ImageSearchHeaders
    description: str = (
        "A tool that performs image searches using the Brave Search API. "
        "Results are returned as structured JSON data."
    )
    search_url: str = "https://api.search.brave.com/res/v1/images/search"

    def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]:
        # No image-specific request shaping needed; forward unchanged.
        return params

    def _refine_response(self, response: dict[str, Any]) -> list[dict[str, Any]]:
        """Condense each raw image result to title, URL and "WxH" dimensions."""
        condensed: list[dict[str, Any]] = []
        for item in response.get("results", []):
            props = item.get("properties", {})
            width = props.get("width")
            height = props.get("height")
            # Dimensions are reported only when both sides are present/non-zero.
            dimensions = f"{width}x{height}" if width and height else None
            condensed.append(
                {
                    "title": item.get("title"),
                    "url": props.get("url"),
                    "dimensions": dimensions,
                }
            )
        return condensed

View File

@@ -1,32 +0,0 @@
from typing import Any
from pydantic import BaseModel
from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase
from crewai_tools.tools.brave_search_tool.response_types import LLMContext
from crewai_tools.tools.brave_search_tool.schemas import (
LLMContextHeaders,
LLMContextParams,
)
class BraveLLMContextTool(BraveSearchToolBase):
    """A tool that retrieves context for LLM usage from the Brave Search API."""

    name: str = "Brave LLM Context"
    args_schema: type[BaseModel] = LLMContextParams
    header_schema: type[BaseModel] = LLMContextHeaders
    description: str = (
        "A tool that retrieves context for LLM usage from the Brave Search API. "
        "Results are returned as structured JSON data."
    )
    search_url: str = "https://api.search.brave.com/res/v1/llm/context"

    def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]:
        # No endpoint-specific request shaping is required; pass through as-is.
        return params

    def _refine_response(self, response: LLMContext.Response) -> LLMContext.Response:
        """The LLM Context response schema is fairly simple. Return as is."""
        return response

View File

@@ -1,109 +0,0 @@
from typing import Any
from pydantic import BaseModel
from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase
from crewai_tools.tools.brave_search_tool.response_types import LocalPOIs
from crewai_tools.tools.brave_search_tool.schemas import (
LocalPOIsDescriptionHeaders,
LocalPOIsDescriptionParams,
LocalPOIsHeaders,
LocalPOIsParams,
)
DayOpeningHours = LocalPOIs.DayOpeningHours
OpeningHours = LocalPOIs.OpeningHours
LocationResult = LocalPOIs.LocationResult
LocalPOIsResponse = LocalPOIs.Response
def _flatten_slots(slots: list[DayOpeningHours]) -> list[dict[str, str]]:
"""Convert a list of DayOpeningHours dicts into simplified entries."""
return [
{
"day": slot["full_name"].lower(),
"opens": slot["opens"],
"closes": slot["closes"],
}
for slot in slots
]
def _simplify_opening_hours(result: LocationResult) -> list[dict[str, str]] | None:
"""Collapse opening_hours into a flat list of {day, opens, closes} dicts."""
hours = result.get("opening_hours")
if not hours:
return None
entries: list[dict[str, str]] = []
current = hours.get("current_day")
if current:
entries.extend(_flatten_slots(current))
days = hours.get("days")
if days:
for day_slots in days:
entries.extend(_flatten_slots(day_slots))
return entries or None
class BraveLocalPOIsTool(BraveSearchToolBase):
    """A tool that retrieves local POIs using the Brave Search API."""

    name: str = "Brave Local POIs"
    args_schema: type[BaseModel] = LocalPOIsParams
    header_schema: type[BaseModel] = LocalPOIsHeaders
    description: str = (
        "A tool that retrieves local POIs using the Brave Search API. "
        "Results are returned as structured JSON data."
    )
    search_url: str = "https://api.search.brave.com/res/v1/local/pois"

    def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]:
        # POI lookups need no extra request shaping.
        return params

    def _refine_response(self, response: LocalPOIsResponse) -> list[dict[str, Any]]:
        """Condense each location result to its most useful fields."""
        condensed: list[dict[str, Any]] = []
        for entry in response.get("results", []):
            contact = entry.get("contact", {})
            condensed.append(
                {
                    "title": entry.get("title"),
                    "url": entry.get("url"),
                    "description": entry.get("description"),
                    "address": entry.get("postal_address", {}).get("displayAddress"),
                    # Prefer a phone number, fall back to email, else None.
                    "contact": contact.get("telephone") or contact.get("email") or None,
                    "opening_hours": _simplify_opening_hours(entry),
                }
            )
        return condensed
class BraveLocalPOIsDescriptionTool(BraveSearchToolBase):
    """A tool that retrieves AI-generated descriptions for local POIs using the Brave Search API."""

    name: str = "Brave Local POI Descriptions"
    args_schema: type[BaseModel] = LocalPOIsDescriptionParams
    header_schema: type[BaseModel] = LocalPOIsDescriptionHeaders
    description: str = (
        "A tool that retrieves AI-generated descriptions for local POIs using the Brave Search API. "
        "Results are returned as structured JSON data."
    )
    search_url: str = "https://api.search.brave.com/res/v1/local/descriptions"

    def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]:
        # Description lookups need no extra request shaping.
        return params

    def _refine_response(self, response: LocalPOIsResponse) -> list[dict[str, Any]]:
        """Keep only each POI's id and its generated description."""
        return [
            {key: entry.get(key) for key in ("id", "description")}
            for entry in response.get("results", [])
        ]

View File

@@ -1,39 +0,0 @@
from typing import Any
from pydantic import BaseModel
from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase
from crewai_tools.tools.brave_search_tool.schemas import (
NewsSearchHeaders,
NewsSearchParams,
)
class BraveNewsSearchTool(BraveSearchToolBase):
    """A tool that performs news searches using the Brave Search API."""

    name: str = "Brave News Search"
    args_schema: type[BaseModel] = NewsSearchParams
    header_schema: type[BaseModel] = NewsSearchHeaders
    description: str = (
        "A tool that performs news searches using the Brave Search API. "
        "Results are returned as structured JSON data."
    )
    search_url: str = "https://api.search.brave.com/res/v1/news/search"

    def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]:
        # No news-specific request shaping needed; forward unchanged.
        return params

    def _refine_response(self, response: dict[str, Any]) -> list[dict[str, Any]]:
        """Reduce each news result to url, title and description."""
        wanted = ("url", "title", "description")
        return [
            {field: article.get(field) for field in wanted}
            for article in response.get("results", [])
        ]

View File

@@ -10,13 +10,17 @@ from pydantic import BaseModel, Field
from pydantic.types import StringConstraints
import requests
from crewai_tools.tools.brave_search_tool.schemas import WebSearchParams
from crewai_tools.tools.brave_search_tool.base import _save_results_to_file
load_dotenv()
def _save_results_to_file(content: str) -> None:
"""Saves the search results to a file."""
filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt"
with open(filename, "w") as file:
file.write(content)
FreshnessPreset = Literal["pd", "pw", "pm", "py"]
FreshnessRange = Annotated[
str, StringConstraints(pattern=r"^\d{4}-\d{2}-\d{2}to\d{4}-\d{2}-\d{2}$")
@@ -25,6 +29,51 @@ Freshness = FreshnessPreset | FreshnessRange
SafeSearch = Literal["off", "moderate", "strict"]
class BraveSearchToolSchema(BaseModel):
"""Input for BraveSearchTool"""
query: str = Field(..., description="Search query to perform")
country: str | None = Field(
default=None,
description="Country code for geo-targeting (e.g., 'US', 'BR').",
)
search_language: str | None = Field(
default=None,
description="Language code for the search results (e.g., 'en', 'es').",
)
count: int | None = Field(
default=None,
description="The maximum number of results to return. Actual number may be less.",
)
offset: int | None = Field(
default=None, description="Skip the first N result sets/pages. Max is 9."
)
safesearch: SafeSearch | None = Field(
default=None,
description="Filter out explicit content. Options: off/moderate/strict",
)
spellcheck: bool | None = Field(
default=None,
description="Attempt to correct spelling errors in the search query.",
)
freshness: Freshness | None = Field(
default=None,
description="Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD",
)
text_decorations: bool | None = Field(
default=None,
description="Include markup to highlight search terms in the results.",
)
extra_snippets: bool | None = Field(
default=None,
description="Include up to 5 text snippets for each page if possible.",
)
operators: bool | None = Field(
default=None,
description="Whether to apply search operators (e.g., site:example.com).",
)
# TODO: Extend support to additional endpoints (e.g., /images, /news, etc.)
class BraveSearchTool(BaseTool):
"""A tool that performs web searches using the Brave Search API."""
@@ -34,7 +83,7 @@ class BraveSearchTool(BaseTool):
"A tool that performs web searches using the Brave Search API. "
"Results are returned as structured JSON data."
)
args_schema: type[BaseModel] = WebSearchParams
args_schema: type[BaseModel] = BraveSearchToolSchema
search_url: str = "https://api.search.brave.com/res/v1/web/search"
n_results: int = 10
save_file: bool = False
@@ -71,8 +120,8 @@ class BraveSearchTool(BaseTool):
# Construct and send the request
try:
# Fallback to "query" or "search_query" for backwards compatibility
query = kwargs.get("q") or kwargs.get("query") or kwargs.get("search_query")
# Maintain both "search_query" and "query" for backwards compatibility
query = kwargs.get("search_query") or kwargs.get("query")
if not query:
raise ValueError("Query is required")
@@ -81,11 +130,8 @@ class BraveSearchTool(BaseTool):
if country := kwargs.get("country"):
payload["country"] = country
# Fallback to "search_language" for backwards compatibility
if search_lang := kwargs.get("search_lang") or kwargs.get(
"search_language"
):
payload["search_lang"] = search_lang
if search_language := kwargs.get("search_language"):
payload["search_language"] = search_language
# Fallback to deprecated n_results parameter if no count is provided
count = kwargs.get("count")

View File

@@ -1,39 +0,0 @@
from typing import Any
from pydantic import BaseModel
from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase
from crewai_tools.tools.brave_search_tool.schemas import (
VideoSearchHeaders,
VideoSearchParams,
)
class BraveVideoSearchTool(BraveSearchToolBase):
    """A tool that performs video searches using the Brave Search API."""

    name: str = "Brave Video Search"
    args_schema: type[BaseModel] = VideoSearchParams
    header_schema: type[BaseModel] = VideoSearchHeaders
    description: str = (
        "A tool that performs video searches using the Brave Search API. "
        "Results are returned as structured JSON data."
    )
    search_url: str = "https://api.search.brave.com/res/v1/videos/search"

    def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]:
        # No video-specific request shaping needed; forward unchanged.
        return params

    def _refine_response(self, response: dict[str, Any]) -> list[dict[str, Any]]:
        """Reduce each video result to url, title and description."""
        trimmed: list[dict[str, Any]] = []
        for video in response.get("results", []):
            trimmed.append(
                {
                    "url": video.get("url"),
                    "title": video.get("title"),
                    "description": video.get("description"),
                }
            )
        return trimmed

View File

@@ -1,45 +0,0 @@
from typing import Any
from pydantic import BaseModel
from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase
from crewai_tools.tools.brave_search_tool.schemas import (
WebSearchHeaders,
WebSearchParams,
)
class BraveWebSearchTool(BraveSearchToolBase):
    """A tool that performs web searches using the Brave Search API."""

    name: str = "Brave Web Search"
    args_schema: type[BaseModel] = WebSearchParams
    header_schema: type[BaseModel] = WebSearchHeaders
    description: str = (
        "A tool that performs web searches using the Brave Search API. "
        "Results are returned as structured JSON data."
    )
    search_url: str = "https://api.search.brave.com/res/v1/web/search"

    def _refine_request_payload(self, params: dict[str, Any]) -> dict[str, Any]:
        # No web-specific request shaping needed; forward unchanged.
        return params

    def _refine_response(self, response: dict[str, Any]) -> list[dict[str, Any]]:
        """Reduce each web hit to url/title plus its best available snippets."""
        condensed: list[dict[str, Any]] = []
        for hit in response.get("web", {}).get("results", []):
            snippets = hit.get("extra_snippets") or []
            if not snippets:
                # Fall back to the plain description when no snippets exist.
                description = hit.get("description")
                if description:
                    snippets = [description]
            condensed.append(
                {
                    "url": hit.get("url"),
                    "title": hit.get("title"),
                    "snippets": snippets,
                }
            )
        return condensed

View File

@@ -1,67 +0,0 @@
from __future__ import annotations
from typing import Literal, TypedDict
class LocalPOIs:
    """Namespace grouping the TypedDicts for Brave Local POIs API payloads."""

    class PostalAddress(TypedDict, total=False):
        # Schema.org-style postal address attached to a location result.
        type: Literal["PostalAddress"]
        country: str
        postalCode: str
        streetAddress: str
        addressRegion: str
        addressLocality: str
        displayAddress: str

    class DayOpeningHours(TypedDict):
        # One opening interval for a single day; all four keys are required.
        abbr_name: str
        full_name: str
        opens: str
        closes: str

    class OpeningHours(TypedDict, total=False):
        # current_day: today's intervals; days: one slot-list per weekday.
        current_day: list[LocalPOIs.DayOpeningHours]
        days: list[list[LocalPOIs.DayOpeningHours]]

    class LocationResult(TypedDict, total=False):
        # A single POI entry; id is short-lived per the API (used to fetch
        # descriptions) — NOTE(review): validity window not shown here.
        provider_url: str
        title: str
        url: str
        id: str | None
        opening_hours: LocalPOIs.OpeningHours | None
        postal_address: LocalPOIs.PostalAddress | None

    class Response(TypedDict, total=False):
        # Top-level payload of the /local/pois endpoint.
        type: Literal["local_pois"]
        results: list[LocalPOIs.LocationResult]
class LLMContext:
    """Namespace grouping the TypedDicts for Brave LLM Context API payloads."""

    class LLMContextItem(TypedDict, total=False):
        # Generic grounding item: text snippets with their source page.
        snippets: list[str]
        title: str
        url: str

    class LLMContextMapItem(TypedDict, total=False):
        # Map-based grounding item; adds a place name.
        name: str
        snippets: list[str]
        title: str
        url: str

    class LLMContextPOIItem(TypedDict, total=False):
        # POI-based grounding item; adds a place name.
        name: str
        snippets: list[str]
        title: str
        url: str

    class Grounding(TypedDict, total=False):
        # Note: poi is a single item while generic/map are lists.
        generic: list[LLMContext.LLMContextItem]
        poi: LLMContext.LLMContextPOIItem
        map: list[LLMContext.LLMContextMapItem]

    class Sources(TypedDict, total=False):
        # Shape intentionally left open — no fields are pinned down here.
        pass

    class Response(TypedDict, total=False):
        # Top-level payload of the /llm/context endpoint.
        grounding: LLMContext.Grounding
        sources: LLMContext.Sources

View File

@@ -1,525 +0,0 @@
from typing import Annotated, Literal
from pydantic import BaseModel, Field
from pydantic.types import StringConstraints
# Common types shared by several endpoint parameter schemas.

# Measurement system for localized values.
Units = Literal["metric", "imperial"]
# Adult-content filtering level.
SafeSearch = Literal["off", "moderate", "strict"]
# Result freshness: one of the presets pd/pw/pm/py, or an explicit
# "YYYY-MM-DDtoYYYY-MM-DD" date range enforced by the regex below.
Freshness = (
    Literal["pd", "pw", "pm", "py"]
    | Annotated[
        str, StringConstraints(pattern=r"^\d{4}-\d{2}-\d{2}to\d{4}-\d{2}-\d{2}$")
    ]
)
# Allowed values for the web endpoint's result_filter parameter.
ResultFilter = list[
    Literal[
        "discussions",
        "faq",
        "infobox",
        "news",
        "query",
        "summarizer",
        "videos",
        "web",
        "locations",
    ]
]
class LLMContextParams(BaseModel):
"""Parameters for Brave LLM Context endpoint."""
q: str = Field(
description="Search query to perform",
min_length=1,
max_length=400,
)
country: str | None = Field(
default=None,
description="Country code for geo-targeting (e.g., 'US', 'BR').",
pattern=r"^[A-Z]{2}$",
)
search_lang: str | None = Field(
default=None,
description="Language code for the search results (e.g., 'en', 'es').",
pattern=r"^[a-z]{2}$",
)
count: int | None = Field(
default=None,
description="The maximum number of results to return. Actual number may be less.",
ge=1,
le=50,
)
maximum_number_of_urls: int | None = Field(
default=None,
description="The maximum number of URLs to include in the context.",
ge=1,
le=50,
)
maximum_number_of_tokens: int | None = Field(
default=None,
description="The approximate maximum number of tokens to include in the context.",
ge=1,
le=32768,
)
maximum_number_of_snippets: int | None = Field(
default=None,
description="The maximum number of different snippets to include in the context.",
ge=1,
le=100,
)
context_threshold_mode: (
Literal["disabled", "strict", "lenient", "balanced"] | None
) = Field(
default=None,
description="The mode to use for the context thresholding.",
)
maximum_number_of_tokens_per_url: int | None = Field(
default=None,
description="The maximum number of tokens to include for each URL in the context.",
ge=1,
le=8192,
)
maximum_number_of_snippets_per_url: int | None = Field(
default=None,
description="The maximum number of snippets to include per URL.",
ge=1,
le=100,
)
goggles: str | list[str] | None = Field(
default=None,
description="Goggles act as a custom re-ranking mechanism. Goggle source or URLs.",
)
enable_local: bool | None = Field(
default=None,
description="Whether to enable local recall. Not setting this value means auto-detect and uses local recall if any of the localization headers are provided.",
)
class WebSearchParams(BaseModel):
"""Parameters for Brave Web Search endpoint."""
q: str = Field(
description="Search query to perform",
min_length=1,
max_length=400,
)
country: str | None = Field(
default=None,
description="Country code for geo-targeting (e.g., 'US', 'BR').",
pattern=r"^[A-Z]{2}$",
)
search_lang: str | None = Field(
default=None,
description="Language code for the search results (e.g., 'en', 'es').",
pattern=r"^[a-z]{2}$",
)
ui_lang: str | None = Field(
default=None,
description="Language code for the user interface (e.g., 'en-US', 'es-AR').",
pattern=r"^[a-z]{2}-[A-Z]{2}$",
)
count: int | None = Field(
default=None,
description="The maximum number of results to return. Actual number may be less.",
ge=1,
le=20,
)
offset: int | None = Field(
default=None,
description="Skip the first N result sets/pages. Max is 9.",
ge=0,
le=9,
)
safesearch: Literal["off", "moderate", "strict"] | None = Field(
default=None,
description="Filter out explicit content. Options: off/moderate/strict",
)
spellcheck: bool | None = Field(
default=None,
description="Attempt to correct spelling errors in the search query.",
)
freshness: Freshness | None = Field(
default=None,
description="Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD",
)
text_decorations: bool | None = Field(
default=None,
description="Include markup to highlight search terms in the results.",
)
extra_snippets: bool | None = Field(
default=None,
description="Include up to 5 text snippets for each page if possible.",
)
result_filter: ResultFilter | None = Field(
default=None,
description="Filter the results by type. Options: discussions/faq/infobox/news/query/summarizer/videos/web/locations. Note: The `count` parameter is applied only to the `web` results.",
)
units: Units | None = Field(
default=None,
description="The units to use for the results. Options: metric/imperial",
)
goggles: str | list[str] | None = Field(
default=None,
description="Goggles act as a custom re-ranking mechanism. Goggle source or URLs.",
)
summary: bool | None = Field(
default=None,
description="Whether to generate a summarizer ID for the results.",
)
enable_rich_callback: bool | None = Field(
default=None,
description="Whether to enable rich callbacks for the results. Requires Pro level subscription.",
)
include_fetch_metadata: bool | None = Field(
default=None,
description="Whether to include fetch metadata (e.g., last fetch time) in the results.",
)
operators: bool | None = Field(
default=None,
description="Whether to apply search operators (e.g., site:example.com).",
)
class LocalPOIsParams(BaseModel):
"""Parameters for Brave Local POIs endpoint."""
ids: list[str] = Field(
description="List of POI IDs to retrieve. Maximum of 20. IDs are valid for 8 hours.",
min_length=1,
max_length=20,
)
search_lang: str | None = Field(
default=None,
description="Language code for the search results (e.g., 'en', 'es').",
pattern=r"^[a-z]{2}$",
)
ui_lang: str | None = Field(
default=None,
description="Language code for the user interface (e.g., 'en-US', 'es-AR').",
pattern=r"^[a-z]{2}-[A-Z]{2}$",
)
units: Units | None = Field(
default=None,
description="The units to use for the results. Options: metric/imperial",
)
class LocalPOIsDescriptionParams(BaseModel):
"""Parameters for Brave Local POI Descriptions endpoint."""
ids: list[str] = Field(
description="List of POI IDs to retrieve. Maximum of 20. IDs are valid for 8 hours.",
min_length=1,
max_length=20,
)
class ImageSearchParams(BaseModel):
"""Parameters for Brave Image Search endpoint."""
q: str = Field(
description="Search query to perform",
min_length=1,
max_length=400,
)
search_lang: str | None = Field(
default=None,
description="Language code for the search results (e.g., 'en', 'es').",
pattern=r"^[a-z]{2}$",
)
country: str | None = Field(
default=None,
description="Country code for geo-targeting (e.g., 'US', 'BR').",
pattern=r"^[A-Z]{2}$",
)
safesearch: Literal["off", "strict"] | None = Field(
default=None,
description="Filter out explicit content. Default is strict.",
)
count: int | None = Field(
default=None,
description="The maximum number of results to return.",
ge=1,
le=200,
)
spellcheck: bool | None = Field(
default=None,
description="Attempt to correct spelling errors in the search query.",
)
class VideoSearchParams(BaseModel):
"""Parameters for Brave Video Search endpoint."""
q: str = Field(
description="Search query to perform",
min_length=1,
max_length=400,
)
search_lang: str | None = Field(
default=None,
description="Language code for the search results (e.g., 'en', 'es').",
pattern=r"^[a-z]{2}$",
)
ui_lang: str | None = Field(
default=None,
description="Language code for the user interface (e.g., 'en-US', 'es-AR').",
pattern=r"^[a-z]{2}-[A-Z]{2}$",
)
country: str | None = Field(
default=None,
description="Country code for geo-targeting (e.g., 'US', 'BR').",
pattern=r"^[A-Z]{2}$",
)
safesearch: SafeSearch | None = Field(
default=None,
description="Filter out explicit content. Options: off/moderate/strict",
)
count: int | None = Field(
default=None,
description="The maximum number of results to return.",
ge=1,
le=50,
)
offset: int | None = Field(
default=None,
description="Skip the first N result sets/pages. Max is 9.",
ge=0,
le=9,
)
spellcheck: bool | None = Field(
default=None,
description="Attempt to correct spelling errors in the search query.",
)
freshness: Freshness | None = Field(
default=None,
description="Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD",
)
include_fetch_metadata: bool | None = Field(
default=None,
description="Whether to include fetch metadata (e.g., last fetch time) in the results.",
)
operators: bool | None = Field(
default=None,
description="Whether to apply search operators (e.g., site:example.com).",
)
class NewsSearchParams(BaseModel):
"""Parameters for Brave News Search endpoint."""
q: str = Field(
description="Search query to perform",
min_length=1,
max_length=400,
)
search_lang: str | None = Field(
default=None,
description="Language code for the search results (e.g., 'en', 'es').",
pattern=r"^[a-z]{2}$",
)
ui_lang: str | None = Field(
default=None,
description="Language code for the user interface (e.g., 'en-US', 'es-AR').",
pattern=r"^[a-z]{2}-[A-Z]{2}$",
)
country: str | None = Field(
default=None,
description="Country code for geo-targeting (e.g., 'US', 'BR').",
pattern=r"^[A-Z]{2}$",
)
safesearch: Literal["off", "moderate", "strict"] | None = Field(
default=None,
description="Filter out explicit content. Options: off/moderate/strict",
)
count: int | None = Field(
default=None,
description="The maximum number of results to return.",
ge=1,
le=50,
)
offset: int | None = Field(
default=None,
description="Skip the first N result sets/pages. Max is 9.",
ge=0,
le=9,
)
spellcheck: bool | None = Field(
default=None,
description="Attempt to correct spelling errors in the search query.",
)
freshness: Freshness | None = Field(
default=None,
description="Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD",
)
extra_snippets: bool | None = Field(
default=None,
description="Include up to 5 text snippets for each page if possible.",
)
goggles: str | list[str] | None = Field(
default=None,
description="Goggles act as a custom re-ranking mechanism. Goggle source or URLs.",
)
include_fetch_metadata: bool | None = Field(
default=None,
description="Whether to include fetch metadata in the results.",
)
operators: bool | None = Field(
default=None,
description="Whether to apply search operators (e.g., site:example.com).",
)
class BaseSearchHeaders(BaseModel):
    """Common headers for Brave Search endpoints.

    Field names carry HTTP header aliases (e.g. "x-subscription-token").
    NOTE(review): presumably instances are populated and dumped by alias so
    the wire header names are used — confirm against the request builder.
    """

    x_subscription_token: str = Field(
        alias="x-subscription-token",
        description="API key for Brave Search",
    )
    api_version: str | None = Field(
        alias="api-version",
        default=None,
        description="API version to use. Default is latest available.",
        pattern=r"^\d{4}-\d{2}-\d{2}$",  # YYYY-MM-DD
    )
    accept: Literal["application/json"] | Literal["*/*"] | None = Field(
        default=None,
        description="Accept header for the request.",
    )
    cache_control: Literal["no-cache"] | None = Field(
        alias="cache-control",
        default=None,
        description="Cache control header for the request.",
    )
    user_agent: str | None = Field(
        alias="user-agent",
        default=None,
        description="User agent for the request.",
    )
class LLMContextHeaders(BaseSearchHeaders):
"""Headers for Brave LLM Context endpoint."""
x_loc_lat: float | None = Field(
alias="x-loc-lat",
default=None,
description="Latitude of the user's location.",
ge=-90.0,
le=90.0,
)
x_loc_long: float | None = Field(
alias="x-loc-long",
default=None,
description="Longitude of the user's location.",
ge=-180.0,
le=180.0,
)
x_loc_city: str | None = Field(
alias="x-loc-city",
default=None,
description="City of the user's location.",
)
x_loc_state: str | None = Field(
alias="x-loc-state",
default=None,
description="State of the user's location.",
)
x_loc_state_name: str | None = Field(
alias="x-loc-state-name",
default=None,
description="Name of the state of the user's location.",
)
x_loc_country: str | None = Field(
alias="x-loc-country",
default=None,
description="The ISO 3166-1 alpha-2 country code of the user's location.",
)
class LocalPOIsHeaders(BaseSearchHeaders):
"""Headers for Brave Local POIs endpoint."""
x_loc_lat: float | None = Field(
alias="x-loc-lat",
default=None,
description="Latitude of the user's location.",
ge=-90.0,
le=90.0,
)
x_loc_long: float | None = Field(
alias="x-loc-long",
default=None,
description="Longitude of the user's location.",
ge=-180.0,
le=180.0,
)
class LocalPOIsDescriptionHeaders(BaseSearchHeaders):
    """Headers for Brave Local POI Descriptions endpoint (no extras beyond the base)."""

class VideoSearchHeaders(BaseSearchHeaders):
    """Headers for Brave Video Search endpoint (no extras beyond the base)."""

class ImageSearchHeaders(BaseSearchHeaders):
    """Headers for Brave Image Search endpoint (no extras beyond the base)."""

class NewsSearchHeaders(BaseSearchHeaders):
    """Headers for Brave News Search endpoint (no extras beyond the base)."""
class WebSearchHeaders(BaseSearchHeaders):
"""Headers for Brave Web Search endpoint."""
x_loc_lat: float | None = Field(
alias="x-loc-lat",
default=None,
description="Latitude of the user's location.",
ge=-90.0,
le=90.0,
)
x_loc_long: float | None = Field(
alias="x-loc-long",
default=None,
description="Longitude of the user's location.",
ge=-180.0,
le=180.0,
)
x_loc_timezone: str | None = Field(
alias="x-loc-timezone",
default=None,
description="Timezone of the user's location.",
)
x_loc_city: str | None = Field(
alias="x-loc-city",
default=None,
description="City of the user's location.",
)
x_loc_state: str | None = Field(
alias="x-loc-state",
default=None,
description="State of the user's location.",
)
x_loc_state_name: str | None = Field(
alias="x-loc-state-name",
default=None,
description="Name of the state of the user's location.",
)
x_loc_country: str | None = Field(
alias="x-loc-country",
default=None,
description="The ISO 3166-1 alpha-2 country code of the user's location.",
)
x_loc_postal_code: str | None = Field(
alias="x-loc-postal-code",
default=None,
description="The postal code of the user's location.",
)

View File

@@ -1,777 +1,80 @@
import os
from unittest.mock import MagicMock, patch
import json
from unittest.mock import patch
import pytest
import requests as requests_lib
from crewai_tools.tools.brave_search_tool.base import BraveSearchToolBase
from crewai_tools.tools.brave_search_tool.brave_web_tool import BraveWebSearchTool
from crewai_tools.tools.brave_search_tool.brave_image_tool import BraveImageSearchTool
from crewai_tools.tools.brave_search_tool.brave_news_tool import BraveNewsSearchTool
from crewai_tools.tools.brave_search_tool.brave_video_tool import BraveVideoSearchTool
from crewai_tools.tools.brave_search_tool.brave_llm_context_tool import (
BraveLLMContextTool,
)
from crewai_tools.tools.brave_search_tool.brave_local_pois_tool import (
BraveLocalPOIsTool,
BraveLocalPOIsDescriptionTool,
)
from crewai_tools.tools.brave_search_tool.schemas import (
WebSearchParams,
WebSearchHeaders,
ImageSearchParams,
ImageSearchHeaders,
NewsSearchParams,
NewsSearchHeaders,
VideoSearchParams,
VideoSearchHeaders,
LLMContextParams,
LLMContextHeaders,
LocalPOIsParams,
LocalPOIsHeaders,
LocalPOIsDescriptionParams,
LocalPOIsDescriptionHeaders,
)
def _mock_response(
    status_code: int = 200,
    json_data: dict | None = None,
    headers: dict | None = None,
    text: str = "",
) -> MagicMock:
    """Build a ``requests.Response``-like mock with the attributes used by ``_make_request``."""
    resp = MagicMock(spec=requests_lib.Response)
    resp.status_code = status_code
    # Mirror requests semantics: any 2xx/3xx status counts as "ok".
    resp.ok = 200 <= status_code < 400
    resp.url = "https://api.search.brave.com/res/v1/web/search?q=test"
    # Fall back to a stringified payload when no explicit body text is given.
    resp.text = text or (str(json_data) if json_data else "")
    resp.headers = headers or {}
    # .json() returns the payload, or {} when none was supplied.
    resp.json.return_value = json_data if json_data is not None else {}
    return resp
# Fixtures
@pytest.fixture(autouse=True)
def _brave_env_and_rate_limit():
"""Set BRAVE_API_KEY for every test. Rate limiting is per-instance (each tool starts with a fresh clock)."""
with patch.dict(os.environ, {"BRAVE_API_KEY": "test-api-key"}):
yield
from crewai_tools.tools.brave_search_tool.brave_search_tool import BraveSearchTool
@pytest.fixture
def web_tool():
return BraveWebSearchTool()
def brave_tool():
return BraveSearchTool(n_results=2)
@pytest.fixture
def image_tool():
return BraveImageSearchTool()
@pytest.fixture
def news_tool():
return BraveNewsSearchTool()
@pytest.fixture
def video_tool():
return BraveVideoSearchTool()
# Initialization
ALL_TOOL_CLASSES = [
BraveWebSearchTool,
BraveImageSearchTool,
BraveNewsSearchTool,
BraveVideoSearchTool,
BraveLLMContextTool,
BraveLocalPOIsTool,
BraveLocalPOIsDescriptionTool,
]
@pytest.mark.parametrize("tool_cls", ALL_TOOL_CLASSES)
def test_instantiation_with_env_var(tool_cls):
"""Each tool can be created when BRAVE_API_KEY is in the environment."""
tool = tool_cls()
assert tool.api_key == "test-api-key"
@pytest.mark.parametrize("tool_cls", ALL_TOOL_CLASSES)
def test_instantiation_with_explicit_key(tool_cls):
"""An explicit api_key takes precedence over the environment."""
tool = tool_cls(api_key="explicit-key")
assert tool.api_key == "explicit-key"
def test_missing_api_key_raises():
    """Constructing a tool without BRAVE_API_KEY anywhere raises ValueError."""
    with patch.dict(os.environ, {}, clear=True), pytest.raises(
        ValueError, match="BRAVE_API_KEY"
    ):
        BraveWebSearchTool()
def test_default_attributes():
    """File saving is opt-in: save_file defaults to False."""
    assert BraveWebSearchTool().save_file is False
def test_brave_tool_initialization():
    """Legacy BraveSearchTool default configuration stays stable."""
    tool = BraveSearchTool()
    assert (tool.n_results, tool._timeout) == (10, 30)
    assert tool._requests_per_second == 1.0
    assert tool.raw is False
    assert tool.save_file is False
def test_custom_constructor_args():
    """Every constructor argument overrides its corresponding default."""
    overrides = {
        "save_file": True,
        "timeout": 60,
        "n_results": 5,
        "requests_per_second": 0.5,
        "raw": True,
    }
    tool = BraveWebSearchTool(**overrides)
    assert tool.save_file is True
    assert tool.raw is True
    assert tool._timeout == 60
    assert tool.n_results == 5
    assert tool._requests_per_second == 0.5
# Headers
def test_default_headers():
    """Auth token and JSON accept header are present out of the box."""
    headers = BraveWebSearchTool().headers
    assert headers["x-subscription-token"] == "test-api-key"
    assert headers["accept"] == "application/json"
def test_set_headers_merges_and_normalizes():
    """set_headers lower-cases incoming names and preserves existing entries."""
    tool = BraveWebSearchTool()
    tool.set_headers({"Cache-Control": "no-cache"})
    for name, expected in (
        ("cache-control", "no-cache"),
        ("x-subscription-token", "test-api-key"),
    ):
        assert tool.headers[name] == expected
def test_set_headers_returns_self_for_chaining():
    """set_headers supports fluent chaining by returning the tool itself."""
    tool = BraveWebSearchTool()
    result = tool.set_headers({"Cache-Control": "no-cache"})
    assert result is tool
def test_invalid_header_value_raises():
    """A header value rejected by the header schema raises ValueError."""
    with pytest.raises(ValueError, match="Invalid headers"):
        BraveImageSearchTool().set_headers({"Accept": "text/xml"})
# Endpoint & Schema Wiring
# One row per tool: (class, endpoint URL, query-params schema, headers schema).
@pytest.mark.parametrize(
    "tool_cls, expected_url, expected_params, expected_headers",
    [
        (
            BraveWebSearchTool,
            "https://api.search.brave.com/res/v1/web/search",
            WebSearchParams,
            WebSearchHeaders,
        ),
        (
            BraveImageSearchTool,
            "https://api.search.brave.com/res/v1/images/search",
            ImageSearchParams,
            ImageSearchHeaders,
        ),
        (
            BraveNewsSearchTool,
            "https://api.search.brave.com/res/v1/news/search",
            NewsSearchParams,
            NewsSearchHeaders,
        ),
        (
            BraveVideoSearchTool,
            "https://api.search.brave.com/res/v1/videos/search",
            VideoSearchParams,
            VideoSearchHeaders,
        ),
        (
            BraveLLMContextTool,
            "https://api.search.brave.com/res/v1/llm/context",
            LLMContextParams,
            LLMContextHeaders,
        ),
        (
            BraveLocalPOIsTool,
            "https://api.search.brave.com/res/v1/local/pois",
            LocalPOIsParams,
            LocalPOIsHeaders,
        ),
        (
            BraveLocalPOIsDescriptionTool,
            "https://api.search.brave.com/res/v1/local/descriptions",
            LocalPOIsDescriptionParams,
            LocalPOIsDescriptionHeaders,
        ),
    ],
)
def test_tool_wiring(tool_cls, expected_url, expected_params, expected_headers):
    """Each tool targets its Brave endpoint and declares the matching schemas."""
    tool = tool_cls()
    assert tool.search_url == expected_url
    assert tool.args_schema is expected_params
    assert tool.header_schema is expected_headers
# Payload Refinement (e.g., `query` -> `q`, `count` fallback, param pass-through)
def test_web_refine_request_payload_passes_all_params(web_tool):
    """All supported web-search params survive refinement; `query` is renamed to `q`."""
    raw = {
        "query": "test",
        "country": "US",
        "search_lang": "en",
        "count": 5,
        "offset": 2,
        "safesearch": "moderate",
        "freshness": "pw",
    }
    refined = web_tool._refine_request_payload(web_tool._common_payload_refinement(raw))
    assert refined["q"] == "test"
    assert "query" not in refined
    # Every other key passes through unchanged.
    for key in ("count", "country", "search_lang", "offset", "safesearch", "freshness"):
        assert refined[key] == raw[key]
def test_image_refine_request_payload_passes_all_params(image_tool):
    """Image-search params survive refinement; `query` is renamed to `q`."""
    raw = {
        "query": "cat photos",
        "country": "US",
        "search_lang": "en",
        "safesearch": "strict",
        "count": 50,
        "spellcheck": True,
    }
    refined = image_tool._refine_request_payload(
        image_tool._common_payload_refinement(raw)
    )
    assert refined["q"] == "cat photos"
    assert "query" not in refined
    assert refined["spellcheck"] is True
    for key in ("country", "safesearch", "count"):
        assert refined[key] == raw[key]
def test_news_refine_request_payload_passes_all_params(news_tool):
    """News-search params survive refinement; `query` is renamed to `q`."""
    raw = {
        "query": "breaking news",
        "country": "US",
        "count": 10,
        "offset": 1,
        "freshness": "pd",
        "extra_snippets": True,
    }
    refined = news_tool._refine_request_payload(
        news_tool._common_payload_refinement(raw)
    )
    assert refined["q"] == "breaking news"
    assert "query" not in refined
    assert refined["extra_snippets"] is True
    for key in ("country", "offset", "freshness"):
        assert refined[key] == raw[key]
def test_video_refine_request_payload_passes_all_params(video_tool):
    """Video-search params survive refinement; `query` is renamed to `q`."""
    raw = {
        "query": "tutorial",
        "country": "US",
        "count": 25,
        "offset": 0,
        "safesearch": "strict",
        "freshness": "pm",
    }
    refined = video_tool._refine_request_payload(
        video_tool._common_payload_refinement(raw)
    )
    assert refined["q"] == "tutorial"
    assert "query" not in refined
    for key in ("country", "offset", "freshness"):
        assert refined[key] == raw[key]
def test_legacy_constructor_params_flow_into_query_params():
    """The legacy n_results and country constructor params are applied as defaults
    when count/country are not explicitly provided at call time."""
    tool = BraveWebSearchTool(n_results=3, country="BR")
    refined = tool._common_payload_refinement({"query": "test"})
    assert (refined["count"], refined["country"]) == (3, "BR")
def test_legacy_constructor_params_do_not_override_explicit_query_params():
    """Explicit query-time count/country take precedence over constructor defaults."""
    tool = BraveWebSearchTool(n_results=3, country="BR")
    explicit = {"query": "test", "count": 10, "country": "US"}
    refined = tool._common_payload_refinement(explicit)
    assert (refined["count"], refined["country"]) == (10, "US")
def test_refine_request_payload_passes_multiple_goggles_as_multiple_params(web_tool):
    """A list of goggles survives refinement as a list (sent as repeated query params)."""
    payload = {"query": "test", "goggles": ["goggle1", "goggle2"]}
    refined = web_tool._refine_request_payload(payload)
    assert refined["goggles"] == ["goggle1", "goggle2"]
# Null-like / empty value stripping
#
# crewAI's ensure_all_properties_required (pydantic_schema_utils.py) marks
# every schema property as required for OpenAI strict-mode compatibility.
# Because optional Brave API parameters look required to the LLM, it fills
# them with placeholder junk — None, "", "null", or []. The test below
# verifies that _common_payload_refinement strips these from optional fields.
def test_common_refinement_strips_null_like_values(web_tool):
    """_common_payload_refinement drops optional keys with None / '' / 'null' / []."""
    params = web_tool._common_payload_refinement(
        {
            "query": "test",
            "country": "US",
            "search_lang": "",
            "freshness": "null",
            "count": 5,
            "goggles": [],
        }
    )
    # Real values survive (with `query` renamed to `q`)...
    for kept, expected in (("q", "test"), ("country", "US"), ("count", 5)):
        assert params[kept] == expected
    # ...while placeholder junk is removed entirely.
    for dropped in ("search_lang", "freshness", "goggles"):
        assert dropped not in params
# End-to-End _run() with Mocked HTTP Response
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_web_search_end_to_end(mock_get, web_tool):
    """With raw=True the untouched API payload is returned; the request carries query + auth."""
    web_tool.raw = True
    payload = {"web": {"results": [{"title": "R", "url": "http://r.co"}]}}
    mock_get.return_value = _mock_response(json_data=payload)
    result = web_tool._run(query="test")
    mock_get.assert_called_once()
    sent = mock_get.call_args.kwargs
    assert sent["params"]["q"] == "test"
    assert sent["headers"]["x-subscription-token"] == "test-api-key"
    assert result == payload
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_image_search_end_to_end(mock_get, image_tool):
    """Image search with raw=True returns the API payload unchanged."""
    image_tool.raw = True
    payload = {"results": [{"url": "http://img.co/a.jpg"}]}
    mock_get.return_value = _mock_response(json_data=payload)
    result = image_tool._run(query="cats")
    assert result == payload
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_news_search_end_to_end(mock_get, news_tool):
    """News search with raw=True returns the API payload unchanged."""
    news_tool.raw = True
    payload = {"results": [{"title": "News", "url": "http://n.co"}]}
    mock_get.return_value = _mock_response(json_data=payload)
    result = news_tool._run(query="headlines")
    assert result == payload
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_video_search_end_to_end(mock_get, video_tool):
    """Video search with raw=True returns the API payload unchanged."""
    video_tool.raw = True
    payload = {"results": [{"title": "Vid", "url": "http://v.co"}]}
    mock_get.return_value = _mock_response(json_data=payload)
    result = video_tool._run(query="python tutorial")
    assert result == payload
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_raw_false_calls_refine_response(mock_get, web_tool):
"""With raw=False (the default), _refine_response transforms the API response."""
api_response = {
@patch("requests.get")
def test_brave_tool_search(mock_get, brave_tool):
mock_response = {
"web": {
"results": [
{
"title": "CrewAI",
"url": "https://crewai.com",
"description": "AI agent framework",
"title": "Test Title",
"url": "http://test.com",
"description": "Test Description",
}
]
}
}
mock_get.return_value = _mock_response(json_data=api_response)
assert web_tool.raw is False
result = web_tool._run(query="crewai")
# The web tool's _refine_response extracts and reshapes results.
# The key assertion: we should NOT get back the raw API envelope.
assert result != api_response
# Backward Compatibility & Legacy Parameter Support
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_positional_query_argument(mock_get, web_tool):
    """tool.run('my query') works as a positional argument."""
    mock_get.return_value = _mock_response(json_data={})
    web_tool._run("positional test")
    sent_params = mock_get.call_args.kwargs["params"]
    assert sent_params["q"] == "positional test"
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_search_query_backward_compat(mock_get, web_tool):
    """The legacy 'search_query' param is mapped to 'query'."""
    mock_get.return_value = _mock_response(json_data={})
    web_tool._run(search_query="legacy test")
    sent_params = mock_get.call_args.kwargs["params"]
    assert sent_params["q"] == "legacy test"
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base._save_results_to_file")
def test_save_file_called_when_enabled(mock_save, mock_get):
    """save_file=True routes the results through _save_results_to_file exactly once."""
    mock_get.return_value = _mock_response(json_data={"results": []})
    BraveWebSearchTool(save_file=True)._run(query="test")
    mock_save.assert_called_once()
# Error Handling
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_connection_error_raises_runtime_error(mock_get, web_tool):
    """A network-level connection failure surfaces as RuntimeError."""
    mock_get.side_effect = requests_lib.exceptions.ConnectionError("refused")
    with pytest.raises(RuntimeError) as exc_info:
        web_tool._run(query="test")
    assert "Brave Search API connection failed" in str(exc_info.value)
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_timeout_raises_runtime_error(mock_get, web_tool):
    """A request timeout surfaces as RuntimeError."""
    mock_get.side_effect = requests_lib.exceptions.Timeout("timed out")
    with pytest.raises(RuntimeError) as exc_info:
        web_tool._run(query="test")
    assert "timed out" in str(exc_info.value)
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_invalid_params_raises_value_error(mock_get, web_tool):
    """count=999 exceeds WebSearchParams.count le=20."""
    # Schema validation fires before any HTTP call, so mock_get stays unused.
    with pytest.raises(ValueError, match="Invalid parameters"):
        web_tool._run(count=999, query="test")
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_4xx_error_raises_with_api_detail(mock_get, web_tool):
    """A 422 with a structured error body includes code and detail in the message."""
    # Structured Brave error envelope: {"error": {id, status, code, detail}}.
    mock_get.return_value = _mock_response(
        status_code=422,
        json_data={
            "error": {
                "id": "abc-123",
                "status": 422,
                "code": "OPTION_NOT_IN_PLAN",
                "detail": "extra_snippets requires a Pro plan",
            }
        },
    )
    with pytest.raises(RuntimeError, match="OPTION_NOT_IN_PLAN") as exc_info:
        web_tool._run(query="test")
    # Both the human-readable detail and the HTTP status appear in the message.
    assert "extra_snippets requires a Pro plan" in str(exc_info.value)
    assert "HTTP 422" in str(exc_info.value)
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_auth_error_raises_immediately(mock_get, web_tool):
    """A 401 with SUBSCRIPTION_TOKEN_INVALID is not retried."""
    mock_get.return_value = _mock_response(
        status_code=401,
        json_data={
            "error": {
                "id": "xyz",
                "status": 401,
                "code": "SUBSCRIPTION_TOKEN_INVALID",
                "detail": "The subscription token is invalid",
            }
        },
    )
    with pytest.raises(RuntimeError, match="SUBSCRIPTION_TOKEN_INVALID"):
        web_tool._run(query="test")
    # Should NOT have retried — only one call.
    assert mock_get.call_count == 1
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_quota_limited_429_raises_immediately(mock_get, web_tool):
    """A 429 with QUOTA_LIMITED is NOT retried — quota exhaustion is terminal."""
    # Unlike RATE_LIMITED (transient, retried below), QUOTA_LIMITED means the
    # billing-period allowance is gone; retrying cannot help.
    mock_get.return_value = _mock_response(
        status_code=429,
        json_data={
            "error": {
                "id": "ql-1",
                "status": 429,
                "code": "QUOTA_LIMITED",
                "detail": "Monthly quota exceeded",
            }
        },
    )
    with pytest.raises(RuntimeError, match="QUOTA_LIMITED") as exc_info:
        web_tool._run(query="test")
    assert "Monthly quota exceeded" in str(exc_info.value)
    # Terminal — only one HTTP call, no retries.
    assert mock_get.call_count == 1
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_usage_limit_exceeded_429_raises_immediately(mock_get, web_tool):
    """USAGE_LIMIT_EXCEEDED is also non-retryable, just like QUOTA_LIMITED."""
    # This error body has no "detail" field; the plain-text body is supplied
    # via `text` instead.
    mock_get.return_value = _mock_response(
        status_code=429,
        json_data={
            "error": {
                "id": "ule-1",
                "status": 429,
                "code": "USAGE_LIMIT_EXCEEDED",
            }
        },
        text="usage limit exceeded",
    )
    with pytest.raises(RuntimeError, match="USAGE_LIMIT_EXCEEDED"):
        web_tool._run(query="test")
    assert mock_get.call_count == 1
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_error_body_is_fully_included_in_message(mock_get, web_tool):
    """The full JSON error body is included in the RuntimeError message."""
    mock_get.return_value = _mock_response(
        status_code=429,
        json_data={
            "error": {
                "id": "x",
                "status": 429,
                "code": "QUOTA_LIMITED",
                "detail": "Exceeded",
                "meta": {"plan": "free", "limit": 1000},
            }
        },
    )
    with pytest.raises(RuntimeError) as exc_info:
        web_tool._run(query="test")
    msg = str(exc_info.value)
    # Even nested "meta" values ("free", 1000) must appear in the message.
    assert "HTTP 429" in msg
    assert "QUOTA_LIMITED" in msg
    assert "free" in msg
    assert "1000" in msg
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_error_without_json_body_falls_back_to_text(mock_get, web_tool):
    """When the error response isn't valid JSON, resp.text is used as the detail."""
    response = _mock_response(status_code=500, text="Internal Server Error")
    response.json.side_effect = ValueError("No JSON")
    mock_get.return_value = response
    with pytest.raises(RuntimeError) as exc_info:
        web_tool._run(query="test")
    assert "Internal Server Error" in str(exc_info.value)
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
def test_invalid_json_on_success_raises_runtime_error(mock_get, web_tool):
    """A 200 OK with a non-JSON body raises RuntimeError."""
    response = _mock_response(status_code=200)
    response.json.side_effect = ValueError("Expecting value")
    mock_get.return_value = response
    with pytest.raises(RuntimeError) as exc_info:
        web_tool._run(query="test")
    assert "invalid JSON" in str(exc_info.value)
# Rate Limiting
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base.time")
def test_rate_limit_sleeps_when_too_fast(mock_time, mock_get, web_tool):
    """Back-to-back calls within the interval trigger a sleep."""
    mock_get.return_value = _mock_response(json_data={})
    # Simulate: last request was at t=100, "now" is t=100.2 (only 0.2s elapsed).
    # With default 1 req/s the min interval is 1.0s, so it should sleep ~0.8s.
    mock_time.time.return_value = 100.2
    web_tool._last_request_time = 100.0
    web_tool._run(query="test")
    mock_time.sleep.assert_called_once()
    sleep_duration = mock_time.sleep.call_args[0][0]
    # Range check rather than equality to avoid float-precision flakiness.
    assert 0.7 < sleep_duration < 0.9  # approximately 0.8s
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base.time")
def test_rate_limit_skips_sleep_when_enough_time_passed(mock_time, mock_get, web_tool):
    """No sleep when the elapsed time already exceeds the interval."""
    mock_get.return_value = _mock_response(json_data={})
    # Last request was at t=100, "now" is t=102 (2s elapsed > 1s interval).
    mock_time.time.return_value = 102.0
    web_tool._last_request_time = 100.0
    web_tool._run(query="test")
    mock_time.sleep.assert_not_called()
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base.time")
def test_rate_limit_disabled_when_zero(mock_time, mock_get, web_tool):
    """requests_per_second=0 disables rate limiting entirely."""
    mock_get.return_value = _mock_response(json_data={})
    # Fix: actually disable rate limiting on the instance. The web_tool fixture
    # defaults to 1 req/s, so without this line a call at the same instant as
    # the last request would sleep and the assertion below would fail — the
    # test never exercised the rps=0 path it claims to cover.
    web_tool._requests_per_second = 0.0
    web_tool._last_request_time = 100.0
    mock_time.time.return_value = 100.0  # same instant
    web_tool._run(query="test")
    mock_time.sleep.assert_not_called()
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base.time")
def test_rate_limit_per_instance_independent(mock_time, mock_get, web_tool, image_tool):
    """Each instance has its own rate-limit clock; a request on one does not delay the other."""
    mock_get.return_value = _mock_response(json_data={})
    # Web tool fires at t=100 (its clock goes 0 -> 100).
    mock_time.time.return_value = 100.0
    web_tool._run(query="test")
    # Image tool fires at t=100.3. Its clock is still 0 (separate instance), so
    # next_allowed = 1.0 and 100.3 > 1.0 — no sleep. Total process rate can be sum of instance limits.
    mock_time.time.return_value = 100.3
    image_tool._run(query="cats")
    mock_time.sleep.assert_not_called()
# Retry Behavior
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base.time")
def test_429_rate_limited_retries_then_succeeds(mock_time, mock_get, web_tool):
    """A transient RATE_LIMITED 429 is retried; success on the second attempt."""
    mock_time.time.return_value = 200.0
    resp_429 = _mock_response(
        status_code=429,
        json_data={"error": {"id": "r", "status": 429, "code": "RATE_LIMITED"}},
        headers={"Retry-After": "2"},
    )
    resp_200 = _mock_response(status_code=200, json_data={"web": {"results": []}})
    # side_effect sequence: first call is rate-limited, second succeeds.
    mock_get.side_effect = [resp_429, resp_200]
    web_tool.raw = True
    result = web_tool._run(query="test")
    assert result == {"web": {"results": []}}
    assert mock_get.call_count == 2
    # Slept for the Retry-After value.
    retry_sleeps = [c for c in mock_time.sleep.call_args_list if c[0][0] == 2.0]
    assert len(retry_sleeps) == 1
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base.time")
def test_5xx_is_retried(mock_time, mock_get, web_tool):
    """A 502 server error is retried; success on the second attempt."""
    mock_time.time.return_value = 200.0
    # The 502 body is plain text, so .json() must raise.
    resp_502 = _mock_response(status_code=502, text="Bad Gateway")
    resp_502.json.side_effect = ValueError("no json")
    resp_200 = _mock_response(status_code=200, json_data={"web": {"results": []}})
    mock_get.side_effect = [resp_502, resp_200]
    web_tool.raw = True
    result = web_tool._run(query="test")
    assert result == {"web": {"results": []}}
    assert mock_get.call_count == 2
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base.time")
def test_429_rate_limited_exhausts_retries(mock_time, mock_get, web_tool):
    """Persistent RATE_LIMITED 429s exhaust retries and raise RuntimeError."""
    mock_time.time.return_value = 200.0
    resp_429 = _mock_response(
        status_code=429,
        json_data={"error": {"id": "r", "status": 429, "code": "RATE_LIMITED"}},
    )
    # return_value (not side_effect): every attempt sees the same 429.
    mock_get.return_value = resp_429
    with pytest.raises(RuntimeError, match="RATE_LIMITED"):
        web_tool._run(query="test")
    # 3 attempts (default _max_retries).
    assert mock_get.call_count == 3
@patch("crewai_tools.tools.brave_search_tool.base.requests.get")
@patch("crewai_tools.tools.brave_search_tool.base.time")
def test_retry_uses_exponential_backoff_when_no_retry_after(
    mock_time, mock_get, web_tool
):
    """Without Retry-After, backoff is 2^attempt (1s, 2s, ...)."""
    mock_time.time.return_value = 200.0
    resp_503 = _mock_response(status_code=503, text="Service Unavailable")
    resp_503.json.side_effect = ValueError("no json")
    resp_200 = _mock_response(status_code=200, json_data={"ok": True})
    # Two failures, then success on the third attempt.
    mock_get.side_effect = [resp_503, resp_503, resp_200]
    web_tool.raw = True
    web_tool._run(query="test")
    # Two retries: attempt 0 → sleep(1.0), attempt 1 → sleep(2.0).
    # Membership check (not equality) since rate-limit sleeps may also occur.
    retry_sleeps = [c[0][0] for c in mock_time.sleep.call_args_list]
    assert 1.0 in retry_sleeps
    assert 2.0 in retry_sleeps
mock_get.return_value.json.return_value = mock_response
result = brave_tool.run(query="test")
data = json.loads(result)
assert isinstance(data, list)
assert len(data) >= 1
assert data[0]["title"] == "Test Title"
assert data[0]["url"] == "http://test.com"
# Legacy test for the original BraveSearchTool; patches the global
# requests.get (not the brave_search_tool.base module path used above).
@patch("requests.get")
def test_brave_tool(mock_get):
    mock_response = {
        "web": {
            "results": [
                {
                    "title": "Brave Browser",
                    "url": "https://brave.com",
                    "description": "Brave Browser description",
                }
            ]
        }
    }
    # Only .json() is stubbed here; status/headers keep MagicMock defaults.
    mock_get.return_value.json.return_value = mock_response
    tool = BraveSearchTool(n_results=2)
    result = tool.run(query="Brave Browser")
    assert result is not None
    # Parse JSON so we can examine the structure
    data = json.loads(result)
    assert isinstance(data, list)
    assert len(data) >= 1
    # First item should have expected fields: title, url, and description
    first = data[0]
    assert "title" in first
    assert first["title"] == "Brave Browser"
    assert "url" in first
    assert first["url"] == "https://brave.com"
    assert "description" in first
    assert first["description"] == "Brave Browser description"
# Allow running this module directly (outside pytest) for a quick smoke check.
# test_brave_tool's @patch decorator injects its mock, so no argument is needed.
if __name__ == "__main__":
    test_brave_tool()
    test_brave_tool_initialization()
    # test_brave_tool_search(brave_tool)

File diff suppressed because it is too large Load Diff

View File

@@ -53,7 +53,7 @@ Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = [
"crewai-tools==1.10.2a1",
"crewai-tools==1.10.1a1",
]
embeddings = [
"tiktoken~=0.8.0"

View File

@@ -40,7 +40,7 @@ def _suppress_pydantic_deprecation_warnings() -> None:
_suppress_pydantic_deprecation_warnings()
__version__ = "1.10.2a1"
__version__ = "1.10.1a1"
_telemetry_submitted = False

View File

@@ -1269,6 +1269,17 @@ class Agent(BaseAgent):
)
start_time = time.time()
matches = agent_memory.recall(formatted_messages, limit=20)
# Filter low-relevance memories to reduce noise while
# guaranteeing a minimum context floor.
if matches:
_MIN_SCORE = 0.55
_MIN_RESULTS = 5
above = [m for m in matches if m.score >= _MIN_SCORE]
if len(above) >= _MIN_RESULTS:
matches = above
else:
# Keep at least top _MIN_RESULTS by score
matches = matches[:max(_MIN_RESULTS, len(above))]
memory_block = ""
if matches:
memory_block = "Relevant memories:\n" + "\n".join(

View File

@@ -38,7 +38,7 @@ from crewai.utilities.string_utils import interpolate_only
_SLUG_RE: Final[re.Pattern[str]] = re.compile(
r"^(?:crewai-amp:)?[a-zA-Z0-9][a-zA-Z0-9_-]*(?:#[\w-]+)?$"
r"^(?:crewai-amp:)?[a-zA-Z0-9][a-zA-Z0-9_-]*(?:#\w+)?$"
)

View File

@@ -30,9 +30,12 @@ class CrewAgentExecutorMixin:
memory = getattr(self.agent, "memory", None) or (
getattr(self.crew, "_memory", None) if self.crew else None
)
if memory is None or not self.task or memory.read_only:
if memory is None or not self.task or getattr(memory, "_read_only", False):
return
if f"Action: {sanitize_tool_name('Delegate work to coworker')}" in output.text:
if (
f"Action: {sanitize_tool_name('Delegate work to coworker')}"
in output.text
):
return
try:
raw = (
@@ -45,4 +48,6 @@ class CrewAgentExecutorMixin:
if extracted:
memory.remember_many(extracted, agent_role=self.agent.role)
except Exception as e:
self.agent._logger.log("error", f"Failed to save to memory: {e}")
self.agent._logger.log(
"error", f"Failed to save to memory: {e}"
)

View File

@@ -9,7 +9,6 @@ from __future__ import annotations
import asyncio
from collections.abc import Callable
from concurrent.futures import ThreadPoolExecutor, as_completed
import contextvars
import inspect
import logging
from typing import TYPE_CHECKING, Any, Literal, cast
@@ -756,7 +755,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
with ThreadPoolExecutor(max_workers=max_workers) as pool:
futures = {
pool.submit(
contextvars.copy_context().run,
self._execute_single_native_tool_call,
call_id=call_id,
func_name=func_name,

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.10.2a1"
"crewai[tools]==1.10.1a1"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.10.2a1"
"crewai[tools]==1.10.1a1"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]==1.10.2a1"
"crewai[tools]==1.10.1a1"
]
[tool.crewai]

View File

@@ -3,7 +3,6 @@ from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine
from concurrent.futures import ThreadPoolExecutor, as_completed
import contextvars
from datetime import datetime
import inspect
import json
@@ -303,7 +302,6 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
super().__init__(
suppress_flow_events=True,
tracing=current_tracing if current_tracing else None,
max_method_calls=self.max_iter * 10,
)
self._flow_initialized = True
@@ -405,7 +403,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
self._setup_native_tools()
return "initialized"
@listen("max_iterations_exceeded")
@listen("force_final_answer")
def force_final_answer(self) -> Literal["agent_finished"]:
"""Force agent to provide final answer when max iterations exceeded."""
formatted_answer = handle_max_iterations_exceeded(
@@ -657,11 +655,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
return "tool_result_is_final"
reasoning_prompt = self._i18n.slice("post_tool_reasoning")
reasoning_message_post: LLMMessage = {
reasoning_message: LLMMessage = {
"role": "user",
"content": reasoning_prompt,
}
self.state.messages.append(reasoning_message_post)
self.state.messages.append(reasoning_message)
return "tool_completed"
@@ -729,7 +727,7 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
max_workers = min(8, len(runnable_tool_calls))
with ThreadPoolExecutor(max_workers=max_workers) as pool:
future_to_idx = {
pool.submit(contextvars.copy_context().run, self._execute_single_native_tool_call, tool_call): idx
pool.submit(self._execute_single_native_tool_call, tool_call): idx
for idx, tool_call in enumerate(runnable_tool_calls)
}
ordered_results: list[dict[str, Any] | None] = [None] * len(
@@ -888,10 +886,9 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
call_id, func_name, func_args = info
# Parse arguments
parsed_args, parse_error = parse_tool_call_args(func_args, func_name, call_id)
args_dict, parse_error = parse_tool_call_args(func_args, func_name, call_id)
if parse_error is not None:
return parse_error
args_dict: dict[str, Any] = parsed_args or {}
# Get agent_key for event tracking
agent_key = getattr(self.agent, "key", "unknown") if self.agent else "unknown"
@@ -1110,11 +1107,11 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin):
def check_max_iterations(
self,
) -> Literal[
"max_iterations_exceeded", "continue_reasoning", "continue_reasoning_native"
"force_final_answer", "continue_reasoning", "continue_reasoning_native"
]:
"""Check if max iterations reached before proceeding with reasoning."""
if has_reached_max_iterations(self.state.iterations, self.max_iter):
return "max_iterations_exceeded"
return "force_final_answer"
if self.state.use_native_tools:
return "continue_reasoning_native"
return "continue_reasoning"

View File

@@ -497,50 +497,6 @@ class LockedListProxy(list, Generic[T]): # type: ignore[type-arg]
def __bool__(self) -> bool:
return bool(self._list)
def index(self, value: T, start: SupportsIndex = 0, stop: SupportsIndex | None = None) -> int: # type: ignore[override]
if stop is None:
return self._list.index(value, start)
return self._list.index(value, start, stop)
def count(self, value: T) -> int:
return self._list.count(value)
def sort(self, *, key: Any = None, reverse: bool = False) -> None:
with self._lock:
self._list.sort(key=key, reverse=reverse)
def reverse(self) -> None:
with self._lock:
self._list.reverse()
def copy(self) -> list[T]:
return self._list.copy()
def __add__(self, other: list[T]) -> list[T]:
return self._list + other
def __radd__(self, other: list[T]) -> list[T]:
return other + self._list
def __iadd__(self, other: Iterable[T]) -> LockedListProxy[T]:
with self._lock:
self._list += list(other)
return self
def __mul__(self, n: SupportsIndex) -> list[T]:
return self._list * n
def __rmul__(self, n: SupportsIndex) -> list[T]:
return self._list * n
def __imul__(self, n: SupportsIndex) -> LockedListProxy[T]:
with self._lock:
self._list *= n
return self
def __reversed__(self) -> Iterator[T]:
return reversed(self._list)
def __eq__(self, other: object) -> bool:
"""Compare based on the underlying list contents."""
if isinstance(other, LockedListProxy):
@@ -623,23 +579,6 @@ class LockedDictProxy(dict, Generic[T]): # type: ignore[type-arg]
def __bool__(self) -> bool:
return bool(self._dict)
def copy(self) -> dict[str, T]:
return self._dict.copy()
def __or__(self, other: dict[str, T]) -> dict[str, T]:
return self._dict | other
def __ror__(self, other: dict[str, T]) -> dict[str, T]:
return other | self._dict
def __ior__(self, other: dict[str, T]) -> LockedDictProxy[T]:
with self._lock:
self._dict |= other
return self
def __reversed__(self) -> Iterator[str]:
return reversed(self._dict)
def __eq__(self, other: object) -> bool:
"""Compare based on the underlying dict contents."""
if isinstance(other, LockedDictProxy):
@@ -681,10 +620,6 @@ class StateProxy(Generic[T]):
if name in ("_proxy_state", "_proxy_lock"):
object.__setattr__(self, name, value)
else:
if isinstance(value, LockedListProxy):
value = value._list
elif isinstance(value, LockedDictProxy):
value = value._dict
with object.__getattribute__(self, "_proxy_lock"):
setattr(object.__getattribute__(self, "_proxy_state"), name, value)
@@ -757,7 +692,6 @@ class FlowMeta(type):
condition_type = getattr(
attr_value, "__condition_type__", OR_CONDITION
)
if (
hasattr(attr_value, "__trigger_condition__")
and attr_value.__trigger_condition__ is not None
@@ -835,7 +769,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
persistence: FlowPersistence | None = None,
tracing: bool | None = None,
suppress_flow_events: bool = False,
max_method_calls: int = 100,
**kwargs: Any,
) -> None:
"""Initialize a new Flow instance.
@@ -844,7 +777,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
persistence: Optional persistence backend for storing flow states
tracing: Whether to enable tracing. True=always enable, False=always disable, None=check environment/user settings
suppress_flow_events: Whether to suppress flow event emissions (internal use)
max_method_calls: Maximum times a single method can be called per execution before raising RecursionError
**kwargs: Additional state values to initialize or override
"""
# Initialize basic instance attributes
@@ -860,8 +792,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
self._completed_methods: set[FlowMethodName] = (
set()
) # Track completed methods for reload
self._method_call_counts: dict[FlowMethodName, int] = {}
self._max_method_calls = max_method_calls
self._persistence: FlowPersistence | None = persistence
self._is_execution_resuming: bool = False
self._event_futures: list[Future[None]] = []
@@ -1898,7 +1828,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
self._method_outputs.clear()
self._pending_and_listeners.clear()
self._clear_or_listeners()
self._method_call_counts.clear()
else:
# Only enter resumption mode if there are completed methods to
# replay. When _completed_methods is empty (e.g. a pure
@@ -2640,16 +2569,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
- Skips execution if method was already completed (e.g., after reload)
- Catches and logs any exceptions during execution, preventing individual listener failures from breaking the entire flow
"""
count = self._method_call_counts.get(listener_name, 0) + 1
if count > self._max_method_calls:
raise RecursionError(
f"Method '{listener_name}' has been called {self._max_method_calls} times in "
f"this flow execution, which indicates an infinite loop. "
f"This commonly happens when a @listen label matches the "
f"method's own name."
)
self._method_call_counts[listener_name] = count
if listener_name in self._completed_methods:
if self._is_execution_resuming:
# During resumption, skip execution but continue listeners

View File

@@ -408,7 +408,7 @@ def human_feedback(
emit=list(emit) if emit else None,
default_outcome=default_outcome,
metadata=metadata or {},
llm=llm if isinstance(llm, str) else getattr(llm, "model", None),
llm=llm if isinstance(llm, str) else None,
)
# Determine effective provider:

View File

@@ -72,8 +72,7 @@ class SQLiteFlowPersistence(FlowPersistence):
def init_db(self) -> None:
"""Create the necessary tables if they don't exist."""
with sqlite3.connect(self.db_path, timeout=30) as conn:
conn.execute("PRAGMA journal_mode=WAL")
with sqlite3.connect(self.db_path) as conn:
# Main state table
conn.execute(
"""
@@ -137,7 +136,7 @@ class SQLiteFlowPersistence(FlowPersistence):
f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
)
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
conn.execute(
"""
INSERT INTO flow_states (
@@ -164,7 +163,7 @@ class SQLiteFlowPersistence(FlowPersistence):
Returns:
The most recent state as a dictionary, or None if no state exists
"""
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
cursor = conn.execute(
"""
SELECT state_json
@@ -214,7 +213,7 @@ class SQLiteFlowPersistence(FlowPersistence):
self.save_state(flow_uuid, context.method_name, state_data)
# Save pending feedback context
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
# Use INSERT OR REPLACE to handle re-triggering feedback on same flow
conn.execute(
"""
@@ -249,7 +248,7 @@ class SQLiteFlowPersistence(FlowPersistence):
# Import here to avoid circular imports
from crewai.flow.async_feedback.types import PendingFeedbackContext
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
cursor = conn.execute(
"""
SELECT state_json, context_json
@@ -273,7 +272,7 @@ class SQLiteFlowPersistence(FlowPersistence):
Args:
flow_uuid: Unique identifier for the flow instance
"""
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
conn.execute(
"""
DELETE FROM pending_feedback

View File

@@ -600,7 +600,7 @@ class LiteAgent(FlowTrackable, BaseModel):
def _save_to_memory(self, output_text: str) -> None:
"""Extract discrete memories from the run and remember each. No-op if _memory is None or read-only."""
if self._memory is None or self._memory.read_only:
if self._memory is None or getattr(self._memory, "_read_only", False):
return
input_str = self._get_last_user_content() or "User request"
try:

View File

@@ -22,12 +22,7 @@ if TYPE_CHECKING:
try:
from anthropic import Anthropic, AsyncAnthropic, transform_schema
from anthropic.types import (
Message,
TextBlock,
ThinkingBlock,
ToolUseBlock,
)
from anthropic.types import Message, TextBlock, ThinkingBlock, ToolUseBlock
from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock
import httpx
except ImportError:
@@ -36,11 +31,6 @@ except ImportError:
) from None
TOOL_SEARCH_TOOL_TYPES: Final[tuple[str, ...]] = (
"tool_search_tool_regex_20251119",
"tool_search_tool_bm25_20251119",
)
ANTHROPIC_FILES_API_BETA: Final = "files-api-2025-04-14"
ANTHROPIC_STRUCTURED_OUTPUTS_BETA: Final = "structured-outputs-2025-11-13"
@@ -127,22 +117,6 @@ class AnthropicThinkingConfig(BaseModel):
budget_tokens: int | None = None
class AnthropicToolSearchConfig(BaseModel):
"""Configuration for Anthropic's server-side tool search.
When enabled, tools marked with defer_loading=True are not loaded into
context immediately. Instead, Claude uses the tool search tool to
dynamically discover and load relevant tools on-demand.
Attributes:
type: The tool search variant to use.
- "regex": Claude constructs regex patterns to search tool names/descriptions.
- "bm25": Claude uses natural language queries to search tools.
"""
type: Literal["regex", "bm25"] = "bm25"
class AnthropicCompletion(BaseLLM):
"""Anthropic native completion implementation.
@@ -166,7 +140,6 @@ class AnthropicCompletion(BaseLLM):
interceptor: BaseInterceptor[httpx.Request, httpx.Response] | None = None,
thinking: AnthropicThinkingConfig | None = None,
response_format: type[BaseModel] | None = None,
tool_search: AnthropicToolSearchConfig | bool | None = None,
**kwargs: Any,
):
"""Initialize Anthropic chat completion client.
@@ -186,10 +159,6 @@ class AnthropicCompletion(BaseLLM):
interceptor: HTTP interceptor for modifying requests/responses at transport level.
response_format: Pydantic model for structured output. When provided, responses
will be validated against this model schema.
tool_search: Enable Anthropic's server-side tool search. When True, uses "bm25"
variant by default. Pass an AnthropicToolSearchConfig to choose "regex" or
"bm25". When enabled, tools are automatically marked with defer_loading=True
and a tool search tool is injected into the tools list.
**kwargs: Additional parameters
"""
super().__init__(
@@ -221,13 +190,6 @@ class AnthropicCompletion(BaseLLM):
self.thinking = thinking
self.previous_thinking_blocks: list[ThinkingBlock] = []
self.response_format = response_format
# Tool search config
if tool_search is True:
self.tool_search = AnthropicToolSearchConfig()
elif isinstance(tool_search, AnthropicToolSearchConfig):
self.tool_search = tool_search
else:
self.tool_search = None
# Model-specific settings
self.is_claude_3 = "claude-3" in model.lower()
self.supports_tools = True
@@ -470,23 +432,10 @@ class AnthropicCompletion(BaseLLM):
# Handle tools for Claude 3+
if tools and self.supports_tools:
converted_tools = self._convert_tools_for_interference(tools)
# When tool_search is enabled and there are 2+ regular tools,
# inject the search tool and mark regular tools with defer_loading.
# With only 1 tool there's nothing to search — skip tool search
# entirely so the normal forced tool_choice optimisation still works.
regular_tools = [
t
for t in converted_tools
if t.get("type", "") not in TOOL_SEARCH_TOOL_TYPES
]
if self.tool_search is not None and len(regular_tools) >= 2:
converted_tools = self._apply_tool_search(converted_tools)
params["tools"] = converted_tools
if available_functions and len(regular_tools) == 1:
tool_name = regular_tools[0].get("name")
if available_functions and len(converted_tools) == 1:
tool_name = converted_tools[0].get("name")
if tool_name and tool_name in available_functions:
params["tool_choice"] = {"type": "tool", "name": tool_name}
@@ -505,12 +454,6 @@ class AnthropicCompletion(BaseLLM):
anthropic_tools = []
for tool in tools:
# Pass through tool search tool definitions unchanged
tool_type = tool.get("type", "")
if tool_type in TOOL_SEARCH_TOOL_TYPES:
anthropic_tools.append(tool)
continue
if "input_schema" in tool and "name" in tool and "description" in tool:
anthropic_tools.append(tool)
continue
@@ -523,15 +466,15 @@ class AnthropicCompletion(BaseLLM):
logging.error(f"Error converting tool to Anthropic format: {e}")
raise e
anthropic_tool: dict[str, Any] = {
anthropic_tool = {
"name": name,
"description": description,
}
if parameters and isinstance(parameters, dict):
anthropic_tool["input_schema"] = parameters
anthropic_tool["input_schema"] = parameters # type: ignore[assignment]
else:
anthropic_tool["input_schema"] = {
anthropic_tool["input_schema"] = { # type: ignore[assignment]
"type": "object",
"properties": {},
"required": [],
@@ -541,55 +484,6 @@ class AnthropicCompletion(BaseLLM):
return anthropic_tools
def _apply_tool_search(self, tools: list[dict[str, Any]]) -> list[dict[str, Any]]:
"""Inject tool search tool and mark regular tools with defer_loading.
When tool_search is enabled, this method:
1. Adds the appropriate tool search tool definition (regex or bm25)
2. Marks all regular tools with defer_loading=True so they are only
loaded when Claude discovers them via search
Args:
tools: Converted tool definitions in Anthropic format.
Returns:
Updated tools list with tool search tool prepended and
regular tools marked as deferred.
"""
if self.tool_search is None:
return tools
# Check if a tool search tool is already present (user passed one manually)
has_search_tool = any(
t.get("type", "") in TOOL_SEARCH_TOOL_TYPES for t in tools
)
result: list[dict[str, Any]] = []
if not has_search_tool:
# Map config type to API type identifier
type_map = {
"regex": "tool_search_tool_regex_20251119",
"bm25": "tool_search_tool_bm25_20251119",
}
tool_type = type_map[self.tool_search.type]
# Tool search tool names follow the convention: tool_search_tool_{variant}
tool_name = f"tool_search_tool_{self.tool_search.type}"
result.append({"type": tool_type, "name": tool_name})
for tool in tools:
# Don't modify tool search tools
if tool.get("type", "") in TOOL_SEARCH_TOOL_TYPES:
result.append(tool)
continue
# Mark regular tools as deferred if not already set
if "defer_loading" not in tool:
tool = {**tool, "defer_loading": True}
result.append(tool)
return result
def _extract_thinking_block(
self, content_block: Any
) -> ThinkingBlock | dict[str, Any] | None:

View File

@@ -1781,7 +1781,6 @@ class BedrockCompletion(BaseLLM):
converse_messages: list[LLMMessage] = []
system_message: str | None = None
pending_tool_results: list[dict[str, Any]] = []
for message in formatted_messages:
role = message.get("role")
@@ -1795,56 +1794,53 @@ class BedrockCompletion(BaseLLM):
system_message += f"\n\n{content}"
else:
system_message = cast(str, content)
elif role == "assistant" and tool_calls:
# Convert OpenAI-style tool_calls to Bedrock toolUse format
bedrock_content = []
for tc in tool_calls:
func = tc.get("function", {})
tool_use_block = {
"toolUse": {
"toolUseId": tc.get("id", f"call_{id(tc)}"),
"name": func.get("name", ""),
"input": func.get("arguments", {})
if isinstance(func.get("arguments"), dict)
else json.loads(func.get("arguments", "{}") or "{}"),
}
}
bedrock_content.append(tool_use_block)
converse_messages.append(
{"role": "assistant", "content": bedrock_content}
)
elif role == "tool":
if not tool_call_id:
raise ValueError("Tool message missing required tool_call_id")
pending_tool_results.append(
converse_messages.append(
{
"toolResult": {
"toolUseId": tool_call_id,
"content": [{"text": str(content) if content else ""}],
}
"role": "user",
"content": [
{
"toolResult": {
"toolUseId": tool_call_id,
"content": [
{"text": str(content) if content else ""}
],
}
}
],
}
)
else:
if pending_tool_results:
converse_messages.append(
{"role": "user", "content": pending_tool_results}
)
pending_tool_results = []
if role == "assistant" and tool_calls:
# Convert OpenAI-style tool_calls to Bedrock toolUse format
bedrock_content = []
for tc in tool_calls:
func = tc.get("function", {})
tool_use_block = {
"toolUse": {
"toolUseId": tc.get("id", f"call_{id(tc)}"),
"name": func.get("name", ""),
"input": func.get("arguments", {})
if isinstance(func.get("arguments"), dict)
else json.loads(func.get("arguments", "{}") or "{}"),
}
}
bedrock_content.append(tool_use_block)
converse_messages.append(
{"role": "assistant", "content": bedrock_content}
)
# Convert to Converse API format with proper content structure
if isinstance(content, list):
# Already formatted as multimodal content blocks
converse_messages.append({"role": role, "content": content})
else:
# Convert to Converse API format with proper content structure
if isinstance(content, list):
# Already formatted as multimodal content blocks
converse_messages.append({"role": role, "content": content})
else:
# String content - wrap in text block
text_content = content if content else ""
converse_messages.append(
{"role": role, "content": [{"text": text_content}]}
)
if pending_tool_results:
converse_messages.append({"role": "user", "content": pending_tool_results})
# String content - wrap in text block
text_content = content if content else ""
converse_messages.append(
{"role": role, "content": [{"text": text_content}]}
)
# CRITICAL: Handle model-specific conversation requirements
# Cohere and some other models require conversation to end with user message

View File

@@ -22,7 +22,6 @@ from crewai.mcp.config import (
MCPServerSSE,
MCPServerStdio,
)
from crewai.utilities.string_utils import sanitize_tool_name
from crewai.mcp.transports.http import HTTPTransport
from crewai.mcp.transports.sse import SSETransport
from crewai.mcp.transports.stdio import StdioTransport
@@ -75,9 +74,10 @@ class MCPToolResolver:
elif isinstance(mcp_config, str):
amp_refs.append(self._parse_amp_ref(mcp_config))
else:
tools, clients = self._resolve_native(mcp_config)
tools, client = self._resolve_native(mcp_config)
all_tools.extend(tools)
self._clients.extend(clients)
if client:
self._clients.append(client)
if amp_refs:
tools, clients = self._resolve_amp(amp_refs)
@@ -131,7 +131,7 @@ class MCPToolResolver:
all_tools: list[BaseTool] = []
all_clients: list[Any] = []
resolved_cache: dict[str, tuple[list[BaseTool], list[Any]]] = {}
resolved_cache: dict[str, tuple[list[BaseTool], Any | None]] = {}
for slug in unique_slugs:
config_dict = amp_configs_map.get(slug)
@@ -149,9 +149,10 @@ class MCPToolResolver:
mcp_server_config = self._build_mcp_config_from_dict(config_dict)
try:
tools, clients = self._resolve_native(mcp_server_config)
resolved_cache[slug] = (tools, clients)
all_clients.extend(clients)
tools, client = self._resolve_native(mcp_server_config)
resolved_cache[slug] = (tools, client)
if client:
all_clients.append(client)
except Exception as e:
crewai_event_bus.emit(
self,
@@ -169,9 +170,8 @@ class MCPToolResolver:
slug_tools, _ = cached
if specific_tool:
sanitized = sanitize_tool_name(specific_tool)
all_tools.extend(
t for t in slug_tools if t.name.endswith(f"_{sanitized}")
t for t in slug_tools if t.name.endswith(f"_{specific_tool}")
)
else:
all_tools.extend(slug_tools)
@@ -198,6 +198,7 @@ class MCPToolResolver:
plus_api = PlusAPI(api_key=get_platform_integration_token())
response = plus_api.get_mcp_configs(slugs)
if response.status_code == 200:
configs: dict[str, dict[str, Any]] = response.json().get("configs", {})
return configs
@@ -217,7 +218,6 @@ class MCPToolResolver:
def _resolve_external(self, mcp_ref: str) -> list[BaseTool]:
"""Resolve an HTTPS MCP server URL into tools."""
from crewai.tools.base_tool import BaseTool
from crewai.tools.mcp_tool_wrapper import MCPToolWrapper
if "#" in mcp_ref:
@@ -227,7 +227,6 @@ class MCPToolResolver:
server_params = {"url": server_url}
server_name = self._extract_server_name(server_url)
sanitized_specific_tool = sanitize_tool_name(specific_tool) if specific_tool else None
try:
tool_schemas = self._get_mcp_tool_schemas(server_params)
@@ -240,7 +239,7 @@ class MCPToolResolver:
tools = []
for tool_name, schema in tool_schemas.items():
if sanitized_specific_tool and tool_name != sanitized_specific_tool:
if specific_tool and tool_name != specific_tool:
continue
try:
@@ -272,16 +271,14 @@ class MCPToolResolver:
)
return []
@staticmethod
def _create_transport(
mcp_config: MCPServerConfig,
) -> tuple[StdioTransport | HTTPTransport | SSETransport, str]:
"""Create a fresh transport instance from an MCP server config.
def _resolve_native(
self, mcp_config: MCPServerConfig
) -> tuple[list[BaseTool], Any | None]:
"""Resolve an ``MCPServerConfig`` into tools, returning the client for cleanup."""
from crewai.tools.base_tool import BaseTool
from crewai.tools.mcp_native_tool import MCPNativeTool
Returns a ``(transport, server_name)`` tuple. Each call produces an
independent transport so that parallel tool executions never share
state.
"""
transport: StdioTransport | HTTPTransport | SSETransport
if isinstance(mcp_config, MCPServerStdio):
transport = StdioTransport(
command=mcp_config.command,
@@ -295,54 +292,38 @@ class MCPToolResolver:
headers=mcp_config.headers,
streamable=mcp_config.streamable,
)
server_name = MCPToolResolver._extract_server_name(mcp_config.url)
server_name = self._extract_server_name(mcp_config.url)
elif isinstance(mcp_config, MCPServerSSE):
transport = SSETransport(
url=mcp_config.url,
headers=mcp_config.headers,
)
server_name = MCPToolResolver._extract_server_name(mcp_config.url)
server_name = self._extract_server_name(mcp_config.url)
else:
raise ValueError(f"Unsupported MCP server config type: {type(mcp_config)}")
return transport, server_name
def _resolve_native(
self, mcp_config: MCPServerConfig
) -> tuple[list[BaseTool], list[Any]]:
"""Resolve an ``MCPServerConfig`` into tools.
Returns ``(tools, clients)`` where *clients* is always empty for
native tools (clients are now created on-demand per invocation).
A ``client_factory`` closure is passed to each ``MCPNativeTool`` so
every call -- even concurrent calls to the *same* tool -- gets its
own ``MCPClient`` + transport with no shared mutable state.
"""
from crewai.tools.base_tool import BaseTool
from crewai.tools.mcp_native_tool import MCPNativeTool
discovery_transport, server_name = self._create_transport(mcp_config)
discovery_client = MCPClient(
transport=discovery_transport,
client = MCPClient(
transport=transport,
cache_tools_list=mcp_config.cache_tools_list,
)
async def _setup_client_and_list_tools() -> list[dict[str, Any]]:
try:
if not discovery_client.connected:
await discovery_client.connect()
if not client.connected:
await client.connect()
tools_list = await discovery_client.list_tools()
tools_list = await client.list_tools()
try:
await discovery_client.disconnect()
await client.disconnect()
await asyncio.sleep(0.1)
except Exception as e:
self._logger.log("error", f"Error during disconnect: {e}")
return tools_list
except Exception as e:
if discovery_client.connected:
await discovery_client.disconnect()
if client.connected:
await client.disconnect()
await asyncio.sleep(0.1)
raise RuntimeError(
f"Error during setup client and list tools: {e}"
@@ -395,13 +376,6 @@ class MCPToolResolver:
filtered_tools.append(tool)
tools_list = filtered_tools
def _client_factory() -> MCPClient:
transport, _ = self._create_transport(mcp_config)
return MCPClient(
transport=transport,
cache_tools_list=mcp_config.cache_tools_list,
)
tools = []
for tool_def in tools_list:
tool_name = tool_def.get("name", "")
@@ -422,7 +396,7 @@ class MCPToolResolver:
try:
native_tool = MCPNativeTool(
client_factory=_client_factory,
mcp_client=client,
tool_name=tool_name,
tool_schema=tool_schema,
server_name=server_name,
@@ -433,10 +407,10 @@ class MCPToolResolver:
self._logger.log("error", f"Failed to create native MCP tool: {e}")
continue
return cast(list[BaseTool], tools), []
return cast(list[BaseTool], tools), client
except Exception as e:
if discovery_client.connected:
asyncio.run(discovery_client.disconnect())
if client.connected:
asyncio.run(client.disconnect())
raise RuntimeError(f"Failed to get native MCP tools: {e}") from e

View File

@@ -3,9 +3,11 @@
from __future__ import annotations
from datetime import datetime
from typing import Any, Literal
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator
if TYPE_CHECKING:
from crewai.memory.unified_memory import Memory
from crewai.memory.types import (
_RECALL_OVERSAMPLE_FACTOR,
@@ -13,38 +15,22 @@ from crewai.memory.types import (
MemoryRecord,
ScopeInfo,
)
from crewai.memory.unified_memory import Memory
class MemoryScope(BaseModel):
class MemoryScope:
"""View of Memory restricted to a root path. All operations are scoped under that path."""
model_config = ConfigDict(arbitrary_types_allowed=True)
def __init__(self, memory: Memory, root_path: str) -> None:
"""Initialize scope.
root_path: str = Field(default="/")
_memory: Memory = PrivateAttr()
_root: str = PrivateAttr()
@model_validator(mode="wrap")
@classmethod
def _accept_memory(cls, data: Any, handler: Any) -> MemoryScope:
"""Extract memory dependency and normalize root path before validation."""
if isinstance(data, MemoryScope):
return data
memory = data.pop("memory")
instance: MemoryScope = handler(data)
instance._memory = memory
root = instance.root_path.rstrip("/") or ""
if root and not root.startswith("/"):
root = "/" + root
instance._root = root
return instance
@property
def read_only(self) -> bool:
"""Whether the underlying memory is read-only."""
return self._memory.read_only
Args:
memory: The underlying Memory instance.
root_path: Root path for this scope (e.g. /agent/1).
"""
self._memory = memory
self._root = root_path.rstrip("/") or ""
if self._root and not self._root.startswith("/"):
self._root = "/" + self._root
def _scope_path(self, scope: str | None) -> str:
if not scope or scope == "/":
@@ -66,7 +52,7 @@ class MemoryScope(BaseModel):
importance: float | None = None,
source: str | None = None,
private: bool = False,
) -> MemoryRecord | None:
) -> MemoryRecord:
"""Remember content; scope is relative to this scope's root."""
path = self._scope_path(scope)
return self._memory.remember(
@@ -85,7 +71,7 @@ class MemoryScope(BaseModel):
scope: str | None = None,
categories: list[str] | None = None,
limit: int = 10,
depth: Literal["shallow", "deep"] = "deep",
depth: str = "deep",
source: str | None = None,
include_private: bool = False,
) -> list[MemoryMatch]:
@@ -152,34 +138,34 @@ class MemoryScope(BaseModel):
"""Return a narrower scope under this scope."""
child = path.strip("/")
if not child:
return MemoryScope(memory=self._memory, root_path=self._root or "/")
return MemoryScope(self._memory, self._root or "/")
base = self._root.rstrip("/") or ""
new_root = f"{base}/{child}" if base else f"/{child}"
return MemoryScope(memory=self._memory, root_path=new_root)
return MemoryScope(self._memory, new_root)
class MemorySlice(BaseModel):
class MemorySlice:
"""View over multiple scopes: recall searches all, remember is a no-op when read_only."""
model_config = ConfigDict(arbitrary_types_allowed=True)
def __init__(
self,
memory: Memory,
scopes: list[str],
categories: list[str] | None = None,
read_only: bool = True,
) -> None:
"""Initialize slice.
scopes: list[str] = Field(default_factory=list)
categories: list[str] | None = Field(default=None)
read_only: bool = Field(default=True)
_memory: Memory = PrivateAttr()
@model_validator(mode="wrap")
@classmethod
def _accept_memory(cls, data: Any, handler: Any) -> MemorySlice:
"""Extract memory dependency and normalize scopes before validation."""
if isinstance(data, MemorySlice):
return data
memory = data.pop("memory")
data["scopes"] = [s.rstrip("/") or "/" for s in data.get("scopes", [])]
instance: MemorySlice = handler(data)
instance._memory = memory
return instance
Args:
memory: The underlying Memory instance.
scopes: List of scope paths to include.
categories: Optional category filter for recall.
read_only: If True, remember() is a silent no-op.
"""
self._memory = memory
self._scopes = [s.rstrip("/") or "/" for s in scopes]
self._categories = categories
self._read_only = read_only
def remember(
self,
@@ -192,7 +178,7 @@ class MemorySlice(BaseModel):
private: bool = False,
) -> MemoryRecord | None:
"""Remember into an explicit scope. No-op when read_only=True."""
if self.read_only:
if self._read_only:
return None
return self._memory.remember(
content,
@@ -210,14 +196,14 @@ class MemorySlice(BaseModel):
scope: str | None = None,
categories: list[str] | None = None,
limit: int = 10,
depth: Literal["shallow", "deep"] = "deep",
depth: str = "deep",
source: str | None = None,
include_private: bool = False,
) -> list[MemoryMatch]:
"""Recall across all slice scopes; results merged and re-ranked."""
cats = categories or self.categories
cats = categories or self._categories
all_matches: list[MemoryMatch] = []
for sc in self.scopes:
for sc in self._scopes:
matches = self._memory.recall(
query,
scope=sc,
@@ -245,7 +231,7 @@ class MemorySlice(BaseModel):
def list_scopes(self, path: str = "/") -> list[str]:
"""List scopes across all slice roots."""
out: list[str] = []
for sc in self.scopes:
for sc in self._scopes:
full = f"{sc.rstrip('/')}{path}" if sc != "/" else path
out.extend(self._memory.list_scopes(full))
return sorted(set(out))
@@ -257,23 +243,15 @@ class MemorySlice(BaseModel):
oldest: datetime | None = None
newest: datetime | None = None
children: list[str] = []
for sc in self.scopes:
for sc in self._scopes:
full = f"{sc.rstrip('/')}{path}" if sc != "/" else path
inf = self._memory.info(full)
total_records += inf.record_count
all_categories.update(inf.categories)
if inf.oldest_record:
oldest = (
inf.oldest_record
if oldest is None
else min(oldest, inf.oldest_record)
)
oldest = inf.oldest_record if oldest is None else min(oldest, inf.oldest_record)
if inf.newest_record:
newest = (
inf.newest_record
if newest is None
else max(newest, inf.newest_record)
)
newest = inf.newest_record if newest is None else max(newest, inf.newest_record)
children.extend(inf.child_scopes)
return ScopeInfo(
path=path,
@@ -287,7 +265,7 @@ class MemorySlice(BaseModel):
def list_categories(self, path: str | None = None) -> dict[str, int]:
"""Categories and counts across slice scopes."""
counts: dict[str, int] = {}
for sc in self.scopes:
for sc in self._scopes:
full = (f"{sc.rstrip('/')}{path}" if sc != "/" else path) if path else sc
for k, v in self._memory.list_categories(full).items():
counts[k] = counts.get(k, 0) + v

View File

@@ -38,8 +38,7 @@ class KickoffTaskOutputsSQLiteStorage:
DatabaseOperationError: If database initialization fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path, timeout=30) as conn:
conn.execute("PRAGMA journal_mode=WAL")
with sqlite3.connect(self.db_path) as conn:
cursor = conn.cursor()
cursor.execute(
"""
@@ -83,7 +82,7 @@ class KickoffTaskOutputsSQLiteStorage:
"""
inputs = inputs or {}
try:
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()
cursor.execute(
@@ -126,7 +125,7 @@ class KickoffTaskOutputsSQLiteStorage:
DatabaseOperationError: If updating the task output fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()
@@ -167,7 +166,7 @@ class KickoffTaskOutputsSQLiteStorage:
DatabaseOperationError: If loading task outputs fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
cursor = conn.cursor()
cursor.execute("""
SELECT *
@@ -206,7 +205,7 @@ class KickoffTaskOutputsSQLiteStorage:
DatabaseOperationError: If deleting task outputs fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path, timeout=30) as conn:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()
cursor.execute("DELETE FROM latest_kickoff_task_outputs")

View File

@@ -2,7 +2,6 @@
from __future__ import annotations
from contextlib import AbstractContextManager
from datetime import datetime
import json
import logging
@@ -15,7 +14,6 @@ from typing import Any, ClassVar
import lancedb
from crewai.memory.types import MemoryRecord, ScopeInfo
from crewai.utilities.lock_store import lock as store_lock
_logger = logging.getLogger(__name__)
@@ -92,7 +90,6 @@ class LanceDBStorage:
# Raise it proactively so scans on large tables never hit OS error 24.
try:
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
if soft < 4096:
resource.setrlimit(resource.RLIMIT_NOFILE, (min(hard, 4096), hard))
@@ -102,8 +99,7 @@ class LanceDBStorage:
self._compact_every = compact_every
self._save_count = 0
self._lock_name = f"lancedb:{self._path.resolve()}"
# Get or create a shared write lock for this database path.
resolved = str(self._path.resolve())
with LanceDBStorage._path_locks_guard:
if resolved not in LanceDBStorage._path_locks:
@@ -114,13 +110,10 @@ class LanceDBStorage:
# If no table exists yet, defer creation until the first save so the
# dimension can be auto-detected from the embedder's actual output.
try:
self._table: lancedb.table.Table | None = self._db.open_table(
self._table_name
)
self._table: lancedb.table.Table | None = self._db.open_table(self._table_name)
self._vector_dim: int = self._infer_dim_from_table(self._table)
# Best-effort: create the scope index if it doesn't exist yet.
with self._file_lock():
self._ensure_scope_index()
self._ensure_scope_index()
# Compact in the background if the table has accumulated many
# fragments from previous runs (each save() creates one).
self._compact_if_needed()
@@ -131,8 +124,7 @@ class LanceDBStorage:
# Explicit dim provided: create the table immediately if it doesn't exist.
if self._table is None and vector_dim is not None:
self._vector_dim = vector_dim
with self._file_lock():
self._table = self._create_table(vector_dim)
self._table = self._create_table(vector_dim)
@property
def write_lock(self) -> threading.RLock:
@@ -157,14 +149,18 @@ class LanceDBStorage:
break
return DEFAULT_VECTOR_DIM
def _file_lock(self) -> AbstractContextManager[None]:
"""Return a cross-process lock for serialising writes."""
return store_lock(self._lock_name)
def _retry_write(self, op: str, *args: Any, **kwargs: Any) -> Any:
"""Execute a table operation with retry on LanceDB commit conflicts.
def _do_write(self, op: str, *args: Any, **kwargs: Any) -> Any:
"""Execute a single table write with retry on commit conflicts.
Args:
op: Method name on the table object (e.g. "add", "delete").
*args, **kwargs: Passed to the table method.
Caller must already hold the cross-process file lock.
LanceDB uses optimistic concurrency: if two transactions overlap,
the second to commit fails with an ``OSError`` containing
"Commit conflict". This helper retries with exponential backoff,
refreshing the table reference before each retry so the retried
call uses the latest committed version (not a stale reference).
"""
delay = _RETRY_BASE_DELAY
for attempt in range(_MAX_RETRIES + 1):
@@ -175,24 +171,20 @@ class LanceDBStorage:
raise
_logger.debug(
"LanceDB commit conflict on %s (attempt %d/%d), retrying in %.1fs",
op,
attempt + 1,
_MAX_RETRIES,
delay,
op, attempt + 1, _MAX_RETRIES, delay,
)
# Refresh table to pick up the latest version before retrying.
# The next getattr(self._table, op) will use the fresh table.
try:
self._table = self._db.open_table(self._table_name)
except Exception: # noqa: S110
pass
pass # table refresh is best-effort
time.sleep(delay)
delay *= 2
return None # unreachable, but satisfies type checker
def _create_table(self, vector_dim: int) -> lancedb.table.Table:
"""Create a new table with the given vector dimension.
Caller must already hold the cross-process file lock.
"""
"""Create a new table with the given vector dimension."""
placeholder = [
{
"id": "__schema_placeholder__",
@@ -208,12 +200,8 @@ class LanceDBStorage:
"vector": [0.0] * vector_dim,
}
]
try:
table = self._db.create_table(self._table_name, placeholder)
except ValueError:
table = self._db.open_table(self._table_name)
else:
table.delete("id = '__schema_placeholder__'")
table = self._db.create_table(self._table_name, placeholder)
table.delete("id = '__schema_placeholder__'")
return table
def _ensure_scope_index(self) -> None:
@@ -260,9 +248,9 @@ class LanceDBStorage:
"""Run ``table.optimize()`` in a background thread, absorbing errors."""
try:
if self._table is not None:
with self._file_lock():
self._table.optimize()
self._ensure_scope_index()
self._table.optimize()
# Refresh the scope index so new fragments are covered.
self._ensure_scope_index()
except Exception:
_logger.debug("LanceDB background compaction failed", exc_info=True)
@@ -292,9 +280,7 @@ class LanceDBStorage:
"last_accessed": record.last_accessed.isoformat(),
"source": record.source or "",
"private": record.private,
"vector": record.embedding
if record.embedding
else [0.0] * self._vector_dim,
"vector": record.embedding if record.embedding else [0.0] * self._vector_dim,
}
def _row_to_record(self, row: dict[str, Any]) -> MemoryRecord:
@@ -310,9 +296,7 @@ class LanceDBStorage:
id=str(row["id"]),
content=str(row["content"]),
scope=str(row["scope"]),
categories=json.loads(row["categories_str"])
if row.get("categories_str")
else [],
categories=json.loads(row["categories_str"]) if row.get("categories_str") else [],
metadata=json.loads(row["metadata_str"]) if row.get("metadata_str") else {},
importance=float(row.get("importance", 0.5)),
created_at=_parse_dt(row.get("created_at")),
@@ -332,15 +316,16 @@ class LanceDBStorage:
dim = len(r.embedding)
break
is_new_table = self._table is None
with self._write_lock, self._file_lock():
with self._write_lock:
self._ensure_table(vector_dim=dim)
rows = [self._record_to_row(r) for r in records]
for r in rows:
if r["vector"] is None or len(r["vector"]) != self._vector_dim:
r["vector"] = [0.0] * self._vector_dim
self._do_write("add", rows)
if is_new_table:
self._ensure_scope_index()
self._retry_write("add", rows)
# Create the scope index on the first save so it covers the initial dataset.
if is_new_table:
self._ensure_scope_index()
# Auto-compact every N saves so fragment files don't pile up.
self._save_count += 1
if self._compact_every > 0 and self._save_count % self._compact_every == 0:
@@ -348,14 +333,14 @@ class LanceDBStorage:
def update(self, record: MemoryRecord) -> None:
"""Update a record by ID. Preserves created_at, updates last_accessed."""
with self._write_lock, self._file_lock():
with self._write_lock:
self._ensure_table()
safe_id = str(record.id).replace("'", "''")
self._do_write("delete", f"id = '{safe_id}'")
self._retry_write("delete", f"id = '{safe_id}'")
row = self._record_to_row(record)
if row["vector"] is None or len(row["vector"]) != self._vector_dim:
row["vector"] = [0.0] * self._vector_dim
self._do_write("add", [row])
self._retry_write("add", [row])
def touch_records(self, record_ids: list[str]) -> None:
"""Update last_accessed to now for the given record IDs.
@@ -369,11 +354,11 @@ class LanceDBStorage:
"""
if not record_ids or self._table is None:
return
with self._write_lock, self._file_lock():
with self._write_lock:
now = datetime.utcnow().isoformat()
safe_ids = [str(rid).replace("'", "''") for rid in record_ids]
ids_expr = ", ".join(f"'{rid}'" for rid in safe_ids)
self._do_write(
self._retry_write(
"update",
where=f"id IN ({ids_expr})",
values={"last_accessed": now},
@@ -405,17 +390,13 @@ class LanceDBStorage:
prefix = scope_prefix.rstrip("/")
like_val = prefix + "%"
query = query.where(f"scope LIKE '{like_val}'")
results = query.limit(
limit * 3 if (categories or metadata_filter) else limit
).to_list()
results = query.limit(limit * 3 if (categories or metadata_filter) else limit).to_list()
out: list[tuple[MemoryRecord, float]] = []
for row in results:
record = self._row_to_record(row)
if categories and not any(c in record.categories for c in categories):
continue
if metadata_filter and not all(
record.metadata.get(k) == v for k, v in metadata_filter.items()
):
if metadata_filter and not all(record.metadata.get(k) == v for k, v in metadata_filter.items()):
continue
distance = row.get("_distance", 0.0)
score = 1.0 / (1.0 + float(distance)) if distance is not None else 1.0
@@ -435,24 +416,20 @@ class LanceDBStorage:
) -> int:
if self._table is None:
return 0
with self._write_lock, self._file_lock():
with self._write_lock:
if record_ids and not (categories or metadata_filter):
before = self._table.count_rows()
ids_expr = ", ".join(f"'{rid}'" for rid in record_ids)
self._do_write("delete", f"id IN ({ids_expr})")
self._retry_write("delete", f"id IN ({ids_expr})")
return before - self._table.count_rows()
if categories or metadata_filter:
rows = self._scan_rows(scope_prefix)
to_delete: list[str] = []
for row in rows:
record = self._row_to_record(row)
if categories and not any(
c in record.categories for c in categories
):
if categories and not any(c in record.categories for c in categories):
continue
if metadata_filter and not all(
record.metadata.get(k) == v for k, v in metadata_filter.items()
):
if metadata_filter and not all(record.metadata.get(k) == v for k, v in metadata_filter.items()):
continue
if older_than and record.created_at >= older_than:
continue
@@ -461,7 +438,7 @@ class LanceDBStorage:
return 0
before = self._table.count_rows()
ids_expr = ", ".join(f"'{rid}'" for rid in to_delete)
self._do_write("delete", f"id IN ({ids_expr})")
self._retry_write("delete", f"id IN ({ids_expr})")
return before - self._table.count_rows()
conditions = []
if scope_prefix is not None and scope_prefix.strip("/"):
@@ -473,11 +450,11 @@ class LanceDBStorage:
conditions.append(f"created_at < '{older_than.isoformat()}'")
if not conditions:
before = self._table.count_rows()
self._do_write("delete", "id != ''")
self._retry_write("delete", "id != ''")
return before - self._table.count_rows()
where_expr = " AND ".join(conditions)
before = self._table.count_rows()
self._do_write("delete", where_expr)
self._retry_write("delete", where_expr)
return before - self._table.count_rows()
def _scan_rows(
@@ -551,7 +528,7 @@ class LanceDBStorage:
for row in rows:
sc = str(row.get("scope", ""))
if child_prefix and sc.startswith(child_prefix):
rest = sc[len(child_prefix) :]
rest = sc[len(child_prefix):]
first_component = rest.split("/", 1)[0]
if first_component:
children.add(child_prefix + first_component)
@@ -562,11 +539,7 @@ class LanceDBStorage:
pass
created = row.get("created_at")
if created:
dt = (
datetime.fromisoformat(str(created).replace("Z", "+00:00"))
if isinstance(created, str)
else created
)
dt = datetime.fromisoformat(str(created).replace("Z", "+00:00")) if isinstance(created, str) else created
if isinstance(dt, datetime):
if oldest is None or dt < oldest:
oldest = dt
@@ -589,7 +562,7 @@ class LanceDBStorage:
for row in rows:
sc = str(row.get("scope", ""))
if sc.startswith(prefix) and sc != (prefix.rstrip("/") or "/"):
rest = sc[len(prefix) :]
rest = sc[len(prefix):]
first_component = rest.split("/", 1)[0]
if first_component:
children.add(prefix + first_component)
@@ -617,19 +590,17 @@ class LanceDBStorage:
return info.record_count
def reset(self, scope_prefix: str | None = None) -> None:
with self._write_lock, self._file_lock():
if scope_prefix is None or scope_prefix.strip("/") == "":
if self._table is not None:
self._db.drop_table(self._table_name)
self._table = None
return
if self._table is None:
return
prefix = scope_prefix.rstrip("/")
if prefix:
self._do_write(
"delete", f"scope >= '{prefix}' AND scope < '{prefix}/\uffff'"
)
if scope_prefix is None or scope_prefix.strip("/") == "":
if self._table is not None:
self._db.drop_table(self._table_name)
self._table = None
# Dimension is preserved; table will be recreated on next save.
return
if self._table is None:
return
prefix = scope_prefix.rstrip("/")
if prefix:
self._table.delete(f"scope >= '{prefix}' AND scope < '{prefix}/\uFFFF'")
def optimize(self) -> None:
"""Compact the table synchronously and refresh the scope index.
@@ -643,9 +614,8 @@ class LanceDBStorage:
"""
if self._table is None:
return
with self._write_lock, self._file_lock():
self._table.optimize()
self._ensure_scope_index()
self._table.optimize()
self._ensure_scope_index()
async def asave(self, records: list[MemoryRecord]) -> None:
self.save(records)

View File

@@ -91,10 +91,18 @@ class MemoryMatch(BaseModel):
"""Format this match as a human-readable string including metadata.
Returns:
A multi-line string with score, content, categories, and non-empty
metadata fields.
A multi-line string with score, content, scope/date, categories,
and non-empty metadata fields.
"""
lines = [f"- (score={self.score:.2f}) {self.record.content}"]
# Extract date from scope (e.g. "/conversations/2023-05-29" -> "2023-05-29")
date_str = ""
if self.record.scope and self.record.scope != "/":
parts = self.record.scope.rstrip("/").rsplit("/", 1)
if len(parts) > 1 and len(parts[-1]) >= 10:
date_str = f" [date: {parts[-1]}]"
lines = [f"- (score={self.score:.2f}){date_str} {self.record.content}"]
if self.record.scope and self.record.scope != "/":
lines.append(f" scope: {self.record.scope}")
if self.record.categories:
lines.append(f" categories: {', '.join(self.record.categories)}")
if self.record.metadata:
@@ -366,7 +374,13 @@ def compute_composite_score(
Tuple of (composite_score, match_reasons). match_reasons includes
"semantic" always; "recency" if decay > 0.5; "importance" if record.importance > 0.5.
"""
age_seconds = (datetime.utcnow() - record.created_at).total_seconds()
now = datetime.utcnow()
created = record.created_at
# Strip timezone info to avoid "can't compare offset-naive and
# offset-aware datetimes" when records have mixed tz awareness.
if created.tzinfo is not None:
created = created.replace(tzinfo=None)
age_seconds = (now - created).total_seconds()
age_days = max(age_seconds / 86400.0, 0.0)
decay = 0.5 ** (age_days / config.recency_half_life_days)

View File

@@ -6,9 +6,7 @@ from concurrent.futures import Future, ThreadPoolExecutor
from datetime import datetime
import threading
import time
from typing import TYPE_CHECKING, Annotated, Any, Literal
from pydantic import BaseModel, ConfigDict, Field, PlainValidator, PrivateAttr
from typing import TYPE_CHECKING, Any, Literal
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
@@ -41,18 +39,13 @@ if TYPE_CHECKING:
)
def _passthrough(v: Any) -> Any:
"""PlainValidator that accepts any value, bypassing strict union discrimination."""
return v
def _default_embedder() -> OpenAIEmbeddingFunction:
"""Build default OpenAI embedder for memory."""
spec: OpenAIProviderSpec = {"provider": "openai", "config": {}}
return build_embedder(spec)
class Memory(BaseModel):
class Memory:
"""Unified memory: standalone, LLM-analyzed, with intelligent recall flow.
Works without agent/crew. Uses LLM to infer scope, categories, importance on save.
@@ -60,119 +53,116 @@ class Memory(BaseModel):
pluggable storage (LanceDB default).
"""
model_config = ConfigDict(arbitrary_types_allowed=True)
def __init__(
self,
llm: BaseLLM | str = "gpt-4o-mini",
storage: StorageBackend | str = "lancedb",
embedder: Any = None,
# -- Scoring weights --
# These three weights control how recall results are ranked.
# The composite score is: semantic_weight * similarity + recency_weight * decay + importance_weight * importance.
# They should sum to ~1.0 for intuitive scoring.
recency_weight: float = 0.3,
semantic_weight: float = 0.5,
importance_weight: float = 0.2,
# How quickly old memories lose relevance. The recency score halves every
# N days (exponential decay). Lower = faster forgetting; higher = longer relevance.
recency_half_life_days: int = 30,
# -- Consolidation --
# When remembering new content, if an existing record has similarity >= this
# threshold, the LLM is asked to merge/update/delete. Set to 1.0 to disable.
consolidation_threshold: float = 0.85,
# Max existing records to compare against when checking for consolidation.
consolidation_limit: int = 5,
# -- Save defaults --
# Importance assigned to new memories when no explicit value is given and
# the LLM analysis path is skipped (all fields provided by the caller).
default_importance: float = 0.5,
# -- Recall depth control --
# These thresholds govern the RecallFlow router that decides between
# returning results immediately ("synthesize") vs. doing an extra
# LLM-driven exploration round ("explore_deeper").
# confidence >= confidence_threshold_high => always synthesize
# confidence < confidence_threshold_low => explore deeper (if budget > 0)
# complex query + confidence < complex_query_threshold => explore deeper
confidence_threshold_high: float = 0.8,
confidence_threshold_low: float = 0.5,
complex_query_threshold: float = 0.7,
# How many LLM-driven exploration rounds the RecallFlow is allowed to run.
# 0 = always shallow (vector search only); higher = more thorough but slower.
exploration_budget: int = 1,
# Queries shorter than this skip LLM analysis (saving ~1-3s).
# Longer queries (full task descriptions) benefit from LLM distillation.
query_analysis_threshold: int = 200,
# When True, all write operations (remember, remember_many) are silently
# skipped. Useful for sharing a read-only view of memory across agents
# without any of them persisting new memories.
read_only: bool = False,
) -> None:
"""Initialize Memory.
llm: Annotated[BaseLLM | str, PlainValidator(_passthrough)] = Field(
default="gpt-4o-mini",
description="LLM for analysis (model name or BaseLLM instance).",
)
storage: Annotated[StorageBackend | str, PlainValidator(_passthrough)] = Field(
default="lancedb",
description="Storage backend instance or path string.",
)
embedder: Any = Field(
default=None,
description="Embedding callable, provider config dict, or None for default OpenAI.",
)
recency_weight: float = Field(
default=0.3,
description="Weight for recency in the composite relevance score.",
)
semantic_weight: float = Field(
default=0.5,
description="Weight for semantic similarity in the composite relevance score.",
)
importance_weight: float = Field(
default=0.2,
description="Weight for importance in the composite relevance score.",
)
recency_half_life_days: int = Field(
default=30,
description="Recency score halves every N days (exponential decay).",
)
consolidation_threshold: float = Field(
default=0.85,
description="Similarity above which consolidation is triggered on save.",
)
consolidation_limit: int = Field(
default=5,
description="Max existing records to compare during consolidation.",
)
default_importance: float = Field(
default=0.5,
description="Default importance when not provided or inferred.",
)
confidence_threshold_high: float = Field(
default=0.8,
description="Recall confidence above which results are returned directly.",
)
confidence_threshold_low: float = Field(
default=0.5,
description="Recall confidence below which deeper exploration is triggered.",
)
complex_query_threshold: float = Field(
default=0.7,
description="For complex queries, explore deeper below this confidence.",
)
exploration_budget: int = Field(
default=1,
description="Number of LLM-driven exploration rounds during deep recall.",
)
query_analysis_threshold: int = Field(
default=200,
description="Queries shorter than this skip LLM analysis during deep recall.",
)
read_only: bool = Field(
default=False,
description="If True, remember() and remember_many() are silent no-ops.",
)
_config: MemoryConfig = PrivateAttr()
_llm_instance: BaseLLM | None = PrivateAttr(default=None)
_embedder_instance: Any = PrivateAttr(default=None)
_storage: StorageBackend = PrivateAttr()
_save_pool: ThreadPoolExecutor = PrivateAttr(
default_factory=lambda: ThreadPoolExecutor(
max_workers=1, thread_name_prefix="memory-save"
)
)
_pending_saves: list[Future[Any]] = PrivateAttr(default_factory=list)
_pending_lock: threading.Lock = PrivateAttr(default_factory=threading.Lock)
def model_post_init(self, __context: Any) -> None:
"""Initialize runtime state from field values."""
Args:
llm: LLM for analysis (model name or BaseLLM instance).
storage: Backend: "lancedb" or a StorageBackend instance.
embedder: Embedding callable, provider config dict, or None (default OpenAI).
recency_weight: Weight for recency in the composite relevance score.
semantic_weight: Weight for semantic similarity in the composite relevance score.
importance_weight: Weight for importance in the composite relevance score.
recency_half_life_days: Recency score halves every N days (exponential decay).
consolidation_threshold: Similarity above which consolidation is triggered on save.
consolidation_limit: Max existing records to compare during consolidation.
default_importance: Default importance when not provided or inferred.
confidence_threshold_high: Recall confidence above which results are returned directly.
confidence_threshold_low: Recall confidence below which deeper exploration is triggered.
complex_query_threshold: For complex queries, explore deeper below this confidence.
exploration_budget: Number of LLM-driven exploration rounds during deep recall.
query_analysis_threshold: Queries shorter than this skip LLM analysis during deep recall.
read_only: If True, remember() and remember_many() are silent no-ops.
"""
self._read_only = read_only
self._config = MemoryConfig(
recency_weight=self.recency_weight,
semantic_weight=self.semantic_weight,
importance_weight=self.importance_weight,
recency_half_life_days=self.recency_half_life_days,
consolidation_threshold=self.consolidation_threshold,
consolidation_limit=self.consolidation_limit,
default_importance=self.default_importance,
confidence_threshold_high=self.confidence_threshold_high,
confidence_threshold_low=self.confidence_threshold_low,
complex_query_threshold=self.complex_query_threshold,
exploration_budget=self.exploration_budget,
query_analysis_threshold=self.query_analysis_threshold,
recency_weight=recency_weight,
semantic_weight=semantic_weight,
importance_weight=importance_weight,
recency_half_life_days=recency_half_life_days,
consolidation_threshold=consolidation_threshold,
consolidation_limit=consolidation_limit,
default_importance=default_importance,
confidence_threshold_high=confidence_threshold_high,
confidence_threshold_low=confidence_threshold_low,
complex_query_threshold=complex_query_threshold,
exploration_budget=exploration_budget,
query_analysis_threshold=query_analysis_threshold,
)
self._llm_instance = None if isinstance(self.llm, str) else self.llm
self._embedder_instance = (
self.embedder
if (self.embedder is not None and not isinstance(self.embedder, dict))
# Store raw config for lazy initialization. LLM and embedder are only
# built on first access so that Memory() never fails at construction
# time (e.g. when auto-created by Flow without an API key set).
self._llm_config: BaseLLM | str = llm
self._llm_instance: BaseLLM | None = None if isinstance(llm, str) else llm
self._embedder_config: Any = embedder
self._embedder_instance: Any = (
embedder
if (embedder is not None and not isinstance(embedder, dict))
else None
)
if isinstance(self.storage, str):
if isinstance(storage, str):
from crewai.memory.storage.lancedb_storage import LanceDBStorage
self._storage = (
LanceDBStorage()
if self.storage == "lancedb"
else LanceDBStorage(path=self.storage)
)
self._storage = LanceDBStorage() if storage == "lancedb" else LanceDBStorage(path=storage)
else:
self._storage = self.storage
self._storage = storage
# Background save queue. max_workers=1 serializes saves to avoid
# concurrent storage mutations (two saves finding the same similar
# record and both trying to update/delete it). Within each save,
# the parallel LLM calls still run on their own thread pool.
self._save_pool = ThreadPoolExecutor(
max_workers=1, thread_name_prefix="memory-save"
)
self._pending_saves: list[Future[Any]] = []
self._pending_lock = threading.Lock()
_MEMORY_DOCS_URL = "https://docs.crewai.com/concepts/memory"
@@ -183,7 +173,11 @@ class Memory(BaseModel):
from crewai.llm import LLM
try:
model_name = self.llm if isinstance(self.llm, str) else str(self.llm)
model_name = (
self._llm_config
if isinstance(self._llm_config, str)
else str(self._llm_config)
)
self._llm_instance = LLM(model=model_name)
except Exception as e:
raise RuntimeError(
@@ -203,8 +197,8 @@ class Memory(BaseModel):
"""Lazy embedder initialization -- only created when first needed."""
if self._embedder_instance is None:
try:
if isinstance(self.embedder, dict):
self._embedder_instance = build_embedder(self.embedder)
if isinstance(self._embedder_config, dict):
self._embedder_instance = build_embedder(self._embedder_config)
else:
self._embedder_instance = _default_embedder()
except Exception as e:
@@ -362,7 +356,7 @@ class Memory(BaseModel):
Raises:
Exception: On save failure (events emitted).
"""
if self.read_only:
if self._read_only:
return None
_source_type = "unified_memory"
try:
@@ -450,7 +444,7 @@ class Memory(BaseModel):
Returns:
Empty list (records are not available until the background save completes).
"""
if not contents or self.read_only:
if not contents or self._read_only:
return []
self._submit_save(

View File

@@ -1,12 +1,13 @@
"""Factory functions for creating ChromaDB clients."""
from hashlib import md5
import os
from chromadb import PersistentClient
import portalocker
from crewai.rag.chromadb.client import ChromaDBClient
from crewai.rag.chromadb.config import ChromaDBConfig
from crewai.utilities.lock_store import lock
def create_client(config: ChromaDBConfig) -> ChromaDBClient:
@@ -24,8 +25,10 @@ def create_client(config: ChromaDBConfig) -> ChromaDBClient:
persist_dir = config.settings.persist_directory
os.makedirs(persist_dir, exist_ok=True)
lock_id = md5(persist_dir.encode(), usedforsecurity=False).hexdigest()
lockfile = os.path.join(persist_dir, f"chromadb-{lock_id}.lock")
with lock(f"chromadb:{persist_dir}"):
with portalocker.Lock(lockfile):
client = PersistentClient(
path=persist_dir,
settings=config.settings,

View File

@@ -2,7 +2,6 @@ from __future__ import annotations
import asyncio
from concurrent.futures import Future
import contextvars
from copy import copy as shallow_copy
import datetime
from hashlib import md5
@@ -525,11 +524,10 @@ class Task(BaseModel):
) -> Future[TaskOutput]:
"""Execute the task asynchronously."""
future: Future[TaskOutput] = Future()
ctx = contextvars.copy_context()
threading.Thread(
daemon=True,
target=ctx.run,
args=(self._execute_task_async, agent, context, tools, future),
target=self._execute_task_async,
args=(agent, context, tools, future),
).start()
return future

View File

@@ -1,30 +1,29 @@
"""Native MCP tool wrapper for CrewAI agents.
This module provides a tool wrapper that creates a fresh MCP client for every
invocation, ensuring safe parallel execution even when the same tool is called
concurrently by the executor.
This module provides a tool wrapper that reuses existing MCP client sessions
for better performance and connection management.
"""
import asyncio
from collections.abc import Callable
from typing import Any
from crewai.tools import BaseTool
class MCPNativeTool(BaseTool):
"""Native MCP tool that creates a fresh client per invocation.
"""Native MCP tool that reuses client sessions.
A ``client_factory`` callable produces an independent ``MCPClient`` +
transport for every ``_run_async`` call. This guarantees that parallel
invocations -- whether of the *same* tool or *different* tools from the
same server -- never share mutable connection state (which would cause
anyio cancel-scope errors).
This tool wrapper is used when agents connect to MCP servers using
structured configurations. It reuses existing client sessions for
better performance and proper connection lifecycle management.
Unlike MCPToolWrapper which connects on-demand, this tool uses
a shared MCP client instance that maintains a persistent connection.
"""
def __init__(
self,
client_factory: Callable[[], Any],
mcp_client: Any,
tool_name: str,
tool_schema: dict[str, Any],
server_name: str,
@@ -33,16 +32,19 @@ class MCPNativeTool(BaseTool):
"""Initialize native MCP tool.
Args:
client_factory: Zero-arg callable that returns a new MCPClient.
mcp_client: MCPClient instance with active session.
tool_name: Name of the tool (may be prefixed).
tool_schema: Schema information for the tool.
server_name: Name of the MCP server for prefixing.
original_tool_name: Original name of the tool on the MCP server.
"""
# Create tool name with server prefix to avoid conflicts
prefixed_name = f"{server_name}_{tool_name}"
# Handle args_schema properly - BaseTool expects a BaseModel subclass
args_schema = tool_schema.get("args_schema")
# Only pass args_schema if it's provided
kwargs = {
"name": prefixed_name,
"description": tool_schema.get(
@@ -55,9 +57,16 @@ class MCPNativeTool(BaseTool):
super().__init__(**kwargs)
self._client_factory = client_factory
# Set instance attributes after super().__init__
self._mcp_client = mcp_client
self._original_tool_name = original_tool_name or tool_name
self._server_name = server_name
# self._logger = logging.getLogger(__name__)
@property
def mcp_client(self) -> Any:
"""Get the MCP client instance."""
return self._mcp_client
@property
def original_tool_name(self) -> str:
@@ -99,26 +108,51 @@ class MCPNativeTool(BaseTool):
async def _run_async(self, **kwargs) -> str:
"""Async implementation of tool execution.
A fresh ``MCPClient`` is created for every invocation so that
concurrent calls never share transport or session state.
Args:
**kwargs: Arguments to pass to the MCP tool.
Returns:
Result from the MCP tool execution.
"""
client = self._client_factory()
await client.connect()
# Note: Since we use asyncio.run() which creates a new event loop each time,
# Always reconnect on-demand because asyncio.run() creates new event loops per call
# All MCP transport context managers (stdio, streamablehttp_client, sse_client)
# use anyio.create_task_group() which can't span different event loops
if self._mcp_client.connected:
await self._mcp_client.disconnect()
await self._mcp_client.connect()
try:
result = await client.call_tool(self.original_tool_name, kwargs)
finally:
await client.disconnect()
result = await self._mcp_client.call_tool(self.original_tool_name, kwargs)
except Exception as e:
error_str = str(e).lower()
if (
"not connected" in error_str
or "connection" in error_str
or "send" in error_str
):
await self._mcp_client.disconnect()
await self._mcp_client.connect()
# Retry the call
result = await self._mcp_client.call_tool(
self.original_tool_name, kwargs
)
else:
raise
finally:
# Always disconnect after tool call to ensure clean context manager lifecycle
# This prevents "exit cancel scope in different task" errors
# All transport context managers must be exited in the same event loop they were entered
await self._mcp_client.disconnect()
# Extract result content
if isinstance(result, str):
return result
# Handle various result formats
if hasattr(result, "content") and result.content:
if isinstance(result.content, list) and len(result.content) > 0:
content_item = result.content[0]

View File

@@ -2,6 +2,10 @@
from __future__ import annotations
import ast
import operator
import re
from datetime import datetime
from typing import Any
from pydantic import BaseModel, Field
@@ -10,6 +14,80 @@ from crewai.tools.base_tool import BaseTool
from crewai.utilities.i18n import get_i18n
# ---------------------------------------------------------------------------
# Safe arithmetic evaluator (no eval())
# ---------------------------------------------------------------------------
_BINARY_OPS: dict[type, Any] = {
ast.Add: operator.add,
ast.Sub: operator.sub,
ast.Mult: operator.mul,
ast.Div: operator.truediv,
ast.FloorDiv: operator.floordiv,
ast.Mod: operator.mod,
ast.Pow: operator.pow,
}
_UNARY_OPS: dict[type, Any] = {
ast.USub: operator.neg,
ast.UAdd: operator.pos,
}
def _safe_eval_node(node: ast.AST) -> float:
"""Recursively evaluate an AST node containing only arithmetic."""
if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
return float(node.value)
if isinstance(node, ast.BinOp):
op = _BINARY_OPS.get(type(node.op))
if op is None:
raise ValueError(f"Unsupported operator: {type(node.op).__name__}")
return op(_safe_eval_node(node.left), _safe_eval_node(node.right))
if isinstance(node, ast.UnaryOp):
op = _UNARY_OPS.get(type(node.op))
if op is None:
raise ValueError(f"Unsupported unary operator: {type(node.op).__name__}")
return op(_safe_eval_node(node.operand))
raise ValueError(f"Unsupported expression element: {ast.dump(node)}")
def safe_calc(expression: str) -> float:
"""Safely evaluate a mathematical expression string.
Only supports arithmetic operators (+, -, *, /, //, %, **) and numeric
literals. No variable access, function calls, or attribute lookups.
"""
tree = ast.parse(expression.strip(), mode="eval")
return _safe_eval_node(tree.body)
# ---------------------------------------------------------------------------
# Date difference helper
# ---------------------------------------------------------------------------
_DATE_DIFF_RE = re.compile(
r"^\s*(\d{4}-\d{2}-\d{2})\s*-\s*(\d{4}-\d{2}-\d{2})\s*$"
)
def _try_date_diff(expression: str) -> str | None:
"""If *expression* is ``YYYY-MM-DD - YYYY-MM-DD``, return the day difference.
Returns a human-readable string like ``12 days`` or ``-5 days``, or
*None* if the expression is not a date subtraction.
"""
m = _DATE_DIFF_RE.match(expression.strip())
if m is None:
return None
try:
d1 = datetime.strptime(m.group(1), "%Y-%m-%d")
d2 = datetime.strptime(m.group(2), "%Y-%m-%d")
except ValueError:
return None
delta = (d1 - d2).days
return f"{expression.strip()} = {delta} days"
class RecallMemorySchema(BaseModel):
"""Schema for the recall memory tool."""
@@ -49,7 +127,7 @@ class RecallMemoryTool(BaseTool):
all_lines: list[str] = []
seen_ids: set[str] = set()
for query in queries:
matches = self.memory.recall(query, limit=20)
matches = self.memory.recall(query, limit=30)
for m in matches:
if m.record.id not in seen_ids:
seen_ids.add(m.record.id)
@@ -101,6 +179,52 @@ class RememberTool(BaseTool):
return f"Saving {len(contents)} items to memory in background."
class CalculatorSchema(BaseModel):
"""Schema for the calculator tool."""
expression: str = Field(
...,
description=(
"A mathematical expression to evaluate, e.g. '(30 + 25 + 85)' "
"or '(132 + 298) / 5'. Supports +, -, *, /, //, %, **. "
"Also supports date differences: '2023-04-01 - 2023-03-20' returns the number of days."
),
)
class CalculatorTool(BaseTool):
"""Lightweight calculator for arithmetic during memory-based reasoning."""
name: str = "Calculator"
description: str = ""
args_schema: type[BaseModel] = CalculatorSchema
def _run(self, expression: str, **kwargs: Any) -> str:
"""Evaluate a mathematical expression safely.
Supports arithmetic expressions and date differences
(``YYYY-MM-DD - YYYY-MM-DD``).
Args:
expression: Arithmetic or date-difference expression string.
Returns:
The expression and its result, or an error message.
"""
# Try date difference first (e.g. "2023-04-01 - 2023-03-20")
date_result = _try_date_diff(expression)
if date_result is not None:
return date_result
try:
result = safe_calc(expression)
# Format nicely: drop .0 for whole numbers
if result == int(result):
return f"{expression} = {int(result)}"
return f"{expression} = {result:.4g}"
except Exception as e:
return f"Error evaluating '{expression}': {e}"
def create_memory_tools(memory: Any) -> list[BaseTool]:
"""Create Recall and Remember tools for the given memory instance.
@@ -120,8 +244,11 @@ def create_memory_tools(memory: Any) -> list[BaseTool]:
memory=memory,
description=i18n.tools("recall_memory"),
),
CalculatorTool(
description=i18n.tools("calculator"),
),
]
if not memory.read_only:
if not getattr(memory, "_read_only", False):
tools.append(
RememberTool(
memory=memory,

View File

@@ -7,7 +7,7 @@
"slices": {
"observation": "\nObservation:",
"task": "\nCurrent Task: {input}\n\nBegin! This is VERY important to you, use the tools available and give your best Final Answer, your job depends on it!\n\nThought:",
"memory": "\n\n# Memories from past conversations:\n{memory}\n\nIMPORTANT: The memories above are an automatic selection and may be INCOMPLETE. If the task involves counting, listing, or summing items (e.g. 'how many', 'total', 'list all'), you MUST use the Search memory tool with several different queries before answering — do NOT rely solely on the memories shown above. Enumerate each distinct item you find before giving a final count.",
"memory": "\n\n# Memories from past conversations:\n{memory}\n\nGuidelines for using these memories:\n\n1. COMPLETENESS: The memories above are an automatic selection and may be INCOMPLETE. If the task involves counting, listing, or summing items (e.g. 'how many', 'total', 'list all'), you MUST use the Search memory tool with several different queries before answering.\n\n2. COUNTING & ARITHMETIC: When counting or computing totals:\n - You MUST search memory multiple times with DIFFERENT phrasings to find ALL items. After your first search, ask yourself 'could there be more items I haven't found?' and search again with different terms.\n - Enumerate EACH specific item individually with its details, then count. Do NOT guess a number — list them first.\n - Count instances/sessions, not categories (e.g. if yoga is 2x/week, that is 2 sessions, not 1).\n - Only exclude items if there is explicit confirmation of removal, sale, or cancellation — intent to sell or thinking about it still means the user currently has it.\n - Use the Calculator tool for all arithmetic (sums, averages, differences) instead of computing in your head.\n - Only count things the user personally did, owned, or participated in — not things merely mentioned or discussed informationally.\n\n3. PERSONALIZATION: When the user asks for advice, recommendations, tips, or opinions:\n - BUILD UPON what you know about the user — don't just restate their memories as new suggestions.\n - Explicitly reference their specific past experiences, preferences, tools, and interests by name.\n - Frame suggestions in terms of what they've already tried or expressed interest in (e.g. 'Since you enjoyed X, you might also like Y' or 'Building on your experience with X, consider trying Y').\n - NEVER give generic advice that ignores their memories. If you have relevant context, USE it.\n - NEVER say 'I don't have information' or 'I don't have recommendations' if you have ANY related memories. 
Instead, reason from what you know — even partial context is better than no answer.\n\n4. TEMPORAL REASONING: Each memory has a 'scope' field (e.g. '/conversations/2023-03-04') that tells you the date of the conversation it came from.\n - For 'how many days/weeks/months AGO did X happen': subtract the event's scope date from the question date. Use the Calculator with 'YYYY-MM-DD - YYYY-MM-DD' format, e.g. Calculator('2023-04-01 - 2023-03-20') returns '12 days'.\n - For 'how many days BETWEEN Event A and Event B': subtract the two events' scope dates from each other — do NOT use the question date. Example: if A is at scope 2023-02-10 and B at scope 2023-03-01, compute Calculator('2023-03-01 - 2023-02-10') = 19 days.\n - For 'how long ago was X WHEN Y happened': the reference point is when Y happened (Y's scope date), NOT the question date. Example: 'How many days ago did I launch my website when I signed my first client?' — use the client-signing date as the reference, not today.\n - For 'which happened first' or ordering: compare the scope dates directly — the earlier date happened first. If a memory says 'about a month ago' from scope 2023-05-29, compute the actual date (approximately 2023-04-29) and compare.\n - Always prefer the scope date over vague temporal references like 'recently' in the memory text.\n - NEVER compute date differences by manually counting days in each month. Always use the Calculator with YYYY-MM-DD format.\n\n5. KNOWLEDGE UPDATES: When multiple memories describe the same fact at different dates (different scope values), the LATEST one (most recent scope date) is the current truth.\n - If one memory says '3 sessions attended' (scope 2023-05-11) and a later one says '5 sessions attended' (scope 2023-10-30), the answer is 5 — NOT 3+5=8. 
The later value is a cumulative update, not an addition.\n - If one memory says 'class on Thursday' (scope 2023-06-16) and a later one says 'class on Friday' (scope 2023-06-30), the answer is Friday — the schedule changed.\n - If one memory says 'personal best 27:12' and a later one says 'personal best 25:50', the answer is 25:50 — the record was broken.\n - ALWAYS check the scope dates when you find conflicting information about the same topic. The most recent scope wins.\n - Do NOT sum values across time periods unless the question explicitly asks for a cumulative total across separate events (e.g. 'total hours spent across all sessions').",
"role_playing": "You are {role}. {backstory}\nYour personal goal is: {goal}",
"tools": "\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
"no_tools": "",
@@ -61,11 +61,12 @@
"default_action": "Please provide a detailed description of this image, including all visual elements, context, and any notable details you can observe."
},
"recall_memory": "Search through the team's shared memory for relevant information. Pass one or more queries to search for multiple things at once. Use this when you need to find facts, decisions, preferences, or past results that may have been stored previously. IMPORTANT: For questions that require counting, summing, or listing items across multiple conversations (e.g. 'how many X', 'total Y', 'list all Z'), you MUST search multiple times with different phrasings to ensure you find ALL relevant items before giving a final count or total. Do not rely on a single search — items may be described differently across conversations.",
"calculator": "Perform arithmetic calculations and date differences. Use this tool whenever you need to add, subtract, multiply, divide, compute averages/totals, or calculate the number of days between two dates. Pass a mathematical expression like '(30 + 25 + 85)' or '(132 + 298) / 5', or a date difference like '2023-04-01 - 2023-03-20' (returns days). Always use this tool instead of computing in your head — NEVER manually count days across months.",
"save_to_memory": "Store one or more important facts, decisions, observations, or lessons in memory so they can be recalled later by you or other agents. Pass multiple items at once when you have several things worth remembering."
},
"memory": {
"query_system": "You analyze a query for searching memory.\nGiven the query and available scopes, output:\n1. keywords: Key entities or keywords that can be used to filter by category.\n2. suggested_scopes: Which available scopes are most relevant (empty for all).\n3. complexity: 'simple' or 'complex'.\n4. recall_queries: 1-3 short, targeted search phrases distilled from the query. Each should be a concise phrase optimized for semantic vector search. If the query is already short and focused, return it as-is in a single-item list. For long task descriptions, extract the distinct things worth searching for.\n5. time_filter: If the query references a time period (like 'last week', 'yesterday', 'in January'), return an ISO 8601 date string for the earliest relevant date (e.g. '2026-02-01'). Return null if no time constraint is implied.",
"extract_memories_system": "You extract discrete, reusable memory statements from raw content (e.g. a task description and its result, or a conversation between a user and an assistant).\n\nFor the given content, output a list of memory statements. Each memory must:\n- Be one clear sentence or short statement\n- Be understandable without the original context\n- Capture a decision, fact, outcome, preference, lesson, or observation worth remembering\n- NOT be a vague summary or a restatement of the task description\n- NOT duplicate the same idea in different words\n\nWhen the content is a conversation, pay special attention to facts stated by the user (first-person statements). These personal facts are HIGH PRIORITY and must always be extracted:\n- What the user did, bought, made, visited, attended, or completed\n- Names of people, pets, places, brands, and specific items the user mentions\n- Quantities, durations, dates, and measurements the user states\n- Subordinate clauses and casual asides often contain important personal details (e.g. \"by the way, it took me 4 hours\" or \"my Golden Retriever Max\")\n\nPreserve exact names and numbers — never generalize (e.g. keep \"lavender gin fizz\" not just \"cocktail\", keep \"12 largemouth bass\" not just \"fish caught\", keep \"Golden Retriever\" not just \"dog\").\n\nAdditional extraction rules:\n- Presupposed facts: When the user reveals a fact indirectly in a question (e.g. \"What collar suits a Golden Retriever like Max?\" presupposes Max is a Golden Retriever), extract that fact as a separate memory.\n- Date precision: Always preserve the full date including day-of-month when stated (e.g. \"February 14th\" not just \"February\", \"March 5\" not just \"March\").\n- Life events in passing: When the user mentions a life event (birth, wedding, graduation, move, adoption) while discussing something else, extract the life event as its own memory (e.g. 
\"my friend David had a baby boy named Jasper\" is a birth fact, even if mentioned while planning to send congratulations).\n\nIf there is nothing worth remembering (e.g. empty result, no decisions or facts), return an empty list.\nOutput a JSON object with a single key \"memories\" whose value is a list of strings.",
"extract_memories_system": "You extract discrete, reusable memory statements from raw content (e.g. a task description and its result, or a conversation between a user and an assistant).\n\nFor the given content, output a list of memory statements. Each memory must:\n- Be one clear sentence or short statement\n- Be understandable without the original context\n- Capture a decision, fact, outcome, preference, lesson, or observation worth remembering\n- NOT be a vague summary or a restatement of the task description\n- NOT duplicate the same idea in different words\n\nCRITICAL — Extract ALL facts, not just the main topic:\nUsers often reveal important personal facts WHILE discussing something else. Extract these background facts as SEPARATE memories:\n- Casual asides: \"By the way, I just finished a 5K in 35 minutes\" → extract the 5K time as its own memory\n- Qualifiers in questions: \"What collar suits a Golden Retriever like Max?\" → extract \"The user's dog Max is a Golden Retriever\"\n- Brief mentions in lists: If the user lists several items (\"$100 gift card for brother, $75 earrings for sister, $100 baby gift for coworker\"), extract EACH item as a separate memory — do not skip items mentioned only once\n- Adjectival modifiers: \"our 10-day trip to Hawaii\" → extract the duration (10 days) explicitly\n\nWhen the user makes a request that reveals a personal fact, extract BOTH:\n- The fact: \"The user's dog Max is a Golden Retriever\"\n- The action: \"The user is looking for a new collar with a name tag for Max\"\nNever let the request overshadow the revealed fact.\n\nUser personal facts are HIGH PRIORITY and must always be extracted:\n- What the user did, bought, made, visited, attended, or completed\n- Names of people, pets, places, brands, and specific items the user mentions\n- Quantities, durations, dates, prices, and measurements the user states\n- Subordinate clauses and casual asides often contain important personal details\n\nPreserve exact names and 
numbers — never generalize:\n- Keep \"32 years old\" not \"in their 30s\"\n- Keep \"$24 after a discount\" not \"an impulse buy\"\n- Keep \"10-day trip\" not just \"trip\"\n- Keep \"35 minutes\" not just \"started running\"\n- Keep \"lavender gin fizz\" not just \"cocktail\"\n- Keep \"12 largemouth bass\" not just \"fish caught\"\n- Keep \"Golden Retriever\" not just \"dog\"\n\nWhen the content includes assistant responses, also extract useful factual information, recommendations, or solutions the assistant provides.\n\nAdditional extraction rules:\n- Presupposed facts: When the user reveals a fact indirectly in a question (e.g. \"What collar suits a Golden Retriever like Max?\" presupposes Max is a Golden Retriever), extract that fact as a separate memory.\n- Date precision: Always preserve the full date including day-of-month when stated (e.g. \"February 14th\" not just \"February\", \"March 5\" not just \"March\"). When a well-known holiday name implies a specific date (e.g. \"Valentine's Day\" = February 14th, \"Christmas\" = December 25th), include the calendar date.\n- Relative date resolution: The content often has a date header like \"[Conversation on 2023/05/21 (Sun) 18:59]\". When relative time expressions appear (\"two months ago\", \"last Thursday\", \"about a month ago\"), resolve them to approximate absolute dates using the conversation date (e.g. \"two months ago\" in a May 2023 conversation → \"around March 2023\"). Include the resolved date in the memory.\n- Life events in passing: When the user mentions a life event (birth, wedding, graduation, move, adoption) while discussing something else, extract the life event as its own FACTUAL memory (e.g. \"my friend David had a baby boy named Jasper\" → \"David had a baby boy named Jasper\"). Do NOT convert life events into task actions (do NOT store as \"set up birthday reminder for Jasper\").\n- Separate distinct actions: When the user describes multiple pending tasks (e.g. 
\"I need to return the old boots AND pick up the new pair\"), extract each as a separate memory. Do not merge them into one statement.\n- Completed vs planned: Clearly distinguish completed events (\"I baked cookies last Thursday\") from planned events (\"I'm thinking of baking chicken wings\"). Include the approximate date for both.\n\nIf there is nothing worth remembering (e.g. empty result, no decisions or facts), return an empty list.\nOutput a JSON object with a single key \"memories\" whose value is a list of strings.",
"extract_memories_user": "Content:\n{content}\n\nExtract memory statements as described. Return structured output.",
"query_user": "Query: {query}\n\nAvailable scopes: {available_scopes}\n{scope_desc}\n\nReturn the analysis as structured output.",
"save_system": "You analyze content to be stored in a hierarchical memory system.\nGiven the content and the existing scopes and categories, output:\n1. suggested_scope: The best matching existing scope path, or a new path if none fit (use / for root).\n2. categories: A list of categories (reuse existing when relevant, add new ones if needed).\n3. importance: A number from 0.0 to 1.0 indicating how significant this memory is.\n4. extracted_metadata: A JSON object with any entities, dates, or topics you can extract.",
@@ -79,4 +80,4 @@
"create_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou have been assigned the following task:\n{description}\n\nExpected output:\n{expected_output}\n\nAvailable tools: {tools}\n\nBefore executing this task, create a detailed plan that leverages your expertise as {role} and outlines:\n1. Your understanding of the task from your professional perspective\n2. The key steps you'll take to complete it, drawing on your background and skills\n3. How you'll approach any challenges that might arise, considering your expertise\n4. How you'll strategically use the available tools based on your experience, exactly what tools to use and how to use them\n5. The expected outcome and how it aligns with your goal\n\nAfter creating your plan, assess whether you feel ready to execute the task or if you could do better.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan because [specific reason].\"",
"refine_plan_prompt": "You are {role} with this background: {backstory}\n\nYour primary goal is: {goal}\n\nYou created the following plan for this task:\n{current_plan}\n\nHowever, you indicated that you're not ready to execute the task yet.\n\nPlease refine your plan further, drawing on your expertise as {role} to address any gaps or uncertainties. As you refine your plan, be specific about which available tools you will use, how you will use them, and why they are the best choices for each step. Clearly outline your tool usage strategy as part of your improved plan.\n\nAfter refining your plan, assess whether you feel ready to execute the task.\nConclude with one of these statements:\n- \"READY: I am ready to execute the task.\"\n- \"NOT READY: I need to refine my plan further because [specific reason].\""
}
}
}

View File

@@ -1,61 +0,0 @@
"""Centralised lock factory.
If ``REDIS_URL`` is set, locks are distributed via ``portalocker.RedisLock``. Otherwise, falls
back to the standard ``portalocker.Lock``.
"""
from __future__ import annotations
from collections.abc import Iterator
from contextlib import contextmanager
from functools import lru_cache
from hashlib import md5
import os
import tempfile
from typing import TYPE_CHECKING, Final
import portalocker
if TYPE_CHECKING:
import redis
_REDIS_URL: str | None = os.environ.get("REDIS_URL")
_DEFAULT_TIMEOUT: Final[int] = 120
@lru_cache(maxsize=1)
def _redis_connection() -> redis.Redis:
    """Return the process-wide Redis connection, created lazily on first use.

    The connection is cached via ``lru_cache(maxsize=1)`` so repeated calls
    share a single client.

    Raises:
        ValueError: If the ``REDIS_URL`` environment variable is not set.
    """
    # Imported lazily so redis is only required when distributed locking is on.
    from redis import Redis

    url = _REDIS_URL
    if url is None:
        raise ValueError("REDIS_URL environment variable is not set")
    return Redis.from_url(url)
@contextmanager
def lock(name: str, *, timeout: float = _DEFAULT_TIMEOUT) -> Iterator[None]:
    """Hold a named lock for the duration of the ``with`` block.

    When ``REDIS_URL`` is set the lock is distributed via
    ``portalocker.RedisLock``; otherwise a local file lock in the system
    temp directory is used.

    Args:
        name: A human-readable lock name (e.g. ``"chromadb_init"``).
            Automatically namespaced to avoid collisions.
        timeout: Maximum seconds to wait for the lock before raising.
    """
    # md5 here is a stable key derivation, not a security primitive.
    digest = md5(name.encode(), usedforsecurity=False).hexdigest()
    channel = f"crewai:{digest}"
    if not _REDIS_URL:
        # Single-host fallback: a file lock under the OS temp directory.
        lock_path = os.path.join(tempfile.gettempdir(), f"{channel}.lock")
        with portalocker.Lock(lock_path, timeout=timeout):
            yield
    else:
        with portalocker.RedisLock(
            channel=channel,
            connection=_redis_connection(),
            timeout=timeout,
        ):
            yield

View File

@@ -2353,68 +2353,3 @@ def test_agent_without_apps_no_platform_tools():
tools = crew._prepare_tools(agent, task, [])
assert tools == []
def test_agent_mcps_accepts_slug_with_specific_tool():
    """Agent(mcps=["notion#get_page"]) must pass validation (_SLUG_RE)."""
    mcp_agent = Agent(
        backstory="Test agent",
        goal="Test MCP validation",
        role="MCP Agent",
        mcps=["notion#get_page"],
    )
    assert mcp_agent.mcps == ["notion#get_page"]
def test_agent_mcps_accepts_slug_with_hyphenated_tool():
    """A hyphenated tool name after ``#`` is a valid MCP reference."""
    hyphen_agent = Agent(
        backstory="Test agent",
        goal="Test MCP validation",
        role="MCP Agent",
        mcps=["notion#get-page"],
    )
    assert hyphen_agent.mcps == ["notion#get-page"]
def test_agent_mcps_accepts_multiple_hash_refs():
    """Several ``server#tool`` references may be supplied in one list."""
    multi_agent = Agent(
        backstory="Test agent",
        goal="Test MCP validation",
        role="MCP Agent",
        mcps=["notion#get_page", "notion#search", "github#list_repos"],
    )
    assert len(multi_agent.mcps) == 3
def test_agent_mcps_accepts_mixed_ref_types():
    """Slug#tool, bare slug, and URL references may be mixed freely."""
    mixed_refs = [
        "notion#get_page",
        "notion",
        "https://mcp.example.com/api",
    ]
    mixed_agent = Agent(
        backstory="Test agent",
        goal="Test MCP validation",
        role="MCP Agent",
        mcps=mixed_refs,
    )
    assert len(mixed_agent.mcps) == 3
def test_agent_mcps_rejects_hash_without_slug():
    """A ``#tool`` reference with no server slug must fail validation."""
    with pytest.raises(ValueError, match="Invalid MCP reference"):
        Agent(
            backstory="Test agent",
            goal="Test MCP validation",
            role="MCP Agent",
            mcps=["#get_page"],
        )
def test_agent_mcps_accepts_legacy_prefix_with_tool():
    """The legacy ``crewai-amp:`` prefix still validates with a ``#tool`` suffix."""
    legacy_agent = Agent(
        backstory="Test agent",
        goal="Test MCP validation",
        role="MCP Agent",
        mcps=["crewai-amp:notion#get_page"],
    )
    assert legacy_agent.mcps == ["crewai-amp:notion#get_page"]

View File

@@ -123,7 +123,7 @@ class TestAgentExecutor:
executor.state.iterations = 10
result = executor.check_max_iterations()
assert result == "max_iterations_exceeded"
assert result == "force_final_answer"
def test_route_by_answer_type_action(self, mock_dependencies):
"""Test routing for AgentAction."""

View File

@@ -1136,7 +1136,7 @@ def test_lite_agent_memory_instance_recall_and_save_called():
successful_requests=1,
)
mock_memory = Mock()
mock_memory.read_only = False
mock_memory._read_only = False
mock_memory.recall.return_value = []
mock_memory.extract_memories.return_value = ["Fact one.", "Fact two."]

View File

@@ -1,137 +0,0 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"What is the weather
in Tokyo?"}],"model":"claude-sonnet-4-5","stream":false,"tools":[{"type":"tool_search_tool_bm25_20251119","name":"tool_search_tool_bm25"},{"name":"get_weather","description":"Get
current weather conditions for a specified location","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_weather"}},"required":["input"]},"defer_loading":true},{"name":"search_files","description":"Search
through files in the workspace by name or content","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for search_files"}},"required":["input"]},"defer_loading":true},{"name":"read_database","description":"Read
records from a database table with optional filtering","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for read_database"}},"required":["input"]},"defer_loading":true},{"name":"write_database","description":"Write
or update records in a database table","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for write_database"}},"required":["input"]},"defer_loading":true},{"name":"send_email","description":"Send
an email message to one or more recipients","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for send_email"}},"required":["input"]},"defer_loading":true},{"name":"read_email","description":"Read
emails from inbox with filtering options","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for read_email"}},"required":["input"]},"defer_loading":true},{"name":"create_ticket","description":"Create
a new support ticket in the ticketing system","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for create_ticket"}},"required":["input"]},"defer_loading":true},{"name":"update_ticket","description":"Update
an existing support ticket status or description","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for update_ticket"}},"required":["input"]},"defer_loading":true},{"name":"list_users","description":"List
all users in the system with optional filters","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for list_users"}},"required":["input"]},"defer_loading":true},{"name":"get_user_profile","description":"Get
detailed profile information for a specific user","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_user_profile"}},"required":["input"]},"defer_loading":true},{"name":"deploy_service","description":"Deploy
a service to the specified environment","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for deploy_service"}},"required":["input"]},"defer_loading":true},{"name":"rollback_service","description":"Rollback
a service deployment to a previous version","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for rollback_service"}},"required":["input"]},"defer_loading":true},{"name":"get_service_logs","description":"Get
service logs filtered by time range and severity","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_service_logs"}},"required":["input"]},"defer_loading":true},{"name":"run_sql_query","description":"Run
a read-only SQL query against the analytics database","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for run_sql_query"}},"required":["input"]},"defer_loading":true},{"name":"create_dashboard","description":"Create
a new monitoring dashboard with widgets","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for create_dashboard"}},"required":["input"]},"defer_loading":true}]}'
headers:
User-Agent:
- X-USER-AGENT-XXX
accept:
- application/json
accept-encoding:
- ACCEPT-ENCODING-XXX
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '3952'
content-type:
- application/json
host:
- api.anthropic.com
x-api-key:
- X-API-KEY-XXX
x-stainless-arch:
- X-STAINLESS-ARCH-XXX
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- X-STAINLESS-OS-XXX
x-stainless-package-version:
- 0.73.0
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.13.3
x-stainless-timeout:
- NOT_GIVEN
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01DAGCoL6C12u6yAgR1UqNAs","type":"message","role":"assistant","content":[{"type":"text","text":"I''ll
search for a weather-related tool to help you get the weather information
for Tokyo."},{"type":"server_tool_use","id":"srvtoolu_0176qgHeeBpSygYAnUzKHCfh","name":"tool_search_tool_bm25","input":{"query":"weather
Tokyo current conditions forecast"},"caller":{"type":"direct"}},{"type":"tool_search_tool_result","tool_use_id":"srvtoolu_0176qgHeeBpSygYAnUzKHCfh","content":{"type":"tool_search_tool_search_result","tool_references":[{"type":"tool_reference","tool_name":"get_weather"}]}},{"type":"text","text":"Great!
I found a weather tool. Let me get the current weather conditions for Tokyo."},{"type":"tool_use","id":"toolu_01R3FavQLuTrwNvEk9gMaViK","name":"get_weather","input":{"input":"Tokyo"},"caller":{"type":"direct"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":1566,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"cache_creation":{"ephemeral_5m_input_tokens":0,"ephemeral_1h_input_tokens":0},"output_tokens":155,"service_tier":"standard","inference_geo":"not_available","server_tool_use":{"web_search_requests":0,"web_fetch_requests":0}}}'
headers:
CF-RAY:
- CF-RAY-XXX
Connection:
- keep-alive
Content-Security-Policy:
- CSP-FILTERED
Content-Type:
- application/json
Date:
- Sun, 08 Mar 2026 21:04:12 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-organization-id:
- ANTHROPIC-ORGANIZATION-ID-XXX
anthropic-ratelimit-input-tokens-limit:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-input-tokens-remaining:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-input-tokens-reset:
- ANTHROPIC-RATELIMIT-INPUT-TOKENS-RESET-XXX
anthropic-ratelimit-output-tokens-limit:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-LIMIT-XXX
anthropic-ratelimit-output-tokens-remaining:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-REMAINING-XXX
anthropic-ratelimit-output-tokens-reset:
- ANTHROPIC-RATELIMIT-OUTPUT-TOKENS-RESET-XXX
anthropic-ratelimit-requests-limit:
- '20000'
anthropic-ratelimit-requests-remaining:
- '19999'
anthropic-ratelimit-requests-reset:
- '2026-03-08T21:04:07Z'
anthropic-ratelimit-tokens-limit:
- ANTHROPIC-RATELIMIT-TOKENS-LIMIT-XXX
anthropic-ratelimit-tokens-remaining:
- ANTHROPIC-RATELIMIT-TOKENS-REMAINING-XXX
anthropic-ratelimit-tokens-reset:
- ANTHROPIC-RATELIMIT-TOKENS-RESET-XXX
cf-cache-status:
- DYNAMIC
request-id:
- REQUEST-ID-XXX
strict-transport-security:
- STS-XXX
vary:
- Accept-Encoding
x-envoy-upstream-service-time:
- '4330'
status:
code: 200
message: OK
version: 1

View File

@@ -1,112 +0,0 @@
interactions:
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"What is the weather
in Tokyo?"}],"model":"claude-sonnet-4-5","stream":false,"tools":[{"name":"get_weather","description":"Get
current weather conditions for a specified location","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_weather"}},"required":["input"]}},{"name":"search_files","description":"Search
through files in the workspace by name or content","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for search_files"}},"required":["input"]}},{"name":"read_database","description":"Read
records from a database table with optional filtering","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for read_database"}},"required":["input"]}},{"name":"write_database","description":"Write
or update records in a database table","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for write_database"}},"required":["input"]}},{"name":"send_email","description":"Send
an email message to one or more recipients","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for send_email"}},"required":["input"]}},{"name":"read_email","description":"Read
emails from inbox with filtering options","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for read_email"}},"required":["input"]}},{"name":"create_ticket","description":"Create
a new support ticket in the ticketing system","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for create_ticket"}},"required":["input"]}},{"name":"update_ticket","description":"Update
an existing support ticket status or description","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for update_ticket"}},"required":["input"]}},{"name":"list_users","description":"List
all users in the system with optional filters","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for list_users"}},"required":["input"]}},{"name":"get_user_profile","description":"Get
detailed profile information for a specific user","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_user_profile"}},"required":["input"]}},{"name":"deploy_service","description":"Deploy
a service to the specified environment","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for deploy_service"}},"required":["input"]}},{"name":"rollback_service","description":"Rollback
a service deployment to a previous version","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for rollback_service"}},"required":["input"]}},{"name":"get_service_logs","description":"Get
service logs filtered by time range and severity","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_service_logs"}},"required":["input"]}},{"name":"run_sql_query","description":"Run
a read-only SQL query against the analytics database","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for run_sql_query"}},"required":["input"]}},{"name":"create_dashboard","description":"Create
a new monitoring dashboard with widgets","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for create_dashboard"}},"required":["input"]}}]}'
headers:
accept:
- application/json
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-type:
- application/json
host:
- api.anthropic.com
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01NoSearch001","type":"message","role":"assistant","content":[{"type":"tool_use","id":"toolu_01NoSearch001","name":"get_weather","input":{"input":"Tokyo"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":1943,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":54,"service_tier":"standard"}}'
headers:
Content-Type:
- application/json
status:
code: 200
message: OK
- request:
body: '{"max_tokens":4096,"messages":[{"role":"user","content":"What is the weather
in Tokyo?"}],"model":"claude-sonnet-4-5","stream":false,"tools":[{"type":"tool_search_tool_bm25_20251119","name":"tool_search_tool_bm25"},{"name":"get_weather","description":"Get
current weather conditions for a specified location","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_weather"}},"required":["input"]},"defer_loading":true},{"name":"search_files","description":"Search
through files in the workspace by name or content","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for search_files"}},"required":["input"]},"defer_loading":true},{"name":"read_database","description":"Read
records from a database table with optional filtering","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for read_database"}},"required":["input"]},"defer_loading":true},{"name":"write_database","description":"Write
or update records in a database table","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for write_database"}},"required":["input"]},"defer_loading":true},{"name":"send_email","description":"Send
an email message to one or more recipients","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for send_email"}},"required":["input"]},"defer_loading":true},{"name":"read_email","description":"Read
emails from inbox with filtering options","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for read_email"}},"required":["input"]},"defer_loading":true},{"name":"create_ticket","description":"Create
a new support ticket in the ticketing system","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for create_ticket"}},"required":["input"]},"defer_loading":true},{"name":"update_ticket","description":"Update
an existing support ticket status or description","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for update_ticket"}},"required":["input"]},"defer_loading":true},{"name":"list_users","description":"List
all users in the system with optional filters","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for list_users"}},"required":["input"]},"defer_loading":true},{"name":"get_user_profile","description":"Get
detailed profile information for a specific user","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_user_profile"}},"required":["input"]},"defer_loading":true},{"name":"deploy_service","description":"Deploy
a service to the specified environment","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for deploy_service"}},"required":["input"]},"defer_loading":true},{"name":"rollback_service","description":"Rollback
a service deployment to a previous version","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for rollback_service"}},"required":["input"]},"defer_loading":true},{"name":"get_service_logs","description":"Get
service logs filtered by time range and severity","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for get_service_logs"}},"required":["input"]},"defer_loading":true},{"name":"run_sql_query","description":"Run
a read-only SQL query against the analytics database","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for run_sql_query"}},"required":["input"]},"defer_loading":true},{"name":"create_dashboard","description":"Create
a new monitoring dashboard with widgets","input_schema":{"type":"object","properties":{"input":{"type":"string","description":"Input
for create_dashboard"}},"required":["input"]},"defer_loading":true}]}'
headers:
accept:
- application/json
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-type:
- application/json
host:
- api.anthropic.com
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: '{"model":"claude-sonnet-4-5-20250929","id":"msg_01WithSearch001","type":"message","role":"assistant","content":[{"type":"text","text":"I''ll search for a weather tool."},{"type":"server_tool_use","id":"srvtoolu_01Search001","name":"tool_search_tool_bm25","input":{"query":"weather conditions"},"caller":{"type":"direct"}},{"type":"tool_search_tool_result","tool_use_id":"srvtoolu_01Search001","content":{"type":"tool_search_tool_search_result","tool_references":[{"type":"tool_reference","tool_name":"get_weather"}]}},{"type":"text","text":"Found it. Let me get the weather for Tokyo."},{"type":"tool_use","id":"toolu_01WithSearch001","name":"get_weather","input":{"input":"Tokyo"},"caller":{"type":"direct"}}],"stop_reason":"tool_use","stop_sequence":null,"usage":{"input_tokens":1566,"cache_creation_input_tokens":0,"cache_read_input_tokens":0,"output_tokens":155,"service_tier":"standard"}}'
headers:
Content-Type:
- application/json
status:
code: 200
message: OK
version: 1

View File

@@ -1121,345 +1121,3 @@ def test_anthropic_cached_prompt_tokens_with_tools():
assert usage.successful_requests == 2
# The second call should have cached prompt tokens
assert usage.cached_prompt_tokens > 0
# ---- Tool Search Tool Tests ----
def test_tool_search_true_injects_bm25_and_defer_loading():
    """tool_search=True should inject bm25 tool search and defer all tools."""
    llm = LLM(model="anthropic/claude-sonnet-4-5", tool_search=True)

    def _spec(name, description, prop):
        # Build a minimal OpenAI-style function tool with one string parameter.
        return {
            "type": "function",
            "function": {
                "name": name,
                "description": description,
                "parameters": {
                    "type": "object",
                    "properties": {prop: {"type": "string"}},
                    "required": [prop],
                },
            },
        }

    crewai_tools = [
        _spec("get_weather", "Get weather for a location", "location"),
        _spec("calculator", "Perform math calculations", "expression"),
    ]
    messages, system_msg = llm._format_messages_for_anthropic(
        [{"role": "user", "content": "Hello"}]
    )
    tools = llm._prepare_completion_params(messages, system_msg, crewai_tools)["tools"]

    # Injected bm25 search tool plus the two regular tools.
    assert len(tools) == 3
    search_tool = tools[0]
    assert search_tool["type"] == "tool_search_tool_bm25_20251119"
    assert search_tool["name"] == "tool_search_tool_bm25"
    assert "input_schema" not in search_tool
    # Every non-search tool must be deferred so its schema is not sent upfront.
    for regular in tools[1:]:
        assert regular.get("defer_loading") is True, (
            f"Tool {regular['name']} missing defer_loading"
        )
def test_tool_search_regex_config():
    """tool_search with regex config should use regex variant."""
    from crewai.llms.providers.anthropic.completion import AnthropicToolSearchConfig

    # Explicitly request the regex variant instead of the bm25 default.
    config = AnthropicToolSearchConfig(type="regex")
    llm = LLM(model="anthropic/claude-sonnet-4-5", tool_search=config)
    # Two minimal function tools so there is more than one tool to search over.
    crewai_tools = [
        {
            "type": "function",
            "function": {
                "name": "tool_a",
                "description": "First tool",
                "parameters": {
                    "type": "object",
                    "properties": {"q": {"type": "string"}},
                    "required": ["q"],
                },
            },
        },
        {
            "type": "function",
            "function": {
                "name": "tool_b",
                "description": "Second tool",
                "parameters": {
                    "type": "object",
                    "properties": {"q": {"type": "string"}},
                    "required": ["q"],
                },
            },
        },
    ]
    formatted_messages, system_message = llm._format_messages_for_anthropic(
        [{"role": "user", "content": "Hello"}]
    )
    params = llm._prepare_completion_params(
        formatted_messages, system_message, crewai_tools
    )
    tools = params["tools"]
    # The injected search tool (first entry) must be the regex variant.
    assert tools[0]["type"] == "tool_search_tool_regex_20251119"
    assert tools[0]["name"] == "tool_search_tool_regex"
def test_tool_search_disabled_by_default():
    """tool_search=None (default) should NOT inject anything."""
    llm = LLM(model="anthropic/claude-sonnet-4-5")
    crewai_tools = [
        {
            "type": "function",
            "function": {
                "name": "test_tool",
                "description": "A test tool",
                "parameters": {
                    "type": "object",
                    "properties": {"q": {"type": "string"}},
                    "required": ["q"],
                },
            },
        },
    ]
    messages, system_msg = llm._format_messages_for_anthropic(
        [{"role": "user", "content": "Hello"}]
    )
    tools = llm._prepare_completion_params(messages, system_msg, crewai_tools)["tools"]

    # Only the user's tool — no search tool injected, nothing deferred.
    assert len(tools) == 1
    search_tool_types = (
        "tool_search_tool_bm25_20251119",
        "tool_search_tool_regex_20251119",
    )
    for tool in tools:
        assert tool.get("type", "") not in search_tool_types
        assert "defer_loading" not in tool
def test_tool_search_no_duplicate_when_manually_provided():
    """If user passes a tool search tool manually, don't inject a duplicate."""
    llm = LLM(model="anthropic/claude-sonnet-4-5", tool_search=True)
    # User manually includes a tool search tool (the regex variant, not the
    # bm25 one that tool_search=True would normally inject).
    tools_with_search = [
        {"type": "tool_search_tool_regex_20251119", "name": "tool_search_tool_regex"},
        {
            "type": "function",
            "function": {
                "name": "test_tool",
                "description": "A test tool",
                "parameters": {
                    "type": "object",
                    "properties": {"q": {"type": "string"}},
                    "required": ["q"],
                },
            },
        },
    ]
    formatted_messages, system_message = llm._format_messages_for_anthropic(
        [{"role": "user", "content": "Hello"}]
    )
    params = llm._prepare_completion_params(
        formatted_messages, system_message, tools_with_search
    )
    tools = params["tools"]
    # Collect every tool whose type marks it as a search tool.
    search_tools = [
        t for t in tools
        if t.get("type", "").startswith("tool_search_tool")
    ]
    # Should only have 1 tool search tool (the user's manual one)
    assert len(search_tools) == 1
    assert search_tools[0]["type"] == "tool_search_tool_regex_20251119"
def test_tool_search_passthrough_preserves_tool_search_type():
    """_convert_tools_for_interference should pass through tool search tools unchanged."""
    llm = LLM(model="anthropic/claude-sonnet-4-5")
    search_tool = {
        "type": "tool_search_tool_regex_20251119",
        "name": "tool_search_tool_regex",
    }
    weather_tool = {
        "name": "get_weather",
        "description": "Get weather",
        "input_schema": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    }
    converted = llm._convert_tools_for_interference([search_tool, weather_tool])

    assert len(converted) == 2
    # The search tool must come through exactly as it went in.
    assert converted[0] == search_tool
    # The regular tool keeps its name and its input schema.
    assert converted[1]["name"] == "get_weather"
    assert "input_schema" in converted[1]
def test_tool_search_single_tool_skips_search_and_forces_choice():
    """With only 1 tool, tool_search is skipped (nothing to search) and the
    normal forced tool_choice optimisation still applies."""
    llm = LLM(model="anthropic/claude-sonnet-4-5", tool_search=True)
    crewai_tools = [
        {
            "type": "function",
            "function": {
                "name": "test_tool",
                "description": "A test tool",
                "parameters": {
                    "type": "object",
                    "properties": {"q": {"type": "string"}},
                    "required": ["q"],
                },
            },
        },
    ]
    formatted_messages, system_message = llm._format_messages_for_anthropic(
        [{"role": "user", "content": "Hello"}]
    )
    # available_functions exercises the forced-tool_choice path for a lone tool.
    params = llm._prepare_completion_params(
        formatted_messages,
        system_message,
        crewai_tools,
        available_functions={"test_tool": lambda q: "result"},
    )
    # Single tool — tool_search skipped, tool_choice forced as normal
    assert "tool_choice" in params
    assert params["tool_choice"]["name"] == "test_tool"
    # No tool search tool should be injected
    tool_types = [t.get("type", "") for t in params["tools"]]
    for ts_type in ("tool_search_tool_bm25_20251119", "tool_search_tool_regex_20251119"):
        assert ts_type not in tool_types
    # No defer_loading on the single tool
    assert "defer_loading" not in params["tools"][0]
def test_tool_search_via_llm_class():
    """Verify tool_search param passes through LLM class correctly."""
    from crewai.llms.providers.anthropic.completion import (
        AnthropicCompletion,
        AnthropicToolSearchConfig,
    )

    # tool_search=True resolves to the bm25 variant on the Anthropic provider.
    bm25_llm = LLM(model="anthropic/claude-sonnet-4-5", tool_search=True)
    assert isinstance(bm25_llm, AnthropicCompletion)
    assert bm25_llm.tool_search is not None
    assert bm25_llm.tool_search.type == "bm25"

    # An explicit config object is preserved as-is.
    regex_llm = LLM(
        model="anthropic/claude-sonnet-4-5",
        tool_search=AnthropicToolSearchConfig(type="regex"),
    )
    assert regex_llm.tool_search is not None
    assert regex_llm.tool_search.type == "regex"

    # Omitting the param leaves tool search disabled.
    default_llm = LLM(model="anthropic/claude-sonnet-4-5")
    assert default_llm.tool_search is None
# Many tools shared by the VCR tests below
_MANY_TOOLS = [
{
"name": name,
"description": desc,
"input_schema": {
"type": "object",
"properties": {"input": {"type": "string", "description": f"Input for {name}"}},
"required": ["input"],
},
}
for name, desc in [
("get_weather", "Get current weather conditions for a specified location"),
("search_files", "Search through files in the workspace by name or content"),
("read_database", "Read records from a database table with optional filtering"),
("write_database", "Write or update records in a database table"),
("send_email", "Send an email message to one or more recipients"),
("read_email", "Read emails from inbox with filtering options"),
("create_ticket", "Create a new support ticket in the ticketing system"),
("update_ticket", "Update an existing support ticket status or description"),
("list_users", "List all users in the system with optional filters"),
("get_user_profile", "Get detailed profile information for a specific user"),
("deploy_service", "Deploy a service to the specified environment"),
("rollback_service", "Rollback a service deployment to a previous version"),
("get_service_logs", "Get service logs filtered by time range and severity"),
("run_sql_query", "Run a read-only SQL query against the analytics database"),
("create_dashboard", "Create a new monitoring dashboard with widgets"),
]
]
@pytest.mark.vcr()
def test_tool_search_discovers_and_calls_tool():
    """Tool search should discover the right tool and return a tool_use block."""
    llm = LLM(model="anthropic/claude-sonnet-4-5", tool_search=True)
    result = llm.call("What is the weather in Tokyo?", tools=_MANY_TOOLS)

    # With no available_functions, the raw tool_use blocks come back as a list.
    assert isinstance(result, list)
    assert len(result) >= 1
    # The model should have discovered and invoked get_weather.
    discovered_names = [getattr(block, "name", None) for block in result]
    assert "get_weather" in discovered_names
@pytest.mark.vcr()
def test_tool_search_saves_input_tokens():
    """Tool search with deferred loading should use fewer input tokens than loading all tools."""
    # Baseline: all 15 tool schemas sent upfront, no tool search.
    baseline = LLM(model="anthropic/claude-sonnet-4-5")
    baseline.call("What is the weather in Tokyo?", tools=_MANY_TOOLS)
    baseline_usage = baseline.get_token_usage_summary()

    # Same prompt with tool search enabled — schemas are deferred.
    deferred = LLM(model="anthropic/claude-sonnet-4-5", tool_search=True)
    deferred.call("What is the weather in Tokyo?", tools=_MANY_TOOLS)
    deferred_usage = deferred.get_token_usage_summary()

    # Deferred loading must translate into fewer prompt tokens.
    assert deferred_usage.prompt_tokens < baseline_usage.prompt_tokens, (
        f"Expected tool_search ({deferred_usage.prompt_tokens}) to use fewer input tokens "
        f"than no search ({baseline_usage.prompt_tokens})"
    )

View File

@@ -967,211 +967,3 @@ def test_bedrock_agent_kickoff_structured_output_with_tools():
assert result.pydantic.result == 42, f"Expected result 42 but got {result.pydantic.result}"
assert result.pydantic.operation, "Operation should not be empty"
assert result.pydantic.explanation, "Explanation should not be empty"
def test_bedrock_groups_three_tool_results():
    """Consecutive tool results should be grouped into one Bedrock user message."""
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # One assistant turn issuing three parallel tool calls, followed by the
    # three corresponding OpenAI-style tool-result messages.
    messages = [
        {"role": "user", "content": "Use all three tools, then continue."},
        {
            "role": "assistant",
            "content": "",
            "tool_calls": [
                {
                    "id": "tool-1",
                    "type": "function",
                    "function": {
                        "name": "lookup_weather",
                        "arguments": '{"location": "New York"}',
                    },
                },
                {
                    "id": "tool-2",
                    "type": "function",
                    "function": {
                        "name": "lookup_news",
                        "arguments": '{"topic": "AI"}',
                    },
                },
                {
                    "id": "tool-3",
                    "type": "function",
                    "function": {
                        "name": "lookup_stock",
                        "arguments": '{"ticker": "AMZN"}',
                    },
                },
            ],
        },
        {"role": "tool", "tool_call_id": "tool-1", "content": "72F and sunny"},
        {"role": "tool", "tool_call_id": "tool-2", "content": "AI news summary"},
        {"role": "tool", "tool_call_id": "tool-3", "content": "AMZN up 1.2%"},
    ]
    formatted_messages, system_message = llm._format_messages_for_converse(messages)
    assert system_message is None
    # Converse format: user turn, assistant turn, then ONE user turn holding
    # all three toolResult blocks (not three separate user messages).
    assert [message["role"] for message in formatted_messages] == [
        "user",
        "assistant",
        "user",
    ]
    assert len(formatted_messages[1]["content"]) == 3
    tool_results = formatted_messages[2]["content"]
    assert len(tool_results) == 3
    # Order and ids of the grouped results must match the original tool calls.
    assert [block["toolResult"]["toolUseId"] for block in tool_results] == [
        "tool-1",
        "tool-2",
        "tool-3",
    ]
    assert [block["toolResult"]["content"][0]["text"] for block in tool_results] == [
        "72F and sunny",
        "AI news summary",
        "AMZN up 1.2%",
    ]
def test_bedrock_parallel_tool_results_grouped():
    """Regression test for issue #4749.
    When an assistant message contains multiple parallel tool calls,
    Bedrock requires all corresponding tool results to be grouped
    in a single user message. Previously each tool result was emitted
    as a separate user message, causing:
    ValidationException: Expected toolResult blocks at messages.2.content
    """
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Two parallel tool calls in one assistant turn, two tool results after.
    messages = [
        {"role": "user", "content": "Calculate 25 + 17 AND 10 * 5"},
        {
            "role": "assistant",
            "content": "",
            "tool_calls": [
                {
                    "id": "call_add",
                    "type": "function",
                    "function": {"name": "add_tool", "arguments": '{"a": 25, "b": 17}'},
                },
                {
                    "id": "call_mul",
                    "type": "function",
                    "function": {"name": "multiply_tool", "arguments": '{"a": 10, "b": 5}'},
                },
            ],
        },
        {"role": "tool", "tool_call_id": "call_add", "content": "42"},
        {"role": "tool", "tool_call_id": "call_mul", "content": "50"},
    ]
    converse_msgs, system_msg = llm._format_messages_for_converse(messages)
    # Find the user message that contains toolResult blocks
    tool_result_messages = [
        m for m in converse_msgs
        if m.get("role") == "user"
        and any("toolResult" in b for b in m.get("content", []))
    ]
    # There must be exactly ONE user message with tool results (not two)
    assert len(tool_result_messages) == 1, (
        f"Expected 1 grouped tool-result message, got {len(tool_result_messages)}. "
        "Bedrock requires all parallel tool results in a single user message."
    )
    # That single message must contain both tool results
    tool_results = tool_result_messages[0]["content"]
    assert len(tool_results) == 2, (
        f"Expected 2 toolResult blocks in grouped message, got {len(tool_results)}"
    )
    # Verify the tool use IDs match
    tool_use_ids = {
        block["toolResult"]["toolUseId"] for block in tool_results
    }
    assert tool_use_ids == {"call_add", "call_mul"}
def test_bedrock_single_tool_result_still_works():
    """Ensure single tool call still produces a single-block user message."""
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # One tool call, one tool result — the simplest grouping case.
    messages = [
        {"role": "user", "content": "Add 1 + 2"},
        {
            "role": "assistant",
            "content": "",
            "tool_calls": [
                {
                    "id": "call_single",
                    "type": "function",
                    "function": {"name": "add_tool", "arguments": '{"a": 1, "b": 2}'},
                },
            ],
        },
        {"role": "tool", "tool_call_id": "call_single", "content": "3"},
    ]
    converse_msgs, _ = llm._format_messages_for_converse(messages)
    tool_result_messages = [
        m for m in converse_msgs
        if m.get("role") == "user"
        and any("toolResult" in b for b in m.get("content", []))
    ]
    # Exactly one user message carrying exactly one toolResult block.
    assert len(tool_result_messages) == 1
    assert len(tool_result_messages[0]["content"]) == 1
    assert tool_result_messages[0]["content"][0]["toolResult"]["toolUseId"] == "call_single"
def test_bedrock_tool_results_not_merged_across_assistant_messages():
    """Tool results from different assistant turns must NOT be merged."""
    llm = LLM(model="bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0")
    # Two separate assistant turns, each with its own tool call and result;
    # grouping must respect the turn boundary between call_a and call_b.
    messages = [
        {"role": "user", "content": "First task"},
        {
            "role": "assistant",
            "content": "",
            "tool_calls": [
                {
                    "id": "call_a",
                    "type": "function",
                    "function": {"name": "tool_a", "arguments": "{}"},
                },
            ],
        },
        {"role": "tool", "tool_call_id": "call_a", "content": "result_a"},
        {"role": "assistant", "content": "Now doing second task"},
        {"role": "user", "content": "Second task"},
        {
            "role": "assistant",
            "content": "",
            "tool_calls": [
                {
                    "id": "call_b",
                    "type": "function",
                    "function": {"name": "tool_b", "arguments": "{}"},
                },
            ],
        },
        {"role": "tool", "tool_call_id": "call_b", "content": "result_b"},
    ]
    converse_msgs, _ = llm._format_messages_for_converse(messages)
    tool_result_messages = [
        m for m in converse_msgs
        if m.get("role") == "user"
        and any("toolResult" in b for b in m.get("content", []))
    ]
    # Two separate tool-result messages (one per assistant turn)
    assert len(tool_result_messages) == 2, (
        "Tool results from different assistant turns must remain separate"
    )
    assert tool_result_messages[0]["content"][0]["toolResult"]["toolUseId"] == "call_a"
    assert tool_result_messages[1]["content"][0]["toolResult"]["toolUseId"] == "call_b"

View File

@@ -268,54 +268,6 @@ class TestGetMCPToolsAmpIntegration:
assert len(tools) == 1
assert tools[0].name == "mcp_notion_so_sse_search"
    @patch("crewai.mcp.tool_resolver.MCPClient")
    @patch.object(MCPToolResolver, "_fetch_amp_mcp_configs")
    def test_tool_filter_with_hyphenated_hash_syntax(
        self, mock_fetch, mock_client_class, agent
    ):
        """notion#get-page must match the tool whose sanitized name is get_page."""
        # Pretend the AMP registry knows a single "notion" SSE server.
        mock_fetch.return_value = {
            "notion": {
                "type": "sse",
                "url": "https://mcp.notion.so/sse",
                "headers": {"Authorization": "Bearer token"},
            },
        }
        # Server reports a tool whose original name is hyphenated; the resolver
        # sanitizes it to get_page, which the #get-page filter must still hit.
        hyphenated_tool_definitions = [
            {
                "name": "get_page",
                "original_name": "get-page",
                "description": "Get a page",
                "inputSchema": {},
            },
            {
                "name": "search",
                "original_name": "search",
                "description": "Search tool",
                "inputSchema": {
                    "type": "object",
                    "properties": {
                        "query": {"type": "string", "description": "Search query"}
                    },
                    "required": ["query"],
                },
            },
        ]
        mock_client = AsyncMock()
        mock_client.list_tools = AsyncMock(return_value=hyphenated_tool_definitions)
        mock_client.connected = False
        mock_client.connect = AsyncMock()
        mock_client.disconnect = AsyncMock()
        mock_client_class.return_value = mock_client
        tools = agent.get_mcp_tools(["notion#get-page"])
        # Only the "notion" slug should be resolved against the registry.
        mock_fetch.assert_called_once_with(["notion"])
        # The filter keeps exactly the sanitized get_page tool, nothing else.
        assert len(tools) == 1
        assert tools[0].name.endswith("_get_page")
@patch("crewai.mcp.tool_resolver.MCPClient")
@patch.object(MCPToolResolver, "_fetch_amp_mcp_configs")
def test_deduplicates_slugs(
@@ -419,87 +371,3 @@ class TestGetMCPToolsAmpIntegration:
mock_external.assert_called_once_with("https://external.mcp.com/api")
# 2 from notion + 1 from external + 2 from http_config
assert len(tools) == 5
class TestResolveExternalToolFilter:
    """Tests for _resolve_external with #tool-name filtering.

    The URL fragment (everything after '#') selects a single tool from an
    external MCP server; both hyphenated and underscored spellings must match
    the sanitized schema key.
    """

    @pytest.fixture
    def agent(self):
        # Minimal agent — only used to construct the resolver.
        return Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
        )

    @pytest.fixture
    def resolver(self, agent):
        return MCPToolResolver(agent=agent, logger=agent._logger)

    @patch.object(MCPToolResolver, "_get_mcp_tool_schemas")
    def test_filters_hyphenated_tool_name(self, mock_schemas, resolver):
        """https://...#get-page must match the sanitized key get_page in schemas."""
        mock_schemas.return_value = {
            "get_page": {
                "description": "Get a page",
                "args_schema": None,
            },
            "search": {
                "description": "Search tool",
                "args_schema": None,
            },
        }
        tools = resolver._resolve_external("https://mcp.example.com/api#get-page")
        assert len(tools) == 1
        assert "get_page" in tools[0].name

    @patch.object(MCPToolResolver, "_get_mcp_tool_schemas")
    def test_filters_underscored_tool_name(self, mock_schemas, resolver):
        """https://...#get_page must also match the sanitized key get_page."""
        mock_schemas.return_value = {
            "get_page": {
                "description": "Get a page",
                "args_schema": None,
            },
            "search": {
                "description": "Search tool",
                "args_schema": None,
            },
        }
        tools = resolver._resolve_external("https://mcp.example.com/api#get_page")
        assert len(tools) == 1
        assert "get_page" in tools[0].name

    @patch.object(MCPToolResolver, "_get_mcp_tool_schemas")
    def test_returns_all_tools_without_hash(self, mock_schemas, resolver):
        # No fragment → no filtering → every schema becomes a tool.
        mock_schemas.return_value = {
            "get_page": {
                "description": "Get a page",
                "args_schema": None,
            },
            "search": {
                "description": "Search tool",
                "args_schema": None,
            },
        }
        tools = resolver._resolve_external("https://mcp.example.com/api")
        assert len(tools) == 2

    @patch.object(MCPToolResolver, "_get_mcp_tool_schemas")
    def test_returns_empty_for_nonexistent_tool(self, mock_schemas, resolver):
        # A fragment that matches nothing yields an empty tool list (no error).
        mock_schemas.return_value = {
            "search": {
                "description": "Search tool",
                "args_schema": None,
            },
        }
        tools = resolver._resolve_external("https://mcp.example.com/api#nonexistent")
        assert len(tools) == 0

View File

@@ -1,5 +1,4 @@
import asyncio
import concurrent.futures
from unittest.mock import AsyncMock, patch
import pytest
@@ -31,17 +30,6 @@ def mock_tool_definitions():
]
def _make_mock_client(tool_definitions):
"""Create a mock MCPClient that returns *tool_definitions*."""
client = AsyncMock()
client.list_tools = AsyncMock(return_value=tool_definitions)
client.connected = False
client.connect = AsyncMock()
client.disconnect = AsyncMock()
client.call_tool = AsyncMock(return_value="test result")
return client
def test_agent_with_stdio_mcp_config(mock_tool_definitions):
"""Test agent setup with MCPServerStdio configuration."""
stdio_config = MCPServerStdio(
@@ -57,8 +45,14 @@ def test_agent_with_stdio_mcp_config(mock_tool_definitions):
mcps=[stdio_config],
)
with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class:
mock_client_class.return_value = _make_mock_client(mock_tool_definitions)
mock_client = AsyncMock()
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
mock_client.connected = False # Will trigger connect
mock_client.connect = AsyncMock()
mock_client.disconnect = AsyncMock()
mock_client_class.return_value = mock_client
tools = agent.get_mcp_tools([stdio_config])
@@ -66,7 +60,8 @@ def test_agent_with_stdio_mcp_config(mock_tool_definitions):
assert all(isinstance(tool, BaseTool) for tool in tools)
mock_client_class.assert_called_once()
transport = mock_client_class.call_args.kwargs["transport"]
call_args = mock_client_class.call_args
transport = call_args.kwargs["transport"]
assert transport.command == "python"
assert transport.args == ["server.py"]
assert transport.env == {"API_KEY": "test_key"}
@@ -88,7 +83,12 @@ def test_agent_with_http_mcp_config(mock_tool_definitions):
)
with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class:
mock_client_class.return_value = _make_mock_client(mock_tool_definitions)
mock_client = AsyncMock()
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
mock_client.connected = False # Will trigger connect
mock_client.connect = AsyncMock()
mock_client.disconnect = AsyncMock()
mock_client_class.return_value = mock_client
tools = agent.get_mcp_tools([http_config])
@@ -96,7 +96,8 @@ def test_agent_with_http_mcp_config(mock_tool_definitions):
assert all(isinstance(tool, BaseTool) for tool in tools)
mock_client_class.assert_called_once()
transport = mock_client_class.call_args.kwargs["transport"]
call_args = mock_client_class.call_args
transport = call_args.kwargs["transport"]
assert transport.url == "https://api.example.com/mcp"
assert transport.headers == {"Authorization": "Bearer test_token"}
assert transport.streamable is True
@@ -117,7 +118,12 @@ def test_agent_with_sse_mcp_config(mock_tool_definitions):
)
with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class:
mock_client_class.return_value = _make_mock_client(mock_tool_definitions)
mock_client = AsyncMock()
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
mock_client.connected = False
mock_client.connect = AsyncMock()
mock_client.disconnect = AsyncMock()
mock_client_class.return_value = mock_client
tools = agent.get_mcp_tools([sse_config])
@@ -125,7 +131,8 @@ def test_agent_with_sse_mcp_config(mock_tool_definitions):
assert all(isinstance(tool, BaseTool) for tool in tools)
mock_client_class.assert_called_once()
transport = mock_client_class.call_args.kwargs["transport"]
call_args = mock_client_class.call_args
transport = call_args.kwargs["transport"]
assert transport.url == "https://api.example.com/mcp/sse"
assert transport.headers == {"Authorization": "Bearer test_token"}
@@ -135,7 +142,13 @@ def test_mcp_tool_execution_in_sync_context(mock_tool_definitions):
http_config = MCPServerHTTP(url="https://api.example.com/mcp")
with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class:
mock_client_class.return_value = _make_mock_client(mock_tool_definitions)
mock_client = AsyncMock()
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
mock_client.connected = False
mock_client.connect = AsyncMock()
mock_client.disconnect = AsyncMock()
mock_client.call_tool = AsyncMock(return_value="test result")
mock_client_class.return_value = mock_client
agent = Agent(
role="Test Agent",
@@ -147,12 +160,12 @@ def test_mcp_tool_execution_in_sync_context(mock_tool_definitions):
tools = agent.get_mcp_tools([http_config])
assert len(tools) == 2
tool = tools[0]
result = tool.run(query="test query")
assert result == "test result"
# 1 discovery + 1 for the run() invocation
assert mock_client_class.call_count == 2
mock_client.call_tool.assert_called()
@pytest.mark.asyncio
@@ -161,7 +174,13 @@ async def test_mcp_tool_execution_in_async_context(mock_tool_definitions):
http_config = MCPServerHTTP(url="https://api.example.com/mcp")
with patch("crewai.mcp.tool_resolver.MCPClient") as mock_client_class:
mock_client_class.return_value = _make_mock_client(mock_tool_definitions)
mock_client = AsyncMock()
mock_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
mock_client.connected = False
mock_client.connect = AsyncMock()
mock_client.disconnect = AsyncMock()
mock_client.call_tool = AsyncMock(return_value="test result")
mock_client_class.return_value = mock_client
agent = Agent(
role="Test Agent",
@@ -173,129 +192,9 @@ async def test_mcp_tool_execution_in_async_context(mock_tool_definitions):
tools = agent.get_mcp_tools([http_config])
assert len(tools) == 2
tool = tools[0]
result = tool.run(query="test query")
assert result == "test result"
assert mock_client_class.call_count == 2
def test_each_invocation_gets_fresh_client(mock_tool_definitions):
"""Every tool.run() must create its own MCPClient (no shared state)."""
http_config = MCPServerHTTP(url="https://api.example.com/mcp")
clients_created: list = []
def _make_client(**kwargs):
client = _make_mock_client(mock_tool_definitions)
clients_created.append(client)
return client
with patch("crewai.mcp.tool_resolver.MCPClient", side_effect=_make_client):
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
mcps=[http_config],
)
tools = agent.get_mcp_tools([http_config])
assert len(tools) == 2
# 1 discovery client so far
assert len(clients_created) == 1
# Two sequential calls to the same tool must create 2 new clients
tools[0].run(query="q1")
tools[0].run(query="q2")
assert len(clients_created) == 3
assert clients_created[1] is not clients_created[2]
def test_parallel_mcp_tool_execution_same_tool(mock_tool_definitions):
"""Parallel calls to the *same* tool must not interfere."""
http_config = MCPServerHTTP(url="https://api.example.com/mcp")
call_log: list[str] = []
def _make_client(**kwargs):
client = AsyncMock()
client.list_tools = AsyncMock(return_value=mock_tool_definitions)
client.connected = False
client.connect = AsyncMock()
client.disconnect = AsyncMock()
async def _call_tool(name, args):
call_log.append(name)
await asyncio.sleep(0.05)
return f"result-{name}"
client.call_tool = AsyncMock(side_effect=_call_tool)
return client
with patch("crewai.mcp.tool_resolver.MCPClient", side_effect=_make_client):
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
mcps=[http_config],
)
tools = agent.get_mcp_tools([http_config])
assert len(tools) >= 1
tool = tools[0]
# Call the SAME tool concurrently -- the exact scenario from the bug
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
futures = [
pool.submit(tool.run, query="q1"),
pool.submit(tool.run, query="q2"),
]
results = [f.result() for f in concurrent.futures.as_completed(futures)]
assert len(results) == 2
assert all("result-" in r for r in results)
assert len(call_log) == 2
def test_parallel_mcp_tool_execution_different_tools(mock_tool_definitions):
"""Parallel calls to different tools from the same server must not interfere."""
http_config = MCPServerHTTP(url="https://api.example.com/mcp")
call_log: list[str] = []
def _make_client(**kwargs):
client = AsyncMock()
client.list_tools = AsyncMock(return_value=mock_tool_definitions)
client.connected = False
client.connect = AsyncMock()
client.disconnect = AsyncMock()
async def _call_tool(name, args):
call_log.append(name)
await asyncio.sleep(0.05)
return f"result-{name}"
client.call_tool = AsyncMock(side_effect=_call_tool)
return client
with patch("crewai.mcp.tool_resolver.MCPClient", side_effect=_make_client):
agent = Agent(
role="Test Agent",
goal="Test goal",
backstory="Test backstory",
mcps=[http_config],
)
tools = agent.get_mcp_tools([http_config])
assert len(tools) == 2
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
futures = [
pool.submit(tools[0].run, query="q1"),
pool.submit(tools[1].run, query="q2"),
]
results = [f.result() for f in concurrent.futures.as_completed(futures)]
assert len(results) == 2
assert all("result-" in r for r in results)
assert len(call_log) == 2
mock_client.call_tool.assert_called()

View File

@@ -1,13 +0,0 @@
"""Stress tests for concurrent multi-process storage access.
Simulates the Airflow pattern: N worker processes each writing to the
same storage directory simultaneously. Verifies no LockException and
data integrity after all writes complete.
Uses temp files for IPC instead of multiprocessing.Manager (which uses
sockets blocked by pytest_recording).
"""
import pytest
pytestmark = pytest.mark.skip(reason="Multiprocessing tests incompatible with xdist --import-mode=importlib")

View File

@@ -172,8 +172,8 @@ def test_memory_scope_slice(tmp_path: Path, mock_embedder: MagicMock) -> None:
sc = mem.scope("/agent/1")
assert sc._root in ("/agent/1", "/agent/1/")
sl = mem.slice(["/a", "/b"], read_only=True)
assert sl.read_only is True
assert "/a" in sl.scopes and "/b" in sl.scopes
assert sl._read_only is True
assert "/a" in sl._scopes and "/b" in sl._scopes
def test_memory_list_scopes_info_tree(tmp_path: Path, mock_embedder: MagicMock) -> None:
@@ -198,7 +198,7 @@ def test_memory_scope_remember_recall(tmp_path: Path, mock_embedder: MagicMock)
from crewai.memory.memory_scope import MemoryScope
mem = Memory(storage=str(tmp_path / "db5"), llm=MagicMock(), embedder=mock_embedder)
scope = MemoryScope(memory=mem, root_path="/crew/1")
scope = MemoryScope(mem, "/crew/1")
scope.remember("Scoped note", scope="/", categories=[], importance=0.5, metadata={})
results = scope.recall("note", limit=5, depth="shallow")
assert len(results) >= 1
@@ -213,7 +213,7 @@ def test_memory_slice_recall(tmp_path: Path, mock_embedder: MagicMock) -> None:
mem = Memory(storage=str(tmp_path / "db6"), llm=MagicMock(), embedder=mock_embedder)
mem.remember("In scope A", scope="/a", categories=[], importance=0.5, metadata={})
sl = MemorySlice(memory=mem, scopes=["/a"], read_only=True)
sl = MemorySlice(mem, ["/a"], read_only=True)
matches = sl.recall("scope", limit=5, depth="shallow")
assert isinstance(matches, list)
@@ -223,7 +223,7 @@ def test_memory_slice_remember_is_noop_when_read_only(tmp_path: Path, mock_embed
from crewai.memory.memory_scope import MemorySlice
mem = Memory(storage=str(tmp_path / "db7"), llm=MagicMock(), embedder=mock_embedder)
sl = MemorySlice(memory=mem, scopes=["/a"], read_only=True)
sl = MemorySlice(mem, ["/a"], read_only=True)
result = sl.remember("x", scope="/a")
assert result is None
assert mem.list_records() == []
@@ -319,7 +319,7 @@ def test_executor_save_to_memory_calls_extract_then_remember_per_item() -> None:
from crewai.agents.parser import AgentFinish
mock_memory = MagicMock()
mock_memory.read_only = False
mock_memory._read_only = False
mock_memory.extract_memories.return_value = ["Fact A.", "Fact B."]
mock_agent = MagicMock()
@@ -360,7 +360,7 @@ def test_executor_save_to_memory_skips_delegation_output() -> None:
from crewai.utilities.string_utils import sanitize_tool_name
mock_memory = MagicMock()
mock_memory.read_only = False
mock_memory._read_only = False
mock_agent = MagicMock()
mock_agent.memory = mock_memory
mock_agent._logger = MagicMock()
@@ -393,7 +393,7 @@ def test_memory_scope_extract_memories_delegates() -> None:
mock_memory = MagicMock()
mock_memory.extract_memories.return_value = ["Scoped fact."]
scope = MemoryScope(memory=mock_memory, root_path="/agent/1")
scope = MemoryScope(mock_memory, "/agent/1")
result = scope.extract_memories("Some content")
mock_memory.extract_memories.assert_called_once_with("Some content")
assert result == ["Scoped fact."]
@@ -405,7 +405,7 @@ def test_memory_slice_extract_memories_delegates() -> None:
mock_memory = MagicMock()
mock_memory.extract_memories.return_value = ["Sliced fact."]
sl = MemorySlice(memory=mock_memory, scopes=["/a", "/b"], read_only=True)
sl = MemorySlice(mock_memory, ["/a", "/b"], read_only=True)
result = sl.extract_memories("Some content")
mock_memory.extract_memories.assert_called_once_with("Some content")
assert result == ["Sliced fact."]
@@ -670,10 +670,10 @@ def test_agent_kickoff_memory_recall_and_save(tmp_path: Path, mock_embedder: Mag
verbose=False,
)
# Patch on the class to avoid Pydantic BaseModel __delattr__ restriction
with patch.object(Memory, "recall", wraps=mem.recall) as recall_mock, \
patch.object(Memory, "extract_memories", return_value=["PostgreSQL is used."]) as extract_mock, \
patch.object(Memory, "remember_many", wraps=mem.remember_many) as remember_many_mock:
# Mock recall to verify it's called, but return real results
with patch.object(mem, "recall", wraps=mem.recall) as recall_mock, \
patch.object(mem, "extract_memories", return_value=["PostgreSQL is used."]) as extract_mock, \
patch.object(mem, "remember_many", wraps=mem.remember_many) as remember_many_mock:
result = agent.kickoff("What database do we use?")
assert result is not None

View File

@@ -1,6 +1,5 @@
"""Tests for async task execution."""
import contextvars
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
@@ -384,139 +383,4 @@ class TestAsyncTaskOutput:
assert result.description == "Test description"
assert result.expected_output == "Test expected"
assert result.raw == "Test result"
assert result.agent == "Test Agent"
class TestAsyncContextVarPropagation:
"""Tests for ContextVar propagation in threaded async task execution.
Verifies that execute_async() copies the calling thread's contextvars.Context
into the worker thread so that ContextVar values (used by OpenTelemetry,
Langfuse, and other tracing libraries) are preserved.
See: https://github.com/crewAIInc/crewAI/issues/4822
"""
def test_execute_async_preserves_contextvar(self, test_agent: Agent) -> None:
"""ContextVar set before execute_async() must be visible inside the worker thread."""
test_var: contextvars.ContextVar[str | None] = contextvars.ContextVar(
"test_var", default=None
)
test_var.set("parent_value")
captured: list[str | None] = []
original_execute_core = Task._execute_core
def patched_execute_core(self_task, agent, context, tools):
captured.append(test_var.get())
return original_execute_core(self_task, agent, context, tools)
task = Task(
description="Test task",
expected_output="Test output",
agent=test_agent,
)
with patch.object(Task, "_execute_core", patched_execute_core):
future = task.execute_async(agent=test_agent)
try:
future.result(timeout=10)
except Exception:
pass
assert len(captured) == 1, "patched _execute_core should have been called once"
assert captured[0] == "parent_value", (
f"ContextVar should be 'parent_value' inside worker thread, got {captured[0]!r}"
)
def test_execute_async_preserves_multiple_contextvars(self, test_agent: Agent) -> None:
"""Multiple ContextVars set before execute_async() must all be visible."""
var_a: contextvars.ContextVar[str | None] = contextvars.ContextVar(
"var_a", default=None
)
var_b: contextvars.ContextVar[int | None] = contextvars.ContextVar(
"var_b", default=None
)
var_a.set("alpha")
var_b.set(42)
captured_a: list[str | None] = []
captured_b: list[int | None] = []
original_execute_core = Task._execute_core
def patched_execute_core(self_task, agent, context, tools):
captured_a.append(var_a.get())
captured_b.append(var_b.get())
return original_execute_core(self_task, agent, context, tools)
task = Task(
description="Test task",
expected_output="Test output",
agent=test_agent,
)
with patch.object(Task, "_execute_core", patched_execute_core):
future = task.execute_async(agent=test_agent)
try:
future.result(timeout=10)
except Exception:
pass
assert captured_a[0] == "alpha"
assert captured_b[0] == 42
def test_execute_async_context_is_isolated_copy(self, test_agent: Agent) -> None:
"""Changes to ContextVar inside the worker thread must not leak back to the parent."""
test_var: contextvars.ContextVar[str | None] = contextvars.ContextVar(
"test_var", default=None
)
test_var.set("original")
original_execute_core = Task._execute_core
def patched_execute_core(self_task, agent, context, tools):
test_var.set("modified_in_worker")
return original_execute_core(self_task, agent, context, tools)
task = Task(
description="Test task",
expected_output="Test output",
agent=test_agent,
)
with patch.object(Task, "_execute_core", patched_execute_core):
future = task.execute_async(agent=test_agent)
try:
future.result(timeout=10)
except Exception:
pass
assert test_var.get() == "original", (
"ContextVar in parent thread should remain 'original' after worker modifies it"
)
def test_execute_async_without_contextvar_still_works(self, test_agent: Agent) -> None:
"""execute_async() must still work correctly when no ContextVars are set."""
original_execute_core = Task._execute_core
called = []
def patched_execute_core(self_task, agent, context, tools):
called.append(True)
return original_execute_core(self_task, agent, context, tools)
task = Task(
description="Test task",
expected_output="Test output",
agent=test_agent,
)
with patch.object(Task, "_execute_core", patched_execute_core):
future = task.execute_async(agent=test_agent)
try:
future.result(timeout=10)
except Exception:
pass
assert len(called) == 1, "_execute_core should have been called"
assert result.agent == "Test Agent"

View File

@@ -971,128 +971,6 @@ class TestCollapseToOutcomeJsonParsing:
assert mock_llm.call.call_count == 2
class TestLLMObjectPreservedInContext:
"""Tests that BaseLLM objects have their model string preserved in PendingFeedbackContext."""
@patch("crewai.flow.flow.crewai_event_bus.emit")
def test_basellm_object_model_string_survives_roundtrip(self, mock_emit: MagicMock) -> None:
"""Test that when llm is a BaseLLM object, its model string is stored in context
so that outcome collapsing works after async pause/resume.
This is the exact bug: locally the sync path keeps the LLM object in memory,
but in production the async path serializes the context and the LLM object was
discarded (stored as None), causing resume to skip classification and always
fall back to emit[0].
"""
with tempfile.TemporaryDirectory() as tmpdir:
db_path = os.path.join(tmpdir, "test_flows.db")
persistence = SQLiteFlowPersistence(db_path)
# Create a mock BaseLLM object (not a string)
mock_llm_obj = MagicMock()
mock_llm_obj.model = "gemini/gemini-2.0-flash"
class PausingProvider:
def __init__(self, persistence: SQLiteFlowPersistence):
self.persistence = persistence
self.captured_context: PendingFeedbackContext | None = None
def request_feedback(
self, context: PendingFeedbackContext, flow: Flow
) -> str:
self.captured_context = context
self.persistence.save_pending_feedback(
flow_uuid=context.flow_id,
context=context,
state_data=flow.state if isinstance(flow.state, dict) else flow.state.model_dump(),
)
raise HumanFeedbackPending(context=context)
provider = PausingProvider(persistence)
class TestFlow(Flow):
result_path: str = ""
@start()
@human_feedback(
message="Approve?",
emit=["needs_changes", "approved"],
llm=mock_llm_obj,
default_outcome="approved",
provider=provider,
)
def review(self):
return "content for review"
@listen("approved")
def handle_approved(self):
self.result_path = "approved"
return "Approved!"
@listen("needs_changes")
def handle_changes(self):
self.result_path = "needs_changes"
return "Changes needed"
# Phase 1: Start flow (should pause)
flow1 = TestFlow(persistence=persistence)
result = flow1.kickoff()
assert isinstance(result, HumanFeedbackPending)
# Verify the context stored the model STRING, not None
assert provider.captured_context is not None
assert provider.captured_context.llm == "gemini/gemini-2.0-flash"
# Verify it survives persistence roundtrip
flow_id = result.context.flow_id
loaded = persistence.load_pending_feedback(flow_id)
assert loaded is not None
_, loaded_context = loaded
assert loaded_context.llm == "gemini/gemini-2.0-flash"
# Phase 2: Resume with positive feedback - should use LLM to classify
flow2 = TestFlow.from_pending(flow_id, persistence)
assert flow2._pending_feedback_context is not None
assert flow2._pending_feedback_context.llm == "gemini/gemini-2.0-flash"
# Mock _collapse_to_outcome to verify it gets called (not skipped)
with patch.object(flow2, "_collapse_to_outcome", return_value="approved") as mock_collapse:
flow2.resume("this looks good, proceed!")
# The key assertion: _collapse_to_outcome was called (not skipped due to llm=None)
mock_collapse.assert_called_once_with(
feedback="this looks good, proceed!",
outcomes=["needs_changes", "approved"],
llm="gemini/gemini-2.0-flash",
)
assert flow2.last_human_feedback.outcome == "approved"
assert flow2.result_path == "approved"
def test_string_llm_still_works(self) -> None:
"""Test that passing llm as a string still works correctly."""
context = PendingFeedbackContext(
flow_id="str-llm-test",
flow_class="test.Flow",
method_name="review",
method_output="output",
message="Review:",
emit=["approved", "rejected"],
llm="gpt-4o-mini",
)
serialized = context.to_dict()
restored = PendingFeedbackContext.from_dict(serialized)
assert restored.llm == "gpt-4o-mini"
def test_none_llm_when_no_model_attr(self) -> None:
"""Test that llm is None when object has no model attribute."""
mock_obj = MagicMock(spec=[]) # No attributes
# Simulate what the decorator does
llm_value = mock_obj if isinstance(mock_obj, str) else getattr(mock_obj, "model", None)
assert llm_value is None
class TestAsyncHumanFeedbackEdgeCases:
"""Edge case tests for async human feedback."""

View File

@@ -36,7 +36,7 @@ from crewai.flow import Flow, start
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.llm import LLM
from crewai.memory.unified_memory import Memory
from crewai.process import Process
from crewai.project import CrewBase, agent, before_kickoff, crew, task
from crewai.task import Task
@@ -2618,9 +2618,9 @@ def test_memory_remember_called_after_task():
)
with patch.object(
Memory, "extract_memories", wraps=crew._memory.extract_memories
crew._memory, "extract_memories", wraps=crew._memory.extract_memories
) as extract_mock, patch.object(
Memory, "remember", wraps=crew._memory.remember
crew._memory, "remember", wraps=crew._memory.remember
) as remember_mock:
crew.kickoff()
@@ -4773,13 +4773,13 @@ def test_memory_remember_receives_task_content():
# Mock extract_memories to return fake memories and capture the raw input.
# No wraps= needed -- the test only checks what args it receives, not the output.
patch.object(
Memory, "extract_memories", return_value=["Fake memory."]
crew._memory, "extract_memories", return_value=["Fake memory."]
) as extract_mock,
# Mock recall to avoid LLM calls for query analysis (not in cassette).
patch.object(Memory, "recall", return_value=[]),
patch.object(crew._memory, "recall", return_value=[]),
# Mock remember_many to prevent the background save from triggering
# LLM calls (field resolution) that aren't in the cassette.
patch.object(Memory, "remember_many", return_value=[]),
patch.object(crew._memory, "remember_many", return_value=[]),
):
crew.kickoff()

View File

@@ -1843,213 +1843,3 @@ def test_cyclic_flow_works_with_persist_and_id_input():
f"'{method}' should fire 3 times, "
f"got {len(events)}: {execution_order}"
)
@pytest.mark.timeout(5)
def test_self_listening_method_does_not_loop():
"""A method whose @listen label matches its own name must not loop forever.
Without the guard, 'process' re-triggers itself on every completion,
running indefinitely (timeout → FAIL). The fix caps method calls
and raises RecursionError (PASS).
"""
class SelfListenFlow(Flow):
@start()
def begin(self):
return "process"
@router(begin)
def route(self):
return "process"
@listen("process")
def process(self):
pass
flow = SelfListenFlow()
with pytest.raises(RecursionError, match="infinite loop"):
flow.kickoff()
def test_or_condition_self_listen_fires_once():
"""or_() with a self-referencing label only fires once due to or_() guard."""
call_count = 0
class OrSelfListenFlow(Flow):
@start()
def begin(self):
return "process"
@router(begin)
def route(self):
return "process"
@listen(or_("other_trigger", "process"))
def process(self):
nonlocal call_count
call_count += 1
flow = OrSelfListenFlow()
flow.kickoff()
assert call_count == 1
class ListState(BaseModel):
items: list = []
class DictState(BaseModel):
data: dict = {}
class _ListFlow(Flow[ListState]):
@start()
def populate(self):
self.state.items = [3, 1, 4, 1, 5, 9, 2, 6]
class _DictFlow(Flow[DictState]):
@start()
def populate(self):
self.state.data = {"a": 1, "b": 2, "c": 3}
def _make_list_flow():
flow = _ListFlow()
flow.kickoff()
return flow
def _make_dict_flow():
flow = _DictFlow()
flow.kickoff()
return flow
def test_locked_list_proxy_index():
flow = _make_list_flow()
assert flow.state.items.index(4) == 2
assert flow.state.items.index(1, 2) == 3
def test_locked_list_proxy_index_missing_raises():
flow = _make_list_flow()
with pytest.raises(ValueError):
flow.state.items.index(999)
def test_locked_list_proxy_count():
flow = _make_list_flow()
assert flow.state.items.count(1) == 2
assert flow.state.items.count(999) == 0
def test_locked_list_proxy_sort():
flow = _make_list_flow()
flow.state.items.sort()
assert list(flow.state.items) == [1, 1, 2, 3, 4, 5, 6, 9]
def test_locked_list_proxy_sort_reverse():
flow = _make_list_flow()
flow.state.items.sort(reverse=True)
assert list(flow.state.items) == [9, 6, 5, 4, 3, 2, 1, 1]
def test_locked_list_proxy_sort_key():
flow = _make_list_flow()
flow.state.items.sort(key=lambda x: -x)
assert list(flow.state.items) == [9, 6, 5, 4, 3, 2, 1, 1]
def test_locked_list_proxy_reverse():
flow = _make_list_flow()
original = list(flow.state.items)
flow.state.items.reverse()
assert list(flow.state.items) == list(reversed(original))
def test_locked_list_proxy_copy():
flow = _make_list_flow()
copied = flow.state.items.copy()
assert copied == [3, 1, 4, 1, 5, 9, 2, 6]
assert isinstance(copied, list)
copied.append(999)
assert 999 not in flow.state.items
def test_locked_list_proxy_add():
flow = _make_list_flow()
result = flow.state.items + [10, 11]
assert result == [3, 1, 4, 1, 5, 9, 2, 6, 10, 11]
assert len(flow.state.items) == 8
def test_locked_list_proxy_radd():
flow = _make_list_flow()
result = [0] + flow.state.items
assert result[0] == 0
assert len(result) == 9
def test_locked_list_proxy_iadd():
flow = _make_list_flow()
flow.state.items += [10]
assert 10 in flow.state.items
# Verify no deadlock: mutations must still work after +=
flow.state.items.append(99)
assert 99 in flow.state.items
def test_locked_list_proxy_mul():
flow = _make_list_flow()
result = flow.state.items * 2
assert len(result) == 16
def test_locked_list_proxy_rmul():
flow = _make_list_flow()
result = 2 * flow.state.items
assert len(result) == 16
def test_locked_list_proxy_reversed():
flow = _make_list_flow()
original = list(flow.state.items)
assert list(reversed(flow.state.items)) == list(reversed(original))
def test_locked_dict_proxy_copy():
flow = _make_dict_flow()
copied = flow.state.data.copy()
assert copied == {"a": 1, "b": 2, "c": 3}
assert isinstance(copied, dict)
copied["z"] = 99
assert "z" not in flow.state.data
def test_locked_dict_proxy_or():
flow = _make_dict_flow()
result = flow.state.data | {"d": 4}
assert result == {"a": 1, "b": 2, "c": 3, "d": 4}
assert "d" not in flow.state.data
def test_locked_dict_proxy_ror():
flow = _make_dict_flow()
result = {"z": 0} | flow.state.data
assert result == {"z": 0, "a": 1, "b": 2, "c": 3}
def test_locked_dict_proxy_ior():
flow = _make_dict_flow()
flow.state.data |= {"d": 4}
assert flow.state.data["d"] == 4
# Verify no deadlock: mutations must still work after |=
flow.state.data["e"] = 5
assert flow.state.data["e"] == 5
def test_locked_dict_proxy_reversed():
flow = _make_dict_flow()
assert list(reversed(flow.state.data)) == ["c", "b", "a"]

View File

@@ -1,3 +1,3 @@
"""CrewAI development tools."""
__version__ = "1.10.2a1"
__version__ = "1.10.1a1"

8
uv.lock generated
View File

@@ -1426,7 +1426,7 @@ requires-dist = [
{ name = "docker", specifier = "~=7.1.0" },
{ name = "exa-py", marker = "extra == 'exa-py'", specifier = ">=1.8.7" },
{ name = "firecrawl-py", marker = "extra == 'firecrawl-py'", specifier = ">=1.8.0" },
{ name = "gitpython", marker = "extra == 'github'", specifier = ">=3.1.41,<4" },
{ name = "gitpython", marker = "extra == 'github'", specifier = "==3.1.38" },
{ name = "hyperbrowser", marker = "extra == 'hyperbrowser'", specifier = ">=0.18.0" },
{ name = "langchain-apify", marker = "extra == 'apify'", specifier = ">=0.1.2,<1.0.0" },
{ name = "linkup-sdk", marker = "extra == 'linkup-sdk'", specifier = ">=0.2.2" },
@@ -2201,14 +2201,14 @@ wheels = [
[[package]]
name = "gitpython"
version = "3.1.46"
version = "3.1.38"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "gitdb" },
]
sdist = { url = "https://files.pythonhosted.org/packages/df/b5/59d16470a1f0dfe8c793f9ef56fd3826093fc52b3bd96d6b9d6c26c7e27b/gitpython-3.1.46.tar.gz", hash = "sha256:400124c7d0ef4ea03f7310ac2fbf7151e09ff97f2a3288d64a440c584a29c37f", size = 215371, upload-time = "2026-01-01T15:37:32.073Z" }
sdist = { url = "https://files.pythonhosted.org/packages/b3/45/cee7af549b6fa33f04531e402693a772b776cd9f845a2cbeca99cfac3331/GitPython-3.1.38.tar.gz", hash = "sha256:4d683e8957c8998b58ddb937e3e6cd167215a180e1ffd4da769ab81c620a89fe", size = 200632, upload-time = "2023-10-17T06:09:52.235Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6a/09/e21df6aef1e1ffc0c816f0522ddc3f6dcded766c3261813131c78a704470/gitpython-3.1.46-py3-none-any.whl", hash = "sha256:79812ed143d9d25b6d176a10bb511de0f9c67b1fa641d82097b0ab90398a2058", size = 208620, upload-time = "2026-01-01T15:37:30.574Z" },
{ url = "https://files.pythonhosted.org/packages/3c/ae/044453eacd5a526d3f242ccd77e38ee8219c65e0b132562b551bd67c61a4/GitPython-3.1.38-py3-none-any.whl", hash = "sha256:9e98b672ffcb081c2c8d5aa630d4251544fb040fb158863054242f24a2a2ba30", size = 190573, upload-time = "2023-10-17T06:09:50.18Z" },
]
[[package]]