Compare commits


4 Commits

Author SHA1 Message Date
Greyson LaLonde
a39d1b6ebb Reduce test timeout to 30s 2025-07-03 19:16:03 -04:00
Greyson LaLonde
d7de94ff78 perf: increase test parallelization to 8 groups and remove redundant cache 2025-07-02 21:41:12 -04:00
Greyson LaLonde
8c3372ed57 fix: add job name to match branch protection rules 2025-07-02 20:18:18 -04:00
Greyson LaLonde
9492788e10 tests: speed up GitHub Actions with parallelization
- Add pytest-xdist and pytest-split to dev dependencies
- Split tests into 4 parallel groups per Python version
- Enable CPU-level parallelization with -n auto
- Add fail-fast strategy and maxfail=3
- Improve caching for Python packages
2025-07-02 20:03:41 -04:00
191 changed files with 4872 additions and 17812 deletions

.gitignore vendored
View File

@@ -26,5 +26,4 @@ test_flow.html
crewairules.mdc
plan.md
conceptual_plan.md
build_image
chromadb-*.lock
build_image

View File

@@ -9,7 +9,12 @@
},
"favicon": "/images/favicon.svg",
"contextual": {
"options": ["copy", "view", "chatgpt", "claude"]
"options": [
"copy",
"view",
"chatgpt",
"claude"
]
},
"navigation": {
"languages": [
@@ -32,6 +37,11 @@
"href": "https://chatgpt.com/g/g-qqTuUWsBY-crewai-assistant",
"icon": "robot"
},
{
"anchor": "Get Help",
"href": "mailto:support@crewai.com",
"icon": "headset"
},
{
"anchor": "Releases",
"href": "https://github.com/crewAIInc/crewAI/releases",
@@ -45,22 +55,32 @@
"groups": [
{
"group": "Get Started",
"pages": ["en/introduction", "en/installation", "en/quickstart"]
"pages": [
"en/introduction",
"en/installation",
"en/quickstart"
]
},
{
"group": "Guides",
"pages": [
{
"group": "Strategy",
"pages": ["en/guides/concepts/evaluating-use-cases"]
"pages": [
"en/guides/concepts/evaluating-use-cases"
]
},
{
"group": "Agents",
"pages": ["en/guides/agents/crafting-effective-agents"]
"pages": [
"en/guides/agents/crafting-effective-agents"
]
},
{
"group": "Crews",
"pages": ["en/guides/crews/first-crew"]
"pages": [
"en/guides/crews/first-crew"
]
},
{
"group": "Flows",
@@ -74,6 +94,7 @@
"pages": [
"en/guides/advanced/customizing-prompts",
"en/guides/advanced/fingerprinting"
]
}
]
@@ -128,9 +149,7 @@
"en/tools/file-document/jsonsearchtool",
"en/tools/file-document/csvsearchtool",
"en/tools/file-document/directorysearchtool",
"en/tools/file-document/directoryreadtool",
"en/tools/file-document/ocrtool",
"en/tools/file-document/pdf-text-writing-tool"
"en/tools/file-document/directoryreadtool"
]
},
{
@@ -148,8 +167,7 @@
"en/tools/web-scraping/stagehandtool",
"en/tools/web-scraping/firecrawlcrawlwebsitetool",
"en/tools/web-scraping/firecrawlscrapewebsitetool",
"en/tools/web-scraping/oxylabsscraperstool",
"en/tools/web-scraping/brightdata-tools"
"en/tools/web-scraping/oxylabsscraperstool"
]
},
{
@@ -164,13 +182,7 @@
"en/tools/search-research/websitesearchtool",
"en/tools/search-research/codedocssearchtool",
"en/tools/search-research/youtubechannelsearchtool",
"en/tools/search-research/youtubevideosearchtool",
"en/tools/search-research/tavilysearchtool",
"en/tools/search-research/tavilyextractortool",
"en/tools/search-research/arxivpapertool",
"en/tools/search-research/serpapi-googlesearchtool",
"en/tools/search-research/serpapi-googleshoppingtool",
"en/tools/search-research/databricks-query-tool"
"en/tools/search-research/youtubevideosearchtool"
]
},
{
@@ -182,9 +194,7 @@
"en/tools/database-data/snowflakesearchtool",
"en/tools/database-data/nl2sqltool",
"en/tools/database-data/qdrantvectorsearchtool",
"en/tools/database-data/weaviatevectorsearchtool",
"en/tools/database-data/mongodbvectorsearchtool",
"en/tools/database-data/singlestoresearchtool"
"en/tools/database-data/weaviatevectorsearchtool"
]
},
{
@@ -216,8 +226,7 @@
"en/tools/automation/overview",
"en/tools/automation/apifyactorstool",
"en/tools/automation/composiotool",
"en/tools/automation/multiontool",
"en/tools/automation/zapieractionstool"
"en/tools/automation/multiontool"
]
}
]
@@ -228,12 +237,10 @@
"en/observability/overview",
"en/observability/agentops",
"en/observability/arize-phoenix",
"en/observability/langdb",
"en/observability/langfuse",
"en/observability/langtrace",
"en/observability/maxim",
"en/observability/mlflow",
"en/observability/neatlogs",
"en/observability/openlit",
"en/observability/opik",
"en/observability/patronus-evaluation",
@@ -267,7 +274,9 @@
},
{
"group": "Telemetry",
"pages": ["en/telemetry"]
"pages": [
"en/telemetry"
]
}
]
},
@@ -276,7 +285,9 @@
"groups": [
{
"group": "Getting Started",
"pages": ["en/enterprise/introduction"]
"pages": [
"en/enterprise/introduction"
]
},
{
"group": "Features",
@@ -285,8 +296,7 @@
"en/enterprise/features/webhook-streaming",
"en/enterprise/features/traces",
"en/enterprise/features/hallucination-guardrail",
"en/enterprise/features/integrations",
"en/enterprise/features/agent-repositories"
"en/enterprise/features/integrations"
]
},
{
@@ -331,7 +341,9 @@
},
{
"group": "Resources",
"pages": ["en/enterprise/resources/frequently-asked-questions"]
"pages": [
"en/enterprise/resources/frequently-asked-questions"
]
}
]
},
@@ -340,7 +352,9 @@
"groups": [
{
"group": "Getting Started",
"pages": ["en/api-reference/introduction"]
"pages": [
"en/api-reference/introduction"
]
},
{
"group": "Endpoints",
@@ -350,13 +364,16 @@
},
{
"tab": "Examples",
"groups": [
"groups": [
{
"group": "Examples",
"pages": ["en/examples/example"]
"pages": [
"en/examples/example"
]
}
]
}
]
},
{
@@ -378,6 +395,11 @@
"href": "https://chatgpt.com/g/g-qqTuUWsBY-crewai-assistant",
"icon": "robot"
},
{
"anchor": "Obter Ajuda",
"href": "mailto:support@crewai.com",
"icon": "headset"
},
{
"anchor": "Lançamentos",
"href": "https://github.com/crewAIInc/crewAI/releases",
@@ -402,15 +424,21 @@
"pages": [
{
"group": "Estratégia",
"pages": ["pt-BR/guides/concepts/evaluating-use-cases"]
"pages": [
"pt-BR/guides/concepts/evaluating-use-cases"
]
},
{
"group": "Agentes",
"pages": ["pt-BR/guides/agents/crafting-effective-agents"]
"pages": [
"pt-BR/guides/agents/crafting-effective-agents"
]
},
{
"group": "Crews",
"pages": ["pt-BR/guides/crews/first-crew"]
"pages": [
"pt-BR/guides/crews/first-crew"
]
},
{
"group": "Flows",
@@ -566,7 +594,6 @@
"pt-BR/observability/overview",
"pt-BR/observability/agentops",
"pt-BR/observability/arize-phoenix",
"pt-BR/observability/langdb",
"pt-BR/observability/langfuse",
"pt-BR/observability/langtrace",
"pt-BR/observability/maxim",
@@ -604,7 +631,9 @@
},
{
"group": "Telemetria",
"pages": ["pt-BR/telemetry"]
"pages": [
"pt-BR/telemetry"
]
}
]
},
@@ -613,7 +642,9 @@
"groups": [
{
"group": "Começando",
"pages": ["pt-BR/enterprise/introduction"]
"pages": [
"pt-BR/enterprise/introduction"
]
},
{
"group": "Funcionalidades",
@@ -678,7 +709,9 @@
"groups": [
{
"group": "Começando",
"pages": ["pt-BR/api-reference/introduction"]
"pages": [
"pt-BR/api-reference/introduction"
]
},
{
"group": "Endpoints",
@@ -688,13 +721,16 @@
},
{
"tab": "Exemplos",
"groups": [
"groups": [
{
"group": "Exemplos",
"pages": ["pt-BR/examples/example"]
"pages": [
"pt-BR/examples/example"
]
}
]
}
]
}
]
@@ -738,7 +774,7 @@
"destination": "/en/introduction"
},
{
"source": "/installation",
"source": "/installation",
"destination": "/en/installation"
},
{

View File

@@ -526,103 +526,6 @@ agent = Agent(
The context window management feature works automatically in the background. You don't need to call any special functions - just set `respect_context_window` to your preferred behavior and CrewAI handles the rest!
</Note>
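For illustration, here is a minimal sketch of setting this flag on an agent; the role, goal, and backstory below are placeholders:
```python Code
from crewai import Agent

# Sketch only: respect_context_window controls how CrewAI handles prompts that
# exceed the model's context window. True (the default) enables the automatic
# handling described in the note above.
analyst = Agent(
    role="Document Analyst",
    goal="Summarize long documents",
    backstory="Placeholder backstory for illustration",
    respect_context_window=True
)
```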
## Direct Agent Interaction with `kickoff()`
Agents can be used directly without going through a task or crew workflow using the `kickoff()` method. This provides a simpler way to interact with an agent when you don't need the full crew orchestration capabilities.
### How `kickoff()` Works
The `kickoff()` method allows you to send messages directly to an agent and get a response, similar to how you would interact with an LLM but with all the agent's capabilities (tools, reasoning, etc.).
```python Code
from crewai import Agent
from crewai_tools import SerperDevTool
# Create an agent
researcher = Agent(
role="AI Technology Researcher",
goal="Research the latest AI developments",
tools=[SerperDevTool()],
verbose=True
)
# Use kickoff() to interact directly with the agent
result = researcher.kickoff("What are the latest developments in language models?")
# Access the raw response
print(result.raw)
```
### Parameters and Return Values
| Parameter | Type | Description |
| :---------------- | :---------------------------------- | :------------------------------------------------------------------------ |
| `messages` | `Union[str, List[Dict[str, str]]]` | Either a string query or a list of message dictionaries with role/content |
| `response_format` | `Optional[Type[Any]]` | Optional Pydantic model for structured output |
The method returns a `LiteAgentOutput` object with the following properties:
- `raw`: String containing the raw output text
- `pydantic`: Parsed Pydantic model (if a `response_format` was provided)
- `agent_role`: Role of the agent that produced the output
- `usage_metrics`: Token usage metrics for the execution
### Structured Output
You can get structured output by providing a Pydantic model as the `response_format`:
```python Code
from pydantic import BaseModel
from typing import List
class ResearchFindings(BaseModel):
    main_points: List[str]
    key_technologies: List[str]
    future_predictions: str
# Get structured output
result = researcher.kickoff(
"Summarize the latest developments in AI for 2025",
response_format=ResearchFindings
)
# Access structured data
print(result.pydantic.main_points)
print(result.pydantic.future_predictions)
```
### Multiple Messages
You can also provide a conversation history as a list of message dictionaries:
```python Code
messages = [
{"role": "user", "content": "I need information about large language models"},
{"role": "assistant", "content": "I'd be happy to help with that! What specifically would you like to know?"},
{"role": "user", "content": "What are the latest developments in 2025?"}
]
result = researcher.kickoff(messages)
```
### Async Support
An asynchronous version is available via `kickoff_async()` with the same parameters:
```python Code
import asyncio
async def main():
    result = await researcher.kickoff_async("What are the latest developments in AI?")
    print(result.raw)

asyncio.run(main())
```
<Note>
The `kickoff()` method uses a `LiteAgent` internally, which provides a simpler execution flow while preserving all of the agent's configuration (role, goal, backstory, tools, etc.).
</Note>
## Important Considerations and Best Practices
### Security and Code Execution

View File

@@ -88,7 +88,7 @@ crewai replay [OPTIONS]
- `-t, --task_id TEXT`: Replay the crew from this task ID, including all subsequent tasks
Example:
```shell Terminal
```shell Terminal
crewai replay -t task_123456
```
@@ -134,7 +134,7 @@ crewai test [OPTIONS]
- `-m, --model TEXT`: LLM Model to run the tests on the Crew (default: "gpt-4o-mini")
Example:
```shell Terminal
```shell Terminal
crewai test -n 5 -m gpt-3.5-turbo
```
@@ -151,7 +151,7 @@ Starting from version 0.103.0, the `crewai run` command can be used to run both
</Note>
<Note>
Make sure to run these commands from the directory where your CrewAI project is set up.
Make sure to run these commands from the directory where your CrewAI project is set up.
Some commands may require additional configuration or setup within your project structure.
</Note>
@@ -235,7 +235,7 @@ You must be authenticated to CrewAI Enterprise to use these organization managem
- **Deploy the Crew**: Once you are authenticated, you can deploy your crew or flow to CrewAI Enterprise.
```shell Terminal
crewai deploy push
```
```
- Initiates the deployment process on the CrewAI Enterprise platform.
- Upon successful initiation, it will output the Deployment created successfully! message along with the Deployment Name and a unique Deployment ID (UUID).
@@ -309,82 +309,3 @@ When you select a provider, the CLI will prompt you to enter the Key name and th
See the following link for each provider's key name:
* [LiteLLM Providers](https://docs.litellm.ai/docs/providers)
### 12. Configuration Management
Manage CLI configuration settings for CrewAI.
```shell Terminal
crewai config [COMMAND] [OPTIONS]
```
#### Commands:
- `list`: Display all CLI configuration parameters
```shell Terminal
crewai config list
```
- `set`: Set a CLI configuration parameter
```shell Terminal
crewai config set <key> <value>
```
- `reset`: Reset all CLI configuration parameters to default values
```shell Terminal
crewai config reset
```
#### Available Configuration Parameters
- `enterprise_base_url`: Base URL of the CrewAI Enterprise instance
- `oauth2_provider`: OAuth2 provider used for authentication (e.g., workos, okta, auth0)
- `oauth2_audience`: OAuth2 audience value, typically used to identify the target API or resource
- `oauth2_client_id`: OAuth2 client ID issued by the provider, used during authentication requests
- `oauth2_domain`: OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens
#### Examples
Display current configuration:
```shell Terminal
crewai config list
```
Example output:
```
CrewAI CLI Configuration
┏━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Setting ┃ Value ┃ Description ┃
┡━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ enterprise_base_url│ https://app.crewai.com │ Base URL of the CrewAI Enterprise instance │
│ org_name │ Not set │ Name of the currently active organization │
│ org_uuid │ Not set │ UUID of the currently active organization │
│ oauth2_provider │ workos │ OAuth2 provider used for authentication (e.g., workos, okta, auth0). │
│ oauth2_audience │ client_01YYY │ OAuth2 audience value, typically used to identify the target API or resource. │
│ oauth2_client_id │ client_01XXX │ OAuth2 client ID issued by the provider, used during authentication requests. │
│ oauth2_domain │ login.crewai.com │ OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens. │
```
Set the enterprise base URL:
```shell Terminal
crewai config set enterprise_base_url https://my-enterprise.crewai.com
```
Set OAuth2 provider:
```shell Terminal
crewai config set oauth2_provider auth0
```
Set OAuth2 domain:
```shell Terminal
crewai config set oauth2_domain my-company.auth0.com
```
Reset all configuration to defaults:
```shell Terminal
crewai config reset
```
<Note>
Configuration settings are stored in `~/.config/crewai/settings.json`. Some settings like organization name and UUID are read-only and managed through authentication and organization commands. Tool repository related settings are hidden and cannot be set directly by users.
</Note>

View File

@@ -20,7 +20,8 @@ A crew in crewAI represents a collaborative group of agents working together to
| **Function Calling LLM** _(optional)_ | `function_calling_llm` | If passed, the crew will use this LLM to do function calling for tools for all agents in the crew. Each agent can have its own LLM, which overrides the crew's LLM for function calling. |
| **Config** _(optional)_ | `config` | Optional configuration settings for the crew, in `Json` or `Dict[str, Any]` format. |
| **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). | |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
| **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
@@ -31,7 +32,6 @@ A crew in crewAI represents a collaborative group of agents working together to
| **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
| **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
| **Planning LLM** *(optional)* | `planning_llm` | The language model used by the AgentPlanner in a planning process. |
| **Knowledge Sources** _(optional)_ | `knowledge_sources` | Knowledge sources available at the crew level, accessible to all the agents. |
<Tip>
**Crew Max RPM**: The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents' `max_rpm` settings if you set it.
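As a minimal sketch (all names below are placeholders), a crew-level `max_rpm` that takes precedence over an agent's own limit might look like this:
```python Code
from crewai import Agent, Crew, Task

researcher = Agent(
    role="Researcher",
    goal="Gather sources quickly",
    backstory="Placeholder backstory for illustration",
    max_rpm=30,  # agent-level limit
)

research = Task(
    description="Collect three recent sources on AI agents",
    expected_output="A short list of sources",
    agent=researcher,
)

crew = Crew(
    agents=[researcher],
    tasks=[research],
    max_rpm=10,  # crew-level limit; overrides the agent's max_rpm
)
```
Here the crew enforces 10 requests per minute even though the agent asks for 30.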

View File

@@ -270,7 +270,7 @@ In this section, you'll find detailed examples that help you select, configure,
from crewai import LLM
llm = LLM(
model="gemini-1.5-pro-latest", # or vertex_ai/gemini-1.5-pro-latest
model="gemini/gemini-1.5-pro-latest",
temperature=0.7,
vertex_credentials=vertex_credentials_json
)

View File

@@ -9,7 +9,8 @@ icon: database
The CrewAI framework provides a sophisticated memory system designed to significantly enhance AI agent capabilities. CrewAI offers **three distinct memory approaches** that serve different use cases:
1. **Basic Memory System** - Built-in short-term, long-term, and entity memory
2. **External Memory** - Standalone external memory providers
2. **User Memory** - User-specific memory with Mem0 integration (legacy approach)
3. **External Memory** - Standalone external memory providers (new approach)
## Memory System Components
@@ -18,7 +19,7 @@ The CrewAI framework provides a sophisticated memory system designed to signific
| **Short-Term Memory**| Temporarily stores recent interactions and outcomes using `RAG`, enabling agents to recall and utilize information relevant to their current context during the current executions.|
| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, `ExternalMemory` and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
## 1. Basic Memory System (Recommended)
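A minimal sketch of enabling the built-in memory system; the agent and task below are placeholders:
```python
from crewai import Agent, Crew, Process, Task

writer = Agent(
    role="Writer",
    goal="Draft short summaries",
    backstory="Placeholder backstory for illustration",
)

summary_task = Task(
    description="Summarize the latest findings",
    expected_output="A concise summary",
    agent=writer,
)

crew = Crew(
    agents=[writer],
    tasks=[summary_task],
    process=Process.sequential,
    memory=True,  # enables short-term, long-term, and entity memory
)
```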
@@ -201,7 +202,7 @@ crew = Crew(
tasks=[task],
memory=True,
embedder={
"provider": "anthropic", # Match your LLM provider
"provider": "anthropic", # Match your LLM provider
"config": {
"api_key": "your-anthropic-key",
"model": "text-embedding-3-small"
@@ -622,7 +623,7 @@ for provider in providers_to_test:
**Model not found errors:**
```python
# Verify model availability
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.utilities.embedding_configurator import EmbeddingConfigurator
configurator = EmbeddingConfigurator()
try:
@@ -683,18 +684,67 @@ print(f"OpenAI: {openai_time:.2f}s")
print(f"Ollama: {ollama_time:.2f}s")
```
## 2. External Memory
External Memory provides a standalone memory system that operates independently from the crew's built-in memory. This is ideal for specialized memory providers or cross-application memory sharing.
## 2. User Memory with Mem0 (Legacy)
### Basic External Memory with Mem0
<Warning>
**Legacy Approach**: While fully functional, this approach is considered legacy. For new projects requiring user-specific memory, consider using External Memory instead.
</Warning>
User Memory integrates with [Mem0](https://mem0.ai/) to provide user-specific memory that persists across sessions and integrates with the crew's contextual memory system.
### Prerequisites
```bash
pip install mem0ai
```
### Mem0 Cloud Configuration
```python
import os
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
from crewai import Crew, Process
# Create external memory instance with local Mem0 Configuration
external_memory = ExternalMemory(
embedder_config={
# Set your Mem0 API key
os.environ["MEM0_API_KEY"] = "m0-your-api-key"
crew = Crew(
agents=[...],
tasks=[...],
memory=True, # Required for contextual memory integration
memory_config={
"provider": "mem0",
"config": {"user_id": "john"},
"user_memory": {} # Required - triggers user memory initialization
},
process=Process.sequential,
verbose=True
)
```
### Advanced Mem0 Configuration
```python
crew = Crew(
agents=[...],
tasks=[...],
memory=True,
memory_config={
"provider": "mem0",
"config": {
"user_id": "john",
"org_id": "my_org_id", # Optional
"project_id": "my_project_id", # Optional
"api_key": "custom-api-key" # Optional - overrides env var
},
"user_memory": {}
}
)
```
### Local Mem0 Configuration
```python
crew = Crew(
agents=[...],
tasks=[...],
memory=True,
memory_config={
"provider": "mem0",
"config": {
"user_id": "john",
@@ -711,60 +761,37 @@ external_memory = ExternalMemory(
"provider": "openai",
"config": {"api_key": "your-api-key", "model": "text-embedding-3-small"}
}
},
"infer": True # Optional defaults to True
}
},
"user_memory": {}
}
)
crew = Crew(
agents=[...],
tasks=[...],
external_memory=external_memory, # Separate from basic memory
process=Process.sequential,
verbose=True
)
```
### Advanced External Memory with Mem0 Client
When using the Mem0 client, you can customize the memory configuration further by using parameters such as 'includes', 'excludes', 'custom_categories', 'infer', and 'run_id' (the latter applies only to short-term memory).
You can find more details in the [Mem0 documentation](https://docs.mem0.ai/).
## 3. External Memory (New Approach)
External Memory provides a standalone memory system that operates independently from the crew's built-in memory. This is ideal for specialized memory providers or cross-application memory sharing.
### Basic External Memory with Mem0
```python
import os
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
new_categories = [
{"lifestyle_management_concerns": "Tracks daily routines, habits, hobbies and interests including cooking, time management and work-life balance"},
{"seeking_structure": "Documents goals around creating routines, schedules, and organized systems in various life areas"},
{"personal_information": "Basic information about the user including name, preferences, and personality traits"}
]
os.environ["MEM0_API_KEY"] = "your-api-key"
# Create external memory instance with Mem0 Client
# Create external memory instance
external_memory = ExternalMemory(
embedder_config={
"provider": "mem0",
"config": {
"user_id": "john",
"org_id": "my_org_id", # Optional
"project_id": "my_project_id", # Optional
"api_key": "custom-api-key" # Optional - overrides env var
"run_id": "my_run_id", # Optional - for short-term memory
"includes": "include1", # Optional
"excludes": "exclude1", # Optional
"infer": True # Optional defaults to True
"custom_categories": new_categories # Optional - custom categories for user memory
},
"config": {"user_id": "U-123"}
}
)
crew = Crew(
agents=[...],
tasks=[...],
external_memory=external_memory, # Separate from basic memory
external_memory=external_memory, # Separate from basic memory
process=Process.sequential,
verbose=True
)
@@ -803,18 +830,17 @@ crew = Crew(
)
```
## 🧠 Memory System Comparison
| **Category** | **Feature** | **Basic Memory** | **External Memory** |
|---------------------|------------------------|-----------------------------|------------------------------|
| **Ease of Use** | Setup Complexity | Simple | Moderate |
| | Integration | Built-in (contextual) | Standalone |
| **Persistence** | Storage | Local files | Custom / Mem0 |
| | Cross-session Support | ✅ | ✅ |
| **Personalization** | User-specific Memory | ❌ | ✅ |
| | Custom Providers | Limited | Any provider |
| **Use Case Fit** | Recommended For | Most general use cases | Specialized / custom needs |
## Memory System Comparison
| Feature | Basic Memory | User Memory (Legacy) | External Memory |
|---------|-------------|---------------------|----------------|
| **Setup Complexity** | Simple | Medium | Medium |
| **Integration** | Built-in contextual | Contextual + User-specific | Standalone |
| **Storage** | Local files | Mem0 Cloud/Local | Custom/Mem0 |
| **Cross-session** | ✅ | ✅ | ✅ |
| **User-specific** | ❌ | ✅ | ✅ |
| **Custom providers** | Limited | Mem0 only | Any provider |
| **Recommended for** | Most use cases | Legacy projects | Specialized needs |
## Supported Embedding Providers

View File

@@ -54,11 +54,9 @@ crew = Crew(
| **Markdown** _(optional)_ | `markdown` | `Optional[bool]` | Whether the task should instruct the agent to return the final answer formatted in Markdown. Defaults to False. |
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
| **Create Directory** _(optional)_ | `create_directory` | `Optional[bool]` | Whether to create the directory for output_file if it doesn't exist. Defaults to True. |
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
| **Output Pydantic** _(optional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | A Pydantic model for task output. |
| **Callback** _(optional)_ | `callback` | `Optional[Any]` | Function/object to be executed after task completion. |
| **Guardrail** _(optional)_ | `guardrail` | `Optional[Callable]` | Function to validate task output before proceeding to next task. |
## Creating Tasks
@@ -334,11 +332,9 @@ Task guardrails provide a way to validate and transform task outputs before they
are passed to the next task. This feature helps ensure data quality and provides
feedback to agents when their output doesn't meet specific criteria.
Guardrails are implemented as Python functions that contain custom validation logic, giving you complete control over the validation process and ensuring reliable, deterministic results.
### Using Task Guardrails
### Function-Based Guardrails
To add a function-based guardrail to a task, provide a validation function through the `guardrail` parameter:
To add a guardrail to a task, provide a validation function through the `guardrail` parameter:
```python Code
from typing import Tuple, Union, Dict, Any
@@ -376,7 +372,9 @@ blog_task = Task(
- On success: it returns a tuple of `(bool, Any)`. For example: `(True, validated_result)`
- On failure: it returns a tuple of `(bool, str)`. For example: `(False, "Error message explaining the failure")`
### LLMGuardrail
The `LLMGuardrail` class offers a robust mechanism for validating task outputs.
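As a rough sketch of wiring it up through the same `guardrail` parameter — the import path for `LLMGuardrail` below is an assumption and may differ between crewai versions:
```python Code
from crewai import Task
from crewai.llm import LLM
# Assumed import path; adjust to your installed crewai version.
from crewai.tasks.llm_guardrail import LLMGuardrail

quality_check = LLMGuardrail(
    description="Ensure the response is a valid JSON object",
    llm=LLM(model="gpt-4o-mini"),
)

task = Task(
    description="Generate JSON data",
    expected_output="Valid JSON object",
    guardrail=quality_check,
)
```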
### Error Handling Best Practices
@@ -800,91 +798,184 @@ While creating and executing tasks, certain validation mechanisms are in place t
These validations help in maintaining the consistency and reliability of task executions within the crewAI framework.
## Task Guardrails
Task guardrails provide a powerful way to validate, transform, or filter task outputs before they are passed to the next task. Guardrails are optional functions that execute before the next task starts, allowing you to ensure that task outputs meet specific requirements or formats.
### Basic Usage
#### Define your own logic to validate
```python Code
import json
from typing import Tuple, Union

from crewai import Task

def validate_json_output(result: str) -> Tuple[bool, Union[dict, str]]:
    """Validate that the output is valid JSON."""
    try:
        json_data = json.loads(result)
        return (True, json_data)
    except json.JSONDecodeError:
        return (False, "Output must be valid JSON")
task = Task(
description="Generate JSON data",
expected_output="Valid JSON object",
guardrail=validate_json_output
)
```
#### Leverage a no-code approach for validation
```python Code
from crewai import Task
task = Task(
description="Generate JSON data",
expected_output="Valid JSON object",
guardrail="Ensure the response is a valid JSON object"
)
```
#### Using YAML
```yaml
research_task:
...
guardrail: make sure each bullet contains a minimum of 100 words
...
```
```python Code
@CrewBase
class InternalCrew:
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
...
@task
def research_task(self):
return Task(config=self.tasks_config["research_task"]) # type: ignore[index]
...
```
#### Use custom models for code generation
```python Code
from crewai import Task
from crewai.llm import LLM
task = Task(
description="Generate JSON data",
expected_output="Valid JSON object",
guardrail=LLMGuardrail(
description="Ensure the response is a valid JSON object",
llm=LLM(model="gpt-4o-mini"),
)
)
```
### How Guardrails Work
1. **Optional Attribute**: Guardrails are an optional attribute at the task level, allowing you to add validation only where needed.
2. **Execution Timing**: The guardrail function is executed before the next task starts, ensuring valid data flow between tasks.
3. **Return Format**: Guardrails must return a tuple of `(success, data)`:
- If `success` is `True`, `data` is the validated/transformed result
- If `success` is `False`, `data` is the error message
4. **Result Routing**:
- On success (`True`), the result is automatically passed to the next task
- On failure (`False`), the error is sent back to the agent to generate a new answer
### Common Use Cases
#### Data Format Validation
```python Code
from typing import Tuple, Union

def validate_email_format(result: str) -> Tuple[bool, Union[str, str]]:
    """Ensure the output contains a valid email address."""
    import re

    email_pattern = r'^[\w\.-]+@[\w\.-]+\.\w+$'
    if re.match(email_pattern, result.strip()):
        return (True, result.strip())
    return (False, "Output must be a valid email address")
```
#### Content Filtering
```python Code
from typing import Tuple, Union

def filter_sensitive_info(result: str) -> Tuple[bool, Union[str, str]]:
    """Remove or validate sensitive information."""
    sensitive_patterns = ['SSN:', 'password:', 'secret:']
    for pattern in sensitive_patterns:
        if pattern.lower() in result.lower():
            return (False, f"Output contains sensitive information ({pattern})")
    return (True, result)
```
#### Data Transformation
```python Code
from typing import Tuple, Union

def normalize_phone_number(result: str) -> Tuple[bool, Union[str, str]]:
    """Ensure phone numbers are in a consistent format."""
    import re

    digits = re.sub(r'\D', '', result)
    if len(digits) == 10:
        formatted = f"({digits[:3]}) {digits[3:6]}-{digits[6:]}"
        return (True, formatted)
    return (False, "Output must be a 10-digit phone number")
```
### Advanced Features
#### Chaining Multiple Validations
```python Code
def chain_validations(*validators):
    """Chain multiple validators together."""
    def combined_validator(result):
        for validator in validators:
            success, data = validator(result)
            if not success:
                return (False, data)
            result = data
        return (True, result)
    return combined_validator
# Usage
task = Task(
description="Get user contact info",
expected_output="Email and phone",
guardrail=chain_validations(
validate_email_format,
filter_sensitive_info
)
)
```
#### Custom Retry Logic
```python Code
task = Task(
description="Generate data",
expected_output="Valid data",
guardrail=validate_data,
max_retries=5 # Override default retry limit
)
```
## Creating Directories when Saving Files
The `create_directory` parameter controls whether CrewAI should automatically create directories when saving task outputs to files. This feature is particularly useful for organizing outputs and ensuring that file paths are correctly structured, especially when working with complex project hierarchies.
### Default Behavior
By default, `create_directory=True`, which means CrewAI will automatically create any missing directories in the output file path:
You can now specify if a task should create directories when saving its output to a file. This is particularly useful for organizing outputs and ensuring that file paths are correctly structured.
```python Code
# Default behavior - directories are created automatically
report_task = Task(
description='Generate a comprehensive market analysis report',
expected_output='A detailed market analysis with charts and insights',
agent=analyst_agent,
output_file='reports/2025/market_analysis.md', # Creates 'reports/2025/' if it doesn't exist
markdown=True
# ...
save_output_task = Task(
description='Save the summarized AI news to a file',
expected_output='File saved successfully',
agent=research_agent,
tools=[file_save_tool],
output_file='outputs/ai_news_summary.txt',
create_directory=True
)
```
### Disabling Directory Creation
If you want to prevent automatic directory creation and ensure that the directory already exists, set `create_directory=False`:
```python Code
# Strict mode - directory must already exist
strict_output_task = Task(
description='Save critical data that requires existing infrastructure',
expected_output='Data saved to pre-configured location',
agent=data_agent,
output_file='secure/vault/critical_data.json',
create_directory=False # Will raise RuntimeError if 'secure/vault/' doesn't exist
)
```
### YAML Configuration
You can also configure this behavior in your YAML task definitions:
```yaml tasks.yaml
analysis_task:
description: >
Generate quarterly financial analysis
expected_output: >
A comprehensive financial report with quarterly insights
agent: financial_analyst
output_file: reports/quarterly/q4_2024_analysis.pdf
create_directory: true # Automatically create 'reports/quarterly/' directory
audit_task:
description: >
Perform compliance audit and save to existing audit directory
expected_output: >
A compliance audit report
agent: auditor
output_file: audit/compliance_report.md
create_directory: false # Directory must already exist
```
### Use Cases
**Automatic Directory Creation (`create_directory=True`):**
- Development and prototyping environments
- Dynamic report generation with date-based folders
- Automated workflows where directory structure may vary
- Multi-tenant applications with user-specific folders
**Manual Directory Management (`create_directory=False`):**
- Production environments with strict file system controls
- Security-sensitive applications where directories must be pre-configured
- Systems with specific permission requirements
- Compliance environments where directory creation is audited
### Error Handling
When `create_directory=False` and the directory doesn't exist, CrewAI will raise a `RuntimeError`:
```python Code
try:
    result = crew.kickoff()
except RuntimeError as e:
    # Handle missing directory error
    print(f"Directory creation failed: {e}")
    # Create directory manually or use fallback location
    #...
```
Check out the video below to see how to use structured outputs in CrewAI:

View File

@@ -1,155 +0,0 @@
---
title: 'Agent Repositories'
description: 'Learn how to use Agent Repositories to share and reuse your agents across teams and projects'
icon: 'database'
---
Agent Repositories allow enterprise users to store, share, and reuse agent definitions across teams and projects. This feature enables organizations to maintain a centralized library of standardized agents, promoting consistency and reducing duplication of effort.
## Benefits of Agent Repositories
- **Standardization**: Maintain consistent agent definitions across your organization
- **Reusability**: Create an agent once and use it in multiple crews and projects
- **Governance**: Implement organization-wide policies for agent configurations
- **Collaboration**: Enable teams to share and build upon each other's work
## Using Agent Repositories
### Prerequisites
1. You must have a CrewAI account; you can start with the [free plan](https://app.crewai.com).
2. You need to be authenticated using the CrewAI CLI.
3. If you have more than one organization, make sure you are switched to the correct organization using the CLI command:
```bash
crewai org switch <org_id>
```
### Creating and Managing Agents in Repositories
To create and manage agents in repositories, use the Enterprise Dashboard.
### Loading Agents from Repositories
You can load agents from repositories in your code using the `from_repository` parameter:
```python
from crewai import Agent
# Create an agent by loading it from a repository
# The agent is loaded with all its predefined configurations
researcher = Agent(
from_repository="market-research-agent"
)
```
### Overriding Repository Settings
You can override specific settings from the repository by providing them in the configuration:
```python
researcher = Agent(
from_repository="market-research-agent",
goal="Research the latest trends in AI development", # Override the repository goal
verbose=True # Add a setting not in the repository
)
```
### Example: Creating a Crew with Repository Agents
```python
from crewai import Crew, Agent, Task
# Load agents from repositories
researcher = Agent(
from_repository="market-research-agent"
)
writer = Agent(
from_repository="content-writer-agent"
)
# Create tasks
research_task = Task(
description="Research the latest trends in AI",
agent=researcher
)
writing_task = Task(
description="Write a comprehensive report based on the research",
agent=writer
)
# Create the crew
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, writing_task],
verbose=True
)
# Run the crew
result = crew.kickoff()
```
### Example: Using `kickoff()` with Repository Agents
You can also use repository agents directly with the `kickoff()` method for simpler interactions:
```python
from crewai import Agent
from pydantic import BaseModel
from typing import List
# Define a structured output format
class MarketAnalysis(BaseModel):
    key_trends: List[str]
    opportunities: List[str]
    recommendation: str
# Load an agent from repository
analyst = Agent(
from_repository="market-analyst-agent",
verbose=True
)
# Get a free-form response
result = analyst.kickoff("Analyze the AI market in 2025")
print(result.raw) # Access the raw response
# Get structured output
structured_result = analyst.kickoff(
"Provide a structured analysis of the AI market in 2025",
response_format=MarketAnalysis
)
# Access structured data
print(f"Key Trends: {structured_result.pydantic.key_trends}")
print(f"Recommendation: {structured_result.pydantic.recommendation}")
```
## Best Practices
1. **Naming Convention**: Use clear, descriptive names for your repository agents
2. **Documentation**: Include comprehensive descriptions for each agent
3. **Tool Management**: Ensure that tools referenced by repository agents are available in your environment
4. **Access Control**: Manage permissions to ensure only authorized team members can modify repository agents
## Organization Management
To switch between organizations or see your current organization, use the CrewAI CLI:
```bash
# View current organization
crewai org current
# Switch to a different organization
crewai org switch <org_id>
# List all available organizations
crewai org list
```
<Note>
When loading agents from repositories, you must be authenticated and switched to the correct organization. If you receive errors, check your authentication status and organization settings using the CLI commands above.
</Note>

View File

@@ -44,19 +44,6 @@ The `MCPServerAdapter` class from `crewai-tools` is the primary way to connect t
Using a Python context manager (`with` statement) is the **recommended approach** for `MCPServerAdapter`. It automatically handles starting and stopping the connection to the MCP server.
## Connection Configuration
The `MCPServerAdapter` supports several configuration options to customize the connection behavior:
- **`connect_timeout`** (optional): Maximum time in seconds to wait for establishing a connection to the MCP server. Defaults to 30 seconds if not specified. This is particularly useful for remote servers that may have variable response times.
```python
# Example with custom connection timeout
with MCPServerAdapter(server_params, connect_timeout=60) as tools:
    # Connection will timeout after 60 seconds if not established
    pass
```
```python
from crewai import Agent
from crewai_tools import MCPServerAdapter
@@ -83,7 +70,7 @@ server_params = {
}
# Example usage (uncomment and adapt once server_params is set):
with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:
with MCPServerAdapter(server_params) as mcp_tools:
print(f"Available tools: {[tool.name for tool in mcp_tools]}")
my_agent = Agent(
@@ -108,7 +95,7 @@ There are two ways to filter tools:
### Accessing a specific tool using dictionary-style indexing.
```python
with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:
with MCPServerAdapter(server_params) as mcp_tools:
print(f"Available tools: {[tool.name for tool in mcp_tools]}")
my_agent = Agent(
@@ -125,7 +112,7 @@ with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:
### Pass a list of tool names to the `MCPServerAdapter` constructor.
```python
with MCPServerAdapter(server_params, "tool_name", connect_timeout=60) as mcp_tools:
with MCPServerAdapter(server_params, "tool_name") as mcp_tools:
print(f"Available tools: {[tool.name for tool in mcp_tools]}")
my_agent = Agent(

View File

@@ -1,286 +0,0 @@
---
title: LangDB Integration
description: Govern, secure, and optimize your CrewAI workflows with LangDB AI Gateway—access 350+ models, automatic routing, cost optimization, and full observability.
icon: database
---
# Introduction
[LangDB AI Gateway](https://langdb.ai) provides OpenAI-compatible APIs to connect with multiple Large Language Models and serves as an observability platform that makes it effortless to trace CrewAI workflows end-to-end while providing access to 350+ language models. With a single `init()` call, all agent interactions, task executions, and LLM calls are captured, providing comprehensive observability and production-ready AI infrastructure for your applications.
<Frame caption="LangDB CrewAI Trace Example">
<img src="/images/langdb-1.png" alt="LangDB CrewAI trace example" />
</Frame>
**Check out:** [View the live trace example](https://app.langdb.ai/sharing/threads/3becbfed-a1be-ae84-ea3c-4942867a3e22)
## Features
### AI Gateway Capabilities
- **Access to 350+ LLMs**: Connect to all major language models through a single integration
- **Virtual Models**: Create custom model configurations with specific parameters and routing rules
- **Virtual MCP**: Enable compatibility and integration with MCP (Model Context Protocol) systems for enhanced agent communication
- **Guardrails**: Implement safety measures and compliance controls for agent behavior
### Observability & Tracing
- **Automatic Tracing**: Single `init()` call captures all CrewAI interactions
- **End-to-End Visibility**: Monitor agent workflows from start to finish
- **Tool Usage Tracking**: Track which tools agents use and their outcomes
- **Model Call Monitoring**: Detailed insights into LLM interactions
- **Performance Analytics**: Monitor latency, token usage, and costs
- **Debugging Support**: Step-through execution for troubleshooting
- **Real-time Monitoring**: Live traces and metrics dashboard
## Setup Instructions
<Steps>
<Step title="Install LangDB">
Install the LangDB client with CrewAI feature flag:
```bash
pip install 'pylangdb[crewai]'
```
</Step>
<Step title="Set Environment Variables">
Configure your LangDB credentials:
```bash
export LANGDB_API_KEY="<your_langdb_api_key>"
export LANGDB_PROJECT_ID="<your_langdb_project_id>"
export LANGDB_API_BASE_URL='https://api.us-east-1.langdb.ai'
```
</Step>
<Step title="Initialize Tracing">
Import and initialize LangDB before configuring your CrewAI code:
```python
from pylangdb.crewai import init
# Initialize LangDB
init()
```
</Step>
<Step title="Configure CrewAI with LangDB">
Set up your LLM with LangDB headers:
```python
from crewai import Agent, Task, Crew, LLM
import os
# Configure LLM with LangDB headers
llm = LLM(
model="openai/gpt-4o", # Replace with the model you want to use
api_key=os.getenv("LANGDB_API_KEY"),
base_url=os.getenv("LANGDB_API_BASE_URL"),
extra_headers={"x-project-id": os.getenv("LANGDB_PROJECT_ID")}
)
```
</Step>
</Steps>
## Quick Start Example
Here's a simple example to get you started with LangDB and CrewAI:
```python
import os

from pylangdb.crewai import init

# Initialize LangDB before any CrewAI imports
init()

from crewai import Agent, Task, Crew, LLM

def create_llm(model):
    return LLM(
        model=model,
        api_key=os.environ.get("LANGDB_API_KEY"),
        base_url=os.environ.get("LANGDB_API_BASE_URL"),
        extra_headers={"x-project-id": os.environ.get("LANGDB_PROJECT_ID")}
    )
# Define your agent
researcher = Agent(
role="Research Specialist",
goal="Research topics thoroughly",
backstory="Expert researcher with skills in finding information",
llm=create_llm("openai/gpt-4o"), # Replace with the model you want to use
verbose=True
)
# Create a task
task = Task(
description="Research the given topic and provide a comprehensive summary",
agent=researcher,
expected_output="Detailed research summary with key findings"
)
# Create and run the crew
crew = Crew(agents=[researcher], tasks=[task])
result = crew.kickoff()
print(result)
```
## Complete Example: Research and Planning Agent
This comprehensive example demonstrates a multi-agent workflow with research and planning capabilities.
### Prerequisites
```bash
pip install crewai 'pylangdb[crewai]' crewai_tools setuptools python-dotenv
```
### Environment Setup
```bash
# LangDB credentials
export LANGDB_API_KEY="<your_langdb_api_key>"
export LANGDB_PROJECT_ID="<your_langdb_project_id>"
export LANGDB_API_BASE_URL='https://api.us-east-1.langdb.ai'
# Additional API keys (optional)
export SERPER_API_KEY="<your_serper_api_key>" # For web search capabilities
```
### Complete Implementation
```python
#!/usr/bin/env python3
import os
import sys
from pylangdb.crewai import init
init() # Initialize LangDB before any CrewAI imports
from dotenv import load_dotenv
from crewai import Agent, Task, Crew, Process, LLM
from crewai_tools import SerperDevTool
load_dotenv()
def create_llm(model):
    return LLM(
        model=model,
        api_key=os.environ.get("LANGDB_API_KEY"),
        base_url=os.environ.get("LANGDB_API_BASE_URL"),
        extra_headers={"x-project-id": os.environ.get("LANGDB_PROJECT_ID")}
    )

class ResearchPlanningCrew:
    def researcher(self) -> Agent:
        return Agent(
            role="Research Specialist",
            goal="Research topics thoroughly and compile comprehensive information",
            backstory="Expert researcher with skills in finding and analyzing information from various sources",
            tools=[SerperDevTool()],
            llm=create_llm("openai/gpt-4o"),
            verbose=True
        )

    def planner(self) -> Agent:
        return Agent(
            role="Strategic Planner",
            goal="Create actionable plans based on research findings",
            backstory="Strategic planner who breaks down complex challenges into executable plans",
            reasoning=True,
            max_reasoning_attempts=3,
            llm=create_llm("openai/anthropic/claude-3.7-sonnet"),
            verbose=True
        )

    def research_task(self) -> Task:
        return Task(
            description="Research the topic thoroughly and compile comprehensive information",
            agent=self.researcher(),
            expected_output="Comprehensive research report with key findings and insights"
        )

    def planning_task(self) -> Task:
        return Task(
            description="Create a strategic plan based on the research findings",
            agent=self.planner(),
            expected_output="Strategic execution plan with phases, goals, and actionable steps",
            context=[self.research_task()]
        )

    def crew(self) -> Crew:
        return Crew(
            agents=[self.researcher(), self.planner()],
            tasks=[self.research_task(), self.planning_task()],
            verbose=True,
            process=Process.sequential
        )

def main():
    topic = sys.argv[1] if len(sys.argv) > 1 else "Artificial Intelligence in Healthcare"
    crew_instance = ResearchPlanningCrew()
    # Update task descriptions with the specific topic
    crew_instance.research_task().description = f"Research {topic} thoroughly and compile comprehensive information"
    crew_instance.planning_task().description = f"Create a strategic plan for {topic} based on the research findings"
    result = crew_instance.crew().kickoff()
    print(result)

if __name__ == "__main__":
    main()
```
### Running the Example
```bash
python main.py "Sustainable Energy Solutions"
```
## Viewing Traces in LangDB
After running your CrewAI application, you can view detailed traces in the LangDB dashboard:
<Frame caption="LangDB Trace Dashboard">
<img src="/images/langdb-2.png" alt="LangDB trace dashboard showing CrewAI workflow" />
</Frame>
### What You'll See
- **Agent Interactions**: Complete flow of agent conversations and task handoffs
- **Tool Usage**: Which tools were called, their inputs, and outputs
- **Model Calls**: Detailed LLM interactions with prompts and responses
- **Performance Metrics**: Latency, token usage, and cost tracking
- **Execution Timeline**: Step-by-step view of the entire workflow
## Troubleshooting
### Common Issues
- **No traces appearing**: Ensure `init()` is called before any CrewAI imports
- **Authentication errors**: Verify your LangDB API key and project ID
## Resources
<CardGroup cols={3}>
<Card title="LangDB Documentation" icon="book" href="https://docs.langdb.ai">
Official LangDB documentation and guides
</Card>
<Card title="LangDB Guides" icon="graduation-cap" href="https://docs.langdb.ai/guides">
Step-by-step tutorials for building AI agents
</Card>
<Card title="GitHub Examples" icon="github" href="https://github.com/langdb/langdb-samples/tree/main/examples/crewai" >
Complete CrewAI integration examples
</Card>
<Card title="LangDB Dashboard" icon="chart-line" href="https://app.langdb.ai">
Access your traces and analytics
</Card>
<Card title="Model Catalog" icon="list" href="https://app.langdb.ai/models">
Browse 350+ available language models
</Card>
<Card title="Enterprise Features" icon="building" href="https://docs.langdb.ai/enterprise">
Self-hosted options and enterprise capabilities
</Card>
</CardGroup>
## Next Steps
This guide covered the basics of integrating LangDB AI Gateway with CrewAI. To further enhance your AI workflows, explore:
- **Virtual Models**: Create custom model configurations with routing strategies
- **Guardrails & Safety**: Implement content filtering and compliance controls
- **Production Deployment**: Configure fallbacks, retries, and load balancing
For more advanced features and use cases, visit the [LangDB Documentation](https://docs.langdb.ai) or explore the [Model Catalog](https://app.langdb.ai/models) to discover all available models.

View File

@@ -1,134 +0,0 @@
---
title: Neatlogs Integration
description: Understand, debug, and share your CrewAI agent runs
icon: magnifying-glass-chart
---
# Introduction
Neatlogs helps you **see what your agent did**, **why**, and **share it**.
It captures every step: thoughts, tool calls, responses, evaluations. No raw logs. Just clear, structured traces. Great for debugging and collaboration.
## Why use Neatlogs?
CrewAI agents use multiple tools and reasoning steps. When something goes wrong, you need context — not just errors.
Neatlogs lets you:
- Follow the full decision path
- Add feedback directly on steps
- Chat with the trace using AI assistant
- Share runs publicly for feedback
- Turn insights into tasks
All in one place.
Manage your traces effortlessly
![Traces](/images/neatlogs-1.png)
![Trace Response](/images/neatlogs-2.png)
The best UX to view a CrewAI trace. Post comments anywhere you want. Use AI to debug.
![Trace Details](/images/neatlogs-3.png)
![Ai Chat Bot With A Trace](/images/neatlogs-4.png)
![Comments Drawer](/images/neatlogs-5.png)
## Core Features
- **Trace Viewer**: Track thoughts, tools, and decisions in sequence
- **Inline Comments**: Tag teammates on any trace step
- **Feedback & Evaluation**: Mark outputs as correct or incorrect
- **Error Highlighting**: Automatic flagging of API/tool failures
- **Task Conversion**: Convert comments into assigned tasks
- **Ask the Trace (AI)**: Chat with your trace using Neatlogs AI bot
- **Public Sharing**: Publish trace links to your community
## Quick Setup with CrewAI
<Steps>
<Step title="Sign Up & Get API Key">
Visit [neatlogs.com](https://neatlogs.com/?utm_source=crewAI-docs), create a project, copy the API key.
</Step>
<Step title="Install SDK">
```bash
pip install neatlogs
```
(Latest version 0.8.0, Python 3.8+; MIT license)
</Step>
<Step title="Initialize Neatlogs">
Before starting Crew agents, add:
```python
import neatlogs
neatlogs.init("YOUR_PROJECT_API_KEY")
```
Agents run as usual. Neatlogs captures everything automatically.
</Step>
</Steps>
## Under the Hood
According to GitHub, Neatlogs:
- Captures thoughts, tool calls, responses, errors, and token stats
- Supports AI-powered task generation and robust evaluation workflows
All with just two lines of code.
## Watch It Work
### 🔍 Full Demo (4min)
<iframe
width="100%"
height="315"
src="https://www.youtube.com/embed/8KDme9T2I7Q?si=b8oHteaBwFNs_Duk"
title="YouTube video player"
frameBorder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
allowFullScreen
></iframe>
### ⚙️ CrewAI Integration (30s)
<iframe
className="w-full aspect-video rounded-xl"
src="https://www.loom.com/embed/9c78b552af43452bb3e4783cb8d91230?sid=e9d7d370-a91a-49b0-809e-2f375d9e801d"
title="Loom video player"
frameBorder="0"
allowFullScreen
></iframe>
## Links & Support
- 📘 [Neatlogs Docs](https://docs.neatlogs.com/)
- 🔐 [Dashboard & API Key](https://app.neatlogs.com/)
- 🐦 [Follow on Twitter](https://twitter.com/neatlogs)
- 📧 Contact: hello@neatlogs.com
- 🛠 [GitHub SDK](https://github.com/NeatLogs/neatlogs)
## TL;DR
With just:
```bash
pip install neatlogs
```
```python
import neatlogs
neatlogs.init("YOUR_API_KEY")
```
you can now capture, understand, share, and act on your CrewAI agent runs in seconds.
No setup overhead. Full trace transparency. Full team collaboration.

View File

@@ -25,10 +25,6 @@ Observability is crucial for understanding how your CrewAI agents perform, ident
Session replays, metrics, and monitoring for agent development and production.
</Card>
<Card title="LangDB" icon="database" href="/en/observability/langdb">
End-to-end tracing for CrewAI workflows with automatic agent interaction capture.
</Card>
<Card title="OpenLIT" icon="magnifying-glass-chart" href="/en/observability/openlit">
OpenTelemetry-native monitoring with cost tracking and performance analytics.
</Card>

View File

@@ -35,7 +35,7 @@ os.environ['OTEL_SDK_DISABLED'] = 'true'
### Data Explanation:
| Defaulted | Data | Reason and Specifics |
|:----------|:------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------|
|-----------|-------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------|
| Yes | CrewAI and Python Version | Tracks software versions. Example: CrewAI v1.2.3, Python 3.8.10. No personal data. |
| Yes | Crew Metadata | Includes: randomly generated key and ID, process type (e.g., 'sequential', 'parallel'), boolean flag for memory usage (true/false), count of tasks, count of agents. All non-personal. |
| Yes | Agent Data | Includes: randomly generated key and ID, role name (should not include personal info), boolean settings (verbose, delegation enabled, code execution allowed), max iterations, max RPM, max retry limit, LLM info (see LLM Attributes), list of tool names (should not include personal info). No personal data. |

View File

@@ -20,10 +20,6 @@ These tools enable your agents to automate workflows, integrate with external pl
<Card title="Multion Tool" icon="window-restore" href="/en/tools/automation/multiontool">
Automate browser interactions and web-based workflows.
</Card>
<Card title="Zapier Actions Adapter" icon="bolt" href="/en/tools/automation/zapieractionstool">
Expose Zapier Actions as CrewAI tools for automation across thousands of apps.
</Card>
</CardGroup>
## **Common Use Cases**

View File

@@ -1,58 +0,0 @@
---
title: Zapier Actions Tool
description: The `ZapierActionsAdapter` exposes Zapier actions as CrewAI tools for automation.
icon: bolt
---
# `ZapierActionsAdapter`
## Description
Use the Zapier adapter to list and call Zapier actions as CrewAI tools. This enables agents to trigger automations across thousands of apps.
## Installation
This adapter is included with `crewai-tools`. No extra install required.
## Environment Variables
- `ZAPIER_API_KEY` (required): Zapier API key. Get one from the Zapier Actions dashboard at https://actions.zapier.com/ (create an account, then generate an API key). You can also pass `zapier_api_key` directly when constructing the adapter.
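If you prefer not to hard-code the key, a minimal sketch using the environment variable instead (the no-argument constructor is an assumption based on `ZAPIER_API_KEY` being the documented variable):
```python Code
import os

from crewai_tools.adapters.zapier_adapter import ZapierActionsAdapter

# Assumption: with ZAPIER_API_KEY set, the adapter can be constructed without an explicit key
os.environ["ZAPIER_API_KEY"] = "your_zapier_api_key"

adapter = ZapierActionsAdapter()
tools = adapter.tools()
```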
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools.adapters.zapier_adapter import ZapierActionsAdapter
adapter = ZapierActionsAdapter(api_key="your_zapier_api_key")
tools = adapter.tools()
agent = Agent(
role="Automator",
goal="Execute Zapier actions",
backstory="Automation specialist",
tools=tools,
verbose=True,
)
task = Task(
description="Create a new Google Sheet and add a row using Zapier actions",
expected_output="Confirmation with created resource IDs",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
```
## Notes & limits
- The adapter lists available actions for your key and creates `BaseTool` wrappers dynamically.
- Handle action-specific required fields in your task instructions or tool call.
- Rate limits depend on your Zapier plan; see the Zapier Actions docs.

View File

@@ -1,168 +0,0 @@
---
title: MongoDB Vector Search Tool
description: The `MongoDBVectorSearchTool` performs vector search on MongoDB Atlas with optional indexing helpers.
icon: "leaf"
---
# `MongoDBVectorSearchTool`
## Description
Perform vector similarity queries on MongoDB Atlas collections. Supports index creation helpers and bulk insert of embedded texts.
MongoDB Atlas supports native vector search. Learn more:
https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-overview/
## Installation
Install with the MongoDB extra:
```shell
pip install crewai-tools[mongodb]
```
or
```shell
uv add crewai-tools --extra mongodb
```
## Parameters
### Initialization
- `connection_string` (str, required)
- `database_name` (str, required)
- `collection_name` (str, required)
- `vector_index_name` (str, default `vector_index`)
- `text_key` (str, default `text`)
- `embedding_key` (str, default `embedding`)
- `dimensions` (int, default `1536`)
### Run Parameters
- `query` (str, required): Natural language query to embed and search.
## Quick start
```python Code
from crewai_tools import MongoDBVectorSearchTool
tool = MongoDBVectorSearchTool(
connection_string="mongodb+srv://...",
database_name="mydb",
collection_name="docs",
)
print(tool.run(query="how to create vector index"))
```
## Index creation helpers
Use `create_vector_search_index(...)` to provision an Atlas Vector Search index with the correct dimensions and similarity.
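A minimal sketch of that helper, mirroring the quick start above (1536 matches the default `dimensions`):
```python Code
from crewai_tools import MongoDBVectorSearchTool

tool = MongoDBVectorSearchTool(
    connection_string="mongodb+srv://...",
    database_name="mydb",
    collection_name="docs",
)
# Provision the Atlas Vector Search index once, before the first query
tool.create_vector_search_index(dimensions=1536)
```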
## Common issues
- Authentication failures: ensure your Atlas IP Access List allows your runner and the connection string includes credentials.
- Index not found: create the vector index first; name must match `vector_index_name`.
- Dimensions mismatch: align embedding model dimensions with `dimensions`.
## More examples
### Basic initialization
```python Code
from crewai_tools import MongoDBVectorSearchTool
tool = MongoDBVectorSearchTool(
database_name="example_database",
collection_name="example_collection",
connection_string="<your_mongodb_connection_string>",
)
```
### Custom query configuration
```python Code
from crewai import Agent
from crewai_tools import MongoDBVectorSearchConfig, MongoDBVectorSearchTool
query_config = MongoDBVectorSearchConfig(limit=10, oversampling_factor=2)
tool = MongoDBVectorSearchTool(
database_name="example_database",
collection_name="example_collection",
connection_string="<your_mongodb_connection_string>",
query_config=query_config,
vector_index_name="my_vector_index",
)
rag_agent = Agent(
name="rag_agent",
role="You are a helpful assistant that can answer questions with the help of the MongoDBVectorSearchTool.",
goal="...",
backstory="...",
tools=[tool],
)
```
### Preloading the database and creating the index
```python Code
import os
from crewai_tools import MongoDBVectorSearchTool
tool = MongoDBVectorSearchTool(
database_name="example_database",
collection_name="example_collection",
connection_string="<your_mongodb_connection_string>",
)
# Load text content from a local folder and add to MongoDB
texts = []
for fname in os.listdir("knowledge"):
path = os.path.join("knowledge", fname)
if os.path.isfile(path):
with open(path, "r", encoding="utf-8") as f:
texts.append(f.read())
tool.add_texts(texts)
# Create the Atlas Vector Search index (e.g., 3072 dims for text-embedding-3-large)
tool.create_vector_search_index(dimensions=3072)
```
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import MongoDBVectorSearchTool
tool = MongoDBVectorSearchTool(
connection_string="mongodb+srv://...",
database_name="mydb",
collection_name="docs",
)
agent = Agent(
role="RAG Agent",
goal="Answer using MongoDB vector search",
backstory="Knowledge retrieval specialist",
tools=[tool],
verbose=True,
)
task = Task(
description="Find relevant content for 'indexing guidance'",
expected_output="A concise answer citing the most relevant matches",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
result = crew.kickoff()
```

View File

@@ -32,14 +32,6 @@ These tools enable your agents to interact with various database systems, from t
<Card title="Weaviate Vector Search" icon="network-wired" href="/en/tools/database-data/weaviatevectorsearchtool">
Perform semantic search with Weaviate vector database.
</Card>
<Card title="MongoDB Vector Search" icon="leaf" href="/en/tools/database-data/mongodbvectorsearchtool">
Vector similarity search on MongoDB Atlas with indexing helpers.
</Card>
<Card title="SingleStore Search" icon="database" href="/en/tools/database-data/singlestoresearchtool">
Safe SELECT/SHOW queries on SingleStore with pooling and validation.
</Card>
</CardGroup>
## **Common Use Cases**

View File

@@ -1,61 +0,0 @@
---
title: SingleStore Search Tool
description: The `SingleStoreSearchTool` safely executes SELECT/SHOW queries on SingleStore with pooling.
icon: circle
---
# `SingleStoreSearchTool`
## Description
Execute read-only queries (`SELECT`/`SHOW`) against SingleStore with connection pooling and input validation.
## Installation
```shell
uv add crewai-tools[singlestore]
```
## Environment Variables
Variables like `SINGLESTOREDB_HOST`, `SINGLESTOREDB_USER`, `SINGLESTOREDB_PASSWORD`, etc., can be used, or `SINGLESTOREDB_URL` as a single DSN.
Generate the API key from the SingleStore dashboard, [docs here](https://docs.singlestore.com/cloud/reference/management-api/#generate-an-api-key).
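As an illustration, a sketch of configuring the connection through environment variables from Python (values are placeholders; `SINGLESTOREDB_DATABASE` is an assumed name following the pattern above):
```python Code
import os

# Placeholder credentials - replace with your workspace values
os.environ["SINGLESTOREDB_HOST"] = "svc-xxxx.svc.singlestore.com"
os.environ["SINGLESTOREDB_USER"] = "admin"
os.environ["SINGLESTOREDB_PASSWORD"] = "your_password"
os.environ["SINGLESTOREDB_DATABASE"] = "db"  # assumed variable name

# Or provide a single DSN instead of individual variables
# os.environ["SINGLESTOREDB_URL"] = "user:password@host:3306/db"
```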
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import SingleStoreSearchTool
tool = SingleStoreSearchTool(
tables=["products"],
host="host",
user="user",
password="pass",
database="db",
)
agent = Agent(
role="Analyst",
goal="Query SingleStore",
tools=[tool],
verbose=True,
)
task = Task(
description="List 5 products",
expected_output="5 rows as JSON/text",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
result = crew.kickoff()
```

View File

@@ -1,89 +0,0 @@
---
title: OCR Tool
description: The `OCRTool` extracts text from local images or image URLs using an LLM with vision.
icon: image
---
# `OCRTool`
## Description
Extract text from images (local path or URL). Uses a vision-capable LLM via CrewAI's LLM interface.
## Installation
No extra install beyond `crewai-tools`. Ensure your selected LLM supports vision.
## Parameters
### Run Parameters
- `image_path_url` (str, required): Local image path or HTTP(S) URL.
## Examples
### Direct usage
```python Code
from crewai_tools import OCRTool
print(OCRTool().run(image_path_url="/tmp/receipt.png"))
```
### With an agent
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import OCRTool
ocr = OCRTool()
agent = Agent(
role="OCR",
goal="Extract text",
tools=[ocr],
)
task = Task(
description="Extract text from https://example.com/invoice.jpg",
expected_output="All detected text in plain text",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
```
## Notes
- Ensure the selected LLM supports image inputs.
- For large images, consider downscaling to reduce token usage.
- You can pass a specific LLM instance to the tool (e.g., `LLM(model="gpt-4o")`) if needed, matching the README guidance.
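A minimal sketch of that option (the `llm` argument name is assumed from the README guidance):
```python Code
from crewai import LLM
from crewai_tools import OCRTool

# Assumption: `llm=` selects the vision-capable model the tool should use
ocr = OCRTool(llm=LLM(model="gpt-4o"))
print(ocr.run(image_path_url="https://example.com/receipt.png"))
```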
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import OCRTool
tool = OCRTool()
agent = Agent(
role="OCR Specialist",
goal="Extract text from images",
backstory="Visionenabled analyst",
tools=[tool],
verbose=True,
)
task = Task(
description="Extract text from https://example.com/receipt.png",
expected_output="All detected text in plain text",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
```

View File

@@ -52,14 +52,6 @@ These tools enable your agents to work with various file formats and document ty
<Card title="Directory Read Tool" icon="folder" href="/en/tools/file-document/directoryreadtool">
Read and list directory contents, file structures, and metadata.
</Card>
<Card title="OCR Tool" icon="image" href="/en/tools/file-document/ocrtool">
Extract text from images (local files or URLs) using a vision-capable LLM.
</Card>
<Card title="PDF Text Writing Tool" icon="file-pdf" href="/en/tools/file-document/pdf-text-writing-tool">
Write text at specific coordinates in PDFs, with optional custom fonts.
</Card>
</CardGroup>
## **Common Use Cases**

View File

@@ -1,76 +0,0 @@
---
title: PDF Text Writing Tool
description: The `PDFTextWritingTool` writes text to specific positions in a PDF, supporting custom fonts.
icon: file-pdf
---
# `PDFTextWritingTool`
## Description
Write text at precise coordinates on a PDF page, optionally embedding a custom TrueType font.
## Parameters
### Run Parameters
- `pdf_path` (str, required): Path to the input PDF.
- `text` (str, required): Text to add.
- `position` (tuple[int, int], required): `(x, y)` coordinates.
- `font_size` (int, default `12`)
- `font_color` (str, default `"0 0 0 rg"`)
- `font_name` (str, default `"F1"`)
- `font_file` (str, optional): Path to `.ttf` file.
- `page_number` (int, default `0`)
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import PDFTextWritingTool
tool = PDFTextWritingTool()
agent = Agent(
role="PDF Editor",
goal="Annotate PDFs",
backstory="Documentation specialist",
tools=[tool],
verbose=True,
)
task = Task(
description="Write 'CONFIDENTIAL' at (72, 720) on page 1 of ./sample.pdf",
expected_output="Confirmation message",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
result = crew.kickoff()
```
### Direct usage
```python Code
from crewai_tools import PDFTextWritingTool
PDFTextWritingTool().run(
pdf_path="./input.pdf",
text="CONFIDENTIAL",
position=(72, 720),
font_size=18,
page_number=0,
)
```
## Tips
- Coordinate origin is the bottom-left corner.
- If using a custom font (`font_file`), ensure it is a valid `.ttf`.
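For example, a sketch that embeds a custom font (the `.ttf` path is a placeholder; parameters follow the Run Parameters list above):
```python Code
from crewai_tools import PDFTextWritingTool

# Write small text near the bottom-left of the first page using an embedded TrueType font
PDFTextWritingTool().run(
    pdf_path="./input.pdf",
    text="Reviewed",
    position=(72, 72),
    font_size=10,
    font_file="./fonts/DejaVuSans.ttf",  # placeholder path to a valid .ttf
    font_name="F2",
    page_number=0,
)
```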

View File

@@ -1,112 +0,0 @@
---
title: Arxiv Paper Tool
description: The `ArxivPaperTool` searches arXiv for papers matching a query and optionally downloads PDFs.
icon: box-archive
---
# `ArxivPaperTool`
## Description
The `ArxivPaperTool` queries the arXiv API for academic papers and returns compact, readable results. It can also optionally download PDFs to disk.
## Installation
This tool has no special installation beyond `crewai-tools`.
```shell
uv add crewai-tools
```
No API key is required. This tool uses the public arXiv Atom API.
## Steps to Get Started
1. Initialize the tool.
2. Provide a `search_query` (e.g., "transformer neural network").
3. Optionally set `max_results` (1-100) and enable PDF downloads in the constructor.
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import ArxivPaperTool
tool = ArxivPaperTool(
download_pdfs=False,
save_dir="./arxiv_pdfs",
use_title_as_filename=True,
)
agent = Agent(
role="Researcher",
goal="Find relevant arXiv papers",
backstory="Expert at literature discovery",
tools=[tool],
verbose=True,
)
task = Task(
description="Search arXiv for 'transformer neural network' and list top 5 results.",
expected_output="A concise list of 5 relevant papers with titles, links, and summaries.",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
```
### Direct usage (without Agent)
```python Code
from crewai_tools import ArxivPaperTool
tool = ArxivPaperTool(
download_pdfs=True,
save_dir="./arxiv_pdfs",
)
print(tool.run(search_query="mixture of experts", max_results=3))
```
## Parameters
### Initialization Parameters
- `download_pdfs` (bool, default `False`): Whether to download PDFs.
- `save_dir` (str, default `./arxiv_pdfs`): Directory to save PDFs.
- `use_title_as_filename` (bool, default `False`): Use paper titles for filenames.
### Run Parameters
- `search_query` (str, required): The arXiv search query.
- `max_results` (int, default `5`, range 1-100): Number of results.
## Output format
The tool returns a human-readable list of papers with:
- Title
- Link (abs page)
- Snippet/summary (truncated)
When `download_pdfs=True`, PDFs are saved to disk and the summary mentions saved files.
## Usage Notes
- The tool returns formatted text with key metadata and links.
- When `download_pdfs=True`, PDFs will be stored in `save_dir`.
## Troubleshooting
- If you receive a network timeout, retry or reduce `max_results`.
- Invalid XML errors indicate an arXiv response parse issue; try a simpler query.
- File system errors (e.g., permission denied) may occur when saving PDFs; ensure `save_dir` is writable.
## Related links
- arXiv API docs: https://info.arxiv.org/help/api/index.html
## Error Handling
- Network issues, invalid XML, and OS errors are handled with informative messages.

View File

@@ -23,7 +23,7 @@ pip install 'crewai[tools]'
To effectively use the `BraveSearchTool`, follow these steps:
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a Brave Search API key at https://api.search.brave.com/app/keys (sign in to generate a key).
2. **API Key Acquisition**: Acquire a Brave Search API key by registering at [Brave Search API](https://api.search.brave.com/app/keys).
3. **Environment Configuration**: Store your obtained API key in an environment variable named `BRAVE_API_KEY` to facilitate its use by the tool.
## Example

View File

@@ -1,80 +0,0 @@
---
title: Databricks SQL Query Tool
description: The `DatabricksQueryTool` executes SQL queries against Databricks workspace tables.
icon: trowel-bricks
---
# `DatabricksQueryTool`
## Description
Run SQL against Databricks workspace tables with either CLI profile or direct host/token authentication.
## Installation
```shell
uv add crewai-tools[databricks-sdk]
```
## Environment Variables
- `DATABRICKS_CONFIG_PROFILE` or (`DATABRICKS_HOST` + `DATABRICKS_TOKEN`)
Create a personal access token and find host details in the Databricks workspace under User Settings → Developer.
Docs: https://docs.databricks.com/en/dev-tools/auth/pat.html
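For example, a minimal sketch of direct host/token authentication (values are placeholders):
```python Code
import os

# Direct host/token authentication - placeholders only
os.environ["DATABRICKS_HOST"] = "https://dbc-xxxxxxxx-xxxx.cloud.databricks.com"
os.environ["DATABRICKS_TOKEN"] = "dapiXXXXXXXXXXXXXXXX"

# Or rely on a profile from ~/.databrickscfg instead
# os.environ["DATABRICKS_CONFIG_PROFILE"] = "DEFAULT"
```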
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import DatabricksQueryTool
tool = DatabricksQueryTool(
default_catalog="main",
default_schema="default",
)
agent = Agent(
role="Data Analyst",
goal="Query Databricks",
tools=[tool],
verbose=True,
)
task = Task(
description="SELECT * FROM my_table LIMIT 10",
expected_output="10 rows",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
result = crew.kickoff()
print(result)
```
## Parameters
- `query` (required): SQL query to execute
- `catalog` (optional): Override default catalog
- `db_schema` (optional): Override default schema
- `warehouse_id` (optional): Override default SQL warehouse
- `row_limit` (optional): Maximum rows to return (default: 1000)
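A sketch of passing these as keyword arguments on a direct call (assuming `run` accepts the parameters listed above):
```python Code
from crewai_tools import DatabricksQueryTool

tool = DatabricksQueryTool(default_catalog="main", default_schema="default")

# Per-call overrides; see the parameter list above
result = tool.run(
    query="SELECT id, name FROM my_table LIMIT 10",
    db_schema="default",
    row_limit=100,
)
print(result)
```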
## Defaults on initialization
- `default_catalog`
- `default_schema`
- `default_warehouse_id`
### Error handling & tips
- Authentication errors: verify `DATABRICKS_HOST` begins with `https://` and token is valid.
- Permissions: ensure your SQL warehouse and schema are accessible by your token.
- Limits: long-running queries should be avoided in agent loops; add filters/limits.

View File

@@ -24,8 +24,6 @@ pip install 'crewai[tools]'
This command installs the necessary package to run the GithubSearchTool along with any other tools included in the crewai_tools package.
Get a GitHub Personal Access Token at https://github.com/settings/tokens (Developer settings → Fine-grained tokens or classic tokens).
## Example
Here's how you can use the GithubSearchTool to perform semantic searches within a GitHub repository:

View File

@@ -44,26 +44,6 @@ These tools enable your agents to search the web, research topics, and find info
<Card title="YouTube Video Search" icon="play" href="/en/tools/search-research/youtubevideosearchtool">
Find and analyze YouTube videos by topic, keyword, or criteria.
</Card>
<Card title="Tavily Search Tool" icon="magnifying-glass" href="/en/tools/search-research/tavilysearchtool">
Comprehensive web search using Tavily's AI-powered search API.
</Card>
<Card title="Tavily Extractor Tool" icon="file-text" href="/en/tools/search-research/tavilyextractortool">
Extract structured content from web pages using the Tavily API.
</Card>
<Card title="Arxiv Paper Tool" icon="box-archive" href="/en/tools/search-research/arxivpapertool">
Search arXiv and optionally download PDFs.
</Card>
<Card title="SerpApi Google Search" icon="search" href="/en/tools/search-research/serpapi-googlesearchtool">
Google search via SerpApi with structured results.
</Card>
<Card title="SerpApi Google Shopping" icon="cart-shopping" href="/en/tools/search-research/serpapi-googleshoppingtool">
Google Shopping queries via SerpApi.
</Card>
</CardGroup>
## **Common Use Cases**
@@ -75,19 +55,17 @@ These tools enable your agents to search the web, research topics, and find info
- **Academic Research**: Find scholarly articles and technical papers
```python
from crewai_tools import SerperDevTool, GithubSearchTool, YoutubeVideoSearchTool, TavilySearchTool, TavilyExtractorTool
from crewai_tools import SerperDevTool, GithubSearchTool, YoutubeVideoSearchTool
# Create research tools
web_search = SerperDevTool()
code_search = GithubSearchTool()
video_research = YoutubeVideoSearchTool()
tavily_search = TavilySearchTool()
content_extractor = TavilyExtractorTool()
# Add to your agent
agent = Agent(
role="Research Analyst",
tools=[web_search, code_search, video_research, tavily_search, content_extractor],
tools=[web_search, code_search, video_research],
goal="Gather comprehensive information on any topic"
)
```

View File

@@ -1,65 +0,0 @@
---
title: SerpApi Google Search Tool
description: The `SerpApiGoogleSearchTool` performs Google searches using the SerpApi service.
icon: google
---
# `SerpApiGoogleSearchTool`
## Description
Use the `SerpApiGoogleSearchTool` to run Google searches with SerpApi and retrieve structured results. Requires a SerpApi API key.
## Installation
```shell
uv add crewai-tools[serpapi]
```
## Environment Variables
- `SERPAPI_API_KEY` (required): API key for SerpApi. Create one at https://serpapi.com/ (free tier available).
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import SerpApiGoogleSearchTool
tool = SerpApiGoogleSearchTool()
agent = Agent(
role="Researcher",
goal="Answer questions using Google search",
backstory="Search specialist",
tools=[tool],
verbose=True,
)
task = Task(
description="Search for the latest CrewAI releases",
expected_output="A concise list of relevant results with titles and links",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
```
## Notes
- Set `SERPAPI_API_KEY` in the environment. Create a key at https://serpapi.com/
- This tool wraps SerpApi and returns structured search results.
- See also Google Shopping via SerpApi: `/en/tools/search-research/serpapi-googleshoppingtool`
## Parameters
### Run Parameters
- `search_query` (str, required): The Google query.
- `location` (str, optional): Geographic location parameter.
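A direct-usage sketch without an agent (assuming `run` accepts the run parameters listed above):
```python Code
from crewai_tools import SerpApiGoogleSearchTool

tool = SerpApiGoogleSearchTool()
# `location` is optional and shown only for illustration
print(tool.run(search_query="CrewAI releases", location="Austin, Texas, United States"))
```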

View File

@@ -1,61 +0,0 @@
---
title: SerpApi Google Shopping Tool
description: The `SerpApiGoogleShoppingTool` searches Google Shopping results using SerpApi.
icon: cart-shopping
---
# `SerpApiGoogleShoppingTool`
## Description
Leverage `SerpApiGoogleShoppingTool` to query Google Shopping via SerpApi and retrieve product-oriented results.
## Installation
```shell
uv add crewai-tools[serpapi]
```
## Environment Variables
- `SERPAPI_API_KEY` (required): API key for SerpApi. Create one at https://serpapi.com/ (free tier available).
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import SerpApiGoogleShoppingTool
tool = SerpApiGoogleShoppingTool()
agent = Agent(
role="Shopping Researcher",
goal="Find relevant products",
backstory="Expert in product search",
tools=[tool],
verbose=True,
)
task = Task(
description="Search Google Shopping for 'wireless noise-canceling headphones'",
expected_output="Top relevant products with titles and links",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
```
## Notes
- Set `SERPAPI_API_KEY` in the environment. Create a key at https://serpapi.com/
- See also Google Web Search via SerpApi: `/en/tools/search-research/serpapi-googlesearchtool`
## Parameters
### Run Parameters
- `search_query` (str, required): Product search query.
- `location` (str, optional): Geographic location parameter.
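Similarly, the tool can be invoked directly without an agent (assuming `run` accepts the parameters listed above):
```python Code
from crewai_tools import SerpApiGoogleShoppingTool

tool = SerpApiGoogleShoppingTool()
print(tool.run(search_query="wireless noise-canceling headphones"))
```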

View File

@@ -6,6 +6,10 @@ icon: google
# `SerperDevTool`
<Note>
We are still working on improving tools, so there might be unexpected behavior or changes in the future.
</Note>
## Description
This tool is designed to perform a semantic search for a specified query from a text's content across the internet. It utilizes the [serper.dev](https://serper.dev) API
@@ -13,12 +17,6 @@ to fetch and display the most relevant search results based on the query provide
## Installation
To effectively use the `SerperDevTool`, follow these steps:
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a `serper.dev` API key at https://serper.dev/ (free tier available).
3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool.
To incorporate this tool into your project, follow the installation instructions below:
```shell
@@ -36,6 +34,14 @@ from crewai_tools import SerperDevTool
tool = SerperDevTool()
```
## Steps to Get Started
To effectively use the `SerperDevTool`, follow these steps:
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a `serper.dev` API key by registering for a free account at `serper.dev`.
3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool.
## Parameters
The `SerperDevTool` comes with several parameters that will be passed to the API:

View File

@@ -1,139 +0,0 @@
---
title: "Tavily Extractor Tool"
description: "Extract structured content from web pages using the Tavily API"
icon: square-poll-horizontal
---
The `TavilyExtractorTool` allows CrewAI agents to extract structured content from web pages using the Tavily API. It can process single URLs or lists of URLs and provides options for controlling the extraction depth and including images.
## Installation
To use the `TavilyExtractorTool`, you need to install the `tavily-python` library:
```shell
pip install 'crewai[tools]' tavily-python
```
You also need to set your Tavily API key as an environment variable:
```bash
export TAVILY_API_KEY='your-tavily-api-key'
```
## Example Usage
Here's how to initialize and use the `TavilyExtractorTool` within a CrewAI agent:
```python
import os
from crewai import Agent, Task, Crew
from crewai_tools import TavilyExtractorTool
# Ensure TAVILY_API_KEY is set in your environment
# os.environ["TAVILY_API_KEY"] = "YOUR_API_KEY"
# Initialize the tool
tavily_tool = TavilyExtractorTool()
# Create an agent that uses the tool
extractor_agent = Agent(
role='Web Content Extractor',
goal='Extract key information from specified web pages',
backstory='You are an expert at extracting relevant content from websites using the Tavily API.',
tools=[tavily_tool],
verbose=True
)
# Define a task for the agent
extract_task = Task(
description='Extract the main content from the URL https://example.com using basic extraction depth.',
expected_output='A JSON string containing the extracted content from the URL.',
agent=extractor_agent
)
# Create and run the crew
crew = Crew(
agents=[extractor_agent],
tasks=[extract_task],
verbose=True
)
result = crew.kickoff()
print(result)
```
## Configuration Options
The `TavilyExtractorTool` accepts the following arguments:
- `urls` (Union[List[str], str]): **Required**. A single URL string or a list of URL strings to extract data from.
- `include_images` (Optional[bool]): Whether to include images in the extraction results. Defaults to `False`.
- `extract_depth` (Literal["basic", "advanced"]): The depth of extraction. Use `"basic"` for faster, surface-level extraction or `"advanced"` for more comprehensive extraction. Defaults to `"basic"`.
- `timeout` (int): The maximum time in seconds to wait for the extraction request to complete. Defaults to `60`.
## Advanced Usage
### Multiple URLs with Advanced Extraction
```python
# Example with multiple URLs and advanced extraction
multi_extract_task = Task(
description='Extract content from https://example.com and https://anotherexample.org using advanced extraction.',
expected_output='A JSON string containing the extracted content from both URLs.',
agent=extractor_agent
)
# Configure the tool with custom parameters
custom_extractor = TavilyExtractorTool(
extract_depth='advanced',
include_images=True,
timeout=120
)
agent_with_custom_tool = Agent(
role="Advanced Content Extractor",
goal="Extract comprehensive content with images",
tools=[custom_extractor]
)
```
### Tool Parameters
You can customize the tool's behavior by setting parameters during initialization:
```python
# Initialize with custom configuration
extractor_tool = TavilyExtractorTool(
extract_depth='advanced', # More comprehensive extraction
include_images=True, # Include image results
timeout=90 # Custom timeout
)
```
## Features
- **Single or Multiple URLs**: Extract content from one URL or process multiple URLs in a single request
- **Configurable Depth**: Choose between basic (fast) and advanced (comprehensive) extraction modes
- **Image Support**: Optionally include images in the extraction results
- **Structured Output**: Returns well-formatted JSON containing the extracted content
- **Error Handling**: Robust handling of network timeouts and extraction errors
## Response Format
The tool returns a JSON string representing the structured data extracted from the provided URL(s). The exact structure depends on the content of the pages and the `extract_depth` used.
Common response elements include:
- **Title**: The page title
- **Content**: Main text content of the page
- **Images**: Image URLs and metadata (when `include_images=True`)
- **Metadata**: Additional page information like author, description, etc.
## Use Cases
- **Content Analysis**: Extract and analyze content from competitor websites
- **Research**: Gather structured data from multiple sources for analysis
- **Content Migration**: Extract content from existing websites for migration
- **Monitoring**: Regular extraction of content for change detection
- **Data Collection**: Systematic extraction of information from web sources
Refer to the [Tavily API documentation](https://docs.tavily.com/docs/tavily-api/python-sdk#extract) for detailed information about the response structure and available options.

View File

@@ -1,124 +0,0 @@
---
title: "Tavily Search Tool"
description: "Perform comprehensive web searches using the Tavily Search API"
icon: "magnifying-glass"
---
The `TavilySearchTool` provides an interface to the Tavily Search API, enabling CrewAI agents to perform comprehensive web searches. It allows for specifying search depth, topics, time ranges, included/excluded domains, and whether to include direct answers, raw content, or images in the results.
## Installation
To use the `TavilySearchTool`, you need to install the `tavily-python` library:
```shell
pip install 'crewai[tools]' tavily-python
```
## Environment Variables
Ensure your Tavily API key is set as an environment variable:
```bash
export TAVILY_API_KEY='your_tavily_api_key'
```
Get an API key at https://app.tavily.com/ (sign up, then create a key).
## Example Usage
Here's how to initialize and use the `TavilySearchTool` within a CrewAI agent:
```python
import os
from crewai import Agent, Task, Crew
from crewai_tools import TavilySearchTool
# Ensure the TAVILY_API_KEY environment variable is set
# os.environ["TAVILY_API_KEY"] = "YOUR_TAVILY_API_KEY"
# Initialize the tool
tavily_tool = TavilySearchTool()
# Create an agent that uses the tool
researcher = Agent(
role='Market Researcher',
goal='Find information about the latest AI trends',
backstory='An expert market researcher specializing in technology.',
tools=[tavily_tool],
verbose=True
)
# Create a task for the agent
research_task = Task(
description='Search for the top 3 AI trends in 2024.',
expected_output='A JSON report summarizing the top 3 AI trends found.',
agent=researcher
)
# Form the crew and kick it off
crew = Crew(
agents=[researcher],
tasks=[research_task],
verbose=True
)
result = crew.kickoff()
print(result)
```
## Configuration Options
The `TavilySearchTool` accepts the following arguments during initialization or when calling the `run` method:
- `query` (str): **Required**. The search query string.
- `search_depth` (Literal["basic", "advanced"], optional): The depth of the search. Defaults to `"basic"`.
- `topic` (Literal["general", "news", "finance"], optional): The topic to focus the search on. Defaults to `"general"`.
- `time_range` (Literal["day", "week", "month", "year"], optional): The time range for the search. Defaults to `None`.
- `days` (int, optional): The number of days to search back. Relevant if `time_range` is not set. Defaults to `7`.
- `max_results` (int, optional): The maximum number of search results to return. Defaults to `5`.
- `include_domains` (Sequence[str], optional): A list of domains to prioritize in the search. Defaults to `None`.
- `exclude_domains` (Sequence[str], optional): A list of domains to exclude from the search. Defaults to `None`.
- `include_answer` (Union[bool, Literal["basic", "advanced"]], optional): Whether to include a direct answer synthesized from the search results. Defaults to `False`.
- `include_raw_content` (bool, optional): Whether to include the raw HTML content of the searched pages. Defaults to `False`.
- `include_images` (bool, optional): Whether to include image results. Defaults to `False`.
- `timeout` (int, optional): The request timeout in seconds. Defaults to `60`.
## Advanced Usage
You can configure the tool with custom parameters:
```python
# Example: Initialize with specific parameters
custom_tavily_tool = TavilySearchTool(
search_depth='advanced',
max_results=10,
include_answer=True
)
# The agent will use these defaults
agent_with_custom_tool = Agent(
role="Advanced Researcher",
goal="Conduct detailed research with comprehensive results",
tools=[custom_tavily_tool]
)
```
## Features
- **Comprehensive Search**: Access to Tavily's powerful search index
- **Configurable Depth**: Choose between basic and advanced search modes
- **Topic Filtering**: Focus searches on general, news, or finance topics
- **Time Range Control**: Limit results to specific time periods
- **Domain Control**: Include or exclude specific domains
- **Direct Answers**: Get synthesized answers from search results
- **Content Filtering**: Prevent context window issues with automatic content truncation
## Response Format
The tool returns search results as a JSON string containing:
- Search results with titles, URLs, and content snippets
- Optional direct answers to queries
- Optional image results
- Optional raw HTML content (when enabled)
Content for each result is automatically truncated to prevent context window issues while maintaining the most relevant information.

View File

@@ -1,111 +0,0 @@
---
title: Bright Data Tools
description: Bright Data integrations for SERP search, Web Unlocker scraping, and Dataset API.
icon: spider
---
# Bright Data Tools
This set of tools integrates Bright Data services for web extraction.
## Installation
```shell
uv add crewai-tools requests aiohttp
```
## Environment Variables
- `BRIGHT_DATA_API_KEY` (required)
- `BRIGHT_DATA_ZONE` (for SERP/Web Unlocker)
Create credentials at https://brightdata.com/ (sign up, then create an API token and zone).
See their docs: https://developers.brightdata.com/
## Included Tools
- `BrightDataSearchTool`: SERP search (Google/Bing/Yandex) with geo/language/device options.
- `BrightDataWebUnlockerTool`: Scrape pages with anti-bot bypass and rendering.
- `BrightDataDatasetTool`: Run Dataset API jobs and fetch results.
## Examples
### SERP Search
```python Code
from crewai_tools import BrightDataSearchTool
tool = BrightDataSearchTool(
query="CrewAI",
country="us",
)
print(tool.run())
```
### Web Unlocker
```python Code
from crewai_tools import BrightDataWebUnlockerTool
tool = BrightDataWebUnlockerTool(
url="https://example.com",
format="markdown",
)
print(tool.run(url="https://example.com"))
```
### Dataset API
```python Code
from crewai_tools import BrightDataDatasetTool
tool = BrightDataDatasetTool(
dataset_type="ecommerce",
url="https://example.com/product",
)
print(tool.run())
```
## Troubleshooting
- 401/403: verify `BRIGHT_DATA_API_KEY` and `BRIGHT_DATA_ZONE`.
- Empty/blocked content: enable rendering or try a different zone.
## Example
```python Code
from crewai import Agent, Task, Crew
from crewai_tools import BrightDataSearchTool
tool = BrightDataSearchTool(
query="CrewAI",
country="us",
)
agent = Agent(
role="Web Researcher",
goal="Search with Bright Data",
backstory="Finds reliable results",
tools=[tool],
verbose=True,
)
task = Task(
description="Search for CrewAI and summarize top results",
expected_output="Short summary with links",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
)
result = crew.kickoff()
```

View File

@@ -60,10 +60,6 @@ These tools enable your agents to interact with the web, extract data from websi
<Card title="Oxylabs Scraper Tool" icon="globe" href="/en/tools/web-scraping/oxylabsscraperstool">
Access web data at scale with Oxylabs.
</Card>
<Card title="Bright Data Tools" icon="spider" href="/en/tools/web-scraping/brightdata-tools">
SERP search, Web Unlocker, and Dataset API integrations.
</Card>
</CardGroup>
## **Common Use Cases**

View File

@@ -1,100 +0,0 @@
---
title: Serper Scrape Website
description: The `SerperScrapeWebsiteTool` is designed to scrape websites and extract clean, readable content using Serper's scraping API.
icon: globe
---
# `SerperScrapeWebsiteTool`
## Description
This tool is designed to scrape website content and extract clean, readable text from any website URL. It utilizes the [serper.dev](https://serper.dev) scraping API to fetch and process web pages, optionally including markdown formatting for better structure and readability.
## Installation
To effectively use the `SerperScrapeWebsiteTool`, follow these steps:
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
2. **API Key Acquisition**: Acquire a `serper.dev` API key by registering for an account at `serper.dev`.
3. **Environment Configuration**: Store your obtained API key in an environment variable named `SERPER_API_KEY` to facilitate its use by the tool.
To incorporate this tool into your project, follow the installation instructions below:
```shell
pip install 'crewai[tools]'
```
## Example
The following example demonstrates how to initialize the tool and scrape a website:
```python Code
from crewai_tools import SerperScrapeWebsiteTool
# Initialize the tool for website scraping capabilities
tool = SerperScrapeWebsiteTool()
# Scrape a website with markdown formatting
result = tool.run(url="https://example.com", include_markdown=True)
```
## Arguments
The `SerperScrapeWebsiteTool` accepts the following arguments:
- **url**: Required. The URL of the website to scrape.
- **include_markdown**: Optional. Whether to include markdown formatting in the scraped content. Defaults to `True`.
## Example with Parameters
Here is an example demonstrating how to use the tool with different parameters:
```python Code
from crewai_tools import SerperScrapeWebsiteTool
tool = SerperScrapeWebsiteTool()
# Scrape with markdown formatting (default)
markdown_result = tool.run(
url="https://docs.crewai.com",
include_markdown=True
)
# Scrape without markdown formatting for plain text
plain_result = tool.run(
url="https://docs.crewai.com",
include_markdown=False
)
print("Markdown formatted content:")
print(markdown_result)
print("\nPlain text content:")
print(plain_result)
```
## Use Cases
The `SerperScrapeWebsiteTool` is particularly useful for:
- **Content Analysis**: Extract and analyze website content for research purposes
- **Data Collection**: Gather structured information from web pages
- **Documentation Processing**: Convert web-based documentation into readable formats
- **Competitive Analysis**: Scrape competitor websites for market research
- **Content Migration**: Extract content from existing websites for migration purposes
## Error Handling
The tool includes comprehensive error handling for:
- **Network Issues**: Handles connection timeouts and network errors gracefully
- **API Errors**: Provides detailed error messages for API-related issues
- **Invalid URLs**: Validates and reports issues with malformed URLs
- **Authentication**: Clear error messages for missing or invalid API keys
## Security Considerations
- Always store your `SERPER_API_KEY` in environment variables, never hardcode it in your source code
- Be mindful of rate limits imposed by the Serper API
- Respect robots.txt and website terms of service when scraping content
- Consider implementing delays between requests for large-scale scraping operations

Binary file not shown.

Before

Width:  |  Height:  |  Size: 127 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 117 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 222 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 329 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 590 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 216 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 277 KiB

View File

@@ -76,7 +76,6 @@ Exemplo:
crewai train -n 10 -f my_training_data.pkl
```
```python
# Exemplo de uso programático do comando train
n_iterations = 2
inputs = {"topic": "Treinamento CrewAI"}
@@ -84,13 +83,12 @@ filename = "seu_modelo.pkl"
try:
SuaCrew().crew().train(
n_iterations=n_iterations,
inputs=inputs,
n_iterations=n_iterations,
inputs=inputs,
filename=filename
)
except Exception as e:
raise Exception(f"Ocorreu um erro ao treinar a crew: {e}")
```
### 4. Replay
@@ -103,7 +101,7 @@ crewai replay [OPTIONS]
- `-t, --task_id TEXT`: Reexecuta o crew a partir deste task ID, incluindo todas as tarefas subsequentes
Exemplo:
```shell Terminal
```shell Terminal
crewai replay -t task_123456
```
@@ -149,7 +147,7 @@ crewai test [OPTIONS]
- `-m, --model TEXT`: Modelo LLM para executar os testes no Crew (padrão: "gpt-4o-mini")
Exemplo:
```shell Terminal
```shell Terminal
crewai test -n 5 -m gpt-3.5-turbo
```
@@ -203,7 +201,10 @@ def crew(self) -> Crew:
Implemente o crew ou flow no [CrewAI Enterprise](https://app.crewai.com).
- **Autenticação**: Você precisa estar autenticado para implementar no CrewAI Enterprise.
Você pode fazer login ou criar uma conta com:
```shell Terminal
crewai signup
```
Caso já tenha uma conta, você pode fazer login com:
```shell Terminal
crewai login
```
@@ -250,7 +251,7 @@ Você deve estar autenticado no CrewAI Enterprise para usar estes comandos de ge
- **Implantar o Crew**: Depois de autenticado, você pode implantar seu crew ou flow no CrewAI Enterprise.
```shell Terminal
crewai deploy push
```
```
- Inicia o processo de deployment na plataforma CrewAI Enterprise.
- Após a iniciação bem-sucedida, será exibida a mensagem Deployment created successfully! juntamente com o Nome do Deployment e um Deployment ID (UUID) único.
@@ -323,83 +324,4 @@ Ao escolher um provedor, o CLI solicitará que você informe o nome da chave e a
Veja o seguinte link para o nome de chave de cada provedor:
* [LiteLLM Providers](https://docs.litellm.ai/docs/providers)
### 12. Gerenciamento de Configuração
Gerencie as configurações do CLI para CrewAI.
```shell Terminal
crewai config [COMANDO] [OPÇÕES]
```
#### Comandos:
- `list`: Exibir todos os parâmetros de configuração do CLI
```shell Terminal
crewai config list
```
- `set`: Definir um parâmetro de configuração do CLI
```shell Terminal
crewai config set <chave> <valor>
```
- `reset`: Redefinir todos os parâmetros de configuração do CLI para valores padrão
```shell Terminal
crewai config reset
```
#### Parâmetros de Configuração Disponíveis
- `enterprise_base_url`: URL base da instância CrewAI Enterprise
- `oauth2_provider`: Provedor OAuth2 usado para autenticação (ex: workos, okta, auth0)
- `oauth2_audience`: Valor de audiência OAuth2, tipicamente usado para identificar a API ou recurso de destino
- `oauth2_client_id`: ID do cliente OAuth2 emitido pelo provedor, usado durante solicitações de autenticação
- `oauth2_domain`: Domínio do provedor OAuth2 (ex: sua-org.auth0.com) usado para emissão de tokens
#### Exemplos
Exibir configuração atual:
```shell Terminal
crewai config list
```
Exemplo de saída:
```
CrewAI CLI Configuration
┏━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
┃ Setting ┃ Value ┃ Description ┃
┡━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩
│ enterprise_base_url│ https://app.crewai.com │ Base URL of the CrewAI Enterprise instance │
│ org_name │ Not set │ Name of the currently active organization │
│ org_uuid │ Not set │ UUID of the currently active organization │
│ oauth2_provider │ workos │ OAuth2 provider used for authentication (e.g., workos, okta, auth0). │
│ oauth2_audience │ client_01YYY │ OAuth2 audience value, typically used to identify the target API or resource. │
│ oauth2_client_id │ client_01XXX │ OAuth2 client ID issued by the provider, used during authentication requests. │
│ oauth2_domain │ login.crewai.com │ OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens. │
```
Definir a URL base do enterprise:
```shell Terminal
crewai config set enterprise_base_url https://minha-empresa.crewai.com
```
Definir provedor OAuth2:
```shell Terminal
crewai config set oauth2_provider auth0
```
Definir domínio OAuth2:
```shell Terminal
crewai config set oauth2_domain minha-empresa.auth0.com
```
Redefinir todas as configurações para padrões:
```shell Terminal
crewai config reset
```
<Note>
As configurações são armazenadas em `~/.config/crewai/settings.json`. Algumas configurações como nome da organização e UUID são somente leitura e gerenciadas através de comandos de autenticação e organização. Configurações relacionadas ao repositório de ferramentas são ocultas e não podem ser definidas diretamente pelo usuário.
</Note>
* [LiteLLM Providers](https://docs.litellm.ai/docs/providers)

View File

@@ -20,7 +20,8 @@ Uma crew no crewAI representa um grupo colaborativo de agentes trabalhando em co
| **Function Calling LLM** _(opcional)_ | `function_calling_llm` | Se definido, a crew utilizará este LLM para invocar funções das ferramentas para todos os agentes da crew. Cada agente pode ter seu próprio LLM, que substitui o LLM da crew para chamadas de função. |
| **Config** _(opcional)_ | `config` | Configurações opcionais para a crew, no formato `Json` ou `Dict[str, Any]`. |
| **Max RPM** _(opcional)_ | `max_rpm` | Número máximo de requisições por minuto que a crew respeita durante a execução. O padrão é `None`. |
| **Memory** _(opcional)_ | `memory` | Utilizada para armazenar memórias de execução (curto prazo, longo prazo, memória de entidade). | |
| **Memory** _(opcional)_ | `memory` | Utilizada para armazenar memórias de execução (curto prazo, longo prazo, memória de entidade). |
| **Memory Config** _(opcional)_ | `memory_config` | Configuração para o provedor de memória a ser utilizada pela crew. |
| **Cache** _(opcional)_ | `cache` | Especifica se deve usar cache para armazenar os resultados da execução de ferramentas. O padrão é `True`. |
| **Embedder** _(opcional)_ | `embedder` | Configuração do embedder a ser utilizado pela crew. Atualmente mais usado por memory. O padrão é `{"provider": "openai"}`. |
| **Step Callback** _(opcional)_ | `step_callback` | Uma função chamada após cada etapa de cada agente. Pode ser usada para registrar as ações do agente ou executar outras operações; não sobrescreve o `step_callback` específico do agente. |

View File

@@ -268,7 +268,7 @@ Nesta seção, você encontrará exemplos detalhados que ajudam a selecionar, co
from crewai import LLM
llm = LLM(
model="gemini-1.5-pro-latest", # or vertex_ai/gemini-1.5-pro-latest
model="gemini/gemini-1.5-pro-latest",
temperature=0.7,
vertex_credentials=vertex_credentials_json
)

View File

@@ -9,7 +9,8 @@ icon: database
O framework CrewAI oferece um sistema de memória sofisticado projetado para aprimorar significativamente as capacidades dos agentes de IA. O CrewAI disponibiliza **três abordagens distintas de memória** que atendem a diferentes casos de uso:
1. **Sistema Básico de Memória** - Memória de curto prazo, longo prazo e de entidades integradas
2. **Memória Externa** - Provedores de memória externos autônomos
2. **Memória de Usuário** - Memória específica do usuário com integração ao Mem0 (abordagem legada)
3. **Memória Externa** - Provedores de memória externos autônomos (nova abordagem)
## Componentes do Sistema de Memória
@@ -18,7 +19,7 @@ O framework CrewAI oferece um sistema de memória sofisticado projetado para apr
| **Memória de Curto Prazo** | Armazena temporariamente interações e resultados recentes usando `RAG`, permitindo que os agentes recordem e utilizem informações relevantes ao contexto atual durante as execuções. |
| **Memória de Longo Prazo** | Preserva informações valiosas e aprendizados de execuções passadas, permitindo que os agentes construam e refinem seu conhecimento ao longo do tempo. |
| **Memória de Entidades** | Captura e organiza informações sobre entidades (pessoas, lugares, conceitos) encontradas durante tarefas, facilitando um entendimento mais profundo e o mapeamento de relacionamentos. Utiliza `RAG` para armazenar informações de entidades. |
| **Memória Contextual** | Mantém o contexto das interações combinando `ShortTermMemory`, `LongTermMemory` , `ExternalMemory` e `EntityMemory`, auxiliando na coerência e relevância das respostas dos agentes ao longo de uma sequência de tarefas ou conversas. |
| **Memória Contextual** | Mantém o contexto das interações combinando `ShortTermMemory`, `LongTermMemory` e `EntityMemory`, auxiliando na coerência e relevância das respostas dos agentes ao longo de uma sequência de tarefas ou conversas. |
## 1. Sistema Básico de Memória (Recomendado)
@@ -622,7 +623,7 @@ for provider in providers_to_test:
**Erros de modelo não encontrado:**
```python
# Verifique disponibilidade do modelo
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.utilities.embedding_configurator import EmbeddingConfigurator
configurator = EmbeddingConfigurator()
try:
@@ -683,19 +684,67 @@ print(f"OpenAI: {openai_time:.2f}s")
print(f"Ollama: {ollama_time:.2f}s")
```
## 2. Memória Externa
## 2. Memória de Usuário com Mem0 (Legado)
A Memória Externa fornece um sistema de memória autônomo que opera independentemente da memória interna da crew. Isso é ideal para provedores de memória especializados ou compartilhamento de memória entre aplicações.
<Warning>
**Abordagem Legada**: Embora totalmente funcional, esta abordagem é considerada legada. Para novos projetos que exijam memória específica do usuário, considere usar Memória Externa.
</Warning>
### Memória Externa Básica com Mem0
A Memória de Usuário se integra com o [Mem0](https://mem0.ai/) para fornecer memória específica do usuário que persiste entre sessões e se integra ao sistema de memória contextual da crew.
### Pré-requisitos
```bash
pip install mem0ai
```
### Configuração Mem0 na Nuvem
```python
import os
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
from crewai import Crew, Process
# Create external memory instance with local Mem0 Configuration
external_memory = ExternalMemory(
embedder_config={
# Defina sua chave de API do Mem0
os.environ["MEM0_API_KEY"] = "m0-your-api-key"
crew = Crew(
agents=[...],
tasks=[...],
memory=True, # Necessário para integração com a memória contextual
memory_config={
"provider": "mem0",
"config": {"user_id": "john"},
"user_memory": {} # Obrigatório - inicializa a memória de usuário
},
process=Process.sequential,
verbose=True
)
```
### Configuração Avançada Mem0
```python
crew = Crew(
agents=[...],
tasks=[...],
memory=True,
memory_config={
"provider": "mem0",
"config": {
"user_id": "john",
"org_id": "my_org_id", # Opcional
"project_id": "my_project_id", # Opcional
"api_key": "custom-api-key" # Opcional - sobrescreve variável de ambiente
},
"user_memory": {}
}
)
```
### Configuração Mem0 Local
```python
crew = Crew(
agents=[...],
tasks=[...],
memory=True,
memory_config={
"provider": "mem0",
"config": {
"user_id": "john",
@@ -712,60 +761,37 @@ external_memory = ExternalMemory(
"provider": "openai",
"config": {"api_key": "your-api-key", "model": "text-embedding-3-small"}
}
},
"infer": True # Optional defaults to True
}
},
"user_memory": {}
}
)
crew = Crew(
agents=[...],
tasks=[...],
external_memory=external_memory, # Separate from basic memory
process=Process.sequential,
verbose=True
)
```
### Memória Externa Avançada com o Cliente Mem0
Ao usar o Cliente Mem0, você pode personalizar ainda mais a configuração de memória usando parâmetros como "includes", "excludes", "custom_categories", "infer" e "run_id" (apenas para memória de curto prazo).
Você pode encontrar mais detalhes na [documentação do Mem0](https://docs.mem0.ai/).
## 3. Memória Externa (Nova Abordagem)
A Memória Externa fornece um sistema de memória autônomo que opera independentemente da memória interna da crew. Isso é ideal para provedores de memória especializados ou compartilhamento de memória entre aplicações.
### Memória Externa Básica com Mem0
```python
import os
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
new_categories = [
{"lifestyle_management_concerns": "Tracks daily routines, habits, hobbies and interests including cooking, time management and work-life balance"},
{"seeking_structure": "Documents goals around creating routines, schedules, and organized systems in various life areas"},
{"personal_information": "Basic information about the user including name, preferences, and personality traits"}
]
os.environ["MEM0_API_KEY"] = "your-api-key"
# Create external memory instance with Mem0 Client
# Criar instância de memória externa
external_memory = ExternalMemory(
embedder_config={
"provider": "mem0",
"config": {
"user_id": "john",
"org_id": "my_org_id", # Optional
"project_id": "my_project_id", # Optional
"api_key": "custom-api-key" # Optional - overrides env var
"run_id": "my_run_id", # Optional - for short-term memory
"includes": "include1", # Optional
"excludes": "exclude1", # Optional
"infer": True # Optional defaults to True
"custom_categories": new_categories # Optional - custom categories for user memory
},
"provider": "mem0",
"config": {"user_id": "U-123"}
}
)
crew = Crew(
agents=[...],
tasks=[...],
external_memory=external_memory, # Separate from basic memory
external_memory=external_memory, # Independente da memória básica
process=Process.sequential,
verbose=True
)
@@ -804,18 +830,17 @@ crew = Crew(
)
```
## 🧠 Comparação dos Sistemas de Memória
| **Categoria** | **Recurso** | **Memória Básica** | **Memória Externa** |
|------------------------|-------------------------------|-------------------------------|----------------------------------|
| **Facilidade de Uso** | Complexidade de Setup | Simples | Média |
| | Integração | Contextual integrada | Autônoma |
| **Persistência** | Armazenamento | Arquivos locais | Customizada / Mem0 |
| | Multi-sessão | ✅ | ✅ |
| **Personalização** | Especificidade do Usuário | ❌ | ✅ |
| | Provedores Customizados | Limitado | Qualquer provedor |
| **Aplicação Recomendada** | Recomendado para | Maioria dos casos | Necessidades especializadas |
## Comparação dos Sistemas de Memória
| Recurso | Memória Básica | Memória de Usuário (Legado) | Memória Externa |
|---------|---------------|-----------------------------|----------------|
| **Complexidade de Setup** | Simples | Média | Média |
| **Integração** | Contextual integrada | Contextual + específica do usuário | Autônoma |
| **Armazenamento** | Arquivos locais | Mem0 Cloud/Local | Customizada/Mem0 |
| **Multi-sessão** | ✅ | ✅ | ✅ |
| **Especificidade do Usuário** | ❌ | ✅ | ✅ |
| **Provedores Customizados** | Limitado | Apenas Mem0 | Qualquer provedor |
| **Recomendado para** | Maioria dos casos | Projetos legados | Necessidades especializadas |
## Provedores de Embedding Suportados
@@ -964,4 +989,4 @@ crew = Crew(
## Conclusão
Integrar o sistema de memória do CrewAI em seus projetos é simples. Ao aproveitar os componentes e configurações oferecidos,
você rapidamente capacita seus agentes a lembrar, raciocinar e aprender com suas interações, desbloqueando novos níveis de inteligência e capacidade.
você rapidamente capacita seus agentes a lembrar, raciocinar e aprender com suas interações, desbloqueando novos níveis de inteligência e capacidade.

View File

@@ -54,11 +54,9 @@ crew = Crew(
| **Markdown** _(opcional)_ | `markdown` | `Optional[bool]` | Se a tarefa deve instruir o agente a retornar a resposta final formatada em Markdown. O padrão é False. |
| **Config** _(opcional)_ | `config` | `Optional[Dict[str, Any]]` | Parâmetros de configuração específicos da tarefa. |
| **Arquivo de Saída** _(opcional)_| `output_file` | `Optional[str]` | Caminho do arquivo para armazenar a saída da tarefa. |
| **Criar Diretório** _(opcional)_ | `create_directory` | `Optional[bool]` | Se deve criar o diretório para output_file caso não exista. O padrão é True. |
| **Saída JSON** _(opcional)_ | `output_json` | `Optional[Type[BaseModel]]` | Um modelo Pydantic para estruturar a saída em JSON. |
| **Output Pydantic** _(opcional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | Um modelo Pydantic para a saída da tarefa. |
| **Callback** _(opcional)_ | `callback` | `Optional[Any]` | Função/objeto a ser executado após a conclusão da tarefa. |
| **Guardrail** _(opcional)_ | `guardrail` | `Optional[Callable]` | Função para validar a saída da tarefa antes de prosseguir para a próxima tarefa. |
## Criando Tarefas
@@ -332,11 +330,9 @@ analysis_task = Task(
Guardrails (trilhas de proteção) de tarefas fornecem uma maneira de validar e transformar as saídas das tarefas antes que elas sejam passadas para a próxima tarefa. Esse recurso assegura a qualidade dos dados e oferece feedback aos agentes quando sua saída não atende a critérios específicos.
Guardrails são implementados como funções Python que contêm lógica de validação customizada, proporcionando controle total sobre o processo de validação e garantindo resultados confiáveis e determinísticos.
### Usando Guardrails em Tarefas
### Guardrails Baseados em Função
Para adicionar um guardrail baseado em função a uma tarefa, forneça uma função de validação por meio do parâmetro `guardrail`:
Para adicionar um guardrail a uma tarefa, forneça uma função de validação por meio do parâmetro `guardrail`:
```python Code
from typing import Tuple, Union, Dict, Any
@@ -374,7 +370,9 @@ blog_task = Task(
- Em caso de sucesso: retorna uma tupla `(True, resultado_validado)`
- Em caso de falha: retorna uma tupla `(False, "mensagem de erro explicando a falha")`
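Por exemplo, um guardrail completo que valida o tamanho da resposta poderia ter a seguinte forma (esboço mínimo; o limite de 200 palavras é apenas ilustrativo):

```python Code
from typing import Any, Tuple, Union
from crewai.tasks.task_output import TaskOutput

def validar_tamanho(result: TaskOutput) -> Tuple[bool, Union[Any, str]]:
    """Aceita a saída apenas se tiver pelo menos 200 palavras."""
    total_palavras = len(result.raw.split())
    if total_palavras >= 200:
        return (True, result)  # sucesso: devolve o resultado validado
    return (False, f"Saída muito curta: {total_palavras} palavras (mínimo 200)")
```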
### LLMGuardrail
A classe `LLMGuardrail` oferece um mecanismo robusto para validação das saídas das tarefas.
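Um esboço de uso direto da classe, supondo que `LLMGuardrail` aceite uma descrição em linguagem natural e um LLM avaliador (assinatura presumida; na prática, passar uma string ao parâmetro `guardrail` da `Task` produz o mesmo efeito):

```python Code
from crewai import LLM, Task
from crewai.tasks.llm_guardrail import LLMGuardrail

# Assinatura presumida: descrição do critério + LLM avaliador
guardrail_llm = LLMGuardrail(
    description="Garanta que a resposta contenha exatamente 5 bullets",
    llm=LLM(model="gpt-4o-mini"),
)

task = Task(
    description="Listar 5 tendências de IA",
    expected_output="Lista com 5 bullets",
    guardrail=guardrail_llm,
)
```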
### Melhores Práticas de Tratamento de Erros
@@ -825,7 +823,26 @@ task = Task(
)
```
#### Use uma abordagem no-code para validação
```python Code
from crewai import Task
task = Task(
description="Gerar dados em JSON",
expected_output="Objeto JSON válido",
guardrail="Garanta que a resposta é um objeto JSON válido"
)
```
#### Usando YAML
```yaml
research_task:
  ...
  guardrail: garanta que cada bullet tenha no mínimo 100 palavras
  ...
```
```python Code
@CrewBase
@@ -941,87 +958,21 @@ task = Task(
## Criando Diretórios ao Salvar Arquivos
O parâmetro `create_directory` controla se o CrewAI deve criar automaticamente diretórios ao salvar saídas de tarefas em arquivos. Este recurso é particularmente útil para organizar outputs e garantir que os caminhos de arquivos estejam estruturados corretamente, especialmente ao trabalhar com hierarquias de projetos complexas.
### Comportamento Padrão
Por padrão, `create_directory=True`, o que significa que o CrewAI criará automaticamente qualquer diretório ausente no caminho do arquivo de saída:
Agora é possível especificar se uma tarefa deve criar diretórios ao salvar sua saída em arquivo. Isso é útil para organizar outputs e garantir que os caminhos estejam corretos.
```python Code
# Comportamento padrão - diretórios são criados automaticamente
report_task = Task(
    description='Gerar um relatório abrangente de análise de mercado',
    expected_output='Uma análise detalhada de mercado com gráficos e insights',
    agent=analyst_agent,
    output_file='reports/2025/market_analysis.md',  # Cria 'reports/2025/' se não existir
    markdown=True
# ...
save_output_task = Task(
    description='Salve o resumo das notícias de IA em um arquivo',
    expected_output='Arquivo salvo com sucesso',
    agent=research_agent,
    tools=[file_save_tool],
    output_file='outputs/ai_news_summary.txt',
    create_directory=True
)
```
### Desabilitando a Criação de Diretórios
Se você quiser evitar a criação automática de diretórios e garantir que o diretório já exista, defina `create_directory=False`:
```python Code
# Modo estrito - o diretório já deve existir
strict_output_task = Task(
    description='Salvar dados críticos que requerem infraestrutura existente',
    expected_output='Dados salvos em localização pré-configurada',
    agent=data_agent,
    output_file='secure/vault/critical_data.json',
    create_directory=False  # Gerará RuntimeError se 'secure/vault/' não existir
)
```
### Configuração YAML
Você também pode configurar este comportamento em suas definições de tarefas YAML:
```yaml tasks.yaml
analysis_task:
  description: >
    Gerar análise financeira trimestral
  expected_output: >
    Um relatório financeiro abrangente com insights trimestrais
  agent: financial_analyst
  output_file: reports/quarterly/q4_2024_analysis.pdf
  create_directory: true # Criar automaticamente o diretório 'reports/quarterly/'

audit_task:
  description: >
    Realizar auditoria de conformidade e salvar no diretório de auditoria existente
  expected_output: >
    Um relatório de auditoria de conformidade
  agent: auditor
  output_file: audit/compliance_report.md
  create_directory: false # O diretório já deve existir
```
### Casos de Uso
**Criação Automática de Diretórios (`create_directory=True`):**
- Ambientes de desenvolvimento e prototipagem
- Geração dinâmica de relatórios com pastas baseadas em datas
- Fluxos de trabalho automatizados onde a estrutura de diretórios pode variar
- Aplicações multi-tenant com pastas específicas do usuário
**Gerenciamento Manual de Diretórios (`create_directory=False`):**
- Ambientes de produção com controles rígidos do sistema de arquivos
- Aplicações sensíveis à segurança onde diretórios devem ser pré-configurados
- Sistemas com requisitos específicos de permissão
- Ambientes de conformidade onde a criação de diretórios é auditada
### Tratamento de Erros
Quando `create_directory=False` e o diretório não existe, o CrewAI gerará um `RuntimeError`:
```python Code
try:
    result = crew.kickoff()
except RuntimeError as e:
    # Tratar erro de diretório ausente
    print(f"Falha na criação do diretório: {e}")
    # Criar diretório manualmente ou usar local alternativo
    # ...
```
Veja o vídeo abaixo para aprender como utilizar saídas estruturadas no CrewAI:

View File

@@ -44,19 +44,6 @@ A classe `MCPServerAdapter` da `crewai-tools` é a principal forma de conectar-s
O uso de um gerenciador de contexto Python (`with`) é a **abordagem recomendada** para o `MCPServerAdapter`. Ele lida automaticamente com a abertura e o fechamento da conexão com o servidor MCP.
## Configuração de Conexão
O `MCPServerAdapter` suporta várias opções de configuração para personalizar o comportamento da conexão:
- **`connect_timeout`** (opcional): Tempo máximo em segundos para aguardar o estabelecimento de uma conexão com o servidor MCP. O padrão é 30 segundos se não especificado. Isso é particularmente útil para servidores remotos que podem ter tempos de resposta variáveis.
```python
# Exemplo com timeout personalizado para conexão
with MCPServerAdapter(server_params, connect_timeout=60) as tools:
    # A conexão terá timeout após 60 segundos se não estabelecida
    pass
```
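Para servidores remotos, o mesmo parâmetro se combina com a configuração de transporte. Esboço mínimo (a URL e o transporte abaixo são apenas ilustrativos; ajuste ao seu servidor MCP):

```python
from crewai_tools import MCPServerAdapter

server_params = {
    "url": "http://localhost:8001/mcp",  # URL ilustrativa do servidor MCP
    "transport": "streamable-http",
}

with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:
    print([tool.name for tool in mcp_tools])
```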
```python
from crewai import Agent
from crewai_tools import MCPServerAdapter
@@ -83,7 +70,7 @@ server_params = {
}
# Exemplo de uso (descomente e adapte após definir server_params):
with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:
with MCPServerAdapter(server_params) as mcp_tools:
print(f"Available tools: {[tool.name for tool in mcp_tools]}")
meu_agente = Agent(
@@ -101,7 +88,7 @@ Este padrão geral mostra como integrar ferramentas. Para exemplos específicos
## Filtrando Ferramentas
```python
with MCPServerAdapter(server_params, connect_timeout=60) as mcp_tools:
with MCPServerAdapter(server_params) as mcp_tools:
print(f"Available tools: {[tool.name for tool in mcp_tools]}")
meu_agente = Agent(

View File

@@ -1,286 +0,0 @@
---
title: Integração LangDB
description: Governe, proteja e otimize seus fluxos de trabalho CrewAI com LangDB AI Gateway—acesse mais de 350 modelos, roteamento automático, otimização de custos e observabilidade completa.
icon: database
---
# Introdução
[LangDB AI Gateway](https://langdb.ai) fornece APIs compatíveis com OpenAI para conectar com múltiplos Grandes Modelos de Linguagem e serve como uma plataforma de observabilidade que torna simples rastrear fluxos de trabalho CrewAI de ponta a ponta, proporcionando acesso a mais de 350 modelos de linguagem. Com uma única chamada `init()`, todas as interações de agentes, execuções de tarefas e chamadas LLM são capturadas, fornecendo observabilidade abrangente e infraestrutura de IA pronta para produção para suas aplicações.
<Frame caption="Exemplo de Rastreamento CrewAI LangDB">
<img src="/images/langdb-1.png" alt="Exemplo de rastreamento CrewAI LangDB" />
</Frame>
**Confira:** [Ver o exemplo de trace ao vivo](https://app.langdb.ai/sharing/threads/3becbfed-a1be-ae84-ea3c-4942867a3e22)
## Recursos
### Capacidades do AI Gateway
- **Acesso a mais de 350 LLMs**: Conecte-se a todos os principais modelos de linguagem através de uma única integração
- **Modelos Virtuais**: Crie configurações de modelo personalizadas com parâmetros específicos e regras de roteamento
- **MCP Virtual**: Habilite compatibilidade e integração com sistemas MCP (Model Context Protocol) para comunicação aprimorada de agentes
- **Guardrails**: Implemente medidas de segurança e controles de conformidade para comportamento de agentes
### Observabilidade e Rastreamento
- **Rastreamento Automático**: Uma única chamada `init()` captura todas as interações CrewAI
- **Visibilidade Ponta a Ponta**: Monitore fluxos de trabalho de agentes do início ao fim
- **Rastreamento de Uso de Ferramentas**: Rastreie quais ferramentas os agentes usam e seus resultados
- **Monitoramento de Chamadas de Modelo**: Insights detalhados sobre interações LLM
- **Análise de Performance**: Monitore latência, uso de tokens e custos
- **Suporte a Depuração**: Execução passo a passo para solução de problemas
- **Monitoramento em Tempo Real**: Dashboard de traces e métricas ao vivo
## Instruções de Configuração
<Steps>
<Step title="Instalar LangDB">
Instale o cliente LangDB com flag de recurso CrewAI:
```bash
pip install 'pylangdb[crewai]'
```
</Step>
<Step title="Definir Variáveis de Ambiente">
Configure suas credenciais LangDB:
```bash
export LANGDB_API_KEY="<sua_chave_api_langdb>"
export LANGDB_PROJECT_ID="<seu_id_projeto_langdb>"
export LANGDB_API_BASE_URL='https://api.us-east-1.langdb.ai'
```
</Step>
<Step title="Inicializar Rastreamento">
Importe e inicialize LangDB antes de configurar seu código CrewAI:
```python
from pylangdb.crewai import init
# Inicializar LangDB
init()
```
</Step>
<Step title="Configurar CrewAI com LangDB">
Configure seu LLM com cabeçalhos LangDB:
```python
from crewai import Agent, Task, Crew, LLM
import os
# Configurar LLM com cabeçalhos LangDB
llm = LLM(
model="openai/gpt-4o", # Substitua pelo modelo que você quer usar
api_key=os.getenv("LANGDB_API_KEY"),
base_url=os.getenv("LANGDB_API_BASE_URL"),
extra_headers={"x-project-id": os.getenv("LANGDB_PROJECT_ID")}
)
```
</Step>
</Steps>
## Exemplo de Início Rápido
Aqui está um exemplo simples para começar com LangDB e CrewAI:
```python
import os
from pylangdb.crewai import init
from crewai import Agent, Task, Crew, LLM
# Inicializar LangDB antes de qualquer importação CrewAI
init()
def create_llm(model):
    return LLM(
        model=model,
        api_key=os.environ.get("LANGDB_API_KEY"),
        base_url=os.environ.get("LANGDB_API_BASE_URL"),
        extra_headers={"x-project-id": os.environ.get("LANGDB_PROJECT_ID")}
    )

# Defina seu agente
researcher = Agent(
    role="Especialista em Pesquisa",
    goal="Pesquisar tópicos minuciosamente",
    backstory="Pesquisador especialista com habilidades em encontrar informações",
    llm=create_llm("openai/gpt-4o"),  # Substitua pelo modelo que você quer usar
    verbose=True
)

# Criar uma tarefa
task = Task(
    description="Pesquise o tópico dado e forneça um resumo abrangente",
    agent=researcher,
    expected_output="Resumo de pesquisa detalhado com principais descobertas"
)

# Criar e executar a equipe
crew = Crew(agents=[researcher], tasks=[task])
result = crew.kickoff()
print(result)
```
## Exemplo Completo: Agente de Pesquisa e Planejamento
Este exemplo abrangente demonstra um fluxo de trabalho multi-agente com capacidades de pesquisa e planejamento.
### Pré-requisitos
```bash
pip install crewai 'pylangdb[crewai]' crewai_tools setuptools python-dotenv
```
### Configuração do Ambiente
```bash
# Credenciais LangDB
export LANGDB_API_KEY="<sua_chave_api_langdb>"
export LANGDB_PROJECT_ID="<seu_id_projeto_langdb>"
export LANGDB_API_BASE_URL='https://api.us-east-1.langdb.ai'
# Chaves API adicionais (opcional)
export SERPER_API_KEY="<sua_chave_api_serper>" # Para capacidades de busca na web
```
### Implementação Completa
```python
#!/usr/bin/env python3
import os
import sys
from pylangdb.crewai import init
init() # Inicializar LangDB antes de qualquer importação CrewAI
from dotenv import load_dotenv
from crewai import Agent, Task, Crew, Process, LLM
from crewai_tools import SerperDevTool
load_dotenv()
def create_llm(model):
    return LLM(
        model=model,
        api_key=os.environ.get("LANGDB_API_KEY"),
        base_url=os.environ.get("LANGDB_API_BASE_URL"),
        extra_headers={"x-project-id": os.environ.get("LANGDB_PROJECT_ID")}
    )

class ResearchPlanningCrew:
    def researcher(self) -> Agent:
        return Agent(
            role="Especialista em Pesquisa",
            goal="Pesquisar tópicos minuciosamente e compilar informações abrangentes",
            backstory="Pesquisador especialista com habilidades em encontrar e analisar informações de várias fontes",
            tools=[SerperDevTool()],
            llm=create_llm("openai/gpt-4o"),
            verbose=True
        )

    def planner(self) -> Agent:
        return Agent(
            role="Planejador Estratégico",
            goal="Criar planos acionáveis baseados em descobertas de pesquisa",
            backstory="Planejador estratégico que divide desafios complexos em planos executáveis",
            reasoning=True,
            max_reasoning_attempts=3,
            llm=create_llm("openai/anthropic/claude-3.7-sonnet"),
            verbose=True
        )

    def research_task(self) -> Task:
        return Task(
            description="Pesquise o tópico minuciosamente e compile informações abrangentes",
            agent=self.researcher(),
            expected_output="Relatório de pesquisa abrangente com principais descobertas e insights"
        )

    def planning_task(self) -> Task:
        return Task(
            description="Crie um plano estratégico baseado nas descobertas da pesquisa",
            agent=self.planner(),
            expected_output="Plano de execução estratégica com fases, objetivos e etapas acionáveis",
            context=[self.research_task()]
        )

    def crew(self) -> Crew:
        return Crew(
            agents=[self.researcher(), self.planner()],
            tasks=[self.research_task(), self.planning_task()],
            verbose=True,
            process=Process.sequential
        )

def main():
    topic = sys.argv[1] if len(sys.argv) > 1 else "Inteligência Artificial na Saúde"
    crew_instance = ResearchPlanningCrew()

    # Atualizar descrições de tarefas com o tópico específico
    crew_instance.research_task().description = f"Pesquise {topic} minuciosamente e compile informações abrangentes"
    crew_instance.planning_task().description = f"Crie um plano estratégico para {topic} baseado nas descobertas da pesquisa"

    result = crew_instance.crew().kickoff()
    print(result)

if __name__ == "__main__":
    main()
```
### Executando o Exemplo
```bash
python main.py "Soluções de Energia Sustentável"
```
## Visualizando Traces no LangDB
Após executar sua aplicação CrewAI, você pode visualizar traces detalhados no dashboard LangDB:
<Frame caption="Dashboard de Trace LangDB">
<img src="/images/langdb-2.png" alt="Dashboard de trace LangDB mostrando fluxo de trabalho CrewAI" />
</Frame>
### O Que Você Verá
- **Interações de Agentes**: Fluxo completo de conversas de agentes e transferências de tarefas
- **Uso de Ferramentas**: Quais ferramentas foram chamadas, suas entradas e saídas
- **Chamadas de Modelo**: Interações LLM detalhadas com prompts e respostas
- **Métricas de Performance**: Rastreamento de latência, uso de tokens e custos
- **Linha do Tempo de Execução**: Visualização passo a passo de todo o fluxo de trabalho
## Solução de Problemas
### Problemas Comuns
- **Nenhum trace aparecendo**: Certifique-se de que `init()` seja chamado antes de qualquer importação CrewAI
- **Erros de autenticação**: Verifique sua chave API LangDB e ID do projeto
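Na prática, a ordem correta de inicialização é a seguinte (esboço mínimo):

```python
# init() precisa vir antes de qualquer importação do CrewAI
from pylangdb.crewai import init
init()

from crewai import Agent, Crew, Task  # importações do CrewAI só depois
```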
## Recursos
<CardGroup cols={3}>
<Card title="Documentação LangDB" icon="book" href="https://docs.langdb.ai">
Documentação oficial e guias LangDB
</Card>
<Card title="Guias LangDB" icon="graduation-cap" href="https://docs.langdb.ai/guides">
Tutoriais passo a passo para construir agentes de IA
</Card>
<Card title="Exemplos GitHub" icon="github" href="https://github.com/langdb/langdb-samples/tree/main/examples/crewai" >
Exemplos completos de integração CrewAI
</Card>
<Card title="Dashboard LangDB" icon="chart-line" href="https://app.langdb.ai">
Acesse seus traces e análises
</Card>
<Card title="Catálogo de Modelos" icon="list" href="https://app.langdb.ai/models">
Navegue por mais de 350 modelos de linguagem disponíveis
</Card>
<Card title="Recursos Enterprise" icon="building" href="https://docs.langdb.ai/enterprise">
Opções auto-hospedadas e capacidades empresariais
</Card>
</CardGroup>
## Próximos Passos
Este guia cobriu o básico da integração do LangDB AI Gateway com CrewAI. Para aprimorar ainda mais seus fluxos de trabalho de IA, explore:
- **Modelos Virtuais**: Crie configurações de modelo personalizadas com estratégias de roteamento
- **Guardrails e Segurança**: Implemente filtragem de conteúdo e controles de conformidade
- **Implantação em Produção**: Configure fallbacks, tentativas e balanceamento de carga
Para recursos mais avançados e casos de uso, visite a [Documentação LangDB](https://docs.langdb.ai) ou explore o [Catálogo de Modelos](https://app.langdb.ai/models) para descobrir todos os modelos disponíveis.

View File

@@ -25,10 +25,6 @@ A observabilidade é fundamental para entender como seus agentes CrewAI estão d
Replays de sessões, métricas e monitoramento para desenvolvimento e produção de agentes.
</Card>
<Card title="LangDB" icon="database" href="/pt-BR/observability/langdb">
Rastreamento ponta a ponta para fluxos de trabalho CrewAI com captura automática de interações de agentes.
</Card>
<Card title="OpenLIT" icon="magnifying-glass-chart" href="/pt-BR/observability/openlit">
Monitoramento nativo OpenTelemetry com rastreamento de custos e análises de desempenho.
</Card>

View File

@@ -11,7 +11,7 @@ dependencies = [
# Core Dependencies
"pydantic>=2.4.2",
"openai>=1.13.3",
"litellm==1.74.9",
"litellm==1.72.6",
"instructor>=1.3.3",
# Text Processing
"pdfplumber>=0.11.4",
@@ -39,7 +39,6 @@ dependencies = [
"tomli>=2.0.2",
"blinker>=1.9.0",
"json5>=0.10.0",
"portalocker==2.7.0",
]
[project.urls]
@@ -48,7 +47,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies]
tools = ["crewai-tools~=0.60.0"]
tools = ["crewai-tools~=0.49.0"]
embeddings = [
"tiktoken~=0.8.0"
]

View File

@@ -54,7 +54,7 @@ def _track_install_async():
_track_install_async()
__version__ = "0.157.0"
__version__ = "0.140.0"
__all__ = [
"Agent",
"Crew",

View File

@@ -210,6 +210,7 @@ class Agent(BaseAgent):
sources=self.knowledge_sources,
embedder=self.embedder,
collection_name=self.role,
storage=self.knowledge_storage or None,
)
self.knowledge.add_sources()
except (TypeError, ValueError) as e:
@@ -222,9 +223,11 @@ class Agent(BaseAgent):
memory_attributes = [
"memory",
"memory_config",
"_short_term_memory",
"_long_term_memory",
"_entity_memory",
"_user_memory",
"_external_memory",
]
@@ -314,9 +317,11 @@ class Agent(BaseAgent):
start_time = time.time()
contextual_memory = ContextualMemory(
self.crew.memory_config,
self.crew._short_term_memory,
self.crew._long_term_memory,
self.crew._entity_memory,
self.crew._user_memory,
self.crew._external_memory,
)
memory = contextual_memory.build_context_for_task(task, context)
@@ -336,8 +341,7 @@ class Agent(BaseAgent):
self.knowledge_config.model_dump() if self.knowledge_config else {}
)
if self.knowledge or (self.crew and self.crew.knowledge):
if self.knowledge:
crewai_event_bus.emit(
self,
event=KnowledgeRetrievalStartedEvent(
@@ -349,28 +353,25 @@ class Agent(BaseAgent):
task_prompt
)
if self.knowledge_search_query:
# Querying agent specific knowledge
if self.knowledge:
agent_knowledge_snippets = self.knowledge.query(
[self.knowledge_search_query], **knowledge_config
)
if agent_knowledge_snippets:
self.agent_knowledge_context = extract_knowledge_context(
agent_knowledge_snippets
)
if self.agent_knowledge_context:
task_prompt += self.agent_knowledge_context
# Querying crew specific knowledge
knowledge_snippets = self.crew.query_knowledge(
agent_knowledge_snippets = self.knowledge.query(
[self.knowledge_search_query], **knowledge_config
)
if knowledge_snippets:
self.crew_knowledge_context = extract_knowledge_context(
knowledge_snippets
if agent_knowledge_snippets:
self.agent_knowledge_context = extract_knowledge_context(
agent_knowledge_snippets
)
if self.crew_knowledge_context:
task_prompt += self.crew_knowledge_context
if self.agent_knowledge_context:
task_prompt += self.agent_knowledge_context
if self.crew:
knowledge_snippets = self.crew.query_knowledge(
[self.knowledge_search_query], **knowledge_config
)
if knowledge_snippets:
self.crew_knowledge_context = extract_knowledge_context(
knowledge_snippets
)
if self.crew_knowledge_context:
task_prompt += self.crew_knowledge_context
crewai_event_bus.emit(
self,

View File

@@ -120,8 +120,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
raise
except Exception as e:
handle_unknown_error(self._printer, e)
raise
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors
raise e
else:
raise e
if self.ask_for_human_input:
formatted_answer = self._handle_human_feedback(formatted_answer)

View File

@@ -1,6 +1,8 @@
ALGORITHMS = ["RS256"]
#TODO: The AUTH0 constants should be removed after WorkOS migration is completed
AUTH0_DOMAIN = "crewai.us.auth0.com"
AUTH0_CLIENT_ID = "DEVC5Fw6NlRoSzmDCcOhVq85EfLBjKa8"
AUTH0_AUDIENCE = "https://crewai.us.auth0.com/api/v2/"
WORKOS_DOMAIN = "login.crewai.com"
WORKOS_CLI_CONNECT_APP_ID = "client_01JYT06R59SP0NXYGD994NFXXX"
WORKOS_ENVIRONMENT_ID = "client_01JNJQWB4HG8T5980R5VHP057C"

View File

@@ -1,92 +1,76 @@
import time
import webbrowser
from typing import Any, Dict, Optional
from typing import Any, Dict
import requests
from rich.console import Console
from pydantic import BaseModel, Field
from .constants import (
AUTH0_AUDIENCE,
AUTH0_CLIENT_ID,
AUTH0_DOMAIN,
WORKOS_DOMAIN,
WORKOS_CLI_CONNECT_APP_ID,
WORKOS_ENVIRONMENT_ID,
)
from .utils import TokenManager, validate_jwt_token
from urllib.parse import quote
from crewai.cli.plus_api import PlusAPI
from crewai.cli.config import Settings
from crewai.cli.authentication.constants import (
AUTH0_AUDIENCE,
AUTH0_CLIENT_ID,
AUTH0_DOMAIN,
)
console = Console()
class Oauth2Settings(BaseModel):
provider: str = Field(description="OAuth2 provider used for authentication (e.g., workos, okta, auth0).")
client_id: str = Field(description="OAuth2 client ID issued by the provider, used during authentication requests.")
domain: str = Field(description="OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens.")
audience: Optional[str] = Field(description="OAuth2 audience value, typically used to identify the target API or resource.", default=None)
@classmethod
def from_settings(cls):
settings = Settings()
return cls(
provider=settings.oauth2_provider,
domain=settings.oauth2_domain,
client_id=settings.oauth2_client_id,
audience=settings.oauth2_audience,
)
class ProviderFactory:
@classmethod
def from_settings(cls, settings: Optional[Oauth2Settings] = None):
settings = settings or Oauth2Settings.from_settings()
import importlib
module = importlib.import_module(f"crewai.cli.authentication.providers.{settings.provider.lower()}")
provider = getattr(module, f"{settings.provider.capitalize()}Provider")
return provider(settings)
class AuthenticationCommand:
AUTH0_DEVICE_CODE_URL = f"https://{AUTH0_DOMAIN}/oauth/device/code"
AUTH0_TOKEN_URL = f"https://{AUTH0_DOMAIN}/oauth/token"
WORKOS_DEVICE_CODE_URL = f"https://{WORKOS_DOMAIN}/oauth2/device_authorization"
WORKOS_TOKEN_URL = f"https://{WORKOS_DOMAIN}/oauth2/token"
def __init__(self):
self.token_manager = TokenManager()
self.oauth2_provider = ProviderFactory.from_settings()
# TODO: WORKOS - This variable is temporary until migration to WorkOS is complete.
self.user_provider = "workos"
def login(self) -> None:
"""Sign up to CrewAI+"""
device_code_url = self.WORKOS_DEVICE_CODE_URL
token_url = self.WORKOS_TOKEN_URL
client_id = WORKOS_CLI_CONNECT_APP_ID
audience = None
console.print("Signing in to CrewAI Enterprise...\n", style="bold blue")
# TODO: WORKOS - Next line and conditional are temporary until migration to WorkOS is complete.
user_provider = self._determine_user_provider()
if user_provider == "auth0":
settings = Oauth2Settings(
provider="auth0",
client_id=AUTH0_CLIENT_ID,
domain=AUTH0_DOMAIN,
audience=AUTH0_AUDIENCE
)
self.oauth2_provider = ProviderFactory.from_settings(settings)
device_code_url = self.AUTH0_DEVICE_CODE_URL
token_url = self.AUTH0_TOKEN_URL
client_id = AUTH0_CLIENT_ID
audience = AUTH0_AUDIENCE
self.user_provider = "auth0"
# End of temporary code.
device_code_data = self._get_device_code()
device_code_data = self._get_device_code(client_id, device_code_url, audience)
self._display_auth_instructions(device_code_data)
return self._poll_for_token(device_code_data)
return self._poll_for_token(device_code_data, client_id, token_url)
def _get_device_code(
self
self, client_id: str, device_code_url: str, audience: str | None = None
) -> Dict[str, Any]:
"""Get the device code to authenticate the user."""
device_code_payload = {
"client_id": self.oauth2_provider.get_client_id(),
"client_id": client_id,
"scope": "openid",
"audience": self.oauth2_provider.get_audience(),
"audience": audience,
}
response = requests.post(
url=self.oauth2_provider.get_authorize_url(), data=device_code_payload, timeout=20
url=device_code_url, data=device_code_payload, timeout=20
)
response.raise_for_status()
return response.json()
@@ -98,21 +82,21 @@ class AuthenticationCommand:
webbrowser.open(device_code_data["verification_uri_complete"])
def _poll_for_token(
self, device_code_data: Dict[str, Any]
self, device_code_data: Dict[str, Any], client_id: str, token_poll_url: str
) -> None:
"""Polls the server for the token until it is received, or max attempts are reached."""
token_payload = {
"grant_type": "urn:ietf:params:oauth:grant-type:device_code",
"device_code": device_code_data["device_code"],
"client_id": self.oauth2_provider.get_client_id(),
"client_id": client_id,
}
console.print("\nWaiting for authentication... ", style="bold blue", end="")
attempts = 0
while True and attempts < 10:
response = requests.post(self.oauth2_provider.get_token_url(), data=token_payload, timeout=30)
response = requests.post(token_poll_url, data=token_payload, timeout=30)
token_data = response.json()
if response.status_code == 200:
@@ -144,14 +128,19 @@ class AuthenticationCommand:
"""Validates the JWT token and saves the token to the token manager."""
jwt_token = token_data["access_token"]
issuer = self.oauth2_provider.get_issuer()
jwt_token_data = {
"jwt_token": jwt_token,
"jwks_url": self.oauth2_provider.get_jwks_url(),
"issuer": issuer,
"audience": self.oauth2_provider.get_audience(),
"jwks_url": f"https://{WORKOS_DOMAIN}/oauth2/jwks",
"issuer": f"https://{WORKOS_DOMAIN}",
"audience": WORKOS_ENVIRONMENT_ID,
}
# TODO: WORKOS - The following conditional is temporary until migration to WorkOS is complete.
if self.user_provider == "auth0":
jwt_token_data["jwks_url"] = f"https://{AUTH0_DOMAIN}/.well-known/jwks.json"
jwt_token_data["issuer"] = f"https://{AUTH0_DOMAIN}/"
jwt_token_data["audience"] = AUTH0_AUDIENCE
decoded_token = validate_jwt_token(**jwt_token_data)
expires_at = decoded_token.get("exp", 0)

View File

@@ -1,26 +0,0 @@
from crewai.cli.authentication.providers.base_provider import BaseProvider
class Auth0Provider(BaseProvider):
def get_authorize_url(self) -> str:
return f"https://{self._get_domain()}/oauth/device/code"
def get_token_url(self) -> str:
return f"https://{self._get_domain()}/oauth/token"
def get_jwks_url(self) -> str:
return f"https://{self._get_domain()}/.well-known/jwks.json"
def get_issuer(self) -> str:
return f"https://{self._get_domain()}/"
def get_audience(self) -> str:
assert self.settings.audience is not None, "Audience is required"
return self.settings.audience
def get_client_id(self) -> str:
assert self.settings.client_id is not None, "Client ID is required"
return self.settings.client_id
def _get_domain(self) -> str:
assert self.settings.domain is not None, "Domain is required"
return self.settings.domain

View File

@@ -1,30 +0,0 @@
from abc import ABC, abstractmethod
from crewai.cli.authentication.main import Oauth2Settings
class BaseProvider(ABC):
def __init__(self, settings: Oauth2Settings):
self.settings = settings
@abstractmethod
def get_authorize_url(self) -> str:
...
@abstractmethod
def get_token_url(self) -> str:
...
@abstractmethod
def get_jwks_url(self) -> str:
...
@abstractmethod
def get_issuer(self) -> str:
...
@abstractmethod
def get_audience(self) -> str:
...
@abstractmethod
def get_client_id(self) -> str:
...

View File

@@ -1,22 +0,0 @@
from crewai.cli.authentication.providers.base_provider import BaseProvider
class OktaProvider(BaseProvider):
def get_authorize_url(self) -> str:
return f"https://{self.settings.domain}/oauth2/default/v1/device/authorize"
def get_token_url(self) -> str:
return f"https://{self.settings.domain}/oauth2/default/v1/token"
def get_jwks_url(self) -> str:
return f"https://{self.settings.domain}/oauth2/default/v1/keys"
def get_issuer(self) -> str:
return f"https://{self.settings.domain}/oauth2/default"
def get_audience(self) -> str:
assert self.settings.audience is not None
return self.settings.audience
def get_client_id(self) -> str:
assert self.settings.client_id is not None
return self.settings.client_id

View File

@@ -1,25 +0,0 @@
from crewai.cli.authentication.providers.base_provider import BaseProvider
class WorkosProvider(BaseProvider):
def get_authorize_url(self) -> str:
return f"https://{self._get_domain()}/oauth2/device_authorization"
def get_token_url(self) -> str:
return f"https://{self._get_domain()}/oauth2/token"
def get_jwks_url(self) -> str:
return f"https://{self._get_domain()}/oauth2/jwks"
def get_issuer(self) -> str:
return f"https://{self._get_domain()}"
def get_audience(self) -> str:
return self.settings.audience or ""
def get_client_id(self) -> str:
assert self.settings.client_id is not None, "Client ID is required"
return self.settings.client_id
def _get_domain(self) -> str:
assert self.settings.domain is not None, "Domain is required"
return self.settings.domain

View File

@@ -30,9 +30,6 @@ def validate_jwt_token(
jwk_client = PyJWKClient(jwks_url)
signing_key = jwk_client.get_signing_key_from_jwt(jwt_token)
_unverified_decoded_token = jwt.decode(
jwt_token, options={"verify_signature": False}
)
decoded_token = jwt.decode(
jwt_token,
signing_key.key,
@@ -52,15 +49,9 @@ def validate_jwt_token(
except jwt.ExpiredSignatureError:
raise Exception("Token has expired.")
except jwt.InvalidAudienceError:
actual_audience = _unverified_decoded_token.get("aud", "[no audience found]")
raise Exception(
f"Invalid token audience. Got: '{actual_audience}'. Expected: '{audience}'"
)
raise Exception(f"Invalid token audience. Expected: '{audience}'")
except jwt.InvalidIssuerError:
actual_issuer = _unverified_decoded_token.get("iss", "[no issuer found]")
raise Exception(
f"Invalid token issuer. Got: '{actual_issuer}'. Expected: '{issuer}'"
)
raise Exception(f"Invalid token issuer. Expected: '{issuer}'")
except jwt.MissingRequiredClaimError as e:
raise Exception(f"Token is missing required claims: {str(e)}")
except jwt.exceptions.PyJWKClientError as e:

View File

@@ -3,7 +3,6 @@ from typing import Optional
import click
from crewai.cli.config import Settings
from crewai.cli.settings.main import SettingsCommand
from crewai.cli.add_crew_to_flow import add_crew_to_flow
from crewai.cli.create_crew import create_crew
from crewai.cli.create_flow import create_flow
@@ -14,7 +13,6 @@ from crewai.memory.storage.kickoff_task_outputs_storage import (
from .authentication.main import AuthenticationCommand
from .deploy.main import DeployCommand
from .enterprise.main import EnterpriseConfigureCommand
from .evaluate_crew import evaluate_crew
from .install_crew import install_crew
from .kickoff_flow import kickoff_flow
@@ -229,7 +227,7 @@ def update():
@crewai.command()
def login():
"""Sign Up/Login to CrewAI Enterprise."""
Settings().clear_user_settings()
Settings().clear()
AuthenticationCommand().login()
@@ -371,8 +369,8 @@ def org():
pass
@org.command("list")
def org_list():
@org.command()
def list():
"""List available organizations."""
org_command = OrganizationCommand()
org_command.list()
@@ -393,48 +391,5 @@ def current():
org_command.current()
@crewai.group()
def enterprise():
"""Enterprise Configuration commands."""
pass
@enterprise.command("configure")
@click.argument("enterprise_url")
def enterprise_configure(enterprise_url: str):
"""Configure CrewAI Enterprise OAuth2 settings from the provided Enterprise URL."""
enterprise_command = EnterpriseConfigureCommand()
enterprise_command.configure(enterprise_url)
@crewai.group()
def config():
"""CLI Configuration commands."""
pass
@config.command("list")
def config_list():
"""List all CLI configuration parameters."""
config_command = SettingsCommand()
config_command.list()
@config.command("set")
@click.argument("key")
@click.argument("value")
def config_set(key: str, value: str):
"""Set a CLI configuration parameter."""
config_command = SettingsCommand()
config_command.set(key, value)
@config.command("reset")
def config_reset():
"""Reset all CLI configuration parameters to default values."""
config_command = SettingsCommand()
config_command.reset_all_settings()
if __name__ == "__main__":
crewai()

View File

@@ -26,7 +26,7 @@ class PlusAPIMixin:
"Please sign up/login to CrewAI+ before using the CLI.",
style="bold red",
)
console.print("Run 'crewai login' to sign up/login.", style="bold green")
console.print("Run 'crewai signup' to sign up/login.", style="bold green")
raise SystemExit
def _validate_response(self, response: requests.Response) -> None:

View File

@@ -4,60 +4,10 @@ from typing import Optional
from pydantic import BaseModel, Field
from crewai.cli.constants import (
DEFAULT_CREWAI_ENTERPRISE_URL,
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_PROVIDER,
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_AUDIENCE,
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_CLIENT_ID,
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_DOMAIN,
)
DEFAULT_CONFIG_PATH = Path.home() / ".config" / "crewai" / "settings.json"
# Settings that are related to the user's account
USER_SETTINGS_KEYS = [
"tool_repository_username",
"tool_repository_password",
"org_name",
"org_uuid",
]
# Settings that are related to the CLI
CLI_SETTINGS_KEYS = [
"enterprise_base_url",
"oauth2_provider",
"oauth2_audience",
"oauth2_client_id",
"oauth2_domain",
]
# Default values for CLI settings
DEFAULT_CLI_SETTINGS = {
"enterprise_base_url": DEFAULT_CREWAI_ENTERPRISE_URL,
"oauth2_provider": CREWAI_ENTERPRISE_DEFAULT_OAUTH2_PROVIDER,
"oauth2_audience": CREWAI_ENTERPRISE_DEFAULT_OAUTH2_AUDIENCE,
"oauth2_client_id": CREWAI_ENTERPRISE_DEFAULT_OAUTH2_CLIENT_ID,
"oauth2_domain": CREWAI_ENTERPRISE_DEFAULT_OAUTH2_DOMAIN,
}
# Readonly settings - cannot be set by the user
READONLY_SETTINGS_KEYS = [
"org_name",
"org_uuid",
]
# Hidden settings - not displayed by the 'list' command and cannot be set by the user
HIDDEN_SETTINGS_KEYS = [
"config_path",
"tool_repository_username",
"tool_repository_password",
]
class Settings(BaseModel):
enterprise_base_url: Optional[str] = Field(
default=DEFAULT_CLI_SETTINGS["enterprise_base_url"],
description="Base URL of the CrewAI Enterprise instance",
)
tool_repository_username: Optional[str] = Field(
None, description="Username for interacting with the Tool Repository"
)
@@ -70,27 +20,7 @@ class Settings(BaseModel):
org_uuid: Optional[str] = Field(
None, description="UUID of the currently active organization"
)
config_path: Path = Field(default=DEFAULT_CONFIG_PATH, frozen=True, exclude=True)
oauth2_provider: str = Field(
description="OAuth2 provider used for authentication (e.g., workos, okta, auth0).",
default=DEFAULT_CLI_SETTINGS["oauth2_provider"]
)
oauth2_audience: Optional[str] = Field(
description="OAuth2 audience value, typically used to identify the target API or resource.",
default=DEFAULT_CLI_SETTINGS["oauth2_audience"]
)
oauth2_client_id: str = Field(
default=DEFAULT_CLI_SETTINGS["oauth2_client_id"],
description="OAuth2 client ID issued by the provider, used during authentication requests.",
)
oauth2_domain: str = Field(
description="OAuth2 provider's domain (e.g., your-org.auth0.com) used for issuing tokens.",
default=DEFAULT_CLI_SETTINGS["oauth2_domain"]
)
config_path: Path = Field(default=DEFAULT_CONFIG_PATH, exclude=True)
def __init__(self, config_path: Path = DEFAULT_CONFIG_PATH, **data):
"""Load Settings from config path"""
@@ -107,16 +37,9 @@ class Settings(BaseModel):
merged_data = {**file_data, **data}
super().__init__(config_path=config_path, **merged_data)
def clear_user_settings(self) -> None:
"""Clear all user settings"""
self._reset_user_settings()
self.dump()
def reset(self) -> None:
"""Reset all settings to default values"""
self._reset_user_settings()
self._reset_cli_settings()
self.dump()
def clear(self) -> None:
"""Clear all settings"""
self.config_path.unlink(missing_ok=True)
def dump(self) -> None:
"""Save current settings to settings.json"""
@@ -129,13 +52,3 @@ class Settings(BaseModel):
updated_data = {**existing_data, **self.model_dump(exclude_unset=True)}
with self.config_path.open("w") as f:
json.dump(updated_data, f, indent=4)
def _reset_user_settings(self) -> None:
"""Reset all user settings to default values"""
for key in USER_SETTINGS_KEYS:
setattr(self, key, None)
def _reset_cli_settings(self) -> None:
"""Reset all CLI settings to default values"""
for key in CLI_SETTINGS_KEYS:
setattr(self, key, DEFAULT_CLI_SETTINGS.get(key))

View File

@@ -1,9 +1,3 @@
DEFAULT_CREWAI_ENTERPRISE_URL = "https://app.crewai.com"
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_PROVIDER = "workos"
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_AUDIENCE = "client_01JNJQWBJ4SPFN3SWJM5T7BDG8"
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_CLIENT_ID = "client_01JYT06R59SP0NXYGD994NFXXX"
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_DOMAIN = "login.crewai.com"
ENV_VARS = {
"openai": [
{
@@ -326,4 +320,5 @@ DEFAULT_LLM_MODEL = "gpt-4o-mini"
JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
LITELLM_PARAMS = ["api_key", "api_base", "api_version"]

View File

@@ -1,84 +0,0 @@
import requests
from typing import Dict, Any
from rich.console import Console
from requests.exceptions import RequestException, JSONDecodeError
from crewai.cli.command import BaseCommand
from crewai.cli.settings.main import SettingsCommand
from crewai.cli.version import get_crewai_version
console = Console()
class EnterpriseConfigureCommand(BaseCommand):
def __init__(self):
super().__init__()
self.settings_command = SettingsCommand()
def configure(self, enterprise_url: str) -> None:
try:
enterprise_url = enterprise_url.rstrip('/')
oauth_config = self._fetch_oauth_config(enterprise_url)
self._update_oauth_settings(enterprise_url, oauth_config)
console.print(
f"✅ Successfully configured CrewAI Enterprise with OAuth2 settings from {enterprise_url}",
style="bold green"
)
except Exception as e:
console.print(f"❌ Failed to configure Enterprise settings: {str(e)}", style="bold red")
raise SystemExit(1)
def _fetch_oauth_config(self, enterprise_url: str) -> Dict[str, Any]:
oauth_endpoint = f"{enterprise_url}/auth/parameters"
try:
console.print(f"🔄 Fetching OAuth2 configuration from {oauth_endpoint}...")
headers = {
"Content-Type": "application/json",
"User-Agent": f"CrewAI-CLI/{get_crewai_version()}",
"X-Crewai-Version": get_crewai_version(),
}
response = requests.get(oauth_endpoint, timeout=30, headers=headers)
response.raise_for_status()
try:
oauth_config = response.json()
except JSONDecodeError:
raise ValueError(f"Invalid JSON response from {oauth_endpoint}")
required_fields = ['audience', 'domain', 'device_authorization_client_id', 'provider']
missing_fields = [field for field in required_fields if field not in oauth_config]
if missing_fields:
raise ValueError(f"Missing required fields in OAuth2 configuration: {', '.join(missing_fields)}")
console.print("✅ Successfully retrieved OAuth2 configuration", style="green")
return oauth_config
except RequestException as e:
raise ValueError(f"Failed to connect to enterprise URL: {str(e)}")
except Exception as e:
raise ValueError(f"Error fetching OAuth2 configuration: {str(e)}")
def _update_oauth_settings(self, enterprise_url: str, oauth_config: Dict[str, Any]) -> None:
try:
config_mapping = {
'enterprise_base_url': enterprise_url,
'oauth2_provider': oauth_config['provider'],
'oauth2_audience': oauth_config['audience'],
'oauth2_client_id': oauth_config['device_authorization_client_id'],
'oauth2_domain': oauth_config['domain']
}
console.print("🔄 Updating local OAuth2 configuration...")
for key, value in config_mapping.items():
self.settings_command.set(key, value)
console.print(f" ✓ Set {key}: {value}", style="dim")
except Exception as e:
raise ValueError(f"Failed to update OAuth2 settings: {str(e)}")

View File

@@ -1,3 +1,4 @@
from os import getenv
from typing import List, Optional
from urllib.parse import urljoin
@@ -5,7 +6,6 @@ import requests
from crewai.cli.config import Settings
from crewai.cli.version import get_crewai_version
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
class PlusAPI:
@@ -17,7 +17,6 @@ class PlusAPI:
ORGANIZATIONS_RESOURCE = "/crewai_plus/api/v1/me/organizations"
CREWS_RESOURCE = "/crewai_plus/api/v1/crews"
AGENTS_RESOURCE = "/crewai_plus/api/v1/agents"
TRACING_RESOURCE = "/crewai_plus/api/v1/tracing"
def __init__(self, api_key: str) -> None:
self.api_key = api_key
@@ -30,10 +29,7 @@ class PlusAPI:
settings = Settings()
if settings.org_uuid:
self.headers["X-Crewai-Organization-Id"] = settings.org_uuid
self.base_url = (
str(settings.enterprise_base_url) or DEFAULT_CREWAI_ENTERPRISE_URL
)
self.base_url = getenv("CREWAI_BASE_URL", "https://app.crewai.com")
def _make_request(self, method: str, endpoint: str, **kwargs) -> requests.Response:
url = urljoin(self.base_url, endpoint)
@@ -112,28 +108,7 @@ class PlusAPI:
def create_crew(self, payload) -> requests.Response:
return self._make_request("POST", self.CREWS_RESOURCE, json=payload)
def get_organizations(self) -> requests.Response:
return self._make_request("GET", self.ORGANIZATIONS_RESOURCE)
def send_trace_batch(self, payload) -> requests.Response:
return self._make_request("POST", self.TRACING_RESOURCE, json=payload)
def initialize_trace_batch(self, payload) -> requests.Response:
return self._make_request(
"POST", f"{self.TRACING_RESOURCE}/batches", json=payload
)
def send_trace_events(self, trace_batch_id: str, payload) -> requests.Response:
return self._make_request(
"POST",
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/events",
json=payload,
)
def finalize_trace_batch(self, trace_batch_id: str, payload) -> requests.Response:
return self._make_request(
"PATCH",
f"{self.TRACING_RESOURCE}/batches/{trace_batch_id}/finalize",
json=payload,
)

View File

@@ -1,67 +0,0 @@
from rich.console import Console
from rich.table import Table
from crewai.cli.command import BaseCommand
from crewai.cli.config import Settings, READONLY_SETTINGS_KEYS, HIDDEN_SETTINGS_KEYS
from typing import Any
console = Console()
class SettingsCommand(BaseCommand):
"""A class to handle CLI configuration commands."""
def __init__(self, settings_kwargs: dict[str, Any] = {}):
super().__init__()
self.settings = Settings(**settings_kwargs)
def list(self) -> None:
"""List all CLI configuration parameters."""
table = Table(title="CrewAI CLI Configuration")
table.add_column("Setting", style="cyan", no_wrap=True)
table.add_column("Value", style="green")
table.add_column("Description", style="yellow")
# Add all settings to the table
for field_name, field_info in Settings.model_fields.items():
if field_name in HIDDEN_SETTINGS_KEYS:
# Do not display hidden settings
continue
current_value = getattr(self.settings, field_name)
description = field_info.description or "No description available"
display_value = (
str(current_value) if current_value is not None else "Not set"
)
table.add_row(field_name, display_value, description)
console.print(table)
def set(self, key: str, value: str) -> None:
"""Set a CLI configuration parameter."""
readonly_settings = READONLY_SETTINGS_KEYS + HIDDEN_SETTINGS_KEYS
if not hasattr(self.settings, key) or key in readonly_settings:
console.print(
f"Error: Unknown or readonly configuration key '{key}'",
style="bold red",
)
console.print("Available keys:", style="yellow")
for field_name in Settings.model_fields.keys():
if field_name not in readonly_settings:
console.print(f" - {field_name}", style="yellow")
raise SystemExit(1)
setattr(self.settings, key, value)
self.settings.dump()
console.print(f"Successfully set '{key}' to '{value}'", style="bold green")
def reset_all_settings(self) -> None:
"""Reset all CLI configuration parameters to default values."""
self.settings.reset()
console.print(
"Successfully reset all configuration parameters to default values. It is recommended to run [bold yellow]'crewai login'[/bold yellow] to re-authenticate.",
style="bold green",
)

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.157.0,<1.0.0"
"crewai[tools]>=0.140.0,<1.0.0"
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.157.0,<1.0.0",
"crewai[tools]>=0.140.0,<1.0.0",
]
[project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<3.14"
dependencies = [
"crewai[tools]>=0.157.0"
"crewai[tools]>=0.140.0"
]
[tool.crewai]

View File

@@ -18,11 +18,6 @@ from typing import (
cast,
)
from opentelemetry import baggage
from opentelemetry.context import attach, detach
from crewai.utilities.crew.models import CrewContext
from pydantic import (
UUID4,
BaseModel,
@@ -47,6 +42,7 @@ from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.user.user_memory import UserMemory
from crewai.process import Process
from crewai.security import Fingerprint, SecurityConfig
from crewai.task import Task
@@ -72,12 +68,6 @@ from crewai.utilities.events.crew_events import (
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.event_listener import EventListener
from crewai.utilities.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
from crewai.utilities.formatter import (
aggregate_raw_outputs_from_task_outputs,
aggregate_raw_outputs_from_tasks,
@@ -100,6 +90,7 @@ class Crew(FlowTrackable, BaseModel):
manager_llm: The language model that will run manager agent.
manager_agent: Custom agent that will be used as manager.
memory: Whether the crew should use memory to store memories of it's execution.
memory_config: Configuration for the memory to be used for the crew.
cache: Whether the crew should use a cache to store the results of the tools execution.
function_calling_llm: The language model that will run the tool calling for all the agents.
process: The process flow that the crew will follow (e.g., sequential, hierarchical).
@@ -125,6 +116,7 @@ class Crew(FlowTrackable, BaseModel):
_short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
_long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
_entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
_user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()
_external_memory: Optional[InstanceOf[ExternalMemory]] = PrivateAttr()
_train: Optional[bool] = PrivateAttr(default=False)
_train_iteration: Optional[int] = PrivateAttr()
@@ -136,7 +128,7 @@ class Crew(FlowTrackable, BaseModel):
default_factory=TaskOutputStorageHandler
)
name: Optional[str] = Field(default="crew")
name: Optional[str] = Field(default=None)
cache: bool = Field(default=True)
tasks: List[Task] = Field(default_factory=list)
agents: List[BaseAgent] = Field(default_factory=list)
@@ -146,6 +138,10 @@ class Crew(FlowTrackable, BaseModel):
default=False,
description="Whether the crew should use memory to store memories of it's execution",
)
memory_config: Optional[Dict[str, Any]] = Field(
default=None,
description="Configuration for the memory to be used for the crew.",
)
short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(
default=None,
description="An Instance of the ShortTermMemory to be used by the Crew",
@@ -158,6 +154,10 @@ class Crew(FlowTrackable, BaseModel):
default=None,
description="An Instance of the EntityMemory to be used by the Crew",
)
user_memory: Optional[InstanceOf[UserMemory]] = Field(
default=None,
description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
)
external_memory: Optional[InstanceOf[ExternalMemory]] = Field(
default=None,
description="An Instance of the ExternalMemory to be used by the Crew",
@@ -244,14 +244,6 @@ class Crew(FlowTrackable, BaseModel):
default_factory=SecurityConfig,
description="Security configuration for the crew, including fingerprinting.",
)
token_usage: Optional[UsageMetrics] = Field(
default=None,
description="Metrics for the LLM usage during all tasks execution.",
)
tracing: Optional[bool] = Field(
default=False,
description="Whether to enable tracing for the crew.",
)
@field_validator("id", mode="before")
@classmethod
@@ -283,9 +275,6 @@ class Crew(FlowTrackable, BaseModel):
self._cache_handler = CacheHandler()
event_listener = EventListener()
if is_tracing_enabled() or self.tracing:
trace_listener = TraceCollectionListener(tracing=self.tracing)
trace_listener.setup_listeners(crewai_event_bus)
event_listener.verbose = self.verbose
event_listener.formatter.verbose = self.verbose
self._logger = Logger(verbose=self.verbose)
@@ -297,6 +286,20 @@ class Crew(FlowTrackable, BaseModel):
return self
def _initialize_user_memory(self):
if (
self.memory_config
and "user_memory" in self.memory_config
and self.memory_config.get("provider") == "mem0"
): # Check for user_memory in config
user_memory_config = self.memory_config["user_memory"]
if isinstance(
user_memory_config, dict
): # Check if it's a configuration dict
self._user_memory = UserMemory(crew=self)
else:
raise TypeError("user_memory must be a configuration dictionary")
def _initialize_default_memories(self):
self._long_term_memory = self._long_term_memory or LongTermMemory()
self._short_term_memory = self._short_term_memory or ShortTermMemory(
@@ -319,8 +322,12 @@ class Crew(FlowTrackable, BaseModel):
self._short_term_memory = self.short_term_memory
self._entity_memory = self.entity_memory
# UserMemory is gonna to be deprecated in the future, but we have to initialize a default value for now
self._user_memory = None
if self.memory:
self._initialize_default_memories()
self._initialize_user_memory()
return self
@@ -563,7 +570,7 @@ class Crew(FlowTrackable, BaseModel):
crewai_event_bus.emit(
self,
CrewTrainStartedEvent(
crew_name=self.name,
crew_name=self.name or "crew",
n_iterations=n_iterations,
filename=filename,
inputs=inputs,
@@ -590,7 +597,7 @@ class Crew(FlowTrackable, BaseModel):
crewai_event_bus.emit(
self,
CrewTrainCompletedEvent(
crew_name=self.name,
crew_name=self.name or "crew",
n_iterations=n_iterations,
filename=filename,
),
@@ -598,7 +605,7 @@ class Crew(FlowTrackable, BaseModel):
except Exception as e:
crewai_event_bus.emit(
self,
CrewTrainFailedEvent(error=str(e), crew_name=self.name),
CrewTrainFailedEvent(error=str(e), crew_name=self.name or "crew"),
)
self._logger.log("error", f"Training failed: {e}", color="red")
CrewTrainingHandler(TRAINING_DATA_FILE).clear()
@@ -609,11 +616,6 @@ class Crew(FlowTrackable, BaseModel):
self,
inputs: Optional[Dict[str, Any]] = None,
) -> CrewOutput:
ctx = baggage.set_baggage(
"crew_context", CrewContext(id=str(self.id), key=self.key)
)
token = attach(ctx)
try:
for before_callback in self.before_kickoff_callbacks:
if inputs is None:
@@ -622,7 +624,7 @@ class Crew(FlowTrackable, BaseModel):
crewai_event_bus.emit(
self,
CrewKickoffStartedEvent(crew_name=self.name, inputs=inputs),
CrewKickoffStartedEvent(crew_name=self.name or "crew", inputs=inputs),
)
# Starts the crew to work on its assigned tasks.
@@ -671,11 +673,9 @@ class Crew(FlowTrackable, BaseModel):
except Exception as e:
crewai_event_bus.emit(
self,
CrewKickoffFailedEvent(error=str(e), crew_name=self.name),
CrewKickoffFailedEvent(error=str(e), crew_name=self.name or "crew"),
)
raise
finally:
detach(token)
def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
"""Executes the Crew's workflow for each input in the list and aggregates results."""
@@ -1061,13 +1061,11 @@ class Crew(FlowTrackable, BaseModel):
final_string_output = final_task_output.raw
self._finish_execution(final_string_output)
self.token_usage = self.calculate_usage_metrics()
token_usage = self.calculate_usage_metrics()
crewai_event_bus.emit(
self,
CrewKickoffCompletedEvent(
crew_name=self.name,
output=final_task_output,
total_tokens=self.token_usage.total_tokens,
crew_name=self.name or "crew", output=final_task_output
),
)
return CrewOutput(
@@ -1075,7 +1073,7 @@ class Crew(FlowTrackable, BaseModel):
pydantic=final_task_output.pydantic,
json_dict=final_task_output.json_dict,
tasks_output=task_outputs,
token_usage=self.token_usage,
token_usage=token_usage,
)
def _process_async_tasks(
@@ -1244,6 +1242,8 @@ class Crew(FlowTrackable, BaseModel):
copied_data["entity_memory"] = self.entity_memory.model_copy(deep=True)
if self.external_memory:
copied_data["external_memory"] = self.external_memory.model_copy(deep=True)
if self.user_memory:
copied_data["user_memory"] = self.user_memory.model_copy(deep=True)
copied_data.pop("agents", None)
copied_data.pop("tasks", None)
@@ -1312,14 +1312,13 @@ class Crew(FlowTrackable, BaseModel):
crewai_event_bus.emit(
self,
CrewTestStartedEvent(
crew_name=self.name,
crew_name=self.name or "crew",
n_iterations=n_iterations,
eval_llm=llm_instance,
inputs=inputs,
),
)
test_crew = self.copy()
evaluator = CrewEvaluator(test_crew, llm_instance)
for i in range(1, n_iterations + 1):
@@ -1331,13 +1330,13 @@ class Crew(FlowTrackable, BaseModel):
crewai_event_bus.emit(
self,
CrewTestCompletedEvent(
crew_name=self.name,
crew_name=self.name or "crew",
),
)
except Exception as e:
crewai_event_bus.emit(
self,
CrewTestFailedEvent(error=str(e), crew_name=self.name),
CrewTestFailedEvent(error=str(e), crew_name=self.name or "crew"),
)
raise

View File

@@ -1,40 +0,0 @@
from crewai.experimental.evaluation import (
BaseEvaluator,
EvaluationScore,
MetricCategory,
AgentEvaluationResult,
SemanticQualityEvaluator,
GoalAlignmentEvaluator,
ReasoningEfficiencyEvaluator,
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
ToolInvocationEvaluator,
EvaluationTraceCallback,
create_evaluation_callbacks,
AgentEvaluator,
create_default_evaluator,
ExperimentRunner,
ExperimentResults,
ExperimentResult,
)
__all__ = [
"BaseEvaluator",
"EvaluationScore",
"MetricCategory",
"AgentEvaluationResult",
"SemanticQualityEvaluator",
"GoalAlignmentEvaluator",
"ReasoningEfficiencyEvaluator",
"ToolSelectionEvaluator",
"ParameterExtractionEvaluator",
"ToolInvocationEvaluator",
"EvaluationTraceCallback",
"create_evaluation_callbacks",
"AgentEvaluator",
"create_default_evaluator",
"ExperimentRunner",
"ExperimentResults",
"ExperimentResult"
]

View File

@@ -1,51 +0,0 @@
from crewai.experimental.evaluation.base_evaluator import (
BaseEvaluator,
EvaluationScore,
MetricCategory,
AgentEvaluationResult
)
from crewai.experimental.evaluation.metrics import (
SemanticQualityEvaluator,
GoalAlignmentEvaluator,
ReasoningEfficiencyEvaluator,
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
ToolInvocationEvaluator
)
from crewai.experimental.evaluation.evaluation_listener import (
EvaluationTraceCallback,
create_evaluation_callbacks
)
from crewai.experimental.evaluation.agent_evaluator import (
AgentEvaluator,
create_default_evaluator
)
from crewai.experimental.evaluation.experiment import (
ExperimentRunner,
ExperimentResults,
ExperimentResult
)
__all__ = [
"BaseEvaluator",
"EvaluationScore",
"MetricCategory",
"AgentEvaluationResult",
"SemanticQualityEvaluator",
"GoalAlignmentEvaluator",
"ReasoningEfficiencyEvaluator",
"ToolSelectionEvaluator",
"ParameterExtractionEvaluator",
"ToolInvocationEvaluator",
"EvaluationTraceCallback",
"create_evaluation_callbacks",
"AgentEvaluator",
"create_default_evaluator",
"ExperimentRunner",
"ExperimentResults",
"ExperimentResult"
]

View File

@@ -1,245 +0,0 @@
import threading
from typing import Any
from crewai.experimental.evaluation.base_evaluator import AgentEvaluationResult, AggregationStrategy
from crewai.agent import Agent
from crewai.task import Task
from crewai.experimental.evaluation.evaluation_display import EvaluationDisplayFormatter
from crewai.utilities.events.agent_events import AgentEvaluationStartedEvent, AgentEvaluationCompletedEvent, AgentEvaluationFailedEvent
from crewai.experimental.evaluation import BaseEvaluator, create_evaluation_callbacks
from collections.abc import Sequence
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
from crewai.utilities.events.task_events import TaskCompletedEvent
from crewai.utilities.events.agent_events import LiteAgentExecutionCompletedEvent
from crewai.experimental.evaluation.base_evaluator import AgentAggregatedEvaluationResult, EvaluationScore, MetricCategory
class ExecutionState:
def __init__(self):
self.traces = {}
self.current_agent_id: str | None = None
self.current_task_id: str | None = None
self.iteration = 1
self.iterations_results = {}
self.agent_evaluators = {}
class AgentEvaluator:
def __init__(
self,
agents: list[Agent],
evaluators: Sequence[BaseEvaluator] | None = None,
):
self.agents: list[Agent] = agents
self.evaluators: Sequence[BaseEvaluator] | None = evaluators
self.callback = create_evaluation_callbacks()
self.console_formatter = ConsoleFormatter()
self.display_formatter = EvaluationDisplayFormatter()
self._thread_local: threading.local = threading.local()
for agent in self.agents:
self._execution_state.agent_evaluators[str(agent.id)] = self.evaluators
self._subscribe_to_events()
@property
def _execution_state(self) -> ExecutionState:
if not hasattr(self._thread_local, 'execution_state'):
self._thread_local.execution_state = ExecutionState()
return self._thread_local.execution_state
def _subscribe_to_events(self) -> None:
from typing import cast
crewai_event_bus.register_handler(TaskCompletedEvent, cast(Any, self._handle_task_completed))
crewai_event_bus.register_handler(LiteAgentExecutionCompletedEvent, cast(Any, self._handle_lite_agent_completed))
def _handle_task_completed(self, source: Any, event: TaskCompletedEvent) -> None:
assert event.task is not None
agent = event.task.agent
if agent and str(getattr(agent, 'id', 'unknown')) in self._execution_state.agent_evaluators:
self.emit_evaluation_started_event(agent_role=agent.role, agent_id=str(agent.id), task_id=str(event.task.id))
state = ExecutionState()
state.current_agent_id = str(agent.id)
state.current_task_id = str(event.task.id)
assert state.current_agent_id is not None and state.current_task_id is not None
trace = self.callback.get_trace(state.current_agent_id, state.current_task_id)
if not trace:
return
result = self.evaluate(
agent=agent,
task=event.task,
execution_trace=trace,
final_output=event.output,
state=state
)
current_iteration = self._execution_state.iteration
if current_iteration not in self._execution_state.iterations_results:
self._execution_state.iterations_results[current_iteration] = {}
if agent.role not in self._execution_state.iterations_results[current_iteration]:
self._execution_state.iterations_results[current_iteration][agent.role] = []
self._execution_state.iterations_results[current_iteration][agent.role].append(result)
def _handle_lite_agent_completed(self, source: object, event: LiteAgentExecutionCompletedEvent) -> None:
agent_info = event.agent_info
agent_id = str(agent_info["id"])
if agent_id in self._execution_state.agent_evaluators:
state = ExecutionState()
state.current_agent_id = agent_id
state.current_task_id = "lite_task"
target_agent = None
for agent in self.agents:
if str(agent.id) == agent_id:
target_agent = agent
break
if not target_agent:
return
assert state.current_agent_id is not None and state.current_task_id is not None
trace = self.callback.get_trace(state.current_agent_id, state.current_task_id)
if not trace:
return
result = self.evaluate(
agent=target_agent,
execution_trace=trace,
final_output=event.output,
state=state
)
current_iteration = self._execution_state.iteration
if current_iteration not in self._execution_state.iterations_results:
self._execution_state.iterations_results[current_iteration] = {}
agent_role = target_agent.role
if agent_role not in self._execution_state.iterations_results[current_iteration]:
self._execution_state.iterations_results[current_iteration][agent_role] = []
self._execution_state.iterations_results[current_iteration][agent_role].append(result)
def set_iteration(self, iteration: int) -> None:
self._execution_state.iteration = iteration
def reset_iterations_results(self) -> None:
self._execution_state.iterations_results = {}
def get_evaluation_results(self) -> dict[str, list[AgentEvaluationResult]]:
if self._execution_state.iterations_results and self._execution_state.iteration in self._execution_state.iterations_results:
return self._execution_state.iterations_results[self._execution_state.iteration]
return {}
def display_results_with_iterations(self) -> None:
self.display_formatter.display_summary_results(self._execution_state.iterations_results)
def get_agent_evaluation(self, strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE, include_evaluation_feedback: bool = True) -> dict[str, AgentAggregatedEvaluationResult]:
agent_results = {}
with crewai_event_bus.scoped_handlers():
task_results = self.get_evaluation_results()
for agent_role, results in task_results.items():
if not results:
continue
agent_id = results[0].agent_id
aggregated_result = self.display_formatter._aggregate_agent_results(
agent_id=agent_id,
agent_role=agent_role,
results=results,
strategy=strategy
)
agent_results[agent_role] = aggregated_result
if self._execution_state.iterations_results and self._execution_state.iteration == max(self._execution_state.iterations_results.keys(), default=0):
self.display_results_with_iterations()
if include_evaluation_feedback:
self.display_evaluation_with_feedback()
return agent_results
def display_evaluation_with_feedback(self) -> None:
self.display_formatter.display_evaluation_with_feedback(self._execution_state.iterations_results)
def evaluate(
self,
agent: Agent,
execution_trace: dict[str, Any],
final_output: Any,
state: ExecutionState,
task: Task | None = None,
) -> AgentEvaluationResult:
result = AgentEvaluationResult(
agent_id=state.current_agent_id or str(agent.id),
task_id=state.current_task_id or (str(task.id) if task else "unknown_task")
)
assert self.evaluators is not None
task_id = str(task.id) if task else None
for evaluator in self.evaluators:
try:
self.emit_evaluation_started_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id)
score = evaluator.evaluate(
agent=agent,
task=task,
execution_trace=execution_trace,
final_output=final_output
)
result.metrics[evaluator.metric_category] = score
self.emit_evaluation_completed_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id, metric_category=evaluator.metric_category, score=score)
except Exception as e:
self.emit_evaluation_failed_event(agent_role=agent.role, agent_id=str(agent.id), task_id=task_id, error=str(e))
self.console_formatter.print(f"Error in {evaluator.metric_category.value} evaluator: {str(e)}")
return result
def emit_evaluation_started_event(self, agent_role: str, agent_id: str, task_id: str | None = None):
crewai_event_bus.emit(
self,
AgentEvaluationStartedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration)
)
def emit_evaluation_completed_event(self, agent_role: str, agent_id: str, task_id: str | None = None, metric_category: MetricCategory | None = None, score: EvaluationScore | None = None):
crewai_event_bus.emit(
self,
AgentEvaluationCompletedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration, metric_category=metric_category, score=score)
)
def emit_evaluation_failed_event(self, agent_role: str, agent_id: str, error: str, task_id: str | None = None):
crewai_event_bus.emit(
self,
AgentEvaluationFailedEvent(agent_role=agent_role, agent_id=agent_id, task_id=task_id, iteration=self._execution_state.iteration, error=error)
)
def create_default_evaluator(agents: list[Agent], llm: Any = None):
from crewai.experimental.evaluation import (
GoalAlignmentEvaluator,
SemanticQualityEvaluator,
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
ToolInvocationEvaluator,
ReasoningEfficiencyEvaluator
)
evaluators = [
GoalAlignmentEvaluator(llm=llm),
SemanticQualityEvaluator(llm=llm),
ToolSelectionEvaluator(llm=llm),
ParameterExtractionEvaluator(llm=llm),
ToolInvocationEvaluator(llm=llm),
ReasoningEfficiencyEvaluator(llm=llm),
]
return AgentEvaluator(evaluators=evaluators, agents=agents)
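Below, for orientation only, is a rough usage sketch of the module removed above: wire the default evaluator set to a crew's agents, run the crew so the TaskCompletedEvent handler fires, then read the aggregated scores. The Agent/Task/Crew field values are illustrative assumptions, not taken from this diff.
# Hedged sketch: evaluate a crew's agents with the default evaluator set (field values are placeholders).
from crewai import Agent, Crew, Task
from crewai.experimental.evaluation import create_default_evaluator

researcher = Agent(role="Researcher", goal="Summarize the given topic", backstory="Experienced analyst")
task = Task(description="Research {topic}", expected_output="A short summary", agent=researcher)
crew = Crew(agents=[researcher], tasks=[task])

evaluator = create_default_evaluator(agents=crew.agents)  # subscribes to task/lite-agent completion events
crew.kickoff(inputs={"topic": "solar energy"})

# Aggregated per-agent results (simple average across tasks), with feedback tables printed as a side effect
for role, aggregated in evaluator.get_agent_evaluation().items():
    print(role, aggregated.overall_score)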

View File

@@ -1,125 +0,0 @@
import abc
import enum
from enum import Enum
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.task import Task
from crewai.llm import BaseLLM
from crewai.utilities.llm_utils import create_llm
class MetricCategory(enum.Enum):
GOAL_ALIGNMENT = "goal_alignment"
SEMANTIC_QUALITY = "semantic_quality"
REASONING_EFFICIENCY = "reasoning_efficiency"
TOOL_SELECTION = "tool_selection"
PARAMETER_EXTRACTION = "parameter_extraction"
TOOL_INVOCATION = "tool_invocation"
def title(self):
return self.value.replace('_', ' ').title()
class EvaluationScore(BaseModel):
score: float | None = Field(
default=5.0,
description="Numeric score from 0-10 where 0 is worst and 10 is best, None if not applicable",
ge=0.0,
le=10.0
)
feedback: str = Field(
default="",
description="Detailed feedback explaining the evaluation score"
)
raw_response: str | None = Field(
default=None,
description="Raw response from the evaluator (e.g., LLM)"
)
def __str__(self) -> str:
if self.score is None:
return f"Score: N/A - {self.feedback}"
return f"Score: {self.score:.1f}/10 - {self.feedback}"
class BaseEvaluator(abc.ABC):
def __init__(self, llm: BaseLLM | None = None):
self.llm: BaseLLM | None = create_llm(llm)
@property
@abc.abstractmethod
def metric_category(self) -> MetricCategory:
pass
@abc.abstractmethod
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
final_output: Any,
task: Task | None = None,
) -> EvaluationScore:
pass
class AgentEvaluationResult(BaseModel):
agent_id: str = Field(description="ID of the evaluated agent")
task_id: str = Field(description="ID of the task that was executed")
metrics: Dict[MetricCategory, EvaluationScore] = Field(
default_factory=dict,
description="Evaluation scores for each metric category"
)
class AggregationStrategy(Enum):
SIMPLE_AVERAGE = "simple_average" # Equal weight to all tasks
WEIGHTED_BY_COMPLEXITY = "weighted_by_complexity" # Weight by task complexity
BEST_PERFORMANCE = "best_performance" # Use best scores across tasks
WORST_PERFORMANCE = "worst_performance" # Use worst scores across tasks
class AgentAggregatedEvaluationResult(BaseModel):
agent_id: str = Field(
default="",
description="ID of the agent"
)
agent_role: str = Field(
default="",
description="Role of the agent"
)
task_count: int = Field(
default=0,
description="Number of tasks included in this aggregation"
)
aggregation_strategy: AggregationStrategy = Field(
default=AggregationStrategy.SIMPLE_AVERAGE,
description="Strategy used for aggregation"
)
metrics: Dict[MetricCategory, EvaluationScore] = Field(
default_factory=dict,
description="Aggregated metrics across all tasks"
)
task_results: List[str] = Field(
default_factory=list,
description="IDs of tasks included in this aggregation"
)
overall_score: Optional[float] = Field(
default=None,
description="Overall score for this agent"
)
def __str__(self) -> str:
result = f"Agent Evaluation: {self.agent_role}\n"
result += f"Strategy: {self.aggregation_strategy.value}\n"
result += f"Tasks evaluated: {self.task_count}\n"
for category, score in self.metrics.items():
result += f"\n\n- {category.value.upper()}: {score.score}/10\n"
if score.feedback:
detailed_feedback = "\n ".join(score.feedback.split('\n'))
result += f" {detailed_feedback}\n"
return result
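If a custom metric were needed, the abstract interface above could be extended along these lines; this is only a sketch that reuses an existing MetricCategory and a toy length heuristic, not a recommended evaluator.
# Hedged sketch: a deterministic evaluator built on BaseEvaluator, reusing SEMANTIC_QUALITY as its category.
from typing import Any, Dict
from crewai.agent import Agent
from crewai.task import Task
from crewai.experimental.evaluation.base_evaluator import BaseEvaluator, EvaluationScore, MetricCategory

class OutputLengthEvaluator(BaseEvaluator):
    @property
    def metric_category(self) -> MetricCategory:
        return MetricCategory.SEMANTIC_QUALITY

    def evaluate(self, agent: Agent, execution_trace: Dict[str, Any], final_output: Any, task: Task | None = None) -> EvaluationScore:
        text = str(final_output)
        score = min(10.0, len(text) / 100)  # toy heuristic, not a real quality measure
        return EvaluationScore(score=score, feedback=f"Output length: {len(text)} characters")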

View File

@@ -1,333 +0,0 @@
from collections import defaultdict
from typing import Dict, Any, List
from rich.table import Table
from rich.box import HEAVY_EDGE, ROUNDED
from collections.abc import Sequence
from crewai.experimental.evaluation.base_evaluator import AgentAggregatedEvaluationResult, AggregationStrategy, AgentEvaluationResult, MetricCategory
from crewai.experimental.evaluation import EvaluationScore
from crewai.utilities.events.utils.console_formatter import ConsoleFormatter
from crewai.utilities.llm_utils import create_llm
class EvaluationDisplayFormatter:
def __init__(self):
self.console_formatter = ConsoleFormatter()
def display_evaluation_with_feedback(self, iterations_results: Dict[int, Dict[str, List[Any]]]):
if not iterations_results:
self.console_formatter.print("[yellow]No evaluation results to display[/yellow]")
return
all_agent_roles: set[str] = set()
for iter_results in iterations_results.values():
all_agent_roles.update(iter_results.keys())
for agent_role in sorted(all_agent_roles):
self.console_formatter.print(f"\n[bold cyan]Agent: {agent_role}[/bold cyan]")
for iter_num, results in sorted(iterations_results.items()):
if agent_role not in results or not results[agent_role]:
continue
agent_results = results[agent_role]
agent_id = agent_results[0].agent_id
aggregated_result = self._aggregate_agent_results(
agent_id=agent_id,
agent_role=agent_role,
results=agent_results,
)
self.console_formatter.print(f"\n[bold]Iteration {iter_num}[/bold]")
table = Table(box=ROUNDED)
table.add_column("Metric", style="cyan")
table.add_column("Score (1-10)", justify="center")
table.add_column("Feedback", style="green")
if aggregated_result.metrics:
for metric, evaluation_score in aggregated_result.metrics.items():
score = evaluation_score.score
if isinstance(score, (int, float)):
if score >= 8.0:
score_text = f"[green]{score:.1f}[/green]"
elif score >= 6.0:
score_text = f"[cyan]{score:.1f}[/cyan]"
elif score >= 4.0:
score_text = f"[yellow]{score:.1f}[/yellow]"
else:
score_text = f"[red]{score:.1f}[/red]"
else:
score_text = "[dim]N/A[/dim]"
table.add_section()
table.add_row(
metric.title(),
score_text,
evaluation_score.feedback or ""
)
if aggregated_result.overall_score is not None:
overall_score = aggregated_result.overall_score
if overall_score >= 8.0:
overall_color = "green"
elif overall_score >= 6.0:
overall_color = "cyan"
elif overall_score >= 4.0:
overall_color = "yellow"
else:
overall_color = "red"
table.add_section()
table.add_row(
"Overall Score",
f"[{overall_color}]{overall_score:.1f}[/]",
"Overall agent evaluation score"
)
self.console_formatter.print(table)
def display_summary_results(self, iterations_results: Dict[int, Dict[str, List[AgentAggregatedEvaluationResult]]]):
if not iterations_results:
self.console_formatter.print("[yellow]No evaluation results to display[/yellow]")
return
self.console_formatter.print("\n")
table = Table(title="Agent Performance Scores \n (1-10 Higher is better)", box=HEAVY_EDGE)
table.add_column("Agent/Metric", style="cyan")
for iter_num in sorted(iterations_results.keys()):
run_label = f"Run {iter_num}"
table.add_column(run_label, justify="center")
table.add_column("Avg. Total", justify="center")
all_agent_roles: set[str] = set()
for results in iterations_results.values():
all_agent_roles.update(results.keys())
for agent_role in sorted(all_agent_roles):
agent_scores_by_iteration = {}
agent_metrics_by_iteration = {}
for iter_num, results in sorted(iterations_results.items()):
if agent_role not in results or not results[agent_role]:
continue
agent_results = results[agent_role]
agent_id = agent_results[0].agent_id
aggregated_result = self._aggregate_agent_results(
agent_id=agent_id,
agent_role=agent_role,
results=agent_results,
strategy=AggregationStrategy.SIMPLE_AVERAGE
)
valid_scores = [score.score for score in aggregated_result.metrics.values()
if score.score is not None]
if valid_scores:
avg_score = sum(valid_scores) / len(valid_scores)
agent_scores_by_iteration[iter_num] = avg_score
agent_metrics_by_iteration[iter_num] = aggregated_result.metrics
if not agent_scores_by_iteration:
continue
avg_across_iterations = sum(agent_scores_by_iteration.values()) / len(agent_scores_by_iteration)
row = [f"[bold]{agent_role}[/bold]"]
for iter_num in sorted(iterations_results.keys()):
if iter_num in agent_scores_by_iteration:
score = agent_scores_by_iteration[iter_num]
if score >= 8.0:
color = "green"
elif score >= 6.0:
color = "cyan"
elif score >= 4.0:
color = "yellow"
else:
color = "red"
row.append(f"[bold {color}]{score:.1f}[/]")
else:
row.append("-")
if avg_across_iterations >= 8.0:
color = "green"
elif avg_across_iterations >= 6.0:
color = "cyan"
elif avg_across_iterations >= 4.0:
color = "yellow"
else:
color = "red"
row.append(f"[bold {color}]{avg_across_iterations:.1f}[/]")
table.add_row(*row)
all_metrics: set[Any] = set()
for metrics in agent_metrics_by_iteration.values():
all_metrics.update(metrics.keys())
for metric in sorted(all_metrics, key=lambda x: x.value):
metric_scores = []
row = [f" - {metric.title()}"]
for iter_num in sorted(iterations_results.keys()):
if (iter_num in agent_metrics_by_iteration and
metric in agent_metrics_by_iteration[iter_num]):
metric_score = agent_metrics_by_iteration[iter_num][metric].score
if metric_score is not None:
metric_scores.append(metric_score)
if metric_score >= 8.0:
color = "green"
elif metric_score >= 6.0:
color = "cyan"
elif metric_score >= 4.0:
color = "yellow"
else:
color = "red"
row.append(f"[{color}]{metric_score:.1f}[/]")
else:
row.append("[dim]N/A[/dim]")
else:
row.append("-")
if metric_scores:
avg = sum(metric_scores) / len(metric_scores)
if avg >= 8.0:
color = "green"
elif avg >= 6.0:
color = "cyan"
elif avg >= 4.0:
color = "yellow"
else:
color = "red"
row.append(f"[{color}]{avg:.1f}[/]")
else:
row.append("-")
table.add_row(*row)
table.add_row(*[""] * (len(sorted(iterations_results.keys())) + 2))
self.console_formatter.print(table)
self.console_formatter.print("\n")
def _aggregate_agent_results(
self,
agent_id: str,
agent_role: str,
results: Sequence[AgentEvaluationResult],
strategy: AggregationStrategy = AggregationStrategy.SIMPLE_AVERAGE,
) -> AgentAggregatedEvaluationResult:
metrics_by_category: dict[MetricCategory, list[EvaluationScore]] = defaultdict(list)
for result in results:
for metric_name, evaluation_score in result.metrics.items():
metrics_by_category[metric_name].append(evaluation_score)
aggregated_metrics: dict[MetricCategory, EvaluationScore] = {}
for category, scores in metrics_by_category.items():
valid_scores = [s.score for s in scores if s.score is not None]
avg_score = sum(valid_scores) / len(valid_scores) if valid_scores else None
feedbacks = [s.feedback for s in scores if s.feedback]
feedback_summary = None
if feedbacks:
if len(feedbacks) > 1:
feedback_summary = self._summarize_feedbacks(
agent_role=agent_role,
metric=category.title(),
feedbacks=feedbacks,
scores=[s.score for s in scores],
strategy=strategy
)
else:
feedback_summary = feedbacks[0]
aggregated_metrics[category] = EvaluationScore(
score=avg_score,
feedback=feedback_summary
)
overall_score = None
if aggregated_metrics:
valid_scores = [m.score for m in aggregated_metrics.values() if m.score is not None]
if valid_scores:
overall_score = sum(valid_scores) / len(valid_scores)
return AgentAggregatedEvaluationResult(
agent_id=agent_id,
agent_role=agent_role,
metrics=aggregated_metrics,
overall_score=overall_score,
task_count=len(results),
aggregation_strategy=strategy
)
def _summarize_feedbacks(
self,
agent_role: str,
metric: str,
feedbacks: List[str],
scores: List[float | None],
strategy: AggregationStrategy
) -> str:
if len(feedbacks) <= 2 and all(len(fb) < 200 for fb in feedbacks):
return "\n\n".join([f"Feedback {i+1}: {fb}" for i, fb in enumerate(feedbacks)])
try:
llm = create_llm()
formatted_feedbacks = []
for i, (feedback, score) in enumerate(zip(feedbacks, scores)):
if len(feedback) > 500:
feedback = feedback[:500] + "..."
score_text = f"{score:.1f}" if score is not None else "N/A"
formatted_feedbacks.append(f"Feedback #{i+1} (Score: {score_text}):\n{feedback}")
all_feedbacks = "\n\n" + "\n\n---\n\n".join(formatted_feedbacks)
strategy_guidance = ""
if strategy == AggregationStrategy.BEST_PERFORMANCE:
strategy_guidance = "Focus on the highest-scoring aspects and strengths demonstrated."
elif strategy == AggregationStrategy.WORST_PERFORMANCE:
strategy_guidance = "Focus on areas that need improvement and common issues across tasks."
else:
strategy_guidance = "Provide a balanced analysis of strengths and weaknesses across all tasks."
prompt = [
{"role": "system", "content": f"""You are an expert evaluator creating a comprehensive summary of agent performance feedback.
Your job is to synthesize multiple feedback points about the same metric across different tasks.
Create a concise, insightful summary that captures the key patterns and themes from all feedback.
{strategy_guidance}
Your summary should be:
1. Specific and concrete (not vague or general)
2. Focused on actionable insights
3. Highlighting patterns across tasks
4. 150-250 words in length
The summary should be directly usable as final feedback for the agent's performance on this metric."""},
{"role": "user", "content": f"""I need a synthesized summary of the following feedback for:
Agent Role: {agent_role}
Metric: {metric.title()}
{all_feedbacks}
"""}
]
assert llm is not None
response = llm.call(prompt)
return response
except Exception:
return "Synthesized from multiple tasks: " + "\n\n".join([f"- {fb[:500]}..." for fb in feedbacks])

View File

@@ -1,234 +0,0 @@
from datetime import datetime
from typing import Any, Dict, Optional
from collections.abc import Sequence
from crewai.agent import Agent
from crewai.task import Task
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crewai_event_bus import CrewAIEventsBus
from crewai.utilities.events.agent_events import (
AgentExecutionStartedEvent,
AgentExecutionCompletedEvent,
LiteAgentExecutionStartedEvent,
LiteAgentExecutionCompletedEvent
)
from crewai.utilities.events.tool_usage_events import (
ToolUsageFinishedEvent,
ToolUsageErrorEvent,
ToolExecutionErrorEvent,
ToolSelectionErrorEvent,
ToolValidateInputErrorEvent
)
from crewai.utilities.events.llm_events import (
LLMCallStartedEvent,
LLMCallCompletedEvent
)
class EvaluationTraceCallback(BaseEventListener):
"""Event listener for collecting execution traces for evaluation.
This listener attaches to the event bus to collect detailed information
about the execution process, including agent steps, tool uses, knowledge
retrievals, and final output - all for use in agent evaluation.
"""
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance._initialized = False
return cls._instance
def __init__(self):
if not hasattr(self, "_initialized") or not self._initialized:
super().__init__()
self.traces = {}
self.current_agent_id = None
self.current_task_id = None
self._initialized = True
def setup_listeners(self, event_bus: CrewAIEventsBus):
@event_bus.on(AgentExecutionStartedEvent)
def on_agent_started(source, event: AgentExecutionStartedEvent):
self.on_agent_start(event.agent, event.task)
@event_bus.on(LiteAgentExecutionStartedEvent)
def on_lite_agent_started(source, event: LiteAgentExecutionStartedEvent):
self.on_lite_agent_start(event.agent_info)
@event_bus.on(AgentExecutionCompletedEvent)
def on_agent_completed(source, event: AgentExecutionCompletedEvent):
self.on_agent_finish(event.agent, event.task, event.output)
@event_bus.on(LiteAgentExecutionCompletedEvent)
def on_lite_agent_completed(source, event: LiteAgentExecutionCompletedEvent):
self.on_lite_agent_finish(event.output)
@event_bus.on(ToolUsageFinishedEvent)
def on_tool_completed(source, event: ToolUsageFinishedEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.output, success=True)
@event_bus.on(ToolUsageErrorEvent)
def on_tool_usage_error(source, event: ToolUsageErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="usage_error")
@event_bus.on(ToolExecutionErrorEvent)
def on_tool_execution_error(source, event: ToolExecutionErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="execution_error")
@event_bus.on(ToolSelectionErrorEvent)
def on_tool_selection_error(source, event: ToolSelectionErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="selection_error")
@event_bus.on(ToolValidateInputErrorEvent)
def on_tool_validate_input_error(source, event: ToolValidateInputErrorEvent):
self.on_tool_use(event.tool_name, event.tool_args, event.error,
success=False, error_type="validation_error")
@event_bus.on(LLMCallStartedEvent)
def on_llm_call_started(source, event: LLMCallStartedEvent):
self.on_llm_call_start(event.messages, event.tools)
@event_bus.on(LLMCallCompletedEvent)
def on_llm_call_completed(source, event: LLMCallCompletedEvent):
self.on_llm_call_end(event.messages, event.response)
def on_lite_agent_start(self, agent_info: dict[str, Any]):
self.current_agent_id = agent_info['id']
self.current_task_id = "lite_task"
trace_key = f"{self.current_agent_id}_{self.current_task_id}"
self._init_trace(
trace_key=trace_key,
agent_id=self.current_agent_id,
task_id=self.current_task_id,
tool_uses=[],
llm_calls=[],
start_time=datetime.now(),
final_output=None
)
def _init_trace(self, trace_key: str, **kwargs: Any):
self.traces[trace_key] = kwargs
def on_agent_start(self, agent: Agent, task: Task):
self.current_agent_id = agent.id
self.current_task_id = task.id
trace_key = f"{agent.id}_{task.id}"
self._init_trace(
trace_key=trace_key,
agent_id=agent.id,
task_id=task.id,
tool_uses=[],
llm_calls=[],
start_time=datetime.now(),
final_output=None
)
def on_agent_finish(self, agent: Agent, task: Task, output: Any):
trace_key = f"{agent.id}_{task.id}"
if trace_key in self.traces:
self.traces[trace_key]["final_output"] = output
self.traces[trace_key]["end_time"] = datetime.now()
self._reset_current()
def _reset_current(self):
self.current_agent_id = None
self.current_task_id = None
def on_lite_agent_finish(self, output: Any):
trace_key = f"{self.current_agent_id}_lite_task"
if trace_key in self.traces:
self.traces[trace_key]["final_output"] = output
self.traces[trace_key]["end_time"] = datetime.now()
self._reset_current()
def on_tool_use(self, tool_name: str, tool_args: dict[str, Any] | str, result: Any,
success: bool = True, error_type: str | None = None):
if not self.current_agent_id or not self.current_task_id:
return
trace_key = f"{self.current_agent_id}_{self.current_task_id}"
if trace_key in self.traces:
tool_use = {
"tool": tool_name,
"args": tool_args,
"result": result,
"success": success,
"timestamp": datetime.now()
}
# Add error information if applicable
if not success and error_type:
tool_use["error"] = True
tool_use["error_type"] = error_type
self.traces[trace_key]["tool_uses"].append(tool_use)
def on_llm_call_start(self, messages: str | Sequence[dict[str, Any]] | None, tools: Sequence[dict[str, Any]] | None = None):
if not self.current_agent_id or not self.current_task_id:
return
trace_key = f"{self.current_agent_id}_{self.current_task_id}"
if trace_key not in self.traces:
return
self.current_llm_call = {
"messages": messages,
"tools": tools,
"start_time": datetime.now(),
"response": None,
"end_time": None
}
def on_llm_call_end(self, messages: str | list[dict[str, Any]] | None, response: Any):
if not self.current_agent_id or not self.current_task_id:
return
trace_key = f"{self.current_agent_id}_{self.current_task_id}"
if trace_key not in self.traces:
return
total_tokens = 0
if hasattr(response, "usage") and hasattr(response.usage, "total_tokens"):
total_tokens = response.usage.total_tokens
current_time = datetime.now()
start_time = None
if hasattr(self, "current_llm_call") and self.current_llm_call:
start_time = self.current_llm_call.get("start_time")
if not start_time:
start_time = current_time
llm_call = {
"messages": messages,
"response": response,
"start_time": start_time,
"end_time": current_time,
"total_tokens": total_tokens
}
self.traces[trace_key]["llm_calls"].append(llm_call)
if hasattr(self, "current_llm_call"):
self.current_llm_call = {}
def get_trace(self, agent_id: str, task_id: str) -> Optional[Dict[str, Any]]:
trace_key = f"{agent_id}_{task_id}"
return self.traces.get(trace_key)
def create_evaluation_callbacks() -> EvaluationTraceCallback:
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
callback = EvaluationTraceCallback()
callback.setup_listeners(crewai_event_bus)
return callback
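A small sketch of how a collected trace could be read back from the singleton listener above; the ids are placeholders, and the dictionary keys mirror what _init_trace stores.
# Hedged sketch: retrieve an execution trace after a run (ids are placeholders).
from crewai.experimental.evaluation.evaluation_listener import create_evaluation_callbacks

callback = create_evaluation_callbacks()   # singleton, attached to the global event bus
trace = callback.get_trace(agent_id="agent-uuid", task_id="task-uuid")
if trace:
    print(len(trace["tool_uses"]), "tool calls,", len(trace["llm_calls"]), "LLM calls")
    print("final output:", trace["final_output"])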

View File

@@ -1,8 +0,0 @@
from crewai.experimental.evaluation.experiment.runner import ExperimentRunner
from crewai.experimental.evaluation.experiment.result import ExperimentResults, ExperimentResult
__all__ = [
"ExperimentRunner",
"ExperimentResults",
"ExperimentResult"
]

View File

@@ -1,122 +0,0 @@
import json
import os
from datetime import datetime, timezone
from typing import Any
from pydantic import BaseModel
class ExperimentResult(BaseModel):
identifier: str
inputs: dict[str, Any]
score: float | dict[str, float]
expected_score: float | dict[str, float]
passed: bool
agent_evaluations: dict[str, Any] | None = None
class ExperimentResults:
def __init__(self, results: list[ExperimentResult], metadata: dict[str, Any] | None = None):
self.results = results
self.metadata = metadata or {}
self.timestamp = datetime.now(timezone.utc)
from crewai.experimental.evaluation.experiment.result_display import ExperimentResultsDisplay
self.display = ExperimentResultsDisplay()
def to_json(self, filepath: str | None = None) -> dict[str, Any]:
data = {
"timestamp": self.timestamp.isoformat(),
"metadata": self.metadata,
"results": [r.model_dump(exclude={"agent_evaluations"}) for r in self.results]
}
if filepath:
with open(filepath, 'w') as f:
json.dump(data, f, indent=2)
self.display.console.print(f"[green]Results saved to {filepath}[/green]")
return data
def compare_with_baseline(self, baseline_filepath: str, save_current: bool = True, print_summary: bool = False) -> dict[str, Any]:
baseline_runs = []
if os.path.exists(baseline_filepath) and os.path.getsize(baseline_filepath) > 0:
try:
with open(baseline_filepath, 'r') as f:
baseline_data = json.load(f)
if isinstance(baseline_data, dict) and "timestamp" in baseline_data:
baseline_runs = [baseline_data]
elif isinstance(baseline_data, list):
baseline_runs = baseline_data
except (json.JSONDecodeError, FileNotFoundError) as e:
self.display.console.print(f"[yellow]Warning: Could not load baseline file: {str(e)}[/yellow]")
if not baseline_runs:
if save_current:
current_data = self.to_json()
with open(baseline_filepath, 'w') as f:
json.dump([current_data], f, indent=2)
self.display.console.print(f"[green]Saved current results as new baseline to {baseline_filepath}[/green]")
return {"is_baseline": True, "changes": {}}
baseline_runs.sort(key=lambda x: x.get("timestamp", ""), reverse=True)
latest_run = baseline_runs[0]
comparison = self._compare_with_run(latest_run)
if print_summary:
self.display.comparison_summary(comparison, latest_run["timestamp"])
if save_current:
current_data = self.to_json()
baseline_runs.append(current_data)
with open(baseline_filepath, 'w') as f:
json.dump(baseline_runs, f, indent=2)
self.display.console.print(f"[green]Added current results to baseline file {baseline_filepath}[/green]")
return comparison
def _compare_with_run(self, baseline_run: dict[str, Any]) -> dict[str, Any]:
baseline_results = baseline_run.get("results", [])
baseline_lookup = {}
for result in baseline_results:
test_identifier = result.get("identifier")
if test_identifier:
baseline_lookup[test_identifier] = result
improved = []
regressed = []
unchanged = []
new_tests = []
for result in self.results:
test_identifier = result.identifier
if not test_identifier or test_identifier not in baseline_lookup:
new_tests.append(test_identifier)
continue
baseline_result = baseline_lookup[test_identifier]
baseline_passed = baseline_result.get("passed", False)
if result.passed and not baseline_passed:
improved.append(test_identifier)
elif not result.passed and baseline_passed:
regressed.append(test_identifier)
else:
unchanged.append(test_identifier)
missing_tests = []
current_test_identifiers = {result.identifier for result in self.results}
for result in baseline_results:
test_identifier = result.get("identifier")
if test_identifier and test_identifier not in current_test_identifiers:
missing_tests.append(test_identifier)
return {
"improved": improved,
"regressed": regressed,
"unchanged": unchanged,
"new_tests": new_tests,
"missing_tests": missing_tests,
"total_compared": len(improved) + len(regressed) + len(unchanged),
"baseline_timestamp": baseline_run.get("timestamp", "unknown")
}
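A brief sketch of persisting results and checking them against a stored baseline with the API above; file paths and field values are placeholders.
# Hedged sketch: save experiment results, then diff against a baseline file (values are placeholders).
from crewai.experimental.evaluation.experiment.result import ExperimentResult, ExperimentResults

results = ExperimentResults([
    ExperimentResult(identifier="case-1", inputs={"topic": "AI"}, score=8, expected_score=7, passed=True),
])
results.to_json("experiment_results.json")
comparison = results.compare_with_baseline("baseline.json", save_current=True, print_summary=True)
if comparison.get("regressed"):
    print("Regressed cases:", comparison["regressed"])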

View File

@@ -1,70 +0,0 @@
from typing import Dict, Any
from rich.console import Console
from rich.table import Table
from rich.panel import Panel
from crewai.experimental.evaluation.experiment.result import ExperimentResults
class ExperimentResultsDisplay:
def __init__(self):
self.console = Console()
def summary(self, experiment_results: ExperimentResults):
total = len(experiment_results.results)
passed = sum(1 for r in experiment_results.results if r.passed)
table = Table(title="Experiment Summary")
table.add_column("Metric", style="cyan")
table.add_column("Value", style="green")
table.add_row("Total Test Cases", str(total))
table.add_row("Passed", str(passed))
table.add_row("Failed", str(total - passed))
table.add_row("Success Rate", f"{(passed / total * 100):.1f}%" if total > 0 else "N/A")
self.console.print(table)
def comparison_summary(self, comparison: Dict[str, Any], baseline_timestamp: str):
self.console.print(Panel(f"[bold]Comparison with baseline run from {baseline_timestamp}[/bold]",
expand=False))
table = Table(title="Results Comparison")
table.add_column("Metric", style="cyan")
table.add_column("Count", style="white")
table.add_column("Details", style="dim")
improved = comparison.get("improved", [])
if improved:
details = ", ".join([f"{test_identifier}" for test_identifier in improved[:3]])
if len(improved) > 3:
details += f" and {len(improved) - 3} more"
table.add_row("✅ Improved", str(len(improved)), details)
else:
table.add_row("✅ Improved", "0", "")
regressed = comparison.get("regressed", [])
if regressed:
details = ", ".join([f"{test_identifier}" for test_identifier in regressed[:3]])
if len(regressed) > 3:
details += f" and {len(regressed) - 3} more"
table.add_row("❌ Regressed", str(len(regressed)), details, style="red")
else:
table.add_row("❌ Regressed", "0", "")
unchanged = comparison.get("unchanged", [])
table.add_row("⏺ Unchanged", str(len(unchanged)), "")
new_tests = comparison.get("new_tests", [])
if new_tests:
details = ", ".join(new_tests[:3])
if len(new_tests) > 3:
details += f" and {len(new_tests) - 3} more"
table.add_row(" New Tests", str(len(new_tests)), details)
missing_tests = comparison.get("missing_tests", [])
if missing_tests:
details = ", ".join(missing_tests[:3])
if len(missing_tests) > 3:
details += f" and {len(missing_tests) - 3} more"
table.add_row(" Missing Tests", str(len(missing_tests)), details)
self.console.print(table)

View File

@@ -1,125 +0,0 @@
from collections import defaultdict
from hashlib import md5
from typing import Any
from crewai import Crew, Agent
from crewai.experimental.evaluation import AgentEvaluator, create_default_evaluator
from crewai.experimental.evaluation.experiment.result_display import ExperimentResultsDisplay
from crewai.experimental.evaluation.experiment.result import ExperimentResults, ExperimentResult
from crewai.experimental.evaluation.evaluation_display import AgentAggregatedEvaluationResult
class ExperimentRunner:
def __init__(self, dataset: list[dict[str, Any]]):
self.dataset = dataset or []
self.evaluator: AgentEvaluator | None = None
self.display = ExperimentResultsDisplay()
def run(self, crew: Crew | None = None, agents: list[Agent] | None = None, print_summary: bool = False) -> ExperimentResults:
if crew and not agents:
agents = crew.agents
assert agents is not None
self.evaluator = create_default_evaluator(agents=agents)
results = []
for test_case in self.dataset:
self.evaluator.reset_iterations_results()
result = self._run_test_case(test_case=test_case, crew=crew, agents=agents)
results.append(result)
experiment_results = ExperimentResults(results)
if print_summary:
self.display.summary(experiment_results)
return experiment_results
def _run_test_case(self, test_case: dict[str, Any], agents: list[Agent], crew: Crew | None = None) -> ExperimentResult:
inputs = test_case["inputs"]
expected_score = test_case["expected_score"]
identifier = test_case.get("identifier") or md5(str(test_case).encode(), usedforsecurity=False).hexdigest()
try:
self.display.console.print(f"[dim]Running crew with input: {str(inputs)[:50]}...[/dim]")
self.display.console.print("\n")
if crew:
crew.kickoff(inputs=inputs)
else:
for agent in agents:
agent.kickoff(**inputs)
assert self.evaluator is not None
agent_evaluations = self.evaluator.get_agent_evaluation()
actual_score = self._extract_scores(agent_evaluations)
passed = self._assert_scores(expected_score, actual_score)
return ExperimentResult(
identifier=identifier,
inputs=inputs,
score=actual_score,
expected_score=expected_score,
passed=passed,
agent_evaluations=agent_evaluations
)
except Exception as e:
self.display.console.print(f"[red]Error running test case: {str(e)}[/red]")
return ExperimentResult(
identifier=identifier,
inputs=inputs,
score=0,
expected_score=expected_score,
passed=False
)
def _extract_scores(self, agent_evaluations: dict[str, AgentAggregatedEvaluationResult]) -> float | dict[str, float]:
all_scores: dict[str, list[float]] = defaultdict(list)
for evaluation in agent_evaluations.values():
for metric_name, score in evaluation.metrics.items():
if score.score is not None:
all_scores[metric_name.value].append(score.score)
avg_scores = {m: sum(s)/len(s) for m, s in all_scores.items()}
if len(avg_scores) == 1:
return list(avg_scores.values())[0]
return avg_scores
def _assert_scores(self, expected: float | dict[str, float],
actual: float | dict[str, float]) -> bool:
"""
Compare expected and actual scores, and return whether the test case passed.
The rules for comparison are as follows:
- If both expected and actual scores are single numbers, the actual score must be >= expected.
- If expected is a single number and actual is a dict, compare against the average of actual values.
- If expected is a dict and actual is a single number, actual must be >= all expected values.
- If both are dicts, actual must have matching keys with values >= expected values.
"""
if isinstance(expected, (int, float)) and isinstance(actual, (int, float)):
return actual >= expected
if isinstance(expected, dict) and isinstance(actual, (int, float)):
return all(actual >= exp_score for exp_score in expected.values())
if isinstance(expected, (int, float)) and isinstance(actual, dict):
if not actual:
return False
avg_score = sum(actual.values()) / len(actual)
return avg_score >= expected
if isinstance(expected, dict) and isinstance(actual, dict):
if not expected:
return True
matching_keys = set(expected.keys()) & set(actual.keys())
if not matching_keys:
return False
# All matching keys must have actual >= expected
return all(actual[key] >= expected[key] for key in matching_keys)
return False
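To make the runner's contract concrete, here is a sketch of the dataset shape it consumes and how expected_score is interpreted per the _assert_scores rules above; crew is assumed to be an existing Crew instance, for example one built as in the sketch after create_default_evaluator earlier in this diff.
# Hedged sketch: run a small experiment dataset against an existing crew (`crew` assumed to exist).
from crewai.experimental.evaluation.experiment import ExperimentRunner

dataset = [
    {
        "identifier": "basic-research",       # optional; an md5 of the case is used if omitted
        "inputs": {"topic": "solar energy"},  # forwarded to crew.kickoff(inputs=...)
        "expected_score": 7,                  # single number: the averaged actual score must be >= 7
    },
    {
        "inputs": {"topic": "wind energy"},
        "expected_score": {"goal_alignment": 8, "semantic_quality": 6},  # per-metric minimums on matching keys
    },
]

runner = ExperimentRunner(dataset=dataset)
experiment_results = runner.run(crew=crew, print_summary=True)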

View File

@@ -1,30 +0,0 @@
"""Robust JSON parsing utilities for evaluation responses."""
import json
import re
from typing import Any
def extract_json_from_llm_response(text: str) -> dict[str, Any]:
try:
return json.loads(text)
except json.JSONDecodeError:
pass
json_patterns = [
# Standard markdown code blocks with json
r'```json\s*([\s\S]*?)\s*```',
# Code blocks without language specifier
r'```\s*([\s\S]*?)\s*```',
# Inline code with JSON
r'`([{\[].*[}\]])`',
]
for pattern in json_patterns:
matches = re.findall(pattern, text, re.IGNORECASE | re.DOTALL)
for match in matches:
try:
return json.loads(match.strip())
except json.JSONDecodeError:
continue
raise ValueError("No valid JSON found in the response")
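A quick sketch of the parser above handling a typical markdown-fenced LLM reply.
# Hedged sketch: pull JSON out of a fenced LLM response.
from crewai.experimental.evaluation.json_parser import extract_json_from_llm_response

raw = 'Here is my evaluation:\n```json\n{"score": 8.5, "feedback": "Clear and well structured."}\n```'
data = extract_json_from_llm_response(raw)
print(data["score"], data["feedback"])  # 8.5 Clear and well structured.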

View File

@@ -1,26 +0,0 @@
from crewai.experimental.evaluation.metrics.reasoning_metrics import (
ReasoningEfficiencyEvaluator
)
from crewai.experimental.evaluation.metrics.tools_metrics import (
ToolSelectionEvaluator,
ParameterExtractionEvaluator,
ToolInvocationEvaluator
)
from crewai.experimental.evaluation.metrics.goal_metrics import (
GoalAlignmentEvaluator
)
from crewai.experimental.evaluation.metrics.semantic_quality_metrics import (
SemanticQualityEvaluator
)
__all__ = [
"ReasoningEfficiencyEvaluator",
"ToolSelectionEvaluator",
"ParameterExtractionEvaluator",
"ToolInvocationEvaluator",
"GoalAlignmentEvaluator",
"SemanticQualityEvaluator"
]

View File

@@ -1,69 +0,0 @@
from typing import Any, Dict
from crewai.agent import Agent
from crewai.task import Task
from crewai.experimental.evaluation.base_evaluator import BaseEvaluator, EvaluationScore, MetricCategory
from crewai.experimental.evaluation.json_parser import extract_json_from_llm_response
class GoalAlignmentEvaluator(BaseEvaluator):
@property
def metric_category(self) -> MetricCategory:
return MetricCategory.GOAL_ALIGNMENT
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
final_output: Any,
task: Task | None = None,
) -> EvaluationScore:
task_context = ""
if task is not None:
task_context = f"Task description: {task.description}\nExpected output: {task.expected_output}\n"
prompt = [
{"role": "system", "content": """You are an expert evaluator assessing how well an AI agent's output aligns with its assigned task goal.
Score the agent's goal alignment on a scale from 0-10 where:
- 0: Complete misalignment, agent did not understand or attempt the task goal
- 5: Partial alignment, agent attempted the task but missed key requirements
- 10: Perfect alignment, agent fully satisfied all task requirements
Consider:
1. Did the agent correctly interpret the task goal?
2. Did the final output directly address the requirements?
3. Did the agent focus on relevant aspects of the task?
4. Did the agent provide all requested information or deliverables?
Return your evaluation as JSON with fields 'score' (number) and 'feedback' (string).
"""},
{"role": "user", "content": f"""
Agent role: {agent.role}
Agent goal: {agent.goal}
{task_context}
Agent's final output:
{final_output}
Evaluate how well the agent's output aligns with the assigned task goal.
"""}
]
assert self.llm is not None
response = self.llm.call(prompt)
try:
evaluation_data: dict[str, Any] = extract_json_from_llm_response(response)
assert evaluation_data is not None
return EvaluationScore(
score=evaluation_data.get("score", 0),
feedback=evaluation_data.get("feedback", response),
raw_response=response
)
except Exception:
return EvaluationScore(
score=None,
feedback=f"Failed to parse evaluation. Raw response: {response}",
raw_response=response
)
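For a one-off check outside the event-driven flow, the evaluator above can be called directly; the agent fields are placeholders, and this assumes create_llm can resolve a default LLM from the environment.
# Hedged sketch: score a single output against an agent's goal (placeholder values; requires a configured LLM).
from crewai import Agent
from crewai.experimental.evaluation.metrics import GoalAlignmentEvaluator

agent = Agent(role="Analyst", goal="Summarize market trends", backstory="Placeholder backstory")
evaluator = GoalAlignmentEvaluator()  # llm=None falls back to the default via create_llm

score = evaluator.evaluate(
    agent=agent,
    execution_trace={},  # this metric only looks at the goal and the final output
    final_output="Q3 demand rose 12%, driven by residential installations.",
)
print(score)  # EvaluationScore.__str__ -> "Score: X.X/10 - ..."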

View File

@@ -1,361 +0,0 @@
"""Agent reasoning efficiency evaluators.
This module provides evaluator implementations for:
- Reasoning efficiency
- Loop detection
- Thinking-to-action ratio
"""
import logging
import re
from enum import Enum
from typing import Any, Dict, List, Tuple
import numpy as np
from collections.abc import Sequence
from crewai.agent import Agent
from crewai.task import Task
from crewai.experimental.evaluation.base_evaluator import BaseEvaluator, EvaluationScore, MetricCategory
from crewai.experimental.evaluation.json_parser import extract_json_from_llm_response
from crewai.tasks.task_output import TaskOutput
class ReasoningPatternType(Enum):
EFFICIENT = "efficient" # Good reasoning flow
LOOP = "loop" # Agent is stuck in a loop
VERBOSE = "verbose" # Agent is unnecessarily verbose
INDECISIVE = "indecisive" # Agent struggles to make decisions
SCATTERED = "scattered" # Agent jumps between topics without focus
class ReasoningEfficiencyEvaluator(BaseEvaluator):
@property
def metric_category(self) -> MetricCategory:
return MetricCategory.REASONING_EFFICIENCY
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
final_output: TaskOutput | str,
task: Task | None = None,
) -> EvaluationScore:
task_context = ""
if task is not None:
task_context = f"Task description: {task.description}\nExpected output: {task.expected_output}\n"
llm_calls = execution_trace.get("llm_calls", [])
if not llm_calls or len(llm_calls) < 2:
return EvaluationScore(
score=None,
feedback="Insufficient LLM calls to evaluate reasoning efficiency."
)
total_calls = len(llm_calls)
total_tokens = sum(call.get("total_tokens", 0) for call in llm_calls)
avg_tokens_per_call = total_tokens / total_calls if total_calls > 0 else 0
time_intervals = []
has_reliable_timing = True
for i in range(1, len(llm_calls)):
start_time = llm_calls[i-1].get("end_time")
end_time = llm_calls[i].get("start_time")
if start_time and end_time and start_time != end_time:
try:
interval = end_time - start_time
time_intervals.append(interval.total_seconds() if hasattr(interval, 'total_seconds') else 0)
except Exception:
has_reliable_timing = False
else:
has_reliable_timing = False
loop_detected, loop_details = self._detect_loops(llm_calls)
pattern_analysis = self._analyze_reasoning_patterns(llm_calls)
efficiency_metrics = {
"total_llm_calls": total_calls,
"total_tokens": total_tokens,
"avg_tokens_per_call": avg_tokens_per_call,
"reasoning_pattern": pattern_analysis["primary_pattern"].value,
"loops_detected": loop_detected,
}
if has_reliable_timing and time_intervals:
efficiency_metrics["avg_time_between_calls"] = np.mean(time_intervals)
loop_info = f"Detected {len(loop_details)} potential reasoning loops." if loop_detected else "No significant reasoning loops detected."
call_samples = self._get_call_samples(llm_calls)
final_output = final_output.raw if isinstance(final_output, TaskOutput) else final_output
prompt = [
{"role": "system", "content": """You are an expert evaluator assessing the reasoning efficiency of an AI agent's thought process.
Evaluate the agent's reasoning efficiency across these five key subcategories:
1. Focus (0-10): How well the agent stays on topic and avoids unnecessary tangents
2. Progression (0-10): How effectively the agent builds on previous thoughts rather than repeating or circling
3. Decision Quality (0-10): How decisively and appropriately the agent makes decisions
4. Conciseness (0-10): How efficiently the agent communicates without unnecessary verbosity
5. Loop Avoidance (0-10): How well the agent avoids getting stuck in repetitive thinking patterns
For each subcategory, provide a score from 0-10 where:
- 0: Completely inefficient
- 5: Moderately efficient
- 10: Highly efficient
The overall score should be a weighted average of these subcategories.
Return your evaluation as JSON with the following structure:
{
"overall_score": float,
"scores": {
"focus": float,
"progression": float,
"decision_quality": float,
"conciseness": float,
"loop_avoidance": float
},
"feedback": string (general feedback about overall reasoning efficiency),
"optimization_suggestions": string (concrete suggestions for improving reasoning efficiency),
"detected_patterns": string (describe any inefficient reasoning patterns you observe)
}"""},
{"role": "user", "content": f"""
Agent role: {agent.role}
{task_context}
Reasoning efficiency metrics:
- Total LLM calls: {efficiency_metrics["total_llm_calls"]}
- Average tokens per call: {efficiency_metrics["avg_tokens_per_call"]:.1f}
- Primary reasoning pattern: {efficiency_metrics["reasoning_pattern"]}
- {loop_info}
{"- Average time between calls: {:.2f} seconds".format(efficiency_metrics.get("avg_time_between_calls", 0)) if "avg_time_between_calls" in efficiency_metrics else ""}
Sample of agent reasoning flow (chronological sequence):
{call_samples}
Agent's final output:
{final_output[:500]}... (truncated)
Evaluate the reasoning efficiency of this agent based on these interaction patterns.
Identify any inefficient reasoning patterns and provide specific suggestions for optimization.
"""}
]
assert self.llm is not None
response = self.llm.call(prompt)
try:
evaluation_data = extract_json_from_llm_response(response)
scores = evaluation_data.get("scores", {})
focus = scores.get("focus", 5.0)
progression = scores.get("progression", 5.0)
decision_quality = scores.get("decision_quality", 5.0)
conciseness = scores.get("conciseness", 5.0)
loop_avoidance = scores.get("loop_avoidance", 5.0)
overall_score = evaluation_data.get("overall_score", evaluation_data.get("score", 5.0))
feedback = evaluation_data.get("feedback", "No detailed feedback provided.")
optimization_suggestions = evaluation_data.get("optimization_suggestions", "No specific suggestions provided.")
detailed_feedback = "Reasoning Efficiency Evaluation:\n"
detailed_feedback += f"• Focus: {focus}/10 - Staying on topic without tangents\n"
detailed_feedback += f"• Progression: {progression}/10 - Building on previous thinking\n"
detailed_feedback += f"• Decision Quality: {decision_quality}/10 - Making appropriate decisions\n"
detailed_feedback += f"• Conciseness: {conciseness}/10 - Communicating efficiently\n"
detailed_feedback += f"• Loop Avoidance: {loop_avoidance}/10 - Avoiding repetitive patterns\n\n"
detailed_feedback += f"Feedback:\n{feedback}\n\n"
detailed_feedback += f"Optimization Suggestions:\n{optimization_suggestions}"
return EvaluationScore(
score=float(overall_score),
feedback=detailed_feedback,
raw_response=response
)
except Exception as e:
logging.warning(f"Failed to parse reasoning efficiency evaluation: {e}")
return EvaluationScore(
score=None,
feedback=f"Failed to parse reasoning efficiency evaluation. Raw response: {response[:200]}...",
raw_response=response
)
def _detect_loops(self, llm_calls: List[Dict]) -> Tuple[bool, List[Dict]]:
loop_details = []
messages = []
for call in llm_calls:
content = call.get("response", "")
if isinstance(content, str):
messages.append(content)
elif isinstance(content, list) and len(content) > 0:
# Handle message list format
for msg in content:
if isinstance(msg, dict) and "content" in msg:
messages.append(msg["content"])
# Simple n-gram based similarity detection
# For a more robust implementation, consider using embedding-based similarity
for i in range(len(messages) - 2):
for j in range(i + 1, len(messages) - 1):
# Check for repeated patterns (simplistic approach)
# A more sophisticated approach would use semantic similarity
similarity = self._calculate_text_similarity(messages[i], messages[j])
if similarity > 0.7: # Arbitrary threshold
loop_details.append({
"first_occurrence": i,
"second_occurrence": j,
"similarity": similarity,
"snippet": messages[i][:100] + "..."
})
return len(loop_details) > 0, loop_details
def _calculate_text_similarity(self, text1: str, text2: str) -> float:
text1 = re.sub(r'\s+', ' ', text1.lower()).strip()
text2 = re.sub(r'\s+', ' ', text2.lower()).strip()
# Simple Jaccard similarity on word sets
words1 = set(text1.split())
words2 = set(text2.split())
intersection = len(words1.intersection(words2))
union = len(words1.union(words2))
return intersection / union if union > 0 else 0.0
def _analyze_reasoning_patterns(self, llm_calls: List[Dict]) -> Dict[str, Any]:
call_lengths = []
response_times = []
for call in llm_calls:
content = call.get("response", "")
if isinstance(content, str):
call_lengths.append(len(content))
elif isinstance(content, list) and len(content) > 0:
# Handle message list format
total_length = 0
for msg in content:
if isinstance(msg, dict) and "content" in msg:
total_length += len(msg["content"])
call_lengths.append(total_length)
start_time = call.get("start_time")
end_time = call.get("end_time")
if start_time and end_time:
try:
response_times.append((end_time - start_time).total_seconds())
except Exception:
pass
avg_length = np.mean(call_lengths) if call_lengths else 0
std_length = np.std(call_lengths) if call_lengths else 0
length_trend = self._calculate_trend(call_lengths)
primary_pattern = ReasoningPatternType.EFFICIENT
details = "Agent demonstrates efficient reasoning patterns."
loop_score = self._calculate_loop_likelihood(call_lengths, response_times)
if loop_score > 0.7:
primary_pattern = ReasoningPatternType.LOOP
details = "Agent appears to be stuck in repetitive thinking patterns."
elif avg_length > 1000 and std_length / avg_length < 0.3:
primary_pattern = ReasoningPatternType.VERBOSE
details = "Agent is consistently verbose across interactions."
elif len(llm_calls) > 10 and length_trend > 0.5:
primary_pattern = ReasoningPatternType.INDECISIVE
details = "Agent shows signs of indecisiveness with increasing message lengths."
elif std_length / avg_length > 0.8:
primary_pattern = ReasoningPatternType.SCATTERED
details = "Agent shows inconsistent reasoning flow with highly variable responses."
return {
"primary_pattern": primary_pattern,
"details": details,
"metrics": {
"avg_length": avg_length,
"std_length": std_length,
"length_trend": length_trend,
"loop_score": loop_score
}
}
def _calculate_trend(self, values: Sequence[float | int]) -> float:
if not values or len(values) < 2:
return 0.0
try:
x = np.arange(len(values))
y = np.array(values)
# Simple linear regression
slope = np.polyfit(x, y, 1)[0]
# Normalize slope to -1 to 1 range
max_possible_slope = max(values) - min(values)
if max_possible_slope > 0:
normalized_slope = slope / max_possible_slope
return max(min(normalized_slope, 1.0), -1.0)
return 0.0
except Exception:
return 0.0
def _calculate_loop_likelihood(self, call_lengths: Sequence[float], response_times: Sequence[float]) -> float:
if not call_lengths or len(call_lengths) < 3:
return 0.0
indicators = []
if len(call_lengths) >= 4:
repeated_lengths = 0
for i in range(len(call_lengths) - 2):
ratio = call_lengths[i] / call_lengths[i + 2] if call_lengths[i + 2] > 0 else 0
if 0.85 <= ratio <= 1.15:
repeated_lengths += 1
length_repetition_score = repeated_lengths / (len(call_lengths) - 2)
indicators.append(length_repetition_score)
if response_times and len(response_times) >= 3:
try:
std_time = np.std(response_times)
mean_time = np.mean(response_times)
if mean_time > 0:
time_consistency = 1.0 - (std_time / mean_time)
indicators.append(max(0, time_consistency - 0.3) * 1.5)
except Exception:
pass
return np.mean(indicators) if indicators else 0.0
def _get_call_samples(self, llm_calls: List[Dict]) -> str:
samples = []
if len(llm_calls) <= 6:
sample_indices = list(range(len(llm_calls)))
else:
sample_indices = [0, 1, len(llm_calls) // 2 - 1, len(llm_calls) // 2,
len(llm_calls) - 2, len(llm_calls) - 1]
for idx in sample_indices:
call = llm_calls[idx]
content = call.get("response", "")
if isinstance(content, str):
sample = content
elif isinstance(content, list) and len(content) > 0:
sample_parts = []
for msg in content:
if isinstance(msg, dict) and "content" in msg:
sample_parts.append(msg["content"])
sample = "\n".join(sample_parts)
else:
sample = str(content)
truncated = sample[:200] + "..." if len(sample) > 200 else sample
samples.append(f"Call {idx + 1}:\n{truncated}\n")
return "\n".join(samples)

View File

@@ -1,68 +0,0 @@
from typing import Any, Dict
from crewai.agent import Agent
from crewai.task import Task
from crewai.experimental.evaluation.base_evaluator import BaseEvaluator, EvaluationScore, MetricCategory
from crewai.experimental.evaluation.json_parser import extract_json_from_llm_response
class SemanticQualityEvaluator(BaseEvaluator):
@property
def metric_category(self) -> MetricCategory:
return MetricCategory.SEMANTIC_QUALITY
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
final_output: Any,
task: Task | None = None,
) -> EvaluationScore:
task_context = ""
if task is not None:
task_context = f"Task description: {task.description}"
prompt = [
{"role": "system", "content": """You are an expert evaluator assessing the semantic quality of an AI agent's output.
Score the semantic quality on a scale from 0-10 where:
- 0: Completely incoherent, confusing, or logically flawed output
- 5: Moderately clear and logical output with some issues
- 10: Exceptionally clear, coherent, and logically sound output
Consider:
1. Is the output well-structured and organized?
2. Is the reasoning logical and well-supported?
3. Is the language clear, precise, and appropriate for the task?
4. Are claims supported by evidence when appropriate?
5. Is the output free from contradictions and logical fallacies?
Return your evaluation as JSON with fields 'score' (number) and 'feedback' (string).
"""},
{"role": "user", "content": f"""
Agent role: {agent.role}
{task_context}
Agent's final output:
{final_output}
Evaluate the semantic quality and reasoning of this output.
"""}
]
assert self.llm is not None
response = self.llm.call(prompt)
try:
evaluation_data: dict[str, Any] = extract_json_from_llm_response(response)
assert evaluation_data is not None
return EvaluationScore(
score=float(evaluation_data["score"]) if evaluation_data.get("score") is not None else None,
feedback=evaluation_data.get("feedback", response),
raw_response=response
)
except Exception:
return EvaluationScore(
score=None,
feedback=f"Failed to parse evaluation. Raw response: {response}",
raw_response=response
)

View File

@@ -1,410 +0,0 @@
import json
from typing import Dict, Any
from crewai.experimental.evaluation.base_evaluator import BaseEvaluator, EvaluationScore, MetricCategory
from crewai.experimental.evaluation.json_parser import extract_json_from_llm_response
from crewai.agent import Agent
from crewai.task import Task
class ToolSelectionEvaluator(BaseEvaluator):
@property
def metric_category(self) -> MetricCategory:
return MetricCategory.TOOL_SELECTION
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
final_output: str,
task: Task | None = None,
) -> EvaluationScore:
task_context = ""
if task is not None:
task_context = f"Task description: {task.description}"
tool_uses = execution_trace.get("tool_uses", [])
tool_count = len(tool_uses)
unique_tool_types = set([tool.get("tool", "Unknown tool") for tool in tool_uses])
if tool_count == 0:
if not agent.tools:
return EvaluationScore(
score=None,
feedback="Agent had no tools available to use."
)
else:
return EvaluationScore(
score=None,
feedback="Agent had tools available but didn't use any."
)
available_tools_info = ""
if agent.tools:
for tool in agent.tools:
available_tools_info += f"- {tool.name}: {tool.description}\n"
else:
available_tools_info = "No tools available"
tool_types_summary = "Tools selected by the agent:\n"
for tool_type in sorted(unique_tool_types):
tool_types_summary += f"- {tool_type}\n"
prompt = [
{"role": "system", "content": """You are an expert evaluator assessing if an AI agent selected the most appropriate tools for a given task.
You must evaluate based on these 2 criteria:
1. Relevance (0-10): Were the tools chosen directly aligned with the task's goals?
2. Coverage (0-10): Did the agent select ALL appropriate tools from the AVAILABLE tools?
IMPORTANT:
- ONLY consider tools that are listed as available to the agent
- DO NOT suggest tools that aren't in the 'Available tools' list
- DO NOT evaluate the quality or accuracy of tool outputs/results
- DO NOT evaluate how many times each tool was used
- DO NOT evaluate how the agent used the parameters
- DO NOT evaluate whether the agent interpreted the task correctly
Focus ONLY on whether the correct CATEGORIES of tools were selected from what was available.
Return your evaluation as JSON with these fields:
- scores: {"relevance": number, "coverage": number}
- overall_score: number (average of all scores, 0-10)
- feedback: string (focused ONLY on tool selection decisions from available tools)
- improvement_suggestions: string (ONLY suggest better selection from the AVAILABLE tools list, NOT new tools)
"""},
{"role": "user", "content": f"""
Agent role: {agent.role}
{task_context}
Available tools for this agent:
{available_tools_info}
{tool_types_summary}
Based ONLY on the task description and comparing the AVAILABLE tools with those that were selected (listed above), evaluate if the agent selected the appropriate tool types for this task.
IMPORTANT:
- ONLY evaluate selection from tools listed as available
- DO NOT suggest new tools that aren't in the available tools list
- DO NOT evaluate tool usage or results
"""}
]
assert self.llm is not None
response = self.llm.call(prompt)
try:
evaluation_data = extract_json_from_llm_response(response)
assert evaluation_data is not None
scores = evaluation_data.get("scores", {})
relevance = scores.get("relevance", 5.0)
coverage = scores.get("coverage", 5.0)
overall_score = float(evaluation_data.get("overall_score", 5.0))
feedback = "Tool Selection Evaluation:\n"
feedback += f"• Relevance: {relevance}/10 - Selection of appropriate tool types for the task\n"
feedback += f"• Coverage: {coverage}/10 - Selection of all necessary tool types\n"
if "improvement_suggestions" in evaluation_data:
feedback += f"Improvement Suggestions:\n{evaluation_data['improvement_suggestions']}"
else:
feedback += evaluation_data.get("feedback", "No detailed feedback available.")
return EvaluationScore(
score=overall_score,
feedback=feedback,
raw_response=response
)
except Exception as e:
return EvaluationScore(
score=None,
feedback=f"Error evaluating tool selection: {e}",
raw_response=response
)
class ParameterExtractionEvaluator(BaseEvaluator):
@property
def metric_category(self) -> MetricCategory:
return MetricCategory.PARAMETER_EXTRACTION
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
final_output: str,
task: Task | None = None,
) -> EvaluationScore:
task_context = ""
if task is not None:
task_context = f"Task description: {task.description}"
tool_uses = execution_trace.get("tool_uses", [])
tool_count = len(tool_uses)
if tool_count == 0:
return EvaluationScore(
score=None,
feedback="No tool usage detected. Cannot evaluate parameter extraction."
)
validation_errors = []
for tool_use in tool_uses:
if not tool_use.get("success", True) and tool_use.get("error_type") == "validation_error":
validation_errors.append({
"tool": tool_use.get("tool", "Unknown tool"),
"error": tool_use.get("result"),
"args": tool_use.get("args", {})
})
validation_error_rate = len(validation_errors) / tool_count if tool_count > 0 else 0
param_samples = []
for i, tool_use in enumerate(tool_uses[:5]):
tool_name = tool_use.get("tool", "Unknown tool")
tool_args = tool_use.get("args", {})
success = tool_use.get("success", True) and not tool_use.get("error", False)
error_type = tool_use.get("error_type", "") if not success else ""
is_validation_error = error_type == "validation_error"
sample = f"Tool use #{i+1} - {tool_name}:\n"
sample += f"- Parameters: {json.dumps(tool_args, indent=2)}\n"
sample += f"- Success: {'No' if not success else 'Yes'}"
if is_validation_error:
sample += " (PARAMETER VALIDATION ERROR)\n"
sample += f"- Error: {tool_use.get('result', 'Unknown error')}"
elif not success:
sample += f" (Other error: {error_type})\n"
param_samples.append(sample)
validation_errors_info = ""
if validation_errors:
validation_errors_info = f"\nParameter validation errors detected: {len(validation_errors)} ({validation_error_rate:.1%} of tool uses)\n"
for i, err in enumerate(validation_errors[:3]):
tool_name = err.get("tool", "Unknown tool")
error_msg = err.get("error", "Unknown error")
args = err.get("args", {})
validation_errors_info += f"\nValidation Error #{i+1}:\n- Tool: {tool_name}\n- Args: {json.dumps(args, indent=2)}\n- Error: {error_msg}"
if len(validation_errors) > 3:
validation_errors_info += f"\n...and {len(validation_errors) - 3} more validation errors."
param_samples_text = "\n\n".join(param_samples)
prompt = [
{"role": "system", "content": """You are an expert evaluator assessing how well an AI agent extracts and formats PARAMETER VALUES for tool calls.
Your job is to evaluate ONLY whether the agent used the correct parameter VALUES, not whether the right tools were selected or how the tools were invoked.
Evaluate parameter extraction based on these criteria:
1. Accuracy (0-10): Are parameter values correctly identified from the context/task?
2. Formatting (0-10): Are values formatted correctly for each tool's requirements?
3. Completeness (0-10): Are all required parameter values provided, with no missing information?
IMPORTANT: DO NOT evaluate:
- Whether the right tool was chosen (that's the ToolSelectionEvaluator's job)
- How the tools were structurally invoked (that's the ToolInvocationEvaluator's job)
- The quality of results from tools
Focus ONLY on the PARAMETER VALUES - whether they were correctly extracted from the context, properly formatted, and complete.
Validation errors are important signals that parameter values weren't properly extracted or formatted.
Return your evaluation as JSON with these fields:
- scores: {"accuracy": number, "formatting": number, "completeness": number}
- overall_score: number (average of all scores, 0-10)
- feedback: string (focused ONLY on parameter value extraction quality)
- improvement_suggestions: string (concrete suggestions for better parameter VALUE extraction)
"""},
{"role": "user", "content": f"""
Agent role: {agent.role}
{task_context}
Parameter extraction examples:
{param_samples_text}
{validation_errors_info}
Evaluate the quality of the agent's parameter extraction for this task.
"""}
]
assert self.llm is not None
response = self.llm.call(prompt)
try:
evaluation_data = extract_json_from_llm_response(response)
assert evaluation_data is not None
scores = evaluation_data.get("scores", {})
accuracy = scores.get("accuracy", 5.0)
formatting = scores.get("formatting", 5.0)
completeness = scores.get("completeness", 5.0)
overall_score = float(evaluation_data.get("overall_score", 5.0))
feedback = "Parameter Extraction Evaluation:\n"
feedback += f"• Accuracy: {accuracy}/10 - Correctly identifying required parameters\n"
feedback += f"• Formatting: {formatting}/10 - Properly formatting parameters for tools\n"
feedback += f"• Completeness: {completeness}/10 - Including all necessary information\n\n"
if "improvement_suggestions" in evaluation_data:
feedback += f"Improvement Suggestions:\n{evaluation_data['improvement_suggestions']}"
else:
feedback += evaluation_data.get("feedback", "No detailed feedback available.")
return EvaluationScore(
score=overall_score,
feedback=feedback,
raw_response=response
)
except Exception as e:
return EvaluationScore(
score=None,
feedback=f"Error evaluating parameter extraction: {e}",
raw_response=response
)
class ToolInvocationEvaluator(BaseEvaluator):
@property
def metric_category(self) -> MetricCategory:
return MetricCategory.TOOL_INVOCATION
def evaluate(
self,
agent: Agent,
execution_trace: Dict[str, Any],
final_output: str,
task: Task | None = None,
) -> EvaluationScore:
task_context = ""
if task is not None:
task_context = f"Task description: {task.description}"
tool_uses = execution_trace.get("tool_uses", [])
tool_errors = []
tool_count = len(tool_uses)
if tool_count == 0:
return EvaluationScore(
score=None,
feedback="No tool usage detected. Cannot evaluate tool invocation."
)
for tool_use in tool_uses:
if not tool_use.get("success", True) or tool_use.get("error", False):
error_info = {
"tool": tool_use.get("tool", "Unknown tool"),
"error": tool_use.get("result"),
"error_type": tool_use.get("error_type", "unknown_error")
}
tool_errors.append(error_info)
error_rate = len(tool_errors) / tool_count if tool_count > 0 else 0
error_types = {}
for error in tool_errors:
error_type = error.get("error_type", "unknown_error")
if error_type not in error_types:
error_types[error_type] = 0
error_types[error_type] += 1
invocation_samples = []
for i, tool_use in enumerate(tool_uses[:5]):
tool_name = tool_use.get("tool", "Unknown tool")
tool_args = tool_use.get("args", {})
success = tool_use.get("success", True) and not tool_use.get("error", False)
error_type = tool_use.get("error_type", "") if not success else ""
error_msg = tool_use.get("result", "No error") if not success else "No error"
sample = f"Tool invocation #{i+1}:\n"
sample += f"- Tool: {tool_name}\n"
sample += f"- Parameters: {json.dumps(tool_args, indent=2)}\n"
sample += f"- Success: {'No' if not success else 'Yes'}\n"
if not success:
sample += f"- Error type: {error_type}\n"
sample += f"- Error: {error_msg}"
invocation_samples.append(sample)
error_type_summary = ""
if error_types:
error_type_summary = "Error type breakdown:\n"
for error_type, count in error_types.items():
error_type_summary += f"- {error_type}: {count} occurrences ({(count/tool_count):.1%})\n"
invocation_samples_text = "\n\n".join(invocation_samples)
prompt = [
{"role": "system", "content": """You are an expert evaluator assessing how correctly an AI agent's tool invocations are STRUCTURED.
Your job is to evaluate ONLY the structural and syntactical aspects of how the agent called tools, NOT which tools were selected or what parameter values were used.
Evaluate the agent's tool invocation based on these criteria:
1. Structure (0-10): Does the tool call follow the expected syntax and format?
2. Error Handling (0-10): Does the agent handle tool errors appropriately?
3. Invocation Patterns (0-10): Are tool calls properly sequenced, batched, or managed?
Error types that indicate invocation issues:
- execution_error: The tool was called correctly but failed during execution
- usage_error: General errors in how the tool was used structurally
IMPORTANT: DO NOT evaluate:
- Whether the right tool was chosen (that's the ToolSelectionEvaluator's job)
- Whether the parameter values are correct (that's the ParameterExtractionEvaluator's job)
- The quality of results from tools
Focus ONLY on HOW tools were invoked - the structure, format, and handling of the invocation process.
Return your evaluation as JSON with these fields:
- scores: {"structure": number, "error_handling": number, "invocation_patterns": number}
- overall_score: number (average of all scores, 0-10)
- feedback: string (focused ONLY on structural aspects of tool invocation)
- improvement_suggestions: string (concrete suggestions for better structuring of tool calls)
"""},
{"role": "user", "content": f"""
Agent role: {agent.role}
{task_context}
Tool invocation examples:
{invocation_samples_text}
Tool error rate: {error_rate:.2%} ({len(tool_errors)} errors out of {tool_count} invocations)
{error_type_summary}
Evaluate the quality of the agent's tool invocation structure during this task.
"""}
]
assert self.llm is not None
response = self.llm.call(prompt)
try:
evaluation_data = extract_json_from_llm_response(response)
assert evaluation_data is not None
scores = evaluation_data.get("scores", {})
structure = scores.get("structure", 5.0)
error_handling = scores.get("error_handling", 5.0)
invocation_patterns = scores.get("invocation_patterns", 5.0)
overall_score = float(evaluation_data.get("overall_score", 5.0))
feedback = "Tool Invocation Evaluation:\n"
feedback += f"• Structure: {structure}/10 - Following proper syntax and format\n"
feedback += f"• Error Handling: {error_handling}/10 - Appropriately handling tool errors\n"
feedback += f"• Invocation Patterns: {invocation_patterns}/10 - Proper sequencing and management of calls\n\n"
if "improvement_suggestions" in evaluation_data:
feedback += f"Improvement Suggestions:\n{evaluation_data['improvement_suggestions']}"
else:
feedback += evaluation_data.get("feedback", "No detailed feedback available.")
return EvaluationScore(
score=overall_score,
feedback=feedback,
raw_response=response
)
except Exception as e:
return EvaluationScore(
score=None,
feedback=f"Error evaluating tool invocation: {e}",
raw_response=response
)
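
For reference, a minimal sketch of the execution_trace shape these three evaluators consume — the keys match the .get(...) accesses above; the tool name and values are invented for illustration.

execution_trace = {
    "tool_uses": [
        {  # a successful invocation
            "tool": "web_search",
            "args": {"query": "latest CrewAI release"},
            "success": True,
        },
        {  # a validation failure, surfaced by ParameterExtractionEvaluator
            "tool": "web_search",
            "args": {"query": 42},
            "success": False,
            "error": True,
            "error_type": "validation_error",
            "result": "query must be a string",
        },
    ]
}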

View File

@@ -1,52 +0,0 @@
import inspect
from typing_extensions import Any
import warnings
from crewai.experimental.evaluation.experiment import ExperimentResults, ExperimentRunner
from crewai import Crew, Agent
def assert_experiment_successfully(experiment_results: ExperimentResults, baseline_filepath: str | None = None) -> None:
failed_tests = [result for result in experiment_results.results if not result.passed]
if failed_tests:
detailed_failures: list[str] = []
for result in failed_tests:
expected = result.expected_score
actual = result.score
detailed_failures.append(f"- {result.identifier}: expected {expected}, got {actual}")
failure_details = "\n".join(detailed_failures)
raise AssertionError(f"The following test cases failed:\n{failure_details}")
baseline_filepath = baseline_filepath or _get_baseline_filepath_fallback()
comparison = experiment_results.compare_with_baseline(baseline_filepath=baseline_filepath)
assert_experiment_no_regression(comparison)
def assert_experiment_no_regression(comparison_result: dict[str, list[str]]) -> None:
regressed = comparison_result.get("regressed", [])
if regressed:
raise AssertionError(f"Regression detected! The following tests that previously passed now fail: {regressed}")
missing_tests = comparison_result.get("missing_tests", [])
if missing_tests:
warnings.warn(
f"Warning: {len(missing_tests)} tests from the baseline are missing in the current run: {missing_tests}",
UserWarning
)
def run_experiment(dataset: list[dict[str, Any]], crew: Crew | None = None, agents: list[Agent] | None = None, verbose: bool = False) -> ExperimentResults:
runner = ExperimentRunner(dataset=dataset)
return runner.run(agents=agents, crew=crew, print_summary=verbose)
def _get_baseline_filepath_fallback() -> str:
test_func_name = "experiment_fallback"
try:
current_frame = inspect.currentframe()
if current_frame is not None:
test_func_name = current_frame.f_back.f_back.f_code.co_name # type: ignore[union-attr]
except Exception:
...
return f"{test_func_name}_results.json"

View File

@@ -32,10 +32,6 @@ from crewai.utilities.events.flow_events import (
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
from crewai.utilities.events.listeners.tracing.trace_listener import (
TraceCollectionListener,
)
from crewai.utilities.events.listeners.tracing.utils import is_tracing_enabled
from crewai.utilities.printer import Printer
logger = logging.getLogger(__name__)
@@ -440,8 +436,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
_routers: Set[str] = set()
_router_paths: Dict[str, List[str]] = {}
initial_state: Union[Type[T], T, None] = None
name: Optional[str] = None
tracing: Optional[bool] = False
def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:
class _FlowGeneric(cls): # type: ignore
@@ -453,7 +447,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
def __init__(
self,
persistence: Optional[FlowPersistence] = None,
tracing: Optional[bool] = False,
**kwargs: Any,
) -> None:
"""Initialize a new Flow instance.
@@ -471,10 +464,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
# Initialize state with initial values
self._state = self._create_initial_state()
self.tracing = tracing
if is_tracing_enabled() or tracing:
trace_listener = TraceCollectionListener(tracing=tracing)
trace_listener.setup_listeners(crewai_event_bus)
# Apply any additional kwargs
if kwargs:
self._initialize_state(kwargs)
@@ -483,7 +473,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self,
FlowCreatedEvent(
type="flow_created",
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
),
)
@@ -779,7 +769,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self,
FlowStartedEvent(
type="flow_started",
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
inputs=inputs,
),
)
@@ -802,7 +792,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self,
FlowFinishedEvent(
type="flow_finished",
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
result=final_output,
),
)
@@ -844,7 +834,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
MethodExecutionStartedEvent(
type="method_execution_started",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
params=dumped_params,
state=self._copy_state(),
),
@@ -866,7 +856,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
MethodExecutionFinishedEvent(
type="method_execution_finished",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
state=self._copy_state(),
result=result,
),
@@ -879,7 +869,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
MethodExecutionFailedEvent(
type="method_execution_failed",
method_name=method_name,
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
error=e,
),
)
@@ -1086,7 +1076,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
self,
FlowPlotEvent(
type="flow_plot",
flow_name=self.name or self.__class__.__name__,
flow_name=self.__class__.__name__,
),
)
plot_flow(self, filename)

View File

@@ -81,7 +81,7 @@ class SQLiteFlowPersistence(FlowPersistence):
"""
# Convert state_data to dict, handling both Pydantic and dict cases
if isinstance(state_data, BaseModel):
state_dict = state_data.model_dump()
state_dict = dict(state_data) # Use dict() for better type compatibility
elif isinstance(state_data, dict):
state_dict = state_data
else:

View File

@@ -0,0 +1,55 @@
from abc import ABC, abstractmethod
from typing import List
import numpy as np
class BaseEmbedder(ABC):
"""
Abstract base class for text embedding models
"""
@abstractmethod
def embed_chunks(self, chunks: List[str]) -> np.ndarray:
"""
Generate embeddings for a list of text chunks
Args:
chunks: List of text chunks to embed
Returns:
Array of embeddings
"""
pass
@abstractmethod
def embed_texts(self, texts: List[str]) -> np.ndarray:
"""
Generate embeddings for a list of texts
Args:
texts: List of texts to embed
Returns:
Array of embeddings
"""
pass
@abstractmethod
def embed_text(self, text: str) -> np.ndarray:
"""
Generate embedding for a single text
Args:
text: Text to embed
Returns:
Embedding array
"""
pass
@property
@abstractmethod
def dimension(self) -> int:
"""Get the dimension of the embeddings"""
pass
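
A minimal concrete subclass sketch for the interface above — a toy bag-of-words hashing embedder, for illustration only. The import path for BaseEmbedder is an assumption; the new file's location is not shown in this hunk.

from typing import List

import numpy as np

# Hypothetical import path for the class defined above.
from crewai.knowledge.embedder.base_embedder import BaseEmbedder

class HashingEmbedder(BaseEmbedder):
    # Toy embedder: hashes tokens into a fixed-size, L2-normalized vector.
    def __init__(self, dim: int = 64) -> None:
        self._dim = dim

    def embed_text(self, text: str) -> np.ndarray:
        vec = np.zeros(self._dim, dtype=np.float32)
        for token in text.lower().split():
            vec[hash(token) % self._dim] += 1.0
        norm = np.linalg.norm(vec)
        return vec / norm if norm else vec

    def embed_texts(self, texts: List[str]) -> np.ndarray:
        if not texts:
            return np.empty((0, self._dim), dtype=np.float32)
        return np.vstack([self.embed_text(t) for t in texts])

    def embed_chunks(self, chunks: List[str]) -> np.ndarray:
        return self.embed_texts(chunks)

    @property
    def dimension(self) -> int:
        return self._dim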

View File

@@ -13,12 +13,11 @@ from chromadb.api.types import OneOrMany
from chromadb.config import Settings
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
from crewai.rag.embeddings.configurator import EmbeddingConfigurator
from crewai.utilities import EmbeddingConfigurator
from crewai.utilities.chromadb import sanitize_collection_name
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
from crewai.utilities.logger import Logger
from crewai.utilities.paths import db_storage_path
from crewai.utilities.chromadb import create_persistent_client
@contextlib.contextmanager
@@ -85,11 +84,14 @@ class KnowledgeStorage(BaseKnowledgeStorage):
raise Exception("Collection not initialized")
def initialize_knowledge_storage(self):
self.app = create_persistent_client(
path=os.path.join(db_storage_path(), "knowledge"),
base_path = os.path.join(db_storage_path(), "knowledge")
chroma_client = chromadb.PersistentClient(
path=base_path,
settings=Settings(allow_reset=True),
)
self.app = chroma_client
try:
collection_name = (
f"knowledge_{self.collection_name}"
@@ -109,8 +111,9 @@ class KnowledgeStorage(BaseKnowledgeStorage):
def reset(self):
base_path = os.path.join(db_storage_path(), KNOWLEDGE_DIRECTORY)
if not self.app:
self.app = create_persistent_client(
path=base_path, settings=Settings(allow_reset=True)
self.app = chromadb.PersistentClient(
path=base_path,
settings=Settings(allow_reset=True),
)
self.app.reset()

View File

@@ -40,7 +40,7 @@ from crewai.agents.parser import (
OutputParserException,
)
from crewai.flow.flow_trackable import FlowTrackable
from crewai.llm import LLM, BaseLLM
from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.utilities import I18N
@@ -135,7 +135,7 @@ class LiteAgent(FlowTrackable, BaseModel):
role: str = Field(description="Role of the agent")
goal: str = Field(description="Goal of the agent")
backstory: str = Field(description="Backstory of the agent")
llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
default=None, description="Language model that will run the agent"
)
tools: List[BaseTool] = Field(
@@ -147,7 +147,7 @@ class LiteAgent(FlowTrackable, BaseModel):
default=15, description="Maximum number of iterations for tool usage"
)
max_execution_time: Optional[int] = Field(
default=None, description=". Maximum execution time in seconds"
default=None, description="Maximum execution time in seconds"
)
respect_context_window: bool = Field(
default=True,
@@ -209,10 +209,8 @@ class LiteAgent(FlowTrackable, BaseModel):
def setup_llm(self):
"""Set up the LLM and other components after initialization."""
self.llm = create_llm(self.llm)
if not isinstance(self.llm, BaseLLM):
raise ValueError(
f"Expected LLM instance of type BaseLLM, got {type(self.llm).__name__}"
)
if not isinstance(self.llm, LLM):
raise ValueError("Unable to create LLM instance")
# Initialize callbacks
token_callback = TokenCalcHandler(token_cost_process=self._token_process)
@@ -234,10 +232,7 @@ class LiteAgent(FlowTrackable, BaseModel):
elif isinstance(self.guardrail, str):
from crewai.tasks.llm_guardrail import LLMGuardrail
if not isinstance(self.llm, BaseLLM):
raise TypeError(
f"Guardrail requires LLM instance of type BaseLLM, got {type(self.llm).__name__}"
)
assert isinstance(self.llm, LLM)
self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)
@@ -309,7 +304,6 @@ class LiteAgent(FlowTrackable, BaseModel):
"""
# Create agent info for event emission
agent_info = {
"id": self.id,
"role": self.role,
"goal": self.goal,
"backstory": self.backstory,
@@ -519,8 +513,7 @@ class LiteAgent(FlowTrackable, BaseModel):
enforce_rpm_limit(self.request_within_rpm_limit)
llm = cast(LLM, self.llm)
model = llm.model if hasattr(llm, "model") else "unknown"
# Emit LLM call started event
crewai_event_bus.emit(
self,
event=LLMCallStartedEvent(
@@ -528,7 +521,6 @@ class LiteAgent(FlowTrackable, BaseModel):
tools=None,
callbacks=self._callbacks,
from_agent=self,
model=model,
),
)
@@ -545,11 +537,9 @@ class LiteAgent(FlowTrackable, BaseModel):
crewai_event_bus.emit(
self,
event=LLMCallCompletedEvent(
messages=self._messages,
response=answer,
call_type=LLMCallType.LLM_CALL,
from_agent=self,
model=model,
),
)
except Exception as e:

View File

@@ -59,8 +59,6 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
load_dotenv()
litellm.suppress_debug_info = True
class FilteredStream(io.TextIOBase):
_lock = None
@@ -78,9 +76,10 @@ class FilteredStream(io.TextIOBase):
# Skip common noisy LiteLLM banners and any other lines that contain "litellm"
if (
"litellm.info:" in lower_s
or "Consider using a smaller input or implementing a text splitting strategy"
in lower_s
"give feedback / get help" in lower_s
or "litellm.info:" in lower_s
or "litellm" in lower_s
or "Consider using a smaller input or implementing a text splitting strategy" in lower_s
):
return 0
@@ -288,8 +287,6 @@ class AccumulatedToolArgs(BaseModel):
class LLM(BaseLLM):
completion_cost: Optional[float] = None
def __init__(
self,
model: str,
@@ -313,7 +310,6 @@ class LLM(BaseLLM):
api_key: Optional[str] = None,
callbacks: List[Any] = [],
reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
thinking_budget: Optional[int] = None,
stream: bool = False,
**kwargs,
):
@@ -338,14 +334,10 @@ class LLM(BaseLLM):
self.callbacks = callbacks
self.context_window_size = 0
self.reasoning_effort = reasoning_effort
self.thinking_budget = thinking_budget
self.additional_params = kwargs
self.is_anthropic = self._is_anthropic_model(model)
self.stream = stream
if self.thinking_budget is not None and (not isinstance(self.thinking_budget, int) or self.thinking_budget <= 0):
raise ValueError("thinking_budget must be a positive integer")
litellm.drop_params = True
# Normalize self.stop to always be a List[str]
@@ -419,9 +411,6 @@ class LLM(BaseLLM):
**self.additional_params,
}
if self.thinking_budget is not None:
params["thinking"] = {"budget_tokens": self.thinking_budget}
# Remove None values from params
return {k: v for k, v in params.items() if v is not None}
@@ -519,6 +508,7 @@ class LLM(BaseLLM):
# Enable tool calls using streaming
if "tool_calls" in delta:
tool_calls = delta["tool_calls"]
if tool_calls:
result = self._handle_streaming_tool_calls(
tool_calls=tool_calls,
@@ -527,7 +517,6 @@ class LLM(BaseLLM):
from_task=from_task,
from_agent=from_agent,
)
if result is not None:
chunk_content = result
@@ -544,11 +533,7 @@ class LLM(BaseLLM):
assert hasattr(crewai_event_bus, "emit")
crewai_event_bus.emit(
self,
event=LLMStreamChunkEvent(
chunk=chunk_content,
from_task=from_task,
from_agent=from_agent,
),
event=LLMStreamChunkEvent(chunk=chunk_content, from_task=from_task, from_agent=from_agent),
)
# --- 4) Fallback to non-streaming if no content received
if not full_response.strip() and chunk_count == 0:
@@ -561,11 +546,7 @@ class LLM(BaseLLM):
"stream_options", None
) # Remove stream_options for non-streaming call
return self._handle_non_streaming_response(
non_streaming_params,
callbacks,
available_functions,
from_task,
from_agent,
non_streaming_params, callbacks, available_functions, from_task, from_agent
)
# --- 5) Handle empty response with chunks
@@ -650,13 +631,7 @@ class LLM(BaseLLM):
# Log token usage if available in streaming mode
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
# Emit completion event and return response
self._handle_emit_call_events(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
return full_response
# --- 9) Handle tool calls if present
@@ -668,13 +643,7 @@ class LLM(BaseLLM):
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
# --- 11) Emit completion event and return response
self._handle_emit_call_events(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
return full_response
except ContextWindowExceededError as e:
@@ -686,22 +655,14 @@ class LLM(BaseLLM):
logging.error(f"Error in streaming response: {str(e)}")
if full_response.strip():
logging.warning(f"Returning partial response despite error: {str(e)}")
self._handle_emit_call_events(
response=full_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
return full_response
# Emit failed event and re-raise the exception
assert hasattr(crewai_event_bus, "emit")
crewai_event_bus.emit(
self,
event=LLMCallFailedEvent(
error=str(e), from_task=from_task, from_agent=from_agent
),
event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
)
raise Exception(f"Failed to get streaming response: {str(e)}")
@@ -799,7 +760,7 @@ class LLM(BaseLLM):
available_functions: Optional[Dict[str, Any]] = None,
from_task: Optional[Any] = None,
from_agent: Optional[Any] = None,
) -> str | Any:
) -> str:
"""Handle a non-streaming response from the LLM.
Args:
@@ -819,16 +780,17 @@ class LLM(BaseLLM):
# across the codebase. This allows CrewAgentExecutor to handle context
# length issues appropriately.
response = litellm.completion(**params)
except ContextWindowExceededError as e:
# Convert litellm's context window error to our own exception type
# for consistent handling in the rest of the codebase
raise LLMContextLengthExceededException(str(e))
# --- 2) Extract response message and content
response_message = cast(Choices, cast(ModelResponse, response).choices)[
0
].message
text_response = response_message.content or ""
# --- 3) Handle callbacks with usage info
if callbacks and len(callbacks) > 0:
for callback in callbacks:
@@ -841,35 +803,22 @@ class LLM(BaseLLM):
start_time=0,
end_time=0,
)
# --- 4) Check for tool calls
tool_calls = getattr(response_message, "tool_calls", [])
# --- 5) If no tool calls or no available functions, return the text response directly as long as there is a text response
if (not tool_calls or not available_functions) and text_response:
self._handle_emit_call_events(
response=text_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
# --- 5) If no tool calls or no available functions, return the text response directly
if not tool_calls or not available_functions:
self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task, from_agent)
return text_response
# --- 6) If there is no text response, no available functions, but there are tool calls, return the tool calls
elif tool_calls and not available_functions and not text_response:
return tool_calls
# --- 7) Handle tool calls if present
# --- 6) Handle tool calls if present
tool_result = self._handle_tool_call(tool_calls, available_functions)
if tool_result is not None:
return tool_result
# --- 8) If tool call handling didn't return a result, emit completion event and return text response
self._handle_emit_call_events(
response=text_response,
call_type=LLMCallType.LLM_CALL,
from_task=from_task,
from_agent=from_agent,
messages=params["messages"],
)
# --- 7) If tool call handling didn't return a result, emit completion event and return text response
self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task, from_agent)
return text_response
def _handle_tool_call(
@@ -912,7 +861,6 @@ class LLM(BaseLLM):
tool_args=function_args,
),
)
result = fn(**function_args)
crewai_event_bus.emit(
self,
@@ -926,9 +874,7 @@ class LLM(BaseLLM):
)
# --- 3.3) Emit success event
self._handle_emit_call_events(
response=result, call_type=LLMCallType.TOOL_CALL
)
self._handle_emit_call_events(result, LLMCallType.TOOL_CALL)
return result
except Exception as e:
# --- 3.4) Handle execution errors
@@ -946,7 +892,7 @@ class LLM(BaseLLM):
event=ToolUsageErrorEvent(
tool_name=function_name,
tool_args=function_args,
error=f"Tool execution error: {str(e)}",
error=f"Tool execution error: {str(e)}"
),
)
return None
@@ -996,7 +942,6 @@ class LLM(BaseLLM):
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
model=self.model,
),
)
@@ -1006,18 +951,22 @@ class LLM(BaseLLM):
# --- 3) Convert string messages to proper format if needed
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
# --- 4) Handle O1 model special case (system messages not supported)
if "o1" in self.model.lower():
for message in messages:
if message.get("role") == "system":
message["role"] = "assistant"
# --- 5) Set up callbacks if provided
with suppress_warnings():
if callbacks and len(callbacks) > 0:
self.set_callbacks(callbacks)
try:
# --- 6) Prepare parameters for the completion call
params = self._prepare_completion_params(messages, tools)
# --- 7) Make the completion call and handle response
if self.stream:
return self._handle_streaming_response(
@@ -1034,69 +983,25 @@ class LLM(BaseLLM):
# whether to summarize the content or abort based on the respect_context_window flag
raise
except Exception as e:
unsupported_stop = "Unsupported parameter" in str(
e
) and "'stop'" in str(e)
if unsupported_stop:
if (
"additional_drop_params" in self.additional_params
and isinstance(
self.additional_params["additional_drop_params"], list
)
):
self.additional_params["additional_drop_params"].append("stop")
else:
self.additional_params = {"additional_drop_params": ["stop"]}
logging.info("Retrying LLM call without the unsupported 'stop'")
return self.call(
messages,
tools=tools,
callbacks=callbacks,
available_functions=available_functions,
from_task=from_task,
from_agent=from_agent,
)
assert hasattr(crewai_event_bus, "emit")
crewai_event_bus.emit(
self,
event=LLMCallFailedEvent(
error=str(e), from_task=from_task, from_agent=from_agent
),
event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
)
logging.error(f"LiteLLM call failed: {str(e)}")
raise
def _handle_emit_call_events(
self,
response: Any,
call_type: LLMCallType,
from_task: Optional[Any] = None,
from_agent: Optional[Any] = None,
messages: str | list[dict[str, Any]] | None = None,
):
def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None):
"""Handle the events for the LLM call.
Args:
response (str): The response from the LLM call.
call_type (str): The type of call, either "tool_call" or "llm_call".
from_task: Optional task object
from_agent: Optional agent object
messages: Optional messages object
"""
assert hasattr(crewai_event_bus, "emit")
crewai_event_bus.emit(
self,
event=LLMCallCompletedEvent(
messages=messages,
response=response,
call_type=call_type,
from_task=from_task,
from_agent=from_agent,
model=self.model,
),
event=LLMCallCompletedEvent(response=response, call_type=call_type, from_task=from_task, from_agent=from_agent),
)
def _format_messages_for_provider(
@@ -1142,14 +1047,13 @@ class LLM(BaseLLM):
if "mistral" in self.model.lower():
# Check if the last message has a role of 'assistant'
if messages and messages[-1]["role"] == "assistant":
return messages + [{"role": "user", "content": "Please continue."}]
# Add a dummy user message to ensure the last message has a role of 'user'
messages = (
messages.copy()
) # Create a copy to avoid modifying the original
messages.append({"role": "user", "content": "Please continue."})
return messages
# TODO: Remove this code after merging PR https://github.com/BerriAI/litellm/pull/10917
# Ollama doesn't support the last message being 'assistant'
if "ollama" in self.model.lower() and messages and messages[-1]["role"] == "assistant":
return messages + [{"role": "user", "content": ""}]
# Handle Anthropic models
if not self.is_anthropic:
return messages
@@ -1169,7 +1073,7 @@ class LLM(BaseLLM):
- If there is no '/', defaults to "openai".
"""
if "/" in self.model:
return self.model.partition("/")[0]
return self.model.split("/")[0]
return None
def _validate_call_params(self) -> None:
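
A quick illustration of the provider-prefix extraction in the hunk above: for a namespaced model id, split and partition agree on the piece before the first slash; partition simply avoids building the full list of segments. The model id below is invented for illustration.

model = "openrouter/meta-llama/llama-3-70b"
print(model.split("/")[0])      # "openrouter"
print(model.partition("/")[0])  # "openrouter"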

View File

@@ -1,9 +1,11 @@
from .entity.entity_memory import EntityMemory
from .long_term.long_term_memory import LongTermMemory
from .short_term.short_term_memory import ShortTermMemory
from .user.user_memory import UserMemory
from .external.external_memory import ExternalMemory
__all__ = [
"UserMemory",
"EntityMemory",
"LongTermMemory",
"ShortTermMemory",

View File

@@ -1,24 +1,32 @@
from typing import Optional
from typing import Any, Dict, Optional
from crewai.memory import (
EntityMemory,
ExternalMemory,
LongTermMemory,
ShortTermMemory,
UserMemory,
)
class ContextualMemory:
def __init__(
self,
memory_config: Optional[Dict[str, Any]],
stm: ShortTermMemory,
ltm: LongTermMemory,
em: EntityMemory,
um: UserMemory,
exm: ExternalMemory,
):
if memory_config is not None:
self.memory_provider = memory_config.get("provider")
else:
self.memory_provider = None
self.stm = stm
self.ltm = ltm
self.em = em
self.um = um
self.exm = exm
def build_context_for_task(self, task, context) -> str:
@@ -36,6 +44,8 @@ class ContextualMemory:
context.append(self._fetch_stm_context(query))
context.append(self._fetch_entity_context(query))
context.append(self._fetch_external_context(query))
if self.memory_provider == "mem0":
context.append(self._fetch_user_context(query))
return "\n".join(filter(None, context))
def _fetch_stm_context(self, query) -> str:
@@ -50,7 +60,7 @@ class ContextualMemory:
stm_results = self.stm.search(query)
formatted_results = "\n".join(
[
f"- {result['context']}"
f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
for result in stm_results
]
)
@@ -90,12 +100,33 @@ class ContextualMemory:
em_results = self.em.search(query)
formatted_results = "\n".join(
[
f"- {result['context']}"
f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
for result in em_results
] # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
)
return f"Entities:\n{formatted_results}" if em_results else ""
def _fetch_user_context(self, query: str) -> str:
"""
Fetches and formats relevant user information from User Memory.
Args:
query (str): The search query to find relevant user memories.
Returns:
str: Formatted user memories as bullet points, or an empty string if none found.
"""
if self.um is None:
return ""
user_memories = self.um.search(query)
if not user_memories:
return ""
formatted_memories = "\n".join(
f"- {result['memory']}" for result in user_memories
)
return f"User memories/preferences:\n{formatted_memories}"
def _fetch_external_context(self, query: str) -> str:
"""
Fetches and formats relevant information from External Memory.
@@ -113,6 +144,6 @@ class ContextualMemory:
return ""
formatted_memories = "\n".join(
f"- {result['context']}" for result in external_memories
f"- {result['memory']}" for result in external_memories
)
return f"External memories:\n{formatted_memories}"

View File

@@ -27,7 +27,11 @@ class EntityMemory(Memory):
_memory_provider: Optional[str] = PrivateAttr()
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
memory_provider = embedder_config.get("provider") if embedder_config else None
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
memory_provider = crew.memory_config.get("provider")
else:
memory_provider = None
if memory_provider == "mem0":
try:
from crewai.memory.storage.mem0_storage import Mem0Storage
@@ -35,8 +39,7 @@ class EntityMemory(Memory):
raise ImportError(
"Mem0 is not installed. Please install it with `pip install mem0ai`."
)
config = embedder_config.get("config")
storage = Mem0Storage(type="short_term", crew=crew, config=config)
storage = Mem0Storage(type="entities", crew=crew)
else:
storage = (
storage

View File

@@ -29,7 +29,11 @@ class ShortTermMemory(Memory):
_memory_provider: Optional[str] = PrivateAttr()
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
memory_provider = embedder_config.get("provider") if embedder_config else None
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
memory_provider = crew.memory_config.get("provider")
else:
memory_provider = None
if memory_provider == "mem0":
try:
from crewai.memory.storage.mem0_storage import Mem0Storage
@@ -37,8 +41,7 @@ class ShortTermMemory(Memory):
raise ImportError(
"Mem0 is not installed. Please install it with `pip install mem0ai`."
)
config = embedder_config.get("config")
storage = Mem0Storage(type="short_term", crew=crew, config=config)
storage = Mem0Storage(type="short_term", crew=crew)
else:
storage = (
storage

Some files were not shown because too many files have changed in this diff.