Compare commits


7 Commits

Author SHA1 Message Date
Lorenze Jay  38735cba99  Merge branch 'main' into bugfix/flow-persist-nested-models  2025-03-21 17:03:57 -07:00
Brandon Hancock  cde67882b4  resuse existing code and address PRs  2025-03-21 15:10:08 -04:00
Lorenze Jay  d3df545f1e  Merge branch 'main' into bugfix/flow-persist-nested-models  2025-03-21 11:59:11 -07:00
Brandon Hancock (bhancock_ai)  b5067a2689  Merge branch 'main' into bugfix/flow-persist-nested-models  2025-03-10 12:05:13 -04:00
Brandon Hancock (bhancock_ai)  362b20f052  Merge branch 'main' into bugfix/flow-persist-nested-models  2025-03-07 12:55:05 -05:00
Brandon Hancock  d5408ec461  Drop file  2025-03-06 16:40:36 -05:00
Brandon Hancock  6677c9c192  nested models in flow persist  2025-03-05 16:14:50 -05:00
188 changed files with 14103 additions and 40826 deletions

View File

@@ -1,33 +0,0 @@
name: Notify Downstream
on:
push:
branches:
- main
permissions:
contents: read
jobs:
notify-downstream:
runs-on: ubuntu-latest
steps:
- name: Generate GitHub App token
id: app-token
uses: tibdex/github-app-token@v2
with:
app_id: ${{ secrets.OSS_SYNC_APP_ID }}
private_key: ${{ secrets.OSS_SYNC_APP_PRIVATE_KEY }}
- name: Notify Repo B
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ steps.app-token.outputs.token }}
repository: ${{ secrets.OSS_SYNC_DOWNSTREAM_REPO }}
event-type: upstream-commit
client-payload: |
{
"commit_sha": "${{ github.sha }}"
}

View File

@@ -12,9 +12,6 @@ jobs:
   tests:
     runs-on: ubuntu-latest
     timeout-minutes: 15
-    strategy:
-      matrix:
-        python-version: ['3.10', '3.11', '3.12']
     steps:
       - name: Checkout code
         uses: actions/checkout@v4
@@ -24,8 +21,9 @@ jobs:
         with:
           enable-cache: true
-      - name: Set up Python ${{ matrix.python-version }}
-        run: uv python install ${{ matrix.python-version }}
+      - name: Set up Python
+        run: uv python install 3.12.8
       - name: Install the project
         run: uv sync --dev --all-extras

.gitignore
View File

@@ -26,4 +26,3 @@ test_flow.html
 crewairules.mdc
 plan.md
 conceptual_plan.md
-build_image

View File

@@ -257,14 +257,10 @@ reporting_task:
 from crewai import Agent, Crew, Process, Task
 from crewai.project import CrewBase, agent, crew, task
 from crewai_tools import SerperDevTool
-from crewai.agents.agent_builder.base_agent import BaseAgent
-from typing import List

 @CrewBase
 class LatestAiDevelopmentCrew():
     """LatestAiDevelopment crew"""

-    agents: List[BaseAgent]
-    tasks: List[Task]

     @agent
     def researcher(self) -> Agent:
@@ -405,16 +401,11 @@ You can test different real life examples of AI crews in the [CrewAI-examples re
 ### Using Crews and Flows Together

-CrewAI's power truly shines when combining Crews with Flows to create sophisticated automation pipelines.
-CrewAI flows support logical operators like `or_` and `and_` to combine multiple conditions. This can be used with `@start`, `@listen`, or `@router` decorators to create complex triggering conditions.
-- `or_`: Triggers when any of the specified conditions are met.
-- `and_`: Triggers when all of the specified conditions are met.
-Here's how you can orchestrate multiple Crews within a Flow:
+CrewAI's power truly shines when combining Crews with Flows to create sophisticated automation pipelines. Here's how you can orchestrate multiple Crews within a Flow:

 ```python
-from crewai.flow.flow import Flow, listen, start, router, or_
-from crewai import Crew, Agent, Task, Process
+from crewai.flow.flow import Flow, listen, start, router
+from crewai import Crew, Agent, Task
 from pydantic import BaseModel

 # Define structured state for precise control
@@ -488,7 +479,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
         )
         return strategy_crew.kickoff()

-    @listen(or_("medium_confidence", "low_confidence"))
+    @listen("medium_confidence", "low_confidence")
     def request_additional_analysis(self):
         self.state.recommendations.append("Gather more data")
         return "Additional analysis required"

View File

@@ -4,7 +4,7 @@ description: View the latest updates and changes to CrewAI
 icon: timeline
 ---
-<Update label="2025-03-17" description="v0.108.0">
+<Update label="2024-03-17" description="v0.108.0">
 **Features**
 - Converted tabs to spaces in `crew.py` template
 - Enhanced LLM Streaming Response Handling and Event System
@@ -24,7 +24,7 @@ icon: timeline
 - Added documentation for `ApifyActorsTool`
 </Update>
-<Update label="2025-03-10" description="v0.105.0">
+<Update label="2024-03-10" description="v0.105.0">
 **Core Improvements & Fixes**
 - Fixed issues with missing template variables and user memory configuration
 - Improved async flow support and addressed agent response formatting
@@ -45,7 +45,7 @@ icon: timeline
 - Fixed typos in prompts and updated Amazon Bedrock model listings
 </Update>
-<Update label="2025-02-12" description="v0.102.0">
+<Update label="2024-02-12" description="v0.102.0">
 **Core Improvements & Fixes**
 - Enhanced LLM Support: Improved structured LLM output, parameter handling, and formatting for Anthropic models
 - Crew & Agent Stability: Fixed issues with cloning agents/crews using knowledge sources, multiple task outputs in conditional tasks, and ignored Crew task callbacks
@@ -65,7 +65,7 @@ icon: timeline
 - Fixed Various Typos & Formatting Issues
 </Update>
-<Update label="2025-01-28" description="v0.100.0">
+<Update label="2024-01-28" description="v0.100.0">
 **Features**
 - Add Composio docs
 - Add SageMaker as a LLM provider
@@ -80,7 +80,7 @@ icon: timeline
 - Improve formatting and clarity in CLI and Composio Tool docs
 </Update>
-<Update label="2025-01-20" description="v0.98.0">
+<Update label="2024-01-20" description="v0.98.0">
 **Features**
 - Conversation crew v1
 - Add unique ID to flow states
@@ -101,7 +101,7 @@ icon: timeline
 - Fixed typos, nested pydantic model issue, and docling issues
 </Update>
-<Update label="2025-01-04" description="v0.95.0">
+<Update label="2024-01-04" description="v0.95.0">
 **New Features**
 - Adding Multimodal Abilities to Crew
 - Programatic Guardrails
@@ -131,7 +131,7 @@ icon: timeline
 - Suppressed userWarnings from litellm pydantic issues
 </Update>
-<Update label="2024-12-05" description="v0.86.0">
+<Update label="2023-12-05" description="v0.86.0">
 **Changes**
 - Remove all references to pipeline and pipeline router
 - Add Nvidia NIM as provider in Custom LLM
@@ -141,7 +141,7 @@ icon: timeline
 - Simplify template crew
 </Update>
-<Update label="2024-12-04" description="v0.85.0">
+<Update label="2023-12-04" description="v0.85.0">
 **Features**
 - Added knowledge to agent level
 - Feat/remove langchain
@@ -161,7 +161,7 @@ icon: timeline
 - Improvements to LLM Configuration and Usage
 </Update>
-<Update label="2024-11-25" description="v0.83.0">
+<Update label="2023-11-25" description="v0.83.0">
 **New Features**
 - New before_kickoff and after_kickoff crew callbacks
 - Support to pre-seed agents with Knowledge
@@ -178,7 +178,7 @@ icon: timeline
 - Update Docs
 </Update>
-<Update label="2024-11-13" description="v0.80.0">
+<Update label="2023-11-13" description="v0.80.0">
 **Fixes**
 - Fixing Tokens callback replacement bug
 - Fixing Step callback issue

View File

@@ -18,18 +18,6 @@ In the CrewAI framework, an `Agent` is an autonomous unit that can:
 Think of an agent as a specialized team member with specific skills, expertise, and responsibilities. For example, a `Researcher` agent might excel at gathering and analyzing information, while a `Writer` agent might be better at creating content.
 </Tip>

-<Note type="info" title="Enterprise Enhancement: Visual Agent Builder">
-CrewAI Enterprise includes a Visual Agent Builder that simplifies agent creation and configuration without writing code. Design your agents visually and test them in real-time.
-![Visual Agent Builder Screenshot](../images/enterprise/crew-studio-quickstart)
-The Visual Agent Builder enables:
-- Intuitive agent configuration with form-based interfaces
-- Real-time testing and validation
-- Template library with pre-configured agent types
-- Easy customization of agent attributes and behaviors
-</Note>

 ## Agent Attributes

 | Attribute | Parameter | Type | Description |
@@ -118,7 +106,7 @@ class LatestAiDevelopmentCrew():
     @agent
     def researcher(self) -> Agent:
         return Agent(
-            config=self.agents_config['researcher'], # type: ignore[index]
+            config=self.agents_config['researcher'],
             verbose=True,
             tools=[SerperDevTool()]
         )
@@ -126,7 +114,7 @@ class LatestAiDevelopmentCrew():
     @agent
     def reporting_analyst(self) -> Agent:
         return Agent(
-            config=self.agents_config['reporting_analyst'], # type: ignore[index]
+            config=self.agents_config['reporting_analyst'],
             verbose=True
         )
 ```

View File

@@ -23,7 +23,8 @@ The `Crew` class has been enriched with several attributes to support advanced f
 | **Process Flow** (`process`) | Defines execution logic (e.g., sequential, hierarchical) for task distribution. |
 | **Verbose Logging** (`verbose`) | Provides detailed logging for monitoring and debugging. Accepts integer and boolean values to control verbosity level. |
 | **Rate Limiting** (`max_rpm`) | Limits requests per minute to optimize resource usage. Setting guidelines depend on task complexity and load. |
-| **Internationalization / Customization** (`prompt_file`) | Supports prompt customization for global usability. [Example of file](https://github.com/joaomdmoura/crewAI/blob/main/src/crewai/translations/en.json) |
+| **Internationalization / Customization** (`language`, `prompt_file`) | Supports prompt customization for global usability. [Example of file](https://github.com/joaomdmoura/crewAI/blob/main/src/crewai/translations/en.json) |
+| **Execution and Output Handling** (`full_output`) | Controls output granularity, distinguishing between full and final outputs. |
 | **Callback and Telemetry** (`step_callback`, `task_callback`) | Enables step-wise and task-level execution monitoring and telemetry for performance analytics. |
 | **Crew Sharing** (`share_crew`) | Allows sharing crew data with CrewAI for model improvement. Privacy implications and benefits should be considered. |
 | **Usage Metrics** (`usage_metrics`) | Logs all LLM usage metrics during task execution for performance insights. |

View File

@@ -20,10 +20,13 @@ A crew in crewAI represents a collaborative group of agents working together to
 | **Function Calling LLM** _(optional)_ | `function_calling_llm` | If passed, the crew will use this LLM to do function calling for tools for all agents in the crew. Each agent can have its own LLM, which overrides the crew's LLM for function calling. |
 | **Config** _(optional)_ | `config` | Optional configuration settings for the crew, in `Json` or `Dict[str, Any]` format. |
 | **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
+| **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
+| **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
 | **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
 | **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
 | **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
 | **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
+| **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
 | **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
 | **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
 | **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
@@ -52,16 +55,12 @@ After creating your CrewAI project as outlined in the [Installation](/installati
 ```python code
 from crewai import Agent, Crew, Task, Process
 from crewai.project import CrewBase, agent, task, crew, before_kickoff, after_kickoff
-from crewai.agents.agent_builder.base_agent import BaseAgent
-from typing import List

 @CrewBase
 class YourCrewName:
     """Description of your crew"""

-    agents: List[BaseAgent]
-    tasks: List[Task]

     # Paths to your YAML configuration files
     # To see an example agent and task defined in YAML, checkout the following:
     # - Task: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
@@ -84,27 +83,27 @@ class YourCrewName:
     @agent
     def agent_one(self) -> Agent:
         return Agent(
-            config=self.agents_config['agent_one'], # type: ignore[index]
+            config=self.agents_config['agent_one'],
             verbose=True
         )

     @agent
     def agent_two(self) -> Agent:
         return Agent(
-            config=self.agents_config['agent_two'], # type: ignore[index]
+            config=self.agents_config['agent_two'],
             verbose=True
         )

     @task
     def task_one(self) -> Task:
         return Task(
-            config=self.tasks_config['task_one'] # type: ignore[index]
+            config=self.tasks_config['task_one']
         )

     @task
     def task_two(self) -> Task:
         return Task(
-            config=self.tasks_config['task_two'] # type: ignore[index]
+            config=self.tasks_config['task_two']
         )

     @crew

View File

@@ -1,7 +1,6 @@
 ---
 title: 'Event Listeners'
 description: 'Tap into CrewAI events to build custom integrations and monitoring'
-icon: spinner
 ---

 # Event Listeners
@@ -13,25 +12,11 @@ CrewAI provides a powerful event system that allows you to listen for and react
 CrewAI uses an event bus architecture to emit events throughout the execution lifecycle. The event system is built on the following components:

 1. **CrewAIEventsBus**: A singleton event bus that manages event registration and emission
-2. **BaseEvent**: Base class for all events in the system
+2. **CrewEvent**: Base class for all events in the system
 3. **BaseEventListener**: Abstract base class for creating custom event listeners

 When specific actions occur in CrewAI (like a Crew starting execution, an Agent completing a task, or a tool being used), the system emits corresponding events. You can register handlers for these events to execute custom code when they occur.

-<Note type="info" title="Enterprise Enhancement: Prompt Tracing">
-CrewAI Enterprise provides a built-in Prompt Tracing feature that leverages the event system to track, store, and visualize all prompts, completions, and associated metadata. This provides powerful debugging capabilities and transparency into your agent operations.
-![Prompt Tracing Dashboard](../images/enterprise/prompt-tracing.png)
-With Prompt Tracing you can:
-- View the complete history of all prompts sent to your LLM
-- Track token usage and costs
-- Debug agent reasoning failures
-- Share prompt sequences with your team
-- Compare different prompt strategies
-- Export traces for compliance and auditing
-</Note>

 ## Creating a Custom Event Listener

 To create a custom event listener, you need to:
@@ -248,7 +233,7 @@ Each event handler receives two parameters:
 1. **source**: The object that emitted the event
 2. **event**: The event instance, containing event-specific data

-The structure of the event object depends on the event type, but all events inherit from `BaseEvent` and include:
+The structure of the event object depends on the event type, but all events inherit from `CrewEvent` and include:

 - **timestamp**: The time when the event was emitted
 - **type**: A string identifier for the event type

View File

@@ -545,119 +545,6 @@ The `third_method` and `fourth_method` listen to the output of the `second_metho
When you run this Flow, the output will change based on the random boolean value generated by the `start_method`.
## Adding Agents to Flows
Agents can be seamlessly integrated into your flows, providing a lightweight alternative to full Crews when you need simpler, focused task execution. Here's an example of how to use an Agent within a flow to perform market research:
```python
import asyncio
from typing import Any, Dict, List
from crewai_tools import SerperDevTool
from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.flow.flow import Flow, listen, start
# Define a structured output format
class MarketAnalysis(BaseModel):
key_trends: List[str] = Field(description="List of identified market trends")
market_size: str = Field(description="Estimated market size")
competitors: List[str] = Field(description="Major competitors in the space")
# Define flow state
class MarketResearchState(BaseModel):
product: str = ""
analysis: MarketAnalysis | None = None
# Create a flow class
class MarketResearchFlow(Flow[MarketResearchState]):
@start()
def initialize_research(self) -> Dict[str, Any]:
print(f"Starting market research for {self.state.product}")
return {"product": self.state.product}
@listen(initialize_research)
async def analyze_market(self) -> Dict[str, Any]:
# Create an Agent for market research
analyst = Agent(
role="Market Research Analyst",
goal=f"Analyze the market for {self.state.product}",
backstory="You are an experienced market analyst with expertise in "
"identifying market trends and opportunities.",
tools=[SerperDevTool()],
verbose=True,
)
# Define the research query
query = f"""
Research the market for {self.state.product}. Include:
1. Key market trends
2. Market size
3. Major competitors
Format your response according to the specified structure.
"""
# Execute the analysis with structured output format
result = await analyst.kickoff_async(query, response_format=MarketAnalysis)
if result.pydantic:
print("result", result.pydantic)
else:
print("result", result)
# Return the analysis to update the state
return {"analysis": result.pydantic}
@listen(analyze_market)
def present_results(self, analysis) -> None:
print("\nMarket Analysis Results")
print("=====================")
if isinstance(analysis, dict):
# If we got a dict with 'analysis' key, extract the actual analysis object
market_analysis = analysis.get("analysis")
else:
market_analysis = analysis
if market_analysis and isinstance(market_analysis, MarketAnalysis):
print("\nKey Market Trends:")
for trend in market_analysis.key_trends:
print(f"- {trend}")
print(f"\nMarket Size: {market_analysis.market_size}")
print("\nMajor Competitors:")
for competitor in market_analysis.competitors:
print(f"- {competitor}")
else:
print("No structured analysis data available.")
print("Raw analysis:", analysis)
# Usage example
async def run_flow():
flow = MarketResearchFlow()
result = await flow.kickoff_async(inputs={"product": "AI-powered chatbots"})
return result
# Run the flow
if __name__ == "__main__":
asyncio.run(run_flow())
```
This example demonstrates several key features of using Agents in flows:
1. **Structured Output**: Using Pydantic models to define the expected output format (`MarketAnalysis`) ensures type safety and structured data throughout the flow.
2. **State Management**: The flow state (`MarketResearchState`) maintains context between steps and stores both inputs and outputs.
3. **Tool Integration**: Agents can use tools (like `WebsiteSearchTool`) to enhance their capabilities.
## Adding Crews to Flows
Creating a flow with multiple crews in CrewAI is straightforward.

View File

@@ -438,7 +438,7 @@ In this section, you'll find detailed examples that help you select, configure,
     @agent
     def researcher(self) -> Agent:
         return Agent(
-            config=self.agents_config['researcher'], # type: ignore[index]
+            config=self.agents_config['researcher'],
             llm=local_nvidia_nim_llm
         )
@@ -535,13 +535,14 @@ In this section, you'll find detailed examples that help you select, configure,
 <Accordion title="Hugging Face">
     Set the following environment variables in your `.env` file:
     ```toml Code
-    HF_TOKEN=<your-api-key>
+    HUGGINGFACE_API_KEY=<your-api-key>
     ```
     Example usage in your CrewAI project:
     ```python Code
     llm = LLM(
-        model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct"
+        model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
+        base_url="your_api_endpoint"
     )
     ```
 </Accordion>

View File

@@ -18,8 +18,7 @@ reason, and learn from past interactions.
 | **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
 | **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
 | **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
-| **External Memory** | Enables integration with external memory systems and providers (like Mem0), allowing for specialized memory storage and retrieval across different applications. Supports custom storage implementations for flexible memory management. |
-| **User Memory** | ⚠️ **DEPRECATED**: This component is deprecated and will be removed in a future version. Please use [External Memory](#using-external-memory) instead. |
+| **User Memory** | Stores user-specific information and preferences, enhancing personalization and user experience. |

 ## How Memory Systems Empower Agents
@@ -145,7 +144,6 @@ from crewai.memory import LongTermMemory
 # Simple memory configuration
 crew = Crew(memory=True)  # Uses default storage locations
 ```
-Note that External Memory won't be defined when `memory=True` is set, as we can't infer which external memory would be suitable for your case.

 ### Custom Storage Configuration

 ```python
@@ -166,10 +164,7 @@
 [Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.

-### Using Mem0 API platform
-To include user-specific memory you can get your API key [here](https://app.mem0.ai/dashboard/api-keys) and refer the [docs](https://docs.mem0.ai/platform/quickstart#4-1-create-memories) for adding user preferences. In this case `user_memory` is set to `MemoryClient` from mem0.
+To include user-specific memory you can get your API key [here](https://app.mem0.ai/dashboard/api-keys) and refer the [docs](https://docs.mem0.ai/platform/quickstart#4-1-create-memories) for adding user preferences.

 ```python Code
@@ -180,7 +175,18 @@ from mem0 import MemoryClient
 # Set environment variables for Mem0
 os.environ["MEM0_API_KEY"] = "m0-xx"

-# Step 1: Create a Crew with User Memory
+# Step 1: Record preferences based on past conversation or user input
+client = MemoryClient()
+messages = [
+    {"role": "user", "content": "Hi there! I'm planning a vacation and could use some advice."},
+    {"role": "assistant", "content": "Hello! I'd be happy to help with your vacation planning. What kind of destination do you prefer?"},
+    {"role": "user", "content": "I am more of a beach person than a mountain person."},
+    {"role": "assistant", "content": "That's interesting. Do you like hotels or Airbnb?"},
+    {"role": "user", "content": "I like Airbnb more."},
+]
+client.add(messages, user_id="john")
+
+# Step 2: Create a Crew with User Memory
 crew = Crew(
     agents=[...],
@@ -191,12 +197,11 @@ crew = Crew(
     memory_config={
         "provider": "mem0",
         "config": {"user_id": "john"},
-        "user_memory" : {} #Set user_memory explicitly to a dictionary, we are working on this issue.
     },
 )
 ```

-#### Additional Memory Configuration Options
+## Memory Configuration Options
 If you want to access a specific organization and project, you can set the `org_id` and `project_id` parameters in the memory configuration.

 ```python Code
@@ -210,172 +215,10 @@
 memory_config={
     "provider": "mem0",
     "config": {"user_id": "john", "org_id": "my_org_id", "project_id": "my_project_id"},
-    "user_memory" : {} #Set user_memory explicitly to a dictionary, we are working on this issue.
 },
 )
 ```
### Using Local Mem0 memory
If you want to use local mem0 memory, with a custom configuration, you can set a parameter `local_mem0_config` in the config itself.
If both os environment key is set and local_mem0_config is given, the API platform takes higher priority over the local configuration.
Check [this](https://docs.mem0.ai/open-source/python-quickstart#run-mem0-locally) mem0 local configuration docs for more understanding.
In this case `user_memory` is set to `Memory` from mem0.
```python Code
from crewai import Crew
#local mem0 config
config = {
"vector_store": {
"provider": "qdrant",
"config": {
"host": "localhost",
"port": 6333
}
},
"llm": {
"provider": "openai",
"config": {
"api_key": "your-api-key",
"model": "gpt-4"
}
},
"embedder": {
"provider": "openai",
"config": {
"api_key": "your-api-key",
"model": "text-embedding-3-small"
}
},
"graph_store": {
"provider": "neo4j",
"config": {
"url": "neo4j+s://your-instance",
"username": "neo4j",
"password": "password"
}
},
"history_db_path": "/path/to/history.db",
"version": "v1.1",
"custom_fact_extraction_prompt": "Optional custom prompt for fact extraction for memory",
"custom_update_memory_prompt": "Optional custom prompt for update memory"
}
crew = Crew(
agents=[...],
tasks=[...],
verbose=True,
memory=True,
memory_config={
"provider": "mem0",
"config": {"user_id": "john", 'local_mem0_config': config},
"user_memory" : {} #Set user_memory explicitly to a dictionary, we are working on this issue.
},
)
```
### Using External Memory
External Memory is a powerful feature that allows you to integrate external memory systems with your CrewAI applications. This is particularly useful when you want to use specialized memory providers or maintain memory across different applications.
Since it's an external memory, we're not able to add a default value to it - unlike with Long Term and Short Term memory.
#### Basic Usage with Mem0
The most common way to use External Memory is with Mem0 as the provider:
```python
import os
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
os.environ["MEM0_API_KEY"] = "YOUR-API-KEY"
agent = Agent(
role="You are a helpful assistant",
goal="Plan a vacation for the user",
backstory="You are a helpful assistant that can plan a vacation for the user",
verbose=True,
)
task = Task(
description="Give things related to the user's vacation",
expected_output="A plan for the vacation",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
process=Process.sequential,
external_memory=ExternalMemory(
embedder_config={"provider": "mem0", "config": {"user_id": "U-123"}} # you can provide an entire Mem0 configuration
),
)
crew.kickoff(
inputs={"question": "which destination is better for a beach vacation?"}
)
```
#### Using External Memory with Custom Storage
You can also create custom storage implementations for External Memory. Here's an example of how to create a custom storage:
```python
from crewai import Agent, Crew, Process, Task
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.storage.interface import Storage
class CustomStorage(Storage):
def __init__(self):
self.memories = []
def save(self, value, metadata=None, agent=None):
self.memories.append({"value": value, "metadata": metadata, "agent": agent})
def search(self, query, limit=10, score_threshold=0.5):
# Implement your search logic here
return []
def reset(self):
self.memories = []
# Create external memory with custom storage
external_memory = ExternalMemory(
storage=CustomStorage(),
embedder_config={"provider": "mem0", "config": {"user_id": "U-123"}},
)
agent = Agent(
role="You are a helpful assistant",
goal="Plan a vacation for the user",
backstory="You are a helpful assistant that can plan a vacation for the user",
verbose=True,
)
task = Task(
description="Give things related to the user's vacation",
expected_output="A plan for the vacation",
agent=agent,
)
crew = Crew(
agents=[agent],
tasks=[task],
verbose=True,
process=Process.sequential,
external_memory=external_memory,
)
crew.kickoff(
inputs={"question": "which destination is better for a beach vacation?"}
)
```
## Additional Embedding Providers
### Using OpenAI embeddings (already default)

View File

@@ -12,18 +12,6 @@ Tasks provide all necessary details for execution, such as a description, the ag
 Tasks within CrewAI can be collaborative, requiring multiple agents to work together. This is managed through the task properties and orchestrated by the Crew's process, enhancing teamwork and efficiency.

-<Note type="info" title="Enterprise Enhancement: Visual Task Builder">
-CrewAI Enterprise includes a Visual Task Builder in Crew Studio that simplifies complex task creation and chaining. Design your task flows visually and test them in real-time without writing code.
-![Task Builder Screenshot](../images/enterprise/crew-studio-quickstart.png)
-The Visual Task Builder enables:
-- Drag-and-drop task creation
-- Visual task dependencies and flow
-- Real-time testing and validation
-- Easy sharing and collaboration
-</Note>

 ### Task Execution Flow

 Tasks can be executed in two ways:
@@ -113,7 +101,7 @@ class LatestAiDevelopmentCrew():
     @agent
     def researcher(self) -> Agent:
         return Agent(
-            config=self.agents_config['researcher'], # type: ignore[index]
+            config=self.agents_config['researcher'],
             verbose=True,
             tools=[SerperDevTool()]
         )
@@ -121,20 +109,20 @@ class LatestAiDevelopmentCrew():
     @agent
     def reporting_analyst(self) -> Agent:
         return Agent(
-            config=self.agents_config['reporting_analyst'], # type: ignore[index]
+            config=self.agents_config['reporting_analyst'],
             verbose=True
         )

     @task
     def research_task(self) -> Task:
         return Task(
-            config=self.tasks_config['research_task'] # type: ignore[index]
+            config=self.tasks_config['research_task']
         )

     @task
     def reporting_task(self) -> Task:
         return Task(
-            config=self.tasks_config['reporting_task'] # type: ignore[index]
+            config=self.tasks_config['reporting_task']
         )
@@ -288,20 +276,26 @@ To add a guardrail to a task, provide a validation function through the `guardra
 ```python Code
 from typing import Tuple, Union, Dict, Any
-from crewai import TaskOutput

-def validate_blog_content(result: TaskOutput) -> Tuple[bool, Any]:
+def validate_blog_content(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
     """Validate blog content meets requirements."""
     try:
         # Check word count
         word_count = len(result.split())
         if word_count > 200:
-            return (False, "Blog content exceeds 200 words")
+            return (False, {
+                "error": "Blog content exceeds 200 words",
+                "code": "WORD_COUNT_ERROR",
+                "context": {"word_count": word_count}
+            })

         # Additional validation logic here
         return (True, result.strip())
     except Exception as e:
-        return (False, "Unexpected error during validation")
+        return (False, {
+            "error": "Unexpected error during validation",
+            "code": "SYSTEM_ERROR"
+        })

 blog_task = Task(
     description="Write a blog post about AI",
@@ -319,24 +313,29 @@ blog_task = Task(
 - Type hints are recommended but optional

 2. **Return Values**:
-   - On success: it returns a tuple of `(bool, Any)`. For example: `(True, validated_result)`
-   - On Failure: it returns a tuple of `(bool, str)`. For example: `(False, "Error message explain the failure")`
+   - Success: Return `(True, validated_result)`
+   - Failure: Return `(False, error_details)`

 ### Error Handling Best Practices

 1. **Structured Error Responses**:
 ```python Code
-from crewai import TaskOutput
-def validate_with_context(result: TaskOutput) -> Tuple[bool, Any]:
+def validate_with_context(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
     try:
         # Main validation logic
         validated_data = perform_validation(result)
         return (True, validated_data)
     except ValidationError as e:
-        return (False, f"VALIDATION_ERROR: {str(e)}")
+        return (False, {
+            "error": str(e),
+            "code": "VALIDATION_ERROR",
+            "context": {"input": result}
+        })
     except Exception as e:
-        return (False, str(e))
+        return (False, {
+            "error": "Unexpected error",
+            "code": "SYSTEM_ERROR"
+        })
 ```

 2. **Error Categories**:
@@ -347,25 +346,28 @@ def validate_with_context(result: TaskOutput) -> Tuple[bool, Any]:
 3. **Validation Chain**:
 ```python Code
 from typing import Any, Dict, List, Tuple, Union
-from crewai import TaskOutput

-def complex_validation(result: TaskOutput) -> Tuple[bool, Any]:
+def complex_validation(result: str) -> Tuple[bool, Union[str, Dict[str, Any]]]:
     """Chain multiple validation steps."""
     # Step 1: Basic validation
     if not result:
-        return (False, "Empty result")
+        return (False, {"error": "Empty result", "code": "EMPTY_INPUT"})

     # Step 2: Content validation
     try:
         validated = validate_content(result)
         if not validated:
-            return (False, "Invalid content")
+            return (False, {"error": "Invalid content", "code": "CONTENT_ERROR"})

         # Step 3: Format validation
         formatted = format_output(validated)
         return (True, formatted)
     except Exception as e:
-        return (False, str(e))
+        return (False, {
+            "error": str(e),
+            "code": "VALIDATION_ERROR",
+            "context": {"step": "content_validation"}
+        })
 ```

 ### Handling Guardrail Results
@@ -380,16 +382,19 @@ When a guardrail returns `(False, error)`:
 Example with retry handling:
 ```python Code
 from typing import Optional, Tuple, Union
-from crewai import TaskOutput, Task

-def validate_json_output(result: TaskOutput) -> Tuple[bool, Any]:
+def validate_json_output(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
     """Validate and parse JSON output."""
     try:
         # Try to parse as JSON
         data = json.loads(result)
         return (True, data)
     except json.JSONDecodeError as e:
-        return (False, "Invalid JSON format")
+        return (False, {
+            "error": "Invalid JSON format",
+            "code": "JSON_ERROR",
+            "context": {"line": e.lineno, "column": e.colno}
+        })

 task = Task(
     description="Generate a JSON report",
@@ -409,7 +414,7 @@ It's also important to note that the output of the final task of a crew becomes
 ### Using `output_pydantic`

 The `output_pydantic` property allows you to define a Pydantic model that the task output should conform to. This ensures that the output is not only structured but also validated according to the Pydantic model.

-Here's an example demonstrating how to use output_pydantic:
+Here’s an example demonstrating how to use output_pydantic:
 ```python Code
 import json
@@ -490,7 +495,7 @@ In this example:
 ### Using `output_json`

 The `output_json` property allows you to define the expected output in JSON format. This ensures that the task's output is a valid JSON structure that can be easily parsed and used in your application.

-Here's an example demonstrating how to use `output_json`:
+Here’s an example demonstrating how to use `output_json`:
 ```python Code
 import json

View File

@@ -15,18 +15,6 @@ A tool in CrewAI is a skill or function that agents can utilize to perform vario
 This includes tools from the [CrewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools),
 enabling everything from simple searches to complex interactions and effective teamwork among agents.

-<Note type="info" title="Enterprise Enhancement: Tools Repository">
-CrewAI Enterprise provides a comprehensive Tools Repository with pre-built integrations for common business systems and APIs. Deploy agents with enterprise tools in minutes instead of days.
-![Tools Repository Screenshot](../images/enterprise/tools-repository.png)
-The Enterprise Tools Repository includes:
-- Pre-built connectors for popular enterprise systems
-- Custom tool creation interface
-- Version control and sharing capabilities
-- Security and compliance features
-</Note>

 ## Key Characteristics of Tools

 - **Utility**: Crafted for tasks such as web searching, data analysis, content generation, and agent collaboration.
@@ -91,7 +79,7 @@ research = Task(
 )

 write = Task(
-    description='Write an engaging blog post about the AI industry, based on the research analyst's summary. Draw inspiration from the latest blog posts in the directory.',
+    description='Write an engaging blog post about the AI industry, based on the research analyst’s summary. Draw inspiration from the latest blog posts in the directory.',
     expected_output='A 4-paragraph blog post formatted in markdown with engaging, informative, and accessible content, avoiding complex jargon.',
     agent=writer,
     output_file='blog-posts/new_post.md'  # The final blog post will be saved here
@@ -153,7 +141,7 @@ Here is a list of the available tools and their descriptions:
 ## Creating your own Tools

 <Tip>
-    Developers can craft `custom tools` tailored for their agent's needs or
+    Developers can craft `custom tools` tailored for their agent’s needs or
     utilize pre-built options.
 </Tip>

View File

@@ -1,642 +0,0 @@
# Custom LLM Implementations
CrewAI now supports custom LLM implementations through the `BaseLLM` abstract base class. This allows you to create your own LLM implementations that don't rely on litellm's authentication mechanism.
## Using Custom LLM Implementations
To create a custom LLM implementation, you need to:
1. Inherit from the `BaseLLM` abstract base class
2. Implement the required methods:
- `call()`: The main method to call the LLM with messages
- `supports_function_calling()`: Whether the LLM supports function calling
- `supports_stop_words()`: Whether the LLM supports stop words
- `get_context_window_size()`: The context window size of the LLM
## Example: Basic Custom LLM
```python
from crewai import BaseLLM
from typing import Any, Dict, List, Optional, Union
class CustomLLM(BaseLLM):
def __init__(self, api_key: str, endpoint: str):
super().__init__() # Initialize the base class to set default attributes
if not api_key or not isinstance(api_key, str):
raise ValueError("Invalid API key: must be a non-empty string")
if not endpoint or not isinstance(endpoint, str):
raise ValueError("Invalid endpoint URL: must be a non-empty string")
self.api_key = api_key
self.endpoint = endpoint
self.stop = [] # You can customize stop words if needed
def call(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
"""Call the LLM with the given messages.
Args:
messages: Input messages for the LLM.
tools: Optional list of tool schemas for function calling.
callbacks: Optional list of callback functions.
available_functions: Optional dict mapping function names to callables.
Returns:
Either a text response from the LLM or the result of a tool function call.
Raises:
TimeoutError: If the LLM request times out.
RuntimeError: If the LLM request fails for other reasons.
ValueError: If the response format is invalid.
"""
# Implement your own logic to call the LLM
# For example, using requests:
import requests
try:
headers = {
"Authorization": f"Bearer {self.api_key}",
"Content-Type": "application/json"
}
# Convert string message to proper format if needed
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
data = {
"messages": messages,
"tools": tools
}
response = requests.post(
self.endpoint,
headers=headers,
json=data,
timeout=30 # Set a reasonable timeout
)
response.raise_for_status() # Raise an exception for HTTP errors
return response.json()["choices"][0]["message"]["content"]
except requests.Timeout:
raise TimeoutError("LLM request timed out")
except requests.RequestException as e:
raise RuntimeError(f"LLM request failed: {str(e)}")
except (KeyError, IndexError, ValueError) as e:
raise ValueError(f"Invalid response format: {str(e)}")
def supports_function_calling(self) -> bool:
"""Check if the LLM supports function calling.
Returns:
True if the LLM supports function calling, False otherwise.
"""
# Return True if your LLM supports function calling
return True
def supports_stop_words(self) -> bool:
"""Check if the LLM supports stop words.
Returns:
True if the LLM supports stop words, False otherwise.
"""
# Return True if your LLM supports stop words
return True
def get_context_window_size(self) -> int:
"""Get the context window size of the LLM.
Returns:
The context window size as an integer.
"""
# Return the context window size of your LLM
return 8192
```
## Error Handling Best Practices
When implementing custom LLMs, it's important to handle errors properly to ensure robustness and reliability. Here are some best practices:
### 1. Implement Try-Except Blocks for API Calls
Always wrap API calls in try-except blocks to handle different types of errors:
```python
def call(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
try:
# API call implementation
response = requests.post(
self.endpoint,
headers=self.headers,
json=self.prepare_payload(messages),
timeout=30 # Set a reasonable timeout
)
response.raise_for_status() # Raise an exception for HTTP errors
return response.json()["choices"][0]["message"]["content"]
except requests.Timeout:
raise TimeoutError("LLM request timed out")
except requests.RequestException as e:
raise RuntimeError(f"LLM request failed: {str(e)}")
except (KeyError, IndexError, ValueError) as e:
raise ValueError(f"Invalid response format: {str(e)}")
```
### 2. Implement Retry Logic for Transient Failures
For transient failures like network issues or rate limiting, implement retry logic with exponential backoff:
```python
def call(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
import time
max_retries = 3
retry_delay = 1 # seconds
for attempt in range(max_retries):
try:
response = requests.post(
self.endpoint,
headers=self.headers,
json=self.prepare_payload(messages),
timeout=30
)
response.raise_for_status()
return response.json()["choices"][0]["message"]["content"]
except (requests.Timeout, requests.ConnectionError) as e:
if attempt < max_retries - 1:
time.sleep(retry_delay * (2 ** attempt)) # Exponential backoff
continue
raise TimeoutError(f"LLM request failed after {max_retries} attempts: {str(e)}")
except requests.RequestException as e:
raise RuntimeError(f"LLM request failed: {str(e)}")
```
### 3. Validate Input Parameters
Always validate input parameters to prevent runtime errors:
```python
def __init__(self, api_key: str, endpoint: str):
super().__init__()
if not api_key or not isinstance(api_key, str):
raise ValueError("Invalid API key: must be a non-empty string")
if not endpoint or not isinstance(endpoint, str):
raise ValueError("Invalid endpoint URL: must be a non-empty string")
self.api_key = api_key
self.endpoint = endpoint
```
### 4. Handle Authentication Errors Gracefully
Provide clear error messages for authentication failures:
```python
def call(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
try:
response = requests.post(self.endpoint, headers=self.headers, json=data)
if response.status_code == 401:
raise ValueError("Authentication failed: Invalid API key or token")
elif response.status_code == 403:
raise ValueError("Authorization failed: Insufficient permissions")
response.raise_for_status()
# Process response
except Exception as e:
# Handle error
raise
```
## Example: JWT-based Authentication
For services that use JWT-based authentication instead of API keys, you can implement a custom LLM like this:
```python
from crewai import BaseLLM, Agent, Task
from typing import Any, Dict, List, Optional, Union

class JWTAuthLLM(BaseLLM):
    def __init__(self, jwt_token: str, endpoint: str):
        super().__init__()  # Initialize the base class to set default attributes
        if not jwt_token or not isinstance(jwt_token, str):
            raise ValueError("Invalid JWT token: must be a non-empty string")
        if not endpoint or not isinstance(endpoint, str):
            raise ValueError("Invalid endpoint URL: must be a non-empty string")
        self.jwt_token = jwt_token
        self.endpoint = endpoint
        self.stop = []  # You can customize stop words if needed

    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Any]:
        """Call the LLM with JWT authentication.

        Args:
            messages: Input messages for the LLM.
            tools: Optional list of tool schemas for function calling.
            callbacks: Optional list of callback functions.
            available_functions: Optional dict mapping function names to callables.

        Returns:
            Either a text response from the LLM or the result of a tool function call.

        Raises:
            TimeoutError: If the LLM request times out.
            RuntimeError: If the LLM request fails for other reasons.
            ValueError: If the response format is invalid.
        """
        # Implement your own logic to call the LLM with JWT authentication
        import requests

        try:
            headers = {
                "Authorization": f"Bearer {self.jwt_token}",
                "Content-Type": "application/json"
            }
            # Convert string message to proper format if needed
            if isinstance(messages, str):
                messages = [{"role": "user", "content": messages}]
            data = {
                "messages": messages,
                "tools": tools
            }
            response = requests.post(
                self.endpoint,
                headers=headers,
                json=data,
                timeout=30  # Set a reasonable timeout
            )
            if response.status_code == 401:
                raise ValueError("Authentication failed: Invalid JWT token")
            elif response.status_code == 403:
                raise ValueError("Authorization failed: Insufficient permissions")
            response.raise_for_status()  # Raise an exception for HTTP errors
            return response.json()["choices"][0]["message"]["content"]
        except requests.Timeout:
            raise TimeoutError("LLM request timed out")
        except requests.RequestException as e:
            raise RuntimeError(f"LLM request failed: {str(e)}")
        except (KeyError, IndexError) as e:
            # KeyError/IndexError only, so the authentication ValueErrors raised
            # above propagate to the caller unchanged
            raise ValueError(f"Invalid response format: {str(e)}")

    def supports_function_calling(self) -> bool:
        """Check if the LLM supports function calling.

        Returns:
            True if the LLM supports function calling, False otherwise.
        """
        return True

    def supports_stop_words(self) -> bool:
        """Check if the LLM supports stop words.

        Returns:
            True if the LLM supports stop words, False otherwise.
        """
        return True

    def get_context_window_size(self) -> int:
        """Get the context window size of the LLM.

        Returns:
            The context window size as an integer.
        """
        return 8192
```
## Troubleshooting
Here are some common issues you might encounter when implementing custom LLMs and how to resolve them:
### 1. Authentication Failures
**Symptoms**: 401 Unauthorized or 403 Forbidden errors
**Solutions**:
- Verify that your API key or JWT token is valid and not expired
- Check that you're using the correct authentication header format
- Ensure that your token has the necessary permissions
### 2. Timeout Issues
**Symptoms**: Requests taking too long or timing out
**Solutions**:
- Implement timeout handling as shown in the examples
- Use retry logic with exponential backoff
- Consider using a more reliable network connection
### 3. Response Parsing Errors
**Symptoms**: KeyError, IndexError, or ValueError when processing responses
**Solutions**:
- Validate the response format before accessing nested fields (see the sketch after this list)
- Implement proper error handling for malformed responses
- Check the API documentation for the expected response format
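As a minimal sketch of that defensive parsing, assuming an OpenAI-style `choices` payload (the `_extract_content` helper name is illustrative, not part of the CrewAI API):
```python
def _extract_content(self, response_data: dict) -> str:
    """Safely pull the message text out of an OpenAI-style response payload."""
    try:
        choices = response_data["choices"]
        if not choices:
            raise ValueError("Response contained an empty 'choices' list")
        return choices[0]["message"]["content"]
    except (KeyError, IndexError, TypeError) as e:
        # Surface a snippet of the payload to make debugging easier
        raise ValueError(f"Unexpected response format: {e}; payload: {str(response_data)[:200]}")
```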
### 4. Rate Limiting
**Symptoms**: 429 Too Many Requests errors
**Solutions**:
- Implement rate limiting in your custom LLM
- Add exponential backoff for retries
- Consider using a token bucket algorithm for more precise rate control
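A token bucket is straightforward to sketch; the class below is illustrative (not a CrewAI API) and assumes a single-threaded caller:
```python
import time

class TokenBucket:
    """Refills at `rate` tokens per second, up to `capacity` tokens."""

    def __init__(self, rate: float, capacity: int):
        self.rate = rate
        self.capacity = capacity
        self.tokens = float(capacity)
        self.last_refill = time.time()

    def acquire(self) -> None:
        """Block until a token is available, then consume it."""
        while True:
            now = time.time()
            # Refill proportionally to elapsed time, capped at capacity
            self.tokens = min(self.capacity, self.tokens + (now - self.last_refill) * self.rate)
            self.last_refill = now
            if self.tokens >= 1:
                self.tokens -= 1
                return
            time.sleep((1 - self.tokens) / self.rate)
```
A custom LLM would call something like `self.bucket.acquire()` at the top of `call()` before issuing the HTTP request; the sliding-window limiter shown under Advanced Features below is a simpler alternative.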
## Advanced Features
### Logging
Adding logging to your custom LLM can help with debugging and monitoring:
```python
import logging
from typing import Any, Dict, List, Optional, Union

from crewai import BaseLLM

class LoggingLLM(BaseLLM):
    def __init__(self, api_key: str, endpoint: str):
        super().__init__()
        self.api_key = api_key
        self.endpoint = endpoint
        self.logger = logging.getLogger("crewai.llm.custom")

    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Any]:
        self.logger.info(f"Calling LLM with {len(messages) if isinstance(messages, list) else 1} messages")
        try:
            # self._make_api_call is a placeholder for your own HTTP helper
            response = self._make_api_call(messages, tools)
            self.logger.debug(f"LLM response received: {response[:100]}...")
            return response
        except Exception as e:
            self.logger.error(f"LLM call failed: {str(e)}")
            raise
```
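These log lines only appear if the standard library logger is configured somewhere in your application, for example:
```python
import logging

logging.basicConfig(level=logging.INFO)
# Or turn up verbosity for just this logger:
logging.getLogger("crewai.llm.custom").setLevel(logging.DEBUG)
```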
### Rate Limiting
Implementing rate limiting can help avoid overwhelming the LLM API:
```python
import time
from typing import Any, Dict, List, Optional, Union

from crewai import BaseLLM

class RateLimitedLLM(BaseLLM):
    def __init__(
        self,
        api_key: str,
        endpoint: str,
        requests_per_minute: int = 60
    ):
        super().__init__()
        self.api_key = api_key
        self.endpoint = endpoint
        self.requests_per_minute = requests_per_minute
        self.request_times: List[float] = []

    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Any]:
        self._enforce_rate_limit()
        # Record this request time
        self.request_times.append(time.time())
        # Make the actual API call (self._make_api_call is your own helper)
        return self._make_api_call(messages, tools)

    def _enforce_rate_limit(self) -> None:
        """Enforce the rate limit by waiting if necessary."""
        now = time.time()
        # Remove request times older than 1 minute
        self.request_times = [t for t in self.request_times if now - t < 60]
        if len(self.request_times) >= self.requests_per_minute:
            # Calculate how long to wait
            oldest_request = min(self.request_times)
            wait_time = 60 - (now - oldest_request)
            if wait_time > 0:
                time.sleep(wait_time)
```
### Metrics Collection
Collecting metrics can help you monitor your LLM usage:
```python
import time
from typing import Any, Dict, List, Optional, Union

from crewai import BaseLLM

class MetricsCollectingLLM(BaseLLM):
    def __init__(self, api_key: str, endpoint: str):
        super().__init__()
        self.api_key = api_key
        self.endpoint = endpoint
        self.metrics: Dict[str, Any] = {
            "total_calls": 0,
            "total_tokens": 0,
            "errors": 0,
            "latency": []
        }

    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Any]:
        start_time = time.time()
        self.metrics["total_calls"] += 1
        try:
            # self._make_api_call is a placeholder for your own HTTP helper
            response = self._make_api_call(messages, tools)
            # Estimate tokens (simplified: roughly 4 characters per token)
            if isinstance(messages, str):
                token_estimate = len(messages) // 4
            else:
                token_estimate = sum(len(m.get("content", "")) // 4 for m in messages)
            self.metrics["total_tokens"] += token_estimate
            return response
        except Exception:
            self.metrics["errors"] += 1
            raise
        finally:
            latency = time.time() - start_time
            self.metrics["latency"].append(latency)

    def get_metrics(self) -> Dict[str, Any]:
        """Return the collected metrics."""
        avg_latency = sum(self.metrics["latency"]) / len(self.metrics["latency"]) if self.metrics["latency"] else 0
        return {
            **self.metrics,
            "avg_latency": avg_latency
        }
```
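After a run you can read the counters back; this usage sketch assumes the `MetricsCollectingLLM` class above:
```python
metrics_llm = MetricsCollectingLLM(api_key="your-api-key", endpoint="https://your-llm-endpoint.com")

# ... execute your agents or crew with llm=metrics_llm ...

stats = metrics_llm.get_metrics()
print(f"Calls: {stats['total_calls']}, errors: {stats['errors']}, avg latency: {stats['avg_latency']:.2f}s")
```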
## Advanced Usage: Function Calling
If your LLM supports function calling, you can implement the function calling logic in your custom LLM:
```python
import json
from typing import Any, Dict, List, Optional, Union

# This `call` implementation assumes a class like JWTAuthLLM above
# (it uses self.jwt_token and self.endpoint)
def call(
    self,
    messages: Union[str, List[Dict[str, str]]],
    tools: Optional[List[dict]] = None,
    callbacks: Optional[List[Any]] = None,
    available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
    import requests

    try:
        headers = {
            "Authorization": f"Bearer {self.jwt_token}",
            "Content-Type": "application/json"
        }
        # Convert string message to proper format if needed
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        data = {
            "messages": messages,
            "tools": tools
        }
        response = requests.post(
            self.endpoint,
            headers=headers,
            json=data,
            timeout=30
        )
        response.raise_for_status()
        response_data = response.json()
        # Check if the LLM wants to call a function
        if response_data["choices"][0]["message"].get("tool_calls"):
            tool_calls = response_data["choices"][0]["message"]["tool_calls"]
            # Process each tool call
            for tool_call in tool_calls:
                function_name = tool_call["function"]["name"]
                function_args = json.loads(tool_call["function"]["arguments"])
                if available_functions and function_name in available_functions:
                    function_to_call = available_functions[function_name]
                    function_response = function_to_call(**function_args)
                    # Add the function response to the messages
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call["id"],
                        "name": function_name,
                        "content": str(function_response)
                    })
            # Call the LLM again with the updated messages
            return self.call(messages, tools, callbacks, available_functions)
        # Return the text response if no function call
        return response_data["choices"][0]["message"]["content"]
    except requests.Timeout:
        raise TimeoutError("LLM request timed out")
    except requests.RequestException as e:
        raise RuntimeError(f"LLM request failed: {str(e)}")
    except (KeyError, IndexError, ValueError) as e:
        raise ValueError(f"Invalid response format: {str(e)}")
```
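For context, this is roughly how the inputs to `call()` look in the function-calling scenario; the weather tool is a made-up illustration, and the schema assumes your endpoint follows the OpenAI tools format:
```python
def get_weather(location: str) -> str:
    # A trivial local function the LLM is allowed to call
    return f"Sunny in {location}"

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a location",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}]

llm = JWTAuthLLM(jwt_token="your.jwt.token", endpoint="https://your-llm-endpoint.com/v1/chat/completions")
result = llm.call(
    "What's the weather in Paris?",
    tools=tools,
    available_functions={"get_weather": get_weather},
)
```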
## Using Your Custom LLM with CrewAI
Once you've implemented your custom LLM, you can use it with CrewAI agents and crews:
```python
from crewai import Agent, Task, Crew
from typing import Dict, Any

# Create your custom LLM instance
jwt_llm = JWTAuthLLM(
    jwt_token="your.jwt.token",
    endpoint="https://your-llm-endpoint.com/v1/chat/completions"
)

# Use it with an agent
agent = Agent(
    role="Research Assistant",
    goal="Find information on a topic",
    backstory="You are a research assistant tasked with finding information.",
    llm=jwt_llm,
)

# Create a task for the agent
task = Task(
    description="Research the benefits of exercise",
    agent=agent,
    expected_output="A summary of the benefits of exercise",
)

# Execute the task
result = agent.execute_task(task)
print(result)

# Or use it with a crew
crew = Crew(
    agents=[agent],
    tasks=[task],
    manager_llm=jwt_llm,  # Use your custom LLM for the manager
)

# Run the crew
result = crew.kickoff()
print(result)
```
## Implementing Your Own Authentication Mechanism
The `BaseLLM` class allows you to implement any authentication mechanism you need, not just JWT or API keys. You can use:
- OAuth tokens
- Client certificates
- Custom headers
- Session-based authentication
- Any other authentication method required by your LLM provider
Simply implement the appropriate authentication logic in your custom LLM class.
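For instance, a minimal sketch of an OAuth2 client-credentials flow might look like the following; the token endpoint, field names (`access_token`, `expires_in`), and the 60-second refresh margin are assumptions about a typical OAuth2 provider, not a specific CrewAI API:
```python
import time
import requests
from typing import Any, Dict, List, Optional, Union

from crewai import BaseLLM

class OAuthLLM(BaseLLM):
    def __init__(self, client_id: str, client_secret: str, token_url: str, endpoint: str):
        super().__init__()
        self.client_id = client_id
        self.client_secret = client_secret
        self.token_url = token_url
        self.endpoint = endpoint
        self._access_token: Optional[str] = None
        self._token_expiry: float = 0.0

    def _get_token(self) -> str:
        """Fetch a token via the client-credentials grant, reusing it until shortly before expiry."""
        if self._access_token is None or time.time() >= self._token_expiry - 60:
            response = requests.post(
                self.token_url,
                data={
                    "grant_type": "client_credentials",
                    "client_id": self.client_id,
                    "client_secret": self.client_secret,
                },
                timeout=30,
            )
            response.raise_for_status()
            payload = response.json()
            self._access_token = payload["access_token"]
            self._token_expiry = time.time() + payload.get("expires_in", 3600)
        return self._access_token

    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Any]:
        headers = {"Authorization": f"Bearer {self._get_token()}"}
        if isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        response = requests.post(
            self.endpoint,
            headers=headers,
            json={"messages": messages, "tools": tools},
            timeout=30,
        )
        response.raise_for_status()
        return response.json()["choices"][0]["message"]["content"]
```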
View File
@@ -1,6 +1,6 @@
 {
   "$schema": "https://mintlify.com/docs.json",
-  "theme": "mint",
+  "theme": "palm",
   "name": "CrewAI",
   "colors": {
     "primary": "#EB6658",
@@ -76,7 +76,9 @@
       "concepts/testing",
       "concepts/cli",
       "concepts/tools",
-      "concepts/event-listener"
+      "concepts/event-listener",
+      "concepts/langchain-tools",
+      "concepts/llamaindex-tools"
     ]
   },
   {
@@ -96,22 +98,12 @@
       "how-to/kickoff-for-each",
       "how-to/replay-tasks-from-latest-crew-kickoff",
       "how-to/conditional-tasks",
-      "how-to/langchain-tools",
-      "how-to/llamaindex-tools"
-    ]
-  },
-  {
-    "group": "Agent Monitoring & Observability",
-    "pages": [
       "how-to/agentops-observability",
-      "how-to/arize-phoenix-observability",
-      "how-to/langfuse-observability",
       "how-to/langtrace-observability",
       "how-to/mlflow-observability",
       "how-to/openlit-observability",
-      "how-to/opik-observability",
       "how-to/portkey-observability",
-      "how-to/weave-integration"
+      "how-to/langfuse-observability"
     ]
   },
   {
@@ -119,8 +111,6 @@
     "pages": [
       "tools/aimindtool",
       "tools/apifyactorstool",
-      "tools/bedrockinvokeagenttool",
-      "tools/bedrockkbretriever",
      "tools/bravesearchtool",
      "tools/browserbaseloadtool",
      "tools/codedocssearchtool",
@@ -196,11 +186,6 @@
      "anchor": "Community",
      "href": "https://community.crewai.com",
      "icon": "discourse"
-    },
-    {
-      "anchor": "Tutorials",
-      "href": "https://www.youtube.com/@crewAIInc",
-      "icon": "youtube"
     }
   ]
 }
View File
@@ -185,20 +185,15 @@ Let's modify the `crew.py` file:
 from crewai import Agent, Crew, Process, Task
 from crewai.project import CrewBase, agent, crew, task
 from crewai_tools import SerperDevTool
-from crewai.agents.agent_builder.base_agent import BaseAgent
-from typing import List
 @CrewBase
 class ResearchCrew():
     """Research crew for comprehensive topic analysis and reporting"""
-    agents: List[BaseAgent]
-    tasks: List[Task]
     @agent
     def researcher(self) -> Agent:
         return Agent(
-            config=self.agents_config['researcher'],  # type: ignore[index]
+            config=self.agents_config['researcher'],
             verbose=True,
             tools=[SerperDevTool()]
         )
@@ -206,20 +201,20 @@ class ResearchCrew():
     @agent
     def analyst(self) -> Agent:
         return Agent(
-            config=self.agents_config['analyst'],  # type: ignore[index]
+            config=self.agents_config['analyst'],
             verbose=True
         )
     @task
     def research_task(self) -> Task:
         return Task(
-            config=self.tasks_config['research_task']  # type: ignore[index]
+            config=self.tasks_config['research_task']
         )
     @task
     def analysis_task(self) -> Task:
         return Task(
-            config=self.tasks_config['analysis_task'],  # type: ignore[index]
+            config=self.tasks_config['analysis_task'],
             output_file='output/report.md'
         )
View File
@@ -203,40 +203,35 @@ These task definitions provide detailed instructions to our agents, ensuring the
 # src/guide_creator_flow/crews/content_crew/content_crew.py
 from crewai import Agent, Crew, Process, Task
 from crewai.project import CrewBase, agent, crew, task
-from crewai.agents.agent_builder.base_agent import BaseAgent
-from typing import List
 @CrewBase
 class ContentCrew():
     """Content writing crew"""
-    agents: List[BaseAgent]
-    tasks: List[Task]
     @agent
     def content_writer(self) -> Agent:
         return Agent(
-            config=self.agents_config['content_writer'],  # type: ignore[index]
+            config=self.agents_config['content_writer'],
             verbose=True
         )
     @agent
     def content_reviewer(self) -> Agent:
         return Agent(
-            config=self.agents_config['content_reviewer'],  # type: ignore[index]
+            config=self.agents_config['content_reviewer'],
             verbose=True
         )
     @task
     def write_section_task(self) -> Task:
         return Task(
-            config=self.tasks_config['write_section_task']  # type: ignore[index]
+            config=self.tasks_config['write_section_task']
         )
     @task
     def review_section_task(self) -> Task:
         return Task(
-            config=self.tasks_config['review_section_task'],  # type: ignore[index]
+            config=self.tasks_config['review_section_task'],
             context=[self.write_section_task()]
         )
@@ -268,7 +263,6 @@ Let's create our flow in the `main.py` file:
 ```python
 #!/usr/bin/env python
 import json
-import os
 from typing import List, Dict
 from pydantic import BaseModel, Field
 from crewai import LLM
@@ -347,9 +341,6 @@ class GuideCreatorFlow(Flow[GuideCreatorState]):
         outline_dict = json.loads(response)
         self.state.guide_outline = GuideOutline(**outline_dict)
-        # Ensure output directory exists before saving
-        os.makedirs("output", exist_ok=True)
         # Save the outline to a file
         with open("output/guide_outline.json", "w") as f:
             json.dump(outline_dict, f, indent=2)
View File
@@ -1,5 +1,5 @@
 ---
-title: AgentOps Integration
+title: Agent Monitoring with AgentOps
 description: Understanding and logging your agent performance with AgentOps.
 icon: paperclip
 ---
View File
@@ -1,145 +0,0 @@
---
title: Arize Phoenix
description: Arize Phoenix integration for CrewAI with OpenTelemetry and OpenInference
icon: magnifying-glass-chart
---
# Arize Phoenix Integration
This guide demonstrates how to integrate **Arize Phoenix** with **CrewAI** using OpenTelemetry via the [OpenInference](https://github.com/openinference/openinference) SDK. By the end of this guide, you will be able to trace and debug your CrewAI agents with ease.
> **What is Arize Phoenix?** [Arize Phoenix](https://phoenix.arize.com) is an LLM observability platform that provides tracing and evaluation for AI applications.
[![Watch a Video Demo of Our Integration with Phoenix](https://storage.googleapis.com/arize-assets/fixtures/setup_crewai.png)](https://www.youtube.com/watch?v=Yc5q3l6F7Ww)
## Get Started
We'll walk through a simple example of using CrewAI and integrating it with Arize Phoenix via OpenTelemetry using OpenInference.
You can also access this guide on [Google Colab](https://colab.research.google.com/github/Arize-ai/phoenix/blob/main/tutorials/tracing/crewai_tracing_tutorial.ipynb).
### Step 1: Install Dependencies
```bash
pip install openinference-instrumentation-crewai crewai crewai-tools arize-phoenix-otel
```
### Step 2: Set Up Environment Variables
Set up your Phoenix Cloud API keys and configure OpenTelemetry to send traces to Phoenix. Phoenix Cloud is a hosted version of Arize Phoenix, but it is not required for this integration.
You can get your free Serper API key [here](https://serper.dev/).
```python
import os
from getpass import getpass
# Get your Phoenix Cloud credentials
PHOENIX_API_KEY = getpass("🔑 Enter your Phoenix Cloud API Key: ")
# Get API keys for services
OPENAI_API_KEY = getpass("🔑 Enter your OpenAI API key: ")
SERPER_API_KEY = getpass("🔑 Enter your Serper API key: ")
# Set environment variables
os.environ["PHOENIX_CLIENT_HEADERS"] = f"api_key={PHOENIX_API_KEY}"
os.environ["PHOENIX_COLLECTOR_ENDPOINT"] = "https://app.phoenix.arize.com" # Phoenix Cloud, change this to your own endpoint if you are using a self-hosted instance
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY
os.environ["SERPER_API_KEY"] = SERPER_API_KEY
```
### Step 3: Initialize OpenTelemetry with Phoenix
Initialize the OpenInference OpenTelemetry instrumentation SDK to start capturing traces and send them to Phoenix.
```python
from phoenix.otel import register
tracer_provider = register(
project_name="crewai-tracing-demo",
auto_instrument=True,
)
```
### Step 4: Create a CrewAI Application
We'll create a CrewAI application where two agents collaborate to research and write a blog post about AI advancements.
```python
from crewai import Agent, Crew, Process, Task
from crewai_tools import SerperDevTool
search_tool = SerperDevTool()
# Define your agents with roles and goals
researcher = Agent(
role="Senior Research Analyst",
goal="Uncover cutting-edge developments in AI and data science",
backstory="""You work at a leading tech think tank.
Your expertise lies in identifying emerging trends.
You have a knack for dissecting complex data and presenting actionable insights.""",
verbose=True,
allow_delegation=False,
# You can pass an optional llm attribute specifying what model you want to use.
# llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7),
tools=[search_tool],
)
writer = Agent(
role="Tech Content Strategist",
goal="Craft compelling content on tech advancements",
backstory="""You are a renowned Content Strategist, known for your insightful and engaging articles.
You transform complex concepts into compelling narratives.""",
verbose=True,
allow_delegation=True,
)
# Create tasks for your agents
task1 = Task(
description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
Identify key trends, breakthrough technologies, and potential industry impacts.""",
expected_output="Full analysis report in bullet points",
agent=researcher,
)
task2 = Task(
description="""Using the insights provided, develop an engaging blog
post that highlights the most significant AI advancements.
Your post should be informative yet accessible, catering to a tech-savvy audience.
Make it sound cool, avoid complex words so it doesn't sound like AI.""",
expected_output="Full blog post of at least 4 paragraphs",
agent=writer,
)
# Instantiate your crew with a sequential process
crew = Crew(
agents=[researcher, writer], tasks=[task1, task2], verbose=1, process=Process.sequential
)
# Get your crew to work!
result = crew.kickoff()
print("######################")
print(result)
```
### Step 5: View Traces in Phoenix
After running the agent, you can view the traces generated by your CrewAI application in Phoenix. You should see detailed steps of the agent interactions and LLM calls, which can help you debug and optimize your AI agents.
Log into your Phoenix Cloud account and navigate to the project you specified in the `project_name` parameter. You'll see a timeline view of your trace with all the agent interactions, tool usages, and LLM calls.
![Example trace in Phoenix showing agent interactions](https://storage.googleapis.com/arize-assets/fixtures/crewai_traces.png)
### Version Compatibility Information
- Python 3.8+
- CrewAI >= 0.86.0
- Arize Phoenix >= 7.0.1
- OpenTelemetry SDK >= 1.31.0
### References
- [Phoenix Documentation](https://docs.arize.com/phoenix/) - Overview of the Phoenix platform.
- [CrewAI Documentation](https://docs.crewai.com/) - Overview of the CrewAI framework.
- [OpenTelemetry Docs](https://opentelemetry.io/docs/) - OpenTelemetry guide
- [OpenInference GitHub](https://github.com/openinference/openinference) - Source code for OpenInference SDK.
View File
@@ -1,443 +0,0 @@
---
title: Bring your own agent
description: Learn how to bring your own agents that work within a Crew.
icon: robots
---
Interoperability is a core concept in CrewAI. This guide will show you how to bring your own agents that work within a Crew.
## Adapter Guide for Bringing Your Own Agents (LangGraph Agents, OpenAI Agents, etc.)
We require three adapters to make an agent from another framework work within a crew:
1. BaseAgentAdapter
2. BaseToolAdapter
3. BaseConverter
## BaseAgentAdapter
This abstract class defines the common interface and functionality that all
agent adapters must implement. It extends BaseAgent to maintain compatibility
with the CrewAI framework while adding adapter-specific requirements.
Required Methods:
1. `def configure_tools`
2. `def configure_structured_output`
## Creating your own Adapter
To integrate an agent from a different framework (e.g., LangGraph, Autogen, OpenAI Assistants) into CrewAI, you need to create a custom adapter by inheriting from `BaseAgentAdapter`. This adapter acts as a compatibility layer, translating between the CrewAI interfaces and the specific requirements of your external agent.
Here's how you implement your custom adapter:
1. **Inherit from `BaseAgentAdapter`**:
```python
from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
from crewai.tools import BaseTool
from typing import List, Optional, Any, Dict
class MyCustomAgentAdapter(BaseAgentAdapter):
# ... implementation details ...
```
2. **Implement `__init__`**:
The constructor should call the parent class constructor `super().__init__(**kwargs)` and perform any initialization specific to your external agent. You can use the optional `agent_config` dictionary passed during CrewAI's `Agent` initialization to configure your adapter and the underlying agent.
```python
def __init__(self, agent_config: Optional[Dict[str, Any]] = None, **kwargs: Any):
super().__init__(agent_config=agent_config, **kwargs)
# Initialize your external agent here, possibly using agent_config
# Example: self.external_agent = initialize_my_agent(agent_config)
print(f"Initializing MyCustomAgentAdapter with config: {agent_config}")
```
3. **Implement `configure_tools`**:
This abstract method is crucial. It receives a list of CrewAI `BaseTool` instances. Your implementation must convert or adapt these tools into the format expected by your external agent framework. This might involve wrapping them, extracting specific attributes, or registering them with the external agent instance.
```python
def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
if tools:
adapted_tools = []
for tool in tools:
# Adapt CrewAI BaseTool to the format your agent expects
# Example: adapted_tool = adapt_to_my_framework(tool)
# adapted_tools.append(adapted_tool)
pass # Replace with your actual adaptation logic
# Configure the external agent with the adapted tools
# Example: self.external_agent.set_tools(adapted_tools)
print(f"Configuring tools for MyCustomAgentAdapter: {adapted_tools}") # Placeholder
else:
# Handle the case where no tools are provided
# Example: self.external_agent.set_tools([])
print("No tools provided for MyCustomAgentAdapter.")
```
4. **Implement `configure_structured_output`**:
This method is called when the CrewAI `Agent` is configured with structured output requirements (e.g., `output_json` or `output_pydantic`). Your adapter needs to ensure the external agent is set up to comply with these requirements. This might involve setting specific parameters on the external agent or ensuring its underlying model supports the requested format. If the external agent doesn't support structured output in a way compatible with CrewAI's expectations, you might need to handle the conversion or raise an appropriate error.
```python
def configure_structured_output(self, structured_output: Any) -> None:
# Configure your external agent to produce output in the specified format
# Example: self.external_agent.set_output_format(structured_output)
self.adapted_structured_output = True # Signal that structured output is handled
print(f"Configuring structured output for MyCustomAgentAdapter: {structured_output}")
```
By implementing these methods, your `MyCustomAgentAdapter` will allow your custom agent implementation to function correctly within a CrewAI crew, interacting with tasks and tools seamlessly. Remember to replace the example comments and print statements with your actual adaptation logic specific to the external agent framework you are integrating.
## BaseToolAdapter implementation
The `BaseToolAdapter` class is responsible for converting CrewAI's native `BaseTool` objects into a format that your specific external agent framework can understand and utilize. Different agent frameworks (like LangGraph, OpenAI Assistants, etc.) have their own unique ways of defining and handling tools, and the `BaseToolAdapter` acts as the translator.
Here's how you implement your custom tool adapter:
1. **Inherit from `BaseToolAdapter`**:
```python
from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
from crewai.tools import BaseTool
from typing import List, Any
class MyCustomToolAdapter(BaseToolAdapter):
# ... implementation details ...
```
2. **Implement `configure_tools`**:
This is the core abstract method you must implement. It receives a list of CrewAI `BaseTool` instances provided to the agent. Your task is to iterate through this list, adapt each `BaseTool` into the format expected by your external framework, and store the converted tools in the `self.converted_tools` list (which is initialized in the base class constructor).
```python
def configure_tools(self, tools: List[BaseTool]) -> None:
"""Configure and convert CrewAI tools for the specific implementation."""
self.converted_tools = [] # Reset in case it's called multiple times
for tool in tools:
# Sanitize the tool name if required by the target framework
sanitized_name = self.sanitize_tool_name(tool.name)
# --- Your Conversion Logic Goes Here ---
# Example: Convert BaseTool to a dictionary format for LangGraph
# converted_tool = {
# "name": sanitized_name,
# "description": tool.description,
# "parameters": tool.args_schema.schema() if tool.args_schema else {},
# # Add any other framework-specific fields
# }
# Example: Convert BaseTool to an OpenAI function definition
# converted_tool = {
# "type": "function",
# "function": {
# "name": sanitized_name,
# "description": tool.description,
# "parameters": tool.args_schema.schema() if tool.args_schema else {"type": "object", "properties": {}},
# }
# }
# --- Replace above examples with your actual adaptation ---
converted_tool = self.adapt_tool_to_my_framework(tool, sanitized_name) # Placeholder
self.converted_tools.append(converted_tool)
print(f"Adapted tool '{tool.name}' to '{sanitized_name}' for MyCustomToolAdapter") # Placeholder
print(f"MyCustomToolAdapter finished configuring tools: {len(self.converted_tools)} adapted.") # Placeholder
# --- Helper method for adaptation (Example) ---
def adapt_tool_to_my_framework(self, tool: BaseTool, sanitized_name: str) -> Any:
# Replace this with the actual logic to convert a CrewAI BaseTool
# to the format needed by your specific external agent framework.
# This will vary greatly depending on the target framework.
adapted_representation = {
"framework_specific_name": sanitized_name,
"framework_specific_description": tool.description,
"inputs": tool.args_schema.schema() if tool.args_schema else None,
"implementation_reference": tool.run # Or however the framework needs to call it
}
# Also ensure the tool works both sync and async (requires `import inspect`)
async def async_tool_wrapper(*args, **kwargs):
output = tool.run(*args, **kwargs)
if inspect.isawaitable(output):
return await output
else:
return output
adapted_tool = MyFrameworkTool(
name=sanitized_name,
description=tool.description,
inputs=tool.args_schema.schema() if tool.args_schema else None,
implementation_reference=async_tool_wrapper
)
return adapted_tool
```
3. **Using the Adapter**:
Typically, you would instantiate your `MyCustomToolAdapter` within your `MyCustomAgentAdapter`'s `configure_tools` method and use it to process the tools before configuring your external agent.
```python
# Inside MyCustomAgentAdapter.configure_tools
def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
if tools:
tool_adapter = MyCustomToolAdapter() # Instantiate your tool adapter
tool_adapter.configure_tools(tools) # Convert the tools
adapted_tools = tool_adapter.tools() # Get the converted tools
# Now configure your external agent with the adapted_tools
# Example: self.external_agent.set_tools(adapted_tools)
print(f"Configuring external agent with adapted tools: {adapted_tools}") # Placeholder
else:
# Handle no tools case
print("No tools provided for MyCustomAgentAdapter.")
```
By creating a `BaseToolAdapter`, you decouple the tool conversion logic from the agent adaptation, making the integration cleaner and more modular. Remember to replace the placeholder examples with the actual conversion logic required by your specific external agent framework.
## BaseConverter
The `BaseConverterAdapter` plays a crucial role when a CrewAI `Task` requires an agent to return its final output in a specific structured format, such as JSON or a Pydantic model. It bridges the gap between CrewAI's structured output requirements and the capabilities of your external agent.
Its primary responsibilities are:
1. **Configuring the Agent for Structured Output:** Based on the `Task`'s requirements (`output_json` or `output_pydantic`), it instructs the associated `BaseAgentAdapter` (and indirectly, the external agent) on what format is expected.
2. **Enhancing the System Prompt:** It modifies the agent's system prompt to include clear instructions on *how* to generate the output in the required structure.
3. **Post-processing the Result:** It takes the raw output from the agent and attempts to parse, validate, and format it according to the required structure, ultimately returning a string representation (e.g., a JSON string).
Here's how you implement your custom converter adapter:
1. **Inherit from `BaseConverterAdapter`**:
```python
from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
# Assuming you have your MyCustomAgentAdapter defined
# from .my_custom_agent_adapter import MyCustomAgentAdapter
from crewai.task import Task
from typing import Any
class MyCustomConverterAdapter(BaseConverterAdapter):
# Store the expected output type (e.g., 'json', 'pydantic', 'text')
_output_type: str = 'text'
_output_schema: Any = None # Store JSON schema or Pydantic model
# ... implementation details ...
```
2. **Implement `__init__`**:
The constructor must accept the corresponding `agent_adapter` instance it will work with.
```python
def __init__(self, agent_adapter: Any): # Use your specific AgentAdapter type hint
self.agent_adapter = agent_adapter
print(f"Initializing MyCustomConverterAdapter for agent adapter: {type(agent_adapter).__name__}")
```
3. **Implement `configure_structured_output`**:
This method receives the CrewAI `Task` object. You need to check the task's `output_json` and `output_pydantic` attributes to determine the required output structure. Store this information (e.g., in `_output_type` and `_output_schema`) and potentially call configuration methods on your `self.agent_adapter` if the external agent needs specific setup for structured output (which might have been partially handled in the agent adapter's `configure_structured_output` already).
```python
def configure_structured_output(self, task: Task) -> None:
"""Configure the expected structured output based on the task."""
if task.output_pydantic:
self._output_type = 'pydantic'
self._output_schema = task.output_pydantic
print(f"Converter: Configured for Pydantic output: {self._output_schema.__name__}")
elif task.output_json:
self._output_type = 'json'
self._output_schema = task.output_json
print(f"Converter: Configured for JSON output with schema: {self._output_schema}")
else:
self._output_type = 'text'
self._output_schema = None
print("Converter: Configured for standard text output.")
# Optionally, inform the agent adapter if needed
# self.agent_adapter.set_output_mode(self._output_type, self._output_schema)
```
4. **Implement `enhance_system_prompt`**:
This method takes the agent's base system prompt string and should append instructions tailored to the currently configured `_output_type` and `_output_schema`. The goal is to guide the LLM powering the agent to produce output in the correct format.
```python
def enhance_system_prompt(self, base_prompt: str) -> str:
"""Enhance the system prompt with structured output instructions."""
if self._output_type == 'text':
return base_prompt # No enhancement needed for plain text
instructions = "\n\nYour final answer MUST be formatted as "
if self._output_type == 'json':
schema_str = json.dumps(self._output_schema, indent=2)
instructions += f"a JSON object conforming to the following schema:\n```json\n{schema_str}\n```"
elif self._output_type == 'pydantic':
schema_str = json.dumps(self._output_schema.model_json_schema(), indent=2)
instructions += f"a JSON object conforming to the Pydantic model '{self._output_schema.__name__}' with the following schema:\n```json\n{schema_str}\n```"
instructions += "\nEnsure your entire response is ONLY the valid JSON object, without any introductory text, explanations, or concluding remarks."
print(f"Converter: Enhancing prompt for {self._output_type} output.")
return base_prompt + instructions
```
*Note: The exact prompt engineering might need tuning based on the agent/LLM being used.*
5. **Implement `post_process_result`**:
This method receives the raw string output from the agent. If structured output was requested (`json` or `pydantic`), you should attempt to parse the string into the expected format. Handle potential parsing errors (e.g., log them, attempt simple fixes, or raise an exception). Crucially, the method must **always return a string**, even if the intermediate format was a dictionary or Pydantic object (e.g., by serializing it back to a JSON string).
```python
import json
from pydantic import ValidationError
def post_process_result(self, result: str) -> str:
"""Post-process the agent's result to ensure it matches the expected format."""
print(f"Converter: Post-processing result for {self._output_type} output.")
if self._output_type == 'json':
try:
# Attempt to parse and re-serialize to ensure validity and consistent format
parsed_json = json.loads(result)
# Optional: Validate against self._output_schema if it's a JSON schema dictionary
# from jsonschema import validate
# validate(instance=parsed_json, schema=self._output_schema)
return json.dumps(parsed_json)
except json.JSONDecodeError as e:
print(f"Error: Failed to parse JSON output: {e}\nRaw output:\n{result}")
# Handle error: return raw, raise exception, or try to fix
return result # Example: return raw output on failure
# except Exception as e: # Catch validation errors if using jsonschema
# print(f"Error: JSON output failed schema validation: {e}\nRaw output:\n{result}")
# return result
elif self._output_type == 'pydantic':
try:
# Attempt to parse into the Pydantic model
model_instance = self._output_schema.model_validate_json(result)
# Return the model serialized back to JSON
return model_instance.model_dump_json()
except ValidationError as e:
print(f"Error: Failed to validate Pydantic output: {e}\nRaw output:\n{result}")
# Handle error
return result # Example: return raw output on failure
except json.JSONDecodeError as e:
print(f"Error: Failed to parse JSON for Pydantic model: {e}\nRaw output:\n{result}")
return result
else: # 'text'
return result # No processing needed for plain text
```
By implementing these methods, your `MyCustomConverterAdapter` ensures that structured output requests from CrewAI tasks are correctly handled by your integrated external agent, improving the reliability and usability of your custom agent within the CrewAI framework.
## Out of the Box Adapters
We provide out of the box adapters for the following frameworks:
1. LangGraph
2. OpenAI Agents
## Kicking off a crew with adapted agents:
```python
import json
import os
from typing import List
from crewai_tools import SerperDevTool
from src.crewai import Agent, Crew, Task
from langchain_openai import ChatOpenAI
from pydantic import BaseModel
from crewai.agents.agent_adapters.langgraph.langgraph_adapter import (
LangGraphAgentAdapter,
)
from crewai.agents.agent_adapters.openai_agents.openai_adapter import OpenAIAgentAdapter
# CrewAI Agent
code_helper_agent = Agent(
role="Code Helper",
goal="Help users solve coding problems effectively and provide clear explanations.",
backstory="You are an experienced programmer with deep knowledge across multiple programming languages and frameworks. You specialize in solving complex coding challenges and explaining solutions clearly.",
allow_delegation=False,
verbose=True,
)
# OpenAI Agent Adapter
link_finder_agent = OpenAIAgentAdapter(
role="Link Finder",
goal="Find the most relevant and high-quality resources for coding tasks.",
backstory="You are a research specialist with a talent for finding the most helpful resources. You're skilled at using search tools to discover documentation, tutorials, and examples that directly address the user's coding needs.",
tools=[SerperDevTool()],
allow_delegation=False,
verbose=True,
)
# LangGraph Agent Adapter
reporter_agent = LangGraphAgentAdapter(
role="Reporter",
goal="Report the results of the tasks.",
backstory="You are a reporter who reports the results of the other tasks",
llm=ChatOpenAI(model="gpt-4o"),
allow_delegation=True,
verbose=True,
)
class Code(BaseModel):
code: str
task = Task(
description="Give an answer to the coding question: {task}",
expected_output="A thorough answer to the coding question: {task}",
agent=code_helper_agent,
output_json=Code,
)
task2 = Task(
description="Find links to resources that can help with coding tasks. Use the serper tool to find resources that can help.",
expected_output="A list of links to resources that can help with coding tasks",
agent=link_finder_agent,
)
class Report(BaseModel):
code: str
links: List[str]
task3 = Task(
description="Report the results of the tasks.",
expected_output="A report of the results of the tasks. this is the code produced and then the links to the resources that can help with the coding task.",
agent=reporter_agent,
output_json=Report,
)
# Use in CrewAI
crew = Crew(
agents=[code_helper_agent, link_finder_agent, reporter_agent],
tasks=[task, task2, task3],
verbose=True,
)
result = crew.kickoff(
inputs={"task": "How do you implement an abstract class in python?"}
)
# Print raw result first
print("Raw result:", result)
# Handle result based on its type
if hasattr(result, "json_dict") and result.json_dict:
json_result = result.json_dict
print("\nStructured JSON result:")
print(f"{json.dumps(json_result, indent=2)}")
# Access fields safely
if isinstance(json_result, dict):
if "code" in json_result:
print("\nCode:")
print(
json_result["code"][:200] + "..."
if len(json_result["code"]) > 200
else json_result["code"]
)
if "links" in json_result:
print("\nLinks:")
for link in json_result["links"][:5]: # Print first 5 links
print(f"- {link}")
if len(json_result["links"]) > 5:
print(f"...and {len(json_result['links']) - 5} more links")
elif hasattr(result, "pydantic") and result.pydantic:
print("\nPydantic model result:")
print(result.pydantic.model_dump_json(indent=2))
else:
# Fallback to raw output
print("\nNo structured result available, using raw output:")
print(result.raw[:500] + "..." if len(result.raw) > 500 else result.raw)
```
View File
@@ -92,14 +92,12 @@ coding_agent = Agent(
 # Create tasks that require code execution
 task_1 = Task(
     description="Analyze the first dataset and calculate the average age of participants. Ages: {ages}",
-    agent=coding_agent,
-    expected_output="The average age of the participants."
+    agent=coding_agent
 )
 task_2 = Task(
     description="Analyze the second dataset and calculate the average age of participants. Ages: {ages}",
-    agent=coding_agent,
-    expected_output="The average age of the participants."
+    agent=coding_agent
 )
 # Create two crews and add tasks
View File
@@ -39,7 +39,8 @@ analysis_crew = Crew(
     agents=[coding_agent],
     tasks=[data_analysis_task],
     verbose=True,
-    memory=False
+    memory=False,
+    respect_context_window=True  # enable by default
 )
 datasets = [
View File
@@ -1,7 +1,7 @@
 ---
-title: Langfuse Integration
+title: Agent Monitoring with Langfuse
 description: Learn how to integrate Langfuse with CrewAI via OpenTelemetry using OpenLit
-icon: vials
+icon: magnifying-glass-chart
 ---
 # Integrate Langfuse with CrewAI
View File
@@ -1,5 +1,5 @@
 ---
-title: Langtrace Integration
+title: Agent Monitoring with Langtrace
 description: How to monitor cost, latency, and performance of CrewAI Agents using Langtrace, an external observability tool.
 icon: chart-line
 ---
View File
@@ -1,5 +1,5 @@
 ---
-title: MLflow Integration
+title: Agent Monitoring with MLflow
 description: Quickly start monitoring your Agents with MLflow.
 icon: bars-staggered
 ---
View File
@@ -1,5 +1,5 @@
 ---
-title: OpenLIT Integration
+title: Agent Monitoring with OpenLIT
 description: Quickly start monitoring your Agents in just a single line of code with OpenTelemetry.
 icon: magnifying-glass-chart
 ---
View File
@@ -1,129 +0,0 @@
---
title: Opik Integration
description: Learn how to use Comet Opik to debug, evaluate, and monitor your CrewAI applications with comprehensive tracing, automated evaluations, and production-ready dashboards.
icon: meteor
---
# Opik Overview
With [Comet Opik](https://www.comet.com/docs/opik/), debug, evaluate, and monitor your LLM applications, RAG systems, and agentic workflows with comprehensive tracing, automated evaluations, and production-ready dashboards.
<Frame caption="Opik Agent Dashboard">
<img src="/images/opik-crewai-dashboard.png" alt="Opik agent monitoring example with CrewAI" />
</Frame>
Opik provides comprehensive support for every stage of your CrewAI application development:
- **Log Traces and Spans**: Automatically track LLM calls and application logic to debug and analyze development and production systems. Manually or programmatically annotate, view, and compare responses across projects.
- **Evaluate Your LLM Application's Performance**: Evaluate against a custom test set and run built-in evaluation metrics or define your own metrics in the SDK or UI.
- **Test Within Your CI/CD Pipeline**: Establish reliable performance baselines with Opik's LLM unit tests, built on PyTest. Run online evaluations for continuous monitoring in production.
- **Monitor & Analyze Production Data**: Understand your models' performance on unseen data in production and generate datasets for new dev iterations.
## Setup
Comet provides a hosted version of the Opik platform, or you can run the platform locally.
To use the hosted version, simply [create a free Comet account](https://www.comet.com/signup?utm_medium=github&utm_source=crewai_docs) and grab your API key.
To run the Opik platform locally, see our [installation guide](https://www.comet.com/docs/opik/self-host/overview/) for more information.
For this guide, we will use CrewAI's quickstart example.
<Steps>
<Step title="Install required packages">
```shell
pip install crewai crewai-tools opik --upgrade
```
</Step>
<Step title="Configure Opik">
```python
import opik
opik.configure(use_local=False)
```
</Step>
<Step title="Prepare environment">
First, we set up our API keys for our LLM-provider as environment variables:
```python
import os
import getpass
if "OPENAI_API_KEY" not in os.environ:
os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ")
```
</Step>
<Step title="Using CrewAI">
The first step is to create our project. We will use an example from CrewAI's documentation:
```python
from crewai import Agent, Crew, Task, Process
class YourCrewName:
def agent_one(self) -> Agent:
return Agent(
role="Data Analyst",
goal="Analyze data trends in the market",
backstory="An experienced data analyst with a background in economics",
verbose=True,
)
def agent_two(self) -> Agent:
return Agent(
role="Market Researcher",
goal="Gather information on market dynamics",
backstory="A diligent researcher with a keen eye for detail",
verbose=True,
)
def task_one(self) -> Task:
return Task(
name="Collect Data Task",
description="Collect recent market data and identify trends.",
expected_output="A report summarizing key trends in the market.",
agent=self.agent_one(),
)
def task_two(self) -> Task:
return Task(
name="Market Research Task",
description="Research factors affecting market dynamics.",
expected_output="An analysis of factors influencing the market.",
agent=self.agent_two(),
)
def crew(self) -> Crew:
return Crew(
agents=[self.agent_one(), self.agent_two()],
tasks=[self.task_one(), self.task_two()],
process=Process.sequential,
verbose=True,
)
```
Now we can import Opik's tracker and run our crew:
```python
from opik.integrations.crewai import track_crewai
track_crewai(project_name="crewai-integration-demo")
my_crew = YourCrewName().crew()
result = my_crew.kickoff()
print(result)
```
After running your CrewAI application, visit the Opik app to view:
- LLM traces, spans, and their metadata
- Agent interactions and task execution flow
- Performance metrics like latency and token usage
- Evaluation metrics (built-in or custom)
</Step>
</Steps>
## Resources
- [🦉 Opik Documentation](https://www.comet.com/docs/opik/)
- [👉 Opik + CrewAI Colab](https://colab.research.google.com/github/comet-ml/opik/blob/main/apps/opik-documentation/documentation/docs/cookbook/crewai.ipynb)
- [🐦 X](https://x.com/cometml)
- [💬 Slack](https://slack.comet.com/)
View File
@@ -1,5 +1,5 @@
 ---
-title: Portkey Integration
+title: Agent Monitoring with Portkey
 description: How to use Portkey with CrewAI
 icon: key
 ---
View File
@@ -1,124 +0,0 @@
---
title: Weave Integration
description: Learn how to use Weights & Biases (W&B) Weave to track, experiment with, evaluate, and improve your CrewAI applications.
icon: radar
---
# Weave Overview
[Weights & Biases (W&B) Weave](https://weave-docs.wandb.ai/) is a framework for tracking, experimenting with, evaluating, deploying, and improving LLM-based applications.
![Overview of W&B Weave CrewAI tracing usage](/images/weave-tracing.gif)
Weave provides comprehensive support for every stage of your CrewAI application development:
- **Tracing & Monitoring**: Automatically track LLM calls and application logic to debug and analyze production systems
- **Systematic Iteration**: Refine and iterate on prompts, datasets, and models
- **Evaluation**: Use custom or pre-built scorers to systematically assess and enhance agent performance
- **Guardrails**: Protect your agents with pre- and post-safeguards for content moderation and prompt safety
Weave automatically captures traces for your CrewAI applications, enabling you to monitor and analyze your agents' performance, interactions, and execution flow. This helps you build better evaluation datasets and optimize your agent workflows.
## Setup Instructions
<Steps>
<Step title="Install required packages">
```shell
pip install crewai weave
```
</Step>
<Step title="Set up W&B Account">
Sign up for a [Weights & Biases account](https://wandb.ai) if you haven't already. You'll need this to view your traces and metrics.
</Step>
<Step title="Initialize Weave in Your Application">
Add the following code to your application:
```python
import weave
# Initialize Weave with your project name
weave.init(project_name="crewai_demo")
```
After initialization, Weave will provide a URL where you can view your traces and metrics.
</Step>
<Step title="Create your Crews/Flows">
```python
from crewai import Agent, Task, Crew, LLM, Process
# Create an LLM with a temperature of 0 to ensure deterministic outputs
llm = LLM(model="gpt-4o", temperature=0)
# Create agents
researcher = Agent(
role='Research Analyst',
goal='Find and analyze the best investment opportunities',
backstory='Expert in financial analysis and market research',
llm=llm,
verbose=True,
allow_delegation=False,
)
writer = Agent(
role='Report Writer',
goal='Write clear and concise investment reports',
backstory='Experienced in creating detailed financial reports',
llm=llm,
verbose=True,
allow_delegation=False,
)
# Create tasks
research_task = Task(
description='Deep research on the {topic}',
expected_output='Comprehensive market data including key players, market size, and growth trends.',
agent=researcher
)
writing_task = Task(
description='Write a detailed report based on the research',
expected_output='The report should be easy to read and understand. Use bullet points where applicable.',
agent=writer
)
# Create a crew
crew = Crew(
agents=[researcher, writer],
tasks=[research_task, writing_task],
verbose=True,
process=Process.sequential,
)
# Run the crew
result = crew.kickoff(inputs={"topic": "AI in material science"})
print(result)
```
</Step>
<Step title="View Traces in Weave">
After running your CrewAI application, visit the Weave URL provided during initialization to view:
- LLM calls and their metadata
- Agent interactions and task execution flow
- Performance metrics like latency and token usage
- Any errors or issues that occurred during execution
<Frame caption="Weave Tracing Dashboard">
<img src="/images/weave-tracing.png" alt="Weave tracing example with CrewAI" />
</Frame>
</Step>
</Steps>
## Features
- Weave automatically captures all CrewAI operations: agent interactions and task executions; LLM calls with metadata and token usage; tool usage and results.
- The integration supports all CrewAI execution methods: `kickoff()`, `kickoff_for_each()`, `kickoff_async()`, and `kickoff_for_each_async()`.
- Automatic tracing of all [crewAI-tools](https://github.com/crewAIInc/crewAI-tools).
- Flow feature support with decorator patching (`@start`, `@listen`, `@router`, `@or_`, `@and_`).
- Track custom guardrails passed to CrewAI `Task` with `@weave.op()`.
For detailed information on what's supported, visit the [Weave CrewAI documentation](https://weave-docs.wandb.ai/guides/integrations/crewai/#getting-started-with-flow).
## Resources
- [📘 Weave Documentation](https://weave-docs.wandb.ai)
- [📊 Example Weave x CrewAI dashboard](https://wandb.ai/ayut/crewai_demo/weave/traces?cols=%7B%22wb_run_id%22%3Afalse%2C%22attributes.weave.client_version%22%3Afalse%2C%22attributes.weave.os_name%22%3Afalse%2C%22attributes.weave.os_release%22%3Afalse%2C%22attributes.weave.os_version%22%3Afalse%2C%22attributes.weave.source%22%3Afalse%2C%22attributes.weave.sys_version%22%3Afalse%7D&peekPath=%2Fayut%2Fcrewai_demo%2Fcalls%2F0195c838-38cb-71a2-8a15-651ecddf9d89)
- [🐦 X](https://x.com/weave_wb)
Binary file not shown. (Before: 99 KiB)
Binary file not shown. (Before: 13 MiB)
Binary file not shown. (Before: 693 KiB)
View File
@@ -4,21 +4,6 @@ description: Get started with CrewAI - Install, configure, and build your first
 icon: wrench
 ---
-## Video Tutorial
-Watch this video tutorial for a step-by-step demonstration of the installation process:
-<iframe
-  width="100%"
-  height="400"
-  src="https://www.youtube.com/embed/-kSOTtYzgEw"
-  title="CrewAI Installation Guide"
-  frameborder="0"
-  style={{ borderRadius: '10px' }}
-  allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
-  allowfullscreen
-></iframe>
-## Text Tutorial
 <Note>
 **Python Version Requirements**
@@ -155,27 +140,6 @@ We recommend using the `YAML` template scaffolding for a structured approach to
 </Step>
 </Steps>
-## Enterprise Installation Options
-<Note type="info">
-For teams and organizations, CrewAI offers enterprise deployment options that eliminate setup complexity:
-### CrewAI Enterprise (SaaS)
-- Zero installation required - just sign up for free at [app.crewai.com](https://app.crewai.com)
-- Automatic updates and maintenance
-- Managed infrastructure and scaling
-- Build Crews with no code
-### CrewAI Factory (Self-hosted)
-- Containerized deployment for your infrastructure
-- Supports any hyperscaler, including on-prem deployments
-- Integration with your existing security systems
-<Card title="Explore Enterprise Options" icon="building" href="https://crewai.com/enterprise">
-Learn about CrewAI's enterprise offerings and schedule a demo
-</Card>
-</Note>
 ## Next Steps
 <CardGroup cols={2}>
View File
@@ -15,7 +15,6 @@ CrewAI empowers developers with both high-level simplicity and precise low-level
 With over 100,000 developers certified through our community courses, CrewAI is rapidly becoming the standard for enterprise-ready AI automation.
 ## How Crews Work
 <Note>
View File
@@ -87,20 +87,15 @@ Follow the steps below to get Crewing! 🚣‍♂️
 from crewai import Agent, Crew, Process, Task
 from crewai.project import CrewBase, agent, crew, task
 from crewai_tools import SerperDevTool
-from crewai.agents.agent_builder.base_agent import BaseAgent
-from typing import List
 @CrewBase
 class LatestAiDevelopmentCrew():
     """LatestAiDevelopment crew"""
-    agents: List[BaseAgent]
-    tasks: List[Task]
     @agent
     def researcher(self) -> Agent:
         return Agent(
-            config=self.agents_config['researcher'],  # type: ignore[index]
+            config=self.agents_config['researcher'],
             verbose=True,
             tools=[SerperDevTool()]
         )
@@ -108,20 +103,20 @@ Follow the steps below to get Crewing! 🚣‍♂️
     @agent
     def reporting_analyst(self) -> Agent:
         return Agent(
-            config=self.agents_config['reporting_analyst'],  # type: ignore[index]
+            config=self.agents_config['reporting_analyst'],
             verbose=True
         )
     @task
     def research_task(self) -> Task:
         return Task(
-            config=self.tasks_config['research_task'],  # type: ignore[index]
+            config=self.tasks_config['research_task'],
         )
     @task
     def reporting_task(self) -> Task:
         return Task(
-            config=self.tasks_config['reporting_task'],  # type: ignore[index]
+            config=self.tasks_config['reporting_task'],
             output_file='output/report.md'  # This is the file that will contain the final report.
         )
@@ -205,22 +200,6 @@
 ```
 </CodeGroup>
 </Step>
-<Step title="Enterprise Alternative: Create in Crew Studio">
-For CrewAI Enterprise users, you can create the same crew without writing code:
-1. Log in to your CrewAI Enterprise account (create a free account at [app.crewai.com](https://app.crewai.com))
-2. Open Crew Studio
-3. Describe the automation you're trying to build
-4. Create your tasks visually and connect them in sequence
-5. Configure your inputs and click "Download Code" or "Deploy"
-![Crew Studio Quickstart](../images/enterprise/crew-studio-quickstart.png)
-<Card title="Try CrewAI Enterprise" icon="rocket" href="https://app.crewai.com">
-Start your free account at CrewAI Enterprise
-</Card>
-</Step>
 <Step title="View your final report">
 You should see the output in the console and the `report.md` file should be created in the root of your project with the final report.


@@ -22,16 +22,7 @@ usage of tools, API calls, responses, any data processed by the agents, or secrets
 When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected
 to provide deeper insights. This expanded data collection may include personal information if users have incorporated it into their crews or tasks.
 Users should carefully consider the content of their crews and tasks before enabling `share_crew`.
-Users can disable telemetry by setting the environment variable `CREWAI_DISABLE_TELEMETRY` to `true` or by setting `OTEL_SDK_DISABLED` to `true` (note that the latter disables all OpenTelemetry instrumentation globally).
+Users can disable telemetry by setting the environment variable `OTEL_SDK_DISABLED` to `true`.
-### Examples:
-```python
-# Disable CrewAI telemetry only
-os.environ['CREWAI_DISABLE_TELEMETRY'] = 'true'
-# Disable all OpenTelemetry (including CrewAI)
-os.environ['OTEL_SDK_DISABLED'] = 'true'
-```
 ### Data Explanation:
 | Defaulted | Data | Reason and Specifics |
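Note on the surviving mechanism in this hunk: `OTEL_SDK_DISABLED` is the generic OpenTelemetry switch, so it turns off all OTel instrumentation, not only CrewAI's. A minimal sketch of the right-hand side's approach (env var name taken from the hunk above; set it before `crewai` is imported so the SDK sees it):

```python
import os

# Disables all OpenTelemetry instrumentation, CrewAI telemetry included
os.environ["OTEL_SDK_DISABLED"] = "true"

import crewai  # imported after the flag is set
```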


@@ -1,187 +0,0 @@
---
title: Bedrock Invoke Agent Tool
description: Enables CrewAI agents to invoke Amazon Bedrock Agents and leverage their capabilities within your workflows
icon: aws
---
# `BedrockInvokeAgentTool`
The `BedrockInvokeAgentTool` enables CrewAI agents to invoke Amazon Bedrock Agents and leverage their capabilities within your workflows.
## Installation
```bash
uv pip install 'crewai[tools]'
```
## Requirements
- AWS credentials configured (either through environment variables or AWS CLI)
- `boto3` and `python-dotenv` packages
- Access to Amazon Bedrock Agents
## Usage
Here's how to use the tool with a CrewAI agent:
```python {2, 4-8}
from crewai import Agent, Task, Crew
from crewai_tools.aws.bedrock.agents.invoke_agent_tool import BedrockInvokeAgentTool
# Initialize the tool
agent_tool = BedrockInvokeAgentTool(
agent_id="your-agent-id",
agent_alias_id="your-agent-alias-id"
)
# Create a CrewAI agent that uses the tool
aws_expert = Agent(
role='AWS Service Expert',
goal='Help users understand AWS services and quotas',
backstory='I am an expert in AWS services and can provide detailed information about them.',
tools=[agent_tool],
verbose=True
)
# Create a task for the agent
quota_task = Task(
description="Find out the current service quotas for EC2 in us-west-2 and explain any recent changes.",
agent=aws_expert
)
# Create a crew with the agent
crew = Crew(
agents=[aws_expert],
tasks=[quota_task],
verbose=2
)
# Run the crew
result = crew.kickoff()
print(result)
```
## Tool Arguments
| Argument | Type | Required | Default | Description |
|:---------|:-----|:---------|:--------|:------------|
| **agent_id** | `str` | Yes | None | The unique identifier of the Bedrock agent |
| **agent_alias_id** | `str` | Yes | None | The unique identifier of the agent alias |
| **session_id** | `str` | No | timestamp | The unique identifier of the session |
| **enable_trace** | `bool` | No | False | Whether to enable trace for debugging |
| **end_session** | `bool` | No | False | Whether to end the session after invocation |
| **description** | `str` | No | None | Custom description for the tool |
## Environment Variables
```bash
BEDROCK_AGENT_ID=your-agent-id # Alternative to passing agent_id
BEDROCK_AGENT_ALIAS_ID=your-agent-alias-id # Alternative to passing agent_alias_id
AWS_REGION=your-aws-region # Defaults to us-west-2
AWS_ACCESS_KEY_ID=your-access-key # Required for AWS authentication
AWS_SECRET_ACCESS_KEY=your-secret-key # Required for AWS authentication
```
## Advanced Usage
### Multi-Agent Workflow with Session Management
```python {2, 4-22}
from crewai import Agent, Task, Crew, Process
from crewai_tools.aws.bedrock.agents.invoke_agent_tool import BedrockInvokeAgentTool
# Initialize tools with session management
initial_tool = BedrockInvokeAgentTool(
agent_id="your-agent-id",
agent_alias_id="your-agent-alias-id",
session_id="custom-session-id"
)
followup_tool = BedrockInvokeAgentTool(
agent_id="your-agent-id",
agent_alias_id="your-agent-alias-id",
session_id="custom-session-id"
)
final_tool = BedrockInvokeAgentTool(
agent_id="your-agent-id",
agent_alias_id="your-agent-alias-id",
session_id="custom-session-id",
end_session=True
)
# Create agents for different stages
researcher = Agent(
role='AWS Service Researcher',
goal='Gather information about AWS services',
backstory='I am specialized in finding detailed AWS service information.',
tools=[initial_tool]
)
analyst = Agent(
role='Service Compatibility Analyst',
goal='Analyze service compatibility and requirements',
backstory='I analyze AWS services for compatibility and integration possibilities.',
tools=[followup_tool]
)
summarizer = Agent(
role='Technical Documentation Writer',
goal='Create clear technical summaries',
backstory='I specialize in creating clear, concise technical documentation.',
tools=[final_tool]
)
# Create tasks
research_task = Task(
description="Find all available AWS services in us-west-2 region.",
agent=researcher
)
analysis_task = Task(
description="Analyze which services support IPv6 and their implementation requirements.",
agent=analyst
)
summary_task = Task(
description="Create a summary of IPv6-compatible services and their key features.",
agent=summarizer
)
# Create a crew with the agents and tasks
crew = Crew(
agents=[researcher, analyst, summarizer],
tasks=[research_task, analysis_task, summary_task],
process=Process.sequential,
verbose=2
)
# Run the crew
result = crew.kickoff()
```
## Use Cases
### Hybrid Multi-Agent Collaborations
- Create workflows where CrewAI agents collaborate with managed Bedrock agents running as services in AWS
- Enable scenarios where sensitive data processing happens within your AWS environment while other agents operate externally
- Bridge on-premises CrewAI agents with cloud-based Bedrock agents for distributed intelligence workflows
### Data Sovereignty and Compliance
- Keep data-sensitive agentic workflows within your AWS environment while allowing external CrewAI agents to orchestrate tasks
- Maintain compliance with data residency requirements by processing sensitive information only within your AWS account
- Enable secure multi-agent collaborations where some agents cannot access your organization's private data
### Seamless AWS Service Integration
- Access any AWS service through Amazon Bedrock Actions without writing complex integration code
- Enable CrewAI agents to interact with AWS services through natural language requests
- Leverage pre-built Bedrock agent capabilities to interact with AWS services like Bedrock Knowledge Bases, Lambda, and more
### Scalable Hybrid Agent Architectures
- Offload computationally intensive tasks to managed Bedrock agents while lightweight tasks run in CrewAI
- Scale agent processing by distributing workloads between local CrewAI agents and cloud-based Bedrock agents
### Cross-Organizational Agent Collaboration
- Enable secure collaboration between your organization's CrewAI agents and partner organizations' Bedrock agents
- Create workflows where external expertise from Bedrock agents can be incorporated without exposing sensitive data
- Build agent ecosystems that span organizational boundaries while maintaining security and data control


@@ -1,165 +0,0 @@
---
title: 'Bedrock Knowledge Base Retriever'
description: 'Retrieve information from Amazon Bedrock Knowledge Bases using natural language queries'
icon: aws
---
# `BedrockKBRetrieverTool`
The `BedrockKBRetrieverTool` enables CrewAI agents to retrieve information from Amazon Bedrock Knowledge Bases using natural language queries.
## Installation
```bash
uv pip install 'crewai[tools]'
```
## Requirements
- AWS credentials configured (either through environment variables or AWS CLI)
- `boto3` and `python-dotenv` packages
- Access to Amazon Bedrock Knowledge Base
## Usage
Here's how to use the tool with a CrewAI agent:
```python {2, 4-17}
from crewai import Agent, Task, Crew
from crewai_tools.aws.bedrock.knowledge_base.retriever_tool import BedrockKBRetrieverTool
# Initialize the tool
kb_tool = BedrockKBRetrieverTool(
knowledge_base_id="your-kb-id",
number_of_results=5
)
# Create a CrewAI agent that uses the tool
researcher = Agent(
role='Knowledge Base Researcher',
goal='Find information about company policies',
backstory='I am a researcher specialized in retrieving and analyzing company documentation.',
tools=[kb_tool],
verbose=True
)
# Create a task for the agent
research_task = Task(
description="Find our company's remote work policy and summarize the key points.",
agent=researcher
)
# Create a crew with the agent
crew = Crew(
agents=[researcher],
tasks=[research_task],
verbose=2
)
# Run the crew
result = crew.kickoff()
print(result)
```
## Tool Arguments
| Argument | Type | Required | Default | Description |
|:---------|:-----|:---------|:---------|:-------------|
| **knowledge_base_id** | `str` | Yes | None | The unique identifier of the knowledge base (0-10 alphanumeric characters) |
| **number_of_results** | `int` | No | 5 | Maximum number of results to return |
| **retrieval_configuration** | `dict` | No | None | Custom configurations for the knowledge base query |
| **guardrail_configuration** | `dict` | No | None | Content filtering settings |
| **next_token** | `str` | No | None | Token for pagination |
## Environment Variables
```bash
BEDROCK_KB_ID=your-knowledge-base-id # Alternative to passing knowledge_base_id
AWS_REGION=your-aws-region # Defaults to us-east-1
AWS_ACCESS_KEY_ID=your-access-key # Required for AWS authentication
AWS_SECRET_ACCESS_KEY=your-secret-key # Required for AWS authentication
```
## Response Format
The tool returns results in JSON format:
```json
{
"results": [
{
"content": "Retrieved text content",
"content_type": "text",
"source_type": "S3",
"source_uri": "s3://bucket/document.pdf",
"score": 0.95,
"metadata": {
"additional": "metadata"
}
}
],
"nextToken": "pagination-token",
"guardrailAction": "NONE"
}
```
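Because the tool hands back this payload as a JSON string, downstream code can unpack it with the standard library. A minimal sketch using the field names shown above (the `kb_tool` instance comes from this page's usage example; the `query` argument name is an assumption):

```python
import json

raw = kb_tool.run(query="remote work policy")  # hypothetical call
payload = json.loads(raw)
for item in payload["results"]:
    # Each result carries the retrieved text, its source, and a relevance score
    print(f"{item['score']:.2f} {item['source_uri']}: {item['content'][:80]}")
```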
## Advanced Usage
### Custom Retrieval Configuration
```python
kb_tool = BedrockKBRetrieverTool(
knowledge_base_id="your-kb-id",
retrieval_configuration={
"vectorSearchConfiguration": {
"numberOfResults": 10,
"overrideSearchType": "HYBRID"
}
}
)
policy_expert = Agent(
role='Policy Expert',
goal='Analyze company policies in detail',
backstory='I am an expert in corporate policy analysis with deep knowledge of regulatory requirements.',
tools=[kb_tool]
)
```
## Supported Data Sources
- Amazon S3
- Confluence
- Salesforce
- SharePoint
- Web pages
- Custom document locations
- Amazon Kendra
- SQL databases
## Use Cases
### Enterprise Knowledge Integration
- Enable CrewAI agents to access your organization's proprietary knowledge without exposing sensitive data
- Allow agents to make decisions based on your company's specific policies, procedures, and documentation
- Create agents that can answer questions based on your internal documentation while maintaining data security
### Specialized Domain Knowledge
- Connect CrewAI agents to domain-specific knowledge bases (legal, medical, technical) without retraining models
- Leverage existing knowledge repositories that are already maintained in your AWS environment
- Combine CrewAI's reasoning with domain-specific information from your knowledge bases
### Data-Driven Decision Making
- Ground CrewAI agent responses in your actual company data rather than general knowledge
- Ensure agents provide recommendations based on your specific business context and documentation
- Reduce hallucinations by retrieving factual information from your knowledge bases
### Scalable Information Access
- Access terabytes of organizational knowledge without embedding it all into your models
- Dynamically query only the relevant information needed for specific tasks
- Leverage AWS's scalable infrastructure to handle large knowledge bases efficiently
### Compliance and Governance
- Ensure CrewAI agents provide responses that align with your company's approved documentation
- Create auditable trails of information sources used by your agents
- Maintain control over what information sources your agents can access


@@ -30,7 +30,7 @@ pip install 'crewai[tools]'
 Here are updated examples on how to utilize the JSONSearchTool effectively for searching within JSON files. These examples take into account the current implementation and usage patterns identified in the codebase.
 ```python Code
-from crewai_tools import JSONSearchTool
+from crewai.json_tools import JSONSearchTool  # Updated import path
 # General JSON content search
 # This approach is suitable when the JSON path is either known beforehand or can be dynamically identified.


@@ -25,7 +25,7 @@ uv add weaviate-client
 To effectively use the `WeaviateVectorSearchTool`, follow these steps:
 1. **Package Installation**: Confirm that the `crewai[tools]` and `weaviate-client` packages are installed in your Python environment.
-2. **Weaviate Setup**: Set up a Weaviate cluster. You can follow the [Weaviate documentation](https://weaviate.io/developers/wcs/manage-clusters/connect) for instructions.
+2. **Weaviate Setup**: Set up a Weaviate cluster. You can follow the [Weaviate documentation](https://weaviate.io/developers/wcs/connect) for instructions.
 3. **API Keys**: Obtain your Weaviate cluster URL and API key.
 4. **OpenAI API Key**: Ensure you have an OpenAI API key set in your environment variables as `OPENAI_API_KEY`.
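Putting the four steps together, a minimal instantiation sketch (the constructor parameter names are assumptions inferred from this page, not taken verbatim from the diff):

```python
import os
from crewai_tools import WeaviateVectorSearchTool

# Assumes OPENAI_API_KEY is already exported (step 4)
tool = WeaviateVectorSearchTool(
    collection_name="internal_docs",                          # hypothetical collection
    weaviate_cluster_url=os.environ["WEAVIATE_CLUSTER_URL"],  # step 3
    weaviate_api_key=os.environ["WEAVIATE_API_KEY"],          # step 3
)
```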


@@ -1,6 +1,6 @@
[project] [project]
name = "crewai" name = "crewai"
version = "0.114.0" version = "0.108.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks." description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md" readme = "README.md"
requires-python = ">=3.10,<3.13" requires-python = ">=3.10,<3.13"
@@ -45,7 +45,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI" Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies] [project.optional-dependencies]
tools = ["crewai-tools~=0.40.1"] tools = ["crewai-tools>=0.37.0"]
embeddings = [ embeddings = [
"tiktoken~=0.7.0" "tiktoken~=0.7.0"
] ]
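One note on the `crewai-tools` change above: `~=` is PEP 440's compatible-release operator, so the two sides pin very different ranges. A quick illustration with the `packaging` library:

```python
from packaging.specifiers import SpecifierSet

compat = SpecifierSet("~=0.40.1")  # equivalent to >=0.40.1, <0.41.0
loose = SpecifierSet(">=0.37.0")   # any version from 0.37.0 upward

print("0.40.9" in compat)  # True
print("0.41.0" in compat)  # False, blocked by the compatible-release bound
print("0.41.0" in loose)   # True
```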
@@ -64,9 +64,6 @@ mem0 = ["mem0ai>=0.1.29"]
 docling = [
     "docling>=2.12.0",
 ]
-aisuite = [
-    "aisuite>=0.1.10",
-]
 [tool.uv]
 dev-dependencies = [
@@ -81,10 +78,10 @@ dev-dependencies = [
     "pillow>=10.2.0",
     "cairosvg>=2.7.1",
     "pytest>=8.0.0",
+    "pytest-vcr>=1.0.2",
     "python-dotenv>=1.0.0",
     "pytest-asyncio>=0.23.7",
     "pytest-subprocess>=1.5.2",
-    "pytest-recording>=0.13.2",
 ]
 [project.scripts]


@@ -2,14 +2,11 @@ import warnings
 from crewai.agent import Agent
 from crewai.crew import Crew
-from crewai.crews.crew_output import CrewOutput
 from crewai.flow.flow import Flow
 from crewai.knowledge.knowledge import Knowledge
 from crewai.llm import LLM
-from crewai.llms.base_llm import BaseLLM
 from crewai.process import Process
 from crewai.task import Task
-from crewai.tasks.task_output import TaskOutput
 warnings.filterwarnings(
     "ignore",
@@ -17,16 +14,13 @@ warnings.filterwarnings(
     category=UserWarning,
     module="pydantic.main",
 )
-__version__ = "0.114.0"
+__version__ = "0.108.0"
 __all__ = [
     "Agent",
     "Crew",
-    "CrewOutput",
     "Process",
     "Task",
     "LLM",
-    "BaseLLM",
     "Flow",
     "Knowledge",
-    "TaskOutput",
 ]
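For downstream users, the visible effect of this hunk is which names resolve from the package root. A short sketch, using only imports named in the lines above:

```python
# Top-level imports that exist only on the 0.114.0 side of this diff
from crewai import BaseLLM, CrewOutput, TaskOutput

# The defining modules from the removed import lines
from crewai.crews.crew_output import CrewOutput
from crewai.tasks.task_output import TaskOutput
```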


@@ -1,6 +1,7 @@
+import re
 import shutil
 import subprocess
-from typing import Any, Dict, List, Literal, Optional, Sequence, Type, Union
+from typing import Any, Dict, List, Literal, Optional, Sequence, Union
 from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -10,19 +11,13 @@ from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.knowledge.knowledge import Knowledge
 from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
 from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
-from crewai.lite_agent import LiteAgent, LiteAgentOutput
-from crewai.llm import BaseLLM
+from crewai.llm import LLM
 from crewai.memory.contextual.contextual_memory import ContextualMemory
 from crewai.security import Fingerprint
 from crewai.task import Task
 from crewai.tools import BaseTool
 from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.utilities import Converter, Prompts
-from crewai.utilities.agent_utils import (
-    get_tool_names,
-    parse_tools,
-    render_text_description_and_args,
-)
 from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
 from crewai.utilities.converter import generate_model_description
 from crewai.utilities.events.agent_events import (
@@ -76,10 +71,10 @@ class Agent(BaseAgent):
         default=True,
         description="Use system prompt for the agent.",
     )
-    llm: Union[str, InstanceOf[BaseLLM], Any] = Field(
+    llm: Union[str, InstanceOf[LLM], Any] = Field(
         description="Language model that will run the agent.", default=None
     )
-    function_calling_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
+    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
         description="Language model that will run the agent.", default=None
     )
     system_template: Optional[str] = Field(
@@ -91,6 +86,9 @@ class Agent(BaseAgent):
     response_template: Optional[str] = Field(
         default=None, description="Response format for the agent."
     )
+    tools_results: Optional[List[Any]] = Field(
+        default=[], description="Results of the tools used by the agent."
+    )
     allow_code_execution: Optional[bool] = Field(
         default=False, description="Enable code execution for the agent."
     )
@@ -120,9 +118,7 @@
         self.agent_ops_agent_name = self.role
         self.llm = create_llm(self.llm)
-        if self.function_calling_llm and not isinstance(
-            self.function_calling_llm, BaseLLM
-        ):
+        if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
             self.function_calling_llm = create_llm(self.function_calling_llm)
         if not self.agent_executor:
@@ -144,35 +140,20 @@
                 self.embedder = crew_embedder
             if self.knowledge_sources:
+                full_pattern = re.compile(r"[^a-zA-Z0-9\-_\r\n]|(\.\.)")
+                knowledge_agent_name = f"{re.sub(full_pattern, '_', self.role)}"
                 if isinstance(self.knowledge_sources, list) and all(
                     isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
                 ):
                     self.knowledge = Knowledge(
                         sources=self.knowledge_sources,
                         embedder=self.embedder,
-                        collection_name=self.role,
+                        collection_name=knowledge_agent_name,
                         storage=self.knowledge_storage or None,
                     )
         except (TypeError, ValueError) as e:
             raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
-    def _is_any_available_memory(self) -> bool:
-        """Check if any memory is available."""
-        if not self.crew:
-            return False
-        memory_attributes = [
-            "memory",
-            "memory_config",
-            "_short_term_memory",
-            "_long_term_memory",
-            "_entity_memory",
-            "_user_memory",
-            "_external_memory",
-        ]
-        return any(getattr(self.crew, attr) for attr in memory_attributes)
     def execute_task(
         self,
         task: Task,
@@ -217,14 +198,13 @@
                 task=task_prompt, context=context
             )
-        if self._is_any_available_memory():
+        if self.crew and self.crew.memory:
             contextual_memory = ContextualMemory(
                 self.crew.memory_config,
                 self.crew._short_term_memory,
                 self.crew._long_term_memory,
                 self.crew._entity_memory,
                 self.crew._user_memory,
-                self.crew._external_memory,
             )
             memory = contextual_memory.build_context_for_task(task, context)
             if memory.strip() != "":
@@ -320,12 +300,12 @@
         Returns:
             An instance of the CrewAgentExecutor class.
         """
-        raw_tools: List[BaseTool] = tools or self.tools or []
-        parsed_tools = parse_tools(raw_tools)
+        tools = tools or self.tools or []
+        parsed_tools = self._parse_tools(tools)
         prompt = Prompts(
             agent=self,
-            has_tools=len(raw_tools) > 0,
+            tools=tools,
             i18n=self.i18n,
             use_system_prompt=self.use_system_prompt,
             system_template=self.system_template,
@@ -347,12 +327,12 @@
             crew=self.crew,
             tools=parsed_tools,
             prompt=prompt,
-            original_tools=raw_tools,
+            original_tools=tools,
             stop_words=stop_words,
             max_iter=self.max_iter,
             tools_handler=self.tools_handler,
-            tools_names=get_tool_names(parsed_tools),
-            tools_description=render_text_description_and_args(parsed_tools),
+            tools_names=self.__tools_names(parsed_tools),
+            tools_description=self._render_text_description_and_args(parsed_tools),
             step_callback=self.step_callback,
             function_calling_llm=self.function_calling_llm,
             respect_context_window=self.respect_context_window,
@@ -387,6 +367,25 @@
     def get_output_converter(self, llm, text, model, instructions):
         return Converter(llm=llm, text=text, model=model, instructions=instructions)
+    def _parse_tools(self, tools: List[Any]) -> List[Any]:  # type: ignore
+        """Parse tools to be used for the task."""
+        tools_list = []
+        try:
+            # tentatively try to import from crewai_tools import BaseTool as CrewAITool
+            from crewai.tools import BaseTool as CrewAITool
+            for tool in tools:
+                if isinstance(tool, CrewAITool):
+                    tools_list.append(tool.to_structured_tool())
+                else:
+                    tools_list.append(tool)
+        except ModuleNotFoundError:
+            tools_list = []
+            for tool in tools:
+                tools_list.append(tool)
+        return tools_list
     def _training_handler(self, task_prompt: str) -> str:
         """Handle training data for the agent task prompt to improve output on Training."""
         if data := CrewTrainingHandler(TRAINING_DATA_FILE).load():
@@ -432,6 +431,23 @@
         return description
+    def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
+        """Render the tool name, description, and args in plain text.
+        Output will be in the format of:
+        .. code-block:: markdown
+            search: This tool is used for search, args: {"query": {"type": "string"}}
+            calculator: This tool is used for math, \
+            args: {"expression": {"type": "string"}}
+        """
+        tool_strings = []
+        for tool in tools:
+            tool_strings.append(tool.description)
+        return "\n".join(tool_strings)
     def _validate_docker_installation(self) -> None:
         """Check if Docker is installed and running."""
         if not shutil.which("docker"):
@@ -451,6 +467,10 @@
                 f"Docker is not running. Please start Docker to use code execution with agent: {self.role}"
             )
+    @staticmethod
+    def __tools_names(tools) -> str:
+        return ", ".join([t.name for t in tools])
     def __repr__(self):
         return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
@@ -463,79 +483,3 @@
             Fingerprint: The agent's fingerprint
         """
         return self.security_config.fingerprint
-    def set_fingerprint(self, fingerprint: Fingerprint):
-        self.security_config.fingerprint = fingerprint
-    def kickoff(
-        self,
-        messages: Union[str, List[Dict[str, str]]],
-        response_format: Optional[Type[Any]] = None,
-    ) -> LiteAgentOutput:
-        """
-        Execute the agent with the given messages using a LiteAgent instance.
-        This method is useful when you want to use the Agent configuration but
-        with the simpler and more direct execution flow of LiteAgent.
-        Args:
-            messages: Either a string query or a list of message dictionaries.
-                If a string is provided, it will be converted to a user message.
-                If a list is provided, each dict should have 'role' and 'content' keys.
-            response_format: Optional Pydantic model for structured output.
-        Returns:
-            LiteAgentOutput: The result of the agent execution.
-        """
-        lite_agent = LiteAgent(
-            role=self.role,
-            goal=self.goal,
-            backstory=self.backstory,
-            llm=self.llm,
-            tools=self.tools or [],
-            max_iterations=self.max_iter,
-            max_execution_time=self.max_execution_time,
-            respect_context_window=self.respect_context_window,
-            verbose=self.verbose,
-            response_format=response_format,
-            i18n=self.i18n,
-            original_agent=self,
-        )
-        return lite_agent.kickoff(messages)
-    async def kickoff_async(
-        self,
-        messages: Union[str, List[Dict[str, str]]],
-        response_format: Optional[Type[Any]] = None,
-    ) -> LiteAgentOutput:
-        """
-        Execute the agent asynchronously with the given messages using a LiteAgent instance.
-        This is the async version of the kickoff method.
-        Args:
-            messages: Either a string query or a list of message dictionaries.
-                If a string is provided, it will be converted to a user message.
-                If a list is provided, each dict should have 'role' and 'content' keys.
-            response_format: Optional Pydantic model for structured output.
-        Returns:
-            LiteAgentOutput: The result of the agent execution.
-        """
-        lite_agent = LiteAgent(
-            role=self.role,
-            goal=self.goal,
-            backstory=self.backstory,
-            llm=self.llm,
-            tools=self.tools or [],
-            max_iterations=self.max_iter,
-            max_execution_time=self.max_execution_time,
-            respect_context_window=self.respect_context_window,
-            verbose=self.verbose,
-            response_format=response_format,
-            i18n=self.i18n,
-            original_agent=self,
-        )
-        return await lite_agent.kickoff_async(messages)
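The removed `kickoff`/`kickoff_async` pair gave `Agent` a direct, crew-less execution path through `LiteAgent`. On the side that still has it, usage looks roughly like this (a sketch inferred from the signatures above; role, goal, and backstory are placeholders):

```python
from crewai import Agent

agent = Agent(role="Researcher", goal="Answer questions", backstory="A focused analyst.")

# A plain string is wrapped into a single user message (per the docstring above)
result = agent.kickoff("Summarize the latest AI developments")

# Or pass explicit chat messages
result = agent.kickoff(
    [{"role": "user", "content": "Summarize the latest AI developments"}]
)
print(result)  # a LiteAgentOutput
```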


@@ -1,42 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
from pydantic import PrivateAttr
from crewai.agent import BaseAgent
from crewai.tools import BaseTool
class BaseAgentAdapter(BaseAgent, ABC):
"""Base class for all agent adapters in CrewAI.
This abstract class defines the common interface and functionality that all
agent adapters must implement. It extends BaseAgent to maintain compatibility
with the CrewAI framework while adding adapter-specific requirements.
"""
adapted_structured_output: bool = False
_agent_config: Optional[Dict[str, Any]] = PrivateAttr(default=None)
model_config = {"arbitrary_types_allowed": True}
def __init__(self, agent_config: Optional[Dict[str, Any]] = None, **kwargs: Any):
super().__init__(adapted_agent=True, **kwargs)
self._agent_config = agent_config
@abstractmethod
def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
"""Configure and adapt tools for the specific agent implementation.
Args:
tools: Optional list of BaseTool instances to be configured
"""
pass
def configure_structured_output(self, structured_output: Any) -> None:
"""Configure the structured output for the specific agent implementation.
Args:
structured_output: The structured output to be configured
"""
pass


@@ -1,29 +0,0 @@
from abc import ABC, abstractmethod
class BaseConverterAdapter(ABC):
"""Base class for all converter adapters in CrewAI.
This abstract class defines the common interface and functionality that all
converter adapters must implement for converting structured output.
"""
def __init__(self, agent_adapter):
self.agent_adapter = agent_adapter
@abstractmethod
def configure_structured_output(self, task) -> None:
"""Configure agents to return structured output.
Must support json and pydantic output.
"""
pass
@abstractmethod
def enhance_system_prompt(self, base_prompt: str) -> str:
"""Enhance the system prompt with structured output instructions."""
pass
@abstractmethod
def post_process_result(self, result: str) -> str:
"""Post-process the result to ensure it matches the expected format: string."""
pass


@@ -1,37 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from crewai.tools.base_tool import BaseTool
class BaseToolAdapter(ABC):
"""Base class for all tool adapters in CrewAI.
This abstract class defines the common interface that all tool adapters
must implement. It provides the structure for adapting CrewAI tools to
different frameworks and platforms.
"""
original_tools: List[BaseTool]
converted_tools: List[Any]
def __init__(self, tools: Optional[List[BaseTool]] = None):
self.original_tools = tools or []
self.converted_tools = []
@abstractmethod
def configure_tools(self, tools: List[BaseTool]) -> None:
"""Configure and convert tools for the specific implementation.
Args:
tools: List of BaseTool instances to be configured and converted
"""
pass
def tools(self) -> List[Any]:
"""Return all converted tools."""
return self.converted_tools
def sanitize_tool_name(self, tool_name: str) -> str:
"""Sanitize tool name for API compatibility."""
return tool_name.replace(" ", "_")


@@ -1,226 +0,0 @@
from typing import Any, AsyncIterable, Dict, List, Optional
from pydantic import Field, PrivateAttr
from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
from crewai.agents.agent_adapters.langgraph.langgraph_tool_adapter import (
LangGraphToolAdapter,
)
from crewai.agents.agent_adapters.langgraph.structured_output_converter import (
LangGraphConverterAdapter,
)
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import BaseTool
from crewai.utilities import Logger
from crewai.utilities.converter import Converter
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
try:
from langchain_core.messages import ToolMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
LANGGRAPH_AVAILABLE = True
except ImportError:
LANGGRAPH_AVAILABLE = False
class LangGraphAgentAdapter(BaseAgentAdapter):
"""Adapter for LangGraph agents to work with CrewAI."""
model_config = {"arbitrary_types_allowed": True}
_logger: Logger = PrivateAttr(default_factory=lambda: Logger())
_tool_adapter: LangGraphToolAdapter = PrivateAttr()
_graph: Any = PrivateAttr(default=None)
_memory: Any = PrivateAttr(default=None)
_max_iterations: int = PrivateAttr(default=10)
function_calling_llm: Any = Field(default=None)
step_callback: Any = Field(default=None)
model: str = Field(default="gpt-4o")
verbose: bool = Field(default=False)
def __init__(
self,
role: str,
goal: str,
backstory: str,
tools: Optional[List[BaseTool]] = None,
llm: Any = None,
max_iterations: int = 10,
agent_config: Optional[Dict[str, Any]] = None,
**kwargs,
):
"""Initialize the LangGraph agent adapter."""
if not LANGGRAPH_AVAILABLE:
raise ImportError(
"LangGraph Agent Dependencies are not installed. Please install it using `uv add langchain-core langgraph`"
)
super().__init__(
role=role,
goal=goal,
backstory=backstory,
tools=tools,
llm=llm or self.model,
agent_config=agent_config,
**kwargs,
)
self._tool_adapter = LangGraphToolAdapter(tools=tools)
self._converter_adapter = LangGraphConverterAdapter(self)
self._max_iterations = max_iterations
self._setup_graph()
def _setup_graph(self) -> None:
"""Set up the LangGraph workflow graph."""
try:
self._memory = MemorySaver()
converted_tools: List[Any] = self._tool_adapter.tools()
if self._agent_config:
self._graph = create_react_agent(
model=self.llm,
tools=converted_tools,
checkpointer=self._memory,
debug=self.verbose,
**self._agent_config,
)
else:
self._graph = create_react_agent(
model=self.llm,
tools=converted_tools or [],
checkpointer=self._memory,
debug=self.verbose,
)
except ImportError as e:
self._logger.log(
"error", f"Failed to import LangGraph dependencies: {str(e)}"
)
raise
except Exception as e:
self._logger.log("error", f"Error setting up LangGraph agent: {str(e)}")
raise
def _build_system_prompt(self) -> str:
"""Build a system prompt for the LangGraph agent."""
base_prompt = f"""
You are {self.role}.
Your goal is: {self.goal}
Your backstory: {self.backstory}
When working on tasks, think step-by-step and use the available tools when necessary.
"""
return self._converter_adapter.enhance_system_prompt(base_prompt)
def execute_task(
self,
task: Any,
context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None,
) -> str:
"""Execute a task using the LangGraph workflow."""
self.create_agent_executor(tools)
self.configure_structured_output(task)
try:
task_prompt = task.prompt() if hasattr(task, "prompt") else str(task)
if context:
task_prompt = self.i18n.slice("task_with_context").format(
task=task_prompt, context=context
)
crewai_event_bus.emit(
self,
event=AgentExecutionStartedEvent(
agent=self,
tools=self.tools,
task_prompt=task_prompt,
task=task,
),
)
session_id = f"task_{id(task)}"
config = {"configurable": {"thread_id": session_id}}
result = self._graph.invoke(
{
"messages": [
("system", self._build_system_prompt()),
("user", task_prompt),
]
},
config,
)
messages = result.get("messages", [])
last_message = messages[-1] if messages else None
final_answer = ""
if isinstance(last_message, dict):
final_answer = last_message.get("content", "")
elif hasattr(last_message, "content"):
final_answer = getattr(last_message, "content", "")
final_answer = (
self._converter_adapter.post_process_result(final_answer)
or "Task execution completed but no clear answer was provided."
)
crewai_event_bus.emit(
self,
event=AgentExecutionCompletedEvent(
agent=self, task=task, output=final_answer
),
)
return final_answer
except Exception as e:
self._logger.log("error", f"Error executing LangGraph task: {str(e)}")
crewai_event_bus.emit(
self,
event=AgentExecutionErrorEvent(
agent=self,
task=task,
error=str(e),
),
)
raise
def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
"""Configure the LangGraph agent for execution."""
self.configure_tools(tools)
def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
"""Configure tools for the LangGraph agent."""
if tools:
all_tools = list(self.tools or []) + list(tools or [])
self._tool_adapter.configure_tools(all_tools)
available_tools = self._tool_adapter.tools()
self._graph.tools = available_tools
def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
"""Implement delegation tools support for LangGraph."""
agent_tools = AgentTools(agents=agents)
return agent_tools.tools()
def get_output_converter(
self, llm: Any, text: str, model: Any, instructions: str
) -> Any:
"""Convert output format if needed."""
return Converter(llm=llm, text=text, model=model, instructions=instructions)
def configure_structured_output(self, task) -> None:
"""Configure the structured output for LangGraph."""
self._converter_adapter.configure_structured_output(task)
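Before its removal, this adapter could stand in for a native CrewAI agent. A minimal construction sketch (only the constructor signature comes from the code above; the module path and model string are assumptions):

```python
# Path assumed from the sibling imports above; needs `uv add langchain-core langgraph`
from crewai.agents.agent_adapters.langgraph.langgraph_adapter import LangGraphAgentAdapter

adapter = LangGraphAgentAdapter(
    role="Researcher",
    goal="Answer questions with tools",
    backstory="An agent backed by a LangGraph ReAct graph.",
    llm="gpt-4o",       # optional; the `model` field default covers it otherwise
    max_iterations=5,
)
# result = adapter.execute_task(task)  # task: a CrewAI Task
```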


@@ -1,61 +0,0 @@
import inspect
from typing import Any, List, Optional
from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
from crewai.tools.base_tool import BaseTool
class LangGraphToolAdapter(BaseToolAdapter):
"""Adapts CrewAI tools to LangGraph agent tool compatible format"""
def __init__(self, tools: Optional[List[BaseTool]] = None):
self.original_tools = tools or []
self.converted_tools = []
def configure_tools(self, tools: List[BaseTool]) -> None:
"""
Configure and convert CrewAI tools to LangGraph-compatible format.
LangGraph expects tools in langchain_core.tools format.
"""
from langchain_core.tools import BaseTool, StructuredTool
converted_tools = []
if self.original_tools:
all_tools = tools + self.original_tools
else:
all_tools = tools
for tool in all_tools:
if isinstance(tool, BaseTool):
converted_tools.append(tool)
continue
sanitized_name = self.sanitize_tool_name(tool.name)
async def tool_wrapper(*args, tool=tool, **kwargs):
output = None
if len(args) > 0 and isinstance(args[0], str):
output = tool.run(args[0])
elif "input" in kwargs:
output = tool.run(kwargs["input"])
else:
output = tool.run(**kwargs)
if inspect.isawaitable(output):
result = await output
else:
result = output
return result
converted_tool = StructuredTool(
name=sanitized_name,
description=tool.description,
func=tool_wrapper,
args_schema=tool.args_schema,
)
converted_tools.append(converted_tool)
self.converted_tools = converted_tools
def tools(self) -> List[Any]:
return self.converted_tools or []


@@ -1,80 +0,0 @@
import json
from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
from crewai.utilities.converter import generate_model_description
class LangGraphConverterAdapter(BaseConverterAdapter):
"""Adapter for handling structured output conversion in LangGraph agents"""
def __init__(self, agent_adapter):
"""Initialize the converter adapter with a reference to the agent adapter"""
self.agent_adapter = agent_adapter
self._output_format = None
self._schema = None
self._system_prompt_appendix = None
def configure_structured_output(self, task) -> None:
"""Configure the structured output for LangGraph."""
if not (task.output_json or task.output_pydantic):
self._output_format = None
self._schema = None
self._system_prompt_appendix = None
return
if task.output_json:
self._output_format = "json"
self._schema = generate_model_description(task.output_json)
elif task.output_pydantic:
self._output_format = "pydantic"
self._schema = generate_model_description(task.output_pydantic)
self._system_prompt_appendix = self._generate_system_prompt_appendix()
def _generate_system_prompt_appendix(self) -> str:
"""Generate an appendix for the system prompt to enforce structured output"""
if not self._output_format or not self._schema:
return ""
return f"""
Important: Your final answer MUST be provided in the following structured format:
{self._schema}
DO NOT include any markdown code blocks, backticks, or other formatting around your response.
The output should be raw JSON that exactly matches the specified schema.
"""
def enhance_system_prompt(self, original_prompt: str) -> str:
"""Add structured output instructions to the system prompt if needed"""
if not self._system_prompt_appendix:
return original_prompt
return f"{original_prompt}\n{self._system_prompt_appendix}"
def post_process_result(self, result: str) -> str:
"""Post-process the result to ensure it matches the expected format"""
if not self._output_format:
return result
# Try to extract valid JSON if it's wrapped in code blocks or other text
if self._output_format in ["json", "pydantic"]:
try:
# First, try to parse as is
json.loads(result)
return result
except json.JSONDecodeError:
# Try to extract JSON from the text
import re
json_match = re.search(r"(\{.*\})", result, re.DOTALL)
if json_match:
try:
extracted = json_match.group(1)
# Validate it's proper JSON
json.loads(extracted)
return extracted
except:
pass
return result


@@ -1,178 +0,0 @@
from typing import Any, List, Optional
from pydantic import Field, PrivateAttr
from crewai.agents.agent_adapters.base_agent_adapter import BaseAgentAdapter
from crewai.agents.agent_adapters.openai_agents.structured_output_converter import (
OpenAIConverterAdapter,
)
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Logger
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
try:
from agents import Agent as OpenAIAgent # type: ignore
from agents import Runner, enable_verbose_stdout_logging # type: ignore
from .openai_agent_tool_adapter import OpenAIAgentToolAdapter
OPENAI_AVAILABLE = True
except ImportError:
OPENAI_AVAILABLE = False
class OpenAIAgentAdapter(BaseAgentAdapter):
"""Adapter for OpenAI Assistants"""
model_config = {"arbitrary_types_allowed": True}
_openai_agent: "OpenAIAgent" = PrivateAttr()
_logger: Logger = PrivateAttr(default_factory=lambda: Logger())
_active_thread: Optional[str] = PrivateAttr(default=None)
function_calling_llm: Any = Field(default=None)
step_callback: Any = Field(default=None)
_tool_adapter: "OpenAIAgentToolAdapter" = PrivateAttr()
_converter_adapter: OpenAIConverterAdapter = PrivateAttr()
def __init__(
self,
model: str = "gpt-4o-mini",
tools: Optional[List[BaseTool]] = None,
agent_config: Optional[dict] = None,
**kwargs,
):
if not OPENAI_AVAILABLE:
raise ImportError(
"OpenAI Agent Dependencies are not installed. Please install it using `uv add openai-agents`"
)
else:
role = kwargs.pop("role", None)
goal = kwargs.pop("goal", None)
backstory = kwargs.pop("backstory", None)
super().__init__(
role=role,
goal=goal,
backstory=backstory,
tools=tools,
agent_config=agent_config,
**kwargs,
)
self._tool_adapter = OpenAIAgentToolAdapter(tools=tools)
self.llm = model
self._converter_adapter = OpenAIConverterAdapter(self)
def _build_system_prompt(self) -> str:
"""Build a system prompt for the OpenAI agent."""
base_prompt = f"""
You are {self.role}.
Your goal is: {self.goal}
Your backstory: {self.backstory}
When working on tasks, think step-by-step and use the available tools when necessary.
"""
return self._converter_adapter.enhance_system_prompt(base_prompt)
def execute_task(
self,
task: Any,
context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None,
) -> str:
"""Execute a task using the OpenAI Assistant"""
self._converter_adapter.configure_structured_output(task)
self.create_agent_executor(tools)
if self.verbose:
enable_verbose_stdout_logging()
try:
task_prompt = task.prompt()
if context:
task_prompt = self.i18n.slice("task_with_context").format(
task=task_prompt, context=context
)
crewai_event_bus.emit(
self,
event=AgentExecutionStartedEvent(
agent=self,
tools=self.tools,
task_prompt=task_prompt,
task=task,
),
)
result = self.agent_executor.run_sync(self._openai_agent, task_prompt)
final_answer = self.handle_execution_result(result)
crewai_event_bus.emit(
self,
event=AgentExecutionCompletedEvent(
agent=self, task=task, output=final_answer
),
)
return final_answer
except Exception as e:
self._logger.log("error", f"Error executing OpenAI task: {str(e)}")
crewai_event_bus.emit(
self,
event=AgentExecutionErrorEvent(
agent=self,
task=task,
error=str(e),
),
)
raise
def create_agent_executor(self, tools: Optional[List[BaseTool]] = None) -> None:
"""
Configure the OpenAI agent for execution.
While OpenAI handles execution differently through Runner,
we can use this method to set up tools and configurations.
"""
all_tools = list(self.tools or []) + list(tools or [])
instructions = self._build_system_prompt()
self._openai_agent = OpenAIAgent(
name=self.role,
instructions=instructions,
model=self.llm,
**self._agent_config or {},
)
if all_tools:
self.configure_tools(all_tools)
self.agent_executor = Runner
def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
"""Configure tools for the OpenAI Assistant"""
if tools:
self._tool_adapter.configure_tools(tools)
if self._tool_adapter.converted_tools:
self._openai_agent.tools = self._tool_adapter.converted_tools
def handle_execution_result(self, result: Any) -> str:
"""Process OpenAI Assistant execution result converting any structured output to a string"""
return self._converter_adapter.post_process_result(result.final_output)
def get_delegation_tools(self, agents: List[BaseAgent]) -> List[BaseTool]:
"""Implement delegation tools support"""
agent_tools = AgentTools(agents=agents)
tools = agent_tools.tools()
return tools
def configure_structured_output(self, task) -> None:
"""Configure the structured output for the specific agent implementation.
Args:
structured_output: The structured output to be configured
"""
self._converter_adapter.configure_structured_output(task)
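The OpenAI adapter followed the same pattern. A parallel sketch (module path assumed; `openai-agents` must be installed per the error message above):

```python
from crewai.agents.agent_adapters.openai_agents.openai_adapter import (  # path assumed
    OpenAIAgentAdapter,
)

adapter = OpenAIAgentAdapter(
    role="Analyst",
    goal="Produce structured summaries",
    backstory="Backed by the OpenAI Agents SDK Runner.",
    model="gpt-4o-mini",  # the constructor default shown above
)
```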


@@ -1,91 +0,0 @@
import inspect
from typing import Any, List, Optional
from agents import FunctionTool, Tool
from crewai.agents.agent_adapters.base_tool_adapter import BaseToolAdapter
from crewai.tools import BaseTool
class OpenAIAgentToolAdapter(BaseToolAdapter):
"""Adapter for OpenAI Assistant tools"""
def __init__(self, tools: Optional[List[BaseTool]] = None):
self.original_tools = tools or []
def configure_tools(self, tools: List[BaseTool]) -> None:
"""Configure tools for the OpenAI Assistant"""
if self.original_tools:
all_tools = tools + self.original_tools
else:
all_tools = tools
if all_tools:
self.converted_tools = self._convert_tools_to_openai_format(all_tools)
def _convert_tools_to_openai_format(
self, tools: Optional[List[BaseTool]]
) -> List[Tool]:
"""Convert CrewAI tools to OpenAI Assistant tool format"""
if not tools:
return []
def sanitize_tool_name(name: str) -> str:
"""Convert tool name to match OpenAI's required pattern"""
import re
sanitized = re.sub(r"[^a-zA-Z0-9_-]", "_", name).lower()
return sanitized
def create_tool_wrapper(tool: BaseTool):
"""Create a wrapper function that handles the OpenAI function tool interface"""
async def wrapper(context_wrapper: Any, arguments: Any) -> Any:
# Get the parameter name from the schema
param_name = list(
tool.args_schema.model_json_schema()["properties"].keys()
)[0]
# Handle different argument types
if isinstance(arguments, dict):
args_dict = arguments
elif isinstance(arguments, str):
try:
import json
args_dict = json.loads(arguments)
except json.JSONDecodeError:
args_dict = {param_name: arguments}
else:
args_dict = {param_name: str(arguments)}
# Run the tool with the processed arguments
output = tool._run(**args_dict)
# Await if the tool returned a coroutine
if inspect.isawaitable(output):
result = await output
else:
result = output
# Ensure the result is JSON serializable
if isinstance(result, (dict, list, str, int, float, bool, type(None))):
return result
return str(result)
return wrapper
openai_tools = []
for tool in tools:
schema = tool.args_schema.model_json_schema()
schema.update({"additionalProperties": False, "type": "object"})
openai_tool = FunctionTool(
name=sanitize_tool_name(tool.name),
description=tool.description,
params_json_schema=schema,
on_invoke_tool=create_tool_wrapper(tool),
)
openai_tools.append(openai_tool)
return openai_tools


@@ -1,122 +0,0 @@
import json
import re
from crewai.agents.agent_adapters.base_converter_adapter import BaseConverterAdapter
from crewai.utilities.converter import generate_model_description
from crewai.utilities.i18n import I18N
class OpenAIConverterAdapter(BaseConverterAdapter):
"""
Adapter for handling structured output conversion in OpenAI agents.
This adapter enhances the OpenAI agent to handle structured output formats
and post-processes the results when needed.
Attributes:
_output_format: The expected output format (json, pydantic, or None)
_schema: The schema description for the expected output
_output_model: The Pydantic model for the output
"""
def __init__(self, agent_adapter):
"""Initialize the converter adapter with a reference to the agent adapter"""
self.agent_adapter = agent_adapter
self._output_format = None
self._schema = None
self._output_model = None
def configure_structured_output(self, task) -> None:
"""
Configure the structured output for OpenAI agent based on task requirements.
Args:
task: The task containing output format requirements
"""
# Reset configuration
self._output_format = None
self._schema = None
self._output_model = None
# If no structured output is required, return early
if not (task.output_json or task.output_pydantic):
return
# Configure based on task output format
if task.output_json:
self._output_format = "json"
self._schema = generate_model_description(task.output_json)
self.agent_adapter._openai_agent.output_type = task.output_json
self._output_model = task.output_json
elif task.output_pydantic:
self._output_format = "pydantic"
self._schema = generate_model_description(task.output_pydantic)
self.agent_adapter._openai_agent.output_type = task.output_pydantic
self._output_model = task.output_pydantic
def enhance_system_prompt(self, base_prompt: str) -> str:
"""
Enhance the base system prompt with structured output requirements if needed.
Args:
base_prompt: The original system prompt
Returns:
Enhanced system prompt with output format instructions if needed
"""
if not self._output_format:
return base_prompt
output_schema = (
I18N()
.slice("formatted_task_instructions")
.format(output_format=self._schema)
)
return f"{base_prompt}\n\n{output_schema}"
def post_process_result(self, result: str) -> str:
"""
Post-process the result to ensure it matches the expected format.
This method attempts to extract valid JSON from the result if necessary.
Args:
result: The raw result from the agent
Returns:
Processed result conforming to the expected output format
"""
if not self._output_format:
return result
# Try to extract valid JSON if it's wrapped in code blocks or other text
if isinstance(result, str) and self._output_format in ["json", "pydantic"]:
# First, try to parse as is
try:
json.loads(result)
return result
except json.JSONDecodeError:
# Try to extract JSON from markdown code blocks
code_block_pattern = r"```(?:json)?\s*([\s\S]*?)```"
code_blocks = re.findall(code_block_pattern, result)
for block in code_blocks:
try:
json.loads(block.strip())
return block.strip()
except json.JSONDecodeError:
continue
# Try to extract any JSON-like structure
json_pattern = r"(\{[\s\S]*\})"
json_matches = re.findall(json_pattern, result, re.DOTALL)
for match in json_matches:
try:
json.loads(match)
return match
except json.JSONDecodeError:
continue
# If all extraction attempts fail, return the original
return str(result)


@@ -2,7 +2,7 @@ import uuid
 from abc import ABC, abstractmethod
 from copy import copy as shallow_copy
 from hashlib import md5
-from typing import Any, Callable, Dict, List, Optional, TypeVar
+from typing import Any, Dict, List, Optional, TypeVar
 from pydantic import (
     UUID4,
@@ -25,7 +25,6 @@ from crewai.tools.base_tool import BaseTool, Tool
 from crewai.utilities import I18N, Logger, RPMController
 from crewai.utilities.config import process_config
 from crewai.utilities.converter import Converter
-from crewai.utilities.string_utils import interpolate_only
 T = TypeVar("T", bound="BaseAgent")
@@ -62,6 +61,8 @@ class BaseAgent(ABC, BaseModel):
         Abstract method to execute a task.
     create_agent_executor(tools=None) -> None:
         Abstract method to create an agent executor.
+    _parse_tools(tools: List[BaseTool]) -> List[Any]:
+        Abstract method to parse tools.
     get_delegation_tools(agents: List["BaseAgent"]):
         Abstract method to set the agents task tools for handling delegation and question asking to other agents in crew.
     get_output_converter(llm, model, instructions):
@@ -70,6 +71,8 @@ class BaseAgent(ABC, BaseModel):
         Interpolate inputs into the agent description and backstory.
     set_cache_handler(cache_handler: CacheHandler) -> None:
         Set the cache handler for the agent.
+    increment_formatting_errors() -> None:
+        Increment formatting errors.
     copy() -> "BaseAgent":
         Create a copy of the agent.
     set_rpm_controller(rpm_controller: RPMController) -> None:
@@ -87,6 +90,9 @@ class BaseAgent(ABC, BaseModel):
     _original_backstory: Optional[str] = PrivateAttr(default=None)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
+    formatting_errors: int = Field(
+        default=0, description="Number of formatting errors."
+    )
     role: str = Field(description="Role of the agent")
     goal: str = Field(description="Objective of the agent")
     backstory: str = Field(description="Backstory of the agent")
@@ -128,9 +134,6 @@ class BaseAgent(ABC, BaseModel):
         default_factory=ToolsHandler,
         description="An instance of the ToolsHandler class.",
     )
-    tools_results: List[Dict[str, Any]] = Field(
-        default=[], description="Results of the tools used by the agent."
-    )
     max_tokens: Optional[int] = Field(
         default=None, description="Maximum number of tokens for the agent's execution."
     )
@@ -149,12 +152,6 @@
     security_config: SecurityConfig = Field(
         default_factory=SecurityConfig,
         description="Security configuration for the agent, including fingerprinting.",
     )
-    callbacks: List[Callable] = Field(
-        default=[], description="Callbacks to be used for the agent"
-    )
-    adapted_agent: bool = Field(
-        default=False, description="Whether the agent is adapted"
-    )
     @model_validator(mode="before")
     @classmethod
@@ -171,15 +168,15 @@
tool meets these criteria, it is processed and added to the list of tool meets these criteria, it is processed and added to the list of
tools. Otherwise, a ValueError is raised. tools. Otherwise, a ValueError is raised.
""" """
if not tools:
return []
processed_tools = [] processed_tools = []
required_attrs = ["name", "func", "description"]
for tool in tools: for tool in tools:
if isinstance(tool, BaseTool): if isinstance(tool, BaseTool):
processed_tools.append(tool) processed_tools.append(tool)
elif all(hasattr(tool, attr) for attr in required_attrs): elif (
hasattr(tool, "name")
and hasattr(tool, "func")
and hasattr(tool, "description")
):
# Tool has the required attributes, create a Tool instance # Tool has the required attributes, create a Tool instance
processed_tools.append(Tool.from_langchain(tool)) processed_tools.append(Tool.from_langchain(tool))
else: else:
@@ -256,11 +253,22 @@ class BaseAgent(ABC, BaseModel):
def create_agent_executor(self, tools=None) -> None: def create_agent_executor(self, tools=None) -> None:
pass pass
@abstractmethod
def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]:
pass
@abstractmethod @abstractmethod
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]: def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
"""Set the task tools that init BaseAgenTools class.""" """Set the task tools that init BaseAgenTools class."""
pass pass
@abstractmethod
def get_output_converter(
self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
) -> Converter:
"""Get the converter class for the agent to create json/pydantic outputs."""
pass
def copy(self: T) -> T: # type: ignore # Signature of "copy" incompatible with supertype "BaseModel" def copy(self: T) -> T: # type: ignore # Signature of "copy" incompatible with supertype "BaseModel"
"""Create a deep copy of the Agent.""" """Create a deep copy of the Agent."""
exclude = { exclude = {
@@ -325,15 +333,9 @@ class BaseAgent(ABC, BaseModel):
self._original_backstory = self.backstory self._original_backstory = self.backstory
if inputs: if inputs:
self.role = interpolate_only( self.role = self._original_role.format(**inputs)
input_string=self._original_role, inputs=inputs self.goal = self._original_goal.format(**inputs)
) self.backstory = self._original_backstory.format(**inputs)
self.goal = interpolate_only(
input_string=self._original_goal, inputs=inputs
)
self.backstory = interpolate_only(
input_string=self._original_backstory, inputs=inputs
)
def set_cache_handler(self, cache_handler: CacheHandler) -> None: def set_cache_handler(self, cache_handler: CacheHandler) -> None:
"""Set the cache handler for the agent. """Set the cache handler for the agent.
@@ -347,6 +349,9 @@ class BaseAgent(ABC, BaseModel):
self.tools_handler.cache = cache_handler self.tools_handler.cache = cache_handler
self.create_agent_executor() self.create_agent_executor()
def increment_formatting_errors(self) -> None:
self.formatting_errors += 1
def set_rpm_controller(self, rpm_controller: RPMController) -> None: def set_rpm_controller(self, rpm_controller: RPMController) -> None:
"""Set the rpm controller for the agent. """Set the rpm controller for the agent.

View File

@@ -1,5 +1,5 @@
import time import time
from typing import TYPE_CHECKING from typing import TYPE_CHECKING, Optional
from crewai.memory.entity.entity_memory_item import EntityMemoryItem from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
@@ -15,9 +15,9 @@ if TYPE_CHECKING:
class CrewAgentExecutorMixin: class CrewAgentExecutorMixin:
crew: "Crew" crew: Optional["Crew"]
agent: "BaseAgent" agent: Optional["BaseAgent"]
task: "Task" task: Optional["Task"]
iterations: int iterations: int
max_iter: int max_iter: int
_i18n: I18N _i18n: I18N
@@ -47,31 +47,11 @@ class CrewAgentExecutorMixin:
print(f"Failed to add to short term memory: {e}") print(f"Failed to add to short term memory: {e}")
pass pass
def _create_external_memory(self, output) -> None:
"""Create and save a external-term memory item if conditions are met."""
if (
self.crew
and self.agent
and self.task
and hasattr(self.crew, "_external_memory")
and self.crew._external_memory
):
try:
self.crew._external_memory.save(
value=output.text,
metadata={
"description": self.task.description,
},
agent=self.agent.role,
)
except Exception as e:
print(f"Failed to add to external memory: {e}")
pass
def _create_long_term_memory(self, output) -> None: def _create_long_term_memory(self, output) -> None:
"""Create and save long-term and entity memory items based on evaluation.""" """Create and save long-term and entity memory items based on evaluation."""
if ( if (
self.crew self.crew
and self.crew.memory
and self.crew._long_term_memory and self.crew._long_term_memory
and self.crew._entity_memory and self.crew._entity_memory
and self.task and self.task
@@ -113,15 +93,6 @@ class CrewAgentExecutorMixin:
except Exception as e: except Exception as e:
print(f"Failed to add to long term memory: {e}") print(f"Failed to add to long term memory: {e}")
pass pass
elif (
self.crew
and self.crew._long_term_memory
and self.crew._entity_memory is None
):
self._printer.print(
content="Long term memory is enabled, but entity memory is not enabled. Please configure entity memory or set memory=True to automatically enable it.",
color="bold_yellow",
)
def _ask_human_input(self, final_answer: str) -> str: def _ask_human_input(self, final_answer: str) -> str:
"""Prompt human input with mode-appropriate messaging.""" """Prompt human input with mode-appropriate messaging."""

View File

@@ -1,40 +1,42 @@
import json import json
import re import re
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union from typing import Any, Callable, Dict, List, Optional, Union
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.parser import ( from crewai.agents.parser import (
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
AgentAction, AgentAction,
AgentFinish, AgentFinish,
CrewAgentParser,
OutputParserException, OutputParserException,
) )
from crewai.agents.tools_handler import ToolsHandler from crewai.agents.tools_handler import ToolsHandler
from crewai.llm import BaseLLM from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.tools.tool_types import ToolResult
from crewai.utilities import I18N, Printer from crewai.utilities import I18N, Printer
from crewai.utilities.agent_utils import (
enforce_rpm_limit,
format_message_for_llm,
get_llm_response,
handle_agent_action_core,
handle_context_length,
handle_max_iterations_exceeded,
handle_output_parser_exception,
handle_unknown_error,
has_reached_max_iterations,
is_context_length_exceeded,
process_llm_response,
show_agent_logs,
)
from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
from crewai.utilities.events import (
ToolUsageErrorEvent,
ToolUsageStartedEvent,
crewai_event_bus,
)
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)
from crewai.utilities.logger import Logger from crewai.utilities.logger import Logger
from crewai.utilities.tool_utils import execute_tool_and_check_finality
from crewai.utilities.training_handler import CrewTrainingHandler from crewai.utilities.training_handler import CrewTrainingHandler
@dataclass
class ToolResult:
result: Any
result_as_answer: bool
class CrewAgentExecutor(CrewAgentExecutorMixin): class CrewAgentExecutor(CrewAgentExecutorMixin):
_logger: Logger = Logger() _logger: Logger = Logger()
@@ -46,7 +48,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
agent: BaseAgent, agent: BaseAgent,
prompt: dict[str, str], prompt: dict[str, str],
max_iter: int, max_iter: int,
tools: List[CrewStructuredTool], tools: List[BaseTool],
tools_names: str, tools_names: str,
stop_words: List[str], stop_words: List[str],
tools_description: str, tools_description: str,
@@ -59,7 +61,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
callbacks: List[Any] = [], callbacks: List[Any] = [],
): ):
self._i18n: I18N = I18N() self._i18n: I18N = I18N()
self.llm: BaseLLM = llm self.llm: LLM = llm
self.task = task self.task = task
self.agent = agent self.agent = agent
self.crew = crew self.crew = crew
@@ -82,27 +84,21 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self.messages: List[Dict[str, str]] = [] self.messages: List[Dict[str, str]] = []
self.iterations = 0 self.iterations = 0
self.log_error_after = 3 self.log_error_after = 3
self.tool_name_to_tool_map: Dict[str, Union[CrewStructuredTool, BaseTool]] = { self.tool_name_to_tool_map: Dict[str, BaseTool] = {
tool.name: tool for tool in self.tools tool.name: tool for tool in self.tools
} }
existing_stop = self.llm.stop or [] self.stop = stop_words
self.llm.stop = list( self.llm.stop = list(set(self.llm.stop + self.stop))
set(
existing_stop + self.stop
if isinstance(existing_stop, list)
else self.stop
)
)
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]: def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
if "system" in self.prompt: if "system" in self.prompt:
system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs) system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs) user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)
self.messages.append(format_message_for_llm(system_prompt, role="system")) self.messages.append(self._format_msg(system_prompt, role="system"))
self.messages.append(format_message_for_llm(user_prompt)) self.messages.append(self._format_msg(user_prompt))
else: else:
user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs) user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
self.messages.append(format_message_for_llm(user_prompt)) self.messages.append(self._format_msg(user_prompt))
self._show_start_logs() self._show_start_logs()
@@ -117,7 +113,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
) )
raise raise
except Exception as e: except Exception as e:
handle_unknown_error(self._printer, e) self._handle_unknown_error(e)
if e.__class__.__module__.startswith("litellm"): if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors # Do not retry on litellm errors
raise e raise e
@@ -129,7 +125,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._create_short_term_memory(formatted_answer) self._create_short_term_memory(formatted_answer)
self._create_long_term_memory(formatted_answer) self._create_long_term_memory(formatted_answer)
self._create_external_memory(formatted_answer)
return {"output": formatted_answer.output} return {"output": formatted_answer.output}
def _invoke_loop(self) -> AgentFinish: def _invoke_loop(self) -> AgentFinish:
@@ -140,51 +135,20 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
formatted_answer = None formatted_answer = None
while not isinstance(formatted_answer, AgentFinish): while not isinstance(formatted_answer, AgentFinish):
try: try:
if has_reached_max_iterations(self.iterations, self.max_iter): if self._has_reached_max_iterations():
formatted_answer = handle_max_iterations_exceeded( formatted_answer = self._handle_max_iterations_exceeded(
formatted_answer, formatted_answer
printer=self._printer,
i18n=self._i18n,
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
) )
break
enforce_rpm_limit(self.request_within_rpm_limit) self._enforce_rpm_limit()
answer = get_llm_response( answer = self._get_llm_response()
llm=self.llm, formatted_answer = self._process_llm_response(answer)
messages=self.messages,
callbacks=self.callbacks,
printer=self._printer,
)
formatted_answer = process_llm_response(answer, self.use_stop_words)
if isinstance(formatted_answer, AgentAction): if isinstance(formatted_answer, AgentAction):
# Extract agent fingerprint if available tool_result = self._execute_tool_and_check_finality(
fingerprint_context = {} formatted_answer
if (
self.agent
and hasattr(self.agent, "security_config")
and hasattr(self.agent.security_config, "fingerprint")
):
fingerprint_context = {
"agent_fingerprint": str(
self.agent.security_config.fingerprint
)
}
tool_result = execute_tool_and_check_finality(
agent_action=formatted_answer,
fingerprint_context=fingerprint_context,
tools=self.tools,
i18n=self._i18n,
agent_key=self.agent.key if self.agent else None,
agent_role=self.agent.role if self.agent else None,
tools_handler=self.tools_handler,
task=self.task,
agent=self.agent,
function_calling_llm=self.function_calling_llm,
) )
formatted_answer = self._handle_agent_action( formatted_answer = self._handle_agent_action(
formatted_answer, tool_result formatted_answer, tool_result
@@ -194,30 +158,17 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._append_message(formatted_answer.text, role="assistant") self._append_message(formatted_answer.text, role="assistant")
except OutputParserException as e: except OutputParserException as e:
formatted_answer = handle_output_parser_exception( formatted_answer = self._handle_output_parser_exception(e)
e=e,
messages=self.messages,
iterations=self.iterations,
log_error_after=self.log_error_after,
printer=self._printer,
)
except Exception as e: except Exception as e:
if e.__class__.__module__.startswith("litellm"): if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors # Do not retry on litellm errors
raise e raise e
if is_context_length_exceeded(e): if self._is_context_length_exceeded(e):
handle_context_length( self._handle_context_length()
respect_context_window=self.respect_context_window,
printer=self._printer,
messages=self.messages,
llm=self.llm,
callbacks=self.callbacks,
i18n=self._i18n,
)
continue continue
else: else:
handle_unknown_error(self._printer, e) self._handle_unknown_error(e)
raise e raise e
finally: finally:
self.iterations += 1 self.iterations += 1
@@ -230,27 +181,89 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self._show_logs(formatted_answer) self._show_logs(formatted_answer)
return formatted_answer return formatted_answer
def _handle_unknown_error(self, exception: Exception) -> None:
"""Handle unknown errors by informing the user."""
self._printer.print(
content="An unknown error occurred. Please check the details below.",
color="red",
)
self._printer.print(
content=f"Error details: {exception}",
color="red",
)
def _has_reached_max_iterations(self) -> bool:
"""Check if the maximum number of iterations has been reached."""
return self.iterations >= self.max_iter
def _enforce_rpm_limit(self) -> None:
"""Enforce the requests per minute (RPM) limit if applicable."""
if self.request_within_rpm_limit:
self.request_within_rpm_limit()
def _get_llm_response(self) -> str:
"""Call the LLM and return the response, handling any invalid responses."""
try:
answer = self.llm.call(
self.messages,
callbacks=self.callbacks,
)
except Exception as e:
self._printer.print(
content=f"Error during LLM call: {e}",
color="red",
)
raise e
if not answer:
self._printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")
return answer
def _process_llm_response(self, answer: str) -> Union[AgentAction, AgentFinish]:
"""Process the LLM response and format it into an AgentAction or AgentFinish."""
if not self.use_stop_words:
try:
# Preliminary parsing to check for errors.
self._format_answer(answer)
except OutputParserException as e:
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
answer = answer.split("Observation:")[0].strip()
return self._format_answer(answer)
def _handle_agent_action( def _handle_agent_action(
self, formatted_answer: AgentAction, tool_result: ToolResult self, formatted_answer: AgentAction, tool_result: ToolResult
) -> Union[AgentAction, AgentFinish]: ) -> Union[AgentAction, AgentFinish]:
"""Handle the AgentAction, execute tools, and process the results.""" """Handle the AgentAction, execute tools, and process the results."""
# Special case for add_image_tool
add_image_tool = self._i18n.tools("add_image") add_image_tool = self._i18n.tools("add_image")
if ( if (
isinstance(add_image_tool, dict) isinstance(add_image_tool, dict)
and formatted_answer.tool.casefold().strip() and formatted_answer.tool.casefold().strip()
== add_image_tool.get("name", "").casefold().strip() == add_image_tool.get("name", "").casefold().strip()
): ):
self.messages.append({"role": "assistant", "content": tool_result.result}) self.messages.append(tool_result.result)
return formatted_answer return formatted_answer # Continue the loop
return handle_agent_action_core( if self.step_callback:
formatted_answer=formatted_answer, self.step_callback(tool_result)
tool_result=tool_result,
messages=self.messages, formatted_answer.text += f"\nObservation: {tool_result.result}"
step_callback=self.step_callback, formatted_answer.result = tool_result.result
show_logs=self._show_logs,
) if tool_result.result_as_answer:
return AgentFinish(
thought="",
output=tool_result.result,
text=formatted_answer.text,
)
self._show_logs(formatted_answer)
return formatted_answer
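
Note: both sides of _handle_agent_action implement the same contract: append the tool observation to the action text, and let a tool flagged result_as_answer short-circuit the loop by promoting its output to the final answer. A toy illustration of that contract (all names here are illustrative, not the executor's API):

from dataclasses import dataclass

@dataclass
class ToolResult:
    result: str
    result_as_answer: bool

def handle_agent_action(action_text: str, tool_result: ToolResult):
    # Record the observation so the next LLM call can see it.
    action_text += f"\nObservation: {tool_result.result}"
    # A result_as_answer tool ends the loop with its raw output.
    if tool_result.result_as_answer:
        return ("finish", tool_result.result)
    return ("continue", action_text)

print(handle_agent_action("Action: search", ToolResult("42", result_as_answer=True)))
print(handle_agent_action("Action: search", ToolResult("42", result_as_answer=False)))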
def _invoke_step_callback(self, formatted_answer) -> None: def _invoke_step_callback(self, formatted_answer) -> None:
"""Invoke the step callback if it exists.""" """Invoke the step callback if it exists."""
@@ -259,33 +272,151 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
def _append_message(self, text: str, role: str = "assistant") -> None: def _append_message(self, text: str, role: str = "assistant") -> None:
"""Append a message to the message list with the given role.""" """Append a message to the message list with the given role."""
self.messages.append(format_message_for_llm(text, role=role)) self.messages.append(self._format_msg(text, role=role))
def _handle_output_parser_exception(self, e: OutputParserException) -> AgentAction:
"""Handle OutputParserException by updating messages and formatted_answer."""
self.messages.append({"role": "user", "content": e.error})
formatted_answer = AgentAction(
text=e.error,
tool="",
tool_input="",
thought="",
)
if self.iterations > self.log_error_after:
self._printer.print(
content=f"Error parsing LLM output, agent will retry: {e.error}",
color="red",
)
return formatted_answer
def _is_context_length_exceeded(self, exception: Exception) -> bool:
"""Check if the exception is due to context length exceeding."""
return LLMContextLengthExceededException(
str(exception)
)._is_context_limit_error(str(exception))
def _show_start_logs(self): def _show_start_logs(self):
"""Show logs for the start of agent execution."""
if self.agent is None: if self.agent is None:
raise ValueError("Agent cannot be None") raise ValueError("Agent cannot be None")
show_agent_logs( if self.agent.verbose or (
printer=self._printer, hasattr(self, "crew") and getattr(self.crew, "verbose", False)
agent_role=self.agent.role, ):
task_description=( agent_role = self.agent.role.split("\n")[0]
self._printer.print(
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
description = (
getattr(self.task, "description") if self.task else "Not Found" getattr(self.task, "description") if self.task else "Not Found"
), )
verbose=self.agent.verbose self._printer.print(
or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)), content=f"\033[95m## Task:\033[00m \033[92m{description}\033[00m"
) )
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]): def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
"""Show logs for the agent's execution."""
if self.agent is None: if self.agent is None:
raise ValueError("Agent cannot be None") raise ValueError("Agent cannot be None")
show_agent_logs( if self.agent.verbose or (
printer=self._printer, hasattr(self, "crew") and getattr(self.crew, "verbose", False)
agent_role=self.agent.role, ):
formatted_answer=formatted_answer, agent_role = self.agent.role.split("\n")[0]
verbose=self.agent.verbose if isinstance(formatted_answer, AgentAction):
or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)), thought = re.sub(r"\n+", "\n", formatted_answer.thought)
) formatted_json = json.dumps(
formatted_answer.tool_input,
indent=2,
ensure_ascii=False,
)
self._printer.print(
content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
if thought and thought != "":
self._printer.print(
content=f"\033[95m## Thought:\033[00m \033[92m{thought}\033[00m"
)
self._printer.print(
content=f"\033[95m## Using tool:\033[00m \033[92m{formatted_answer.tool}\033[00m"
)
self._printer.print(
content=f"\033[95m## Tool Input:\033[00m \033[92m\n{formatted_json}\033[00m"
)
self._printer.print(
content=f"\033[95m## Tool Output:\033[00m \033[92m\n{formatted_answer.result}\033[00m"
)
elif isinstance(formatted_answer, AgentFinish):
self._printer.print(
content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
self._printer.print(
content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
)
def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
try:
if self.agent:
crewai_event_bus.emit(
self,
event=ToolUsageStartedEvent(
agent_key=self.agent.key,
agent_role=self.agent.role,
tool_name=agent_action.tool,
tool_args=agent_action.tool_input,
tool_class=agent_action.tool,
),
)
tool_usage = ToolUsage(
tools_handler=self.tools_handler,
tools=self.tools,
original_tools=self.original_tools,
tools_description=self.tools_description,
tools_names=self.tools_names,
function_calling_llm=self.function_calling_llm,
task=self.task, # type: ignore[arg-type]
agent=self.agent,
action=agent_action,
)
tool_calling = tool_usage.parse_tool_calling(agent_action.text)
if isinstance(tool_calling, ToolUsageErrorException):
tool_result = tool_calling.message
return ToolResult(result=tool_result, result_as_answer=False)
else:
if tool_calling.tool_name.casefold().strip() in [
name.casefold().strip() for name in self.tool_name_to_tool_map
] or tool_calling.tool_name.casefold().replace("_", " ") in [
name.casefold().strip() for name in self.tool_name_to_tool_map
]:
tool_result = tool_usage.use(tool_calling, agent_action.text)
tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
if tool:
return ToolResult(
result=tool_result, result_as_answer=tool.result_as_answer
)
else:
tool_result = self._i18n.errors("wrong_tool_name").format(
tool=tool_calling.tool_name,
tools=", ".join([tool.name.casefold() for tool in self.tools]),
)
return ToolResult(result=tool_result, result_as_answer=False)
except Exception as e:
# TODO: drop
if self.agent:
crewai_event_bus.emit(
self,
event=ToolUsageErrorEvent( # validation error
agent_key=self.agent.key,
agent_role=self.agent.role,
tool_name=agent_action.tool,
tool_args=agent_action.tool_input,
tool_class=agent_action.tool,
error=str(e),
),
)
raise e
def _summarize_messages(self) -> None: def _summarize_messages(self) -> None:
messages_groups = [] messages_groups = []
@@ -293,33 +424,47 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
content = message["content"] content = message["content"]
cut_size = self.llm.get_context_window_size() cut_size = self.llm.get_context_window_size()
for i in range(0, len(content), cut_size): for i in range(0, len(content), cut_size):
messages_groups.append({"content": content[i : i + cut_size]}) messages_groups.append(content[i : i + cut_size])
summarized_contents = [] summarized_contents = []
for group in messages_groups: for group in messages_groups:
summary = self.llm.call( summary = self.llm.call(
[ [
format_message_for_llm( self._format_msg(
self._i18n.slice("summarizer_system_message"), role="system" self._i18n.slice("summarizer_system_message"), role="system"
), ),
format_message_for_llm( self._format_msg(
self._i18n.slice("summarize_instruction").format( self._i18n.slice("summarize_instruction").format(group=group),
group=group["content"]
),
), ),
], ],
callbacks=self.callbacks, callbacks=self.callbacks,
) )
summarized_contents.append({"content": str(summary)}) summarized_contents.append(summary)
merged_summary = " ".join(content["content"] for content in summarized_contents) merged_summary = " ".join(str(content) for content in summarized_contents)
self.messages = [ self.messages = [
format_message_for_llm( self._format_msg(
self._i18n.slice("summary").format(merged_summary=merged_summary) self._i18n.slice("summary").format(merged_summary=merged_summary)
) )
] ]
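
Note: _summarize_messages keeps the same shape on both sides: slice the oversized history into context-window-sized chunks, summarize each chunk, then merge the partial summaries into a single message. A runnable toy of that flow, with a stand-in for the LLM call:

def chunk_and_summarize(call_llm, content: str, window: int) -> str:
    # Cut the content into window-sized pieces so each fits the model,
    # summarize the pieces independently, then merge the summaries.
    groups = [content[i : i + window] for i in range(0, len(content), window)]
    summaries = [str(call_llm(f"Summarize this:\n{group}")) for group in groups]
    return " ".join(summaries)

# Toy "LLM" that just keeps the first 12 characters of its prompt:
print(chunk_and_summarize(lambda prompt: prompt[:12], "a" * 100, window=40))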
def _handle_context_length(self) -> None:
if self.respect_context_window:
self._printer.print(
content="Context length exceeded. Summarizing content to fit the model context window.",
color="yellow",
)
self._summarize_messages()
else:
self._printer.print(
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
raise SystemExit(
"Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
)
def _handle_crew_training_output( def _handle_crew_training_output(
self, result: AgentFinish, human_feedback: Optional[str] = None self, result: AgentFinish, human_feedback: Optional[str] = None
) -> None: ) -> None:
@@ -372,6 +517,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
prompt = prompt.replace("{tools}", inputs["tools"]) prompt = prompt.replace("{tools}", inputs["tools"])
return prompt return prompt
def _format_answer(self, answer: str) -> Union[AgentAction, AgentFinish]:
return CrewAgentParser(agent=self.agent).parse(answer)
def _format_msg(self, prompt: str, role: str = "user") -> Dict[str, str]:
prompt = prompt.rstrip()
return {"role": role, "content": prompt}
def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish: def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
"""Handle human feedback with different flows for training vs regular use. """Handle human feedback with different flows for training vs regular use.
@@ -398,7 +550,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
"""Process feedback for training scenarios with single iteration.""" """Process feedback for training scenarios with single iteration."""
self._handle_crew_training_output(initial_answer, feedback) self._handle_crew_training_output(initial_answer, feedback)
self.messages.append( self.messages.append(
format_message_for_llm( self._format_msg(
self._i18n.slice("feedback_instructions").format(feedback=feedback) self._i18n.slice("feedback_instructions").format(feedback=feedback)
) )
) )
@@ -427,7 +579,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
def _process_feedback_iteration(self, feedback: str) -> AgentFinish: def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
"""Process a single feedback iteration.""" """Process a single feedback iteration."""
self.messages.append( self.messages.append(
format_message_for_llm( self._format_msg(
self._i18n.slice("feedback_instructions").format(feedback=feedback) self._i18n.slice("feedback_instructions").format(feedback=feedback)
) )
) )
@@ -452,3 +604,45 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
), ),
color="red", color="red",
) )
def _handle_max_iterations_exceeded(self, formatted_answer):
"""
Handles the case when the maximum number of iterations is exceeded.
Performs one more LLM call to get the final answer.
Parameters:
formatted_answer: The last formatted answer from the agent.
Returns:
The final formatted answer after exceeding max iterations.
"""
self._printer.print(
content="Maximum iterations reached. Requesting final answer.",
color="yellow",
)
if formatted_answer and hasattr(formatted_answer, "text"):
assistant_message = (
formatted_answer.text + f'\n{self._i18n.errors("force_final_answer")}'
)
else:
assistant_message = self._i18n.errors("force_final_answer")
self.messages.append(self._format_msg(assistant_message, role="assistant"))
# Perform one more LLM call to get the final answer
answer = self.llm.call(
self.messages,
callbacks=self.callbacks,
)
if answer is None or answer == "":
self._printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")
formatted_answer = self._format_answer(answer)
# Return the formatted answer, regardless of its type
return formatted_answer
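
Note: _handle_max_iterations_exceeded encodes a simple recovery policy: once the iteration cap is hit, append a force-final-answer instruction and spend exactly one more LLM call. A condensed sketch of that policy; the forcing string is paraphrased rather than the real i18n slice:

FORCE_FINAL_ANSWER = "You MUST give your best final answer now."

def run_with_iteration_cap(call_llm, messages: list, max_iter: int = 3) -> str:
    iterations = 0
    answer = call_llm(messages)
    while not answer.startswith("Final Answer:"):
        if iterations >= max_iter:
            # Cap reached: demand a final answer with one last call.
            messages.append({"role": "assistant", "content": FORCE_FINAL_ANSWER})
            answer = call_llm(messages)
            if not answer:
                raise ValueError("Invalid response from LLM call - None or empty.")
            break
        messages.append({"role": "assistant", "content": answer})
        answer = call_llm(messages)
        iterations += 1
    return answer

def fake_llm(messages):
    # Never finishes on its own, but obeys the forcing prompt.
    if any(FORCE_FINAL_ANSWER in m["content"] for m in messages):
        return "Final Answer: done"
    return "Thought: still working"

print(run_with_iteration_cap(fake_llm, []))  # Final Answer: done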

View File

@@ -1,5 +1,5 @@
import re import re
from typing import Any, Optional, Union from typing import Any, Union
from json_repair import repair_json from json_repair import repair_json
@@ -67,23 +67,9 @@ class CrewAgentParser:
_i18n: I18N = I18N() _i18n: I18N = I18N()
agent: Any = None agent: Any = None
def __init__(self, agent: Optional[Any] = None): def __init__(self, agent: Any):
self.agent = agent self.agent = agent
@staticmethod
def parse_text(text: str) -> Union[AgentAction, AgentFinish]:
"""
Static method to parse text into an AgentAction or AgentFinish without needing to instantiate the class.
Args:
text: The text to parse.
Returns:
Either an AgentAction or AgentFinish based on the parsed content.
"""
parser = CrewAgentParser()
return parser.parse(text)
def parse(self, text: str) -> Union[AgentAction, AgentFinish]: def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
thought = self._extract_thought(text) thought = self._extract_thought(text)
includes_answer = FINAL_ANSWER_ACTION in text includes_answer = FINAL_ANSWER_ACTION in text
@@ -91,18 +77,11 @@ class CrewAgentParser:
r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)" r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
) )
action_match = re.search(regex, text, re.DOTALL) action_match = re.search(regex, text, re.DOTALL)
if includes_answer: if action_match:
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip() if includes_answer:
# Check whether the final answer ends with triple backticks. raise OutputParserException(
if final_answer.endswith("```"): f"{FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE}"
# Count occurrences of triple backticks in the final answer. )
count = final_answer.count("```")
# If count is odd then it's an unmatched trailing set; remove it.
if count % 2 != 0:
final_answer = final_answer[:-3].rstrip()
return AgentFinish(thought, final_answer, text)
elif action_match:
action = action_match.group(1) action = action_match.group(1)
clean_action = self._clean_action(action) clean_action = self._clean_action(action)
@@ -113,19 +92,33 @@ class CrewAgentParser:
return AgentAction(thought, clean_action, safe_tool_input, text) return AgentAction(thought, clean_action, safe_tool_input, text)
elif includes_answer:
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
# Check whether the final answer ends with triple backticks.
if final_answer.endswith("```"):
# Count occurrences of triple backticks in the final answer.
count = final_answer.count("```")
# If count is odd then it's an unmatched trailing set; remove it.
if count % 2 != 0:
final_answer = final_answer[:-3].rstrip()
return AgentFinish(thought, final_answer, text)
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL): if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
self.agent.increment_formatting_errors()
raise OutputParserException( raise OutputParserException(
f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}", f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{self._i18n.slice('final_answer_format')}",
) )
elif not re.search( elif not re.search(
r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL r"[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)", text, re.DOTALL
): ):
self.agent.increment_formatting_errors()
raise OutputParserException( raise OutputParserException(
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
) )
else: else:
format = self._i18n.slice("format_without_tools") format = self._i18n.slice("format_without_tools")
error = f"{format}" error = f"{format}"
self.agent.increment_formatting_errors()
raise OutputParserException( raise OutputParserException(
error, error,
) )
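
Note: the reordering above decides which branch wins when the model emits both an Action: block and a Final Answer: in one response: the left-hand parser returns the final answer, while the right-hand version raises the combined-output error. A small sketch of the right-hand precedence, using the same action regex:

import re

FINAL_ANSWER_ACTION = "Final Answer:"
ACTION_REGEX = r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"

def classify(text: str) -> str:
    action_match = re.search(ACTION_REGEX, text, re.DOTALL)
    includes_answer = FINAL_ANSWER_ACTION in text
    if action_match:
        if includes_answer:
            # Both present: ambiguous output, reject it.
            return "error: action and final answer in one response"
        return f"action: {action_match.group(1).strip()}"
    if includes_answer:
        return "finish: " + text.split(FINAL_ANSWER_ACTION)[-1].strip()
    return "error: missing action"

print(classify("Thought: x\nAction: search\nAction Input: {}"))  # action: search
print(classify("Thought: x\nFinal Answer: 42"))                  # finish: 42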

View File

@@ -91,12 +91,6 @@ ENV_VARS = {
"key_name": "CEREBRAS_API_KEY", "key_name": "CEREBRAS_API_KEY",
}, },
], ],
"huggingface": [
{
"prompt": "Enter your Huggingface API key (HF_TOKEN) (press Enter to skip)",
"key_name": "HF_TOKEN",
},
],
"sambanova": [ "sambanova": [
{ {
"prompt": "Enter your SambaNovaCloud API key (press Enter to skip)", "prompt": "Enter your SambaNovaCloud API key (press Enter to skip)",
@@ -112,7 +106,6 @@ PROVIDERS = [
"gemini", "gemini",
"nvidia_nim", "nvidia_nim",
"groq", "groq",
"huggingface",
"ollama", "ollama",
"watson", "watson",
"bedrock", "bedrock",
@@ -277,12 +270,6 @@ MODELS = {
"bedrock/mistral.mistral-7b-instruct-v0:2", "bedrock/mistral.mistral-7b-instruct-v0:2",
"bedrock/mistral.mixtral-8x7b-instruct-v0:1", "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
], ],
"huggingface": [
"huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
"huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1",
"huggingface/tiiuae/falcon-180B-chat",
"huggingface/google/gemma-7b-it",
],
"sambanova": [ "sambanova": [
"sambanova/Meta-Llama-3.3-70B-Instruct", "sambanova/Meta-Llama-3.3-70B-Instruct",
"sambanova/QwQ-32B-Preview", "sambanova/QwQ-32B-Preview",

View File

@@ -14,7 +14,7 @@ from packaging import version
from crewai.cli.utils import read_toml from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version from crewai.cli.version import get_crewai_version
from crewai.crew import Crew from crewai.crew import Crew
from crewai.llm import LLM, BaseLLM from crewai.llm import LLM
from crewai.types.crew_chat import ChatInputField, ChatInputs from crewai.types.crew_chat import ChatInputField, ChatInputs
from crewai.utilities.llm_utils import create_llm from crewai.utilities.llm_utils import create_llm
@@ -116,7 +116,7 @@ def show_loading(event: threading.Event):
print() print()
def initialize_chat_llm(crew: Crew) -> Optional[LLM | BaseLLM]: def initialize_chat_llm(crew: Crew) -> Optional[LLM]:
"""Initializes the chat LLM and handles exceptions.""" """Initializes the chat LLM and handles exceptions."""
try: try:
return create_llm(crew.chat_llm) return create_llm(crew.chat_llm)

View File

@@ -3,10 +3,6 @@ import subprocess
import click import click
# Be mindful about changing this.
# in some environments we don't use this command but instead uv sync directly
# so if you expect this to support more things, you will need to replicate it there
# ask @joaomdmoura if you are unsure
def install_crew(proxy_options: list[str]) -> None: def install_crew(proxy_options: list[str]) -> None:
""" """
Install the crew by running the UV command to lock and install. Install the crew by running the UV command to lock and install.

View File

@@ -1,7 +1,6 @@
from crewai import Agent, Crew, Process, Task from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task from crewai.project import CrewBase, agent, crew, task
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List
# If you want to run a snippet of code before or after the crew starts, # If you want to run a snippet of code before or after the crew starts,
# you can use the @before_kickoff and @after_kickoff decorators # you can use the @before_kickoff and @after_kickoff decorators
# https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators # https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators
@@ -10,26 +9,25 @@ from typing import List
class {{crew_name}}(): class {{crew_name}}():
"""{{crew_name}} crew""" """{{crew_name}} crew"""
agents: List[BaseAgent]
tasks: List[Task]
# Learn more about YAML configuration files here: # Learn more about YAML configuration files here:
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended # Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
# Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended # Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
agents_config = 'config/agents.yaml'
tasks_config = 'config/tasks.yaml'
# If you would like to add tools to your agents, you can learn more about it here: # If you would like to add tools to your agents, you can learn more about it here:
# https://docs.crewai.com/concepts/agents#agent-tools # https://docs.crewai.com/concepts/agents#agent-tools
@agent @agent
def researcher(self) -> Agent: def researcher(self) -> Agent:
return Agent( return Agent(
config=self.agents_config['researcher'], # type: ignore[index] config=self.agents_config['researcher'],
verbose=True verbose=True
) )
@agent @agent
def reporting_analyst(self) -> Agent: def reporting_analyst(self) -> Agent:
return Agent( return Agent(
config=self.agents_config['reporting_analyst'], # type: ignore[index] config=self.agents_config['reporting_analyst'],
verbose=True verbose=True
) )
@@ -39,13 +37,13 @@ class {{crew_name}}():
@task @task
def research_task(self) -> Task: def research_task(self) -> Task:
return Task( return Task(
config=self.tasks_config['research_task'], # type: ignore[index] config=self.tasks_config['research_task'],
) )
@task @task
def reporting_task(self) -> Task: def reporting_task(self) -> Task:
return Task( return Task(
config=self.tasks_config['reporting_task'], # type: ignore[index] config=self.tasks_config['reporting_task'],
output_file='report.md' output_file='report.md'
) )

View File

@@ -33,8 +33,7 @@ def train():
Train the crew for a given number of iterations. Train the crew for a given number of iterations.
""" """
inputs = { inputs = {
"topic": "AI LLMs", "topic": "AI LLMs"
'current_year': str(datetime.now().year)
} }
try: try:
{{crew_name}}().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs) {{crew_name}}().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
@@ -60,9 +59,8 @@ def test():
"topic": "AI LLMs", "topic": "AI LLMs",
"current_year": str(datetime.now().year) "current_year": str(datetime.now().year)
} }
try: try:
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), eval_llm=sys.argv[2], inputs=inputs) {{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
except Exception as e: except Exception as e:
raise Exception(f"An error occurred while testing the crew: {e}") raise Exception(f"An error occurred while testing the crew: {e}")

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13" requires-python = ">=3.10,<3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.114.0,<1.0.0" "crewai[tools]>=0.108.0,<1.0.0"
] ]
[project.scripts] [project.scripts]

View File

@@ -1,7 +1,5 @@
from crewai import Agent, Crew, Process, Task from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task from crewai.project import CrewBase, agent, crew, task
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List
# If you want to run a snippet of code before or after the crew starts, # If you want to run a snippet of code before or after the crew starts,
# you can use the @before_kickoff and @after_kickoff decorators # you can use the @before_kickoff and @after_kickoff decorators
@@ -12,9 +10,6 @@ from typing import List
class PoemCrew: class PoemCrew:
"""Poem Crew""" """Poem Crew"""
agents: List[BaseAgent]
tasks: List[Task]
# Learn more about YAML configuration files here: # Learn more about YAML configuration files here:
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended # Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
# Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended # Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
@@ -26,7 +21,7 @@ class PoemCrew:
@agent @agent
def poem_writer(self) -> Agent: def poem_writer(self) -> Agent:
return Agent( return Agent(
config=self.agents_config["poem_writer"], # type: ignore[index] config=self.agents_config["poem_writer"],
) )
# To learn more about structured task outputs, # To learn more about structured task outputs,
@@ -35,7 +30,7 @@ class PoemCrew:
@task @task
def write_poem(self) -> Task: def write_poem(self) -> Task:
return Task( return Task(
config=self.tasks_config["write_poem"], # type: ignore[index] config=self.tasks_config["write_poem"],
) )
@crew @crew

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<3.13" requires-python = ">=3.10,<3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.114.0,<1.0.0", "crewai[tools]>=0.108.0,<1.0.0",
] ]
[project.scripts] [project.scripts]

View File

@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md" readme = "README.md"
requires-python = ">=3.10,<3.13" requires-python = ">=3.10,<3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.114.0" "crewai[tools]>=0.108.0"
] ]
[tool.crewai] [tool.crewai]

View File

@@ -273,9 +273,11 @@ def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
for attr_name in dir(module): for attr_name in dir(module):
attr = getattr(module, attr_name) attr = getattr(module, attr_name)
try: try:
if callable(attr) and hasattr(attr, "crew"): if isinstance(attr, Crew) and hasattr(attr, "kickoff"):
crew_instance = attr().crew() print(
return crew_instance f"Found valid crew object in attribute '{attr_name}' at {crew_os_path}."
)
return attr
except Exception as e: except Exception as e:
print(f"Error processing attribute {attr_name}: {e}") print(f"Error processing attribute {attr_name}: {e}")

View File

@@ -6,7 +6,7 @@ import warnings
from concurrent.futures import Future from concurrent.futures import Future
from copy import copy as shallow_copy from copy import copy as shallow_copy
from hashlib import md5 from hashlib import md5
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union, cast from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
from pydantic import ( from pydantic import (
UUID4, UUID4,
@@ -26,9 +26,8 @@ from crewai.agents.cache import CacheHandler
from crewai.crews.crew_output import CrewOutput from crewai.crews.crew_output import CrewOutput
from crewai.knowledge.knowledge import Knowledge from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.llm import LLM, BaseLLM from crewai.llm import LLM
from crewai.memory.entity.entity_memory import EntityMemory from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.external.external_memory import ExternalMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.memory.user.user_memory import UserMemory from crewai.memory.user.user_memory import UserMemory
@@ -38,7 +37,7 @@ from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput from crewai.tasks.task_output import TaskOutput
from crewai.tools.agent_tools.agent_tools import AgentTools from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import BaseTool, Tool from crewai.tools.base_tool import Tool
from crewai.types.usage_metrics import UsageMetrics from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import I18N, FileHandler, Logger, RPMController from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import TRAINING_DATA_FILE from crewai.utilities.constants import TRAINING_DATA_FILE
@@ -106,7 +105,6 @@ class Crew(BaseModel):
_long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr() _long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
_entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr() _entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
_user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr() _user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()
_external_memory: Optional[InstanceOf[ExternalMemory]] = PrivateAttr()
_train: Optional[bool] = PrivateAttr(default=False) _train: Optional[bool] = PrivateAttr(default=False)
_train_iteration: Optional[int] = PrivateAttr() _train_iteration: Optional[int] = PrivateAttr()
_inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None) _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
@@ -147,10 +145,6 @@ class Crew(BaseModel):
default=None, default=None,
description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.", description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
) )
external_memory: Optional[InstanceOf[ExternalMemory]] = Field(
default=None,
description="An Instance of the ExternalMemory to be used by the Crew",
)
embedder: Optional[dict] = Field( embedder: Optional[dict] = Field(
default=None, default=None,
description="Configuration for the embedder to be used for the crew.", description="Configuration for the embedder to be used for the crew.",
@@ -159,7 +153,7 @@ class Crew(BaseModel):
default=None, default=None,
description="Metrics for the LLM usage during all tasks execution.", description="Metrics for the LLM usage during all tasks execution.",
) )
manager_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field( manager_llm: Optional[Any] = Field(
description="Language model that will run the agent.", default=None description="Language model that will run the agent.", default=None
) )
manager_agent: Optional[BaseAgent] = Field( manager_agent: Optional[BaseAgent] = Field(
@@ -193,7 +187,7 @@ class Crew(BaseModel):
default=None, default=None,
description="Maximum number of requests per minute for the crew execution to be respected.", description="Maximum number of requests per minute for the crew execution to be respected.",
) )
prompt_file: Optional[str] = Field( prompt_file: str = Field(
default=None, default=None,
description="Path to the prompt json file to be used for the crew.", description="Path to the prompt json file to be used for the crew.",
) )
@@ -205,7 +199,7 @@ class Crew(BaseModel):
default=False, default=False,
description="Plan the crew execution and add the plan to the crew.", description="Plan the crew execution and add the plan to the crew.",
) )
planning_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field( planning_llm: Optional[Any] = Field(
default=None, default=None,
description="Language model that will run the AgentPlanner if planning is True.", description="Language model that will run the AgentPlanner if planning is True.",
) )
@@ -221,7 +215,7 @@ class Crew(BaseModel):
default=None, default=None,
description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.", description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
) )
chat_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field( chat_llm: Optional[Any] = Field(
default=None, default=None,
description="LLM used to handle chatting with the crew.", description="LLM used to handle chatting with the crew.",
) )
@@ -275,51 +269,46 @@ class Crew(BaseModel):
return self return self
def _initialize_user_memory(self):
if (
self.memory_config
and "user_memory" in self.memory_config
and self.memory_config.get("provider") == "mem0"
): # Check for user_memory in config
user_memory_config = self.memory_config["user_memory"]
if isinstance(
user_memory_config, dict
): # Check if it's a configuration dict
self._user_memory = UserMemory(crew=self)
else:
raise TypeError("user_memory must be a configuration dictionary")
def _initialize_default_memories(self):
self._long_term_memory = self._long_term_memory or LongTermMemory()
self._short_term_memory = self._short_term_memory or ShortTermMemory(
crew=self,
embedder_config=self.embedder,
)
self._entity_memory = self.entity_memory or EntityMemory(
crew=self, embedder_config=self.embedder
)
@model_validator(mode="after") @model_validator(mode="after")
def create_crew_memory(self) -> "Crew": def create_crew_memory(self) -> "Crew":
"""Initialize private memory attributes.""" """Set private attributes."""
self._external_memory = (
# External memory doesn't support a default value since it was designed to be managed entirely externally
self.external_memory.set_crew(self)
if self.external_memory
else None
)
self._long_term_memory = self.long_term_memory
self._short_term_memory = self.short_term_memory
self._entity_memory = self.entity_memory
# UserMemory is going to be deprecated in the future, but we have to initialize a default value for now
self._user_memory = None
if self.memory: if self.memory:
self._initialize_default_memories() self._long_term_memory = (
self._initialize_user_memory() self.long_term_memory if self.long_term_memory else LongTermMemory()
)
self._short_term_memory = (
self.short_term_memory
if self.short_term_memory
else ShortTermMemory(
crew=self,
embedder_config=self.embedder,
)
)
self._entity_memory = (
self.entity_memory
if self.entity_memory
else EntityMemory(crew=self, embedder_config=self.embedder)
)
if (
self.memory_config and "user_memory" in self.memory_config
): # Check for user_memory in config
user_memory_config = self.memory_config["user_memory"]
if isinstance(
user_memory_config, UserMemory
): # Check if it is already an instance
self._user_memory = user_memory_config
elif isinstance(
user_memory_config, dict
): # Check if it's a configuration dict
self._user_memory = UserMemory(
crew=self, **user_memory_config
) # Initialize with config
else:
raise TypeError(
"user_memory must be a UserMemory instance or a configuration dictionary"
)
else:
self._user_memory = None # No user memory if not in config
return self return self
@model_validator(mode="after") @model_validator(mode="after")
@@ -830,12 +819,7 @@ class Crew(BaseModel):
# Determine which tools to use - task tools take precedence over agent tools # Determine which tools to use - task tools take precedence over agent tools
tools_for_task = task.tools or agent_to_use.tools or [] tools_for_task = task.tools or agent_to_use.tools or []
# Prepare tools and ensure they're compatible with task execution tools_for_task = self._prepare_tools(agent_to_use, task, tools_for_task)
tools_for_task = self._prepare_tools(
agent_to_use,
task,
cast(Union[List[Tool], List[BaseTool]], tools_for_task),
)
self._log_task_start(task, agent_to_use.role) self._log_task_start(task, agent_to_use.role)
@@ -854,7 +838,7 @@ class Crew(BaseModel):
future = task.execute_async( future = task.execute_async(
agent=agent_to_use, agent=agent_to_use,
context=context, context=context,
tools=cast(List[BaseTool], tools_for_task), tools=tools_for_task,
) )
futures.append((task, future, task_index)) futures.append((task, future, task_index))
else: else:
@@ -866,7 +850,7 @@ class Crew(BaseModel):
task_output = task.execute_sync( task_output = task.execute_sync(
agent=agent_to_use, agent=agent_to_use,
context=context, context=context,
tools=cast(List[BaseTool], tools_for_task), tools=tools_for_task,
) )
task_outputs.append(task_output) task_outputs.append(task_output)
self._process_task_result(task, task_output) self._process_task_result(task, task_output)
@@ -904,12 +888,10 @@ class Crew(BaseModel):
return None return None
def _prepare_tools( def _prepare_tools(
self, agent: BaseAgent, task: Task, tools: Union[List[Tool], List[BaseTool]] self, agent: BaseAgent, task: Task, tools: List[Tool]
) -> List[BaseTool]: ) -> List[Tool]:
# Add delegation tools if agent allows delegation # Add delegation tools if agent allows delegation
if hasattr(agent, "allow_delegation") and getattr( if agent.allow_delegation:
agent, "allow_delegation", False
):
if self.process == Process.hierarchical: if self.process == Process.hierarchical:
if self.manager_agent: if self.manager_agent:
tools = self._update_manager_tools(task, tools) tools = self._update_manager_tools(task, tools)
@@ -918,24 +900,17 @@ class Crew(BaseModel):
"Manager agent is required for hierarchical process." "Manager agent is required for hierarchical process."
) )
elif agent: elif agent and agent.allow_delegation:
tools = self._add_delegation_tools(task, tools) tools = self._add_delegation_tools(task, tools)
# Add code execution tools if agent allows code execution # Add code execution tools if agent allows code execution
if hasattr(agent, "allow_code_execution") and getattr( if agent.allow_code_execution:
agent, "allow_code_execution", False
):
tools = self._add_code_execution_tools(agent, tools) tools = self._add_code_execution_tools(agent, tools)
if ( if agent and agent.multimodal:
agent
and hasattr(agent, "multimodal")
and getattr(agent, "multimodal", False)
):
tools = self._add_multimodal_tools(agent, tools) tools = self._add_multimodal_tools(agent, tools)
# Return a List[BaseTool] which is compatible with both Task.execute_sync and Task.execute_async return tools
return cast(List[BaseTool], tools)
def _get_agent_to_use(self, task: Task) -> Optional[BaseAgent]: def _get_agent_to_use(self, task: Task) -> Optional[BaseAgent]:
if self.process == Process.hierarchical: if self.process == Process.hierarchical:
@@ -943,13 +918,11 @@ class Crew(BaseModel):
return task.agent return task.agent
def _merge_tools( def _merge_tools(
self, self, existing_tools: List[Tool], new_tools: List[Tool]
existing_tools: Union[List[Tool], List[BaseTool]], ) -> List[Tool]:
new_tools: Union[List[Tool], List[BaseTool]],
) -> List[BaseTool]:
"""Merge new tools into existing tools list, avoiding duplicates by tool name.""" """Merge new tools into existing tools list, avoiding duplicates by tool name."""
if not new_tools: if not new_tools:
return cast(List[BaseTool], existing_tools) return existing_tools
# Create mapping of tool names to new tools # Create mapping of tool names to new tools
new_tool_map = {tool.name: tool for tool in new_tools} new_tool_map = {tool.name: tool for tool in new_tools}
@@ -960,41 +933,23 @@ class Crew(BaseModel):
# Add all new tools # Add all new tools
tools.extend(new_tools) tools.extend(new_tools)
return cast(List[BaseTool], tools) return tools
def _inject_delegation_tools( def _inject_delegation_tools(
self, self, tools: List[Tool], task_agent: BaseAgent, agents: List[BaseAgent]
tools: Union[List[Tool], List[BaseTool]], ):
task_agent: BaseAgent, delegation_tools = task_agent.get_delegation_tools(agents)
agents: List[BaseAgent], return self._merge_tools(tools, delegation_tools)
) -> List[BaseTool]:
if hasattr(task_agent, "get_delegation_tools"):
delegation_tools = task_agent.get_delegation_tools(agents)
# Cast delegation_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], delegation_tools))
return cast(List[BaseTool], tools)
def _add_multimodal_tools( def _add_multimodal_tools(self, agent: BaseAgent, tools: List[Tool]):
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]] multimodal_tools = agent.get_multimodal_tools()
) -> List[BaseTool]: return self._merge_tools(tools, multimodal_tools)
if hasattr(agent, "get_multimodal_tools"):
multimodal_tools = agent.get_multimodal_tools()
# Cast multimodal_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], multimodal_tools))
return cast(List[BaseTool], tools)
def _add_code_execution_tools( def _add_code_execution_tools(self, agent: BaseAgent, tools: List[Tool]):
self, agent: BaseAgent, tools: Union[List[Tool], List[BaseTool]] code_tools = agent.get_code_execution_tools()
) -> List[BaseTool]: return self._merge_tools(tools, code_tools)
if hasattr(agent, "get_code_execution_tools"):
code_tools = agent.get_code_execution_tools()
# Cast code_tools to the expected type for _merge_tools
return self._merge_tools(tools, cast(List[BaseTool], code_tools))
return cast(List[BaseTool], tools)
def _add_delegation_tools( def _add_delegation_tools(self, task: Task, tools: List[Tool]):
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
agents_for_delegation = [agent for agent in self.agents if agent != task.agent] agents_for_delegation = [agent for agent in self.agents if agent != task.agent]
if len(self.agents) > 1 and len(agents_for_delegation) > 0 and task.agent: if len(self.agents) > 1 and len(agents_for_delegation) > 0 and task.agent:
if not tools: if not tools:
@@ -1002,7 +957,7 @@ class Crew(BaseModel):
tools = self._inject_delegation_tools( tools = self._inject_delegation_tools(
tools, task.agent, agents_for_delegation tools, task.agent, agents_for_delegation
) )
return cast(List[BaseTool], tools) return tools
def _log_task_start(self, task: Task, role: str = "None"): def _log_task_start(self, task: Task, role: str = "None"):
if self.output_log_file: if self.output_log_file:
@@ -1010,9 +965,7 @@ class Crew(BaseModel):
task_name=task.name, task=task.description, agent=role, status="started" task_name=task.name, task=task.description, agent=role, status="started"
) )
def _update_manager_tools( def _update_manager_tools(self, task: Task, tools: List[Tool]):
self, task: Task, tools: Union[List[Tool], List[BaseTool]]
) -> List[BaseTool]:
if self.manager_agent: if self.manager_agent:
if task.agent: if task.agent:
tools = self._inject_delegation_tools(tools, task.agent, [task.agent]) tools = self._inject_delegation_tools(tools, task.agent, [task.agent])
@@ -1020,7 +973,7 @@ class Crew(BaseModel):
tools = self._inject_delegation_tools( tools = self._inject_delegation_tools(
tools, self.manager_agent, self.agents tools, self.manager_agent, self.agents
) )
return cast(List[BaseTool], tools) return tools
def _get_context(self, task: Task, task_outputs: List[TaskOutput]): def _get_context(self, task: Task, task_outputs: List[TaskOutput]):
context = ( context = (
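For orientation while reading the `_prepare_tools` hunks above, a sketch of the behavior they implement: delegation tools are merged into a task's toolset whenever the executing agent allows delegation and coworkers exist. The agent and task values here are illustrative:

```python
# Illustrative sketch of the delegation path in _prepare_tools() above.
from crewai import Agent, Crew, Process, Task

writer = Agent(role="Writer", goal="Draft posts", backstory="Concise",
               allow_delegation=True)
editor = Agent(role="Editor", goal="Polish drafts", backstory="Exacting")

draft = Task(description="Draft the post", expected_output="A draft", agent=writer)

crew = Crew(agents=[writer, editor], tasks=[draft], process=Process.sequential)
# During kickoff, _add_delegation_tools() injects "delegate work" /
# "ask question" style coworker tools for the editor into the writer's toolset,
# and _merge_tools() de-duplicates them by tool name.
result = crew.kickoff()
```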
@@ -1167,12 +1120,7 @@ class Crew(BaseModel):
return required_inputs return required_inputs
def copy(self): def copy(self):
""" """Create a deep copy of the Crew."""
Creates a deep copy of the Crew instance.
Returns:
Crew: A new instance with copied components
"""
exclude = { exclude = {
"id", "id",
@@ -1184,19 +1132,13 @@ class Crew(BaseModel):
"_short_term_memory", "_short_term_memory",
"_long_term_memory", "_long_term_memory",
"_entity_memory", "_entity_memory",
"_external_memory",
"_telemetry",
"agents", "agents",
"tasks", "tasks",
"knowledge_sources", "knowledge_sources",
"knowledge", "knowledge",
"manager_agent",
"manager_llm",
} }
cloned_agents = [agent.copy() for agent in self.agents] cloned_agents = [agent.copy() for agent in self.agents]
manager_agent = self.manager_agent.copy() if self.manager_agent else None
manager_llm = shallow_copy(self.manager_llm) if self.manager_llm else None
task_mapping = {} task_mapping = {}
@@ -1219,17 +1161,6 @@ class Crew(BaseModel):
copied_data = self.model_dump(exclude=exclude) copied_data = self.model_dump(exclude=exclude)
copied_data = {k: v for k, v in copied_data.items() if v is not None} copied_data = {k: v for k, v in copied_data.items() if v is not None}
if self.short_term_memory:
copied_data["short_term_memory"] = self.short_term_memory.model_copy(deep=True)
if self.long_term_memory:
copied_data["long_term_memory"] = self.long_term_memory.model_copy(deep=True)
if self.entity_memory:
copied_data["entity_memory"] = self.entity_memory.model_copy(deep=True)
if self.external_memory:
copied_data["external_memory"] = self.external_memory.model_copy(deep=True)
if self.user_memory:
copied_data["user_memory"] = self.user_memory.model_copy(deep=True)
copied_data.pop("agents", None) copied_data.pop("agents", None)
copied_data.pop("tasks", None) copied_data.pop("tasks", None)
@@ -1240,8 +1171,6 @@ class Crew(BaseModel):
tasks=cloned_tasks, tasks=cloned_tasks,
knowledge_sources=existing_knowledge_sources, knowledge_sources=existing_knowledge_sources,
knowledge=existing_knowledge, knowledge=existing_knowledge,
manager_agent=manager_agent,
manager_llm=manager_llm,
) )
return copied_crew return copied_crew
@@ -1285,14 +1214,13 @@ class Crew(BaseModel):
def test( def test(
self, self,
n_iterations: int, n_iterations: int,
eval_llm: Union[str, InstanceOf[BaseLLM]], eval_llm: Union[str, InstanceOf[LLM]],
inputs: Optional[Dict[str, Any]] = None, inputs: Optional[Dict[str, Any]] = None,
) -> None: ) -> None:
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures.""" """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
try: try:
# Create LLM instance and ensure it's of type LLM for CrewEvaluator eval_llm = create_llm(eval_llm)
llm_instance = create_llm(eval_llm) if not eval_llm:
if not llm_instance:
raise ValueError("Failed to create LLM instance.") raise ValueError("Failed to create LLM instance.")
crewai_event_bus.emit( crewai_event_bus.emit(
@@ -1300,12 +1228,12 @@ class Crew(BaseModel):
CrewTestStartedEvent( CrewTestStartedEvent(
crew_name=self.name or "crew", crew_name=self.name or "crew",
n_iterations=n_iterations, n_iterations=n_iterations,
eval_llm=llm_instance, eval_llm=eval_llm,
inputs=inputs, inputs=inputs,
), ),
) )
test_crew = self.copy() test_crew = self.copy()
evaluator = CrewEvaluator(test_crew, llm_instance) evaluator = CrewEvaluator(test_crew, eval_llm) # type: ignore[arg-type]
for i in range(1, n_iterations + 1): for i in range(1, n_iterations + 1):
evaluator.set_iteration(i) evaluator.set_iteration(i)
@@ -1342,15 +1270,7 @@ class Crew(BaseModel):
RuntimeError: If memory reset operation fails. RuntimeError: If memory reset operation fails.
""" """
VALID_TYPES = frozenset( VALID_TYPES = frozenset(
[ ["long", "short", "entity", "knowledge", "kickoff_outputs", "all"]
"long",
"short",
"entity",
"knowledge",
"kickoff_outputs",
"all",
"external",
]
) )
if command_type not in VALID_TYPES: if command_type not in VALID_TYPES:
@@ -1376,7 +1296,6 @@ class Crew(BaseModel):
memory_systems = [ memory_systems = [
("short term", getattr(self, "_short_term_memory", None)), ("short term", getattr(self, "_short_term_memory", None)),
("entity", getattr(self, "_entity_memory", None)), ("entity", getattr(self, "_entity_memory", None)),
("external", getattr(self, "_external_memory", None)),
("long term", getattr(self, "_long_term_memory", None)), ("long term", getattr(self, "_long_term_memory", None)),
("task output", getattr(self, "_task_output_handler", None)), ("task output", getattr(self, "_task_output_handler", None)),
("knowledge", getattr(self, "knowledge", None)), ("knowledge", getattr(self, "knowledge", None)),
@@ -1399,12 +1318,11 @@ class Crew(BaseModel):
RuntimeError: If the specified memory system fails to reset RuntimeError: If the specified memory system fails to reset
""" """
reset_functions = { reset_functions = {
"long": (getattr(self, "_long_term_memory", None), "long term"), "long": (self._long_term_memory, "long term"),
"short": (getattr(self, "_short_term_memory", None), "short term"), "short": (self._short_term_memory, "short term"),
"entity": (getattr(self, "_entity_memory", None), "entity"), "entity": (self._entity_memory, "entity"),
"knowledge": (getattr(self, "knowledge", None), "knowledge"), "knowledge": (self.knowledge, "knowledge"),
"kickoff_outputs": (getattr(self, "_task_output_handler", None), "task output"), "kickoff_outputs": (self._task_output_handler, "task output"),
"external": (getattr(self, "_external_memory", None), "external"),
} }
memory_system, name = reset_functions[memory_type] memory_system, name = reset_functions[memory_type]
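A usage sketch for the `reset_memories` changes above; note the `external` command type exists only on the side of this compare that keeps external memory:

```python
# Hedged sketch of reset_memories() with the command types listed above.
crew.reset_memories(command_type="short")     # short-term memory only
crew.reset_memories(command_type="external")  # external (e.g. mem0) memory, one side only
crew.reset_memories(command_type="all")       # every configured memory system
crew.reset_memories(command_type="vector")    # rejected by the VALID_TYPES check above
```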

View File

@@ -1043,7 +1043,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
import traceback import traceback
traceback.print_exc() traceback.print_exc()
raise
def _log_flow_event( def _log_flow_event(
self, message: str, color: str = "yellow", level: str = "info" self, message: str, color: str = "yellow", level: str = "info"

View File

@@ -29,7 +29,7 @@ class FlowPersistence(abc.ABC):
self, self,
flow_uuid: str, flow_uuid: str,
method_name: str, method_name: str,
state_data: Union[Dict[str, Any], BaseModel] state_data: Union[Dict[str, Any], BaseModel],
) -> None: ) -> None:
"""Persist the flow state after method completion. """Persist the flow state after method completion.

View File

@@ -11,6 +11,7 @@ from typing import Any, Dict, Optional, Union
from pydantic import BaseModel from pydantic import BaseModel
from crewai.flow.persistence.base import FlowPersistence from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.state_utils import to_serializable
class SQLiteFlowPersistence(FlowPersistence): class SQLiteFlowPersistence(FlowPersistence):
@@ -21,7 +22,7 @@ class SQLiteFlowPersistence(FlowPersistence):
moderate performance requirements. moderate performance requirements.
""" """
db_path: str db_path: str # Type annotation for instance variable
def __init__(self, db_path: Optional[str] = None): def __init__(self, db_path: Optional[str] = None):
"""Initialize SQLite persistence. """Initialize SQLite persistence.
@@ -78,34 +79,53 @@ class SQLiteFlowPersistence(FlowPersistence):
flow_uuid: Unique identifier for the flow instance flow_uuid: Unique identifier for the flow instance
method_name: Name of the method that just completed method_name: Name of the method that just completed
state_data: Current state data (either dict or Pydantic model) state_data: Current state data (either dict or Pydantic model)
"""
# Convert state_data to dict, handling both Pydantic and dict cases
if isinstance(state_data, BaseModel):
state_dict = dict(state_data) # Use dict() for better type compatibility
elif isinstance(state_data, dict):
state_dict = state_data
else:
raise ValueError(
f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
)
with sqlite3.connect(self.db_path) as conn: Raises:
conn.execute( ValueError: If state_data is neither a dict nor a BaseModel
""" RuntimeError: If database operations fail
INSERT INTO flow_states ( TypeError: If JSON serialization fails
flow_uuid, """
method_name, try:
timestamp, # Convert state_data to a JSON-serializable dict using the helper method
state_json state_dict = to_serializable(state_data)
) VALUES (?, ?, ?, ?)
""", # Try to serialize to JSON to catch any serialization issues early
( try:
flow_uuid, state_json = json.dumps(state_dict)
method_name, except (TypeError, ValueError, OverflowError) as json_err:
datetime.now(timezone.utc).isoformat(), raise TypeError(
json.dumps(state_dict), f"Failed to serialize state to JSON: {json_err}"
), ) from json_err
)
# Perform database operation with error handling
try:
with sqlite3.connect(self.db_path) as conn:
conn.execute(
"""
INSERT INTO flow_states (
flow_uuid,
method_name,
timestamp,
state_json
) VALUES (?, ?, ?, ?)
""",
(
flow_uuid,
method_name,
datetime.now(timezone.utc).isoformat(),
state_json,
),
)
except sqlite3.Error as db_err:
raise RuntimeError(f"Database operation failed: {db_err}") from db_err
except Exception as e:
# Log the error but don't crash the application
import logging
logging.error(f"Failed to save flow state: {e}")
# Re-raise to allow caller to handle or ignore
raise
def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]: def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
"""Load the most recent state for a given flow UUID. """Load the most recent state for a given flow UUID.

View File

@@ -1,6 +1,6 @@
import json import json
import uuid
from datetime import date, datetime from datetime import date, datetime
from enum import Enum
from typing import Any, Dict, List, Union from typing import Any, Dict, List, Union
from pydantic import BaseModel from pydantic import BaseModel
@@ -12,10 +12,7 @@ Serializable = Union[
def to_serializable( def to_serializable(
obj: Any, obj: Any, max_depth: int = 5, _current_depth: int = 0
exclude: set[str] | None = None,
max_depth: int = 5,
_current_depth: int = 0,
) -> Serializable: ) -> Serializable:
"""Converts a Python object into a JSON-compatible representation. """Converts a Python object into a JSON-compatible representation.
@@ -25,7 +22,6 @@ def to_serializable(
Args: Args:
obj (Any): Object to transform. obj (Any): Object to transform.
exclude (set[str], optional): Set of keys to exclude from the result.
max_depth (int, optional): Maximum recursion depth. Defaults to 5. max_depth (int, optional): Maximum recursion depth. Defaults to 5.
Returns: Returns:
@@ -34,39 +30,23 @@ def to_serializable(
if _current_depth >= max_depth: if _current_depth >= max_depth:
return repr(obj) return repr(obj)
if exclude is None:
exclude = set()
if isinstance(obj, (str, int, float, bool, type(None))): if isinstance(obj, (str, int, float, bool, type(None))):
return obj return obj
elif isinstance(obj, uuid.UUID): elif isinstance(obj, Enum):
return str(obj) return obj.value
elif isinstance(obj, (date, datetime)): elif isinstance(obj, (date, datetime)):
return obj.isoformat() return obj.isoformat()
elif isinstance(obj, (list, tuple, set)): elif isinstance(obj, (list, tuple, set)):
return [ return [to_serializable(item, max_depth, _current_depth + 1) for item in obj]
to_serializable(
item, max_depth=max_depth, _current_depth=_current_depth + 1
)
for item in obj
]
elif isinstance(obj, dict): elif isinstance(obj, dict):
return { return {
_to_serializable_key(key): to_serializable( _to_serializable_key(key): to_serializable(
obj=value, value, max_depth, _current_depth + 1
exclude=exclude,
max_depth=max_depth,
_current_depth=_current_depth + 1,
) )
for key, value in obj.items() for key, value in obj.items()
if key not in exclude
} }
elif isinstance(obj, BaseModel): elif isinstance(obj, BaseModel):
return to_serializable( return to_serializable(obj.model_dump(), max_depth, _current_depth + 1)
obj=obj.model_dump(exclude=exclude),
max_depth=max_depth,
_current_depth=_current_depth + 1,
)
else: else:
return repr(obj) return repr(obj)
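Both sides of this diff reduce a `BaseModel` to a plain dict and ISO-format datetimes; a quick sketch of the output for a nested model (UUID vs. Enum handling differs between the two sides, so only the shared behavior is shown):

```python
import json
from datetime import datetime, timezone

from pydantic import BaseModel

from crewai.flow.state_utils import to_serializable


class Inner(BaseModel):
    when: datetime


class Outer(BaseModel):
    label: str
    inner: Inner


state = Outer(label="run-1", inner=Inner(when=datetime(2025, 3, 5, tzinfo=timezone.utc)))
payload = to_serializable(state)  # nested model -> plain dict, datetime -> ISO string
print(json.dumps(payload))
# {"label": "run-1", "inner": {"when": "2025-03-05T00:00:00+00:00"}}
```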

View File

@@ -14,7 +14,6 @@ from chromadb.config import Settings
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
from crewai.utilities import EmbeddingConfigurator from crewai.utilities import EmbeddingConfigurator
from crewai.utilities.chromadb import sanitize_collection_name
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
from crewai.utilities.logger import Logger from crewai.utilities.logger import Logger
from crewai.utilities.paths import db_storage_path from crewai.utilities.paths import db_storage_path
@@ -100,8 +99,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
) )
if self.app: if self.app:
self.collection = self.app.get_or_create_collection( self.collection = self.app.get_or_create_collection(
name=sanitize_collection_name(collection_name), name=collection_name, embedding_function=self.embedder
embedding_function=self.embedder,
) )
else: else:
raise Exception("Vector Database Client not initialized") raise Exception("Vector Database Client not initialized")

View File

@@ -1,478 +0,0 @@
import asyncio
import uuid
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Type, Union, cast
from pydantic import BaseModel, Field, InstanceOf, PrivateAttr, model_validator
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.agents.cache import CacheHandler
from crewai.agents.parser import (
AgentAction,
AgentFinish,
OutputParserException,
)
from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.utilities import I18N
from crewai.utilities.agent_utils import (
enforce_rpm_limit,
format_message_for_llm,
get_llm_response,
get_tool_names,
handle_agent_action_core,
handle_context_length,
handle_max_iterations_exceeded,
handle_output_parser_exception,
handle_unknown_error,
has_reached_max_iterations,
is_context_length_exceeded,
parse_tools,
process_llm_response,
render_text_description_and_args,
show_agent_logs,
)
from crewai.utilities.converter import convert_to_model, generate_model_description
from crewai.utilities.events.agent_events import (
LiteAgentExecutionCompletedEvent,
LiteAgentExecutionErrorEvent,
LiteAgentExecutionStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.events.llm_events import (
LLMCallCompletedEvent,
LLMCallFailedEvent,
LLMCallStartedEvent,
LLMCallType,
)
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.tool_utils import execute_tool_and_check_finality
class LiteAgentOutput(BaseModel):
"""Class that represents the result of a LiteAgent execution."""
model_config = {"arbitrary_types_allowed": True}
raw: str = Field(description="Raw output of the agent", default="")
pydantic: Optional[BaseModel] = Field(
description="Pydantic output of the agent", default=None
)
agent_role: str = Field(description="Role of the agent that produced this output")
usage_metrics: Optional[Dict[str, Any]] = Field(
description="Token usage metrics for this execution", default=None
)
def to_dict(self) -> Dict[str, Any]:
"""Convert pydantic_output to a dictionary."""
if self.pydantic:
return self.pydantic.model_dump()
return {}
def __str__(self) -> str:
"""String representation of the output."""
if self.pydantic:
return str(self.pydantic)
return self.raw
class LiteAgent(BaseModel):
"""
A lightweight agent that can process messages and use tools.
This agent is simpler than the full Agent class, focusing on direct execution
rather than task delegation. It's designed to be used for simple interactions
where a full crew is not needed.
Attributes:
role: The role of the agent.
goal: The objective of the agent.
backstory: The backstory of the agent.
llm: The language model that will run the agent.
tools: Tools at the agent's disposal.
verbose: Whether the agent execution should be in verbose mode.
max_iterations: Maximum number of iterations for tool usage.
max_execution_time: Maximum execution time in seconds.
response_format: Optional Pydantic model for structured output.
"""
model_config = {"arbitrary_types_allowed": True}
# Core Agent Properties
role: str = Field(description="Role of the agent")
goal: str = Field(description="Goal of the agent")
backstory: str = Field(description="Backstory of the agent")
llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
default=None, description="Language model that will run the agent"
)
tools: List[BaseTool] = Field(
default_factory=list, description="Tools at agent's disposal"
)
# Execution Control Properties
max_iterations: int = Field(
default=15, description="Maximum number of iterations for tool usage"
)
max_execution_time: Optional[int] = Field(
default=None, description="Maximum execution time in seconds"
)
respect_context_window: bool = Field(
default=True,
description="Whether to respect the context window of the LLM",
)
use_stop_words: bool = Field(
default=True,
description="Whether to use stop words to prevent the LLM from using tools",
)
request_within_rpm_limit: Optional[Callable[[], bool]] = Field(
default=None,
description="Callback to check if the request is within the RPM limit",
)
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
# Output and Formatting Properties
response_format: Optional[Type[BaseModel]] = Field(
default=None, description="Pydantic model for structured output"
)
verbose: bool = Field(
default=False, description="Whether to print execution details"
)
callbacks: List[Callable] = Field(
default=[], description="Callbacks to be used for the agent"
)
# State and Results
tools_results: List[Dict[str, Any]] = Field(
default=[], description="Results of the tools used by the agent."
)
# Reference of Agent
original_agent: Optional[BaseAgent] = Field(
default=None, description="Reference to the agent that created this LiteAgent"
)
# Private Attributes
_parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list)
_token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
_cache_handler: CacheHandler = PrivateAttr(default_factory=CacheHandler)
_key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4()))
_messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
_iterations: int = PrivateAttr(default=0)
_printer: Printer = PrivateAttr(default_factory=Printer)
@model_validator(mode="after")
def setup_llm(self):
"""Set up the LLM and other components after initialization."""
self.llm = create_llm(self.llm)
if not isinstance(self.llm, LLM):
raise ValueError("Unable to create LLM instance")
# Initialize callbacks
token_callback = TokenCalcHandler(token_cost_process=self._token_process)
self._callbacks = [token_callback]
return self
@model_validator(mode="after")
def parse_tools(self):
"""Parse the tools and convert them to CrewStructuredTool instances."""
self._parsed_tools = parse_tools(self.tools)
return self
@property
def key(self) -> str:
"""Get the unique key for this agent instance."""
return self._key
@property
def _original_role(self) -> str:
"""Return the original role for compatibility with tool interfaces."""
return self.role
def kickoff(self, messages: Union[str, List[Dict[str, str]]]) -> LiteAgentOutput:
"""
Execute the agent with the given messages.
Args:
messages: Either a string query or a list of message dictionaries.
If a string is provided, it will be converted to a user message.
If a list is provided, each dict should have 'role' and 'content' keys.
Returns:
LiteAgentOutput: The result of the agent execution.
"""
# Create agent info for event emission
agent_info = {
"role": self.role,
"goal": self.goal,
"backstory": self.backstory,
"tools": self._parsed_tools,
"verbose": self.verbose,
}
try:
# Reset state for this run
self._iterations = 0
self.tools_results = []
# Format messages for the LLM
self._messages = self._format_messages(messages)
# Emit event for agent execution start
crewai_event_bus.emit(
self,
event=LiteAgentExecutionStartedEvent(
agent_info=agent_info,
tools=self._parsed_tools,
messages=messages,
),
)
# Execute the agent using invoke loop
agent_finish = self._invoke_loop()
formatted_result: Optional[BaseModel] = None
if self.response_format:
try:
# Cast to BaseModel to ensure type safety
result = self.response_format.model_validate_json(
agent_finish.output
)
if isinstance(result, BaseModel):
formatted_result = result
except Exception as e:
self._printer.print(
content=f"Failed to parse output into response format: {str(e)}",
color="yellow",
)
# Calculate token usage metrics
usage_metrics = self._token_process.get_summary()
# Create output
output = LiteAgentOutput(
raw=agent_finish.output,
pydantic=formatted_result,
agent_role=self.role,
usage_metrics=usage_metrics.model_dump() if usage_metrics else None,
)
# Emit completion event
crewai_event_bus.emit(
self,
event=LiteAgentExecutionCompletedEvent(
agent_info=agent_info,
output=agent_finish.output,
),
)
return output
except Exception as e:
self._printer.print(
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
color="red",
)
handle_unknown_error(self._printer, e)
# Emit error event
crewai_event_bus.emit(
self,
event=LiteAgentExecutionErrorEvent(
agent_info=agent_info,
error=str(e),
),
)
raise e
async def kickoff_async(
self, messages: Union[str, List[Dict[str, str]]]
) -> LiteAgentOutput:
"""
Execute the agent asynchronously with the given messages.
Args:
messages: Either a string query or a list of message dictionaries.
If a string is provided, it will be converted to a user message.
If a list is provided, each dict should have 'role' and 'content' keys.
Returns:
LiteAgentOutput: The result of the agent execution.
"""
return await asyncio.to_thread(self.kickoff, messages)
def _get_default_system_prompt(self) -> str:
"""Get the default system prompt for the agent."""
base_prompt = ""
if self._parsed_tools:
# Use the prompt template for agents with tools
base_prompt = self.i18n.slice("lite_agent_system_prompt_with_tools").format(
role=self.role,
backstory=self.backstory,
goal=self.goal,
tools=render_text_description_and_args(self._parsed_tools),
tool_names=get_tool_names(self._parsed_tools),
)
else:
# Use the prompt template for agents without tools
base_prompt = self.i18n.slice(
"lite_agent_system_prompt_without_tools"
).format(
role=self.role,
backstory=self.backstory,
goal=self.goal,
)
# Add response format instructions if specified
if self.response_format:
schema = generate_model_description(self.response_format)
base_prompt += self.i18n.slice("lite_agent_response_format").format(
response_format=schema
)
return base_prompt
def _format_messages(
self, messages: Union[str, List[Dict[str, str]]]
) -> List[Dict[str, str]]:
"""Format messages for the LLM."""
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
system_prompt = self._get_default_system_prompt()
# Add system message at the beginning
formatted_messages = [{"role": "system", "content": system_prompt}]
# Add the rest of the messages
formatted_messages.extend(messages)
return formatted_messages
def _invoke_loop(self) -> AgentFinish:
"""
Run the agent's thought process until it reaches a conclusion or max iterations.
Returns:
AgentFinish: The final result of the agent execution.
"""
# Execute the agent loop
formatted_answer = None
while not isinstance(formatted_answer, AgentFinish):
try:
if has_reached_max_iterations(self._iterations, self.max_iterations):
formatted_answer = handle_max_iterations_exceeded(
formatted_answer,
printer=self._printer,
i18n=self.i18n,
messages=self._messages,
llm=cast(LLM, self.llm),
callbacks=self._callbacks,
)
enforce_rpm_limit(self.request_within_rpm_limit)
# Emit LLM call started event
crewai_event_bus.emit(
self,
event=LLMCallStartedEvent(
messages=self._messages,
tools=None,
callbacks=self._callbacks,
),
)
try:
answer = get_llm_response(
llm=cast(LLM, self.llm),
messages=self._messages,
callbacks=self._callbacks,
printer=self._printer,
)
# Emit LLM call completed event
crewai_event_bus.emit(
self,
event=LLMCallCompletedEvent(
response=answer,
call_type=LLMCallType.LLM_CALL,
),
)
except Exception as e:
# Emit LLM call failed event
crewai_event_bus.emit(
self,
event=LLMCallFailedEvent(error=str(e)),
)
raise e
formatted_answer = process_llm_response(answer, self.use_stop_words)
if isinstance(formatted_answer, AgentAction):
try:
tool_result = execute_tool_and_check_finality(
agent_action=formatted_answer,
tools=self._parsed_tools,
i18n=self.i18n,
agent_key=self.key,
agent_role=self.role,
agent=self.original_agent,
)
except Exception as e:
raise e
formatted_answer = handle_agent_action_core(
formatted_answer=formatted_answer,
tool_result=tool_result,
show_logs=self._show_logs,
)
self._append_message(formatted_answer.text, role="assistant")
except OutputParserException as e:
formatted_answer = handle_output_parser_exception(
e=e,
messages=self._messages,
iterations=self._iterations,
log_error_after=3,
printer=self._printer,
)
except Exception as e:
if e.__class__.__module__.startswith("litellm"):
# Do not retry on litellm errors
raise e
if is_context_length_exceeded(e):
handle_context_length(
respect_context_window=self.respect_context_window,
printer=self._printer,
messages=self._messages,
llm=cast(LLM, self.llm),
callbacks=self._callbacks,
i18n=self.i18n,
)
continue
else:
handle_unknown_error(self._printer, e)
raise e
finally:
self._iterations += 1
assert isinstance(formatted_answer, AgentFinish)
self._show_logs(formatted_answer)
return formatted_answer
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
"""Show logs for the agent's execution."""
show_agent_logs(
printer=self._printer,
agent_role=self.role,
formatted_answer=formatted_answer,
verbose=self.verbose,
)
def _append_message(self, text: str, role: str = "assistant") -> None:
"""Append a message to the message list with the given role."""
self._messages.append(format_message_for_llm(text, role=role))
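For reference while reviewing this removal, a hedged sketch of how the class above was driven; the role, goal, and model values are placeholders, and the class only exists on one side of this compare:

```python
# Illustrative sketch of driving the LiteAgent defined above.
from pydantic import BaseModel


class Summary(BaseModel):
    headline: str


agent = LiteAgent(
    role="Summarizer",
    goal="Summarize input text in one headline",
    backstory="Terse by design",
    llm="gpt-4o-mini",        # create_llm() resolves strings to LLM instances
    response_format=Summary,  # parsed via model_validate_json on the final output
    verbose=True,
)

output = agent.kickoff("CrewAI flows now persist nested Pydantic models.")
print(output.pydantic.headline if output.pydantic else output.raw)
```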

View File

@@ -4,12 +4,9 @@ import os
import sys import sys
import threading import threading
import warnings import warnings
from collections import defaultdict
from contextlib import contextmanager from contextlib import contextmanager
from types import SimpleNamespace
from typing import ( from typing import (
Any, Any,
DefaultDict,
Dict, Dict,
List, List,
Literal, Literal,
@@ -21,8 +18,7 @@ from typing import (
) )
from dotenv import load_dotenv from dotenv import load_dotenv
from litellm.types.utils import ChatCompletionDeltaToolCall from pydantic import BaseModel
from pydantic import BaseModel, Field
from crewai.utilities.events.llm_events import ( from crewai.utilities.events.llm_events import (
LLMCallCompletedEvent, LLMCallCompletedEvent,
@@ -44,7 +40,6 @@ with warnings.catch_warnings():
from litellm.utils import supports_response_schema from litellm.utils import supports_response_schema
from crewai.llms.base_llm import BaseLLM
from crewai.utilities.events import crewai_event_bus from crewai.utilities.events import crewai_event_bus
from crewai.utilities.exceptions.context_window_exceeding_exception import ( from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException, LLMContextLengthExceededException,
@@ -223,16 +218,7 @@ class StreamingChoices(TypedDict):
finish_reason: Optional[str] finish_reason: Optional[str]
class FunctionArgs(BaseModel): class LLM:
name: str = ""
arguments: str = ""
class AccumulatedToolArgs(BaseModel):
function: FunctionArgs = Field(default_factory=FunctionArgs)
class LLM(BaseLLM):
def __init__( def __init__(
self, self,
model: str, model: str,
@@ -384,11 +370,6 @@ class LLM(BaseLLM):
last_chunk = None last_chunk = None
chunk_count = 0 chunk_count = 0
usage_info = None usage_info = None
tool_calls = None
accumulated_tool_args: DefaultDict[int, AccumulatedToolArgs] = defaultdict(
AccumulatedToolArgs
)
# --- 2) Make sure stream is set to True and include usage metrics # --- 2) Make sure stream is set to True and include usage metrics
params["stream"] = True params["stream"] = True
@@ -446,20 +427,6 @@ class LLM(BaseLLM):
if chunk_content is None and isinstance(delta, dict): if chunk_content is None and isinstance(delta, dict):
# Some models might send empty content chunks # Some models might send empty content chunks
chunk_content = "" chunk_content = ""
# Enable tool calls using streaming
if "tool_calls" in delta:
tool_calls = delta["tool_calls"]
if tool_calls:
result = self._handle_streaming_tool_calls(
tool_calls=tool_calls,
accumulated_tool_args=accumulated_tool_args,
available_functions=available_functions,
)
if result is not None:
chunk_content = result
except Exception as e: except Exception as e:
logging.debug(f"Error extracting content from chunk: {e}") logging.debug(f"Error extracting content from chunk: {e}")
logging.debug(f"Chunk format: {type(chunk)}, content: {chunk}") logging.debug(f"Chunk format: {type(chunk)}, content: {chunk}")
@@ -474,6 +441,7 @@ class LLM(BaseLLM):
self, self,
event=LLMStreamChunkEvent(chunk=chunk_content), event=LLMStreamChunkEvent(chunk=chunk_content),
) )
# --- 4) Fallback to non-streaming if no content received # --- 4) Fallback to non-streaming if no content received
if not full_response.strip() and chunk_count == 0: if not full_response.strip() and chunk_count == 0:
logging.warning( logging.warning(
@@ -532,7 +500,7 @@ class LLM(BaseLLM):
) )
# --- 6) If still empty, raise an error instead of using a default response # --- 6) If still empty, raise an error instead of using a default response
if not full_response.strip() and len(accumulated_tool_args) == 0: if not full_response.strip():
raise Exception( raise Exception(
"No content received from streaming response. Received empty chunks or failed to extract content." "No content received from streaming response. Received empty chunks or failed to extract content."
) )
@@ -564,8 +532,8 @@ class LLM(BaseLLM):
tool_calls = getattr(message, "tool_calls") tool_calls = getattr(message, "tool_calls")
except Exception as e: except Exception as e:
logging.debug(f"Error checking for tool calls: {e}") logging.debug(f"Error checking for tool calls: {e}")
# --- 8) If no tool calls or no available functions, return the text response directly
# --- 8) If no tool calls or no available functions, return the text response directly
if not tool_calls or not available_functions: if not tool_calls or not available_functions:
# Log token usage if available in streaming mode # Log token usage if available in streaming mode
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk) self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
@@ -599,47 +567,6 @@ class LLM(BaseLLM):
) )
raise Exception(f"Failed to get streaming response: {str(e)}") raise Exception(f"Failed to get streaming response: {str(e)}")
def _handle_streaming_tool_calls(
self,
tool_calls: List[ChatCompletionDeltaToolCall],
accumulated_tool_args: DefaultDict[int, AccumulatedToolArgs],
available_functions: Optional[Dict[str, Any]] = None,
) -> None | str:
for tool_call in tool_calls:
current_tool_accumulator = accumulated_tool_args[tool_call.index]
if tool_call.function.name:
current_tool_accumulator.function.name = tool_call.function.name
if tool_call.function.arguments:
current_tool_accumulator.function.arguments += (
tool_call.function.arguments
)
crewai_event_bus.emit(
self,
event=LLMStreamChunkEvent(
tool_call=tool_call.to_dict(),
chunk=tool_call.function.arguments,
),
)
if (
current_tool_accumulator.function.name
and current_tool_accumulator.function.arguments
and available_functions
):
try:
json.loads(current_tool_accumulator.function.arguments)
return self._handle_tool_call(
[current_tool_accumulator],
available_functions,
)
except json.JSONDecodeError:
continue
return None
def _handle_streaming_callbacks( def _handle_streaming_callbacks(
self, self,
callbacks: Optional[List[Any]], callbacks: Optional[List[Any]],
@@ -779,6 +706,15 @@ class LLM(BaseLLM):
function_name, lambda: None function_name, lambda: None
) # Ensure fn is always a callable ) # Ensure fn is always a callable
logging.error(f"Error executing function '{function_name}': {e}") logging.error(f"Error executing function '{function_name}': {e}")
crewai_event_bus.emit(
self,
event=ToolExecutionErrorEvent(
tool_name=function_name,
tool_args=function_args,
tool_class=fn,
error=str(e),
),
)
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
event=LLMCallFailedEvent(error=f"Tool execution error: {str(e)}"), event=LLMCallFailedEvent(error=f"Tool execution error: {str(e)}"),

View File

@@ -1,91 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Union
class BaseLLM(ABC):
"""Abstract base class for LLM implementations.
This class defines the interface that all LLM implementations must follow.
Users can extend this class to create custom LLM implementations that don't
rely on litellm's authentication mechanism.
Custom LLM implementations should handle error cases gracefully, including
timeouts, authentication failures, and malformed responses. They should also
implement proper validation for input parameters and provide clear error
messages when things go wrong.
Attributes:
stop (list): A list of stop sequences that the LLM should use to stop generation.
This is used by the CrewAgentExecutor and other components.
"""
model: str
temperature: Optional[float] = None
stop: Optional[List[str]] = None
def __init__(
self,
model: str,
temperature: Optional[float] = None,
):
"""Initialize the BaseLLM with default attributes.
This constructor sets default values for attributes that are expected
by the CrewAgentExecutor and other components.
All custom LLM implementations should call super().__init__() to ensure
that these default attributes are properly initialized.
"""
self.model = model
self.temperature = temperature
self.stop = []
@abstractmethod
def call(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
"""Call the LLM with the given messages.
Args:
messages: Input messages for the LLM.
Can be a string or list of message dictionaries.
If string, it will be converted to a single user message.
If list, each dict must have 'role' and 'content' keys.
tools: Optional list of tool schemas for function calling.
Each tool should define its name, description, and parameters.
callbacks: Optional list of callback functions to be executed
during and after the LLM call.
available_functions: Optional dict mapping function names to callables
that can be invoked by the LLM.
Returns:
Either a text response from the LLM (str) or
the result of a tool function call (Any).
Raises:
ValueError: If the messages format is invalid.
TimeoutError: If the LLM request times out.
RuntimeError: If the LLM request fails for other reasons.
"""
pass
def supports_stop_words(self) -> bool:
"""Check if the LLM supports stop words.
Returns:
bool: True if the LLM supports stop words, False otherwise.
"""
return True # Default implementation assumes support for stop words
def get_context_window_size(self) -> int:
"""Get the context window size for the LLM.
Returns:
int: The number of tokens/characters the model can handle.
"""
# Default implementation - subclasses should override with model-specific values
return 4096
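The docstring above invites custom implementations; a minimal sketch of one (the `AISuiteLLM` in the next file is a concrete instance of the same pattern):

```python
# Toy BaseLLM subclass for tests: returns the last user message verbatim.
# BaseLLM here is the abstract class defined above.
from typing import Any, Dict, List, Optional, Union


class EchoLLM(BaseLLM):
    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
    ) -> Union[str, Any]:
        if isinstance(messages, str):
            return messages
        return messages[-1]["content"]


EchoLLM(model="echo").call("ping")  # -> "ping"
```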

View File

@@ -1,38 +0,0 @@
from typing import Any, Dict, List, Optional, Union
import aisuite as ai
from crewai.llms.base_llm import BaseLLM
class AISuiteLLM(BaseLLM):
def __init__(self, model: str, temperature: Optional[float] = None, **kwargs):
super().__init__(model, temperature, **kwargs)
self.client = ai.Client()
def call(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
callbacks: Optional[List[Any]] = None,
available_functions: Optional[Dict[str, Any]] = None,
) -> Union[str, Any]:
completion_params = self._prepare_completion_params(messages, tools)
response = self.client.chat.completions.create(**completion_params)
return response.choices[0].message.content
def _prepare_completion_params(
self,
messages: Union[str, List[Dict[str, str]]],
tools: Optional[List[dict]] = None,
) -> Dict[str, Any]:
return {
"model": self.model,
"messages": messages,
"temperature": self.temperature,
"tools": tools,
}
def supports_function_calling(self) -> bool:
return False

View File

@@ -2,12 +2,5 @@ from .entity.entity_memory import EntityMemory
from .long_term.long_term_memory import LongTermMemory from .long_term.long_term_memory import LongTermMemory
from .short_term.short_term_memory import ShortTermMemory from .short_term.short_term_memory import ShortTermMemory
from .user.user_memory import UserMemory from .user.user_memory import UserMemory
from .external.external_memory import ExternalMemory
__all__ = [ __all__ = ["UserMemory", "EntityMemory", "LongTermMemory", "ShortTermMemory"]
"UserMemory",
"EntityMemory",
"LongTermMemory",
"ShortTermMemory",
"ExternalMemory",
]

View File

@@ -1,12 +1,6 @@
from typing import Any, Dict, Optional from typing import Any, Dict, Optional
from crewai.memory import ( from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory, UserMemory
EntityMemory,
ExternalMemory,
LongTermMemory,
ShortTermMemory,
UserMemory,
)
class ContextualMemory: class ContextualMemory:
@@ -17,7 +11,6 @@ class ContextualMemory:
ltm: LongTermMemory, ltm: LongTermMemory,
em: EntityMemory, em: EntityMemory,
um: UserMemory, um: UserMemory,
exm: ExternalMemory,
): ):
if memory_config is not None: if memory_config is not None:
self.memory_provider = memory_config.get("provider") self.memory_provider = memory_config.get("provider")
@@ -27,7 +20,6 @@ class ContextualMemory:
self.ltm = ltm self.ltm = ltm
self.em = em self.em = em
self.um = um self.um = um
self.exm = exm
def build_context_for_task(self, task, context) -> str: def build_context_for_task(self, task, context) -> str:
""" """
@@ -43,7 +35,6 @@ class ContextualMemory:
context.append(self._fetch_ltm_context(task.description)) context.append(self._fetch_ltm_context(task.description))
context.append(self._fetch_stm_context(query)) context.append(self._fetch_stm_context(query))
context.append(self._fetch_entity_context(query)) context.append(self._fetch_entity_context(query))
context.append(self._fetch_external_context(query))
if self.memory_provider == "mem0": if self.memory_provider == "mem0":
context.append(self._fetch_user_context(query)) context.append(self._fetch_user_context(query))
return "\n".join(filter(None, context)) return "\n".join(filter(None, context))
@@ -53,10 +44,6 @@ class ContextualMemory:
Fetches recent relevant insights from STM related to the task's description and expected_output, Fetches recent relevant insights from STM related to the task's description and expected_output,
formatted as bullet points. formatted as bullet points.
""" """
if self.stm is None:
return ""
stm_results = self.stm.search(query) stm_results = self.stm.search(query)
formatted_results = "\n".join( formatted_results = "\n".join(
[ [
@@ -71,10 +58,6 @@ class ContextualMemory:
Fetches historical data or insights from LTM that are relevant to the task's description and expected_output, Fetches historical data or insights from LTM that are relevant to the task's description and expected_output,
formatted as bullet points. formatted as bullet points.
""" """
if self.ltm is None:
return ""
ltm_results = self.ltm.search(task, latest_n=2) ltm_results = self.ltm.search(task, latest_n=2)
if not ltm_results: if not ltm_results:
return None return None
@@ -94,9 +77,6 @@ class ContextualMemory:
Fetches relevant entity information from Entity Memory related to the task's description and expected_output, Fetches relevant entity information from Entity Memory related to the task's description and expected_output,
formatted as bullet points. formatted as bullet points.
""" """
if self.em is None:
return ""
em_results = self.em.search(query) em_results = self.em.search(query)
formatted_results = "\n".join( formatted_results = "\n".join(
[ [
@@ -114,10 +94,6 @@ class ContextualMemory:
Returns: Returns:
str: Formatted user memories as bullet points, or an empty string if none found. str: Formatted user memories as bullet points, or an empty string if none found.
""" """
if self.um is None:
return ""
user_memories = self.um.search(query) user_memories = self.um.search(query)
if not user_memories: if not user_memories:
return "" return ""
@@ -126,24 +102,3 @@ class ContextualMemory:
f"- {result['memory']}" for result in user_memories f"- {result['memory']}" for result in user_memories
) )
return f"User memories/preferences:\n{formatted_memories}" return f"User memories/preferences:\n{formatted_memories}"
def _fetch_external_context(self, query: str) -> str:
"""
Fetches and formats relevant information from External Memory.
Args:
query (str): The search query to find relevant information.
Returns:
str: Formatted information as bullet points, or an empty string if none found.
"""
if self.exm is None:
return ""
external_memories = self.exm.search(query)
if not external_memories:
return ""
formatted_memories = "\n".join(
f"- {result['memory']}" for result in external_memories
)
return f"External memories:\n{formatted_memories}"

View File

@@ -1,61 +0,0 @@
from typing import TYPE_CHECKING, Any, Dict, Optional
from crewai.memory.external.external_memory_item import ExternalMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.interface import Storage
if TYPE_CHECKING:
from crewai.memory.storage.mem0_storage import Mem0Storage
class ExternalMemory(Memory):
def __init__(self, storage: Optional[Storage] = None, **data: Any):
super().__init__(storage=storage, **data)
@staticmethod
def _configure_mem0(crew: Any, config: Dict[str, Any]) -> "Mem0Storage":
from crewai.memory.storage.mem0_storage import Mem0Storage
return Mem0Storage(type="external", crew=crew, config=config)
@staticmethod
def external_supported_storages() -> Dict[str, Any]:
return {
"mem0": ExternalMemory._configure_mem0,
}
@staticmethod
def create_storage(crew: Any, embedder_config: Optional[Dict[str, Any]]) -> Storage:
if not embedder_config:
raise ValueError("embedder_config is required")
if "provider" not in embedder_config:
raise ValueError("embedder_config must include a 'provider' key")
provider = embedder_config["provider"]
supported_storages = ExternalMemory.external_supported_storages()
if provider not in supported_storages:
raise ValueError(f"Provider {provider} not supported")
return supported_storages[provider](crew, embedder_config.get("config", {}))
def save(
self,
value: Any,
metadata: Optional[Dict[str, Any]] = None,
agent: Optional[str] = None,
) -> None:
"""Saves a value into the external storage."""
item = ExternalMemoryItem(value=value, metadata=metadata, agent=agent)
super().save(value=item.value, metadata=item.metadata, agent=item.agent)
def reset(self) -> None:
self.storage.reset()
def set_crew(self, crew: Any) -> "ExternalMemory":
super().set_crew(crew)
if not self.storage:
self.storage = self.create_storage(crew, self.embedder_config)
return self
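A sketch of the provider validation path in `create_storage` above; `my_crew` is an assumed Crew instance and the mem0 config keys are illustrative:

```python
# Sketch of ExternalMemory.create_storage() validation shown above.
storage = ExternalMemory.create_storage(
    crew=my_crew,
    embedder_config={"provider": "mem0", "config": {"user_id": "u-123"}},
)

ExternalMemory.create_storage(crew=my_crew, embedder_config=None)
# -> ValueError: embedder_config is required
ExternalMemory.create_storage(crew=my_crew, embedder_config={"provider": "redis"})
# -> ValueError: Provider redis not supported
```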

View File

@@ -1,13 +0,0 @@
from typing import Any, Dict, Optional
class ExternalMemoryItem:
def __init__(
self,
value: Any,
metadata: Optional[Dict[str, Any]] = None,
agent: Optional[str] = None,
):
self.value = value
self.metadata = metadata
self.agent = agent

View File

@@ -9,7 +9,6 @@ class Memory(BaseModel):
""" """
embedder_config: Optional[Dict[str, Any]] = None embedder_config: Optional[Dict[str, Any]] = None
crew: Optional[Any] = None
storage: Any storage: Any
@@ -37,7 +36,3 @@ class Memory(BaseModel):
return self.storage.search( return self.storage.search(
query=query, limit=limit, score_threshold=score_threshold query=query, limit=limit, score_threshold=score_threshold
) )
def set_crew(self, crew: Any) -> "Memory":
self.crew = crew
return self

View File

@@ -11,20 +11,15 @@ class Mem0Storage(Storage):
Extends Storage to handle embedding and searching across entities using Mem0. Extends Storage to handle embedding and searching across entities using Mem0.
""" """
def __init__(self, type, crew=None, config=None): def __init__(self, type, crew=None):
super().__init__() super().__init__()
supported_types = ["user", "short_term", "long_term", "entities", "external"]
if type not in supported_types: if type not in ["user", "short_term", "long_term", "entities"]:
raise ValueError( raise ValueError("Invalid type for Mem0Storage. Must be 'user' or 'agent'.")
f"Invalid type '{type}' for Mem0Storage. Must be one of: "
+ ", ".join(supported_types)
)
self.memory_type = type self.memory_type = type
self.crew = crew self.crew = crew
self.config = config or {} self.memory_config = crew.memory_config
# TODO: Memory config will be removed in the future the config will be passed as a parameter
self.memory_config = self.config or getattr(crew, "memory_config", {}) or {}
# User ID is required for user memory type "user" since it's used as a unique identifier for the user. # User ID is required for user memory type "user" since it's used as a unique identifier for the user.
user_id = self._get_user_id() user_id = self._get_user_id()
@@ -32,11 +27,10 @@ class Mem0Storage(Storage):
raise ValueError("User ID is required for user memory type") raise ValueError("User ID is required for user memory type")
# API key in memory config overrides the environment variable # API key in memory config overrides the environment variable
config = self._get_config() config = self.memory_config.get("config", {})
mem0_api_key = config.get("api_key") or os.getenv("MEM0_API_KEY") mem0_api_key = config.get("api_key") or os.getenv("MEM0_API_KEY")
mem0_org_id = config.get("org_id") mem0_org_id = config.get("org_id")
mem0_project_id = config.get("project_id") mem0_project_id = config.get("project_id")
mem0_local_config = config.get("local_mem0_config")
# Initialize MemoryClient or Memory based on the presence of the mem0_api_key # Initialize MemoryClient or Memory based on the presence of the mem0_api_key
if mem0_api_key: if mem0_api_key:
@@ -47,10 +41,7 @@ class Mem0Storage(Storage):
else: else:
self.memory = MemoryClient(api_key=mem0_api_key) self.memory = MemoryClient(api_key=mem0_api_key)
else: else:
if mem0_local_config and len(mem0_local_config): self.memory = Memory() # Fallback to Memory if no Mem0 API key is provided
self.memory = Memory.from_config(mem0_local_config)
else:
self.memory = Memory()
def _sanitize_role(self, role: str) -> str: def _sanitize_role(self, role: str) -> str:
""" """
@@ -61,34 +52,26 @@ class Mem0Storage(Storage):
def save(self, value: Any, metadata: Dict[str, Any]) -> None: def save(self, value: Any, metadata: Dict[str, Any]) -> None:
user_id = self._get_user_id() user_id = self._get_user_id()
agent_name = self._get_agent_name() agent_name = self._get_agent_name()
params = None if self.memory_type == "user":
-        if self.memory_type == "short_term":
-            params = {
-                "agent_id": agent_name,
-                "infer": False,
-                "metadata": {"type": "short_term", **metadata},
-            }
-        elif self.memory_type == "long_term":
-            params = {
-                "agent_id": agent_name,
-                "infer": False,
-                "metadata": {"type": "long_term", **metadata},
-            }
-        elif self.memory_type == "entities":
-            params = {
-                "agent_id": agent_name,
-                "infer": False,
-                "metadata": {"type": "entity", **metadata},
-            }
-        elif self.memory_type == "external":
-            params = {
-                "user_id": user_id,
-                "agent_id": agent_name,
-                "metadata": {"type": "external", **metadata},
-            }
-        if params:
-            self.memory.add(value, **params | {"output_format": "v1.1"})
+            self.memory.add(value, user_id=user_id, metadata={**metadata})
+        elif self.memory_type == "short_term":
+            agent_name = self._get_agent_name()
+            self.memory.add(
+                value, agent_id=agent_name, metadata={"type": "short_term", **metadata}
+            )
+        elif self.memory_type == "long_term":
+            agent_name = self._get_agent_name()
+            self.memory.add(
+                value,
+                agent_id=agent_name,
+                infer=False,
+                metadata={"type": "long_term", **metadata},
+            )
+        elif self.memory_type == "entities":
+            entity_name = self._get_agent_name()
+            self.memory.add(
+                value, user_id=entity_name, metadata={"type": "entity", **metadata}
+            )

     def search(
         self,
@@ -97,43 +80,37 @@ class Mem0Storage(Storage):
         score_threshold: float = 0.35,
     ) -> List[Any]:
         params = {"query": query, "limit": limit}

-        if user_id := self._get_user_id():
+        if self.memory_type == "user":
+            user_id = self._get_user_id()
             params["user_id"] = user_id
-
-        agent_name = self._get_agent_name()
-        if self.memory_type == "short_term":
+        elif self.memory_type == "short_term":
+            agent_name = self._get_agent_name()
             params["agent_id"] = agent_name
             params["metadata"] = {"type": "short_term"}
         elif self.memory_type == "long_term":
+            agent_name = self._get_agent_name()
             params["agent_id"] = agent_name
             params["metadata"] = {"type": "long_term"}
         elif self.memory_type == "entities":
+            agent_name = self._get_agent_name()
             params["agent_id"] = agent_name
             params["metadata"] = {"type": "entity"}
-        elif self.memory_type == "external":
-            params["agent_id"] = agent_name
-            params["metadata"] = {"type": "external"}

         # Discard the filters for now since we create the filters
         # automatically when the crew is created.
         results = self.memory.search(**params)
         return [r for r in results if r["score"] >= score_threshold]

-    def _get_user_id(self) -> str:
-        return self._get_config().get("user_id", "")
+    def _get_user_id(self):
+        if self.memory_type == "user":
+            if hasattr(self, "memory_config") and self.memory_config is not None:
+                return self.memory_config.get("config", {}).get("user_id")
+            else:
+                return None
+        return None

-    def _get_agent_name(self) -> str:
-        if not self.crew:
-            return ""
-
-        agents = self.crew.agents
+    def _get_agent_name(self):
+        agents = self.crew.agents if self.crew else []
         agents = [self._sanitize_role(agent.role) for agent in agents]
         agents = "_".join(agents)
         return agents
-
-    def _get_config(self) -> Dict[str, Any]:
-        return self.config or getattr(self, "memory_config", {}).get("config", {}) or {}
-
-    def reset(self):
-        if self.memory:
-            self.memory.reset()

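A side note on the `-` version of `save()` above: it collapses every memory type into one `params` dict and a single `self.memory.add(...)` call, merging the shared output format with Python's dict-union operator (`|`, 3.9+). Below is a minimal runnable sketch of that dispatch pattern; the branch set is condensed to two cases and the result is printed instead of being handed to a real Mem0 client.

from typing import Any, Dict


def build_add_params(
    memory_type: str, agent_name: str, user_id: str, metadata: Dict[str, Any]
) -> Dict[str, Any]:
    # One params dict per memory type; shared keys are merged by the caller.
    if memory_type == "short_term":
        return {
            "agent_id": agent_name,
            "infer": False,
            "metadata": {"type": "short_term", **metadata},
        }
    if memory_type == "external":
        return {
            "user_id": user_id,
            "agent_id": agent_name,
            "metadata": {"type": "external", **metadata},
        }
    return {}


params = build_add_params("external", "researcher", "john", {"topic": "ai"})
if params:
    # dict | dict returns a merged copy without mutating either operand
    print(params | {"output_format": "v1.1"})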
View File

@@ -1,4 +1,3 @@
-import warnings
 from typing import Any, Dict, Optional

 from crewai.memory.memory import Memory
@@ -13,12 +12,6 @@ class UserMemory(Memory):
     """

     def __init__(self, crew=None):
-        warnings.warn(
-            "UserMemory is deprecated and will be removed in a future version. "
-            "Please use ExternalMemory instead.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
         try:
             from crewai.memory.storage.mem0_storage import Mem0Storage
         except ImportError:
@@ -50,9 +43,3 @@ class UserMemory(Memory):
             score_threshold=score_threshold,
         )
         return results
-
-    def reset(self) -> None:
-        try:
-            self.storage.reset()
-        except Exception as e:
-            raise Exception(f"An error occurred while resetting the user memory: {e}")

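The `warnings.warn` block removed above is the standard deprecation recipe: emit `DeprecationWarning` with `stacklevel=2` so the warning is attributed to the caller's line rather than to `__init__` itself. A self-contained illustration (the class here is generic, not CrewAI's):

import warnings


class OldStore:
    def __init__(self) -> None:
        warnings.warn(
            "OldStore is deprecated; use NewStore instead.",
            DeprecationWarning,
            stacklevel=2,  # point the warning at the calling line
        )


warnings.simplefilter("always", DeprecationWarning)
OldStore()  # the reported location is this line, not __init__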
View File

@@ -137,11 +137,13 @@ def CrewBase(cls: T) -> T:
                 all_functions, "is_cache_handler"
             )
             callbacks = self._filter_functions(all_functions, "is_callback")
+            agents = self._filter_functions(all_functions, "is_agent")

             for agent_name, agent_info in self.agents_config.items():
                 self._map_agent_variables(
                     agent_name,
                     agent_info,
+                    agents,
                     llms,
                     tool_functions,
                     cache_handler_functions,
@@ -152,6 +154,7 @@ def CrewBase(cls: T) -> T:
             self,
             agent_name: str,
             agent_info: Dict[str, Any],
+            agents: Dict[str, Callable],
             llms: Dict[str, Callable],
             tool_functions: Dict[str, Callable],
             cache_handler_functions: Dict[str, Callable],
@@ -169,10 +172,9 @@ def CrewBase(cls: T) -> T:
                 ]

             if function_calling_llm := agent_info.get("function_calling_llm"):
-                try:
-                    self.agents_config[agent_name]["function_calling_llm"] = llms[function_calling_llm]()
-                except KeyError:
-                    self.agents_config[agent_name]["function_calling_llm"] = function_calling_llm
+                self.agents_config[agent_name]["function_calling_llm"] = agents[
+                    function_calling_llm
+                ]()

             if step_callback := agent_info.get("step_callback"):
                 self.agents_config[agent_name]["step_callback"] = callbacks[

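The `-` side of `_map_agent_variables` resolves `function_calling_llm` defensively: if the name matches a registered LLM factory it is called, and on `KeyError` the raw string is kept (useful when the YAML holds a plain model name instead of a decorated method). A reduced sketch of that fallback, with a made-up registry:

from typing import Callable, Dict

llms: Dict[str, Callable[[], str]] = {"fast_llm": lambda: "LLM(gpt-4o-mini)"}


def resolve_function_calling_llm(name: str) -> str:
    try:
        return llms[name]()  # a registered factory under this name
    except KeyError:
        return name  # fall back to treating the value as a literal model name


print(resolve_function_calling_llm("fast_llm"))  # LLM(gpt-4o-mini)
print(resolve_function_calling_llm("gpt-4o"))    # gpt-4o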
View File

@@ -2,7 +2,6 @@ import datetime
 import inspect
 import json
 import logging
-import re
 import threading
 import uuid
 from concurrent.futures import Future
@@ -50,7 +49,6 @@ from crewai.utilities.events import (
 from crewai.utilities.events.crewai_event_bus import crewai_event_bus
 from crewai.utilities.i18n import I18N
 from crewai.utilities.printer import Printer
-from crewai.utilities.string_utils import interpolate_only


 class Task(BaseModel):
@@ -388,7 +386,7 @@ class Task(BaseModel):
         tools = tools or self.tools or []
         self.processed_by_agents.add(agent.role)

-        crewai_event_bus.emit(self, TaskStartedEvent(context=context, task=self))
+        crewai_event_bus.emit(self, TaskStartedEvent(context=context))
         result = agent.execute_task(
             task=self,
             context=context,
@@ -464,11 +462,11 @@ class Task(BaseModel):
                     )
                 )
                 self._save_file(content)
-            crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output, task=self))
+            crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output))
             return task_output
         except Exception as e:
             self.end_time = datetime.datetime.now()
-            crewai_event_bus.emit(self, TaskFailedEvent(error=str(e), task=self))
+            crewai_event_bus.emit(self, TaskFailedEvent(error=str(e)))
             raise e  # Re-raise the exception after emitting the event

     def prompt(self) -> str:
@@ -509,9 +507,7 @@ class Task(BaseModel):
             return

         try:
-            self.description = interpolate_only(
-                input_string=self._original_description, inputs=inputs
-            )
+            self.description = self._original_description.format(**inputs)
         except KeyError as e:
             raise ValueError(
                 f"Missing required template variable '{e.args[0]}' in description"
@@ -520,7 +516,7 @@ class Task(BaseModel):
             raise ValueError(f"Error interpolating description: {str(e)}") from e

         try:
-            self.expected_output = interpolate_only(
+            self.expected_output = self.interpolate_only(
                 input_string=self._original_expected_output, inputs=inputs
             )
         except (KeyError, ValueError) as e:
@@ -528,7 +524,7 @@ class Task(BaseModel):
         if self.output_file is not None:
             try:
-                self.output_file = interpolate_only(
+                self.output_file = self.interpolate_only(
                     input_string=self._original_output_file, inputs=inputs
                 )
             except (KeyError, ValueError) as e:
@@ -559,6 +555,72 @@ class Task(BaseModel):
                 f"\n\n{conversation_instruction}\n\n{conversation_history}"
             )

+    def interpolate_only(
+        self,
+        input_string: Optional[str],
+        inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]],
+    ) -> str:
+        """Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.
+
+        Args:
+            input_string: The string containing template variables to interpolate.
+                Can be None or empty, in which case an empty string is returned.
+            inputs: Dictionary mapping template variables to their values.
+                Supported value types are strings, integers, floats, and dicts/lists
+                containing only these types and other nested dicts/lists.
+
+        Returns:
+            The interpolated string with all template variables replaced with their values.
+            Empty string if input_string is None or empty.
+
+        Raises:
+            ValueError: If a value contains unsupported types
+        """
+
+        # Validation function for recursive type checking
+        def validate_type(value: Any) -> None:
+            if value is None:
+                return
+            if isinstance(value, (str, int, float, bool)):
+                return
+            if isinstance(value, (dict, list)):
+                for item in value.values() if isinstance(value, dict) else value:
+                    validate_type(item)
+                return
+            raise ValueError(
+                f"Unsupported type {type(value).__name__} in inputs. "
+                "Only str, int, float, bool, dict, and list are allowed."
+            )
+
+        # Validate all input values
+        for key, value in inputs.items():
+            try:
+                validate_type(value)
+            except ValueError as e:
+                raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e
+
+        if input_string is None or not input_string:
+            return ""
+        if "{" not in input_string and "}" not in input_string:
+            return input_string
+        if not inputs:
+            raise ValueError(
+                "Inputs dictionary cannot be empty when interpolating variables"
+            )
+        try:
+            escaped_string = input_string.replace("{", "{{").replace("}", "}}")
+
+            for key in inputs.keys():
+                escaped_string = escaped_string.replace(f"{{{{{key}}}}}", f"{{{key}}}")
+
+            return escaped_string.format(**inputs)
+        except KeyError as e:
+            raise KeyError(
+                f"Template variable '{e.args[0]}' not found in inputs dictionary"
+            ) from e
+        except ValueError as e:
+            raise ValueError(f"Error during string interpolation: {str(e)}") from e
+
     def increment_tools_errors(self) -> None:
         """Increment the tools errors counter."""
         self.tools_errors += 1
@@ -572,15 +634,7 @@ class Task(BaseModel):
     def copy(
         self, agents: List["BaseAgent"], task_mapping: Dict[str, "Task"]
     ) -> "Task":
-        """Creates a deep copy of the Task while preserving its original class type.
-
-        Args:
-            agents: List of agents available for the task.
-            task_mapping: Dictionary mapping task IDs to Task instances.
-
-        Returns:
-            A copy of the task with the same class type as the original.
-        """
+        """Create a deep copy of the Task."""
         exclude = {
             "id",
             "agent",
@@ -603,7 +657,7 @@ class Task(BaseModel):
         cloned_agent = get_agent_by_role(self.agent.role) if self.agent else None
         cloned_tools = copy(self.tools) if self.tools else []

-        copied_task = self.__class__(
+        copied_task = Task(
             **copied_data,
             context=cloned_context,
             agent=cloned_agent,

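The brace-escaping trick inside `interpolate_only` is what lets literal JSON coexist with `{placeholders}`: every brace is doubled (so `str.format` treats it as literal), then only the known input keys are un-escaped back into real placeholders. Condensed to its core, under the same inputs contract as the method above:

template = 'Summarize {topic}. Reply as JSON: {"topic": "...", "points": []}'
inputs = {"topic": "flows"}

# 1) escape everything, 2) re-enable only known keys, 3) format
escaped = template.replace("{", "{{").replace("}", "}}")
for key in inputs:
    escaped = escaped.replace(f"{{{{{key}}}}}", f"{{{key}}}")
print(escaped.format(**inputs))
# Summarize flows. Reply as JSON: {"topic": "...", "points": []}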
View File

@@ -1,2 +0,0 @@
CREWAI_TELEMETRY_BASE_URL: str = "https://telemetry.crewai.com:4319"
CREWAI_TELEMETRY_SERVICE_NAME: str = "crewAI-telemetry"

View File

@@ -9,11 +9,6 @@ from contextlib import contextmanager
 from importlib.metadata import version
 from typing import TYPE_CHECKING, Any, Optional

-from crewai.telemetry.constants import (
-    CREWAI_TELEMETRY_BASE_URL,
-    CREWAI_TELEMETRY_SERVICE_NAME,
-)


 @contextmanager
 def suppress_warnings():
@@ -50,22 +45,23 @@ class Telemetry:
     """

     def __init__(self):
-        self.ready: bool = False
-        self.trace_set: bool = False
+        self.ready = False
+        self.trace_set = False

-        if self._is_telemetry_disabled():
+        if os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true":
             return

         try:
+            telemetry_endpoint = "https://telemetry.crewai.com:4319"
             self.resource = Resource(
-                attributes={SERVICE_NAME: CREWAI_TELEMETRY_SERVICE_NAME},
+                attributes={SERVICE_NAME: "crewAI-telemetry"},
             )
             with suppress_warnings():
                 self.provider = TracerProvider(resource=self.resource)

                processor = BatchSpanProcessor(
                    OTLPSpanExporter(
-                        endpoint=f"{CREWAI_TELEMETRY_BASE_URL}/v1/traces",
+                        endpoint=f"{telemetry_endpoint}/v1/traces",
                        timeout=30,
                    )
                )
@@ -80,13 +76,6 @@ class Telemetry:
                 raise  # Re-raise the exception to not interfere with system signals
             self.ready = False

-    def _is_telemetry_disabled(self) -> bool:
-        """Check if telemetry should be disabled based on environment variables."""
-        return (
-            os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true"
-            or os.getenv("CREWAI_DISABLE_TELEMETRY", "false").lower() == "true"
-        )
-
     def set_tracer(self):
         if self.ready and not self.trace_set:
             try:
@@ -123,23 +112,6 @@ class Telemetry:
             self._add_attribute(span, "crew_memory", crew.memory)
             self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
             self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
-
-            # Add fingerprint data
-            if hasattr(crew, "fingerprint") and crew.fingerprint:
-                self._add_attribute(span, "crew_fingerprint", crew.fingerprint.uuid_str)
-                self._add_attribute(
-                    span,
-                    "crew_fingerprint_created_at",
-                    crew.fingerprint.created_at.isoformat(),
-                )
-                # Add fingerprint metadata if it exists
-                if hasattr(crew.fingerprint, "metadata") and crew.fingerprint.metadata:
-                    self._add_attribute(
-                        span,
-                        "crew_fingerprint_metadata",
-                        json.dumps(crew.fingerprint.metadata),
-                    )
-
             if crew.share_crew:
                 self._add_attribute(
                     span,
@@ -157,43 +129,17 @@ class Telemetry:
                             "max_rpm": agent.max_rpm,
                             "i18n": agent.i18n.prompt_file,
                             "function_calling_llm": (
-                                getattr(
-                                    getattr(agent, "function_calling_llm", None),
-                                    "model",
-                                    "",
-                                )
-                                if getattr(agent, "function_calling_llm", None)
+                                agent.function_calling_llm.model
+                                if agent.function_calling_llm
                                 else ""
                             ),
                             "llm": agent.llm.model,
                             "delegation_enabled?": agent.allow_delegation,
-                            "allow_code_execution?": getattr(
-                                agent, "allow_code_execution", False
-                            ),
-                            "max_retry_limit": getattr(agent, "max_retry_limit", 3),
+                            "allow_code_execution?": agent.allow_code_execution,
+                            "max_retry_limit": agent.max_retry_limit,
                             "tools_names": [
                                 tool.name.casefold() for tool in agent.tools or []
                             ],
-                            # Add agent fingerprint data if sharing crew details
-                            "fingerprint": (
-                                getattr(
-                                    getattr(agent, "fingerprint", None),
-                                    "uuid_str",
-                                    None,
-                                )
-                            ),
-                            "fingerprint_created_at": (
-                                created_at.isoformat()
-                                if (
-                                    created_at := getattr(
-                                        getattr(agent, "fingerprint", None),
-                                        "created_at",
-                                        None,
-                                    )
-                                )
-                                is not None
-                                else None
-                            ),
                         }
                         for agent in crew.agents
                     ]
@@ -223,17 +169,6 @@ class Telemetry:
                             "tools_names": [
                                 tool.name.casefold() for tool in task.tools or []
                             ],
-                            # Add task fingerprint data if sharing crew details
-                            "fingerprint": (
-                                task.fingerprint.uuid_str
-                                if hasattr(task, "fingerprint") and task.fingerprint
-                                else None
-                            ),
-                            "fingerprint_created_at": (
-                                task.fingerprint.created_at.isoformat()
-                                if hasattr(task, "fingerprint") and task.fingerprint
-                                else None
-                            ),
                         }
                         for task in crew.tasks
                     ]
@@ -261,20 +196,14 @@ class Telemetry:
                         "max_iter": agent.max_iter,
                         "max_rpm": agent.max_rpm,
                         "function_calling_llm": (
-                            getattr(
-                                getattr(agent, "function_calling_llm", None),
-                                "model",
-                                "",
-                            )
-                            if getattr(agent, "function_calling_llm", None)
+                            agent.function_calling_llm.model
+                            if agent.function_calling_llm
                             else ""
                         ),
                         "llm": agent.llm.model,
                         "delegation_enabled?": agent.allow_delegation,
-                        "allow_code_execution?": getattr(
-                            agent, "allow_code_execution", False
-                        ),
-                        "max_retry_limit": getattr(agent, "max_retry_limit", 3),
+                        "allow_code_execution?": agent.allow_code_execution,
+                        "max_retry_limit": agent.max_retry_limit,
                         "tools_names": [
                             tool.name.casefold() for tool in agent.tools or []
                         ],
@@ -323,39 +252,6 @@ class Telemetry:
             self._add_attribute(created_span, "task_key", task.key)
             self._add_attribute(created_span, "task_id", str(task.id))

-            # Add fingerprint data
-            if hasattr(crew, "fingerprint") and crew.fingerprint:
-                self._add_attribute(
-                    created_span, "crew_fingerprint", crew.fingerprint.uuid_str
-                )
-
-            if hasattr(task, "fingerprint") and task.fingerprint:
-                self._add_attribute(
-                    created_span, "task_fingerprint", task.fingerprint.uuid_str
-                )
-                self._add_attribute(
-                    created_span,
-                    "task_fingerprint_created_at",
-                    task.fingerprint.created_at.isoformat(),
-                )
-                # Add fingerprint metadata if it exists
-                if hasattr(task.fingerprint, "metadata") and task.fingerprint.metadata:
-                    self._add_attribute(
-                        created_span,
-                        "task_fingerprint_metadata",
-                        json.dumps(task.fingerprint.metadata),
-                    )
-
-            # Add agent fingerprint if task has an assigned agent
-            if hasattr(task, "agent") and task.agent:
-                agent_fingerprint = getattr(
-                    getattr(task.agent, "fingerprint", None), "uuid_str", None
-                )
-                if agent_fingerprint:
-                    self._add_attribute(
-                        created_span, "agent_fingerprint", agent_fingerprint
-                    )
-
             if crew.share_crew:
                 self._add_attribute(
                     created_span, "formatted_description", task.description
@@ -374,21 +270,6 @@ class Telemetry:
             self._add_attribute(span, "task_key", task.key)
             self._add_attribute(span, "task_id", str(task.id))

-            # Add fingerprint data to execution span
-            if hasattr(crew, "fingerprint") and crew.fingerprint:
-                self._add_attribute(span, "crew_fingerprint", crew.fingerprint.uuid_str)
-
-            if hasattr(task, "fingerprint") and task.fingerprint:
-                self._add_attribute(span, "task_fingerprint", task.fingerprint.uuid_str)
-
-            # Add agent fingerprint if task has an assigned agent
-            if hasattr(task, "agent") and task.agent:
-                agent_fingerprint = getattr(
-                    getattr(task.agent, "fingerprint", None), "uuid_str", None
-                )
-                if agent_fingerprint:
-                    self._add_attribute(span, "agent_fingerprint", agent_fingerprint)
-
             if crew.share_crew:
                 self._add_attribute(span, "formatted_description", task.description)
                 self._add_attribute(
@@ -410,12 +291,7 @@ class Telemetry:
         Note:
            If share_crew is enabled, this will also record the task output
         """

         def operation():
-            # Ensure fingerprint data is present on completion span
-            if hasattr(task, "fingerprint") and task.fingerprint:
-                self._add_attribute(span, "task_fingerprint", task.fingerprint.uuid_str)
-
             if crew.share_crew:
                 self._add_attribute(
                     span,
@@ -436,7 +312,6 @@ class Telemetry:
             tool_name (str): Name of the tool being repeatedly used
             attempts (int): Number of attempts made with this tool
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Tool Repeated Usage")
@@ -454,16 +329,14 @@ class Telemetry:
         self._safe_telemetry_operation(operation)

-    def tool_usage(self, llm: Any, tool_name: str, attempts: int, agent: Any = None):
+    def tool_usage(self, llm: Any, tool_name: str, attempts: int):
         """Records the usage of a tool by an agent.

         Args:
             llm (Any): The language model being used
             tool_name (str): Name of the tool being used
             attempts (int): Number of attempts made with this tool
-            agent (Any, optional): The agent using the tool
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Tool Usage")
@@ -476,31 +349,17 @@ class Telemetry:
             self._add_attribute(span, "attempts", attempts)
             if llm:
                 self._add_attribute(span, "llm", llm.model)
-
-            # Add agent fingerprint data if available
-            if agent and hasattr(agent, "fingerprint") and agent.fingerprint:
-                self._add_attribute(
-                    span, "agent_fingerprint", agent.fingerprint.uuid_str
-                )
-                if hasattr(agent, "role"):
-                    self._add_attribute(span, "agent_role", agent.role)
-
             span.set_status(Status(StatusCode.OK))
             span.end()

         self._safe_telemetry_operation(operation)

-    def tool_usage_error(
-        self, llm: Any, agent: Any = None, tool_name: Optional[str] = None
-    ):
+    def tool_usage_error(self, llm: Any):
         """Records when a tool usage results in an error.

         Args:
             llm (Any): The language model being used when the error occurred
-            agent (Any, optional): The agent using the tool
-            tool_name (str, optional): Name of the tool that caused the error
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Tool Usage Error")
@@ -511,18 +370,6 @@ class Telemetry:
             )
             if llm:
                 self._add_attribute(span, "llm", llm.model)
-
-            if tool_name:
-                self._add_attribute(span, "tool_name", tool_name)
-
-            # Add agent fingerprint data if available
-            if agent and hasattr(agent, "fingerprint") and agent.fingerprint:
-                self._add_attribute(
-                    span, "agent_fingerprint", agent.fingerprint.uuid_str
-                )
-                if hasattr(agent, "role"):
-                    self._add_attribute(span, "agent_role", agent.role)
-
             span.set_status(Status(StatusCode.OK))
             span.end()
@@ -539,7 +386,6 @@ class Telemetry:
             exec_time (int): Execution time in seconds
             model_name (str): Name of the model used
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Crew Individual Test Result")
@@ -574,7 +420,6 @@ class Telemetry:
             inputs (dict[str, Any] | None): Input parameters for the test
             model_name (str): Name of the model used in testing
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Crew Test Execution")
@@ -601,7 +446,6 @@ class Telemetry:
     def deploy_signup_error_span(self):
         """Records when an error occurs during the deployment signup process."""
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Deploy Signup Error")
@@ -616,7 +460,6 @@ class Telemetry:
         Args:
             uuid (Optional[str]): Unique identifier for the deployment
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Start Deployment")
@@ -629,7 +472,6 @@ class Telemetry:
     def create_crew_deployment_span(self):
         """Records the creation of a new crew deployment."""
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Create Crew Deployment")
@@ -645,7 +487,6 @@ class Telemetry:
             uuid (Optional[str]): Unique identifier for the crew
             log_type (str, optional): Type of logs being retrieved. Defaults to "deployment".
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Get Crew Logs")
@@ -663,7 +504,6 @@ class Telemetry:
         Args:
             uuid (Optional[str]): Unique identifier for the crew being removed
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Remove Crew")
@@ -794,7 +634,6 @@ class Telemetry:
         Args:
             flow_name (str): Name of the flow being created
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Flow Creation")
@@ -811,7 +650,6 @@ class Telemetry:
             flow_name (str): Name of the flow being plotted
             node_names (list[str]): List of node names in the flow
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Flow Plotting")
@@ -829,7 +667,6 @@ class Telemetry:
             flow_name (str): Name of the flow being executed
             node_names (list[str]): List of nodes being executed in the flow
         """
-
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Flow Execution")

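The removed `_is_telemetry_disabled` helper recognized two opt-out switches, `OTEL_SDK_DISABLED` and `CREWAI_DISABLE_TELEMETRY`, each compared case-insensitively against "true"; the `+` side keeps only the first. A reduced sketch of the check:

import os


def is_telemetry_disabled() -> bool:
    return (
        os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true"
        or os.getenv("CREWAI_DISABLE_TELEMETRY", "false").lower() == "true"
    )


os.environ["CREWAI_DISABLE_TELEMETRY"] = "TRUE"
print(is_telemetry_disabled())  # True: mixed case still counts as opted out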
View File

@@ -1,4 +1,3 @@
-import asyncio
 import warnings
 from abc import ABC, abstractmethod
 from inspect import signature
@@ -8,27 +7,29 @@ from pydantic import (
     BaseModel,
     ConfigDict,
     Field,
+    PydanticDeprecatedSince20,
     create_model,
-    field_validator,
+    validator,
 )
 from pydantic import BaseModel as PydanticBaseModel

 from crewai.tools.structured_tool import CrewStructuredTool

+# Ignore all "PydanticDeprecatedSince20" warnings globally
+warnings.filterwarnings("ignore", category=PydanticDeprecatedSince20)


 class BaseTool(BaseModel, ABC):
     class _ArgsSchemaPlaceholder(PydanticBaseModel):
         pass

-    model_config = ConfigDict(arbitrary_types_allowed=True)
+    model_config = ConfigDict()

     name: str
     """The unique name of the tool that clearly communicates its purpose."""
     description: str
     """Used to tell the model how/when/why to use the tool."""
-    args_schema: Type[PydanticBaseModel] = Field(
-        default_factory=_ArgsSchemaPlaceholder, validate_default=True
-    )
+    args_schema: Type[PydanticBaseModel] = Field(default_factory=_ArgsSchemaPlaceholder)
     """The schema for the arguments that the tool accepts."""
     description_updated: bool = False
     """Flag to check if the description has been updated."""
@@ -37,8 +38,7 @@ class BaseTool(BaseModel, ABC):
     result_as_answer: bool = False
     """Flag to check if the tool should be the final agent answer."""

-    @field_validator("args_schema", mode="before")
-    @classmethod
+    @validator("args_schema", always=True, pre=True)
     def _default_args_schema(
         cls, v: Type[PydanticBaseModel]
     ) -> Type[PydanticBaseModel]:
@@ -66,13 +66,7 @@ class BaseTool(BaseModel, ABC):
         **kwargs: Any,
     ) -> Any:
         print(f"Using Tool: {self.name}")
-        result = self._run(*args, **kwargs)
-
-        # If _run is async, we safely run it
-        if asyncio.iscoroutine(result):
-            return asyncio.run(result)
-
-        return result
+        return self._run(*args, **kwargs)

     @abstractmethod
     def _run(
@@ -251,13 +245,9 @@ def to_langchain(
     return [t.to_structured_tool() if isinstance(t, BaseTool) else t for t in tools]


-def tool(*args, result_as_answer=False):
+def tool(*args):
     """
     Decorator to create a tool from a function.
-
-    Args:
-        *args: Positional arguments, either the function to decorate or the tool name.
-        result_as_answer: Flag to indicate if the tool result should be used as the final agent answer.
     """

     def _make_with_name(tool_name: str) -> Callable:
@@ -283,7 +273,6 @@ def tool(*args, result_as_answer=False):
                 description=f.__doc__,
                 func=f,
                 args_schema=args_schema,
-                result_as_answer=result_as_answer,
             )

     return _make_tool

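The removed `run()` body lets `_run` be either sync or async: it calls `_run` first, and only if the returned object is a coroutine does it drive it with `asyncio.run`. A standalone sketch of that dispatch (the tool class is illustrative, and `asyncio.run` assumes no event loop is already running):

import asyncio


class EchoTool:
    async def _run(self, text: str) -> str:
        await asyncio.sleep(0)  # stand-in for real async work
        return text

    def run(self, *args, **kwargs):
        result = self._run(*args, **kwargs)
        if asyncio.iscoroutine(result):
            # _run was a coroutine function: drive it to completion
            return asyncio.run(result)
        return result


print(EchoTool().run("hello"))  # hello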
View File

@@ -1,9 +0,0 @@
from dataclasses import dataclass
@dataclass
class ToolResult:
"""Result of tool execution."""
result: str
result_as_answer: bool = False

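`ToolResult` is a plain dataclass, so consumers get a constructor, repr, and equality for free; per the `BaseTool` docstring above, `result_as_answer=True` marks the tool output as the final agent answer. Minimal usage, mirroring the removed file:

from dataclasses import dataclass


@dataclass
class ToolResult:
    result: str
    result_as_answer: bool = False


res = ToolResult(result="42", result_as_answer=True)
if res.result_as_answer:
    print(f"Final answer taken directly from tool: {res.result}")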
View File

@@ -5,7 +5,7 @@ import time
 from difflib import SequenceMatcher
 from json import JSONDecodeError
 from textwrap import dedent
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union

 import json5
 from json_repair import repair_json
@@ -13,26 +13,18 @@ from json_repair import repair_json
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.task import Task
 from crewai.telemetry import Telemetry
+from crewai.tools import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
-from crewai.utilities import I18N, Converter, Printer
-from crewai.utilities.agent_utils import (
-    get_tool_names,
-    render_text_description_and_args,
-)
+from crewai.utilities import I18N, Converter, ConverterError, Printer
 from crewai.utilities.events.crewai_event_bus import crewai_event_bus
 from crewai.utilities.events.tool_usage_events import (
     ToolSelectionErrorEvent,
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
-    ToolUsageStartedEvent,
     ToolValidateInputErrorEvent,
 )

-if TYPE_CHECKING:
-    from crewai.agents.agent_builder.base_agent import BaseAgent
-    from crewai.lite_agent import LiteAgent
-
 OPENAI_BIGGER_MODELS = [
     "gpt-4",
     "gpt-4o",
@@ -68,29 +60,31 @@ class ToolUsage:

     def __init__(
         self,
-        tools_handler: Optional[ToolsHandler],
-        tools: List[CrewStructuredTool],
-        task: Optional[Task],
+        tools_handler: ToolsHandler,
+        tools: List[BaseTool],
+        original_tools: List[Any],
+        tools_description: str,
+        tools_names: str,
+        task: Task,
         function_calling_llm: Any,
-        agent: Optional[Union["BaseAgent", "LiteAgent"]] = None,
-        action: Any = None,
-        fingerprint_context: Optional[Dict[str, str]] = None,
+        agent: Any,
+        action: Any,
     ) -> None:
-        self._i18n: I18N = agent.i18n if agent else I18N()
+        self._i18n: I18N = agent.i18n
         self._printer: Printer = Printer()
         self._telemetry: Telemetry = Telemetry()
         self._run_attempts: int = 1
         self._max_parsing_attempts: int = 3
         self._remember_format_after_usages: int = 3
         self.agent = agent
-        self.tools_description = render_text_description_and_args(tools)
-        self.tools_names = get_tool_names(tools)
+        self.tools_description = tools_description
+        self.tools_names = tools_names
         self.tools_handler = tools_handler
+        self.original_tools = original_tools
         self.tools = tools
         self.task = task
         self.action = action
         self.function_calling_llm = function_calling_llm
-        self.fingerprint_context = fingerprint_context or {}

         # Set the maximum parsing attempts for bigger models
         if (
@@ -109,35 +103,29 @@ class ToolUsage:
     ) -> str:
         if isinstance(calling, ToolUsageErrorException):
             error = calling.message
-            if self.agent and self.agent.verbose:
+            if self.agent.verbose:
                 self._printer.print(content=f"\n\n{error}\n", color="red")
-            if self.task:
-                self.task.increment_tools_errors()
+            self.task.increment_tools_errors()
             return error

         try:
             tool = self._select_tool(calling.tool_name)
         except Exception as e:
             error = getattr(e, "message", str(e))
-            if self.task:
-                self.task.increment_tools_errors()
-            if self.agent and self.agent.verbose:
+            self.task.increment_tools_errors()
+            if self.agent.verbose:
                 self._printer.print(content=f"\n\n{error}\n", color="red")
             return error

-        if (
-            isinstance(tool, CrewStructuredTool)
-            and tool.name == self._i18n.tools("add_image")["name"]  # type: ignore
-        ):
+        if isinstance(tool, CrewStructuredTool) and tool.name == self._i18n.tools("add_image")["name"]:  # type: ignore
             try:
                 result = self._use(tool_string=tool_string, tool=tool, calling=calling)
                 return result
             except Exception as e:
                 error = getattr(e, "message", str(e))
-                if self.task:
-                    self.task.increment_tools_errors()
-                if self.agent and self.agent.verbose:
+                self.task.increment_tools_errors()
+                if self.agent.verbose:
                     self._printer.print(content=f"\n\n{error}\n", color="red")
                 return error
@@ -146,9 +134,9 @@ class ToolUsage:
     def _use(
         self,
         tool_string: str,
-        tool: CrewStructuredTool,
+        tool: Any,
         calling: Union[ToolCalling, InstructorToolCalling],
-    ) -> str:
+    ) -> str:  # TODO: Fix this return type
         if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
             try:
                 result = self._i18n.errors("task_repeated_usage").format(
@@ -163,44 +151,24 @@ class ToolUsage:
                 return result  # type: ignore # Fix the return type of this function
             except Exception:
-                if self.task:
-                    self.task.increment_tools_errors()
-
-        if self.agent:
-            event_data = {
-                "agent_key": self.agent.key,
-                "agent_role": self.agent.role,
-                "tool_name": self.action.tool,
-                "tool_args": self.action.tool_input,
-                "tool_class": self.action.tool,
-                "agent": self.agent,
-            }
-            if self.agent.fingerprint:
-                event_data.update(self.agent.fingerprint)
-            crewai_event_bus.emit(self, ToolUsageStartedEvent(**event_data))
+                self.task.increment_tools_errors()

         started_at = time.time()
         from_cache = False
-        result = None  # type: ignore

-        if self.tools_handler and self.tools_handler.cache:
-            result = self.tools_handler.cache.read(
+        result = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
+        # check if cache is available
+        if self.tools_handler.cache:
+            result = self.tools_handler.cache.read(  # type: ignore # Incompatible types in assignment (expression has type "str | None", variable has type "str")
                 tool=calling.tool_name, input=calling.arguments
-            )  # type: ignore
+            )
             from_cache = result is not None

-        available_tool = next(
-            (
-                available_tool
-                for available_tool in self.tools
-                if available_tool.name == tool.name
-            ),
-            None,
+        original_tool = next(
+            (ot for ot in self.original_tools if ot.name == tool.name), None
         )

-        if result is None:
+        if result is None:  #! finecwg: if not result --> if result is None
             try:
                 if calling.tool_name in [
                     "Delegate work to coworker",
@@ -209,31 +177,22 @@ class ToolUsage:
                     coworker = (
                         calling.arguments.get("coworker") if calling.arguments else None
                     )
-                    if self.task:
-                        self.task.increment_delegations(coworker)
+                    self.task.increment_delegations(coworker)

                 if calling.arguments:
                     try:
-                        acceptable_args = tool.args_schema.model_json_schema()[
-                            "properties"
-                        ].keys()  # type: ignore
+                        acceptable_args = tool.args_schema.model_json_schema()["properties"].keys()  # type: ignore
                         arguments = {
                             k: v
                             for k, v in calling.arguments.items()
                             if k in acceptable_args
                         }
-                        # Add fingerprint metadata if available
-                        arguments = self._add_fingerprint_metadata(arguments)
                         result = tool.invoke(input=arguments)
                     except Exception:
                         arguments = calling.arguments
-                        # Add fingerprint metadata if available
-                        arguments = self._add_fingerprint_metadata(arguments)
                         result = tool.invoke(input=arguments)
                 else:
-                    # Add fingerprint metadata even to empty arguments
-                    arguments = self._add_fingerprint_metadata({})
-                    result = tool.invoke(input=arguments)
+                    result = tool.invoke(input={})
             except Exception as e:
                 self.on_tool_error(tool=tool, tool_calling=calling, e=e)
                 self._run_attempts += 1
@@ -243,27 +202,25 @@ class ToolUsage:
                         error=e, tool=tool.name, tool_inputs=tool.description
                     )
                     error = ToolUsageErrorException(
-                        f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
+                        f'\n{error_message}.\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
                    ).message
-                    if self.task:
-                        self.task.increment_tools_errors()
-                    if self.agent and self.agent.verbose:
+                    self.task.increment_tools_errors()
+                    if self.agent.verbose:
                         self._printer.print(
                             content=f"\n\n{error_message}\n", color="red"
                         )
                     return error  # type: ignore # No return value expected

-                if self.task:
-                    self.task.increment_tools_errors()
+                self.task.increment_tools_errors()
                 return self.use(calling=calling, tool_string=tool_string)  # type: ignore # No return value expected

             if self.tools_handler:
                 should_cache = True
                 if (
-                    hasattr(available_tool, "cache_function")
-                    and available_tool.cache_function  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
+                    hasattr(original_tool, "cache_function")
+                    and original_tool.cache_function  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
                 ):
-                    should_cache = available_tool.cache_function(  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
+                    should_cache = original_tool.cache_function(  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
                         calling.arguments, result
                     )
@@ -287,50 +244,44 @@ class ToolUsage:
                 tool_calling=calling,
                 from_cache=from_cache,
                 started_at=started_at,
-                result=result,
             )

             if (
-                hasattr(available_tool, "result_as_answer")
-                and available_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
+                hasattr(original_tool, "result_as_answer")
+                and original_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "cache_function"
             ):
-                result_as_answer = available_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
-                data["result_as_answer"] = result_as_answer  # type: ignore
+                result_as_answer = original_tool.result_as_answer  # type: ignore # Item "None" of "Any | None" has no attribute "result_as_answer"
+                data["result_as_answer"] = result_as_answer

-            if self.agent and hasattr(self.agent, "tools_results"):
-                self.agent.tools_results.append(data)
+            self.agent.tools_results.append(data)

         return result  # type: ignore # No return value expected

-    def _format_result(self, result: Any) -> str:
-        if self.task:
-            self.task.used_tools += 1
-        if self._should_remember_format():
-            result = self._remember_format(result=result)
-        return str(result)
+    def _format_result(self, result: Any) -> None:
+        self.task.used_tools += 1
+        if self._should_remember_format():  # type: ignore # "_should_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
+            result = self._remember_format(result=result)  # type: ignore # "_remember_format" of "ToolUsage" does not return a value (it only ever returns None)
+        return result

     def _should_remember_format(self) -> bool:
-        if self.task:
-            return self.task.used_tools % self._remember_format_after_usages == 0
-        return False
+        return self.task.used_tools % self._remember_format_after_usages == 0

-    def _remember_format(self, result: str) -> str:
+    def _remember_format(self, result: str) -> None:
         result = str(result)
         result += "\n\n" + self._i18n.slice("tools").format(
             tools=self.tools_description, tool_names=self.tools_names
         )
-        return result
+        return result  # type: ignore # No return value expected

     def _check_tool_repeated_usage(
         self, calling: Union[ToolCalling, InstructorToolCalling]
-    ) -> bool:
+    ) -> None:
         if not self.tools_handler:
-            return False
+            return False  # type: ignore # No return value expected
         if last_tool_usage := self.tools_handler.last_used_tool:
-            return (calling.tool_name == last_tool_usage.tool_name) and (
+            return (calling.tool_name == last_tool_usage.tool_name) and (  # type: ignore # No return value expected
                 calling.arguments == last_tool_usage.arguments
             )
-        return False

     def _select_tool(self, tool_name: str) -> Any:
         order_tools = sorted(
@@ -349,11 +300,10 @@ class ToolUsage:
                 > 0.85
             ):
                 return tool
-        if self.task:
-            self.task.increment_tools_errors()
-        tool_selection_data: Dict[str, Any] = {
-            "agent_key": getattr(self.agent, "key", None) if self.agent else None,
-            "agent_role": getattr(self.agent, "role", None) if self.agent else None,
+        self.task.increment_tools_errors()
+        tool_selection_data = {
+            "agent_key": self.agent.key,
+            "agent_role": self.agent.role,
             "tool_name": tool_name,
             "tool_args": {},
             "tool_class": self.tools_description,
@@ -386,9 +336,7 @@ class ToolUsage:
             descriptions.append(tool.description)
         return "\n--\n".join(descriptions)

-    def _function_calling(
-        self, tool_string: str
-    ) -> Union[ToolCalling, InstructorToolCalling]:
+    def _function_calling(self, tool_string: str):
         model = (
             InstructorToolCalling
             if self.function_calling_llm.supports_function_calling()
@@ -410,14 +358,18 @@ class ToolUsage:
             max_attempts=1,
         )
         tool_object = converter.to_pydantic()
-        if not isinstance(tool_object, (ToolCalling, InstructorToolCalling)):
-            raise ToolUsageErrorException("Failed to parse tool calling")
+        calling = ToolCalling(
+            tool_name=tool_object["tool_name"],
+            arguments=tool_object["arguments"],
+            log=tool_string,  # type: ignore
+        )

-        return tool_object
+        if isinstance(calling, ConverterError):
+            raise calling
+        return calling

-    def _original_tool_calling(
-        self, tool_string: str, raise_error: bool = False
-    ) -> Union[ToolCalling, InstructorToolCalling, ToolUsageErrorException]:
+    def _original_tool_calling(self, tool_string: str, raise_error: bool = False):
         tool_name = self.action.tool
         tool = self._select_tool(tool_name)
         try:
@@ -428,7 +380,7 @@ class ToolUsage:
                 raise
             else:
                 return ToolUsageErrorException(
-                    f"{self._i18n.errors('tool_arguments_error')}"
+                    f'{self._i18n.errors("tool_arguments_error")}'
                 )

         if not isinstance(arguments, dict):
@@ -436,17 +388,18 @@ class ToolUsage:
                 raise
             else:
                 return ToolUsageErrorException(
-                    f"{self._i18n.errors('tool_arguments_error')}"
+                    f'{self._i18n.errors("tool_arguments_error")}'
                 )

         return ToolCalling(
             tool_name=tool.name,
             arguments=arguments,
+            log=tool_string,
         )

     def _tool_calling(
         self, tool_string: str
-    ) -> Union[ToolCalling, InstructorToolCalling, ToolUsageErrorException]:
+    ) -> Union[ToolCalling, InstructorToolCalling]:
         try:
             try:
                 return self._original_tool_calling(tool_string, raise_error=True)
@@ -459,12 +412,11 @@ class ToolUsage:
             self._run_attempts += 1
             if self._run_attempts > self._max_parsing_attempts:
                 self._telemetry.tool_usage_error(llm=self.function_calling_llm)
-                if self.task:
-                    self.task.increment_tools_errors()
-                if self.agent and self.agent.verbose:
+                self.task.increment_tools_errors()
+                if self.agent.verbose:
                     self._printer.print(content=f"\n\n{e}\n", color="red")
                 return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
-                    f"{self._i18n.errors('tool_usage_error').format(error=e)}\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
+                    f'{self._i18n.errors("tool_usage_error").format(error=e)}\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
                )

             return self._tool_calling(tool_string)
@@ -491,7 +443,6 @@ class ToolUsage:
             if isinstance(arguments, dict):
                 return arguments
         except (ValueError, SyntaxError):
-            repaired_input = repair_json(tool_input)
             pass  # Continue to the next parsing attempt

         # Attempt 3: Parse as JSON5
@@ -504,7 +455,7 @@ class ToolUsage:
         # Attempt 4: Repair JSON
         try:
-            repaired_input = str(repair_json(tool_input, skip_json_loads=True))
+            repaired_input = repair_json(tool_input, skip_json_loads=True)
             self._printer.print(
                 content=f"Repaired JSON: {repaired_input}", color="blue"
             )
@@ -524,39 +475,24 @@ class ToolUsage:
     def _emit_validate_input_error(self, final_error: str):
         tool_selection_data = {
-            "agent_key": getattr(self.agent, "key", None) if self.agent else None,
-            "agent_role": getattr(self.agent, "role", None) if self.agent else None,
+            "agent_key": self.agent.key,
+            "agent_role": self.agent.role,
             "tool_name": self.action.tool,
             "tool_args": str(self.action.tool_input),
             "tool_class": self.__class__.__name__,
-            "agent": self.agent,  # Adding agent for fingerprint extraction
         }
-        # Include fingerprint context if available
-        if self.fingerprint_context:
-            tool_selection_data.update(self.fingerprint_context)

         crewai_event_bus.emit(
             self,
             ToolValidateInputErrorEvent(**tool_selection_data, error=final_error),
         )

-    def on_tool_error(
-        self,
-        tool: Any,
-        tool_calling: Union[ToolCalling, InstructorToolCalling],
-        e: Exception,
-    ) -> None:
+    def on_tool_error(self, tool: Any, tool_calling: ToolCalling, e: Exception) -> None:
         event_data = self._prepare_event_data(tool, tool_calling)
         crewai_event_bus.emit(self, ToolUsageErrorEvent(**{**event_data, "error": e}))

     def on_tool_use_finished(
-        self,
-        tool: Any,
-        tool_calling: Union[ToolCalling, InstructorToolCalling],
-        from_cache: bool,
-        started_at: float,
-        result: Any,
+        self, tool: Any, tool_calling: ToolCalling, from_cache: bool, started_at: float
     ) -> None:
         finished_at = time.time()
         event_data = self._prepare_event_data(tool, tool_calling)
@@ -565,75 +501,17 @@ class ToolUsage:
                 "started_at": datetime.datetime.fromtimestamp(started_at),
                 "finished_at": datetime.datetime.fromtimestamp(finished_at),
                 "from_cache": from_cache,
-                "output": result,
             }
         )
         crewai_event_bus.emit(self, ToolUsageFinishedEvent(**event_data))

-    def _prepare_event_data(
-        self, tool: Any, tool_calling: Union[ToolCalling, InstructorToolCalling]
-    ) -> dict:
-        event_data = {
-            "run_attempts": self._run_attempts,
-            "delegations": self.task.delegations if self.task else 0,
-            "tool_name": tool.name,
-            "tool_args": tool_calling.arguments,
-            "tool_class": tool.__class__.__name__,
-            "agent_key": (
-                getattr(self.agent, "key", "unknown") if self.agent else "unknown"
-            ),
-            "agent_role": (
-                getattr(self.agent, "_original_role", None)
-                or getattr(self.agent, "role", "unknown")
-                if self.agent
-                else "unknown"
-            ),
-        }
-        # Include fingerprint context if available
-        if self.fingerprint_context:
-            event_data.update(self.fingerprint_context)
-        return event_data
-
-    def _add_fingerprint_metadata(self, arguments: dict) -> dict:
-        """Add fingerprint metadata to tool arguments if available.
-
-        Args:
-            arguments: The original tool arguments
-
-        Returns:
-            Updated arguments dictionary with fingerprint metadata
-        """
-        # Create a shallow copy to avoid modifying the original
-        arguments = arguments.copy()
-
-        # Add security metadata under a designated key
-        if "security_context" not in arguments:
-            arguments["security_context"] = {}
-
-        security_context = arguments["security_context"]
-
-        # Add agent fingerprint if available
-        if self.agent and hasattr(self.agent, "security_config"):
-            security_config = getattr(self.agent, "security_config", None)
-            if security_config and hasattr(security_config, "fingerprint"):
-                try:
-                    security_context["agent_fingerprint"] = (
-                        security_config.fingerprint.to_dict()
-                    )
-                except AttributeError:
-                    pass
-
-        # Add task fingerprint if available
-        if self.task and hasattr(self.task, "security_config"):
-            security_config = getattr(self.task, "security_config", None)
-            if security_config and hasattr(security_config, "fingerprint"):
-                try:
-                    security_context["task_fingerprint"] = (
-                        security_config.fingerprint.to_dict()
-                    )
-                except AttributeError:
-                    pass
-
-        return arguments
+    def _prepare_event_data(self, tool: Any, tool_calling: ToolCalling) -> dict:
+        return {
+            "agent_key": self.agent.key,
+            "agent_role": (self.agent._original_role or self.agent.role),
+            "run_attempts": self._run_attempts,
+            "delegations": self.task.delegations,
+            "tool_name": tool.name,
+            "tool_args": tool_calling.arguments,
+            "tool_class": tool.__class__.__name__,
+        }

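The tool-input validation shown partially above parses model-produced arguments through a ladder of increasingly forgiving parsers: strict JSON, Python literal syntax, JSON5, and finally `json_repair`. A condensed, runnable sketch of that ladder, assuming the `json5` and `json-repair` packages are installed; the real method differs in error handling and logging:

import ast
import json

import json5
from json_repair import repair_json


def parse_tool_input(tool_input: str) -> dict:
    for parser in (json.loads, ast.literal_eval, json5.loads):
        try:
            value = parser(tool_input)
            if isinstance(value, dict):
                return value
        except (ValueError, SyntaxError):
            continue  # fall through to the next, more forgiving parser
    # Last resort: repair the string, then parse the repaired JSON
    repaired = str(repair_json(tool_input, skip_json_loads=True))
    return json.loads(repaired)


print(parse_tool_input("{'query': 'crewai', }"))  # {'query': 'crewai'}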
View File

@@ -24,10 +24,7 @@
     "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
     "formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
     "conversation_history_instruction": "You are a member of a crew collaborating to achieve a common goal. Your task is a specific action that contributes to this larger objective. For additional context, please review the conversation history between you and the user that led to the initiation of this crew. Use any relevant information or feedback from the conversation to inform your task execution and ensure your response aligns with both the immediate task and the crew's overall goals.",
-    "feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary.",
-    "lite_agent_system_prompt_with_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\n{tools}\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [{tool_names}], just the name, exactly as it's written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final answer to the original input question\n```",
-    "lite_agent_system_prompt_without_tools": "You are {role}. {backstory}\nYour personal goal is: {goal}\n\nTo give my best complete final answer to the task respond using the exact following format:\n\nThought: I now can give a great answer\nFinal Answer: Your final answer must be the great and the most complete as possible, it must be outcome described.\n\nI MUST use these formats, my job depends on it!",
-    "lite_agent_response_format": "\nIMPORTANT: Your final answer MUST contain all the information requested in the following format: {response_format}\n\nIMPORTANT: Ensure the final output does not include any code block markers like ```json or ```python."
+    "feedback_instructions": "User feedback: {feedback}\nInstructions: Use this feedback to enhance the next output iteration.\nNote: Do not respond or add commentary."
   },
   "errors": {
     "force_final_answer_error": "You can't keep going, here is the best final answer you generated:\n\n {formatted_answer}",

View File

@@ -1,430 +0,0 @@
import json
import re
from typing import Any, Callable, Dict, List, Optional, Sequence, Union
from crewai.agents.parser import (
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
AgentAction,
AgentFinish,
CrewAgentParser,
OutputParserException,
)
from crewai.llm import LLM
from crewai.llms.base_llm import BaseLLM
from crewai.tools import BaseTool as CrewAITool
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.tools.tool_types import ToolResult
from crewai.utilities import I18N, Printer
from crewai.utilities.exceptions.context_window_exceeding_exception import (
LLMContextLengthExceededException,
)
def parse_tools(tools: List[BaseTool]) -> List[CrewStructuredTool]:
"""Parse tools to be used for the task."""
tools_list = []
for tool in tools:
if isinstance(tool, CrewAITool):
tools_list.append(tool.to_structured_tool())
else:
raise ValueError("Tool is not a CrewStructuredTool or BaseTool")
return tools_list
def get_tool_names(tools: Sequence[Union[CrewStructuredTool, BaseTool]]) -> str:
"""Get the names of the tools."""
return ", ".join([t.name for t in tools])
def render_text_description_and_args(
tools: Sequence[Union[CrewStructuredTool, BaseTool]],
) -> str:
"""Render the tool name, description, and args in plain text.
search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, \
args: {"expression": {"type": "string"}}
"""
tool_strings = []
for tool in tools:
tool_strings.append(tool.description)
return "\n".join(tool_strings)
def has_reached_max_iterations(iterations: int, max_iterations: int) -> bool:
"""Check if the maximum number of iterations has been reached."""
return iterations >= max_iterations
def handle_max_iterations_exceeded(
formatted_answer: Union[AgentAction, AgentFinish, None],
printer: Printer,
i18n: I18N,
messages: List[Dict[str, str]],
llm: Union[LLM, BaseLLM],
callbacks: List[Any],
) -> Union[AgentAction, AgentFinish]:
"""
Handles the case when the maximum number of iterations is exceeded.
Performs one more LLM call to get the final answer.
Parameters:
formatted_answer: The last formatted answer from the agent.
Returns:
The final formatted answer after exceeding max iterations.
"""
printer.print(
content="Maximum iterations reached. Requesting final answer.",
color="yellow",
)
if formatted_answer and hasattr(formatted_answer, "text"):
assistant_message = (
formatted_answer.text + f'\n{i18n.errors("force_final_answer")}'
)
else:
assistant_message = i18n.errors("force_final_answer")
messages.append(format_message_for_llm(assistant_message, role="assistant"))
# Perform one more LLM call to get the final answer
answer = llm.call(
messages,
callbacks=callbacks,
)
if answer is None or answer == "":
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")
formatted_answer = format_answer(answer)
# Return the formatted answer, regardless of its type
return formatted_answer
def format_message_for_llm(prompt: str, role: str = "user") -> Dict[str, str]:
prompt = prompt.rstrip()
return {"role": role, "content": prompt}
def format_answer(answer: str) -> Union[AgentAction, AgentFinish]:
"""Format a response from the LLM into an AgentAction or AgentFinish."""
try:
return CrewAgentParser.parse_text(answer)
except Exception:
# If parsing fails, return a default AgentFinish
return AgentFinish(
thought="Failed to parse LLM response",
output=answer,
text=answer,
)
def enforce_rpm_limit(
request_within_rpm_limit: Optional[Callable[[], bool]] = None,
) -> None:
"""Enforce the requests per minute (RPM) limit if applicable."""
if request_within_rpm_limit:
request_within_rpm_limit()
def get_llm_response(
llm: Union[LLM, BaseLLM],
messages: List[Dict[str, str]],
callbacks: List[Any],
printer: Printer,
) -> str:
"""Call the LLM and return the response, handling any invalid responses."""
try:
answer = llm.call(
messages,
callbacks=callbacks,
)
except Exception as e:
printer.print(
content=f"Error during LLM call: {e}",
color="red",
)
raise e
if not answer:
printer.print(
content="Received None or empty response from LLM call.",
color="red",
)
raise ValueError("Invalid response from LLM call - None or empty.")
return answer
def process_llm_response(
answer: str, use_stop_words: bool
) -> Union[AgentAction, AgentFinish]:
"""Process the LLM response and format it into an AgentAction or AgentFinish."""
if not use_stop_words:
try:
# Preliminary parsing to check for errors.
format_answer(answer)
except OutputParserException as e:
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
answer = answer.split("Observation:")[0].strip()
return format_answer(answer)
def handle_agent_action_core(
formatted_answer: AgentAction,
tool_result: ToolResult,
messages: Optional[List[Dict[str, str]]] = None,
step_callback: Optional[Callable] = None,
show_logs: Optional[Callable] = None,
) -> Union[AgentAction, AgentFinish]:
"""Core logic for handling agent actions and tool results.
Args:
formatted_answer: The agent's action
tool_result: The result of executing the tool
messages: Optional list of messages to append results to
step_callback: Optional callback to execute after processing
show_logs: Optional function to show logs
Returns:
Either an AgentAction or AgentFinish
"""
if step_callback:
step_callback(tool_result)
formatted_answer.text += f"\nObservation: {tool_result.result}"
formatted_answer.result = tool_result.result
if tool_result.result_as_answer:
return AgentFinish(
thought="",
output=tool_result.result,
text=formatted_answer.text,
)
if show_logs:
show_logs(formatted_answer)
if messages is not None:
messages.append({"role": "assistant", "content": tool_result.result})
return formatted_answer
def handle_unknown_error(printer: Any, exception: Exception) -> None:
"""Handle unknown errors by informing the user.
Args:
printer: Printer instance for output
exception: The exception that occurred
"""
printer.print(
content="An unknown error occurred. Please check the details below.",
color="red",
)
printer.print(
content=f"Error details: {exception}",
color="red",
)
def handle_output_parser_exception(
e: OutputParserException,
messages: List[Dict[str, str]],
iterations: int,
log_error_after: int = 3,
printer: Optional[Any] = None,
) -> AgentAction:
"""Handle OutputParserException by updating messages and formatted_answer.
Args:
e: The OutputParserException that occurred
messages: List of messages to append to
iterations: Current iteration count
log_error_after: Number of iterations after which to log errors
printer: Optional printer instance for logging
Returns:
AgentAction: A formatted answer with the error
"""
messages.append({"role": "user", "content": e.error})
formatted_answer = AgentAction(
text=e.error,
tool="",
tool_input="",
thought="",
)
if iterations > log_error_after and printer:
printer.print(
content=f"Error parsing LLM output, agent will retry: {e.error}",
color="red",
)
return formatted_answer
def is_context_length_exceeded(exception: Exception) -> bool:
"""Check if the exception is due to context length exceeding.
Args:
exception: The exception to check
Returns:
bool: True if the exception is due to context length exceeding
"""
return LLMContextLengthExceededException(str(exception))._is_context_limit_error(
str(exception)
)
def handle_context_length(
respect_context_window: bool,
printer: Any,
messages: List[Dict[str, str]],
llm: Any,
callbacks: List[Any],
i18n: Any,
) -> None:
"""Handle context length exceeded by either summarizing or raising an error.
Args:
respect_context_window: Whether to respect context window
printer: Printer instance for output
messages: List of messages to summarize
llm: LLM instance for summarization
callbacks: List of callbacks for LLM
i18n: I18N instance for messages
"""
if respect_context_window:
printer.print(
content="Context length exceeded. Summarizing content to fit the model context window.",
color="yellow",
)
summarize_messages(messages, llm, callbacks, i18n)
else:
printer.print(
content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
color="red",
)
raise SystemExit(
"Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
)
def summarize_messages(
messages: List[Dict[str, str]],
llm: Any,
callbacks: List[Any],
i18n: Any,
) -> None:
"""Summarize messages to fit within context window.
Args:
messages: List of messages to summarize
llm: LLM instance for summarization
callbacks: List of callbacks for LLM
i18n: I18N instance for messages
"""
messages_groups = []
for message in messages:
content = message["content"]
cut_size = llm.get_context_window_size()
for i in range(0, len(content), cut_size):
messages_groups.append({"content": content[i : i + cut_size]})
summarized_contents = []
for group in messages_groups:
summary = llm.call(
[
format_message_for_llm(
i18n.slice("summarizer_system_message"), role="system"
),
format_message_for_llm(
i18n.slice("summarize_instruction").format(group=group["content"]),
),
],
callbacks=callbacks,
)
summarized_contents.append({"content": str(summary)})
merged_summary = " ".join(content["content"] for content in summarized_contents)
messages.clear()
messages.append(
format_message_for_llm(
i18n.slice("summary").format(merged_summary=merged_summary)
)
)
def show_agent_logs(
printer: Printer,
agent_role: str,
formatted_answer: Optional[Union[AgentAction, AgentFinish]] = None,
task_description: Optional[str] = None,
verbose: bool = False,
) -> None:
"""Show agent logs for both start and execution states.
Args:
printer: Printer instance for output
agent_role: Role of the agent
formatted_answer: Optional AgentAction or AgentFinish for execution logs
task_description: Optional task description for start logs
verbose: Whether to show verbose output
"""
if not verbose:
return
agent_role = agent_role.split("\n")[0]
if formatted_answer is None:
# Start logs
printer.print(
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
if task_description:
printer.print(
content=f"\033[95m## Task:\033[00m \033[92m{task_description}\033[00m"
)
else:
# Execution logs
printer.print(
content=f"\n\n\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
)
if isinstance(formatted_answer, AgentAction):
thought = re.sub(r"\n+", "\n", formatted_answer.thought)
formatted_json = json.dumps(
formatted_answer.tool_input,
indent=2,
ensure_ascii=False,
)
if thought and thought != "":
printer.print(
content=f"\033[95m## Thought:\033[00m \033[92m{thought}\033[00m"
)
printer.print(
content=f"\033[95m## Using tool:\033[00m \033[92m{formatted_answer.tool}\033[00m"
)
printer.print(
content=f"\033[95m## Tool Input:\033[00m \033[92m\n{formatted_json}\033[00m"
)
printer.print(
content=f"\033[95m## Tool Output:\033[00m \033[92m\n{formatted_answer.result}\033[00m"
)
elif isinstance(formatted_answer, AgentFinish):
printer.print(
content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
)
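
Of the helpers deleted above, `summarize_messages` has the least obvious control flow: it slices every message's content into context-window-sized chunks, asks the LLM to summarize each chunk, then replaces the entire history with a single merged summary message. A self-contained sketch of that flow with a fake LLM so it runs without crewai; the class and function names here are stand-ins, and the i18n prompt plumbing is elided:

```python
from typing import Dict, List


class FakeLLM:
    """Stand-in LLM: tiny fixed window, trivial 'summaries'."""

    def get_context_window_size(self) -> int:
        return 20  # deliberately small so the demo produces several chunks

    def call(self, messages: List[Dict[str, str]]) -> str:
        # Pretend summary: just echo the first 10 characters of the chunk.
        return messages[-1]["content"][:10]


def summarize_history(messages: List[Dict[str, str]], llm: FakeLLM) -> None:
    """Same shape as the deleted summarize_messages, minus the i18n prompts."""
    cut_size = llm.get_context_window_size()
    chunks: List[str] = []
    for message in messages:
        content = message["content"]
        for i in range(0, len(content), cut_size):
            chunks.append(content[i : i + cut_size])

    summaries = [llm.call([{"role": "user", "content": c}]) for c in chunks]
    merged = " ".join(summaries)

    # The original clears the history and leaves one summary message behind.
    messages.clear()
    messages.append({"role": "user", "content": f"Summary: {merged}"})


history = [{"role": "assistant", "content": "x" * 45}]
summarize_history(history, FakeLLM())
print(history)  # one message; 45 chars / 20-char window -> 3 chunk summaries
```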

View File

@@ -1,62 +0,0 @@
import re
from typing import Optional
MIN_COLLECTION_LENGTH = 3
MAX_COLLECTION_LENGTH = 63
DEFAULT_COLLECTION = "default_collection"
# Compiled regex patterns for better performance
INVALID_CHARS_PATTERN = re.compile(r"[^a-zA-Z0-9_-]")
IPV4_PATTERN = re.compile(r"^(\d{1,3}\.){3}\d{1,3}$")
def is_ipv4_pattern(name: str) -> bool:
"""
Check if a string matches an IPv4 address pattern.
Args:
name: The string to check
Returns:
True if the string matches an IPv4 pattern, False otherwise
"""
return bool(IPV4_PATTERN.match(name))
def sanitize_collection_name(name: Optional[str]) -> str:
"""
Sanitize a collection name to meet ChromaDB requirements:
1. 3-63 characters long
2. Starts and ends with alphanumeric character
3. Contains only alphanumeric characters, underscores, or hyphens
4. No consecutive periods
5. Not a valid IPv4 address
Args:
name: The original collection name to sanitize
Returns:
A sanitized collection name that meets ChromaDB requirements
"""
if not name:
return DEFAULT_COLLECTION
if is_ipv4_pattern(name):
name = f"ip_{name}"
sanitized = INVALID_CHARS_PATTERN.sub("_", name)
if not sanitized[0].isalnum():
sanitized = "a" + sanitized
if not sanitized[-1].isalnum():
sanitized = sanitized[:-1] + "z"
if len(sanitized) < MIN_COLLECTION_LENGTH:
sanitized = sanitized + "x" * (MIN_COLLECTION_LENGTH - len(sanitized))
if len(sanitized) > MAX_COLLECTION_LENGTH:
sanitized = sanitized[:MAX_COLLECTION_LENGTH]
if not sanitized[-1].isalnum():
sanitized = sanitized[:-1] + "z"
return sanitized
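
Assuming the function above is in scope, a few hand-traced input/output pairs illustrate the sanitization rules; the expected values are inferred from the code, not from ChromaDB's documentation:

```python
# Hand-traced expectations for sanitize_collection_name (defined above):
print(sanitize_collection_name(None))           # "default_collection"
print(sanitize_collection_name("ab"))           # "abx"  (padded to 3 chars)
print(sanitize_collection_name("my docs!"))     # "my_docsz"  (bad chars -> "_", alnum end enforced)
print(sanitize_collection_name("192.168.0.1"))  # "ip_192_168_0_1"  (IPv4 prefixed, dots replaced)
print(sanitize_collection_name("-team"))        # "a-team"  (alnum start enforced)
```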

View File

@@ -216,7 +216,7 @@ def convert_with_instructions(
 def get_conversion_instructions(model: Type[BaseModel], llm: Any) -> str:
     instructions = "Please convert the following text into valid JSON."
-    if llm and not isinstance(llm, str) and llm.supports_function_calling():
+    if llm.supports_function_calling():
         model_schema = PydanticSchemaParser(model=model).get_schema()
         instructions += (
             f"\n\nOutput ONLY the valid JSON and nothing else.\n\n"
@@ -287,9 +287,8 @@ def generate_model_description(model: Type[BaseModel]) -> str:
         else:
             return str(field_type)
 
-    fields = model.model_fields
+    fields = model.__annotations__
     field_descriptions = [
-        f'"{name}": {describe_field(field.annotation)}'
-        for name, field in fields.items()
+        f'"{name}": {describe_field(type_)}' for name, type_ in fields.items()
     ]
     return "{\n  " + ",\n  ".join(field_descriptions) + "\n}"

View File

@@ -6,7 +6,7 @@ from rich.console import Console
 from rich.table import Table
 
 from crewai.agent import Agent
-from crewai.llm import BaseLLM
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
@@ -24,7 +24,7 @@ class CrewEvaluator:
     Attributes:
         crew (Crew): The crew of agents to evaluate.
-        eval_llm (BaseLLM): Language model instance to use for evaluations
+        eval_llm (LLM): Language model instance to use for evaluations
         tasks_scores (defaultdict): A dictionary to store the scores of the agents for each task.
         iteration (int): The current iteration of the evaluation.
     """
@@ -33,7 +33,7 @@ class CrewEvaluator:
     run_execution_times: defaultdict = defaultdict(list)
     iteration: int = 0
 
-    def __init__(self, crew, eval_llm: InstanceOf[BaseLLM]):
+    def __init__(self, crew, eval_llm: InstanceOf[LLM]):
         self.crew = crew
         self.llm = eval_llm
         self._telemetry = Telemetry()
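
For context, the evaluator is constructed with the crew under test plus the judge model, so under the `LLM`-typed signature on the right-hand side, wiring it up looks roughly like the sketch below. Treat the import path and the `gpt-4o-mini` model name as assumptions; only the constructor shape comes from the hunk above:

```python
from crewai.llm import LLM
# Module path assumed from crewai's utilities layout, not shown in this diff.
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator


def score_crew(crew) -> CrewEvaluator:
    """Sketch: attach an evaluator with an explicit judge model."""
    evaluator = CrewEvaluator(crew, eval_llm=LLM(model="gpt-4o-mini"))
    evaluator.iteration = 1  # attribute declared in the hunk above
    return evaluator
```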

View File

@@ -45,7 +45,7 @@ class TaskEvaluator:
     def evaluate(self, task, output) -> TaskEvaluation:
         crewai_event_bus.emit(
-            self, TaskEvaluationEvent(evaluation_type="task_evaluation", task=task)
+            self, TaskEvaluationEvent(evaluation_type="task_evaluation")
         )
         evaluation_query = (
             f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"

View File

@@ -1,16 +1,16 @@
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
+from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Union
 
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 
-from .base_events import BaseEvent
+from .base_events import CrewEvent
 
 if TYPE_CHECKING:
     from crewai.agents.agent_builder.base_agent import BaseAgent
 
 
-class AgentExecutionStartedEvent(BaseEvent):
+class AgentExecutionStartedEvent(CrewEvent):
     """Event emitted when an agent starts executing a task"""
 
     agent: BaseAgent
@@ -21,20 +21,8 @@ class AgentExecutionStartedEvent(BaseEvent):
 
     model_config = {"arbitrary_types_allowed": True}
 
-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
-        if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
-            self.source_fingerprint = self.agent.fingerprint.uuid_str
-            self.source_type = "agent"
-            if (
-                hasattr(self.agent.fingerprint, "metadata")
-                and self.agent.fingerprint.metadata
-            ):
-                self.fingerprint_metadata = self.agent.fingerprint.metadata
-
-class AgentExecutionCompletedEvent(BaseEvent):
+class AgentExecutionCompletedEvent(CrewEvent):
     """Event emitted when an agent completes executing a task"""
 
     agent: BaseAgent
@@ -42,63 +30,11 @@ class AgentExecutionCompletedEvent(BaseEvent):
     output: str
     type: str = "agent_execution_completed"
 
-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
-        if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
-            self.source_fingerprint = self.agent.fingerprint.uuid_str
-            self.source_type = "agent"
-            if (
-                hasattr(self.agent.fingerprint, "metadata")
-                and self.agent.fingerprint.metadata
-            ):
-                self.fingerprint_metadata = self.agent.fingerprint.metadata
-
-class AgentExecutionErrorEvent(BaseEvent):
+class AgentExecutionErrorEvent(CrewEvent):
     """Event emitted when an agent encounters an error during execution"""
 
     agent: BaseAgent
     task: Any
     error: str
     type: str = "agent_execution_error"
-
-    def __init__(self, **data):
-        super().__init__(**data)
-        # Set fingerprint data from the agent
-        if hasattr(self.agent, "fingerprint") and self.agent.fingerprint:
-            self.source_fingerprint = self.agent.fingerprint.uuid_str
-            self.source_type = "agent"
-            if (
-                hasattr(self.agent.fingerprint, "metadata")
-                and self.agent.fingerprint.metadata
-            ):
-                self.fingerprint_metadata = self.agent.fingerprint.metadata
-
-
-# New event classes for LiteAgent
-class LiteAgentExecutionStartedEvent(BaseEvent):
-    """Event emitted when a LiteAgent starts executing"""
-
-    agent_info: Dict[str, Any]
-    tools: Optional[Sequence[Union[BaseTool, CrewStructuredTool]]]
-    messages: Union[str, List[Dict[str, str]]]
-    type: str = "lite_agent_execution_started"
-
-    model_config = {"arbitrary_types_allowed": True}
-
-
-class LiteAgentExecutionCompletedEvent(BaseEvent):
-    """Event emitted when a LiteAgent completes execution"""
-
-    agent_info: Dict[str, Any]
-    output: str
-    type: str = "lite_agent_execution_completed"
-
-
-class LiteAgentExecutionErrorEvent(BaseEvent):
-    """Event emitted when a LiteAgent encounters an error during execution"""
-
-    agent_info: Dict[str, Any]
-    error: str
-    type: str = "lite_agent_execution_error"

Some files were not shown because too many files have changed in this diff.