Mirror of https://github.com/crewAIInc/crewAI.git
Synced 2025-12-16 12:28:30 +00:00

Compare commits — 74 commits between `feat/ibm-m…` and `brandon/cr…`:

0695c26703, 4fb3331c6a, b6c6eea6f5, 1af95f5146, ed3487aa22, 77af733e44,
aaf80d1d43, 9e9b945a46, 308a8dc925, 7d9d0ff6f7, f8a8e7b2a5, 3285c1b196,
4bc23affe0, bca56eea48, 588ad3c4a4, c6a6c918e0, 366bbbbea3, 293305790d,
8bc09eb054, db1b678c3a, 6f32bf52cc, 49d173a02d, 4069b621d5, a7147c99c6,
6fe308202e, 63ecb7395d, 8cf1cd5a62, 93c0467bba, 8f5f67de41, f8ca49d8df,
c119230fd6, 14a36d3f5e, fde1ee45f9, 6774bc2c53, 94c62263ed, 495c3859af,
3e003f5e32, 1c8b509d7d, 58af5c08f9, 55e968c9e0, 0b9092702b, 8376698534,
3dc02310b6, e70bc94ab6, 9285ebf8a2, 4ca785eb15, c57cbd8591, 7fb1289205,
f02681ae01, c725105b1f, 36aa4bcb46, b98f8f9fe1, bcfcf88e78, fd0de3a47e,
c7b9ae02fd, 4afb022572, 8610faef22, 6d677541c7, 49220ec163, 40a676b7ac,
50bf146d1e, 40d378abfb, 1b09b085a7, 9f2acfe91f, e856359e23, faa231e278,
3d44795476, f50e709985, d70c542547, 57201fb856, 9b142e580b, 3878daffd6,
34954e6f74, e66a135d5d
.github/workflows/linter.yml (vendored, 2 changed lines)

@@ -6,7 +6,7 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

       - name: Install Requirements
         run: |
.github/workflows/mkdocs.yml (vendored, 8 changed lines)

@@ -13,10 +13,10 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4

       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: '3.10'

@@ -25,7 +25,7 @@ jobs:
         run: echo "::set-output name=hash::$(sha256sum requirements-doc.txt | awk '{print $1}')"

       - name: Setup cache
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           key: mkdocs-material-${{ steps.req-hash.outputs.hash }}
           path: .cache

@@ -42,4 +42,4 @@ jobs:
           GH_TOKEN: ${{ secrets.GH_TOKEN }}

       - name: Build and deploy MkDocs
-        run: mkdocs gh-deploy --force
+        run: mkdocs gh-deploy --force
.github/workflows/security-checker.yml (vendored, 4 changed lines)

@@ -11,7 +11,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.11.9"

@@ -19,5 +19,5 @@ jobs:
         run: pip install bandit

       - name: Run Bandit
-        run: bandit -c pyproject.toml -r src/ -lll
+        run: bandit -c pyproject.toml -r src/ -ll
.github/workflows/tests.yml (vendored, 4 changed lines)

@@ -26,7 +26,7 @@ jobs:
         run: uv python install 3.11.9

       - name: Install the project
-        run: uv sync --dev
+        run: uv sync --dev --all-extras

       - name: Run tests
-        run: uv run pytest tests
+        run: uv run pytest tests -vv
.github/workflows/type-checker.yml (vendored, 2 changed lines)

@@ -14,7 +14,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.11.9"
.gitignore (vendored, 4 changed lines)

@@ -17,3 +17,7 @@ rc-tests/*
 temp/*
 .vscode/*
 crew_tasks_output.json
+.codesight
+.mypy_cache
+.ruff_cache
+.venv
README.md (12 changed lines)

@@ -100,7 +100,7 @@ You can now start developing your crew by editing the files in the `src/my_proje

 #### Example of a simple crew with a sequential process:

-Instatiate your crew:
+Instantiate your crew:

 ```shell
 crewai create crew latest-ai-development

@@ -121,7 +121,7 @@ researcher:
     You're a seasoned researcher with a knack for uncovering the latest
     developments in {topic}. Known for your ability to find the most relevant
     information and present it in a clear and concise manner.
-
+
 reporting_analyst:
   role: >
     {topic} Reporting Analyst

@@ -205,7 +205,7 @@ class LatestAiDevelopmentCrew():
       tasks=self.tasks, # Automatically created by the @task decorator
       process=Process.sequential,
       verbose=True,
-    )
+    )
 ```

 **main.py**

@@ -357,7 +357,7 @@ uv run pytest .

 ### Running static type checks

 ```bash
-uvx mypy
+uvx mypy src
 ```

 ### Packaging

@@ -376,7 +376,7 @@ pip install dist/*.tar.gz

 CrewAI uses anonymous telemetry to collect usage data with the main purpose of helping us improve the library by focusing our efforts on the most used features, integrations and tools.

-It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables, with the exception of the conditions mentioned. When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights while respecting user privacy. We don't offer a way to disable it now, but we will in the future.
+It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables, with the exception of the conditions mentioned. When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights while respecting user privacy. Users can disable telemetry by setting the environment variable OTEL_SDK_DISABLED to true.

 Data collected includes:

@@ -399,7 +399,7 @@ Data collected includes:
 - Roles of agents in a crew
   - Understand high level use cases so we can build better tools, integrations and examples about it
 - Tools names available
-  - Understand out of the publically available tools, which ones are being used the most so we can improve them
+  - Understand out of the publicly available tools, which ones are being used the most so we can improve them

 Users can opt-in to Further Telemetry, sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.
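To make the new opt-out above concrete, the variable can be exported in the shell or set from Python. A minimal sketch — the variable name comes from the diff above; setting it before `crewai` is imported is my own cautious assumption about import-time initialization:

```python
import os

# Disable CrewAI's OpenTelemetry-based telemetry before importing crewai
os.environ["OTEL_SDK_DISABLED"] = "true"

from crewai import Agent, Crew  # noqa: E402 — imported after the env var is set
```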
docs/concepts/agents.mdx

@@ -1,161 +1,343 @@
 ---
 title: Agents
-description: What are CrewAI Agents and how to use them.
+description: Detailed guide on creating and managing agents within the CrewAI framework.
 icon: robot
 ---

-## What is an agent?
+## Overview of an Agent

-An agent is an **autonomous unit** programmed to:
-<ul>
-  <li class='leading-3'>Perform tasks</li>
-  <li class='leading-3'>Make decisions</li>
-  <li class='leading-3'>Communicate with other agents</li>
-</ul>
+In the CrewAI framework, an `Agent` is an autonomous unit that can:
+- Perform specific tasks
+- Make decisions based on its role and goal
+- Use tools to accomplish objectives
+- Communicate and collaborate with other agents
+- Maintain memory of interactions
+- Delegate tasks when allowed

 <Tip>
-  Think of an agent as a member of a team, with specific skills and a particular job to do. Agents can have different roles like `Researcher`, `Writer`, or `Customer Support`, each contributing to the overall goal of the crew.
+  Think of an agent as a specialized team member with specific skills, expertise, and responsibilities. For example, a `Researcher` agent might excel at gathering and analyzing information, while a `Writer` agent might be better at creating content.
 </Tip>
-## Agent attributes
+## Agent Attributes

-| Attribute | Parameter | Description |
-| :--- | :--- | :--- |
-| **Role** | `role` | Defines the agent's function within the crew. It determines the kind of tasks the agent is best suited for. |
-| **Goal** | `goal` | The individual objective that the agent aims to achieve. It guides the agent's decision-making process. |
-| **Backstory** | `backstory` | Provides context to the agent's role and goal, enriching the interaction and collaboration dynamics. |
-| **LLM** *(optional)* | `llm` | Represents the language model that will run the agent. It dynamically fetches the model name from the `OPENAI_MODEL_NAME` environment variable, defaulting to "gpt-4" if not specified. |
-| **Tools** *(optional)* | `tools` | Set of capabilities or functions that the agent can use to perform tasks. Expected to be instances of custom classes compatible with the agent's execution environment. Tools are initialized with a default value of an empty list. |
-| **Function Calling LLM** *(optional)* | `function_calling_llm` | Specifies the language model that will handle the tool calling for this agent, overriding the crew function calling LLM if passed. Default is `None`. |
-| **Max Iter** *(optional)* | `max_iter` | Max Iter is the maximum number of iterations the agent can perform before being forced to give its best answer. Default is `25`. |
-| **Max RPM** *(optional)* | `max_rpm` | Max RPM is the maximum number of requests per minute the agent can perform to avoid rate limits. It's optional and can be left unspecified, with a default value of `None`. |
-| **Max Execution Time** *(optional)* | `max_execution_time` | Max Execution Time is the maximum execution time for an agent to execute a task. It's optional and can be left unspecified, with a default value of `None`, meaning no max execution time. |
-| **Verbose** *(optional)* | `verbose` | Setting this to `True` configures the internal logger to provide detailed execution logs, aiding in debugging and monitoring. Default is `False`. |
-| **Allow Delegation** *(optional)* | `allow_delegation` | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. Default is `False`. |
-| **Step Callback** *(optional)* | `step_callback` | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback`. |
-| **Cache** *(optional)* | `cache` | Indicates if the agent should use a cache for tool usage. Default is `True`. |
-| **System Template** *(optional)* | `system_template` | Specifies the system format for the agent. Default is `None`. |
-| **Prompt Template** *(optional)* | `prompt_template` | Specifies the prompt format for the agent. Default is `None`. |
-| **Response Template** *(optional)* | `response_template` | Specifies the response format for the agent. Default is `None`. |
-| **Allow Code Execution** *(optional)* | `allow_code_execution` | Enable code execution for the agent. Default is `False`. |
-| **Max Retry Limit** *(optional)* | `max_retry_limit` | Maximum number of retries for an agent to execute a task when an error occurs. Default is `2`. |
-| **Use System Prompt** *(optional)* | `use_system_prompt` | Adds the ability to not use system prompt (to support o1 models). Default is `True`. |
-| **Respect Context Window** *(optional)* | `respect_context_window` | Summary strategy to avoid overflowing the context window. Default is `True`. |
-| **Code Execution Mode** *(optional)* | `code_execution_mode` | Determines the mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution on the host machine). Default is `safe`. |
+| Attribute | Parameter | Type | Description |
+| :--- | :--- | :--- | :--- |
+| **Role** | `role` | `str` | Defines the agent's function and expertise within the crew. |
+| **Goal** | `goal` | `str` | The individual objective that guides the agent's decision-making. |
+| **Backstory** | `backstory` | `str` | Provides context and personality to the agent, enriching interactions. |
+| **LLM** _(optional)_ | `llm` | `Union[str, LLM, Any]` | Language model that powers the agent. Defaults to the model specified in `OPENAI_MODEL_NAME` or "gpt-4". |
+| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | Capabilities or functions available to the agent. Defaults to an empty list. |
+| **Function Calling LLM** _(optional)_ | `function_calling_llm` | `Optional[Any]` | Language model for tool calling, overrides crew's LLM if specified. |
+| **Max Iterations** _(optional)_ | `max_iter` | `int` | Maximum iterations before the agent must provide its best answer. Default is 20. |
+| **Max RPM** _(optional)_ | `max_rpm` | `Optional[int]` | Maximum requests per minute to avoid rate limits. |
+| **Max Execution Time** _(optional)_ | `max_execution_time` | `Optional[int]` | Maximum time (in seconds) for task execution. |
+| **Memory** _(optional)_ | `memory` | `bool` | Whether the agent should maintain memory of interactions. Default is True. |
+| **Verbose** _(optional)_ | `verbose` | `bool` | Enable detailed execution logs for debugging. Default is False. |
+| **Allow Delegation** _(optional)_ | `allow_delegation` | `bool` | Allow the agent to delegate tasks to other agents. Default is False. |
+| **Step Callback** _(optional)_ | `step_callback` | `Optional[Any]` | Function called after each agent step, overrides crew callback. |
+| **Cache** _(optional)_ | `cache` | `bool` | Enable caching for tool usage. Default is True. |
+| **System Template** _(optional)_ | `system_template` | `Optional[str]` | Custom system prompt template for the agent. |
+| **Prompt Template** _(optional)_ | `prompt_template` | `Optional[str]` | Custom prompt template for the agent. |
+| **Response Template** _(optional)_ | `response_template` | `Optional[str]` | Custom response template for the agent. |
+| **Allow Code Execution** _(optional)_ | `allow_code_execution` | `Optional[bool]` | Enable code execution for the agent. Default is False. |
+| **Max Retry Limit** _(optional)_ | `max_retry_limit` | `int` | Maximum number of retries when an error occurs. Default is 2. |
+| **Respect Context Window** _(optional)_ | `respect_context_window` | `bool` | Keep messages under context window size by summarizing. Default is True. |
+| **Code Execution Mode** _(optional)_ | `code_execution_mode` | `Literal["safe", "unsafe"]` | Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct). Default is 'safe'. |
+| **Embedder Config** _(optional)_ | `embedder_config` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
+| **Knowledge Sources** _(optional)_ | `knowledge_sources` | `Optional[List[BaseKnowledgeSource]]` | Knowledge sources available to the agent. |
+| **Use System Prompt** _(optional)_ | `use_system_prompt` | `Optional[bool]` | Whether to use system prompt (for o1 model support). Default is True. |
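Worth pulling out of the revised table: only `role`, `goal`, and `backstory` are required; everything else has a default. A minimal sketch of that floor (the agent name and strings here are my own illustration):

```python
from crewai import Agent

# Only the three required attributes; every other parameter keeps its default
writer = Agent(
    role="Technical Writer",
    goal="Explain complex topics in plain language",
    backstory="A writer with a background in developer documentation.",
)
```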
-## Creating an agent
+## Creating Agents

+There are two ways to create agents in CrewAI: using **YAML configuration (recommended)** or defining them **directly in code**.
+
+### YAML Configuration (Recommended)
+
+Using YAML configuration provides a cleaner, more maintainable way to define agents. We strongly recommend using this approach in your CrewAI projects.
+
+After creating your CrewAI project as outlined in the [Installation](/installation) section, navigate to the `src/latest_ai_development/config/agents.yaml` file and modify the template to match your requirements.

 <Note>
-  **Agent interaction**: Agents can interact with each other using CrewAI's built-in delegation and communication mechanisms. This allows for dynamic task management and problem-solving within the crew.
+  Variables in your YAML files (like `{topic}`) will be replaced with values from your inputs when running the crew:
+  ```python Code
+  crew.kickoff(inputs={'topic': 'AI Agents'})
+  ```
 </Note>

-To create an agent, you would typically initialize an instance of the `Agent` class with the desired properties. Here's a conceptual example including all attributes:
+Here's an example of how to configure agents using YAML:

+```yaml agents.yaml
+# src/latest_ai_development/config/agents.yaml
+researcher:
+  role: >
+    {topic} Senior Data Researcher
+  goal: >
+    Uncover cutting-edge developments in {topic}
+  backstory: >
+    You're a seasoned researcher with a knack for uncovering the latest
+    developments in {topic}. Known for your ability to find the most relevant
+    information and present it in a clear and concise manner.
+
+reporting_analyst:
+  role: >
+    {topic} Reporting Analyst
+  goal: >
+    Create detailed reports based on {topic} data analysis and research findings
+  backstory: >
+    You're a meticulous analyst with a keen eye for detail. You're known for
+    your ability to turn complex data into clear and concise reports, making
+    it easy for others to understand and act on the information you provide.
+```
+
+To use this YAML configuration in your code, create a crew class that inherits from `CrewBase`:
+
+```python Code
+# src/latest_ai_development/crew.py
+from crewai import Agent, Crew, Process
+from crewai.project import CrewBase, agent, crew
+from crewai_tools import SerperDevTool
+
+@CrewBase
+class LatestAiDevelopmentCrew():
+  """LatestAiDevelopment crew"""
+
+  @agent
+  def researcher(self) -> Agent:
+    return Agent(
+      config=self.agents_config['researcher'],
+      verbose=True,
+      tools=[SerperDevTool()]
+    )
+
+  @agent
+  def reporting_analyst(self) -> Agent:
+    return Agent(
+      config=self.agents_config['reporting_analyst'],
+      verbose=True
+    )
+```
+
+<Note>
+  The names you use in your YAML files (`agents.yaml`) should match the method names in your Python code.
+</Note>
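To round out the YAML pattern added above, the assembled crew is typically run with `kickoff`, passing the inputs that fill the `{topic}` placeholders. A sketch assuming `LatestAiDevelopmentCrew` also defines the usual `@task` and `@crew` methods (elided in the diff), and a hypothetical import path:

```python
# Hypothetical main.py for the project sketched above
from latest_ai_development.crew import LatestAiDevelopmentCrew

result = LatestAiDevelopmentCrew().crew().kickoff(inputs={"topic": "AI Agents"})
print(result.raw)  # final textual output of the crew
```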
+### Direct Code Definition
+
+You can create agents directly in code by instantiating the `Agent` class. Here's a comprehensive example showing all available parameters:

-```python Code example
-from crewai import Agent
-
-agent = Agent(
-  role='Data Analyst',
-  goal='Extract actionable insights',
-  backstory="""You're a data analyst at a large company.
-  You're responsible for analyzing data and providing insights
-  to the business.
-  You're currently working on a project to analyze the
-  performance of our marketing campaigns.""",
-  tools=[my_tool1, my_tool2],  # Optional, defaults to an empty list
-  llm=my_llm,  # Optional
-  function_calling_llm=my_llm,  # Optional
-  max_iter=15,  # Optional
-  max_rpm=None, # Optional
-  max_execution_time=None, # Optional
-  verbose=True,  # Optional
-  allow_delegation=False,  # Optional
-  step_callback=my_intermediate_step_callback,  # Optional
-  cache=True,  # Optional
-  system_template=my_system_template,  # Optional
-  prompt_template=my_prompt_template,  # Optional
-  response_template=my_response_template,  # Optional
-  config=my_config,  # Optional
-  crew=my_crew,  # Optional
-  tools_handler=my_tools_handler,  # Optional
-  cache_handler=my_cache_handler,  # Optional
-  callbacks=[callback1, callback2],  # Optional
-  allow_code_execution=True,  # Optional
-  max_retry_limit=2,  # Optional
-  use_system_prompt=True,  # Optional
-  respect_context_window=True,  # Optional
-  code_execution_mode='safe',  # Optional, defaults to 'safe'
-)
-```
+```python Code
+from crewai import Agent
+from crewai_tools import SerperDevTool
+
+# Create an agent with all available parameters
+agent = Agent(
+    role="Senior Data Scientist",
+    goal="Analyze and interpret complex datasets to provide actionable insights",
+    backstory="With over 10 years of experience in data science and machine learning, "
+              "you excel at finding patterns in complex datasets.",
+    llm="gpt-4",  # Default: OPENAI_MODEL_NAME or "gpt-4"
+    function_calling_llm=None,  # Optional: Separate LLM for tool calling
+    memory=True,  # Default: True
+    verbose=False,  # Default: False
+    allow_delegation=False,  # Default: False
+    max_iter=20,  # Default: 20 iterations
+    max_rpm=None,  # Optional: Rate limit for API calls
+    max_execution_time=None,  # Optional: Maximum execution time in seconds
+    max_retry_limit=2,  # Default: 2 retries on error
+    allow_code_execution=False,  # Default: False
+    code_execution_mode="safe",  # Default: "safe" (options: "safe", "unsafe")
+    respect_context_window=True,  # Default: True
+    use_system_prompt=True,  # Default: True
+    tools=[SerperDevTool()],  # Optional: List of tools
+    knowledge_sources=None,  # Optional: List of knowledge sources
+    embedder_config=None,  # Optional: Custom embedder configuration
+    system_template=None,  # Optional: Custom system prompt template
+    prompt_template=None,  # Optional: Custom prompt template
+    response_template=None,  # Optional: Custom response template
+    step_callback=None,  # Optional: Callback function for monitoring
+)
+```
-## Setting prompt templates
-
-Prompt templates are used to format the prompt for the agent. You can use them to update the system, regular, and response templates for the agent. Here's an example of how to set prompt templates:
-
-```python Code example
-agent = Agent(
-  role="{topic} specialist",
-  goal="Figure {goal} out",
-  backstory="I am the master of {role}",
-  system_template="""<|start_header_id|>system<|end_header_id|>
-{{ .System }}<|eot_id|>""",
-  prompt_template="""<|start_header_id|>user<|end_header_id|>
-{{ .Prompt }}<|eot_id|>""",
-  response_template="""<|start_header_id|>assistant<|end_header_id|>
-{{ .Response }}<|eot_id|>""",
-)
-```
+Let's break down some key parameter combinations for common use cases:
+
+#### Basic Research Agent
+```python Code
+research_agent = Agent(
+    role="Research Analyst",
+    goal="Find and summarize information about specific topics",
+    backstory="You are an experienced researcher with attention to detail",
+    tools=[SerperDevTool()],
+    verbose=True  # Enable logging for debugging
+)
+```
+
+#### Code Development Agent
+```python Code
+dev_agent = Agent(
+    role="Senior Python Developer",
+    goal="Write and debug Python code",
+    backstory="Expert Python developer with 10 years of experience",
+    allow_code_execution=True,
+    code_execution_mode="safe",  # Uses Docker for safety
+    max_execution_time=300,  # 5-minute timeout
+    max_retry_limit=3  # More retries for complex code tasks
+)
+```
+
+#### Long-Running Analysis Agent
+```python Code
+analysis_agent = Agent(
+    role="Data Analyst",
+    goal="Perform deep analysis of large datasets",
+    backstory="Specialized in big data analysis and pattern recognition",
+    memory=True,
+    respect_context_window=True,
+    max_rpm=10,  # Limit API calls
+    function_calling_llm="gpt-4o-mini"  # Cheaper model for tool calls
+)
+```
+
+#### Custom Template Agent
+```python Code
+custom_agent = Agent(
+    role="Customer Service Representative",
+    goal="Assist customers with their inquiries",
+    backstory="Experienced in customer support with a focus on satisfaction",
+    system_template="""<|start_header_id|>system<|end_header_id|>
+{{ .System }}<|eot_id|>""",
+    prompt_template="""<|start_header_id|>user<|end_header_id|>
+{{ .Prompt }}<|eot_id|>""",
+    response_template="""<|start_header_id|>assistant<|end_header_id|>
+{{ .Response }}<|eot_id|>""",
+)
+```
-## Bring your third-party agents
-
-Extend your third-party agents like LlamaIndex, Langchain, Autogen or fully custom agents using the CrewAI `BaseAgent` class.
-
-<Note>
-  **BaseAgent** includes attributes and methods required to integrate with your crews to run and delegate tasks to other agents within your own crew.
-</Note>
+### Parameter Details
+
+#### Critical Parameters
+- `role`, `goal`, and `backstory` are required and shape the agent's behavior
+- `llm` determines the language model used (default: OpenAI's GPT-4)
+
+#### Memory and Context
+- `memory`: Enable to maintain conversation history
+- `respect_context_window`: Prevents token limit issues
+- `knowledge_sources`: Add domain-specific knowledge bases
+
+#### Execution Control
+- `max_iter`: Maximum attempts before giving best answer
+- `max_execution_time`: Timeout in seconds
+- `max_rpm`: Rate limiting for API calls
+- `max_retry_limit`: Retries on error
+
+#### Code Execution
+- `allow_code_execution`: Must be True to run code
+- `code_execution_mode`:
+  - `"safe"`: Uses Docker (recommended for production)
+  - `"unsafe"`: Direct execution (use only in trusted environments)
+
+#### Templates
+- `system_template`: Defines agent's core behavior
+- `prompt_template`: Structures input format
+- `response_template`: Formats agent responses
+
+<Note>
+  When using custom templates, you can use variables like `{role}`, `{goal}`, and `{input}` in your templates. These will be automatically populated during execution.
+</Note>
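Reading the note above literally, a custom template can embed `{role}` and `{goal}` directly. A hypothetical sketch — the template wording and placement are my own illustration, not from the docs:

```python
from crewai import Agent

agent = Agent(
    role="Support Engineer",
    goal="Resolve customer issues quickly",
    backstory="Patient and methodical troubleshooter.",
    # {role} and {goal} are assumed to be auto-populated from the fields above
    system_template="You are {role}. Your objective: {goal}.",
)
```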
-CrewAI is a universal multi-agent framework that allows for all agents to work together to automate tasks and solve problems.
+## Agent Tools

-```python Code example
-from crewai import Agent, Task, Crew
-from custom_agent import CustomAgent # You need to build and extend your own agent logic with the CrewAI BaseAgent class then import it here.
-
-from langchain.agents import load_tools
-
-langchain_tools = load_tools(["google-serper"], llm=llm)
-
-agent1 = CustomAgent(
-    role="agent role",
-    goal="who is {input}?",
-    backstory="agent backstory",
-    verbose=True,
-)
-
-task1 = Task(
-    expected_output="a short biography of {input}",
-    description="a short biography of {input}",
-    agent=agent1,
-)
-
-agent2 = Agent(
-    role="agent role",
-    goal="summarize the short bio for {input} and if needed do more research",
-    backstory="agent backstory",
-    verbose=True,
-)
-
-task2 = Task(
-    description="a tldr summary of the short biography",
-    expected_output="5 bullet point summary of the biography",
-    agent=agent2,
-    context=[task1],
-)
-
-my_crew = Crew(agents=[agent1, agent2], tasks=[task1, task2])
-crew = my_crew.kickoff(inputs={"input": "Mark Twain"})
-```
+Agents can be equipped with various tools to enhance their capabilities. CrewAI supports tools from:
+- [CrewAI Toolkit](https://github.com/joaomdmoura/crewai-tools)
+- [LangChain Tools](https://python.langchain.com/docs/integrations/tools)
+
+Here's how to add tools to an agent:
+
+```python Code
+from crewai import Agent
+from crewai_tools import SerperDevTool, WikipediaTools
+
+# Create tools
+search_tool = SerperDevTool()
+wiki_tool = WikipediaTools()
+
+# Add tools to agent
+researcher = Agent(
+    role="AI Technology Researcher",
+    goal="Research the latest AI developments",
+    tools=[search_tool, wiki_tool],
+    verbose=True
+)
+```
-## Conclusion
+## Agent Memory and Context

-Agents are the building blocks of the CrewAI framework. By understanding how to define and interact with agents,
-you can create sophisticated AI systems that leverage the power of collaborative intelligence. The `code_execution_mode` attribute provides flexibility in how agents execute code, allowing for both secure and direct execution options.
+Agents can maintain memory of their interactions and use context from previous tasks. This is particularly useful for complex workflows where information needs to be retained across multiple tasks.
+
+```python Code
+from crewai import Agent
+
+analyst = Agent(
+    role="Data Analyst",
+    goal="Analyze and remember complex data patterns",
+    memory=True,  # Enable memory
+    verbose=True
+)
+```
+
+<Note>
+  When `memory` is enabled, the agent will maintain context across multiple interactions, improving its ability to handle complex, multi-step tasks.
+</Note>
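To make the cross-task retention described above concrete, here is a sketch that reuses the `context=[...]` mechanism shown in the removed third-party example earlier in this diff (role and task strings are my own illustration):

```python
from crewai import Agent, Crew, Task

analyst = Agent(
    role="Data Analyst",
    goal="Analyze and remember complex data patterns",
    backstory="Specialized in pattern recognition.",
    memory=True,
)

gather = Task(
    description="Collect notable patterns in the sales data",
    expected_output="A list of patterns",
    agent=analyst,
)

summarize = Task(
    description="Summarize the patterns found in the previous task",
    expected_output="A short summary",
    agent=analyst,
    context=[gather],  # feed the first task's output into the second
)

crew = Crew(agents=[analyst], tasks=[gather, summarize])
result = crew.kickoff()
```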
+## Important Considerations and Best Practices
+
+### Security and Code Execution
+- When using `allow_code_execution`, be cautious with user input and always validate it
+- Use `code_execution_mode: "safe"` (Docker) in production environments
+- Consider setting appropriate `max_execution_time` limits to prevent infinite loops
+
+### Performance Optimization
+- Use `respect_context_window: true` to prevent token limit issues
+- Set appropriate `max_rpm` to avoid rate limiting
+- Enable `cache: true` to improve performance for repetitive tasks
+- Adjust `max_iter` and `max_retry_limit` based on task complexity
+
+### Memory and Context Management
+- Use `memory: true` for tasks requiring historical context
+- Leverage `knowledge_sources` for domain-specific information
+- Configure `embedder_config` when using custom embedding models
+- Use custom templates (`system_template`, `prompt_template`, `response_template`) for fine-grained control over agent behavior
+
+### Agent Collaboration
+- Enable `allow_delegation: true` when agents need to work together
+- Use `step_callback` to monitor and log agent interactions
+- Consider using different LLMs for different purposes:
+  - Main `llm` for complex reasoning
+  - `function_calling_llm` for efficient tool usage
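The `step_callback` hook mentioned in the list above can serve as a lightweight monitor. A sketch — the single-argument signature is an assumption on my part, since this page doesn't pin it down:

```python
from crewai import Agent

def log_step(step_output):
    # Assumed to be called after each agent step with that step's output
    print(f"[agent step] {step_output}")

monitored_agent = Agent(
    role="Research Analyst",
    goal="Gather current market data",
    backstory="Methodical analyst who documents every step.",
    step_callback=log_step,
)
```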
+### Model Compatibility
+- Set `use_system_prompt: false` for older models that don't support system messages
+- Ensure your chosen `llm` supports the features you need (like function calling)
+
+## Troubleshooting Common Issues
+
+1. **Rate Limiting**: If you're hitting API rate limits:
+   - Implement appropriate `max_rpm`
+   - Use caching for repetitive operations
+   - Consider batching requests
+
+2. **Context Window Errors**: If you're exceeding context limits:
+   - Enable `respect_context_window`
+   - Use more efficient prompts
+   - Clear agent memory periodically
+
+3. **Code Execution Issues**: If code execution fails:
+   - Verify Docker is installed for safe mode
+   - Check execution permissions
+   - Review code sandbox settings
+
+4. **Memory Issues**: If agent responses seem inconsistent:
+   - Verify memory is enabled
+   - Check knowledge source configuration
+   - Review conversation history management
+
+Remember that agents are most effective when configured according to their specific use case. Take time to understand your requirements and adjust these parameters accordingly.
@@ -22,7 +22,8 @@ A crew in crewAI represents a collaborative group of agents working together to
 | **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
 | **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
 | **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
-| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). Defaults to `False`. |
+| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
+| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
 | **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
 | **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
 | **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
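A minimal sketch of the memory-related crew options from the table above. The new `memory_config` row is provider-specific, so it's omitted here; this only shows turning memory on and choosing the embedder that backs it (agent and task definitions assumed):

```python
from crewai import Agent, Crew, Task

analyst = Agent(role="Analyst", goal="Summarize findings", backstory="Detail-oriented analyst.")
task = Task(description="Summarize the findings", expected_output="A short summary", agent=analyst)

crew = Crew(
    agents=[analyst],
    tasks=[task],
    memory=True,                      # store short-term/long-term/entity memories
    embedder={"provider": "openai"},  # the default per the table above
)
```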
@@ -560,40 +560,6 @@ uv run kickoff

 The flow will execute, and you should see the output in the console.

-### Adding Additional Crews Using the CLI
-
-Once you have created your initial flow, you can easily add additional crews to your project using the CLI. This allows you to expand your flow's capabilities by integrating new crews without starting from scratch.
-
-To add a new crew to your existing flow, use the following command:
-
-```bash
-crewai flow add-crew <crew_name>
-```
-
-This command will create a new directory for your crew within the `crews` folder of your flow project. It will include the necessary configuration files and a crew definition file, similar to the initial setup.
-
-#### Folder Structure
-
-After adding a new crew, your folder structure will look like this:
-
-name_of_flow/
-├── crews/
-│   ├── poem_crew/
-│   │   ├── config/
-│   │   │   ├── agents.yaml
-│   │   │   └── tasks.yaml
-│   │   └── poem_crew.py
-│   └── name_of_crew/
-│       ├── config/
-│       │   ├── agents.yaml
-│       │   └── tasks.yaml
-│       └── name_of_crew.py
-
-You can then customize the `agents.yaml` and `tasks.yaml` files to define the agents and tasks for your new crew. The `name_of_crew.py` file will contain the crew's logic, which you can modify to suit your needs.
-
-By using the CLI to add additional crews, you can efficiently build complex AI workflows that leverage multiple crews working together.

 ## Plot Flows

 Visualizing your AI workflows can provide valuable insights into the structure and execution paths of your flows. CrewAI offers a powerful visualization tool that allows you to generate interactive plots of your flows, making it easier to understand and optimize your AI workflows.
@@ -633,114 +599,13 @@ The generated plot will display nodes representing the tasks in your flow, with

 By visualizing your flows, you can gain a clearer understanding of the workflow's structure, making it easier to debug, optimize, and communicate your AI processes to others.

 ### Conclusion

-## Advanced
-
-In this section, we explore more complex use cases of CrewAI Flows, starting with a self-evaluation loop. This pattern is crucial for developing AI systems that can iteratively improve their outputs through feedback.
-
-### 1) Self-Evaluation Loop
-
-The self-evaluation loop is a powerful pattern that allows AI workflows to automatically assess and refine their outputs. This example demonstrates how to set up a flow that generates content, evaluates it, and iterates based on feedback until the desired quality is achieved.
-
-#### Overview
-
-The self-evaluation loop involves two main Crews:
-
-1. **ShakespeareanXPostCrew**: Generates a Shakespearean-style post on a given topic.
-2. **XPostReviewCrew**: Evaluates the generated post, providing feedback on its validity and quality.
-
-The process iterates until the post meets the criteria or a maximum retry limit is reached. This approach ensures high-quality outputs through iterative refinement.
-
-#### Importance
-
-This pattern is essential for building robust AI systems that can adapt and improve over time. By automating the evaluation and feedback loop, developers can ensure that their AI workflows produce reliable and high-quality results.
-
-#### Main Code Highlights
-
-Below is the `main.py` file for the self-evaluation loop flow:
-
-```python
-from typing import Optional
-
-from crewai.flow.flow import Flow, listen, router, start
-from pydantic import BaseModel
-
-from self_evaluation_loop_flow.crews.shakespeare_crew.shakespeare_crew import (
-    ShakespeareanXPostCrew,
-)
-from self_evaluation_loop_flow.crews.x_post_review_crew.x_post_review_crew import (
-    XPostReviewCrew,
-)
-
-
-class ShakespeareXPostFlowState(BaseModel):
-    x_post: str = ""
-    feedback: Optional[str] = None
-    valid: bool = False
-    retry_count: int = 0
-
-
-class ShakespeareXPostFlow(Flow[ShakespeareXPostFlowState]):
-
-    @start("retry")
-    def generate_shakespeare_x_post(self):
-        print("Generating Shakespearean X post")
-        topic = "Flying cars"
-        result = (
-            ShakespeareanXPostCrew()
-            .crew()
-            .kickoff(inputs={"topic": topic, "feedback": self.state.feedback})
-        )
-        print("X post generated", result.raw)
-        self.state.x_post = result.raw
-
-    @router(generate_shakespeare_x_post)
-    def evaluate_x_post(self):
-        if self.state.retry_count > 3:
-            return "max_retry_exceeded"
-        result = XPostReviewCrew().crew().kickoff(inputs={"x_post": self.state.x_post})
-        self.state.valid = result["valid"]
-        self.state.feedback = result["feedback"]
-        print("valid", self.state.valid)
-        print("feedback", self.state.feedback)
-        self.state.retry_count += 1
-        if self.state.valid:
-            return "complete"
-        return "retry"
-
-    @listen("complete")
-    def save_result(self):
-        print("X post is valid")
-        print("X post:", self.state.x_post)
-        with open("x_post.txt", "w") as file:
-            file.write(self.state.x_post)
-
-    @listen("max_retry_exceeded")
-    def max_retry_exceeded_exit(self):
-        print("Max retry count exceeded")
-        print("X post:", self.state.x_post)
-        print("Feedback:", self.state.feedback)
-
-
-def kickoff():
-    shakespeare_flow = ShakespeareXPostFlow()
-    shakespeare_flow.kickoff()
-
-
-def plot():
-    shakespeare_flow = ShakespeareXPostFlow()
-    shakespeare_flow.plot()
-
-
-if __name__ == "__main__":
-    kickoff()
-```
-
-#### Code Highlights
-
-- **Retry Mechanism**: The flow uses a retry mechanism to regenerate the post if it doesn't meet the criteria, up to a maximum of three retries.
-- **Feedback Loop**: Feedback from the `XPostReviewCrew` is used to refine the post iteratively.
-- **State Management**: The flow maintains state using a Pydantic model, ensuring type safety and clarity.
-
-For a complete example and further details, please refer to the [Self Evaluation Loop Flow repository](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow).

 Plotting your flows is a powerful feature of CrewAI that enhances your ability to design and manage complex AI workflows. Whether you choose to use the `plot()` method or the command line, generating plots will provide you with a visual representation of your workflows, aiding in both development and presentation.

 ## Next Steps

-If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are five specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:
+If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are four specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:

 1. **Email Auto Responder Flow**: This example demonstrates an infinite loop where a background job continually runs to automate email responses. It's a great use case for tasks that need to be performed repeatedly without manual intervention. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/email_auto_responder_flow)

@@ -750,8 +615,6 @@ If you're interested in exploring additional examples of flows, we have a variet

 4. **Meeting Assistant Flow**: This flow demonstrates how to broadcast one event to trigger multiple follow-up actions. For instance, after a meeting is completed, the flow can update a Trello board, send a Slack message, and save the results. It's a great example of handling multiple outcomes from a single event, making it ideal for comprehensive task management and notification systems. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/meeting_assistant_flow)

-5. **Self Evaluation Loop Flow**: This flow demonstrates a self-evaluation loop where AI workflows automatically assess and refine their outputs through feedback. It involves generating content, evaluating it, and iterating until the desired quality is achieved. This pattern is crucial for developing robust AI systems that can adapt and improve over time. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow)
-
 By exploring these examples, you can gain insights into how to leverage CrewAI Flows for various use cases, from automating repetitive tasks to managing complex, multi-step processes with dynamic decision-making and human feedback.

 Also, check out our YouTube video on how to use flows in CrewAI below!

@@ -765,4 +628,4 @@ Also, check out our YouTube video on how to use flows in CrewAI below!
     allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
     referrerpolicy="strict-origin-when-cross-origin"
     allowfullscreen
-  ></iframe>
+  ></iframe>
docs/concepts/knowledge.mdx (new file, 231 lines)

@@ -0,0 +1,231 @@
---
title: Knowledge
description: Understand what knowledge is in CrewAI and how to effectively use it.
icon: book
---

# Using Knowledge in CrewAI

## What is Knowledge?

Knowledge in CrewAI is a powerful system that allows AI agents to access and utilize external information sources during their tasks. Think of it as giving your agents a reference library they can consult while working.

<Info>
  Key benefits of using Knowledge:
  - Enhance agents with domain-specific information
  - Support decisions with real-world data
  - Maintain context across conversations
  - Ground responses in factual information
</Info>

## Supported Knowledge Sources

CrewAI supports various types of knowledge sources out of the box:

<CardGroup cols={2}>
  <Card title="Text Sources" icon="text">
    - Raw strings
    - Text files (.txt)
    - PDF documents
  </Card>
  <Card title="Structured Data" icon="table">
    - CSV files
    - Excel spreadsheets
    - JSON documents
  </Card>
</CardGroup>

## Quick Start

Here's a simple example using string-based knowledge:

```python
from crewai import Agent, Task, Crew
from crewai.knowledge import StringKnowledgeSource

# 1. Create a knowledge source
product_info = StringKnowledgeSource(
    content="""Our product X1000 has the following features:
    - 10-hour battery life
    - Water-resistant
    - Available in black and silver
    Price: $299.99""",
    metadata={"category": "product"}
)

# 2. Create an agent with knowledge
sales_agent = Agent(
    role="Sales Representative",
    goal="Accurately answer customer questions about products",
    backstory="Expert in product features and customer service",
    knowledge_sources=[product_info]  # Attach knowledge to agent
)

# 3. Create a task
answer_task = Task(
    description="Answer: What colors is the X1000 available in and how much does it cost?",
    expected_output="The available colors and the price of the X1000",
    agent=sales_agent
)

# 4. Create and run the crew
crew = Crew(
    agents=[sales_agent],
    tasks=[answer_task]
)

result = crew.kickoff()
```
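The `kickoff` call returns the crew's output object; its raw text can be printed directly (`.raw` is used the same way in the flows example earlier on this page):

```python
print(result.raw)  # e.g. "The X1000 comes in black and silver and costs $299.99."
```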
## Knowledge Configuration

### Collection Names

Knowledge sources are organized into collections for better management:

```python
# Create knowledge sources with specific collections
tech_specs = StringKnowledgeSource(
    content="Technical specifications...",
    collection_name="product_tech_specs"
)

pricing_info = StringKnowledgeSource(
    content="Pricing information...",
    collection_name="product_pricing"
)
```

### Metadata and Filtering

Add metadata to organize and filter knowledge:

```python
knowledge_source = StringKnowledgeSource(
    content="Product details...",
    metadata={
        "category": "electronics",
        "product_line": "premium",
        "last_updated": "2024-03"
    }
)
```

### Chunking Configuration

Control how your content is split for processing:

```python
knowledge_source = PDFKnowledgeSource(
    file_path="product_manual.pdf",
    chunk_size=2000,   # Characters per chunk
    chunk_overlap=200  # Overlap between chunks
)
```

## Advanced Usage

### Custom Knowledge Sources

Create your own knowledge source by extending the base class:

```python
import requests

from crewai.knowledge.source import BaseKnowledgeSource

class APIKnowledgeSource(BaseKnowledgeSource):
    def __init__(self, api_endpoint: str, **kwargs):
        super().__init__(**kwargs)
        self.api_endpoint = api_endpoint

    def load_content(self):
        # Fetch data from the API endpoint
        response = requests.get(self.api_endpoint)
        return response.json()

    def add(self):
        content = self.load_content()
        # Process and store content
        self.save_documents({"source": "api"})
```
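A hypothetical use of the custom class above — the endpoint URL and collection name are placeholders of my own:

```python
api_source = APIKnowledgeSource(
    api_endpoint="https://example.com/api/products",
    collection_name="live_product_data",
)
```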
### Embedder Configuration

Customize the embedding process:

```python
crew = Crew(
    agents=[agent],
    tasks=[task],
    knowledge_sources=[source],
    embedder={
        "provider": "ollama",
        "config": {"model": "nomic-embed-text:latest"},
    }
)
```

### Referencing Sources

You can reference knowledge sources by their collection name or metadata.

* Add a directory called `knowledge` to your crew project.
* File paths can then be referenced relative to the `knowledge` directory.

Example: a file inside the `knowledge` directory called `example.txt` can be referenced as `example.txt`.

```python
source = TextFileKnowledgeSource(
    file_path="example.txt",  # or /example.txt
    collection_name="example"
)
crew = Crew(
    agents=[agent],
    tasks=[task],
    knowledge_sources=[source],
)
```

## Best Practices

<AccordionGroup>
  <Accordion title="Content Organization">
    - Use meaningful collection names
    - Add detailed metadata for filtering
    - Keep chunk sizes appropriate for your content
    - Consider content overlap for context preservation
  </Accordion>

  <Accordion title="Performance Tips">
    - Use smaller chunk sizes for precise retrieval
    - Implement metadata filtering for faster searches
    - Choose appropriate embedding models for your use case
    - Cache frequently accessed knowledge
  </Accordion>

  <Accordion title="Error Handling">
    - Validate knowledge source content
    - Handle missing or corrupted files
    - Monitor embedding generation
    - Implement fallback options
  </Accordion>
</AccordionGroup>

## Common Issues and Solutions

<AccordionGroup>
  <Accordion title="Content Not Found">
    If agents can't find relevant information:
    - Check chunk sizes
    - Verify knowledge source loading
    - Review metadata filters
    - Test with simpler queries first
  </Accordion>

  <Accordion title="Performance Issues">
    If knowledge retrieval is slow:
    - Reduce chunk sizes
    - Optimize metadata filtering
    - Consider using a lighter embedding model
    - Cache frequently accessed content
  </Accordion>
</AccordionGroup>
@@ -7,32 +7,45 @@ icon: link

 ## Using LangChain Tools

 <Info>
-    CrewAI seamlessly integrates with LangChain’s comprehensive [list of tools](https://python.langchain.com/docs/integrations/tools/), all of which can be used with CrewAI.
+    CrewAI seamlessly integrates with LangChain's comprehensive [list of tools](https://python.langchain.com/docs/integrations/tools/), all of which can be used with CrewAI.
 </Info>

 ```python Code
 import os
-from crewai import Agent
-from langchain.agents import Tool
-from langchain.utilities import GoogleSerperAPIWrapper
+from dotenv import load_dotenv
+from crewai import Agent, Task, Crew
+from crewai.tools import BaseTool
+from pydantic import Field
+from langchain_community.utilities import GoogleSerperAPIWrapper

-# Setup API keys
-os.environ["SERPER_API_KEY"] = "Your Key"
+# Set up your SERPER_API_KEY key in an .env file, eg:
+# SERPER_API_KEY=<your api key>
+load_dotenv()

 search = GoogleSerperAPIWrapper()

-# Create and assign the search tool to an agent
-serper_tool = Tool(
-    name="Intermediate Answer",
-    func=search.run,
-    description="Useful for search-based queries",
-)
+class SearchTool(BaseTool):
+    name: str = "Search"
+    description: str = "Useful for search-based queries. Use this to find current information about markets, companies, and trends."
+    search: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper)

-agent = Agent(
-    role='Research Analyst',
-    goal='Provide up-to-date market analysis',
-    backstory='An expert analyst with a keen eye for market trends.',
-    tools=[serper_tool]
-)
+    def _run(self, query: str) -> str:
+        """Execute the search query and return results"""
+        try:
+            return self.search.run(query)
+        except Exception as e:
+            return f"Error performing search: {str(e)}"

+# Create Agents
+researcher = Agent(
+    role='Research Analyst',
+    goal='Gather current market data and trends',
+    backstory="""You are an expert research analyst with years of experience in
+    gathering market intelligence. You're known for your ability to find
+    relevant and up-to-date market information and present it in a clear,
+    actionable format.""",
+    tools=[SearchTool()],
+    verbose=True
+)

 # rest of the code ...

@@ -40,6 +53,6 @@ agent = Agent(

 ## Conclusion

-Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively.
-When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem. Consider utilizing error handling, caching mechanisms,
-and the flexibility of tool arguments to optimize your agents' performance and capabilities.
+Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively.
+When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem. Consider utilizing error handling, caching mechanisms,
+and the flexibility of tool arguments to optimize your agents' performance and capabilities.
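The example above ends with `# rest of the code ...`; presumably it wires the researcher into a task and crew. A sketch under that assumption, reusing the Task/Crew pattern from the other docs in this compare (task strings are my own illustration):

```python
from crewai import Task, Crew

analysis_task = Task(
    description="Research the current state of the AI hardware market",
    expected_output="A concise market summary with key trends",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[analysis_task])
result = crew.kickoff()
print(result.raw)
```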
@@ -1,110 +1,323 @@
|
||||
---
title: 'LLMs'
description: 'A comprehensive guide to configuring and using Large Language Models (LLMs) in your CrewAI projects'
icon: 'microchip-ai'
---

<Note>
CrewAI integrates with multiple LLM providers through LiteLLM, giving you the flexibility to choose the right model for your specific use case. This guide will help you understand how to configure and use different LLM providers in your CrewAI projects.
</Note>

## Key Concepts

Large Language Models (LLMs) are the core intelligence behind CrewAI agents. They enable agents to understand context, make decisions, and generate human-like responses. Here's what you need to know:

<CardGroup cols={2}>
<Card title="LLM Basics" icon="brain">
  Large Language Models are AI systems trained on vast amounts of text data. They power the intelligence of your CrewAI agents, enabling them to understand and generate human-like text.
</Card>
<Card title="Context Window" icon="window">
  The context window determines how much text an LLM can process at once. Larger windows (e.g., 128K tokens) allow for more context but may be more expensive and slower.
</Card>
<Card title="Temperature" icon="temperature-three-quarters">
  Temperature (0.0 to 1.0) controls response randomness. Lower values (e.g., 0.2) produce more focused, deterministic outputs, while higher values (e.g., 0.8) increase creativity and variability.
</Card>
<Card title="Provider Selection" icon="server">
  Each LLM provider (e.g., OpenAI, Anthropic, Google) offers different models with varying capabilities, pricing, and features. Choose based on your needs for accuracy, speed, and cost.
</Card>
</CardGroup>
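
To make the temperature card concrete, here is a small hedged sketch; the model name and values are illustrative, not recommendations:

```python Code
from crewai import LLM

# Same model, different sampling behavior
factual_llm = LLM(model="gpt-4o-mini", temperature=0.2)   # focused, deterministic
creative_llm = LLM(model="gpt-4o-mini", temperature=0.8)  # varied, exploratory
```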
## Available Models and Their Capabilities

Here's a detailed breakdown of supported models and their capabilities:

<Tabs>
<Tab title="OpenAI">
| Model | Context Window | Best For |
|-------|---------------|-----------|
| GPT-4 | 8,192 tokens | High-accuracy tasks, complex reasoning |
| GPT-4 Turbo | 128,000 tokens | Long-form content, document analysis |
| GPT-4o & GPT-4o-mini | 128,000 tokens | Cost-effective large context processing |

<Note>
1 token ≈ 4 characters in English. For example, 8,192 tokens ≈ 32,768 characters or about 6,000 words.
</Note>
</Tab>
<Tab title="Groq">
| Model | Context Window | Best For |
|-------|---------------|-----------|
| Llama 3.1 70B/8B | 131,072 tokens | High-performance, large context tasks |
| Llama 3.2 Series | 8,192 tokens | General-purpose tasks |
| Mixtral 8x7B | 32,768 tokens | Balanced performance and context |
| Gemma Series | 8,192 tokens | Efficient, smaller-scale tasks |

<Tip>
Groq is known for its fast inference speeds, making it suitable for real-time applications.
</Tip>
</Tab>
<Tab title="Others">
| Provider | Context Window | Key Features |
|----------|---------------|--------------|
| Deepseek Chat | 128,000 tokens | Specialized in technical discussions |
| Claude 3 | Up to 200K tokens | Strong reasoning, code understanding |
| Gemini | Varies by model | Multimodal capabilities |

<Info>
Provider selection should consider factors like:
- API availability in your region
- Pricing structure
- Required features (e.g., streaming, function calling)
- Performance requirements
</Info>
</Tab>
</Tabs>

## Setting Up Your LLM

There are three ways to configure LLMs in CrewAI. Choose the method that best fits your workflow:

<Tabs>
<Tab title="1. Environment Variables">
The simplest way to get started. Set these variables in your environment:

```bash
# Required: Your API key for authentication
OPENAI_API_KEY=<your-api-key>

# Optional: Default model selection
OPENAI_MODEL_NAME=gpt-4o-mini  # Default if not set

# Optional: Organization ID (if applicable)
OPENAI_ORGANIZATION_ID=<your-org-id>
```

<Warning>
Never commit API keys to version control. Use environment files (.env) or your system's secret management.
</Warning>
</Tab>
<Tab title="2. YAML Configuration">
Create a YAML file to define your agent configurations. This method is great for version control and team collaboration:

```yaml
researcher:
  # Agent Definition
  role: Research Specialist
  goal: Conduct comprehensive research and analysis
  backstory: A dedicated research professional with years of experience
  verbose: true

  # Model Selection (uncomment your choice)

  # OpenAI Models - Known for reliability and performance
  llm: openai/gpt-4o-mini
  # llm: openai/gpt-4          # More accurate but expensive
  # llm: openai/gpt-4-turbo    # Fast with large context
  # llm: openai/gpt-4o         # Optimized for longer texts
  # llm: openai/o1-preview     # Latest features
  # llm: openai/o1-mini        # Cost-effective

  # Azure Models - For enterprise deployments
  # llm: azure/gpt-4o-mini
  # llm: azure/gpt-4
  # llm: azure/gpt-35-turbo

  # Anthropic Models - Strong reasoning capabilities
  # llm: anthropic/claude-3-opus-20240229-v1:0
  # llm: anthropic/claude-3-sonnet-20240229-v1:0
  # llm: anthropic/claude-3-haiku-20240307-v1:0
  # llm: anthropic/claude-2.1
  # llm: anthropic/claude-2.0

  # Google Models - Good for general tasks
  # llm: gemini/gemini-pro
  # llm: gemini/gemini-1.5-pro-latest
  # llm: gemini/gemini-1.0-pro-latest

  # AWS Bedrock Models - Enterprise-grade
  # llm: bedrock/anthropic.claude-3-sonnet-20240229-v1:0
  # llm: bedrock/anthropic.claude-v2:1
  # llm: bedrock/amazon.titan-text-express-v1
  # llm: bedrock/meta.llama2-70b-chat-v1

  # Mistral Models - Open source alternative
  # llm: mistral/mistral-large-latest
  # llm: mistral/mistral-medium-latest
  # llm: mistral/mistral-small-latest

  # Groq Models - Fast inference
  # llm: groq/mixtral-8x7b-32768
  # llm: groq/llama-3.1-70b-versatile
  # llm: groq/llama-3.2-90b-text-preview
  # llm: groq/gemma2-9b-it
  # llm: groq/gemma-7b-it

  # IBM watsonx.ai Models - Enterprise features
  # llm: watsonx/ibm/granite-13b-chat-v2
  # llm: watsonx/meta-llama/llama-3-1-70b-instruct
  # llm: watsonx/bigcode/starcoder2-15b

  # Ollama Models - Local deployment
  # llm: ollama/llama3:70b
  # llm: ollama/codellama
  # llm: ollama/mistral
  # llm: ollama/mixtral
  # llm: ollama/phi

  # Fireworks AI Models - Specialized tasks
  # llm: fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct
  # llm: fireworks_ai/accounts/fireworks/models/mixtral-8x7b
  # llm: fireworks_ai/accounts/fireworks/models/zephyr-7b-beta

  # Perplexity AI Models - Research focused
  # llm: pplx/llama-3.1-sonar-large-128k-online
  # llm: pplx/mistral-7b-instruct
  # llm: pplx/codellama-34b-instruct
  # llm: pplx/mixtral-8x7b-instruct

  # Hugging Face Models - Community models
  # llm: huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct
  # llm: huggingface/mistralai/Mixtral-8x7B-Instruct-v0.1
  # llm: huggingface/tiiuae/falcon-180B-chat
  # llm: huggingface/google/gemma-7b-it

  # Nvidia NIM Models - GPU-optimized
  # llm: nvidia_nim/meta/llama3-70b-instruct
  # llm: nvidia_nim/mistral/mixtral-8x7b
  # llm: nvidia_nim/google/gemma-7b

  # SambaNova Models - Enterprise AI
  # llm: sambanova/Meta-Llama-3.1-8B-Instruct
  # llm: sambanova/BioMistral-7B
  # llm: sambanova/Falcon-180B
```

<Info>
The YAML configuration allows you to:
- Version control your agent settings
- Easily switch between different models
- Share configurations across team members
- Document model choices and their purposes
</Info>
</Tab>
<Tab title="3. Direct Code">
For maximum flexibility, configure LLMs directly in your Python code:

```python
from crewai import LLM

# Basic configuration
llm = LLM(model="gpt-4")

# Advanced configuration with detailed parameters
llm = LLM(
    model="gpt-4o-mini",
    temperature=0.7,        # Higher for more creative outputs
    timeout=120,            # Seconds to wait for response
    max_tokens=4000,        # Maximum length of response
    top_p=0.9,              # Nucleus sampling parameter
    frequency_penalty=0.1,  # Reduce repetition
    presence_penalty=0.1,   # Encourage topic diversity
    response_format={"type": "json"},  # For structured outputs
    seed=42                 # For reproducible results
)
```

<Info>
Parameter explanations:
- `temperature`: Controls randomness (0.0-1.0)
- `timeout`: Maximum wait time for response
- `max_tokens`: Limits response length
- `top_p`: Alternative to temperature for sampling
- `frequency_penalty`: Reduces word repetition
- `presence_penalty`: Encourages new topics
- `response_format`: Specifies output structure
- `seed`: Ensures consistent outputs
</Info>
</Tab>
</Tabs>

## Advanced Features and Optimization

Learn how to get the most out of your LLM configuration:

<AccordionGroup>
<Accordion title="Context Window Management">
CrewAI includes smart context management features:

```python
from crewai import LLM

# CrewAI automatically handles:
# 1. Token counting and tracking
# 2. Content summarization when needed
# 3. Task splitting for large contexts

llm = LLM(
    model="gpt-4",
    max_tokens=4000  # Limit response length
)
```

<Info>
Best practices for context management:
1. Choose models with appropriate context windows
2. Pre-process long inputs when possible
3. Use chunking for large documents
4. Monitor token usage to optimize costs
</Info>
</Accordion>
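
For point 3 above, here is a minimal chunking helper (plain Python, not a CrewAI API) that splits a long document into overlapping windows before feeding pieces to tasks; sizes are illustrative:

```python Code
def chunk_text(text: str, size: int = 8000, overlap: int = 200) -> list[str]:
    """Split text into overlapping character windows for piecewise processing."""
    chunks = []
    start = 0
    while start < len(text):
        chunks.append(text[start:start + size])
        start += size - overlap  # step forward, keeping some shared context
    return chunks
```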

<Accordion title="Performance Optimization">
<Steps>
<Step title="Token Usage Optimization">
Choose the right context window for your task:
- Small tasks (up to 4K tokens): Standard models
- Medium tasks (between 4K-32K): Enhanced models
- Large tasks (over 32K): Large context models

```python
# Configure model with appropriate settings
llm = LLM(
    model="openai/gpt-4-turbo-preview",
    temperature=0.7,   # Adjust based on task
    max_tokens=4096,   # Set based on output needs
    timeout=300        # Longer timeout for complex tasks
)
```

<Tip>
- Lower temperature (0.1 to 0.3) for factual responses
- Higher temperature (0.7 to 0.9) for creative tasks
</Tip>
</Step>

<Step title="Best Practices">
1. Monitor token usage
2. Implement rate limiting
3. Use caching when possible
4. Set appropriate max_tokens limits
</Step>
</Steps>

<Info>
Remember to regularly monitor your token usage and adjust your configuration as needed to optimize costs and performance.
</Info>
</Accordion>
</AccordionGroup>

## LLM Configuration Options

When configuring an LLM for your agent, you have access to a wide range of parameters:

| Parameter | Type | Description |
|:------------------|:---------------:|:-------------------------------------------------------------------------------------------------|
| **model** | `str` | Name of the model to use (e.g., "gpt-4", "gpt-3.5-turbo", "ollama/llama3.1"). For more options, visit the providers documentation. |
| **timeout** | `float, int` | Maximum time (in seconds) to wait for a response. |
| **temperature** | `float` | Controls randomness in output (0.0 to 1.0). |
| **top_p** | `float` | Controls diversity of output (0.0 to 1.0). |
| **n** | `int` | Number of completions to generate. |
| **stop** | `str, List[str]` | Sequence(s) where generation should stop. |
| **max_tokens** | `int` | Maximum number of tokens to generate. |
| **presence_penalty** | `float` | Penalizes new tokens based on their presence in prior text. |
| **frequency_penalty** | `float` | Penalizes new tokens based on their frequency in prior text. |
| **logit_bias** | `Dict[int, float]` | Modifies likelihood of specified tokens appearing. |
| **response_format** | `Dict[str, Any]` | Specifies the format of the response (e.g., JSON object). |
| **seed** | `int` | Sets a random seed for deterministic results. |
| **logprobs** | `bool` | Returns log probabilities of output tokens if enabled. |
| **top_logprobs** | `int` | Number of most likely tokens for which to return log probabilities. |
| **base_url** | `str` | The base URL for the API endpoint. |
| **api_version** | `str` | Version of the API to use. |
| **api_key** | `str` | Your API key for authentication. |
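
As a quick illustration, a sketch combining a few parameters straight from the table above (the values themselves are arbitrary):

```python Code
from crewai import LLM

llm = LLM(
    model="gpt-4o-mini",
    n=1,                    # one completion per call
    stop=["END"],           # halt generation at this sequence
    presence_penalty=0.1,   # nudge the model toward new topics
    seed=42                 # reproducible sampling
)
```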

## Provider Configuration Examples

These are examples of how to configure LLMs for your agent.

<AccordionGroup>
<Accordion title="OpenAI">
```python Code
# Required
OPENAI_API_KEY=sk-...

# Optional
OPENAI_API_BASE=<custom-base-url>
OPENAI_ORGANIZATION=<your-org-id>
```

Example usage:
```python Code
from crewai import LLM

@@ -116,162 +329,306 @@ These are examples of how to configure LLMs for your agent.
    frequency_penalty=0.1,
    presence_penalty=0.1,
    stop=["END"],
    seed=42
)
```
</Accordion>

<Accordion title="Anthropic">
```python Code
ANTHROPIC_API_KEY=sk-ant-...
```

Example usage:
```python Code
llm = LLM(
    model="anthropic/claude-3-sonnet-20240229-v1:0",
    temperature=0.7
)
```
</Accordion>

<Accordion title="Google">
```python Code
GEMINI_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="gemini/gemini-pro",
    temperature=0.7
)
```
</Accordion>

<Accordion title="Azure">
```python Code
# Required
AZURE_API_KEY=<your-api-key>
AZURE_API_BASE=<your-resource-url>
AZURE_API_VERSION=<api-version>

# Optional
AZURE_AD_TOKEN=<your-azure-ad-token>
AZURE_API_TYPE=<your-azure-api-type>
```

Example usage:
```python Code
llm = LLM(
    model="azure/gpt-4",
    api_version="2023-05-15"
)
```
</Accordion>

<Accordion title="AWS Bedrock">
```python Code
AWS_ACCESS_KEY_ID=<your-access-key>
AWS_SECRET_ACCESS_KEY=<your-secret-key>
AWS_DEFAULT_REGION=<your-region>
```

Example usage:
```python Code
llm = LLM(
    model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0"
)
```
</Accordion>

<Accordion title="Mistral">
```python Code
MISTRAL_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="mistral/mistral-large-latest",
    temperature=0.7
)
```
</Accordion>

<Accordion title="Groq">
```python Code
GROQ_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="groq/llama-3.2-90b-text-preview",
    temperature=0.7
)
```
</Accordion>

<Accordion title="IBM watsonx.ai">
```python Code
# Required
WATSONX_URL=<your-url>
WATSONX_APIKEY=<your-apikey>
WATSONX_PROJECT_ID=<your-project-id>

# Optional
WATSONX_TOKEN=<your-token>
WATSONX_DEPLOYMENT_SPACE_ID=<your-space-id>
```

Example usage:
```python Code
llm = LLM(
    model="watsonx/meta-llama/llama-3-1-70b-instruct",
    base_url="https://api.watsonx.ai/v1"
)
```
</Accordion>

<Accordion title="Ollama (Local LLMs)">
1. Install Ollama: [ollama.ai](https://ollama.ai/)
2. Run a model: `ollama run llama2`
3. Configure:

```python Code
from crewai import LLM

llm = LLM(
    model="ollama/llama3:70b",
    base_url="http://localhost:11434"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Fireworks AI">
```python Code
FIREWORKS_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
    temperature=0.7
)
```
</Accordion>

<Accordion title="Perplexity AI">
```python Code
PERPLEXITY_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="llama-3.1-sonar-large-128k-online",
    base_url="https://api.perplexity.ai/"
)
```
</Accordion>

<Accordion title="Hugging Face">
```python Code
HUGGINGFACE_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
    base_url="your_api_endpoint"
)
```
</Accordion>

<Accordion title="Nvidia NIM">
```python Code
NVIDIA_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="nvidia_nim/meta/llama3-70b-instruct",
    temperature=0.7
)
```
</Accordion>

<Accordion title="SambaNova">
```python Code
SAMBANOVA_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="sambanova/Meta-Llama-3.1-8B-Instruct",
    temperature=0.7
)
```
</Accordion>

<Accordion title="Cerebras">
```python Code
# Required
CEREBRAS_API_KEY=<your-api-key>
```

Example usage:
```python Code
llm = LLM(
    model="cerebras/llama3.1-70b",
    temperature=0.7,
    max_tokens=8192
)
```

<Info>
Cerebras features:
- Fast inference speeds
- Competitive pricing
- Good balance of speed and quality
- Support for long context windows
</Info>
</Accordion>
</AccordionGroup>

## Changing the Base API URL

You can change the base API URL for any LLM provider by setting the `base_url` parameter:

```python Code
from crewai import LLM

llm = LLM(
    model="custom-model-name",
    base_url="https://api.your-provider.com/v1",
    api_key="your-api-key"
)
agent = Agent(llm=llm, ...)
```

This is particularly useful when working with OpenAI-compatible APIs or when you need to specify a different endpoint for your chosen provider.

## Common Issues and Solutions

<Tabs>
<Tab title="Authentication">
<Warning>
Most authentication issues can be resolved by checking API key format and environment variable names.
</Warning>

```bash
# OpenAI
OPENAI_API_KEY=sk-...

# Anthropic
ANTHROPIC_API_KEY=sk-ant-...
```
</Tab>
<Tab title="Model Names">
<Check>
Always include the provider prefix in model names
</Check>

```python
# Correct
llm = LLM(model="openai/gpt-4")

# Incorrect
llm = LLM(model="gpt-4")
```
</Tab>
<Tab title="Context Length">
<Tip>
Use larger context models for extensive tasks
</Tip>

```python
# Large context model
llm = LLM(model="openai/gpt-4o")  # 128K tokens
```
</Tab>
</Tabs>

## Getting Help

If you need assistance, these resources are available:

<CardGroup cols={3}>
<Card
  title="LiteLLM Documentation"
  href="https://docs.litellm.ai/docs/"
  icon="book"
>
  Comprehensive documentation for LiteLLM integration and troubleshooting common issues.
</Card>
<Card
  title="GitHub Issues"
  href="https://github.com/joaomdmoura/crewAI/issues"
  icon="bug"
>
  Report bugs, request features, or browse existing issues for solutions.
</Card>
<Card
  title="Community Forum"
  href="https://community.crewai.com"
  icon="comment-question"
>
  Connect with other CrewAI users, share experiences, and get help from the community.
</Card>
</CardGroup>

## Best Practices

1. **Choose the right model**: Balance capability and cost.
2. **Optimize prompts**: Clear, concise instructions improve output.
3. **Manage tokens**: Monitor and limit token usage for efficiency.
4. **Use appropriate temperature**: Lower for factual tasks, higher for creative ones.
5. **Implement error handling**: Gracefully manage API errors and rate limits; see the sketch after this list.

## Troubleshooting

- **API Errors**: Check your API key, network connection, and rate limits.
- **Unexpected Outputs**: Refine your prompts and adjust temperature or top_p.
- **Performance Issues**: Consider using a more powerful model or optimizing your queries.
- **Timeout Errors**: Increase the `timeout` parameter or optimize your input.
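
A minimal retry sketch for the error-handling practice above, assuming `kickoff` surfaces provider errors as exceptions; the exception type and delays are illustrative and should be narrowed for real use:

```python Code
import time

def kickoff_with_retries(crew, inputs=None, attempts=3, base_delay=2.0):
    """Run crew.kickoff with simple exponential backoff on failure."""
    for attempt in range(attempts):
        try:
            return crew.kickoff(inputs=inputs)
        except Exception:  # narrow to your provider's error types in real code
            if attempt == attempts - 1:
                raise
            time.sleep(base_delay * (2 ** attempt))  # exponential backoff
```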

<Note>
Best Practices for API Key Security:
- Use environment variables or secure vaults
- Never commit keys to version control
- Rotate keys regularly
- Use separate keys for development and production
- Monitor key usage for unusual patterns
</Note>

@@ -18,6 +18,7 @@ reason, and learn from past interactions.
| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory** | Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **User Memory** | Stores user-specific information and preferences, enhancing personalization and user experience. |

## How Memory Systems Empower Agents
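
The hunk below elides the surrounding example, so for orientation, here is a minimal hedged sketch of switching memory on for a crew, mirroring the elided `my_crew` example:

```python Code
from crewai import Crew, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,   # enables short-term, long-term, and entity memory
    verbose=True
)
```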

@@ -92,6 +93,47 @@ my_crew = Crew(
)
```

## Integrating Mem0 for Enhanced User Memory

[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.

To include user-specific memory, get your API key [here](https://app.mem0.ai/dashboard/api-keys) and refer to the [docs](https://docs.mem0.ai/platform/quickstart#4-1-create-memories) for adding user preferences.

```python Code
import os
from crewai import Crew, Process
from mem0 import MemoryClient

# Set environment variables for Mem0
os.environ["MEM0_API_KEY"] = "m0-xx"

# Step 1: Record preferences based on past conversation or user input
client = MemoryClient()
messages = [
    {"role": "user", "content": "Hi there! I'm planning a vacation and could use some advice."},
    {"role": "assistant", "content": "Hello! I'd be happy to help with your vacation planning. What kind of destination do you prefer?"},
    {"role": "user", "content": "I am more of a beach person than a mountain person."},
    {"role": "assistant", "content": "That's interesting. Do you like hotels or Airbnb?"},
    {"role": "user", "content": "I like Airbnb more."},
]
client.add(messages, user_id="john")

# Step 2: Create a Crew with User Memory
crew = Crew(
    agents=[...],
    tasks=[...],
    verbose=True,
    process=Process.sequential,
    memory=True,
    memory_config={
        "provider": "mem0",
        "config": {"user_id": "john"},
    },
)
```

## Additional Embedding Providers

@@ -254,6 +296,31 @@ my_crew = Crew(
)
```

### Using Watson embeddings

```python Code
from crewai import Crew, Agent, Task, Process

# Note: Ensure you have installed and imported `ibm_watsonx_ai` for Watson embeddings to work.

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "watson",
        "config": {
            "model": "<model_name>",
            "api_url": "<api_url>",
            "api_key": "<YOUR_API_KEY>",
            "project_id": "<YOUR_PROJECT_ID>",
        }
    }
)
```

### Resetting Memory

```shell
# The diff truncates here; flags are indicative, run `crewai reset-memories --help` for the exact options
crewai reset-memories --all
```

@@ -1,6 +1,6 @@
---
title: Tasks
description: Detailed guide on managing and creating tasks within the CrewAI framework.
icon: list-check
---

@@ -8,41 +8,171 @@ icon: list-check

In the CrewAI framework, a `Task` is a specific assignment completed by an `Agent`.

Tasks provide all necessary details for execution, such as a description, the agent responsible, required tools, and more, facilitating a wide range of action complexities.

Tasks within CrewAI can be collaborative, requiring multiple agents to work together. This is managed through the task properties and orchestrated by the Crew's process, enhancing teamwork and efficiency.

### Task Execution Flow

Tasks can be executed in two ways:
- **Sequential**: Tasks are executed in the order they are defined
- **Hierarchical**: Tasks are assigned to agents based on their roles and expertise

The execution flow is defined when creating the crew:
```python Code
crew = Crew(
    agents=[agent1, agent2],
    tasks=[task1, task2],
    process=Process.sequential  # or Process.hierarchical
)
```

## Task Attributes

| Attribute | Parameters | Type | Description |
| :-------- | :--------- | :--- | :---------- |
| **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
| **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
| **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
| **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task, assigned either directly or by the crew's process. |
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. Defaults to an empty list. |
| **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously, allowing progression without waiting for completion. Defaults to False. |
| **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Indicates if the task should involve human review at the end, useful for tasks needing human oversight. Defaults to False. |
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. Defaults to None. |
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. Requires an OpenAI client; only one output format can be set. |
| **Output Pydantic** _(optional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | A Pydantic model for the task output. Requires an OpenAI client; only one output format can be set. |
| **Output** _(optional)_ | `output` | `Optional[TaskOutput]` | An instance of `TaskOutput`, containing the raw, JSON, and Pydantic output plus additional details. |
| **Callback** _(optional)_ | `callback` | `Optional[Any]` | Function/object to be executed after task completion. |
| **Converter Class** _(optional)_ | `converter_cls` | `Optional[Type[Converter]]` | A converter class used to export structured output. Defaults to None. |

## Creating Tasks

There are two ways to create tasks in CrewAI: using **YAML configuration (recommended)** or defining them **directly in code**.

### YAML Configuration (Recommended)

Using YAML configuration provides a cleaner, more maintainable way to define tasks. We strongly recommend using this approach to define tasks in your CrewAI projects.

After creating your CrewAI project as outlined in the [Installation](/installation) section, navigate to the `src/latest_ai_development/config/tasks.yaml` file and modify the template to match your specific task requirements.

<Note>
Variables in your YAML files (like `{topic}`) will be replaced with values from your inputs when running the crew:
```python Code
crew.kickoff(inputs={'topic': 'AI Agents'})
```
</Note>

Here's an example of how to configure tasks using YAML:

```yaml tasks.yaml
research_task:
  description: >
    Conduct thorough research about {topic}.
    Make sure you find any interesting and relevant information given
    the current year is 2024.
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}
  agent: researcher

reporting_task:
  description: >
    Review the context you got and expand each topic into a full section for a report.
    Make sure the report is detailed and contains any and all relevant information.
  expected_output: >
    A fully fledged report with the main topics, each with a full section of information.
    Formatted as markdown without '```'
  agent: reporting_analyst
  output_file: report.md
```

To use this YAML configuration in your code, create a crew class that inherits from `CrewBase`:

```python crew.py
# src/latest_ai_development/crew.py

from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import SerperDevTool

@CrewBase
class LatestAiDevelopmentCrew():
  """LatestAiDevelopment crew"""

  @agent
  def researcher(self) -> Agent:
    return Agent(
      config=self.agents_config['researcher'],
      verbose=True,
      tools=[SerperDevTool()]
    )

  @agent
  def reporting_analyst(self) -> Agent:
    return Agent(
      config=self.agents_config['reporting_analyst'],
      verbose=True
    )

  @task
  def research_task(self) -> Task:
    return Task(
      config=self.tasks_config['research_task']
    )

  @task
  def reporting_task(self) -> Task:
    return Task(
      config=self.tasks_config['reporting_task']
    )

  @crew
  def crew(self) -> Crew:
    return Crew(
      agents=[
        self.researcher(),
        self.reporting_analyst()
      ],
      tasks=[
        self.research_task(),
        self.reporting_task()
      ],
      process=Process.sequential
    )
```

<Note>
The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
</Note>
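
For orientation, a hypothetical entry point for the class above; the module path follows the example project layout, and `result.raw` assumes the usual crew output object:

```python Code
from latest_ai_development.crew import LatestAiDevelopmentCrew

result = LatestAiDevelopmentCrew().crew().kickoff(inputs={"topic": "AI Agents"})
print(result.raw)
```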

### Direct Code Definition (Alternative)

Alternatively, you can define tasks directly in your code without using YAML configuration:

```python task.py
from crewai import Task

research_task = Task(
    description="""
    Conduct thorough research about AI Agents.
    Make sure you find any interesting and relevant information given
    the current year is 2024.
    """,
    expected_output="""
    A list with 10 bullet points of the most relevant information about AI Agents
    """,
    agent=researcher
)

reporting_task = Task(
    description="""
    Review the context you got and expand each topic into a full section for a report.
    Make sure the report is detailed and contains any and all relevant information.
    """,
    expected_output="""
    A fully fledged report with the main topics, each with a full section of information.
    Formatted as markdown without '```'
    """,
    agent=reporting_analyst,
    output_file="report.md"
)
```

@@ -52,6 +182,8 @@ task = Task(

## Task Output

Understanding task outputs is crucial for building effective AI workflows. CrewAI provides a structured way to handle task results through the `TaskOutput` class, which supports multiple output formats and can be easily passed between tasks.

The output of a task in the CrewAI framework is encapsulated within the `TaskOutput` class. This class provides a structured way to access results of a task, including various formats such as raw output, JSON, and Pydantic models.

By default, the `TaskOutput` will only include the `raw` output. A `TaskOutput` will only include the `pydantic` or `json_dict` output if the original `Task` object was configured with `output_pydantic` or `output_json`, respectively.
@@ -112,6 +244,186 @@ if task_output.pydantic:
    print(f"Pydantic Output: {task_output.pydantic}")
```
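
The hunk above cuts into the middle of the example, so here is a hedged sketch of the typical access pattern; `task.output` and the attribute names follow the `TaskOutput` description above:

```python Code
result = crew.kickoff()

# Each executed task exposes its TaskOutput
task_output = task.output
print(f"Raw Output: {task_output.raw}")
if task_output.json_dict:
    print(f"JSON Output: {task_output.json_dict}")
```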

## Task Dependencies and Context

Tasks can depend on the output of other tasks using the `context` attribute. For example:

```python Code
research_task = Task(
    description="Research the latest developments in AI",
    expected_output="A list of recent AI developments",
    agent=researcher
)

analysis_task = Task(
    description="Analyze the research findings and identify key trends",
    expected_output="Analysis report of AI trends",
    agent=analyst,
    context=[research_task]  # This task will wait for research_task to complete
)
```

## Getting Structured Consistent Outputs from Tasks

When you need to ensure that a task outputs a structured and consistent format, you can use the `output_pydantic` or `output_json` properties on a task. These properties allow you to define the expected output structure, making it easier to parse and utilize the results in your application.

<Note>
It's also important to note that the output of the final task of a crew becomes the final output of the actual crew itself.
</Note>

### Using `output_pydantic`

The `output_pydantic` property allows you to define a Pydantic model that the task output should conform to. This ensures that the output is not only structured but also validated according to the Pydantic model.

Here's an example demonstrating how to use `output_pydantic`:

```python Code
from crewai import Agent, Crew, Process, Task
from pydantic import BaseModel


class Blog(BaseModel):
    title: str
    content: str


blog_agent = Agent(
    role="Blog Content Generator Agent",
    goal="Generate a blog title and content",
    backstory="""You are an expert content creator, skilled in crafting engaging and informative blog posts.""",
    verbose=False,
    allow_delegation=False,
    llm="gpt-4o",
)

task1 = Task(
    description="""Create a blog title and content on a given topic. Make sure the content is under 200 words.""",
    expected_output="A compelling blog title and well-written content.",
    agent=blog_agent,
    output_pydantic=Blog,
)

# Instantiate your crew with a sequential process
crew = Crew(
    agents=[blog_agent],
    tasks=[task1],
    verbose=True,
    process=Process.sequential,
)

result = crew.kickoff()

# Option 1: Accessing Properties Using Dictionary-Style Indexing
print("Accessing Properties - Option 1")
title = result["title"]
content = result["content"]
print("Title:", title)
print("Content:", content)

# Option 2: Accessing Properties Directly from the Pydantic Model
print("Accessing Properties - Option 2")
title = result.pydantic.title
content = result.pydantic.content
print("Title:", title)
print("Content:", content)

# Option 3: Accessing Properties Using the to_dict() Method
print("Accessing Properties - Option 3")
output_dict = result.to_dict()
title = output_dict["title"]
content = output_dict["content"]
print("Title:", title)
print("Content:", content)

# Option 4: Printing the Entire Blog Object
print("Accessing Properties - Option 4")
print("Blog:", result)
```

In this example:
* A Pydantic model `Blog` is defined with `title` and `content` fields.
* The task `task1` uses the `output_pydantic` property to specify that its output should conform to the `Blog` model.
* After executing the crew, you can access the structured output in multiple ways as shown.

#### Explanation of Accessing the Output
1. Dictionary-Style Indexing: You can directly access the fields using `result["field_name"]`. This works because the `CrewOutput` class implements the `__getitem__` method.
2. Directly from Pydantic Model: Access the attributes directly from the `result.pydantic` object.
3. Using the `to_dict()` Method: Convert the output to a dictionary and access the fields.
4. Printing the Entire Object: Simply print the `result` object to see the structured output.

### Using `output_json`

The `output_json` property allows you to define the expected output in JSON format. This ensures that the task's output is a valid JSON structure that can be easily parsed and used in your application.

Here's an example demonstrating how to use `output_json`:

```python Code
from crewai import Agent, Crew, Process, Task
from pydantic import BaseModel


# Define the Pydantic model for the blog
class Blog(BaseModel):
    title: str
    content: str


# Define the agent
blog_agent = Agent(
    role="Blog Content Generator Agent",
    goal="Generate a blog title and content",
    backstory="""You are an expert content creator, skilled in crafting engaging and informative blog posts.""",
    verbose=False,
    allow_delegation=False,
    llm="gpt-4o",
)

# Define the task with output_json set to the Blog model
task1 = Task(
    description="""Create a blog title and content on a given topic. Make sure the content is under 200 words.""",
    expected_output="A JSON object with 'title' and 'content' fields.",
    agent=blog_agent,
    output_json=Blog,
)

# Instantiate the crew with a sequential process
crew = Crew(
    agents=[blog_agent],
    tasks=[task1],
    verbose=True,
    process=Process.sequential,
)

# Kickoff the crew to execute the task
result = crew.kickoff()

# Option 1: Accessing Properties Using Dictionary-Style Indexing
print("Accessing Properties - Option 1")
title = result["title"]
content = result["content"]
print("Title:", title)
print("Content:", content)

# Option 2: Printing the Entire Blog Object
print("Accessing Properties - Option 2")
print("Blog:", result)
```

In this example:
* A Pydantic model `Blog` is defined with `title` and `content` fields, which is used to specify the structure of the JSON output.
* The task `task1` uses the `output_json` property to indicate that it expects a JSON output conforming to the `Blog` model.
* After executing the crew, you can access the structured JSON output in two ways as shown.

#### Explanation of Accessing the Output

1. Accessing Properties Using Dictionary-Style Indexing: You can access the fields directly using `result["field_name"]`. This is possible because the `CrewOutput` class implements the `__getitem__` method, allowing you to treat the output like a dictionary. In this option, we're retrieving the title and content from the result.
2. Printing the Entire Blog Object: By printing `result`, you get the string representation of the `CrewOutput` object. Since the `__str__` method is implemented to return the JSON output, this will display the entire output as a formatted string representing the `Blog` object.

---

By using `output_pydantic` or `output_json`, you ensure that your tasks produce outputs in a consistent and structured format, making it easier to process and utilize the data within your application or across multiple tasks.

## Integrating Tools with Tasks

Leverage tools from the [CrewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools) for enhanced task performance and agent interaction. A minimal sketch follows.
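
A hedged sketch of the pattern; the agent text and API key are placeholders, `SerperDevTool` comes from `crewai_tools`, and, per the attribute table above, tools set on the task constrain what the agent may use for that task:

```python Code
import os
from crewai import Agent, Task
from crewai_tools import SerperDevTool

os.environ["SERPER_API_KEY"] = "Your Key"  # assumes a Serper.dev API key

research_agent = Agent(
    role="Researcher",
    goal="Find and summarize the latest AI news",
    backstory="An expert researcher on top of current AI developments.",
)

task = Task(
    description="Search the internet and summarize the latest AI news",
    expected_output="A summary of the top 5 most important AI news",
    agent=research_agent,
    tools=[SerperDevTool()],  # task-level tools limit the agent for this task
)
```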

@@ -167,16 +479,16 @@ This is useful when you have a task that depends on the output of another task t
# ...

research_ai_task = Task(
    description="Research the latest developments in AI",
    expected_output="A list of recent AI developments",
    async_execution=True,
    agent=research_agent,
    tools=[search_tool]
)

research_ops_task = Task(
    description="Research the latest developments in AI Ops",
    expected_output="A list of recent AI Ops developments",
    async_execution=True,
    agent=research_agent,
    tools=[search_tool]
@@ -184,7 +496,7 @@ research_ops_task = Task(

write_blog_task = Task(
    description="Write a full blog post about the importance of AI and its latest news",
    expected_output="Full blog post that is 4 paragraphs long",
    agent=writer_agent,
    context=[research_ai_task, research_ops_task]
)
@@ -320,4 +632,4 @@ save_output_task = Task(
Tasks are the driving force behind the actions of agents in CrewAI.
By properly defining tasks and their outcomes, you set the stage for your AI agents to work effectively, either independently or as a collaborative unit.
Equipping tasks with appropriate tools, understanding the execution process, and following robust validation practices are crucial for maximizing CrewAI's potential,
ensuring agents are effectively prepared for their assignments and that tasks are executed as intended.

@@ -5,13 +5,14 @@ icon: screwdriver-wrench
---

## Introduction

CrewAI tools empower agents with capabilities ranging from web searching and data analysis to collaboration and delegating tasks among coworkers.
This documentation outlines how to create, integrate, and leverage these tools within the CrewAI framework, including a new focus on collaboration tools.

## What is a Tool?

A tool in CrewAI is a skill or function that agents can utilize to perform various actions.
This includes tools from the [CrewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools),
enabling everything from simple searches to complex interactions and effective teamwork among agents.

## Key Characteristics of Tools
@@ -103,57 +104,53 @@ crew.kickoff()

Here is a list of the available tools and their descriptions:

| Tool | Description |
| :------------------------------- | :--------------------------------------------------------------------------------------------- |
| **BrowserbaseLoadTool** | A tool for interacting with and extracting data from web browsers. |
| **CodeDocsSearchTool** | A RAG tool optimized for searching through code documentation and related technical documents. |
| **CodeInterpreterTool** | A tool for interpreting Python code. |
| **ComposioTool** | Enables use of Composio tools. |
| **CSVSearchTool** | A RAG tool designed for searching within CSV files, tailored to handle structured data. |
| **DALL-E Tool** | A tool for generating images using the DALL-E API. |
| **DirectorySearchTool** | A RAG tool for searching within directories, useful for navigating through file systems. |
| **DOCXSearchTool** | A RAG tool aimed at searching within DOCX documents, ideal for processing Word files. |
| **DirectoryReadTool** | Facilitates reading and processing of directory structures and their contents. |
| **EXASearchTool** | A tool designed for performing exhaustive searches across various data sources. |
| **FileReadTool** | Enables reading and extracting data from files, supporting various file formats. |
| **FirecrawlSearchTool** | A tool to search webpages using Firecrawl and return the results. |
| **FirecrawlCrawlWebsiteTool** | A tool for crawling webpages using Firecrawl. |
| **FirecrawlScrapeWebsiteTool** | A tool for scraping webpage URLs using Firecrawl and returning their contents. |
| **GithubSearchTool** | A RAG tool for searching within GitHub repositories, useful for code and documentation search. |
| **SerperDevTool** | A specialized tool for development purposes, with specific functionalities under development. |
| **TXTSearchTool** | A RAG tool focused on searching within text (.txt) files, suitable for unstructured data. |
| **JSONSearchTool** | A RAG tool designed for searching within JSON files, catering to structured data handling. |
| **LlamaIndexTool** | Enables the use of LlamaIndex tools. |
| **MDXSearchTool** | A RAG tool tailored for searching within Markdown (MDX) files, useful for documentation. |
| **PDFSearchTool** | A RAG tool aimed at searching within PDF documents, ideal for processing scanned documents. |
| **PGSearchTool** | A RAG tool optimized for searching within PostgreSQL databases, suitable for database queries. |
| **Vision Tool** | A tool for generating images using the DALL-E API. |
| **RagTool** | A general-purpose RAG tool capable of handling various data sources and types. |
| **ScrapeElementFromWebsiteTool** | Enables scraping specific elements from websites, useful for targeted data extraction. |
| **ScrapeWebsiteTool** | Facilitates scraping entire websites, ideal for comprehensive data collection. |
| **WebsiteSearchTool** | A RAG tool for searching website content, optimized for web data extraction. |
| **XMLSearchTool** | A RAG tool designed for searching within XML files, suitable for structured data formats. |
| **YoutubeChannelSearchTool** | A RAG tool for searching within YouTube channels, useful for video content analysis. |
| **YoutubeVideoSearchTool** | A RAG tool aimed at searching within YouTube videos, ideal for video data extraction. |

## Creating your own Tools

<Tip>
  Developers can craft `custom tools` tailored for their agent’s needs or
  utilize pre-built options.
</Tip>

To create your own CrewAI tools you will need to install our extra tools package:

```bash
pip install 'crewai[tools]'
```

There are two main ways to create a CrewAI tool:

### Subclassing `BaseTool`

```python Code
-from crewai_tools import BaseTool
+from crewai.tools import BaseTool


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
```

@@ -167,7 +164,7 @@ class MyCustomTool(BaseTool):
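The diff truncates the class body here. For reference, a minimal complete subclass looks roughly like the sketch below; the `description` text and `_run` body are illustrative, not the exact lines elided from the page:

```python Code
from crewai.tools import BaseTool


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "Clear description of what this tool does; the agent relies on it to decide when to use the tool."

    def _run(self, argument: str) -> str:
        # Your tool's logic goes here; the return value is handed back to the agent.
        return f"Result from processing: {argument}"
```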

### Utilizing the `tool` Decorator

```python Code
-from crewai_tools import tool
+from crewai.tools import tool

@tool("Name of my tool")
def my_tool(question: str) -> str:
    """Clear description for what this tool is useful for, your agent will need this information to use it."""
```

@@ -178,11 +175,13 @@ def my_tool(question: str) -> str:
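The function body is elided as well; a complete decorated tool follows the same shape, with the docstring doubling as the tool description (the return logic here is a placeholder):

```python Code
from crewai.tools import tool


@tool("Name of my tool")
def my_tool(question: str) -> str:
    """Clear description for what this tool is useful for, your agent will need this information to use it."""
    # Placeholder logic; replace with whatever the tool should actually compute.
    return f"Answer to: {question}"
```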

### Custom Caching Mechanism

<Tip>
  Tools can optionally implement a `cache_function` to fine-tune caching
  behavior. This function determines when to cache results based on specific
  conditions, offering granular control over caching logic.
</Tip>

```python Code
-from crewai_tools import tool
+from crewai.tools import tool


@tool
def multiplication_tool(first_number: int, second_number: int) -> str:
```

@@ -208,6 +207,6 @@ writer1 = Agent(
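The caching example itself is cut off in this view. A plausible end-to-end version, based on the function signature above (the even-result condition is illustrative):

```python Code
from crewai.tools import tool


@tool
def multiplication_tool(first_number: int, second_number: int) -> str:
    """Useful when you need to multiply two numbers together."""
    return str(first_number * second_number)


def cache_func(args, result):
    # Cache only even results; any predicate over the arguments and result works here.
    return int(result) % 2 == 0


multiplication_tool.cache_function = cache_func
```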

## Conclusion

Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively.
When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem. Consider utilizing error handling,
caching mechanisms, and the flexibility of tool arguments to optimize your agents' performance and capabilities.

59
docs/how-to/before-and-after-kickoff-hooks.mdx
Normal file
@@ -0,0 +1,59 @@
---
title: Before and After Kickoff Hooks
description: Learn how to use before and after kickoff hooks in CrewAI
---

CrewAI provides hooks that allow you to execute code before and after a crew's kickoff. These hooks are useful for preprocessing inputs or post-processing results.

## Before Kickoff Hook

The before kickoff hook is executed before the crew starts its tasks. It receives the input dictionary and can modify it before passing it to the crew. You can use this hook to set up your environment, load necessary data, or preprocess your inputs. This is useful in scenarios where the input data might need enrichment or validation before being processed by the crew.

Here's an example of defining a before kickoff function in your `crew.py`:

```python
from crewai.project import CrewBase, before_kickoff


@CrewBase
class MyCrew:
    @before_kickoff
    def prepare_data(self, inputs):
        # Preprocess or modify inputs
        inputs['processed'] = True
        return inputs

    # ...
```

In this example, the `prepare_data` function modifies the inputs by adding a new key-value pair indicating that the inputs have been processed.

## After Kickoff Hook

The after kickoff hook is executed after the crew has completed its tasks. It receives the result object, which contains the outputs of the crew's execution. This hook is ideal for post-processing results, such as logging, data transformation, or further analysis.

Here's how you can define an after kickoff function in your `crew.py`:

```python
from crewai.project import CrewBase, after_kickoff


@CrewBase
class MyCrew:
    @after_kickoff
    def log_results(self, result):
        # Log or modify the results
        print("Crew execution completed with result:", result)
        return result

    # ...
```

In the `log_results` function, the results of the crew execution are simply printed out. You can extend this to perform more complex operations such as sending notifications or integrating with other services.

## Utilizing Both Hooks

Both hooks can be used together to provide a comprehensive setup and teardown process for your crew's execution. They are particularly useful in maintaining clean code architecture by separating concerns and enhancing the modularity of your CrewAI implementations. A minimal sketch combining the two appears below.
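
For illustration, here is how the two examples above might sit side by side in one crew class (using the same `crewai.project` imports shown throughout this page):

```python
from crewai.project import CrewBase, before_kickoff, after_kickoff


@CrewBase
class MyCrew:
    @before_kickoff
    def prepare_data(self, inputs):
        # Enrich or validate inputs before the crew runs
        inputs['processed'] = True
        return inputs

    @after_kickoff
    def log_results(self, result):
        # Post-process or log the crew's output
        print("Crew execution completed with result:", result)
        return result

    # ...
```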

## Conclusion

Before and after kickoff hooks in CrewAI offer powerful ways to interact with the lifecycle of a crew's execution. By understanding and utilizing these hooks, you can greatly enhance the robustness and flexibility of your AI agents.

@@ -6,25 +6,17 @@ icon: hammer

## Creating and Utilizing Tools in CrewAI

This guide provides detailed instructions on creating custom tools for the CrewAI framework and how to efficiently manage and utilize these tools,
incorporating the latest functionalities such as tool delegation, error handling, and dynamic tool calling. It also highlights the importance of collaboration tools,
enabling agents to perform a wide range of actions.

### Prerequisites

Before creating your own tools, ensure you have the crewAI extra tools package installed:

```bash
pip install 'crewai[tools]'
```

### Subclassing `BaseTool`

To create a personalized tool, inherit from `BaseTool` and define the necessary attributes, including the `args_schema` for input validation, and the `_run` method.

```python Code
from typing import Type
-from crewai_tools import BaseTool
+from crewai.tools import BaseTool
from pydantic import BaseModel, Field

class MyToolInput(BaseModel):
```
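The rest of the example is cut off by the diff. Filled out under the same pattern, with illustrative field names, it would read roughly:

```python Code
from typing import Type

from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyToolInput(BaseModel):
    """Input schema for MyCustomTool."""

    argument: str = Field(..., description="Description of the argument.")


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "What this tool does, in terms the agent can act on."
    args_schema: Type[BaseModel] = MyToolInput

    def _run(self, argument: str) -> str:
        # Inputs have already been validated against MyToolInput by this point.
        return f"Result from processing: {argument}"
```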

@@ -47,7 +39,7 @@ Alternatively, you can use the tool decorator `@tool`. This approach allows you
offering a concise and efficient way to create specialized tools tailored to your needs.

```python Code
-from crewai_tools import tool
+from crewai.tools import tool


@tool("Tool Name")
def my_simple_tool(question: str) -> str:
```

@@ -73,5 +65,5 @@ def my_cache_strategy(arguments: dict, result: str) -> bool:

```python Code
cached_tool.cache_function = my_cache_strategy
```
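Between those two fragments the page defines `my_cache_strategy`; a self-contained reconstruction, with a placeholder caching condition, might be:

```python Code
from crewai.tools import tool


@tool("Tool Name")
def my_simple_tool(question: str) -> str:
    """Tool description."""
    return "Tool output"


def my_cache_strategy(arguments: dict, result: str) -> bool:
    # Decide whether this call's result should be cached; always-on here for illustration.
    return True


cached_tool = my_simple_tool
cached_tool.cache_function = my_cache_strategy
```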

By adhering to these guidelines and incorporating new functionalities and collaboration tools into your tool creation and management processes,
you can leverage the full capabilities of the CrewAI framework, enhancing both the development experience and the efficiency of your AI agents.

@@ -125,10 +125,10 @@ You can connect to OpenAI-compatible LLMs using either environment variables or
  </Tab>
  <Tab title="Using LLM Class Attributes">
    <CodeGroup>
      ```python Code
      llm = LLM(
          model="custom-model-name",
          api_key="your-api-key",
          base_url="https://api.your-provider.com/v1"
      )
      agent = Agent(llm=llm, ...)
      ```
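
The environment-variable tab is elided in this diff; judging from the variables CrewAI itself reads (see the `agent.py` changes later on this page), an equivalent setup would look roughly like this:

```python Code
import os

# Assumed variable names, matching what agent.py resolves for OpenAI-compatible endpoints
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["OPENAI_API_BASE"] = "https://api.your-provider.com/v1"
os.environ["OPENAI_MODEL_NAME"] = "custom-model-name"
```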

@@ -179,4 +179,4 @@ This is particularly useful when working with OpenAI-compatible APIs or when you

## Conclusion

By leveraging LiteLLM, CrewAI offers seamless integration with a vast array of LLMs. This flexibility allows you to choose the most suitable model for your specific needs, whether you prioritize performance, cost-efficiency, or local deployment. Remember to consult the [LiteLLM documentation](https://docs.litellm.ai/docs/) for the most up-to-date information on supported models and configuration options.

181
docs/how-to/openlit-observability.mdx
Normal file
@@ -0,0 +1,181 @@
---
title: Agent Monitoring with OpenLIT
description: Quickly start monitoring your Agents in just a single line of code with OpenTelemetry.
icon: magnifying-glass-chart
---

# OpenLIT Overview

[OpenLIT](https://github.com/openlit/openlit?src=crewai-docs) is an open-source tool that makes it simple to monitor the performance of AI agents, LLMs, VectorDBs, and GPUs with just **one** line of code.

It provides OpenTelemetry-native tracing and metrics to track important parameters like cost, latency, interactions and task sequences.
This setup enables you to track hyperparameters and monitor for performance issues, helping you find ways to enhance and fine-tune your agents over time.

<Frame caption="OpenLIT Dashboard">
  <img src="/images/openlit1.png" alt="Overview Agent usage including cost and tokens" />
  <img src="/images/openlit2.png" alt="Overview of agent otel traces and metrics" />
  <img src="/images/openlit3.png" alt="Overview of agent traces in details" />
</Frame>

### Features

- **Analytics Dashboard**: Monitor your Agents' health and performance with detailed dashboards that track metrics, costs, and user interactions.
- **OpenTelemetry-native Observability SDK**: Vendor-neutral SDKs to send traces and metrics to your existing observability tools like Grafana, DataDog and more.
- **Cost Tracking for Custom and Fine-Tuned Models**: Tailor cost estimations for specific models using custom pricing files for precise budgeting.
- **Exceptions Monitoring Dashboard**: Quickly spot and resolve issues by tracking common exceptions and errors with a monitoring dashboard.
- **Compliance and Security**: Detect potential threats such as profanity and PII leaks.
- **Prompt Injection Detection**: Identify potential code injection and secret leaks.
- **API Keys and Secrets Management**: Securely handle your LLM API keys and secrets centrally, avoiding insecure practices.
- **Prompt Management**: Manage and version Agent prompts using PromptHub for consistent and easy access across Agents.
- **Model Playground**: Test and compare different models for your CrewAI agents before deployment.

## Setup Instructions

<Steps>
<Step title="Deploy OpenLIT">
<Steps>
<Step title="Git Clone OpenLIT Repository">
```shell
git clone git@github.com:openlit/openlit.git
```
</Step>
<Step title="Start Docker Compose">
From the root directory of the [OpenLIT Repo](https://github.com/openlit/openlit), run the command below:
```shell
docker compose up -d
```
</Step>
</Steps>
</Step>
<Step title="Install OpenLIT SDK">
```shell
pip install openlit
```
</Step>
<Step title="Initialize OpenLIT in Your Application">
Add the following two lines to your application code:
<Tabs>
<Tab title="Setup using function arguments">
```python
import openlit

openlit.init(otlp_endpoint="http://127.0.0.1:4318")
```

Example usage for monitoring a CrewAI Agent:

```python
from crewai import Agent, Task, Crew, Process
import openlit

openlit.init(disable_metrics=True)

# Define your agents
researcher = Agent(
    role="Researcher",
    goal="Conduct thorough research and analysis on AI and AI agents",
    backstory="You're an expert researcher, specialized in technology, software engineering, AI, and startups. You work as a freelancer and are currently researching for a new client.",
    allow_delegation=False,
    llm='command-r'
)


# Define your task
task = Task(
    description="Generate a list of 5 interesting ideas for an article, then write one captivating paragraph for each idea that showcases the potential of a full article on this topic. Return the list of ideas with their paragraphs and your notes.",
    expected_output="5 bullet points, each with a paragraph and accompanying notes.",
)

# Define the manager agent
manager = Agent(
    role="Project Manager",
    goal="Efficiently manage the crew and ensure high-quality task completion",
    backstory="You're an experienced project manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
    allow_delegation=True,
    llm='command-r'
)

# Instantiate your crew with a custom manager
crew = Crew(
    agents=[researcher],
    tasks=[task],
    manager_agent=manager,
    process=Process.hierarchical,
)

# Start the crew's work
result = crew.kickoff()

print(result)
```
</Tab>
<Tab title="Setup using Environment Variables">

Add the following two lines to your application code:
```python
import openlit

openlit.init()
```

Run the following command to configure the OTEL export endpoint:
```shell
export OTEL_EXPORTER_OTLP_ENDPOINT="http://127.0.0.1:4318"
```

Example usage for monitoring a CrewAI Async Agent:

```python
import asyncio
from crewai import Crew, Agent, Task
import openlit

openlit.init(otlp_endpoint="http://127.0.0.1:4318")

# Create an agent with code execution enabled
coding_agent = Agent(
    role="Python Data Analyst",
    goal="Analyze data and provide insights using Python",
    backstory="You are an experienced data analyst with strong Python skills.",
    allow_code_execution=True,
    llm="command-r"
)

# Create a task that requires code execution
data_analysis_task = Task(
    description="Analyze the given dataset and calculate the average age of participants. Ages: {ages}",
    agent=coding_agent,
    expected_output="The average age of the participants.",
)

# Create a crew and add the task
analysis_crew = Crew(
    agents=[coding_agent],
    tasks=[data_analysis_task]
)

# Async function to kickoff the crew asynchronously
async def async_crew_execution():
    result = await analysis_crew.kickoff_async(inputs={"ages": [25, 30, 35, 40, 45]})
    print("Crew Result:", result)

# Run the async function
asyncio.run(async_crew_execution())
```
</Tab>
</Tabs>
Refer to the OpenLIT [Python SDK repository](https://github.com/openlit/openlit/tree/main/sdk/python) for more advanced configurations and use cases.
</Step>
<Step title="Visualize and Analyze">
With the Agent Observability data now being collected and sent to OpenLIT, the next step is to visualize and analyze this data to get insights into your Agent's performance and behavior, and to identify areas of improvement.

Just head over to OpenLIT at `127.0.0.1:3000` in your browser to start exploring. You can log in using the default credentials:
- **Email**: `user@openlit.io`
- **Password**: `openlituser`

<Frame caption="OpenLIT Dashboard">
  <img src="/images/openlit1.png" alt="Overview Agent usage including cost and tokens" />
  <img src="/images/openlit2.png" alt="Overview of agent otel traces and metrics" />
</Frame>

</Step>
</Steps>

BIN
docs/images/openlit1.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 390 KiB
BIN
docs/images/openlit2.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 422 KiB
BIN
docs/images/openlit3.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 799 KiB
@@ -1,128 +1,145 @@
---
title: Installation
description: Get started with CrewAI - Install, configure, and build your first AI crew
icon: wrench
---

# Installing CrewAI

CrewAI is a flexible and powerful AI framework that enables you to create and manage AI agents, tools, and tasks efficiently.
Let's get you set up! 🚀

<Note>
  **Python Version Requirements**

  CrewAI requires `Python >=3.10 and <=3.13`. Here's how to check your version:
  ```bash
  python3 --version
  ```

  If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
</Note>

<Steps>
  <Step title="Install CrewAI">
    Install CrewAI with all recommended tools using either method:
    ```shell Terminal
    pip install 'crewai[tools]'
    ```
    or
    ```shell Terminal
    pip install crewai crewai-tools
    ```
    <Note>
      Both methods install the core package and additional tools needed for most use cases.
    </Note>
  </Step>
  <Step title="Upgrade CrewAI (Existing Installations Only)">
    If you have an older version of CrewAI installed, you can upgrade it:
    ```shell Terminal
    pip install --upgrade crewai crewai-tools
    ```

    <Warning>
      If you see a Poetry-related warning, you'll need to migrate to our new dependency manager:
      ```shell Terminal
      crewai update
      ```
      This will update your project to use [UV](https://github.com/astral-sh/uv), our new faster dependency manager.
    </Warning>

    <Note>
      Skip this step if you're doing a fresh installation.
    </Note>
  </Step>
  <Step title="Verify Installation">
    Check your installed versions:
    ```shell Terminal
    pip freeze | grep crewai
    ```

    You should see something like:
    ```markdown Output
    crewai==X.X.X
    crewai-tools==X.X.X
    ```
    <Check>Installation successful! You're ready to create your first crew.</Check>
  </Step>
</Steps>

# Creating a New Project

<Info>
  We recommend using the YAML Template scaffolding for a structured approach to defining agents and tasks.
</Info>

<Steps>
  <Step title="Generate Project Structure">
    Run the CrewAI CLI command:
    ```shell Terminal
    crewai create crew <project_name>
    ```

    This creates a new project with the following structure:
    <Frame>
    ```
    my_project/
    ├── .gitignore
    ├── pyproject.toml
    ├── README.md
    ├── .env
    └── src/
        └── my_project/
            ├── __init__.py
            ├── main.py
            ├── crew.py
            ├── tools/
            │   ├── custom_tool.py
            │   └── __init__.py
            └── config/
                ├── agents.yaml
                └── tasks.yaml
    ```
    </Frame>
  </Step>
  <Step title="Customize Your Project">
    Your project will contain these essential files:

    | File | Purpose |
    | --- | --- |
    | `agents.yaml` | Define your AI agents and their roles |
    | `tasks.yaml` | Set up agent tasks and workflows |
    | `.env` | Store API keys and environment variables |
    | `main.py` | Project entry point and execution flow |
    | `crew.py` | Crew orchestration and coordination |
    | `tools/` | Directory for custom agent tools |

    <Tip>
      Start by editing `agents.yaml` and `tasks.yaml` to define your crew's behavior.
      Keep sensitive information like API keys in `.env`.
    </Tip>
  </Step>
</Steps>

## Next Steps

<CardGroup cols={2}>
  <Card
    title="Build Your First Agent"
    icon="code"
    href="/quickstart"
  >
    Follow our quickstart guide to create your first CrewAI agent and get hands-on experience.
  </Card>
  <Card
    title="Join the Community"
    icon="comments"
    href="https://community.crewai.com"
  >
    Connect with other developers, get help, and share your CrewAI experiences.
  </Card>
</CardGroup>

@@ -1,49 +1,85 @@
---
title: Introduction
description: Build AI agent teams that work together to tackle complex tasks
icon: handshake
---

# What is CrewAI?

**CrewAI is a cutting-edge framework for orchestrating autonomous AI agents.**

CrewAI enables you to create AI teams where each agent has specific roles, tools, and goals, working together to accomplish complex tasks.

Think of it as assembling your dream team - each member (agent) brings unique skills and expertise, collaborating seamlessly to achieve your objectives.

## How CrewAI Works

<Note>
  Just like a company has departments (Sales, Engineering, Marketing) working together under leadership to achieve business goals, CrewAI helps you create an organization of AI agents with specialized roles collaborating to accomplish complex tasks.
</Note>

<Frame caption="CrewAI Framework Overview">
  <img src="crewAI-mindmap.png" alt="CrewAI Framework Overview" />
</Frame>

| Component | Description | Key Features |
|:----------|:-----------:|:------------|
| **Crew** | The top-level organization | • Manages AI agent teams<br/>• Oversees workflows<br/>• Ensures collaboration<br/>• Delivers outcomes |
| **AI Agents** | Specialized team members | • Have specific roles (researcher, writer)<br/>• Use designated tools<br/>• Can delegate tasks<br/>• Make autonomous decisions |
| **Process** | Workflow management system | • Defines collaboration patterns<br/>• Controls task assignments<br/>• Manages interactions<br/>• Ensures efficient execution |
| **Tasks** | Individual assignments | • Have clear objectives<br/>• Use specific tools<br/>• Feed into larger process<br/>• Produce actionable results |

### How It All Works Together

1. The **Crew** organizes the overall operation
2. **AI Agents** work on their specialized tasks
3. The **Process** ensures smooth collaboration
4. **Tasks** get completed to achieve the goal

## Key Features

<CardGroup cols={2}>
  <Card title="Role-Based Agents" icon="users">
    Create specialized agents with defined roles, expertise, and goals - from researchers to analysts to writers
  </Card>
  <Card title="Flexible Tools" icon="screwdriver-wrench">
    Equip agents with custom tools and APIs to interact with external services and data sources
  </Card>
  <Card title="Intelligent Collaboration" icon="people-arrows">
    Agents work together, sharing insights and coordinating tasks to achieve complex objectives
  </Card>
  <Card title="Task Management" icon="list-check">
    Define sequential or parallel workflows, with agents automatically handling task dependencies
  </Card>
</CardGroup>

## Why Choose CrewAI?

- 🧠 **Autonomous Operation**: Agents make intelligent decisions based on their roles and available tools
- 📝 **Natural Interaction**: Agents communicate and collaborate like human team members
- 🛠️ **Extensible Design**: Easy to add new tools, roles, and capabilities
- 🚀 **Production Ready**: Built for reliability and scalability in real-world applications

<CardGroup cols={3}>
  <Card
    title="Install CrewAI"
    icon="wrench"
    href="/installation"
  >
    Get started with CrewAI in your development environment.
  </Card>
  <Card
    title="Quick Start"
    icon="bolt"
    href="/quickstart"
  >
    Follow our quickstart guide to create your first CrewAI agent and get hands-on experience.
  </Card>
  <Card
    title="Join the Community"
    icon="comments"
    href="https://community.crewai.com"
  >
    Connect with other developers, get help, and share your CrewAI experiences.
  </Card>
</CardGroup>

## Next Step

- [Install CrewAI](/installation) to get started with your first agent.

@@ -68,6 +68,7 @@
        "concepts/tasks",
        "concepts/crews",
        "concepts/flows",
+       "concepts/knowledge",
        "concepts/llms",
        "concepts/processes",
        "concepts/collaboration",
@@ -98,7 +99,8 @@
        "how-to/replay-tasks-from-latest-crew-kickoff",
        "how-to/conditional-tasks",
        "how-to/agentops-observability",
-       "how-to/langtrace-observability"
+       "how-to/langtrace-observability",
+       "how-to/openlit-observability"
      ]
    },
    {

@@ -8,7 +8,7 @@ icon: rocket

Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject.

Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
If you haven't installed them yet, you can do so by following the [installation guide](/installation).

Follow the steps below to get crewing! 🚣‍♂️
@@ -23,7 +23,7 @@ Follow the steps below to get crewing! 🚣‍♂️
    ```
  </CodeGroup>
</Step>
<Step title="Modify your `agents.yaml` file">
  <Tip>
    You can also modify the agents as needed to fit your use case or copy and paste as is to your project.
    Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{topic}` will be replaced by the value of the variable in the `main.py` file.
@@ -39,7 +39,7 @@ Follow the steps below to get crewing! 🚣‍♂️
      You're a seasoned researcher with a knack for uncovering the latest
      developments in {topic}. Known for your ability to find the most relevant
      information and present it in a clear and concise manner.

  reporting_analyst:
    role: >
      {topic} Reporting Analyst
@@ -51,7 +51,7 @@ Follow the steps below to get crewing! 🚣‍♂️
      it easy for others to understand and act on the information you provide.
  ```
</Step>
<Step title="Modify your `tasks.yaml` file">
  ```yaml tasks.yaml
  # src/latest_ai_development/config/tasks.yaml
  research_task:
@@ -73,8 +73,8 @@ Follow the steps below to get crewing! 🚣‍♂️
    agent: reporting_analyst
    output_file: report.md
  ```
</Step>
<Step title="Modify your `crew.py` file">
  ```python crew.py
  # src/latest_ai_development/crew.py
  from crewai import Agent, Crew, Process, Task
@@ -121,10 +121,34 @@ Follow the steps below to get crewing! 🚣‍♂️
      tasks=self.tasks, # Automatically created by the @task decorator
      process=Process.sequential,
      verbose=True,
    )
  ```
</Step>
<Step title="[Optional] Add before and after crew functions">
  ```python crew.py
  # src/latest_ai_development/crew.py
  from crewai import Agent, Crew, Process, Task
  from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
  from crewai_tools import SerperDevTool

  @CrewBase
  class LatestAiDevelopmentCrew():
    """LatestAiDevelopment crew"""

    @before_kickoff
    def before_kickoff_function(self, inputs):
      print(f"Before kickoff function with inputs: {inputs}")
      return inputs # You can return the inputs or modify them as needed

    @after_kickoff
    def after_kickoff_function(self, result):
      print(f"After kickoff function with result: {result}")
      return result # You can return the result or modify it as needed

    # ... remaining code
  ```
</Step>
<Step title="Feel free to pass custom inputs to your crew">
  For example, you can pass the `topic` input to your crew to customize the research and reporting.
  ```python main.py
  #!/usr/bin/env python
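  # The diff elides the rest of main.py; a typical scaffolded entry point looks
  # like the sketch below (module path and input values are illustrative).
  from latest_ai_development.crew import LatestAiDevelopmentCrew

  def run():
      # Values here replace the {topic} placeholders in agents.yaml and tasks.yaml.
      inputs = {"topic": "AI LLMs"}
      LatestAiDevelopmentCrew().crew().kickoff(inputs=inputs)
  ```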

@@ -237,14 +261,14 @@ Follow the steps below to get crewing! 🚣‍♂️
### Note on Consistency in Naming

The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
For example, you can reference the agent for specific tasks from the `tasks.yaml` file.
This naming consistency allows CrewAI to automatically link your configurations with your code; otherwise, your task won't recognize the reference properly.

#### Example References

<Tip>
  Note how we use the same name for the agent in the `agents.yaml` (`email_summarizer`) file as the method name in the `crew.py` (`email_summarizer`) file.
</Tip>

```yaml agents.yaml
email_summarizer:
@@ -281,6 +305,8 @@ Use the annotations to properly reference the agent and task in the `crew.py` file.
* `@task`
* `@crew`
* `@tool`
+* `@before_kickoff`
+* `@after_kickoff`
* `@callback`
* `@output_json`
* `@output_pydantic`
@@ -304,7 +330,7 @@ def email_summarizer_task(self) -> Task:

<Tip>
  In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
  which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
  You can learn more about the core concepts [here](/concepts).
</Tip>
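
In practice, switching to the hierarchical process is a small change on the crew plus a manager. A minimal sketch (the agents and tasks reuse the names defined earlier in this guide; `manager_llm` can be any supported model identifier):

```python Code
crew = Crew(
    agents=self.agents,
    tasks=self.tasks,
    process=Process.hierarchical,
    manager_llm="gpt-4o",  # the model that plans, delegates, and validates results
)
```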

@@ -323,11 +349,28 @@ Replace `<task_id>` with the ID of the task you want to replay.

If you need to reset the memory of your crew before running it again, you can do so by calling the reset memory feature:

```shell
-crewai reset-memory
+crewai reset-memories --all
```

This will clear the crew's memory, allowing for a fresh start.

## Deploying Your Project

The easiest way to deploy your crew is through CrewAI Enterprise, where you can deploy your crew in a few clicks.

<CardGroup cols={2}>
  <Card
    title="Deploy on Enterprise"
    icon="rocket"
    href="http://app.crewai.com"
  >
    Get started with CrewAI Enterprise and deploy your crew in a production environment with just a few clicks.
  </Card>
  <Card
    title="Join the Community"
    icon="comments"
    href="https://community.crewai.com"
  >
    Join our open source community to discuss ideas, share your projects, and connect with other CrewAI developers.
  </Card>
</CardGroup>

@@ -34,6 +34,7 @@ from crewai_tools import GithubSearchTool

# Initialize the tool for semantic searches within a specific GitHub repository
tool = GithubSearchTool(
    github_repo='https://github.com/example/repo',
+   gh_token='your_github_personal_access_token',
    content_types=['code', 'issue'] # Options: code, repo, pr, issue
)

@@ -41,6 +42,7 @@ tool = GithubSearchTool(

# Initialize the tool for semantic searches within a specific GitHub repository,
# so the agent can search any repository it learns about during its execution
tool = GithubSearchTool(
+   gh_token='your_github_personal_access_token',
    content_types=['code', 'issue'] # Options: code, repo, pr, issue
)
```

@@ -48,6 +50,7 @@ tool = GithubSearchTool(

## Arguments

- `github_repo` : The URL of the GitHub repository where the search will be conducted. This is a mandatory field and specifies the target repository for your search.
- `gh_token` : Your GitHub Personal Access Token (PAT) required for authentication. You can create one in your GitHub account settings under Developer Settings > Personal Access Tokens.
- `content_types` : Specifies the types of content to include in your search. You must provide a list of content types from the following options: `code` for searching within the code,
  `repo` for searching within the repository's general information, `pr` for searching within pull requests, and `issue` for searching within issues.
  This field is mandatory and allows tailoring the search to specific content types within the GitHub repository.
@@ -77,5 +80,4 @@ tool = GithubSearchTool(
        ),
    ),
)
-)
```
@@ -152,6 +152,7 @@ nav:
    - Conditional Tasks: 'how-to/Conditional-Tasks.md'
    - Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
    - Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md'
+   - Agent Monitoring with OpenLIT: 'how-to/openlit-Observability.md'
  - Tools Docs:
    - Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
    - Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'

6
poetry.lock
generated
@@ -1597,12 +1597,12 @@ files = [
google-auth = ">=2.14.1,<3.0.dev0"
googleapis-common-protos = ">=1.56.2,<2.0.dev0"
grpcio = [
+    {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
    {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""},
-    {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
]
grpcio-status = [
+    {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
    {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""},
-    {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
]
proto-plus = ">=1.22.3,<2.0.0dev"
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0"
@@ -4286,8 +4286,8 @@ files = [

[package.dependencies]
numpy = [
+    {version = ">=1.23.2", markers = "python_version == \"3.11\""},
    {version = ">=1.22.4", markers = "python_version < \"3.11\""},
-    {version = ">=1.23.2", markers = "python_version == \"3.11\""},
    {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
]
python-dateutil = ">=2.8.2"

@@ -1,6 +1,6 @@
[project]
name = "crewai"
-version = "0.76.9"
+version = "0.85.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<=3.13"
@@ -9,14 +9,13 @@ authors = [
]
dependencies = [
    "pydantic>=2.4.2",
-   "langchain>=0.2.16",
    "openai>=1.13.3",
    "opentelemetry-api>=1.22.0",
    "opentelemetry-sdk>=1.22.0",
    "opentelemetry-exporter-otlp-proto-http>=1.22.0",
    "instructor>=1.3.3",
    "regex>=2024.9.11",
-   "crewai-tools>=0.13.4",
+   "crewai-tools>=0.14.0",
    "click>=8.1.7",
    "python-dotenv>=1.0.0",
    "appdirs>=1.4.4",
@@ -27,8 +26,10 @@ dependencies = [
    "pyvis>=0.3.2",
    "uv>=0.4.25",
    "tomli-w>=1.1.0",
-   "chromadb>=0.4.24",
+   "tomli>=2.0.2",
+   "chromadb>=0.5.18",
+   "pdfplumber>=0.11.4",
+   "openpyxl>=3.1.5",
]

[project.urls]
@@ -37,8 +38,19 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
-tools = ["crewai-tools>=0.13.4"]
+tools = ["crewai-tools>=0.14.0"]
agentops = ["agentops>=0.3.0"]
+fastembed = ["fastembed>=0.4.1"]
+pdfplumber = [
+    "pdfplumber>=0.11.4",
+]
+pandas = [
+    "pandas>=2.2.3",
+]
+openpyxl = [
+    "openpyxl>=3.1.5",
+]
+mem0 = ["mem0ai>=0.1.29"]

[tool.uv]
dev-dependencies = [
@@ -52,7 +64,7 @@ dev-dependencies = [
    "mkdocs-material-extensions>=1.3.1",
    "pillow>=10.2.0",
    "cairosvg>=2.7.1",
-   "crewai-tools>=0.13.4",
+   "crewai-tools>=0.14.0",
    "pytest>=8.0.0",
    "pytest-vcr>=1.0.2",
    "python-dotenv>=1.0.0",

@@ -1,7 +1,9 @@
import warnings

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.flow.flow import Flow
+from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.pipeline import Pipeline
from crewai.process import Process
@@ -14,5 +16,15 @@ warnings.filterwarnings(
    category=UserWarning,
    module="pydantic.main",
)
-__version__ = "0.76.9"
-__all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router", "LLM", "Flow"]
+__version__ = "0.85.0"
+__all__ = [
+    "Agent",
+    "Crew",
+    "Process",
+    "Task",
+    "Pipeline",
+    "Router",
+    "LLM",
+    "Flow",
+    "Knowledge",
+]

@@ -1,18 +1,25 @@
import os
import shutil
import subprocess
-from typing import Any, List, Literal, Optional, Union
+from typing import Any, Dict, List, Literal, Optional, Union

from pydantic import Field, InstanceOf, PrivateAttr, model_validator

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
+from crewai.cli.constants import ENV_VARS
+from crewai.knowledge.knowledge import Knowledge
+from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
+from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
-from crewai.tools.agent_tools import AgentTools
+from crewai.task import Task
+from crewai.tools import BaseTool
+from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
+from crewai.utilities.converter import generate_model_description
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler

@@ -50,6 +57,7 @@ class Agent(BaseAgent):
        role: The role of the agent.
        goal: The objective of the agent.
        backstory: The backstory of the agent.
+       knowledge: The knowledge base of the agent.
        config: Dict representation of agent configuration.
        llm: The language model that will run the agent.
        function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
@@ -60,6 +68,7 @@ class Agent(BaseAgent):
        allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
        tools: Tools at agents disposal
        step_callback: Callback to be executed after each step of the agent execution.
+       knowledge_sources: Knowledge sources for the agent.
    """

    _times_executed: int = PrivateAttr(default=0)
@@ -117,10 +126,27 @@ class Agent(BaseAgent):
        default="safe",
        description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
    )
+   embedder_config: Optional[Dict[str, Any]] = Field(
+       default=None,
+       description="Embedder configuration for the agent.",
+   )
+   knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
+       default=None,
+       description="Knowledge sources for the agent.",
+   )
+   _knowledge: Optional[Knowledge] = PrivateAttr(
+       default=None,
+   )

    @model_validator(mode="after")
    def post_init_setup(self):
+       self._set_knowledge()
        self.agent_ops_agent_name = self.role
+       unaccepted_attributes = [
+           "AWS_ACCESS_KEY_ID",
+           "AWS_SECRET_ACCESS_KEY",
+           "AWS_REGION_NAME",
+       ]

        # Handle different cases for self.llm
        if isinstance(self.llm, str):
@@ -130,8 +156,12 @@ class Agent(BaseAgent):
            # If it's already an LLM instance, keep it as is
            pass
        elif self.llm is None:
-           # If it's None, use environment variables or default
-           model_name = os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini")
+           # Determine the model name from environment variables or use default
+           model_name = (
+               os.environ.get("OPENAI_MODEL_NAME")
+               or os.environ.get("MODEL")
+               or "gpt-4o-mini"
+           )
            llm_params = {"model": model_name}

            api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get(
@@ -140,9 +170,39 @@ class Agent(BaseAgent):
            if api_base:
                llm_params["base_url"] = api_base

-           api_key = os.environ.get("OPENAI_API_KEY")
-           if api_key:
-               llm_params["api_key"] = api_key
+           set_provider = model_name.split("/")[0] if "/" in model_name else "openai"
+
+           # Iterate over all environment variables to find matching API keys or use defaults
+           for provider, env_vars in ENV_VARS.items():
+               if provider == set_provider:
+                   for env_var in env_vars:
+                       # Check if the environment variable is set
+                       key_name = env_var.get("key_name")
+                       if key_name and key_name not in unaccepted_attributes:
+                           env_value = os.environ.get(key_name)
+                           if env_value:
+                               # Map key names containing "API_KEY" to "api_key"
+                               key_name = (
+                                   "api_key" if "API_KEY" in key_name else key_name
+                               )
+                               # Map key names containing "API_BASE" to "api_base"
+                               key_name = (
+                                   "api_base" if "API_BASE" in key_name else key_name
+                               )
+                               # Map key names containing "API_VERSION" to "api_version"
+                               key_name = (
+                                   "api_version"
+                                   if "API_VERSION" in key_name
+                                   else key_name
+                               )
+                               llm_params[key_name] = env_value
+                       # Check for default values if the environment variable is not set
+                       elif env_var.get("default", False):
+                           for key, value in env_var.items():
+                               if key not in ["prompt", "key_name", "default"]:
+                                   # Only add default if the key is already set in os.environ
+                                   if key in os.environ:
+                                       llm_params[key] = value

            self.llm = LLM(**llm_params)
        else:
@@ -188,11 +248,26 @@ class Agent(BaseAgent):
            self.cache_handler = CacheHandler()
            self.set_cache_handler(self.cache_handler)

+   def _set_knowledge(self):
+       try:
+           if self.knowledge_sources:
+               knowledge_agent_name = f"{self.role.replace(' ', '_')}"
+               if isinstance(self.knowledge_sources, list) and all(
+                   isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
+               ):
+                   self._knowledge = Knowledge(
+                       sources=self.knowledge_sources,
+                       embedder_config=self.embedder_config,
+                       collection_name=knowledge_agent_name,
+                   )
+       except (TypeError, ValueError) as e:
+           raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
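
Taken together, these changes let an `Agent` carry its own retrieval-backed knowledge. A hypothetical usage sketch (the `StringKnowledgeSource` class is assumed to live under `crewai.knowledge.source`; field values are illustrative):

```python
from crewai import Agent
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

policy = StringKnowledgeSource(content="Our refund window is 30 days.")

agent = Agent(
    role="Support Analyst",
    goal="Answer policy questions accurately",
    backstory="You know company policy inside out.",
    knowledge_sources=[policy],  # validated by _set_knowledge() above
)
```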
|
||||
|
||||
def execute_task(
|
||||
self,
|
||||
task: Any,
|
||||
task: Task,
|
||||
context: Optional[str] = None,
|
||||
tools: Optional[List[Any]] = None,
|
||||
tools: Optional[List[BaseTool]] = None,
|
||||
) -> str:
|
||||
"""Execute a task with the agent.
|
||||
|
||||
@@ -209,6 +284,22 @@ class Agent(BaseAgent):
|
||||
|
||||
task_prompt = task.prompt()
|
||||
|
||||
# If the task requires output in JSON or Pydantic format,
|
||||
# append specific instructions to the task prompt to ensure
|
||||
# that the final answer does not include any code block markers
|
||||
if task.output_json or task.output_pydantic:
|
||||
# Generate the schema based on the output format
|
||||
if task.output_json:
|
||||
# schema = json.dumps(task.output_json, indent=2)
|
||||
schema = generate_model_description(task.output_json)
|
||||
|
||||
elif task.output_pydantic:
|
||||
schema = generate_model_description(task.output_pydantic)
|
||||
|
||||
task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
|
||||
output_format=schema
|
||||
)
|
||||
|
||||
if context:
|
||||
task_prompt = self.i18n.slice("task_with_context").format(
|
||||
task=task_prompt, context=context
|
||||
@@ -216,14 +307,32 @@ class Agent(BaseAgent):
|
||||
|
||||
if self.crew and self.crew.memory:
|
||||
contextual_memory = ContextualMemory(
|
||||
self.crew.memory_config,
|
||||
self.crew._short_term_memory,
|
||||
self.crew._long_term_memory,
|
||||
self.crew._entity_memory,
|
||||
self.crew._user_memory,
|
||||
)
|
||||
memory = contextual_memory.build_context_for_task(task, context)
|
||||
if memory.strip() != "":
|
||||
task_prompt += self.i18n.slice("memory").format(memory=memory)
|
||||
|
||||
if self._knowledge:
|
||||
agent_knowledge_snippets = self._knowledge.query([task.prompt()])
|
||||
if agent_knowledge_snippets:
|
||||
agent_knowledge_context = extract_knowledge_context(
|
||||
agent_knowledge_snippets
|
||||
)
|
||||
if agent_knowledge_context:
|
||||
task_prompt += agent_knowledge_context
|
||||
|
||||
if self.crew:
|
||||
knowledge_snippets = self.crew.query_knowledge([task.prompt()])
|
||||
if knowledge_snippets:
|
||||
crew_knowledge_context = extract_knowledge_context(knowledge_snippets)
|
||||
if crew_knowledge_context:
|
||||
task_prompt += crew_knowledge_context
|
||||
|
||||
tools = tools or self.tools or []
|
||||
self.create_agent_executor(tools=tools, task=task)
|
||||
|
||||
@@ -259,7 +368,9 @@ class Agent(BaseAgent):
|
||||
|
||||
return result
|
||||
|
||||
def create_agent_executor(self, tools=None, task=None) -> None:
|
||||
def create_agent_executor(
|
||||
self, tools: Optional[List[BaseTool]] = None, task=None
|
||||
) -> None:
|
||||
"""Create an agent executor for the agent.
|
||||
|
||||
Returns:
|
||||
@@ -332,11 +443,11 @@ class Agent(BaseAgent):
|
||||
tools_list = []
|
||||
try:
|
||||
# tentatively try to import from crewai_tools import BaseTool as CrewAITool
|
||||
from crewai_tools import BaseTool as CrewAITool
|
||||
from crewai.tools import BaseTool as CrewAITool
|
||||
|
||||
for tool in tools:
|
||||
if isinstance(tool, CrewAITool):
|
||||
tools_list.append(tool.to_langchain())
|
||||
tools_list.append(tool.to_structured_tool())
|
||||
else:
|
||||
tools_list.append(tool)
|
||||
except ModuleNotFoundError:
|
||||
@@ -391,7 +502,7 @@ class Agent(BaseAgent):
|
||||
|
||||
return description
|
||||
|
||||
def _render_text_description_and_args(self, tools: List[Any]) -> str:
|
||||
def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
|
||||
"""Render the tool name, description, and args in plain text.
|
||||
|
||||
Output will be in the format of:
|
||||
@@ -404,17 +515,7 @@ class Agent(BaseAgent):
|
||||
"""
|
||||
tool_strings = []
|
||||
for tool in tools:
|
||||
args_schema = {
|
||||
name: {
|
||||
"description": field.description,
|
||||
"type": field.annotation.__name__,
|
||||
}
|
||||
for name, field in tool.args_schema.model_fields.items()
|
||||
}
|
||||
description = (
|
||||
f"Tool Name: {tool.name}\nTool Description: {tool.description}"
|
||||
)
|
||||
tool_strings.append(f"{description}\nTool Arguments: {args_schema}")
|
||||
tool_strings.append(tool.description)
|
||||
|
||||
return "\n".join(tool_strings)
|
||||
|
||||
|
||||
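For reference, the removed branch built the per-tool argument map from the pydantic args_schema before it was collapsed to plain descriptions. A minimal, self-contained sketch of that rendering (the tool name and fields here are illustrative):

from pydantic import BaseModel, Field

class SearchArgs(BaseModel):
    query: str = Field(..., description="Search query to run")
    limit: int = Field(5, description="Maximum number of results")

def render_tool(name: str, description: str, args_schema: type[BaseModel]) -> str:
    """Rebuild the plain-text rendering the old branch produced."""
    args = {
        field_name: {
            "description": field.description,
            "type": field.annotation.__name__,
        }
        for field_name, field in args_schema.model_fields.items()
    }
    return f"Tool Name: {name}\nTool Description: {description}\nTool Arguments: {args}"

print(render_tool("search", "Web search tool", SearchArgs))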
@@ -18,6 +18,8 @@ from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config

@@ -49,11 +51,11 @@ class BaseAgent(ABC, BaseModel):

    Methods:
        execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> str:
        execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[BaseTool]] = None) -> str:
            Abstract method to execute a task.
        create_agent_executor(tools=None) -> None:
            Abstract method to create an agent executor.
        _parse_tools(tools: List[Any]) -> List[Any]:
        _parse_tools(tools: List[BaseTool]) -> List[Any]:
            Abstract method to parse tools.
        get_delegation_tools(agents: List["BaseAgent"]):
            Abstract method to set the agents' task tools for handling delegation and question asking to other agents in the crew.
@@ -134,6 +136,35 @@ class BaseAgent(ABC, BaseModel):
    def process_model_config(cls, values):
        return process_config(values, cls)

    @field_validator("tools")
    @classmethod
    def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
        """Validate and process the tools provided to the agent.

        This method ensures that each tool is either an instance of BaseTool
        or an object with 'name', 'func', and 'description' attributes. If the
        tool meets these criteria, it is processed and added to the list of
        tools. Otherwise, a ValueError is raised.
        """
        processed_tools = []
        for tool in tools:
            if isinstance(tool, BaseTool):
                processed_tools.append(tool)
            elif (
                hasattr(tool, "name")
                and hasattr(tool, "func")
                and hasattr(tool, "description")
            ):
                # Tool has the required attributes, create a Tool instance
                processed_tools.append(Tool.from_langchain(tool))
            else:
                raise ValueError(
                    f"Invalid tool type: {type(tool)}. "
                    "Tool must be an instance of BaseTool or "
                    "an object with 'name', 'func', and 'description' attributes."
                )
        return processed_tools

    @model_validator(mode="after")
    def validate_and_set_attributes(self):
        # Validate required fields
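The validator accepts either real BaseTool instances or duck-typed objects exposing name, func, and description (the LangChain tool shape), wrapping the latter with Tool.from_langchain. A self-contained sketch of those rules using stand-in classes (the real BaseTool and Tool live in crewai.tools):

from dataclasses import dataclass

# Stand-ins to exercise the validator's rules outside of crewai
@dataclass
class BaseToolSketch:
    name: str
    description: str

def validate_tools(tools):
    processed = []
    for tool in tools:
        if isinstance(tool, BaseToolSketch):
            processed.append(tool)
        elif all(hasattr(tool, attr) for attr in ("name", "func", "description")):
            # Duck-typed (LangChain-style) tool: crewai wraps it via Tool.from_langchain
            processed.append(BaseToolSketch(tool.name, tool.description))
        else:
            raise ValueError(f"Invalid tool type: {type(tool)}")
    return processed

class LangChainStyleTool:
    name = "echo"
    description = "Returns its input unchanged"

    @staticmethod
    def func(text: str) -> str:
        return text

print(validate_tools([BaseToolSketch("search", "Web search"), LangChainStyleTool()]))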
@@ -188,7 +219,7 @@ class BaseAgent(ABC, BaseModel):
        self,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> str:
        pass

@@ -197,11 +228,11 @@ class BaseAgent(ABC, BaseModel):
        pass

    @abstractmethod
    def _parse_tools(self, tools: List[Any]) -> List[Any]:
    def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]:
        pass

    @abstractmethod
    def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[Any]:
    def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
        """Set the task tools that init the BaseAgentTools class."""
        pass

@@ -4,6 +4,7 @@ from crewai.types.usage_metrics import UsageMetrics
class TokenProcess:
    total_tokens: int = 0
    prompt_tokens: int = 0
    cached_prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

@@ -15,6 +16,9 @@ class TokenProcess:
        self.completion_tokens = self.completion_tokens + tokens
        self.total_tokens = self.total_tokens + tokens

    def sum_cached_prompt_tokens(self, tokens: int):
        self.cached_prompt_tokens = self.cached_prompt_tokens + tokens

    def sum_successful_requests(self, requests: int):
        self.successful_requests = self.successful_requests + requests

@@ -22,6 +26,7 @@ class TokenProcess:
        return UsageMetrics(
            total_tokens=self.total_tokens,
            prompt_tokens=self.prompt_tokens,
            cached_prompt_tokens=self.cached_prompt_tokens,
            completion_tokens=self.completion_tokens,
            successful_requests=self.successful_requests,
        )
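The new cached_prompt_tokens counter is tracked alongside the existing counters and surfaced through UsageMetrics. A minimal run-through with a stand-in class (the real TokenProcess lives in crewai.agents.agent_builder.utilities and also has completion/request counters):

# Stand-in showing the accounting the hunk above adds
class TokenProcessSketch:
    def __init__(self):
        self.prompt_tokens = 0
        self.cached_prompt_tokens = 0
        self.total_tokens = 0

    def sum_prompt_tokens(self, tokens: int):
        self.prompt_tokens += tokens
        self.total_tokens += tokens

    def sum_cached_prompt_tokens(self, tokens: int):
        # Cached prompt tokens are tracked separately and do not inflate totals
        self.cached_prompt_tokens += tokens

tp = TokenProcessSketch()
tp.sum_prompt_tokens(120)
tp.sum_cached_prompt_tokens(80)
print(tp.prompt_tokens, tp.cached_prompt_tokens, tp.total_tokens)  # 120 80 120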
@@ -1,5 +1,6 @@
import json
import re
from dataclasses import dataclass
from typing import Any, Dict, List, Union

from crewai.agents.agent_builder.base_agent import BaseAgent
@@ -12,6 +13,7 @@ from crewai.agents.parser import (
    OutputParserException,
)
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.base_tool import BaseTool
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N, Printer
from crewai.utilities.constants import TRAINING_DATA_FILE
@@ -22,6 +24,12 @@ from crewai.utilities.logger import Logger
from crewai.utilities.training_handler import CrewTrainingHandler


@dataclass
class ToolResult:
    result: Any
    result_as_answer: bool


class CrewAgentExecutor(CrewAgentExecutorMixin):
    _logger: Logger = Logger()

@@ -33,7 +41,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        agent: BaseAgent,
        prompt: dict[str, str],
        max_iter: int,
        tools: List[Any],
        tools: List[BaseTool],
        tools_names: str,
        stop_words: List[str],
        tools_description: str,
@@ -70,7 +78,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self.iterations = 0
        self.log_error_after = 3
        self.have_forced_answer = False
        self.name_to_tool_map = {tool.name: tool for tool in self.tools}
        self.tool_name_to_tool_map: Dict[str, BaseTool] = {
            tool.name: tool for tool in self.tools
        }
        if self.llm.stop:
            self.llm.stop = list(set(self.llm.stop + self.stop))
        else:
@@ -117,6 +127,15 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                callbacks=self.callbacks,
            )

            if answer is None or answer == "":
                self._printer.print(
                    content="Received None or empty response from LLM call.",
                    color="red",
                )
                raise ValueError(
                    "Invalid response from LLM call - None or empty."
                )

            if not self.use_stop_words:
                try:
                    self._format_answer(answer)
@@ -131,30 +150,39 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
            formatted_answer = self._format_answer(answer)

            if isinstance(formatted_answer, AgentAction):
                action_result = self._use_tool(formatted_answer)
                formatted_answer.text += f"\nObservation: {action_result}"
                formatted_answer.result = action_result
                tool_result = self._execute_tool_and_check_finality(
                    formatted_answer
                )
                formatted_answer.text += f"\nObservation: {tool_result.result}"
                formatted_answer.result = tool_result.result
                if tool_result.result_as_answer:
                    return AgentFinish(
                        thought="",
                        output=tool_result.result,
                        text=formatted_answer.text,
                    )
                self._show_logs(formatted_answer)

                if self.step_callback:
                    self.step_callback(formatted_answer)
            if self.step_callback:
                self.step_callback(formatted_answer)

                if self._should_force_answer():
                    if self.have_forced_answer:
                        return AgentFinish(
                            output=self._i18n.errors(
                                "force_final_answer_error"
                            ).format(formatted_answer.text),
                            text=formatted_answer.text,
                        )
                    else:
                        formatted_answer.text += (
                            f'\n{self._i18n.errors("force_final_answer")}'
                        )
                        self.have_forced_answer = True
                    self.messages.append(
                        self._format_msg(formatted_answer.text, role="assistant")
                    )
            if self._should_force_answer():
                if self.have_forced_answer:
                    return AgentFinish(
                        thought="",
                        output=self._i18n.errors(
                            "force_final_answer_error"
                        ).format(formatted_answer.text),
                        text=formatted_answer.text,
                    )
                else:
                    formatted_answer.text += (
                        f'\n{self._i18n.errors("force_final_answer")}'
                    )
                    self.have_forced_answer = True
                self.messages.append(
                    self._format_msg(formatted_answer.text, role="assistant")
                )

        except OutputParserException as e:
            self.messages.append({"role": "user", "content": e.error})
@@ -229,7 +257,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
            content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
        )

    def _use_tool(self, agent_action: AgentAction) -> Any:
    def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
        tool_usage = ToolUsage(
            tools_handler=self.tools_handler,
            tools=self.tools,
@@ -245,19 +273,25 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

        if isinstance(tool_calling, ToolUsageErrorException):
            tool_result = tool_calling.message
            return ToolResult(result=tool_result, result_as_answer=False)
        else:
            if tool_calling.tool_name.casefold().strip() in [
                name.casefold().strip() for name in self.name_to_tool_map
                name.casefold().strip() for name in self.tool_name_to_tool_map
            ] or tool_calling.tool_name.casefold().replace("_", " ") in [
                name.casefold().strip() for name in self.name_to_tool_map
                name.casefold().strip() for name in self.tool_name_to_tool_map
            ]:
                tool_result = tool_usage.use(tool_calling, agent_action.text)
                tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
                if tool:
                    return ToolResult(
                        result=tool_result, result_as_answer=tool.result_as_answer
                    )
            else:
                tool_result = self._i18n.errors("wrong_tool_name").format(
                    tool=tool_calling.tool_name,
                    tools=", ".join([tool.name.casefold() for tool in self.tools]),
                )
            return tool_result
            return ToolResult(result=tool_result, result_as_answer=False)

    def _summarize_messages(self) -> None:
        messages_groups = []
@@ -376,4 +410,5 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        return CrewAgentParser(agent=self.agent).parse(answer)

    def _format_msg(self, prompt: str, role: str = "user") -> Dict[str, str]:
        prompt = prompt.rstrip()
        return {"role": role, "content": prompt}
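The key behavioral change above: tool execution now returns a ToolResult carrying the tool's result_as_answer flag, so the executor can short-circuit the reasoning loop and return an AgentFinish directly instead of appending another observation. A toy illustration of that control flow (the result strings are illustrative):

from dataclasses import dataclass
from typing import Any

@dataclass
class ToolResult:
    result: Any
    result_as_answer: bool

def observe_or_finish(tool_result: ToolResult) -> str:
    """Mimics the executor: a result_as_answer tool ends the loop immediately."""
    if tool_result.result_as_answer:
        return f"FINAL ANSWER: {tool_result.result}"
    return f"Observation: {tool_result.result}"

print(observe_or_finish(ToolResult("42 results found", result_as_answer=False)))
print(observe_or_finish(ToolResult("report.pdf generated", result_as_answer=True)))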
@@ -1,6 +1,6 @@
from typing import Any, Optional, Union

from ..tools.cache_tools import CacheTools
from ..tools.cache_tools.cache_tools import CacheTools
from ..tools.tool_calling import InstructorToolCalling, ToolCalling
from .cache.cache_handler import CacheHandler


@@ -54,7 +54,7 @@ def create_embedded_crew(crew_name: str, parent_folder: Path) -> None:

    templates_dir = Path(__file__).parent / "templates" / "crew"
    config_template_files = ["agents.yaml", "tasks.yaml"]
    crew_template_file = f"{folder_name}_crew.py"  # Updated file name
    crew_template_file = f"{folder_name}.py"  # Updated file name

    for file_name in config_template_files:
        src_file = templates_dir / "config" / file_name

@@ -7,6 +7,7 @@ from rich.console import Console

from .constants import AUTH0_AUDIENCE, AUTH0_CLIENT_ID, AUTH0_DOMAIN
from .utils import TokenManager, validate_token
from crewai.cli.tools.main import ToolCommand

console = Console()

@@ -34,7 +35,9 @@ class AuthenticationCommand:
            "scope": "openid",
            "audience": AUTH0_AUDIENCE,
        }
        response = requests.post(url=self.DEVICE_CODE_URL, data=device_code_payload)
        response = requests.post(
            url=self.DEVICE_CODE_URL, data=device_code_payload, timeout=20
        )
        response.raise_for_status()
        return response.json()

@@ -54,14 +57,29 @@ class AuthenticationCommand:

        attempts = 0
        while True and attempts < 5:
            response = requests.post(self.TOKEN_URL, data=token_payload)
            response = requests.post(self.TOKEN_URL, data=token_payload, timeout=30)
            token_data = response.json()

            if response.status_code == 200:
                validate_token(token_data["id_token"])
                expires_in = 360000  # Token expiration time in seconds
                self.token_manager.save_tokens(token_data["access_token"], expires_in)
                console.print("\nWelcome to CrewAI+ !!", style="green")

                try:
                    ToolCommand().login()
                except Exception:
                    console.print(
                        "\n[bold yellow]Warning:[/bold yellow] Authentication with the Tool Repository failed.",
                        style="yellow",
                    )
                    console.print(
                        "Other features will work normally, but you may experience limitations "
                        "with downloading and publishing tools."
                        "\nRun [bold]crewai login[/bold] to try logging in again.\n",
                        style="yellow",
                    )

                console.print("\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n")
                return

            if token_data["error"] not in ("authorization_pending", "slow_down"):

src/crewai/cli/authentication/token.py (new file, +10)
@@ -0,0 +1,10 @@
from .utils import TokenManager


def get_auth_token() -> str:
    """Get the authentication token."""
    access_token = TokenManager().get_token()
    if not access_token:
        raise Exception()
    return access_token

@@ -136,6 +136,7 @@ def log_tasks_outputs() -> None:
@click.option("-l", "--long", is_flag=True, help="Reset LONG TERM memory")
@click.option("-s", "--short", is_flag=True, help="Reset SHORT TERM memory")
@click.option("-e", "--entities", is_flag=True, help="Reset ENTITIES memory")
@click.option("-kn", "--knowledge", is_flag=True, help="Reset KNOWLEDGE storage")
@click.option(
    "-k",
    "--kickoff-outputs",
@@ -143,17 +144,24 @@ def log_tasks_outputs() -> None:
    help="Reset LATEST KICKOFF TASK OUTPUTS",
)
@click.option("-a", "--all", is_flag=True, help="Reset ALL memories")
def reset_memories(long, short, entities, kickoff_outputs, all):
def reset_memories(
    long: bool,
    short: bool,
    entities: bool,
    knowledge: bool,
    kickoff_outputs: bool,
    all: bool,
) -> None:
    """
    Reset the crew memories (long, short, entity, latest_crew_kickoff_outputs). This will delete all the saved data.
    """
    try:
        if not all and not (long or short or entities or kickoff_outputs):
        if not all and not (long or short or entities or knowledge or kickoff_outputs):
            click.echo(
                "Please specify at least one memory type to reset using the appropriate flags."
            )
            return
        reset_memories_command(long, short, entities, kickoff_outputs, all)
        reset_memories_command(long, short, entities, knowledge, kickoff_outputs, all)
    except Exception as e:
        click.echo(f"An error occurred while resetting memories: {e}", err=True)
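The new -kn/--knowledge flag slots into the same guard that already covers the other memory types: at least one flag (or --all) must be set. A trimmed-down, runnable sketch of that wiring (only a subset of the real flags is shown):

import click

@click.command()
@click.option("-l", "--long", is_flag=True, help="Reset LONG TERM memory")
@click.option("-kn", "--knowledge", is_flag=True, help="Reset KNOWLEDGE storage")
@click.option("-a", "--all", "all_", is_flag=True, help="Reset ALL memories")
def reset_memories(long: bool, knowledge: bool, all_: bool) -> None:
    if not all_ and not (long or knowledge):
        click.echo("Please specify at least one memory type to reset.")
        return
    click.echo(f"Resetting: long={long or all_}, knowledge={knowledge or all_}")

if __name__ == "__main__":
    reset_memories()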
@@ -2,7 +2,7 @@ import requests
from requests.exceptions import JSONDecodeError
from rich.console import Console
from crewai.cli.plus_api import PlusAPI
from crewai.cli.utils import get_auth_token
from crewai.cli.authentication.token import get_auth_token
from crewai.telemetry.telemetry import Telemetry

console = Console()

@@ -1,19 +1,161 @@
ENV_VARS = {
    'openai': ['OPENAI_API_KEY'],
    'anthropic': ['ANTHROPIC_API_KEY'],
    'gemini': ['GEMINI_API_KEY'],
    'groq': ['GROQ_API_KEY'],
    'ollama': ['FAKE_KEY'],
    "openai": [
        {
            "prompt": "Enter your OPENAI API key (press Enter to skip)",
            "key_name": "OPENAI_API_KEY",
        }
    ],
    "anthropic": [
        {
            "prompt": "Enter your ANTHROPIC API key (press Enter to skip)",
            "key_name": "ANTHROPIC_API_KEY",
        }
    ],
    "gemini": [
        {
            "prompt": "Enter your GEMINI API key (press Enter to skip)",
            "key_name": "GEMINI_API_KEY",
        }
    ],
    "groq": [
        {
            "prompt": "Enter your GROQ API key (press Enter to skip)",
            "key_name": "GROQ_API_KEY",
        }
    ],
    "watson": [
        {
            "prompt": "Enter your WATSONX URL (press Enter to skip)",
            "key_name": "WATSONX_URL",
        },
        {
            "prompt": "Enter your WATSONX API Key (press Enter to skip)",
            "key_name": "WATSONX_APIKEY",
        },
        {
            "prompt": "Enter your WATSONX Project Id (press Enter to skip)",
            "key_name": "WATSONX_PROJECT_ID",
        },
    ],
    "ollama": [
        {
            "default": True,
            "API_BASE": "http://localhost:11434",
        }
    ],
    "bedrock": [
        {
            "prompt": "Enter your AWS Access Key ID (press Enter to skip)",
            "key_name": "AWS_ACCESS_KEY_ID",
        },
        {
            "prompt": "Enter your AWS Secret Access Key (press Enter to skip)",
            "key_name": "AWS_SECRET_ACCESS_KEY",
        },
        {
            "prompt": "Enter your AWS Region Name (press Enter to skip)",
            "key_name": "AWS_REGION_NAME",
        },
    ],
    "azure": [
        {
            "prompt": "Enter your Azure deployment name (must start with 'azure/')",
            "key_name": "model",
        },
        {
            "prompt": "Enter your AZURE API key (press Enter to skip)",
            "key_name": "AZURE_API_KEY",
        },
        {
            "prompt": "Enter your AZURE API base URL (press Enter to skip)",
            "key_name": "AZURE_API_BASE",
        },
        {
            "prompt": "Enter your AZURE API version (press Enter to skip)",
            "key_name": "AZURE_API_VERSION",
        },
    ],
    "cerebras": [
        {
            "prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
            "key_name": "model",
        },
        {
            "prompt": "Enter your Cerebras API key (press Enter to skip)",
            "key_name": "CEREBRAS_API_KEY",
        },
    ],
}

PROVIDERS = ['openai', 'anthropic', 'gemini', 'groq', 'ollama']

PROVIDERS = [
    "openai",
    "anthropic",
    "gemini",
    "groq",
    "ollama",
    "watson",
    "bedrock",
    "azure",
    "cerebras",
]

MODELS = {
    'openai': ['gpt-4', 'gpt-4o', 'gpt-4o-mini', 'o1-mini', 'o1-preview'],
    'anthropic': ['claude-3-5-sonnet-20240620', 'claude-3-sonnet-20240229', 'claude-3-opus-20240229', 'claude-3-haiku-20240307'],
    'gemini': ['gemini-1.5-flash', 'gemini-1.5-pro', 'gemini-gemma-2-9b-it', 'gemini-gemma-2-27b-it'],
    'groq': ['llama-3.1-8b-instant', 'llama-3.1-70b-versatile', 'llama-3.1-405b-reasoning', 'gemma2-9b-it', 'gemma-7b-it'],
    'ollama': ['llama3.1', 'mixtral'],
    "openai": ["gpt-4", "gpt-4o", "gpt-4o-mini", "o1-mini", "o1-preview"],
    "anthropic": [
        "claude-3-5-sonnet-20240620",
        "claude-3-sonnet-20240229",
        "claude-3-opus-20240229",
        "claude-3-haiku-20240307",
    ],
    "gemini": [
        "gemini/gemini-1.5-flash",
        "gemini/gemini-1.5-pro",
        "gemini/gemini-gemma-2-9b-it",
        "gemini/gemini-gemma-2-27b-it",
    ],
    "groq": [
        "groq/llama-3.1-8b-instant",
        "groq/llama-3.1-70b-versatile",
        "groq/llama-3.1-405b-reasoning",
        "groq/gemma2-9b-it",
        "groq/gemma-7b-it",
    ],
    "ollama": ["ollama/llama3.1", "ollama/mixtral"],
    "watson": [
        "watsonx/meta-llama/llama-3-1-70b-instruct",
        "watsonx/meta-llama/llama-3-1-8b-instruct",
        "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
        "watsonx/meta-llama/llama-3-2-1b-instruct",
        "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
        "watsonx/meta-llama/llama-3-405b-instruct",
        "watsonx/mistral/mistral-large",
        "watsonx/ibm/granite-3-8b-instruct",
    ],
    "bedrock": [
        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
        "bedrock/anthropic.claude-v2:1",
        "bedrock/anthropic.claude-v2",
        "bedrock/anthropic.claude-instant-v1",
        "bedrock/meta.llama3-1-405b-instruct-v1:0",
        "bedrock/meta.llama3-1-70b-instruct-v1:0",
        "bedrock/meta.llama3-1-8b-instruct-v1:0",
        "bedrock/meta.llama3-70b-instruct-v1:0",
        "bedrock/meta.llama3-8b-instruct-v1:0",
        "bedrock/amazon.titan-text-lite-v1",
        "bedrock/amazon.titan-text-express-v1",
        "bedrock/cohere.command-text-v14",
        "bedrock/ai21.j2-mid-v1",
        "bedrock/ai21.j2-ultra-v1",
        "bedrock/ai21.jamba-instruct-v1:0",
        "bedrock/meta.llama2-13b-chat-v1",
        "bedrock/meta.llama2-70b-chat-v1",
        "bedrock/mistral.mistral-7b-instruct-v0:2",
        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
    ],
}

JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
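Most of the new MODELS entries are litellm-style "provider/model" ids, which is what makes the provider-prefix routing used elsewhere in this changeset work. A small sketch of how the prefix and the table line up (entries abbreviated from the table above):

# How the MODELS table lines up with litellm-style "provider/model" ids
MODELS = {
    "gemini": ["gemini/gemini-1.5-flash", "gemini/gemini-1.5-pro"],
    "groq": ["groq/llama-3.1-8b-instant"],
    "ollama": ["ollama/llama3.1", "ollama/mixtral"],
}

def provider_of(model: str) -> str:
    """litellm routes on the prefix before the first '/'; bare names default to openai."""
    return model.split("/")[0] if "/" in model else "openai"

for provider, models in MODELS.items():
    assert all(provider_of(m) == provider for m in models)
print(provider_of("groq/llama-3.1-8b-instant"))  # groq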
@@ -1,11 +1,11 @@
import shutil
import sys
from pathlib import Path

import click

from crewai.cli.constants import ENV_VARS
from crewai.cli.constants import ENV_VARS, MODELS
from crewai.cli.provider import (
    PROVIDERS,
    get_provider_data,
    select_model,
    select_provider,
@@ -29,20 +29,21 @@ def create_folder_structure(name, parent_folder=None):
            click.secho("Operation cancelled.", fg="yellow")
            sys.exit(0)
        click.secho(f"Overriding folder {folder_name}...", fg="green", bold=True)
    else:
        click.secho(
            f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
            fg="green",
            bold=True,
        )
        shutil.rmtree(folder_path)  # Delete the existing folder and its contents

    if not folder_path.exists():
        folder_path.mkdir(parents=True)
        (folder_path / "tests").mkdir(exist_ok=True)
        if not parent_folder:
            (folder_path / "src" / folder_name).mkdir(parents=True)
            (folder_path / "src" / folder_name / "tools").mkdir(parents=True)
            (folder_path / "src" / folder_name / "config").mkdir(parents=True)
    click.secho(
        f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
        fg="green",
        bold=True,
    )

    folder_path.mkdir(parents=True)
    (folder_path / "tests").mkdir(exist_ok=True)
    (folder_path / "knowledge").mkdir(exist_ok=True)
    if not parent_folder:
        (folder_path / "src" / folder_name).mkdir(parents=True)
        (folder_path / "src" / folder_name / "tools").mkdir(parents=True)
        (folder_path / "src" / folder_name / "config").mkdir(parents=True)

    return folder_path, folder_name, class_name

@@ -52,7 +53,14 @@ def copy_template_files(folder_path, name, class_name, parent_folder):
    templates_dir = package_dir / "templates" / "crew"

    root_template_files = (
        [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
        [
            ".gitignore",
            "pyproject.toml",
            "README.md",
            "knowledge/user_preference.txt",
        ]
        if not parent_folder
        else []
    )
    tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
    config_template_files = ["config/agents.yaml", "config/tasks.yaml"]
@@ -92,7 +100,10 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):

    existing_provider = None
    for provider, env_keys in ENV_VARS.items():
        if any(key in env_vars for key in env_keys):
        if any(
            "key_name" in details and details["key_name"] in env_vars
            for details in env_keys
        ):
            existing_provider = provider
            break

@@ -118,53 +129,56 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
            "No provider selected. Please try again or press 'q' to exit.", fg="red"
        )

    while True:
        selected_model = select_model(selected_provider, provider_models)
        if selected_model is None:  # User typed 'q'
            click.secho("Exiting...", fg="yellow")
            sys.exit(0)
        if selected_model:  # Valid selection
            break
        click.secho(
            "No model selected. Please try again or press 'q' to exit.", fg="red"
        )
    # Check if the selected provider has predefined models
    if selected_provider in MODELS and MODELS[selected_provider]:
        while True:
            selected_model = select_model(selected_provider, provider_models)
            if selected_model is None:  # User typed 'q'
                click.secho("Exiting...", fg="yellow")
                sys.exit(0)
            if selected_model:  # Valid selection
                break
            click.secho(
                "No model selected. Please try again or press 'q' to exit.",
                fg="red",
            )
        env_vars["MODEL"] = selected_model

    if selected_provider in PROVIDERS:
        api_key_var = ENV_VARS[selected_provider][0]
    else:
        api_key_var = click.prompt(
            f"Enter the environment variable name for your {selected_provider.capitalize()} API key",
            type=str,
            default="",
        )
    # Check if the selected provider requires API keys
    if selected_provider in ENV_VARS:
        provider_env_vars = ENV_VARS[selected_provider]
        for details in provider_env_vars:
            if details.get("default", False):
                # Automatically add default key-value pairs
                for key, value in details.items():
                    if key not in ["prompt", "key_name", "default"]:
                        env_vars[key] = value
            elif "key_name" in details:
                # Prompt for non-default key-value pairs
                prompt = details["prompt"]
                key_name = details["key_name"]
                api_key_value = click.prompt(prompt, default="", show_default=False)

    api_key_value = ""
    click.echo(
        f"Enter your {selected_provider.capitalize()} API key (press Enter to skip): ",
        nl=False,
    )
    try:
        api_key_value = input()
    except (KeyboardInterrupt, EOFError):
        api_key_value = ""
                if api_key_value.strip():
                    env_vars[key_name] = api_key_value

    if api_key_value.strip():
        env_vars = {api_key_var: api_key_value}
    if env_vars:
        write_env_file(folder_path, env_vars)
        click.secho("API key saved to .env file", fg="green")
        click.secho("API keys and model saved to .env file", fg="green")
    else:
        click.secho(
            "No API key provided. Skipping .env file creation.", fg="yellow"
            "No API keys provided. Skipping .env file creation.", fg="yellow"
        )

    env_vars["MODEL"] = selected_model
    click.secho(f"Selected model: {selected_model}", fg="green")
    click.secho(f"Selected model: {env_vars.get('MODEL', 'N/A')}", fg="green")

    package_dir = Path(__file__).parent
    templates_dir = package_dir / "templates" / "crew"

    root_template_files = (
        [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
        [".gitignore", "pyproject.toml", "README.md", "knowledge/user_preference.txt"]
        if not parent_folder
        else []
    )
    tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
    config_template_files = ["config/agents.yaml", "config/tasks.yaml"]
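After the loop above, env_vars holds MODEL plus any provider keys the user entered, and is handed to write_env_file. That helper's body isn't shown in this diff; a plausible sketch under the assumption that it serializes flat KEY=value lines (the real helper may merge with an existing .env instead of overwriting):

from pathlib import Path

def write_env_file_sketch(folder_path: Path, env_vars: dict) -> None:
    """Serialize collected key/value pairs as KEY=value lines."""
    lines = [f"{key}={value}" for key, value in env_vars.items()]
    (folder_path / ".env").write_text("\n".join(lines) + "\n")

write_env_file_sketch(
    Path("."),
    {"MODEL": "groq/llama-3.1-8b-instant", "GROQ_API_KEY": "gsk-..."},
)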
@@ -1,7 +1,7 @@
from typing import Optional
import requests
from os import getenv
from crewai.cli.utils import get_crewai_version
from crewai.cli.version import get_crewai_version
from urllib.parse import urljoin


@@ -164,7 +164,7 @@ def fetch_provider_data(cache_file):
    - dict or None: The fetched provider data or None if the operation fails.
    """
    try:
        response = requests.get(JSON_URL, stream=True, timeout=10)
        response = requests.get(JSON_URL, stream=True, timeout=60)
        response.raise_for_status()
        data = download_data(response)
        with open(cache_file, "w") as f:

@@ -5,9 +5,17 @@ from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage


def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
def reset_memories_command(
    long,
    short,
    entity,
    knowledge,
    kickoff_outputs,
    all,
) -> None:
    """
    Reset the crew memories.

@@ -17,6 +25,7 @@ def reset_memories_command(long, short, entity, knowledge, kickoff_outputs, all) -> None:
        entity (bool): Whether to reset the entity memory.
        kickoff_outputs (bool): Whether to reset the latest kickoff task outputs.
        all (bool): Whether to reset all memories.
        knowledge (bool): Whether to reset the knowledge.
    """

    try:
@@ -25,6 +34,7 @@ def reset_memories_command(long, short, entity, knowledge, kickoff_outputs, all) -> None:
            EntityMemory().reset()
            LongTermMemory().reset()
            TaskOutputStorageHandler().reset()
            KnowledgeStorage().reset()
            click.echo("All memories have been reset.")
        else:
            if long:
@@ -40,6 +50,9 @@ def reset_memories_command(long, short, entity, knowledge, kickoff_outputs, all) -> None:
            if kickoff_outputs:
                TaskOutputStorageHandler().reset()
                click.echo("Latest kickoff task outputs have been reset.")
            if knowledge:
                KnowledgeStorage().reset()
                click.echo("Knowledge has been reset.")

    except subprocess.CalledProcessError as e:
        click.echo(f"An error occurred while resetting the memories: {e}", err=True)

@@ -3,7 +3,8 @@ import subprocess
import click
from packaging import version

from crewai.cli.utils import get_crewai_version, read_toml
from crewai.cli.utils import read_toml
from crewai.cli.version import get_crewai_version


def run_crew() -> None:
@@ -24,7 +25,6 @@ def run_crew() -> None:
            f"Please run `crewai update` to update your pyproject.toml to use uv.",
            fg="red",
        )
        print()

    try:
        subprocess.run(command, capture_output=False, text=True, check=True)

@@ -1,16 +1,32 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
# Uncomment the following line to use an example of a custom tool
# from {{folder_name}}.tools.custom_tool import MyCustomTool
# Uncomment the following line to use an example of a knowledge source
# from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource

# Check our tools documentation for more information on how to use them
# from crewai_tools import SerperDevTool

@CrewBase
class {{crew_name}}Crew():
class {{crew_name}}():
    """{{crew_name}} crew"""

    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

    @before_kickoff  # Optional hook to be executed before the crew starts
    def pull_data_example(self, inputs):
        # Example of pulling data from an external API, dynamically changing the inputs
        inputs['extra_data'] = "This is extra data"
        return inputs

    @after_kickoff  # Optional hook to be executed after the crew has finished
    def log_results(self, output):
        # Example of logging results, dynamically changing the output
        print(f"Results: {output}")
        return output

    @agent
    def researcher(self) -> Agent:
        return Agent(
@@ -42,10 +58,20 @@ class {{crew_name}}():
    @crew
    def crew(self) -> Crew:
        """Creates the {{crew_name}} crew"""
        # You can add knowledge sources here
        # knowledge_path = "user_preference.txt"
        # sources = [
        #     TextFileKnowledgeSource(
        #         file_path="knowledge/user_preference.txt",
        #         metadata={"preference": "personal"}
        #     ),
        # ]

        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
            # process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
        )
            # knowledge_sources=sources, # In case you want to add knowledge sources
        )

@@ -0,0 +1,4 @@
User name is John Doe.
User is an AI Engineer.
User is interested in AI Agents.
User is based in San Francisco, California.

@@ -1,6 +1,10 @@
#!/usr/bin/env python
import sys
from {{folder_name}}.crew import {{crew_name}}Crew
import warnings

from {{folder_name}}.crew import {{crew_name}}

warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")

# This main file is intended to be a way for you to run your
# crew locally, so refrain from adding unnecessary logic into this file.
@@ -14,7 +18,7 @@ def run():
    inputs = {
        'topic': 'AI LLMs'
    }
    {{crew_name}}Crew().crew().kickoff(inputs=inputs)
    {{crew_name}}().crew().kickoff(inputs=inputs)


def train():
@@ -25,7 +29,7 @@ def train():
        "topic": "AI LLMs"
    }
    try:
        {{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
        {{crew_name}}().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)

    except Exception as e:
        raise Exception(f"An error occurred while training the crew: {e}")
@@ -35,7 +39,7 @@ def replay():
    Replay the crew execution from a specific task.
    """
    try:
        {{crew_name}}Crew().crew().replay(task_id=sys.argv[1])
        {{crew_name}}().crew().replay(task_id=sys.argv[1])

    except Exception as e:
        raise Exception(f"An error occurred while replaying the crew: {e}")
@@ -48,7 +52,7 @@ def test():
        "topic": "AI LLMs"
    }
    try:
        {{crew_name}}Crew().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
        {{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)

    except Exception as e:
        raise Exception(f"An error occurred while testing the crew: {e}")

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13"
dependencies = [
    "crewai[tools]>=0.76.9,<1.0.0"
    "crewai[tools]>=0.85.0,<1.0.0"
]

[project.scripts]

@@ -1,7 +1,8 @@
from crewai.tools import BaseTool
from typing import Type
from crewai_tools import BaseTool
from pydantic import BaseModel, Field


class MyCustomToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13"
dependencies = [
    "crewai[tools]>=0.76.9,<1.0.0",
    "crewai[tools]>=0.85.0,<1.0.0",
]

[project.scripts]

@@ -1,6 +1,6 @@
from typing import Type

from crewai_tools import BaseTool
from crewai.tools import BaseTool
from pydantic import BaseModel, Field


@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.76.9,<1.0.0" }
crewai = { extras = ["tools"], version = ">=0.85.0,<1.0.0" }
asyncio = "*"

[tool.poetry.scripts]

@@ -1,7 +1,8 @@
from typing import Type
from crewai_tools import BaseTool
from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyCustomToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]
requires-python = ">=3.10,<=3.13"
dependencies = [
    "crewai[tools]>=0.76.9,<1.0.0"
    "crewai[tools]>=0.85.0,<1.0.0"
]

[project.scripts]

@@ -1,7 +1,8 @@
from typing import Type
from crewai_tools import BaseTool
from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyCustomToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")

@@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<=3.13"
dependencies = [
    "crewai[tools]>=0.76.9"
    "crewai[tools]>=0.85.0"
]


@@ -1,4 +1,5 @@
from crewai_tools import BaseTool
from crewai.tools import BaseTool


class {{class_name}}(BaseTool):
    name: str = "Name of my tool"

@@ -1,4 +1,3 @@
import importlib.metadata
import os
import shutil
import sys
@@ -9,7 +8,6 @@ import click
import tomli
from rich.console import Console

from crewai.cli.authentication.utils import TokenManager
from crewai.cli.constants import ENV_VARS

if sys.version_info >= (3, 11):
@@ -137,11 +135,6 @@ def _get_nested_value(data: Dict[str, Any], keys: List[str]) -> Any:
    return reduce(dict.__getitem__, keys, data)


def get_crewai_version() -> str:
    """Get the version number of CrewAI running the CLI"""
    return importlib.metadata.version("crewai")


def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
    """Fetch the environment variables from a .env file and return them as a dictionary."""
    try:
@@ -166,14 +159,6 @@ def fetch_and_json_env_file(env_file_path: str = ".env") -> dict:
        return {}


def get_auth_token() -> str:
    """Get the authentication token."""
    access_token = TokenManager().get_token()
    if not access_token:
        raise Exception()
    return access_token


def tree_copy(source, destination):
    """Copies the entire directory structure from the source to the destination."""
    for item in os.listdir(source):

src/crewai/cli/version.py (new file, +6)
@@ -0,0 +1,6 @@
import importlib.metadata


def get_crewai_version() -> str:
    """Get the version number of CrewAI running the CLI"""
    return importlib.metadata.version("crewai")

@@ -5,7 +5,7 @@ import uuid
import warnings
from concurrent.futures import Future
from hashlib import md5
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union

from pydantic import (
    UUID4,
@@ -27,17 +27,18 @@ from crewai.llm import LLM
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.memory.user.user_memory import UserMemory
from crewai.process import Process
from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry
from crewai.tools.agent_tools import AgentTools
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import (
    TRAINING_DATA_FILE,
)
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.formatter import (
@@ -71,6 +72,7 @@ class Crew(BaseModel):
        manager_llm: The language model that will run the manager agent.
        manager_agent: Custom agent that will be used as manager.
        memory: Whether the crew should use memory to store memories of its execution.
        memory_config: Configuration for the memory to be used for the crew.
        cache: Whether the crew should use a cache to store the results of the tools' execution.
        function_calling_llm: The language model that will run the tool calling for all the agents.
        process: The process flow that the crew will follow (e.g., sequential, hierarchical).
@@ -94,6 +96,7 @@ class Crew(BaseModel):
    _short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
    _long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
    _entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
    _user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()
    _train: Optional[bool] = PrivateAttr(default=False)
    _train_iteration: Optional[int] = PrivateAttr()
    _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
@@ -114,6 +117,10 @@ class Crew(BaseModel):
        default=False,
        description="Whether the crew should use memory to store memories of its execution",
    )
    memory_config: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Configuration for the memory to be used for the crew.",
    )
    short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(
        default=None,
        description="An instance of the ShortTermMemory to be used by the Crew",
@@ -126,7 +133,11 @@ class Crew(BaseModel):
        default=None,
        description="An instance of the EntityMemory to be used by the Crew",
    )
    embedder: Optional[Any] = Field(
    user_memory: Optional[InstanceOf[UserMemory]] = Field(
        default=None,
        description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
    )
    embedder: Optional[dict] = Field(
        default=None,
        description="Configuration for the embedder to be used for the crew.",
    )
@@ -154,6 +165,16 @@ class Crew(BaseModel):
        default=None,
        description="Callback to be executed after each task for all agents execution.",
    )
    before_kickoff_callbacks: List[
        Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
    ] = Field(
        default_factory=list,
        description="List of callbacks to be executed before crew kickoff. It may be used to adjust inputs before the crew is executed.",
    )
    after_kickoff_callbacks: List[Callable[[CrewOutput], CrewOutput]] = Field(
        default_factory=list,
        description="List of callbacks to be executed after crew kickoff. It may be used to adjust the output of the crew.",
    )
    max_rpm: Optional[int] = Field(
        default=None,
        description="Maximum number of requests per minute for the crew execution to be respected.",
@@ -182,6 +203,13 @@ class Crew(BaseModel):
        default=[],
        description="List of execution logs for tasks",
    )
    knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
        default=None,
        description="Knowledge sources for the crew. Add knowledge sources to the knowledge object.",
    )
    _knowledge: Optional[Knowledge] = PrivateAttr(
        default=None,
    )

    @field_validator("id", mode="before")
    @classmethod
@@ -238,13 +266,42 @@ class Crew(BaseModel):
            self._short_term_memory = (
                self.short_term_memory
                if self.short_term_memory
                else ShortTermMemory(crew=self, embedder_config=self.embedder)
                else ShortTermMemory(
                    crew=self,
                    embedder_config=self.embedder,
                )
            )
            self._entity_memory = (
                self.entity_memory
                if self.entity_memory
                else EntityMemory(crew=self, embedder_config=self.embedder)
            )
            if hasattr(self, "memory_config") and self.memory_config is not None:
                self._user_memory = (
                    self.user_memory if self.user_memory else UserMemory(crew=self)
                )
            else:
                self._user_memory = None
        return self

    @model_validator(mode="after")
    def create_crew_knowledge(self) -> "Crew":
        """Create the knowledge for the crew."""
        if self.knowledge_sources:
            try:
                if isinstance(self.knowledge_sources, list) and all(
                    isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
                ):
                    self._knowledge = Knowledge(
                        sources=self.knowledge_sources,
                        embedder_config=self.embedder,
                        collection_name="crew",
                    )

            except Exception as e:
                self._logger.log(
                    "warning", f"Failed to init knowledge: {e}", color="yellow"
                )
        return self

    @model_validator(mode="after")
@@ -445,18 +502,22 @@ class Crew(BaseModel):
        training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()

        for agent in train_crew.agents:
            result = TaskEvaluator(agent).evaluate_training_data(
                training_data=training_data, agent_id=str(agent.id)
            )
            if training_data.get(str(agent.id)):
                result = TaskEvaluator(agent).evaluate_training_data(
                    training_data=training_data, agent_id=str(agent.id)
                )

            CrewTrainingHandler(filename).save_trained_data(
                agent_id=str(agent.role), trained_data=result.model_dump()
            )
                CrewTrainingHandler(filename).save_trained_data(
                    agent_id=str(agent.role), trained_data=result.model_dump()
                )

    def kickoff(
        self,
        inputs: Optional[Dict[str, Any]] = None,
    ) -> CrewOutput:
        """Starts the crew to work on its assigned tasks."""
        for before_callback in self.before_kickoff_callbacks:
            inputs = before_callback(inputs)

        self._execution_span = self._telemetry.crew_execution_span(self, inputs)
        self._task_output_handler.reset()
@@ -499,6 +560,9 @@ class Crew(BaseModel):
                f"The process '{self.process}' is not implemented yet."
            )

        for after_callback in self.after_kickoff_callbacks:
            result = after_callback(result)

        metrics += [agent._token_process.get_summary() for agent in self.agents]

        self.usage_metrics = UsageMetrics()
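The before/after kickoff callbacks thread state through kickoff: each before-callback receives and returns the inputs dict, each after-callback receives and returns the result. A toy, runnable reproduction of that chaining (Crew itself is stubbed out; only the threading matters):

from typing import Any, Dict, Optional

before_kickoff_callbacks = []
after_kickoff_callbacks = []

def add_topic(inputs: Optional[Dict[str, Any]]) -> Dict[str, Any]:
    inputs = dict(inputs or {})
    inputs["topic"] = "AI LLMs"
    return inputs

def annotate(result: str) -> str:
    return f"[reviewed] {result}"

before_kickoff_callbacks.append(add_topic)
after_kickoff_callbacks.append(annotate)

inputs: Optional[Dict[str, Any]] = None
for before_callback in before_kickoff_callbacks:
    inputs = before_callback(inputs)

result = f"report on {inputs['topic']}"  # stand-in for the crew's real output
for after_callback in after_kickoff_callbacks:
    result = after_callback(result)

print(result)  # [reviewed] report on AI LLMs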
@@ -893,6 +957,11 @@ class Crew(BaseModel):
        result = self._execute_tasks(self.tasks, start_index, True)
        return result

    def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
        if self._knowledge:
            return self._knowledge.query(query)
        return None

    def copy(self):
        """Create a deep copy of the Crew."""
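query_knowledge is the hook the agent's execute_task calls to pull crew-level context into the prompt: it returns matching snippets, or None when no knowledge is configured. A toy store mirroring that contract (the real Knowledge class embeds its sources and runs a vector search instead of substring matching):

from typing import Any, Dict, List, Optional

class KnowledgeSketch:
    def __init__(self, snippets: List[str]):
        self.snippets = snippets

    def query(self, queries: List[str]) -> List[Dict[str, Any]]:
        return [
            {"context": snippet}
            for snippet in self.snippets
            if any(q.lower() in snippet.lower() for q in queries)
        ]

knowledge: Optional[KnowledgeSketch] = KnowledgeSketch(
    ["User is based in San Francisco.", "User is an AI Engineer."]
)

def query_knowledge(query: List[str]):
    return knowledge.query(query) if knowledge else None

print(query_knowledge(["san francisco"]))
# [{'context': 'User is based in San Francisco.'}]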
@@ -1,8 +1,20 @@
import asyncio
import inspect
from typing import Any, Callable, Dict, Generic, List, Set, Type, TypeVar, Union
from typing import (
    Any,
    Callable,
    Dict,
    Generic,
    List,
    Optional,
    Set,
    Type,
    TypeVar,
    Union,
    cast,
)

from pydantic import BaseModel
from pydantic import BaseModel, ValidationError

from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.utils import get_possible_return_constants
@@ -119,7 +131,6 @@ class FlowMeta(type):
                condition_type = getattr(attr_value, "__condition_type__", "OR")
                listeners[attr_name] = (condition_type, methods)

            # TODO: should we add a check for __condition_type__ 'AND'?
            elif hasattr(attr_value, "__is_router__"):
                routers[attr_value.__router_for__] = attr_name
                possible_returns = get_possible_return_constants(attr_value)
@@ -159,8 +170,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
    def __init__(self) -> None:
        self._methods: Dict[str, Callable] = {}
        self._state: T = self._create_initial_state()
        self._executed_methods: Set[str] = set()
        self._scheduled_tasks: Set[str] = set()
        self._method_execution_counts: Dict[str, int] = {}
        self._pending_and_listeners: Dict[str, Set[str]] = {}
        self._method_outputs: List[Any] = []  # List to store all method outputs

@@ -191,10 +201,74 @@ class Flow(Generic[T], metaclass=FlowMeta):
        """Returns the list of all outputs from executed methods."""
        return self._method_outputs

    def kickoff(self) -> Any:
    def _initialize_state(self, inputs: Dict[str, Any]) -> None:
        """
        Initializes or updates the state with the provided inputs.

        Args:
            inputs: Dictionary of inputs to initialize or update the state.

        Raises:
            ValueError: If inputs do not match the structured state model.
            TypeError: If state is neither a BaseModel instance nor a dictionary.
        """
        if isinstance(self._state, BaseModel):
            # Structured state management
            try:
                # Define a function to create the dynamic class
                def create_model_with_extra_forbid(
                    base_model: Type[BaseModel],
                ) -> Type[BaseModel]:
                    class ModelWithExtraForbid(base_model):  # type: ignore
                        model_config = base_model.model_config.copy()
                        model_config["extra"] = "forbid"

                    return ModelWithExtraForbid

                # Create the dynamic class
                ModelWithExtraForbid = create_model_with_extra_forbid(
                    self._state.__class__
                )

                # Create a new instance using the combined state and inputs
                self._state = cast(
                    T, ModelWithExtraForbid(**{**self._state.model_dump(), **inputs})
                )

            except ValidationError as e:
                raise ValueError(f"Invalid inputs for structured state: {e}") from e
        elif isinstance(self._state, dict):
            # Unstructured state management
            self._state.update(inputs)
        else:
            raise TypeError("State must be a BaseModel instance or a dictionary.")

    def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
        """
        Starts the execution of the flow synchronously.

        Args:
            inputs: Optional dictionary of inputs to initialize or update the state.

        Returns:
            The final output from the flow execution.
        """
        if inputs is not None:
            self._initialize_state(inputs)
        return asyncio.run(self.kickoff_async())

    async def kickoff_async(self) -> Any:
    async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
        """
        Starts the execution of the flow asynchronously.

        Args:
            inputs: Optional dictionary of inputs to initialize or update the state.

        Returns:
            The final output from the flow execution.
        """
        if inputs is not None:
            self._initialize_state(inputs)
        if not self._start_methods:
            raise ValueError("No start method defined")
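The structured-state branch above subclasses the state model with extra="forbid" so unknown input keys are rejected instead of silently ignored, then rebuilds the state from the merged dump plus inputs. A self-contained pydantic v2 sketch of that trick (FlowState is an illustrative model, not crewai's):

from typing import Any, Dict, Type
from pydantic import BaseModel, ValidationError

class FlowState(BaseModel):
    counter: int = 0
    topic: str = ""

def forbid_extras(base_model: Type[BaseModel]) -> Type[BaseModel]:
    """Subclass the state model so unknown input keys are rejected."""
    class ModelWithExtraForbid(base_model):  # type: ignore
        model_config = {**base_model.model_config, "extra": "forbid"}
    return ModelWithExtraForbid

state = FlowState()
inputs: Dict[str, Any] = {"topic": "AI"}
state = forbid_extras(FlowState)(**{**state.model_dump(), **inputs})
print(state.topic)  # AI

try:
    forbid_extras(FlowState)(**{**state.model_dump(), "bogus": 1})
except ValidationError as e:
    print("rejected unknown key:", e.error_count())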
@@ -233,7 +307,10 @@ class Flow(Generic[T], metaclass=FlowMeta):
            )
            self._method_outputs.append(result)  # Store the output

            self._executed_methods.add(method_name)
            # Track method execution counts
            self._method_execution_counts[method_name] = (
                self._method_execution_counts.get(method_name, 0) + 1
            )

            return result

@@ -243,35 +320,34 @@ class Flow(Generic[T], metaclass=FlowMeta):
        if trigger_method in self._routers:
            router_method = self._methods[self._routers[trigger_method]]
            path = await self._execute_method(
                trigger_method, router_method
            )  # TODO: Change or not?
            # Use the path as the new trigger method
                self._routers[trigger_method], router_method
            )
            trigger_method = path

        for listener_name, (condition_type, methods) in self._listeners.items():
            if condition_type == "OR":
                if trigger_method in methods:
                    if (
                        listener_name not in self._executed_methods
                        and listener_name not in self._scheduled_tasks
                    ):
                        self._scheduled_tasks.add(listener_name)
                        listener_tasks.append(
                            self._execute_single_listener(listener_name, result)
                        )
                    # Schedule the listener without preventing re-execution
                    listener_tasks.append(
                        self._execute_single_listener(listener_name, result)
                    )
            elif condition_type == "AND":
                if all(method in self._executed_methods for method in methods):
                    if (
                        listener_name not in self._executed_methods
                        and listener_name not in self._scheduled_tasks
                    ):
                        self._scheduled_tasks.add(listener_name)
                        listener_tasks.append(
                            self._execute_single_listener(listener_name, result)
                        )
                    # Initialize pending methods for this listener if not already done
                    if listener_name not in self._pending_and_listeners:
                        self._pending_and_listeners[listener_name] = set(methods)
                    # Remove the trigger method from pending methods
                    self._pending_and_listeners[listener_name].discard(trigger_method)
                    if not self._pending_and_listeners[listener_name]:
                        # All required methods have been executed
                        listener_tasks.append(
                            self._execute_single_listener(listener_name, result)
                        )
                        # Reset pending methods for this listener
                        self._pending_and_listeners.pop(listener_name, None)

        # Run all listener tasks concurrently and wait for them to complete
        await asyncio.gather(*listener_tasks)
        if listener_tasks:
            await asyncio.gather(*listener_tasks)

    async def _execute_single_listener(self, listener_name: str, result: Any) -> None:
        try:

@@ -291,9 +367,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
            # If listener does not expect parameters, call without arguments
            listener_result = await self._execute_method(listener_name, method)

            # Remove from scheduled tasks after execution
            self._scheduled_tasks.discard(listener_name)

            # Execute listeners of this listener
            await self._execute_listeners(listener_name, listener_result)
        except Exception as e:
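The AND branch above now fires a listener only once every method in its condition set has executed, tracked via `_pending_and_listeners`. A small sketch of that behavior (hypothetical flow, assuming the `and_` helper exported by `crewai.flow.flow`):

    from crewai.flow.flow import Flow, and_, listen, start

    class FanInFlow(Flow):
        @start()
        def fetch_a(self):
            return "a"

        @start()
        def fetch_b(self):
            return "b"

        @listen(and_(fetch_a, fetch_b))
        def combine(self):
            # scheduled exactly once, after BOTH start methods have finished
            return "joined"

    print(FanInFlow().kickoff())  # joined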
55  src/crewai/knowledge/embedder/base_embedder.py  Normal file
@@ -0,0 +1,55 @@
from abc import ABC, abstractmethod
from typing import List

import numpy as np


class BaseEmbedder(ABC):
    """
    Abstract base class for text embedding models
    """

    @abstractmethod
    def embed_chunks(self, chunks: List[str]) -> np.ndarray:
        """
        Generate embeddings for a list of text chunks

        Args:
            chunks: List of text chunks to embed

        Returns:
            Array of embeddings
        """
        pass

    @abstractmethod
    def embed_texts(self, texts: List[str]) -> np.ndarray:
        """
        Generate embeddings for a list of texts

        Args:
            texts: List of texts to embed

        Returns:
            Array of embeddings
        """
        pass

    @abstractmethod
    def embed_text(self, text: str) -> np.ndarray:
        """
        Generate embedding for a single text

        Args:
            text: Text to embed

        Returns:
            Embedding array
        """
        pass

    @property
    @abstractmethod
    def dimension(self) -> int:
        """Get the dimension of the embeddings"""
        pass
93  src/crewai/knowledge/embedder/fastembed.py  Normal file
@@ -0,0 +1,93 @@
from pathlib import Path
from typing import List, Optional, Union

import numpy as np

from .base_embedder import BaseEmbedder

try:
    from fastembed_gpu import TextEmbedding  # type: ignore

    FASTEMBED_AVAILABLE = True
except ImportError:
    try:
        from fastembed import TextEmbedding

        FASTEMBED_AVAILABLE = True
    except ImportError:
        FASTEMBED_AVAILABLE = False


class FastEmbed(BaseEmbedder):
    """
    A wrapper class for text embedding models using FastEmbed
    """

    def __init__(
        self,
        model_name: str = "BAAI/bge-small-en-v1.5",
        cache_dir: Optional[Union[str, Path]] = None,
    ):
        """
        Initialize the embedding model

        Args:
            model_name: Name of the model to use
            cache_dir: Directory to cache the model
        """
        if not FASTEMBED_AVAILABLE:
            raise ImportError(
                "FastEmbed is not installed. Please install it with: "
                "uv pip install fastembed, or uv pip install fastembed-gpu for GPU support"
            )

        self.model = TextEmbedding(
            model_name=model_name,
            cache_dir=str(cache_dir) if cache_dir else None,
        )

    def embed_chunks(self, chunks: List[str]) -> List[np.ndarray]:
        """
        Generate embeddings for a list of text chunks

        Args:
            chunks: List of text chunks to embed

        Returns:
            List of embeddings
        """
        embeddings = list(self.model.embed(chunks))
        return embeddings

    def embed_texts(self, texts: List[str]) -> List[np.ndarray]:
        """
        Generate embeddings for a list of texts

        Args:
            texts: List of texts to embed

        Returns:
            List of embeddings
        """
        embeddings = list(self.model.embed(texts))
        return embeddings

    def embed_text(self, text: str) -> np.ndarray:
        """
        Generate embedding for a single text

        Args:
            text: Text to embed

        Returns:
            Embedding array
        """
        return self.embed_texts([text])[0]

    @property
    def dimension(self) -> int:
        """Get the dimension of the embeddings"""
        # Generate a test embedding to get dimensions
        test_embed = self.embed_text("test")
        return len(test_embed)
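A quick usage sketch (not part of the diff), assuming the `fastembed` package is installed so `FASTEMBED_AVAILABLE` is true:

    from crewai.knowledge.embedder.fastembed import FastEmbed

    embedder = FastEmbed()  # defaults to BAAI/bge-small-en-v1.5
    vectors = embedder.embed_texts(["hello", "world"])
    print(len(vectors), embedder.dimension)  # 2, and 384 for the default model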
68  src/crewai/knowledge/knowledge.py  Normal file
@@ -0,0 +1,68 @@
import os

from typing import List, Optional, Dict, Any
from pydantic import BaseModel, ConfigDict, Field

from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
from crewai.utilities.constants import DEFAULT_SCORE_THRESHOLD

os.environ["TOKENIZERS_PARALLELISM"] = "false"  # removes logging from fastembed


class Knowledge(BaseModel):
    """
    Knowledge is a collection of sources and setup for the vector store to save and query relevant context.

    Args:
        sources: List[BaseKnowledgeSource] = Field(default_factory=list)
        storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
        embedder_config: Optional[Dict[str, Any]] = None
    """

    sources: List[BaseKnowledgeSource] = Field(default_factory=list)
    model_config = ConfigDict(arbitrary_types_allowed=True)
    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
    embedder_config: Optional[Dict[str, Any]] = None
    collection_name: Optional[str] = None

    def __init__(
        self,
        collection_name: str,
        sources: List[BaseKnowledgeSource],
        embedder_config: Optional[Dict[str, Any]] = None,
        storage: Optional[KnowledgeStorage] = None,
        **data,
    ):
        super().__init__(**data)
        if storage:
            self.storage = storage
        else:
            self.storage = KnowledgeStorage(
                embedder_config=embedder_config, collection_name=collection_name
            )
        self.sources = sources
        self.storage.initialize_knowledge_storage()
        for source in sources:
            source.storage = self.storage
            source.add()

    def query(
        self, query: List[str], limit: int = 3, preference: Optional[str] = None
    ) -> List[Dict[str, Any]]:
        """
        Query across all knowledge sources to find the most relevant information.
        Returns the most relevant chunks, up to `limit`.
        """
        results = self.storage.search(
            query,
            limit,
            filter={"preference": preference} if preference else None,
            score_threshold=DEFAULT_SCORE_THRESHOLD,
        )
        return results

    def _add_sources(self):
        for source in self.sources:
            source.storage = self.storage
            source.add()
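Putting the pieces together, a minimal sketch (not part of the diff; it assumes an embedding backend is available, since the default storage falls back to OpenAI's text-embedding-3-small and therefore needs OPENAI_API_KEY):

    from crewai.knowledge.knowledge import Knowledge
    from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

    source = StringKnowledgeSource(content="Paris is the capital of France.")
    kb = Knowledge(collection_name="geo", sources=[source])  # embeds and persists on init
    print(kb.query(["capital of France"], limit=1))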
0  src/crewai/knowledge/source/__init__.py  Normal file
72  src/crewai/knowledge/source/base_file_knowledge_source.py  Normal file
@@ -0,0 +1,72 @@
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Union, List, Dict, Any

from pydantic import Field

from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.utilities.logger import Logger
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY


class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
    """Base class for knowledge sources that load content from files."""

    _logger: Logger = Logger(verbose=True)
    file_path: Union[Path, List[Path], str, List[str]] = Field(
        ..., description="The path to the file"
    )
    content: Dict[Path, str] = Field(init=False, default_factory=dict)
    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
    safe_file_paths: List[Path] = Field(default_factory=list)

    def model_post_init(self, _):
        """Post-initialization method to load content."""
        self.safe_file_paths = self._process_file_paths()
        self.validate_paths()
        self.content = self.load_content()

    @abstractmethod
    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess file content. Should be overridden by subclasses. Assume that the file path is relative to the project root in the knowledge directory."""
        pass

    def validate_paths(self):
        """Validate the paths."""
        for path in self.safe_file_paths:
            if not path.exists():
                self._logger.log(
                    "error",
                    f"File not found: {path}. Try adding sources to the knowledge directory. If it's inside the knowledge directory, use the relative path.",
                    color="red",
                )
                raise FileNotFoundError(f"File not found: {path}")
            if not path.is_file():
                self._logger.log(
                    "error",
                    f"Path is not a file: {path}",
                    color="red",
                )

    def save_documents(self, metadata: Dict[str, Any]):
        """Save the documents to the storage."""
        chunk_metadatas = [metadata.copy() for _ in self.chunks]
        self.storage.save(self.chunks, chunk_metadatas)

    def convert_to_path(self, path: Union[Path, str]) -> Path:
        """Convert a path to a Path object."""
        return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path

    def _process_file_paths(self) -> List[Path]:
        """Convert file_path to a list of Path objects."""
        paths = (
            [self.file_path]
            if isinstance(self.file_path, (str, Path))
            else self.file_path
        )

        if not isinstance(paths, list):
            raise ValueError("file_path must be a Path, str, or a list of these types")

        return [self.convert_to_path(path) for path in paths]
49  src/crewai/knowledge/source/base_knowledge_source.py  Normal file
@@ -0,0 +1,49 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional

import numpy as np
from pydantic import BaseModel, ConfigDict, Field

from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage


class BaseKnowledgeSource(BaseModel, ABC):
    """Abstract base class for knowledge sources."""

    chunk_size: int = 4000
    chunk_overlap: int = 200
    chunks: List[str] = Field(default_factory=list)
    chunk_embeddings: List[np.ndarray] = Field(default_factory=list)

    model_config = ConfigDict(arbitrary_types_allowed=True)
    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
    metadata: Dict[str, Any] = Field(default_factory=dict)
    collection_name: Optional[str] = Field(default=None)

    @abstractmethod
    def load_content(self) -> Dict[Any, str]:
        """Load and preprocess content from the source."""
        pass

    @abstractmethod
    def add(self) -> None:
        """Process content, chunk it, compute embeddings, and save them."""
        pass

    def get_embeddings(self) -> List[np.ndarray]:
        """Return the list of embeddings for the chunks."""
        return self.chunk_embeddings

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]

    def save_documents(self, metadata: Dict[str, Any]):
        """
        Save the documents to the storage.
        This method should be called after the chunks and embeddings are generated.
        """
        self.storage.save(self.chunks, metadata)
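The sliding-window arithmetic in `_chunk_text` is worth spelling out: successive chunks start `chunk_size - chunk_overlap` characters apart, so with the defaults each 4000-character chunk repeats the last 200 characters of its predecessor. A quick check:

    text = "x" * 9000
    starts = list(range(0, len(text), 4000 - 200))   # [0, 3800, 7600]
    chunks = [text[i : i + 4000] for i in starts]
    print([len(c) for c in chunks])                  # [4000, 4000, 1400]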
40  src/crewai/knowledge/source/csv_knowledge_source.py  Normal file
@@ -0,0 +1,40 @@
import csv
from typing import Dict, List
from pathlib import Path

from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class CSVKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries CSV file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess CSV file content."""
        content_dict = {}
        for file_path in self.safe_file_paths:
            with open(file_path, "r", encoding="utf-8") as csvfile:
                reader = csv.reader(csvfile)
                content = ""
                for row in reader:
                    content += " ".join(row) + "\n"
            content_dict[file_path] = content
        return content_dict

    def add(self) -> None:
        """
        Add CSV file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        content_str = (
            str(self.content) if isinstance(self.content, dict) else self.content
        )
        new_chunks = self._chunk_text(content_str)
        self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
54  src/crewai/knowledge/source/excel_knowledge_source.py  Normal file
@@ -0,0 +1,54 @@
from typing import Dict, List
from pathlib import Path
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class ExcelKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries Excel file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess Excel file content."""
        pd = self._import_dependencies()

        content_dict = {}
        for file_path in self.safe_file_paths:
            file_path = self.convert_to_path(file_path)
            df = pd.read_excel(file_path)
            content = df.to_csv(index=False)
            content_dict[file_path] = content
        return content_dict

    def _import_dependencies(self):
        """Dynamically import dependencies."""
        try:
            import openpyxl  # noqa
            import pandas as pd

            return pd
        except ImportError as e:
            missing_package = str(e).split()[-1]
            raise ImportError(
                f"{missing_package} is not installed. Please install it with: pip install {missing_package}"
            )

    def add(self) -> None:
        """
        Add Excel file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        # Convert dictionary values to a single string if content is a dictionary
        if isinstance(self.content, dict):
            content_str = "\n".join(str(value) for value in self.content.values())
        else:
            content_str = str(self.content)

        new_chunks = self._chunk_text(content_str)
        self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
src/crewai/knowledge/source/json_knowledge_source.py
Normal file
52
src/crewai/knowledge/source/json_knowledge_source.py
Normal file
@@ -0,0 +1,52 @@
|
||||
import json
|
||||
from typing import Any, Dict, List
|
||||
from pathlib import Path
|
||||
|
||||
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||
|
||||
|
||||
class JSONKnowledgeSource(BaseFileKnowledgeSource):
|
||||
"""A knowledge source that stores and queries JSON file content using embeddings."""
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess JSON file content."""
|
||||
content: Dict[Path, str] = {}
|
||||
for path in self.safe_file_paths:
|
||||
path = self.convert_to_path(path)
|
||||
with open(path, "r", encoding="utf-8") as json_file:
|
||||
data = json.load(json_file)
|
||||
content[path] = self._json_to_text(data)
|
||||
return content
|
||||
|
||||
def _json_to_text(self, data: Any, level: int = 0) -> str:
|
||||
"""Recursively convert JSON data to a text representation."""
|
||||
text = ""
|
||||
indent = " " * level
|
||||
if isinstance(data, dict):
|
||||
for key, value in data.items():
|
||||
text += f"{indent}{key}: {self._json_to_text(value, level + 1)}\n"
|
||||
elif isinstance(data, list):
|
||||
for item in data:
|
||||
text += f"{indent}- {self._json_to_text(item, level + 1)}\n"
|
||||
else:
|
||||
text += f"{str(data)}"
|
||||
return text
|
||||
|
||||
def add(self) -> None:
|
||||
"""
|
||||
Add JSON file content to the knowledge source, chunk it, compute embeddings,
|
||||
and save the embeddings.
|
||||
"""
|
||||
content_str = (
|
||||
str(self.content) if isinstance(self.content, dict) else self.content
|
||||
)
|
||||
new_chunks = self._chunk_text(content_str)
|
||||
self.chunks.extend(new_chunks)
|
||||
self.save_documents(metadata=self.metadata)
|
||||
|
||||
def _chunk_text(self, text: str) -> List[str]:
|
||||
"""Utility method to split text into chunks."""
|
||||
return [
|
||||
text[i : i + self.chunk_size]
|
||||
for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
|
||||
]
|
||||
53  src/crewai/knowledge/source/pdf_knowledge_source.py  Normal file
@@ -0,0 +1,53 @@
from typing import List, Dict
from pathlib import Path

from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class PDFKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries PDF file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess PDF file content."""
        pdfplumber = self._import_pdfplumber()

        content = {}

        for path in self.safe_file_paths:
            text = ""
            path = self.convert_to_path(path)
            with pdfplumber.open(path) as pdf:
                for page in pdf.pages:
                    page_text = page.extract_text()
                    if page_text:
                        text += page_text + "\n"
            content[path] = text
        return content

    def _import_pdfplumber(self):
        """Dynamically import pdfplumber."""
        try:
            import pdfplumber

            return pdfplumber
        except ImportError:
            raise ImportError(
                "pdfplumber is not installed. Please install it with: pip install pdfplumber"
            )

    def add(self) -> None:
        """
        Add PDF file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        for _, text in self.content.items():
            new_chunks = self._chunk_text(text)
            self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
34  src/crewai/knowledge/source/string_knowledge_source.py  Normal file
@@ -0,0 +1,34 @@
from typing import List, Optional

from pydantic import Field

from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource


class StringKnowledgeSource(BaseKnowledgeSource):
    """A knowledge source that stores and queries plain text content using embeddings."""

    content: str = Field(...)
    collection_name: Optional[str] = Field(default=None)

    def model_post_init(self, _):
        """Post-initialization method to validate content."""
        self.load_content()

    def load_content(self):
        """Validate string content."""
        if not isinstance(self.content, str):
            raise ValueError("StringKnowledgeSource only accepts string content")

    def add(self) -> None:
        """Add string content to the knowledge source, chunk it, compute embeddings, and save them."""
        new_chunks = self._chunk_text(self.content)
        self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
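Of the concrete sources this is the simplest to try out; a short sketch (not part of the diff):

    from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

    source = StringKnowledgeSource(
        content="CrewAI flows support structured and unstructured state.",
        metadata={"topic": "flows"},
    )
    # chunking and persistence happen when a Knowledge store calls source.add()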
34  src/crewai/knowledge/source/text_file_knowledge_source.py  Normal file
@@ -0,0 +1,34 @@
from typing import Dict, List
from pathlib import Path

from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class TextFileKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries text file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess text file content."""
        content = {}
        for path in self.safe_file_paths:
            path = self.convert_to_path(path)
            with open(path, "r", encoding="utf-8") as f:
                content[path] = f.read()
        return content

    def add(self) -> None:
        """
        Add text file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        for _, text in self.content.items():
            new_chunks = self._chunk_text(text)
            self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
0  src/crewai/knowledge/storage/__init__.py  Normal file
29  src/crewai/knowledge/storage/base_knowledge_storage.py  Normal file
@@ -0,0 +1,29 @@
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Optional


class BaseKnowledgeStorage(ABC):
    """Abstract base class for knowledge storage implementations."""

    @abstractmethod
    def search(
        self,
        query: List[str],
        limit: int = 3,
        filter: Optional[dict] = None,
        score_threshold: float = 0.35,
    ) -> List[Dict[str, Any]]:
        """Search for documents in the knowledge base."""
        pass

    @abstractmethod
    def save(
        self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]
    ) -> None:
        """Save documents to the knowledge base."""
        pass

    @abstractmethod
    def reset(self) -> None:
        """Reset the knowledge base."""
        pass
175  src/crewai/knowledge/storage/knowledge_storage.py  Normal file
@@ -0,0 +1,175 @@
import contextlib
import io
import logging
import chromadb
import os

import chromadb.errors
from crewai.utilities.paths import db_storage_path
from typing import Optional, List, Dict, Any, Union
from crewai.utilities import EmbeddingConfigurator
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
import hashlib
from chromadb.config import Settings
from chromadb.api import ClientAPI
from crewai.utilities.logger import Logger


@contextlib.contextmanager
def suppress_logging(
    logger_name="chromadb.segment.impl.vector.local_persistent_hnsw",
    level=logging.ERROR,
):
    logger = logging.getLogger(logger_name)
    original_level = logger.getEffectiveLevel()
    logger.setLevel(level)
    with (
        contextlib.redirect_stdout(io.StringIO()),
        contextlib.redirect_stderr(io.StringIO()),
        contextlib.suppress(UserWarning),
    ):
        yield
    logger.setLevel(original_level)


class KnowledgeStorage(BaseKnowledgeStorage):
    """
    Extends Storage to handle embeddings for memory entries, improving
    search efficiency.
    """

    collection: Optional[chromadb.Collection] = None
    collection_name: Optional[str] = "knowledge"
    app: Optional[ClientAPI] = None

    def __init__(
        self,
        embedder_config: Optional[Dict[str, Any]] = None,
        collection_name: Optional[str] = None,
    ):
        self.collection_name = collection_name
        self._set_embedder_config(embedder_config)

    def search(
        self,
        query: List[str],
        limit: int = 3,
        filter: Optional[dict] = None,
        score_threshold: float = 0.35,
    ) -> List[Dict[str, Any]]:
        with suppress_logging():
            if self.collection:
                fetched = self.collection.query(
                    query_texts=query,
                    n_results=limit,
                    where=filter,
                )
                results = []
                for i in range(len(fetched["ids"][0])):  # type: ignore
                    result = {
                        "id": fetched["ids"][0][i],  # type: ignore
                        "metadata": fetched["metadatas"][0][i],  # type: ignore
                        "context": fetched["documents"][0][i],  # type: ignore
                        "score": fetched["distances"][0][i],  # type: ignore
                    }
                    if result["score"] >= score_threshold:  # type: ignore
                        results.append(result)
                return results
            else:
                raise Exception("Collection not initialized")

    def initialize_knowledge_storage(self):
        base_path = os.path.join(db_storage_path(), "knowledge")
        chroma_client = chromadb.PersistentClient(
            path=base_path,
            settings=Settings(allow_reset=True),
        )

        self.app = chroma_client

        try:
            collection_name = (
                f"knowledge_{self.collection_name}"
                if self.collection_name
                else "knowledge"
            )
            if self.app:
                self.collection = self.app.get_or_create_collection(
                    name=collection_name, embedding_function=self.embedder_config
                )
            else:
                raise Exception("Vector Database Client not initialized")
        except Exception:
            raise Exception("Failed to create or get collection")

    def reset(self):
        if self.app:
            self.app.reset()
        else:
            base_path = os.path.join(db_storage_path(), "knowledge")
            self.app = chromadb.PersistentClient(
                path=base_path,
                settings=Settings(allow_reset=True),
            )
            self.app.reset()

    def save(
        self,
        documents: List[str],
        metadata: Union[Dict[str, Any], List[Dict[str, Any]]],
    ):
        if self.collection:
            try:
                metadatas = [metadata] if isinstance(metadata, dict) else metadata

                ids = [
                    hashlib.sha256(doc.encode("utf-8")).hexdigest() for doc in documents
                ]

                self.collection.upsert(
                    documents=documents,
                    metadatas=metadatas,
                    ids=ids,
                )
            except chromadb.errors.InvalidDimensionException as e:
                Logger(verbose=True).log(
                    "error",
                    "Embedding dimension mismatch. This usually happens when mixing different embedding models. Try resetting the collection using `crewai reset-memories -a`",
                    "red",
                )
                raise ValueError(
                    "Embedding dimension mismatch. Make sure you're using the same embedding model "
                    "across all operations with this collection. "
                    "Try resetting the collection using `crewai reset-memories -a`"
                ) from e
            except Exception as e:
                Logger(verbose=True).log(
                    "error", f"Failed to upsert documents: {e}", "red"
                )
                raise
        else:
            raise Exception("Collection not initialized")

    def _create_default_embedding_function(self):
        from chromadb.utils.embedding_functions.openai_embedding_function import (
            OpenAIEmbeddingFunction,
        )

        return OpenAIEmbeddingFunction(
            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
        )

    def _set_embedder_config(
        self, embedder_config: Optional[Dict[str, Any]] = None
    ) -> None:
        """Set the embedding configuration for the knowledge storage.

        Args:
            embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
                If None or empty, defaults to the default embedding function.
        """
        self.embedder_config = (
            EmbeddingConfigurator().configure_embedder(embedder_config)
            if embedder_config
            else self._create_default_embedding_function()
        )
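A direct-usage sketch of the storage layer (not part of the diff; the default embedding function requires OPENAI_API_KEY):

    from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage

    storage = KnowledgeStorage(collection_name="docs")
    storage.initialize_knowledge_storage()        # creates/opens the knowledge_docs collection
    storage.save(["CrewAI supports pluggable knowledge sources."], {"topic": "docs"})
    hits = storage.search(["knowledge sources"], limit=1)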
12  src/crewai/knowledge/utils/knowledge_utils.py  Normal file
@@ -0,0 +1,12 @@
from typing import Any, Dict, List


def extract_knowledge_context(knowledge_snippets: List[Dict[str, Any]]) -> str:
    """Format retrieved knowledge snippets for inclusion in the task prompt."""
    valid_snippets = [
        result["context"]
        for result in knowledge_snippets
        if result and result.get("context")
    ]
    snippet = "\n".join(valid_snippets)
    return f"Additional Information: {snippet}" if valid_snippets else ""
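Given the result shape returned by `KnowledgeStorage.search`, the helper reduces to a simple filter-and-join; for example:

    from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context

    snippets = [
        {"id": "1", "context": "Paris is the capital of France.", "score": 0.9},
        {"id": "2", "context": None, "score": 0.4},   # dropped: no context
    ]
    print(extract_knowledge_context(snippets))
    # Additional Information: Paris is the capital of France.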
@@ -1,7 +1,10 @@
import logging
import sys
import threading
import warnings
from contextlib import contextmanager
from typing import Any, Dict, List, Optional, Union
import logging
import warnings

import litellm
from litellm import get_supported_openai_params

@@ -9,20 +12,26 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)

import sys
import io

class FilteredStream:
    def __init__(self, original_stream):
        self._original_stream = original_stream
        self._lock = threading.Lock()

class FilteredStream(io.StringIO):
    def write(self, s):
        if (
            "Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
            in s
            or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
            in s
        ):
            return
        super().write(s)

    def write(self, s) -> int:
        with self._lock:
            if (
                "Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
                in s
                or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
                in s
            ):
                return 0
            return self._original_stream.write(s)

    def flush(self):
        with self._lock:
            return self._original_stream.flush()


LLM_CONTEXT_WINDOW_SIZES = {

@@ -60,8 +69,8 @@ def suppress_warnings():
    # Redirect stdout and stderr
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = FilteredStream()
    sys.stderr = FilteredStream()
    sys.stdout = FilteredStream(old_stdout)
    sys.stderr = FilteredStream(old_stderr)

    try:
        yield

@@ -118,12 +127,12 @@ class LLM:

        litellm.drop_params = True
        litellm.set_verbose = False
        litellm.callbacks = callbacks
        self.set_callbacks(callbacks)

    def call(self, messages: List[Dict[str, str]], callbacks: List[Any] = []) -> str:
        with suppress_warnings():
            if callbacks and len(callbacks) > 0:
                litellm.callbacks = callbacks
                self.set_callbacks(callbacks)

            try:
                params = {

@@ -181,3 +190,15 @@ class LLM:
    def get_context_window_size(self) -> int:
        # Only using 75% of the context window size to avoid cutting the message in the middle
        return int(LLM_CONTEXT_WINDOW_SIZES.get(self.model, 8192) * 0.75)

    def set_callbacks(self, callbacks: List[Any]):
        callback_types = [type(callback) for callback in callbacks]
        for callback in litellm.success_callback[:]:
            if type(callback) in callback_types:
                litellm.success_callback.remove(callback)

        for callback in litellm._async_success_callback[:]:
            if type(callback) in callback_types:
                litellm._async_success_callback.remove(callback)

        litellm.callbacks = callbacks
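The 75% margin in `get_context_window_size` leaves headroom so a message is not cut off mid-stream; for a model missing from LLM_CONTEXT_WINDOW_SIZES the fallback works out to:

    usable = int(8192 * 0.75)   # 6144 tokens usable for unknown models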
@@ -1,5 +1,6 @@
from .entity.entity_memory import EntityMemory
from .long_term.long_term_memory import LongTermMemory
from .short_term.short_term_memory import ShortTermMemory
from .user.user_memory import UserMemory

__all__ = ["EntityMemory", "LongTermMemory", "ShortTermMemory"]
__all__ = ["UserMemory", "EntityMemory", "LongTermMemory", "ShortTermMemory"]
@@ -1,13 +1,25 @@
from typing import Optional
from typing import Optional, Dict, Any

from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory
from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory, UserMemory


class ContextualMemory:
    def __init__(self, stm: ShortTermMemory, ltm: LongTermMemory, em: EntityMemory):
    def __init__(
        self,
        memory_config: Optional[Dict[str, Any]],
        stm: ShortTermMemory,
        ltm: LongTermMemory,
        em: EntityMemory,
        um: UserMemory,
    ):
        if memory_config is not None:
            self.memory_provider = memory_config.get("provider")
        else:
            self.memory_provider = None
        self.stm = stm
        self.ltm = ltm
        self.em = em
        self.um = um

    def build_context_for_task(self, task, context) -> str:
        """

@@ -23,6 +35,8 @@ class ContextualMemory:
        context.append(self._fetch_ltm_context(task.description))
        context.append(self._fetch_stm_context(query))
        context.append(self._fetch_entity_context(query))
        if self.memory_provider == "mem0":
            context.append(self._fetch_user_context(query))
        return "\n".join(filter(None, context))

    def _fetch_stm_context(self, query) -> str:

@@ -32,7 +46,10 @@ class ContextualMemory:
        """
        stm_results = self.stm.search(query)
        formatted_results = "\n".join(
            [f"- {result['context']}" for result in stm_results]
            [
                f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
                for result in stm_results
            ]
        )
        return f"Recent Insights:\n{formatted_results}" if stm_results else ""

@@ -62,6 +79,26 @@ class ContextualMemory:
        """
        em_results = self.em.search(query)
        formatted_results = "\n".join(
            [f"- {result['context']}" for result in em_results]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
            [
                f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
                for result in em_results
            ]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
        )
        return f"Entities:\n{formatted_results}" if em_results else ""

    def _fetch_user_context(self, query: str) -> str:
        """
        Fetches and formats relevant user information from User Memory.
        Args:
            query (str): The search query to find relevant user memories.
        Returns:
            str: Formatted user memories as bullet points, or an empty string if none found.
        """
        user_memories = self.um.search(query)
        if not user_memories:
            return ""

        formatted_memories = "\n".join(
            f"- {result['memory']}" for result in user_memories
        )
        return f"User memories/preferences:\n{formatted_memories}"
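The `memory_provider` checks above key off `Crew.memory_config`. A sketch of the configuration that routes memory through Mem0 (hypothetical agent and task; assumes `mem0ai` is installed and MEM0_API_KEY is set):

    from crewai import Agent, Crew, Task

    researcher = Agent(role="Researcher", goal="Find facts", backstory="Methodical")
    report = Task(description="Summarize findings", expected_output="A summary", agent=researcher)
    crew = Crew(
        agents=[researcher],
        tasks=[report],
        memory=True,
        memory_config={"provider": "mem0", "config": {"user_id": "john"}},
    )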
@@ -10,22 +10,45 @@ class EntityMemory(Memory):
    Inherits from the Memory class.
    """

    def __init__(self, crew=None, embedder_config=None, storage=None):
        storage = (
            storage
            if storage
            else RAGStorage(
                type="entities",
                allow_reset=True,
                embedder_config=embedder_config,
                crew=crew,
    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
        if hasattr(crew, "memory_config") and crew.memory_config is not None:
            self.memory_provider = crew.memory_config.get("provider")
        else:
            self.memory_provider = None

        if self.memory_provider == "mem0":
            try:
                from crewai.memory.storage.mem0_storage import Mem0Storage
            except ImportError:
                raise ImportError(
                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
                )
            storage = Mem0Storage(type="entities", crew=crew)
        else:
            storage = (
                storage
                if storage
                else RAGStorage(
                    type="entities",
                    allow_reset=True,
                    embedder_config=embedder_config,
                    crew=crew,
                    path=path,
                )
            )
        super().__init__(storage)

    def save(self, item: EntityMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
        """Saves an entity item into the SQLite storage."""
        data = f"{item.name}({item.type}): {item.description}"
        if self.memory_provider == "mem0":
            data = f"""
            Remember details about the following entity:
            Name: {item.name}
            Type: {item.type}
            Entity Description: {item.description}
            """
        else:
            data = f"{item.name}({item.type}): {item.description}"
        super().save(data, item.metadata)

    def reset(self) -> None:
@@ -14,8 +14,9 @@ class LongTermMemory(Memory):
    LongTermMemoryItem instances.
    """

    def __init__(self, storage=None):
        storage = storage if storage else LTMSQLiteStorage()
    def __init__(self, storage=None, path=None):
        if not storage:
            storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
        super().__init__(storage)

    def save(self, item: LongTermMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
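With the new `path` argument the long-term store can live anywhere; a one-line sketch (hypothetical location):

    from crewai.memory import LongTermMemory

    ltm = LongTermMemory(path="./crew_storage/long_term.db")  # custom SQLite file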
@@ -23,5 +23,12 @@ class Memory:

        self.storage.save(value, metadata)

    def search(self, query: str) -> List[Dict[str, Any]]:
        return self.storage.search(query)
    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ) -> List[Any]:
        return self.storage.search(
            query=query, limit=limit, score_threshold=score_threshold
        )
@@ -13,14 +13,28 @@ class ShortTermMemory(Memory):
    MemoryItem instances.
    """

    def __init__(self, crew=None, embedder_config=None, storage=None):
        storage = (
            storage
            if storage
            else RAGStorage(
                type="short_term", embedder_config=embedder_config, crew=crew
    def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
        if hasattr(crew, "memory_config") and crew.memory_config is not None:
            self.memory_provider = crew.memory_config.get("provider")
        else:
            self.memory_provider = None

        if self.memory_provider == "mem0":
            try:
                from crewai.memory.storage.mem0_storage import Mem0Storage
            except ImportError:
                raise ImportError(
                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
                )
            storage = Mem0Storage(type="short_term", crew=crew)
        else:
            storage = (
                storage
                if storage
                else RAGStorage(
                    type="short_term", embedder_config=embedder_config, crew=crew, path=path
                )
            )
        super().__init__(storage)

    def save(

@@ -30,11 +44,20 @@ class ShortTermMemory(Memory):
        agent: Optional[str] = None,
    ) -> None:
        item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
        if self.memory_provider == "mem0":
            item.data = f"Remember the following insights from Agent run: {item.data}"

        super().save(value=item.data, metadata=item.metadata, agent=item.agent)

    def search(self, query: str, score_threshold: float = 0.35):
        return self.storage.search(query=query, score_threshold=score_threshold)  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters
    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ):
        return self.storage.search(
            query=query, limit=limit, score_threshold=score_threshold
        )  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters

    def reset(self) -> None:
        try:
@@ -7,8 +7,10 @@ class Storage:
    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
        pass

    def search(self, key: str) -> List[Dict[str, Any]]:  # type: ignore
        pass
    def search(
        self, query: str, limit: int, score_threshold: float
    ) -> Dict[str, Any] | List[Any]:
        return {}

    def reset(self) -> None:
        pass
@@ -103,7 +103,7 @@ class KickoffTaskOutputsSQLiteStorage:
                else value
            )

        query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"
        query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"  # nosec
        values.append(task_index)

        cursor.execute(query, tuple(values))
@@ -83,7 +83,7 @@ class LTMSQLiteStorage:
                    WHERE task_description = ?
                    ORDER BY datetime DESC, score ASC
                    LIMIT {latest_n}
                """,
                """,  # nosec
                (task_description,),
            )
            rows = cursor.fetchall()
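As in the previous hunk, the `# nosec` annotation silences Bandit's SQL-injection finding (B608): the interpolated pieces (`latest_n` here, the column list above) come from code-controlled values, while user-supplied data such as `task_description` is still passed as a bound parameter. This pairs with the workflow change that relaxed the Bandit severity threshold from `-lll` to `-ll`.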
104  src/crewai/memory/storage/mem0_storage.py  Normal file
@@ -0,0 +1,104 @@
import os
from typing import Any, Dict, List

from mem0 import MemoryClient
from crewai.memory.storage.interface import Storage


class Mem0Storage(Storage):
    """
    Extends Storage to handle embedding and searching across entities using Mem0.
    """

    def __init__(self, type, crew=None):
        super().__init__()

        if type not in ["user", "short_term", "long_term", "entities"]:
            raise ValueError(
                "Invalid type for Mem0Storage. Must be 'user', 'short_term', 'long_term' or 'entities'."
            )

        self.memory_type = type
        self.crew = crew
        self.memory_config = crew.memory_config

        # User ID is required for user memory type "user" since it's used as a unique identifier for the user.
        user_id = self._get_user_id()
        if type == "user" and not user_id:
            raise ValueError("User ID is required for user memory type")

        # API key in memory config overrides the environment variable
        mem0_api_key = self.memory_config.get("config", {}).get("api_key") or os.getenv(
            "MEM0_API_KEY"
        )
        self.memory = MemoryClient(api_key=mem0_api_key)

    def _sanitize_role(self, role: str) -> str:
        """
        Sanitizes agent roles to ensure valid directory names.
        """
        return role.replace("\n", "").replace(" ", "_").replace("/", "_")

    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
        user_id = self._get_user_id()
        agent_name = self._get_agent_name()
        if self.memory_type == "user":
            self.memory.add(value, user_id=user_id, metadata={**metadata})
        elif self.memory_type == "short_term":
            agent_name = self._get_agent_name()
            self.memory.add(
                value, agent_id=agent_name, metadata={"type": "short_term", **metadata}
            )
        elif self.memory_type == "long_term":
            agent_name = self._get_agent_name()
            self.memory.add(
                value,
                agent_id=agent_name,
                infer=False,
                metadata={"type": "long_term", **metadata},
            )
        elif self.memory_type == "entities":
            entity_name = None
            self.memory.add(
                value, user_id=entity_name, metadata={"type": "entity", **metadata}
            )

    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ) -> List[Any]:
        params = {"query": query, "limit": limit}
        if self.memory_type == "user":
            user_id = self._get_user_id()
            params["user_id"] = user_id
        elif self.memory_type == "short_term":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "short_term"}
        elif self.memory_type == "long_term":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "long_term"}
        elif self.memory_type == "entities":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "entity"}

        # Discard the filters for now since we create the filters
        # automatically when the crew is created.
        results = self.memory.search(**params)
        return [r for r in results if r["score"] >= score_threshold]

    def _get_user_id(self):
        if self.memory_type == "user":
            if hasattr(self, "memory_config") and self.memory_config is not None:
                return self.memory_config.get("config", {}).get("user_id")
            else:
                return None
        return None

    def _get_agent_name(self):
        agents = self.crew.agents if self.crew else []
        agents = [self._sanitize_role(agent.role) for agent in agents]
        agents = "_".join(agents)
        return agents
@@ -4,13 +4,12 @@ import logging
import os
import shutil
import uuid

from typing import Any, Dict, List, Optional
from chromadb.api import ClientAPI
from crewai.memory.storage.base_rag_storage import BaseRAGStorage
from crewai.utilities.paths import db_storage_path
from chromadb.api import ClientAPI
from chromadb.api.types import validate_embedding_function
from chromadb import Documents, EmbeddingFunction, Embeddings
from typing import cast
from crewai.utilities import EmbeddingConfigurator


@contextlib.contextmanager

@@ -21,9 +20,11 @@ def suppress_logging(
    logger = logging.getLogger(logger_name)
    original_level = logger.getEffectiveLevel()
    logger.setLevel(level)
    with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(
        io.StringIO()
    ), contextlib.suppress(UserWarning):
    with (
        contextlib.redirect_stdout(io.StringIO()),
        contextlib.redirect_stderr(io.StringIO()),
        contextlib.suppress(UserWarning),
    ):
        yield
    logger.setLevel(original_level)

@@ -36,7 +37,7 @@ class RAGStorage(BaseRAGStorage):

    app: ClientAPI | None = None

    def __init__(self, type, allow_reset=True, embedder_config=None, crew=None):
    def __init__(self, type, allow_reset=True, embedder_config=None, crew=None, path=None):
        super().__init__(type, allow_reset, embedder_config, crew)
        agents = crew.agents if crew else []
        agents = [self._sanitize_role(agent.role) for agent in agents]

@@ -46,80 +47,12 @@ class RAGStorage(BaseRAGStorage):
        self.type = type

        self.allow_reset = allow_reset
        self.path = path
        self._initialize_app()

    def _set_embedder_config(self):
        import chromadb.utils.embedding_functions as embedding_functions

        if self.embedder_config is None:
            self.embedder_config = self._create_default_embedding_function()

        if isinstance(self.embedder_config, dict):
            provider = self.embedder_config.get("provider")
            config = self.embedder_config.get("config", {})
            model_name = config.get("model")
            if provider == "openai":
                self.embedder_config = embedding_functions.OpenAIEmbeddingFunction(
                    api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
                    model_name=model_name,
                )
            elif provider == "azure":
                self.embedder_config = embedding_functions.OpenAIEmbeddingFunction(
                    api_key=config.get("api_key"),
                    api_base=config.get("api_base"),
                    api_type=config.get("api_type", "azure"),
                    api_version=config.get("api_version"),
                    model_name=model_name,
                )
            elif provider == "ollama":
                from openai import OpenAI

                class OllamaEmbeddingFunction(EmbeddingFunction):
                    def __call__(self, input: Documents) -> Embeddings:
                        client = OpenAI(
                            base_url="http://localhost:11434/v1",
                            api_key=config.get("api_key", "ollama"),
                        )
                        try:
                            response = client.embeddings.create(
                                input=input, model=model_name
                            )
                            embeddings = [item.embedding for item in response.data]
                            return cast(Embeddings, embeddings)
                        except Exception as e:
                            raise e

                self.embedder_config = OllamaEmbeddingFunction()
            elif provider == "vertexai":
                self.embedder_config = (
                    embedding_functions.GoogleVertexEmbeddingFunction(
                        model_name=model_name,
                        api_key=config.get("api_key"),
                    )
                )
            elif provider == "google":
                self.embedder_config = (
                    embedding_functions.GoogleGenerativeAiEmbeddingFunction(
                        model_name=model_name,
                        api_key=config.get("api_key"),
                    )
                )
            elif provider == "cohere":
                self.embedder_config = embedding_functions.CohereEmbeddingFunction(
                    model_name=model_name,
                    api_key=config.get("api_key"),
                )
            elif provider == "huggingface":
                self.embedder_config = embedding_functions.HuggingFaceEmbeddingServer(
                    url=config.get("api_url"),
                )
            else:
                raise Exception(
                    f"Unsupported embedding provider: {provider}, supported providers: [openai, azure, ollama, vertexai, google, cohere, huggingface]"
                )
        else:
            validate_embedding_function(self.embedder_config)  # type: ignore # used for validating embedder_config if defined a embedding function/class
            self.embedder_config = self.embedder_config
        configurator = EmbeddingConfigurator()
        self.embedder_config = configurator.configure_embedder(self.embedder_config)

    def _initialize_app(self):
        import chromadb

@@ -127,7 +60,7 @@ class RAGStorage(BaseRAGStorage):

        self._set_embedder_config()
        chroma_client = chromadb.PersistentClient(
            path=f"{db_storage_path()}/{self.type}/{self.agents}",
            path=self.path if self.path else f"{db_storage_path()}/{self.type}/{self.agents}",
            settings=Settings(allow_reset=self.allow_reset),
        )

@@ -211,8 +144,10 @@ class RAGStorage(BaseRAGStorage):
        )

    def _create_default_embedding_function(self):
        import chromadb.utils.embedding_functions as embedding_functions
        from chromadb.utils.embedding_functions.openai_embedding_function import (
            OpenAIEmbeddingFunction,
        )

        return embedding_functions.OpenAIEmbeddingFunction(
        return OpenAIEmbeddingFunction(
            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
        )
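After this refactor, provider selection lives in `EmbeddingConfigurator`, so `RAGStorage` only forwards the configuration dict. A construction sketch (not part of the diff; the config shape mirrors the removed branches above):

    from crewai.memory.storage.rag_storage import RAGStorage

    storage = RAGStorage(
        type="short_term",
        embedder_config={"provider": "openai", "config": {"model": "text-embedding-3-small"}},
        path="./rag_db",   # overrides the default db_storage_path()-based location
    )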
0  src/crewai/memory/user/__init__.py  Normal file
45  src/crewai/memory/user/user_memory.py  Normal file
@@ -0,0 +1,45 @@
from typing import Any, Dict, Optional

from crewai.memory.memory import Memory


class UserMemory(Memory):
    """
    UserMemory class for handling user memory storage and retrieval.
    Inherits from the Memory class and utilizes an instance of a class that
    adheres to the Storage for data storage, specifically working with
    MemoryItem instances.
    """

    def __init__(self, crew=None):
        try:
            from crewai.memory.storage.mem0_storage import Mem0Storage
        except ImportError:
            raise ImportError(
                "Mem0 is not installed. Please install it with `pip install mem0ai`."
            )
        storage = Mem0Storage(type="user", crew=crew)
        super().__init__(storage)

    def save(
        self,
        value,
        metadata: Optional[Dict[str, Any]] = None,
        agent: Optional[str] = None,
    ) -> None:
        # TODO: Change this function since we want to take care of the case where we save memories for the user
        data = f"Remember the details about the user: {value}"
        super().save(data, metadata)

    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ):
        results = super().search(
            query=query,
            limit=limit,
            score_threshold=score_threshold,
        )
        return results

8  src/crewai/memory/user/user_memory_item.py  Normal file
@@ -0,0 +1,8 @@
from typing import Any, Dict, Optional


class UserMemoryItem:
    def __init__(self, data: Any, user: str, metadata: Optional[Dict[str, Any]] = None):
        self.data = data
        self.user = user
        self.metadata = metadata if metadata is not None else {}
@@ -1,5 +1,7 @@
from .annotations import (
    after_kickoff,
    agent,
    before_kickoff,
    cache_handler,
    callback,
    crew,

@@ -26,4 +28,6 @@ __all__ = [
    "llm",
    "cache_handler",
    "pipeline",
    "before_kickoff",
    "after_kickoff",
]
@@ -5,6 +5,16 @@ from crewai import Crew
from crewai.project.utils import memoize


def before_kickoff(func):
    func.is_before_kickoff = True
    return func


def after_kickoff(func):
    func.is_after_kickoff = True
    return func


def task(func):
    func.is_task = True

@@ -99,6 +109,19 @@ def crew(func) -> Callable[..., Crew]:
            self.agents = instantiated_agents
            self.tasks = instantiated_tasks

            return func(self, *args, **kwargs)
            crew = func(self, *args, **kwargs)

        return wrapper
            def callback_wrapper(callback, instance):
                def wrapper(*args, **kwargs):
                    return callback(instance, *args, **kwargs)

                return wrapper

            for _, callback in self._before_kickoff.items():
                crew.before_kickoff_callbacks.append(callback_wrapper(callback, self))
            for _, callback in self._after_kickoff.items():
                crew.after_kickoff_callbacks.append(callback_wrapper(callback, self))

            return crew

        return memoize(wrapper)
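A sketch of the new hooks in a decorated crew class (hypothetical crew; agent and task definitions omitted, so this shows only the hook wiring):

    from crewai.project import CrewBase, after_kickoff, before_kickoff

    @CrewBase
    class ReportCrew:
        @before_kickoff
        def inject_inputs(self, inputs):
            inputs["run_id"] = "demo-001"   # enrich inputs before the crew starts
            return inputs

        @after_kickoff
        def log_result(self, result):
            print("crew finished:", result)
            return result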
Some files were not shown because too many files have changed in this diff.