Compare commits
86 Commits
SHA1: 464dfc4e67, 1c7f9826b4, e397a49c23, 8c925237e7, 0593d52b91, 7b7d714109, e9aa87f62b, 8f5d735b2f,
e24f4867df, ef024ca106, 4c519d9d98, 94cb96b288, 108a0d36b7, efb097a76b, af03042852, 21667bc7e1,
19b6c15fff, 3ef502024d, e55cee7372, b72eb838c2, b21191dd55, 76b17a8d04, e97d1a0cf8, c875d887b7,
44d9cbca81, 6e399101fd, e8e3617ba6, 45fa30c007, 15768d9c4d, a1fcaa398c, 871643d98d, 91659d6488,
0076ea7bff, e79da7bc05, 00206a62ab, d0b0a33be3, 6ea21e95b6, c226dafd0d, d4c21a23f4, b76ae5b921,
b48e5af9a0, d36c2a74cb, a1e0596450, 596e243374, 326ad08ba2, f63d4edbb4, 0057ed6786, 44b6bcbcaa,
a45c82c5f7, 98133a4eb6, 44c2fd223d, fc249eefda, 1a1eb4e7aa, 723fdc6245, 43a47b8bdf, ab5647145f,
856981e0ed, 09bec0e28b, 2f0bf3b325, 51278424c1, bfe26de026, db100439cb, c37f54c86f, e0262d9712,
63fb5a22be, 05dda59cf6, 5628bcca78, 6042d9a7d8, 144239394d, d712ee8451, a8c1348235, 148d9202bf,
44442e6407, c78237cb86, 8fc0f33dd5, 2010702880, 29c31a2404, cd77981102, 4f78d1e29c, 5be79454c3,
d8c14ff31e, 9e1be4ecd2, 327d5c3a53, 852ca21e38, 23a549ac65, 3e9630afe8
BIN: 62 new binary files under .cache/plugin/social/:
0b649b356e60b558dfaafe8bb095862e.png (28 KiB), 0cce129b2747506603c430fd3fe2b3d6.png (36 KiB), 0f18d6e26b8551d3f42ef92b0f786024.png (37 KiB), 14c48b40955d6021b47ae973d9aef723.png (27 KiB), 17484ad7f45b09a1db146ba3ad3df79a.png (42 KiB), 1d935acb34360e4768e35ae13479bbf9.png (44 KiB), 216220c022e734cc7999210b48c9fb59.png (45 KiB), 246dcba6c47283feac354f5871842fe8.png (48 KiB), 259ba94ac7e93bd9f968c57ec4a15fe5.png (35 KiB), 288fd82ce2209be4864d19bd50b21474.png (23 KiB), 28a844df4871a1cdfcba05fdc87bb3e8.png (43 KiB), 40770a96ef2fb657a7aa16a9facf702f.png (39 KiB), 4747e68a5e5c0f0994cdc5b37682a37c.png (30 KiB), 4809f4ae19b6e78539b900da82d8a1f6.png (27 KiB), 481b171eb3fe3dec67ca86d2d923f598.png (24 KiB), 4ae47a8f7da894db700b2f29242cd0c5.png (44 KiB), 4c1fb3bfd02d6b1317779fe5101058a7.png (25 KiB), 56e240bc0124af182495bc59877d8d11.png (49 KiB), 5d2431971fcde0af2c84e4680a4227a7.png (18 KiB), 69bcd9a2304ea69e1244a7ac510dd98d.png (35 KiB), 6b49f5ef597c15cabc3df9bac4fbcf44.png (34 KiB), 7296e2d6c7b2c713ed7b2e4546e3acdb.png (42 KiB), 805d7c5662a45ca18b52554eecbc34af.png (30 KiB), 80f1492950494de7a34a1f20f6dd4368.png (30 KiB), 834ad7f8096fa4c92637b815777bf2bd.png (33 KiB), 8b089bdf12d22c016f481d654be39eb1.png (39 KiB), 96f1c198bf51f822eb04a25adf7ca20c.png (39 KiB), 9f88e9bd3010b149e527e0600c2e438c.png (45 KiB),
Roboto-Black.ttf, Roboto-BlackItalic.ttf, Roboto-Bold.ttf, Roboto-BoldItalic.ttf, Roboto-Italic.ttf, Roboto-Light.ttf, Roboto-LightItalic.ttf, Roboto-Medium.ttf, Roboto-MediumItalic.ttf, Roboto-Regular.ttf, Roboto-Thin.ttf, Roboto-ThinItalic.ttf,
a0c21e9a7250afebc533da92c7050bed.png (34 KiB), a19c79f0bc7a3e5ffc6b511a68273e5d.png (44 KiB), a1d83c5e1feb928b579ad122a8d3786d.png (52 KiB), a3d8476a7b5c6630a5f91aed8c210173.png (40 KiB), ac9c4b6558565d4c349355101e95c74a.png (29 KiB), b417e4353162a563e70f1350a2777e2c.png (40 KiB), b84a1e5d0534be3c31f04a7d4a98b515.png (29 KiB), bca675d7c3c82f52ebd329487fb9ade1.png (40 KiB), bdf46ef3b5230ebb45ef648933f54fa2.png (47 KiB), beacb748aad822c66a972b39186dbef1.png (17 KiB), caa7abb72303dbe5a02ec11e6f1eba6b.png (18 KiB), cff5eb5aae0959e143c12945428558bc.png (21 KiB), d01b95e8266a0d2c5f825b88d98a97a1.png (55 KiB), d7db21df76b132d3ca3ae4313e23f77d.png (29 KiB), d87db72302152f8c0953d7105c28a206.png (36 KiB), e580fe32a1d3f15fc89057d053ae3e52.png (39 KiB), e9111c93e01f7c1dfec7bbab69843076.png (28 KiB), ebf70df39c2bfd2c4a89d70846a516ff.png (44 KiB), ed5690e7952bdee0372c8d3f1f5d98d7.png (39 KiB), f6d08b81ae945faa6c4a436de48d2da6.png (28 KiB), f875c8d6b0cd71d9ae38300c82361d77.png (37 KiB), fc9a9f44881519178d4000f24000ef9d.png (33 KiB)
.github/workflows/black.yml (vendored): 4 changed lines

```
@@ -1,10 +1,10 @@
name: Lint

on: [push, pull_request]
on: [pull_request]

jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: psf/black@stable
      - uses: psf/black@stable
```
.github/workflows/mkdocs.yml (vendored): 15 changed lines

```
@@ -1,6 +1,7 @@
name: Deploy MkDocs

on:
  workflow_dispatch:
  push:
    branches:
      - main
@@ -21,11 +22,23 @@ jobs:
        with:
          python-version: '3.10'

      - name: Calculate requirements hash
        id: req-hash
        run: echo "::set-output name=hash::$(sha256sum requirements-doc.txt | awk '{print $1}')"

      - name: Setup cache
        uses: actions/cache@v3
        with:
          key: mkdocs-material-${{ steps.req-hash.outputs.hash }}
          path: .cache
          restore-keys: |
            mkdocs-material-

      - name: Install Requirements
        run: |
          sudo apt-get update &&
          sudo apt-get install pngquant &&
          pip install mkdocs-material
          pip install mkdocs-material mkdocs-material-extensions pillow cairosvg

        env:
          GH_TOKEN: ${{ secrets.GH_TOKEN }}
```
.github/workflows/tests.yml (vendored): 2 changed lines

```
@@ -1,6 +1,6 @@
name: Run Tests

on: [push, pull_request]
on: [pull_request]

permissions:
  contents: write
```
.github/workflows/type-checker.yml (vendored, new file): +30 lines

```
@@ -0,0 +1,30 @@

name: Run Type Checks

on: [pull_request]

permissions:
  contents: write

jobs:
  type-checker:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.10'

      - name: Install Requirements
        run: |
          sudo apt-get update &&
          pip install poetry &&
          poetry lock &&
          poetry install

      - name: Run type checks
        run: poetry run pyright
```
.gitignore (vendored): 2 changed lines

```
@@ -5,4 +5,4 @@ dist/
.env
assets/*
.idea
test.py
test/
```
README.md: 174 changed lines

@@ -1,18 +1,36 @@
# crewAI
<div align="center">




🤖 Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks.
# **crewAI**

- [Why CrewAI](#why-crewai)
🤖 **crewAI**: Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks.

<h3>

[Homepage](https://www.crewai.io/) | [Documentation](https://docs.crewai.com/) | [Chat with Docs](https://chatg.pt/DWjSBZn) | [Examples](https://github.com/joaomdmoura/crewai-examples) | [Discord](https://discord.com/invite/X4JWnZnxPb)

</h3>

[](https://github.com/joaomdmoura/crewAI)
[](https://opensource.org/licenses/MIT)

</div>

## Table of contents

- [Why CrewAI?](#why-crewai)
- [Getting Started](#getting-started)
- [Key Features](#key-features)
- [Examples](#examples)
- [Local Open Source Models](#local-open-source-models)
- [CrewAI x AutoGen x ChatDev](#how-crewai-compares)
- [Quick Tutorial](#quick-tutorial)
- [Trip Planner](#trip-planner)
- [Stock Analysis](#stock-analysis)
- [Connecting Your Crew to a Model](#connecting-your-crew-to-a-model)
- [How CrewAI Compares](#how-crewai-compares)
- [Contribution](#contribution)
- [💬 CrewAI Discord Community](https://discord.com/invite/X4JWnZnxPb)
- [Hire Consulting](#hire-consulting)
- [Hire CrewAI](#hire-crewai)
- [Telemetry](#telemetry)
- [License](#license)

## Why CrewAI?
@@ -20,41 +38,38 @@
The power of AI collaboration has too much to offer.
CrewAI is designed to enable AI agents to assume roles, share goals, and operate in a cohesive unit - much like a well-oiled crew. Whether you're building a smart assistant platform, an automated customer service ensemble, or a multi-agent research team, CrewAI provides the backbone for sophisticated multi-agent interactions.

- 🤖 [Talk with the Docs](https://chatg.pt/DWjSBZn)
- 📄 [Documentation Wiki](https://github.com/joaomdmoura/CrewAI/wiki)

## Getting Started

To get started with CrewAI, follow these simple steps:

1. **Installation**:
### 1. Installation

```shell
pip install crewai
```

The example below also uses duckduckgo, so also install that
The example below also uses DuckDuckGo's Search. You can install it with `pip` too:

```shell
pip install duckduckgo-search
```

2. **Setting Up Your Crew**:
### 2. Setting Up Your Crew

```python
import os
from crewai import Agent, Task, Crew, Process

os.environ["OPENAI_API_KEY"] = "YOUR KEY"
os.environ["OPENAI_API_KEY"] = "YOUR_API_KEY"

# You can choose to use a local model through Ollama for example.
#
# from langchain.llms import Ollama
# You can choose to use a local model through Ollama for example. See ./docs/how-to/llm-connections.md for more information.
# from langchain_community.llms import Ollama
# ollama_llm = Ollama(model="openhermes")

# Install duckduckgo-search for this example:
# !pip install -U duckduckgo-search

from langchain.tools import DuckDuckGoSearchRun
from langchain_community.tools import DuckDuckGoSearchRun
search_tool = DuckDuckGoSearchRun()

# Define your agents with roles and goals
@@ -63,24 +78,26 @@ researcher = Agent(
  goal='Uncover cutting-edge developments in AI and data science',
  backstory="""You work at a leading tech think tank.
  Your expertise lies in identifying emerging trends.
  You have a knack for dissecting complex data and presenting
  actionable insights.""",
  You have a knack for dissecting complex data and presenting actionable insights.""",
  verbose=True,
  allow_delegation=False,
  tools=[search_tool]
  # You can pass an optional llm attribute specifying what mode you wanna use.
  # It can be a local model through Ollama / LM Studio or a remote
  # model like OpenAI, Mistral, Antrophic of others (https://python.langchain.com/docs/integrations/llms/)
  # model like OpenAI, Mistral, Antrophic or others (https://python.langchain.com/docs/integrations/llms/)
  #
  # Examples:
  #
  # from langchain_community.llms import Ollama
  # llm=ollama_llm # was defined above in the file
  #
  # from langchain_openai import ChatOpenAI
  # llm=ChatOpenAI(model_name="gpt-3.5", temperature=0.7)
)
writer = Agent(
  role='Tech Content Strategist',
  goal='Craft compelling content on tech advancements',
  backstory="""You are a renowned Content Strategist, known for
  your insightful and engaging articles.
  backstory="""You are a renowned Content Strategist, known for your insightful and engaging articles.
  You transform complex concepts into compelling narratives.""",
  verbose=True,
  allow_delegation=True,
@@ -118,64 +135,48 @@ print("######################")
print(result)
```

Currently the only supported process is `Process.sequential`, where one task is executed after the other and the outcome of one is passed as extra content into this next.
In addition to the sequential process, you can use the hierarchical process, which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results. [See more about the processes here](https://docs.crewai.com/core-concepts/Processes/).
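The snippet below is an illustrative sketch of that hierarchical option, not part of the diff itself; the agent and task names are placeholders, and `manager_llm` follows the crew attribute described in `docs/core-concepts/Crews.md` later in this changeset:

```python
# Hedged sketch: same idea as the example above, but coordinated by a manager agent.
# Placeholder agents/tasks; the GPT-4 choice for the manager is an assumption.
from langchain_openai import ChatOpenAI
from crewai import Crew, Process

hierarchical_crew = Crew(
  agents=[researcher, writer],
  tasks=[task1, task2],
  process=Process.hierarchical,  # a manager plans, delegates and validates the tasks
  manager_llm=ChatOpenAI(model_name="gpt-4", temperature=0.7)
)

result = hierarchical_crew.kickoff()
print(result)
```
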
## Key Features

- **Role-Based Agent Design**: Customize agents with specific roles, goals, and tools.
- **Autonomous Inter-Agent Delegation**: Agents can autonomously delegate tasks and inquire amongst themselves, enhancing problem-solving efficiency.
- **Flexible Task Management**: Define tasks with customizable tools and assign them to agents dynamically.
- **Processes Driven**: Currently only supports `sequential` task execution but more complex processes like consensual and hierarchical being worked on.
- **Processes Driven**: Currently only supports `sequential` task execution and `hierarchical` processes, but more complex processes like consensual and autonomous are being worked on.
- **Works with Open Source Models**: Run your crew using OpenAI or open-source models; refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models, even ones running locally!



## Examples
You can test different real life examples of AI crews [in the examples repo](https://github.com/joaomdmoura/crewAI-examples?tab=readme-ov-file)

### Code
You can test different real-life examples of AI crews in the [crewAI-examples repo](https://github.com/joaomdmoura/crewAI-examples?tab=readme-ov-file):

- [Landing Page Generator](https://github.com/joaomdmoura/crewAI-examples/tree/main/landing_page_generator)
- [Having Human input on the execution](https://docs.crewai.com/how-to/Human-Input-on-Execution)
- [Trip Planner](https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner)
- [Stock Analysis](https://github.com/joaomdmoura/crewAI-examples/tree/main/stock_analysis)
- [Landing Page Generator](https://github.com/joaomdmoura/crewAI-examples/tree/main/landing_page_generator)
- [Having Human input on the execution](https://github.com/joaomdmoura/crewAI/wiki/Human-Input-on-Execution)

### Video
#### Quick Tutorial
[](https://www.youtube.com/watch?v=tnejrr-0a94 "CrewAI Tutorial")
### Quick Tutorial

#### Trip Planner
[](https://www.youtube.com/watch?v=xis7rWp-hjs "Trip Planner")
[](https://www.youtube.com/watch?v=tnejrr-0a94 "CrewAI Tutorial")

#### Stock Analysis
[](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")
### Trip Planner

## Local Open Source Models
crewAI supports integration with local models, through tools such as [Ollama](https://ollama.ai/), for enhanced flexibility and customization. This allows you to utilize your own models, which can be particularly useful for specialized tasks or data privacy concerns.
[Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner) or watch a video below:

### Setting Up Ollama
- **Install Ollama**: Ensure that Ollama is properly installed in your environment. Follow the installation guide provided by Ollama for detailed instructions.
- **Configure Ollama**: Set up Ollama to work with your local model. You will probably need to [tweak the model using a Modelfile](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md). I'd recommend adding `Observation` as a stop word and playing with `top_p` and `temperature`.
[](https://www.youtube.com/watch?v=xis7rWp-hjs "Trip Planner")

### Integrating Ollama with CrewAI
- Instantiate Ollama Model: Create an instance of the Ollama model. You can specify the model and the base URL during instantiation. For example:
### Stock Analysis

```python
from langchain.llms import Ollama
ollama_openhermes = Ollama(model="openhermes")
# Pass Ollama Model to Agents: When creating your agents within the CrewAI framework, you can pass the Ollama model as an argument to the Agent constructor. For instance:
[Check out code for this example](https://github.com/joaomdmoura/crewAI-examples/tree/main/stock_analysis) or watch a video below:

local_expert = Agent(
  role='Local Expert at this city',
  goal='Provide the BEST insights about the selected city',
  backstory="""A knowledgeable local guide with extensive information
  about the city, it's attractions and customs""",
  tools=[
    SearchTools.search_internet,
    BrowserTools.scrape_and_summarize_website,
  ],
  llm=ollama_openhermes, # Ollama model passed here
  verbose=True
)
```
[](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")

## Connecting Your Crew to a Model

crewAI supports using various LLMs through a variety of connection options. By default your agents will use the OpenAI API when querying the model. However, there are several other ways to allow your agents to connect to models. For example, you can configure your agents to use a local model via the Ollama tool.

Please refer to the [Connect crewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models.

## How CrewAI Compares

@@ -196,12 +197,14 @@ CrewAI is open-source and we welcome contributions. If you're looking to contrib
- We appreciate your input!

### Installing Dependencies

```bash
poetry lock
poetry install
```

### Virtual Env

```bash
poetry shell
```
@@ -213,25 +216,64 @@ pre-commit install
```

### Running Tests

```bash
poetry run pytest
```

### Running static type checks

```bash
poetry run pyright
```

### Packaging

```bash
poetry build
```

### Installing Locally

```bash
pip install dist/*.tar.gz
```

## Hire Consulting
I, [@joaomdmoura](https://github.com/joaomdmoura) (creator of crewAI), offer consulting through my LLC ([AI Nest Labs](https://ainestlabs.com)).
If you are interested in hiring weekly hours with me on a retainer, feel free to email me at [joao@ainestlabs.com](mailto:joao@ainestlabs.com)
## Hire CrewAI

We are the company developing crewAI and crewAI Enterprise. For a limited time, we are offering consulting to selected customers to give them early access to our enterprise solution.
If you are interested in having access to it and hiring weekly hours with our team, feel free to email us at [joao@crewai.com](mailto:joao@crewai.com).

## Telemetry

CrewAI uses anonymous telemetry to collect usage data with the main purpose of helping us improve the library by focusing our efforts on the most used features, integrations and tools.

There is NO data being collected on the prompts, task descriptions, agents' backstories or goals, tool usage, API calls, responses, any data processed by the agents, or any secrets and env vars.

Data collected includes:
- Version of crewAI
  - So we can understand how many users are using the latest version
- Version of Python
  - So we can decide on what versions to better support
- General OS (e.g. number of CPUs, macOS/Windows/Linux)
  - So we know what OS we should focus on and if we could build specific OS-related features
- Number of agents and tasks in a crew
  - So we make sure we are testing internally with similar use cases and educate people on the best practices
- Crew Process being used
  - Understand where we should focus our efforts
- If Agents are using memory or allowing delegation
  - Understand if we improved the features or maybe even drop them
- If Tasks are being executed in parallel or sequentially
  - Understand if we should focus more on parallel execution
- Language model being used
  - Improved support on most used languages
- Roles of agents in a crew
  - Understand high level use cases so we can build better tools, integrations and examples about it
- Tool names available
  - Understand, out of the publicly available tools, which ones are being used the most so we can improve them

Users can opt in to sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews.
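As a minimal illustration of that opt-in (not part of the diff; the agents and tasks are placeholders):

```python
# Hedged sketch: enable full telemetry sharing on a crew.
my_crew = Crew(
  agents=[researcher, writer],        # placeholders
  tasks=[research_task, write_task],
  share_crew=True                     # opt in to sharing complete crew data
)
```
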
## License
CrewAI is released under the MIT License

CrewAI is released under the MIT License.
docs/core-concepts/Agents.md (new file): +63 lines

@@ -0,0 +1,63 @@
---
title: crewAI Agents
description: What are crewAI Agents and how to use them.
---

## What is an Agent?
!!! note "What is an Agent?"
    An agent is an **autonomous unit** programmed to:
    <ul>
      <li class='leading-3'>Perform tasks</li>
      <li class='leading-3'>Make decisions</li>
      <li class='leading-3'>Communicate with other agents</li>
    <br/>
    Think of an agent as a member of a team, with specific skills and a particular job to do. Agents can have different roles like 'Researcher', 'Writer', or 'Customer Support', each contributing to the overall goal of the crew.

## Agent Attributes

| Attribute | Description |
| :---------- | :----------------------------------- |
| **Role** | Defines the agent's function within the crew. It determines the kind of tasks the agent is best suited for. |
| **Goal** | The individual objective that the agent aims to achieve. It guides the agent's decision-making process. |
| **Backstory** | Provides context to the agent's role and goal, enriching the interaction and collaboration dynamics. |
| **LLM** | The language model used by the agent to process and generate text. |
| **Tools** | Set of capabilities or functions that the agent can use to perform tasks. Tools can be shared or exclusive to specific agents. |
| **Function Calling LLM** | The language model this agent uses to call functions; if none is passed, the agent's main LLM is used. |
| **Max Iter** | The maximum number of iterations the agent can perform before being forced to give its best answer. |
| **Max RPM** | The maximum number of requests per minute the agent can perform, to avoid rate limits. |
| **Verbose** | Allows you to see what is going on during crew execution. |
| **Allow Delegation** | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. |
| **Step Callback** | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will override the crew `step_callback`. |

## Creating an Agent

!!! note "Agent Interaction"
    Agents can interact with each other using CrewAI's built-in delegation and communication mechanisms.<br/>This allows for dynamic task management and problem-solving within the crew.

To create an agent, you would typically initialize an instance of the `Agent` class with the desired properties. Here's a conceptual example:

```python
# Example: Creating an agent with all attributes
from crewai import Agent

agent = Agent(
  role='Data Analyst',
  goal='Extract actionable insights',
  backstory="""You're a data analyst at a large company.
  You're responsible for analyzing data and providing insights
  to the business.
  You're currently working on a project to analyze the
  performance of our marketing campaigns.""",
  tools=[my_tool1, my_tool2],
  llm=my_llm,
  function_calling_llm=my_llm,
  max_iter=10,
  max_rpm=10,
  verbose=True,
  allow_delegation=True,
  step_callback=my_intermediate_step_callback
)
```

## Conclusion
Agents are the building blocks of the CrewAI framework. By understanding how to define and interact with agents, you can create sophisticated AI systems that leverage the power of collaborative intelligence.
docs/core-concepts/Collaboration.md (new file): +24 lines

@@ -0,0 +1,24 @@
---
title: How Agents Collaborate in CrewAI
description: Exploring the dynamics of agent collaboration within the CrewAI framework.
---

## Collaboration Fundamentals
!!! note "Core of Agent Interaction"
    Collaboration in CrewAI is fundamental, enabling agents to combine their skills, share information, and assist each other in task execution, embodying a truly cooperative ecosystem.

- **Information Sharing**: Ensures all agents are well-informed and can contribute effectively by sharing data and findings.
- **Task Assistance**: Allows agents to seek help from peers with the required expertise for specific tasks.
- **Resource Allocation**: Optimizes task execution through the efficient distribution and sharing of resources among agents.

## Delegation: Dividing to Conquer
Delegation enhances functionality by allowing agents to intelligently assign tasks or seek help, thereby amplifying the crew's overall capability.

## Implementing Collaboration and Delegation
Setting up a crew involves defining the roles and capabilities of each agent. CrewAI seamlessly manages their interactions, ensuring efficient collaboration and delegation.

## Example Scenario
Imagine a crew with a researcher agent tasked with data gathering and a writer agent responsible for compiling reports. The writer can delegate research tasks or ask questions to the researcher, facilitating a seamless workflow.
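A hedged sketch of that scenario (illustrative only, not part of the new file; the role and task wording here are made up):

```python
from crewai import Agent, Task, Crew, Process

researcher = Agent(
  role='Researcher',
  goal='Gather reliable data on the assigned subject',
  backstory='A meticulous analyst who tracks down primary sources.'
)

writer = Agent(
  role='Writer',
  goal='Compile a clear report from the gathered data',
  backstory='A concise technical writer.',
  allow_delegation=True  # lets the writer hand questions or subtasks to the researcher
)

report_task = Task(
  description='Write a short report on the subject, delegating any missing research to the researcher.',
  agent=writer
)

crew = Crew(agents=[researcher, writer], tasks=[report_task], process=Process.sequential)
result = crew.kickoff()
```
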
## Conclusion
Collaboration and delegation are pivotal, transforming individual AI agents into a coherent, intelligent crew capable of tackling complex tasks. CrewAI's framework not only simplifies these interactions but enhances their effectiveness, paving the way for sophisticated AI-driven solutions.
docs/core-concepts/Crews.md (new file): +81 lines

@@ -0,0 +1,81 @@
---
title: crewAI Crews
description: Understanding and utilizing crews in the crewAI framework.
---

## What is a Crew?
!!! note "Definition of a Crew"
    A crew in crewAI represents a collaborative group of agents working together to achieve a set of tasks. Each crew defines the strategy for task execution, agent collaboration, and the overall workflow.

## Crew Attributes

| Attribute | Description |
| :------------------- | :----------------------------------------------------------- |
| **Tasks** | A list of tasks assigned to the crew. |
| **Agents** | A list of agents that are part of the crew. |
| **Process** | The process flow (e.g., sequential, hierarchical) the crew follows. |
| **Verbose** | The verbosity level for logging during execution. |
| **Manager LLM** | The language model used by the manager agent in a hierarchical process. |
| **Function Calling LLM** | The language model used by all agents in the crew to call functions; if none is passed, each agent's main LLM is used. |
| **Config** | Configuration settings for the crew. |
| **Max RPM** | Maximum requests per minute the crew adheres to during execution. |
| **Language** | Language setting for the crew's operation. |
| **Full Output** | Whether the crew should return the full output with all task outputs or just the final output. |
| **Step Callback** | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
| **Share Crew** | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |


!!! note "Crew Max RPM"
    The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits, and will override individual agents' `max_rpm` settings if you set it.
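For illustration only (not part of the new file; the agents and tasks named here are placeholders defined in the example further down):

```python
# Hedged sketch: crew-level cap of 10 requests per minute,
# overriding any per-agent max_rpm values.
my_crew = Crew(
  agents=[researcher, writer],
  tasks=[research_task, write_article_task],
  max_rpm=10
)
```
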
## Creating a Crew

!!! note "Crew Composition"
    When assembling a crew, you combine agents with complementary roles and tools, assign tasks, and select a process that dictates their execution order and interaction.

### Example: Assembling a Crew

```python
from crewai import Crew, Agent, Task, Process
from langchain_community.tools import DuckDuckGoSearchRun

# Define agents with specific roles and tools
researcher = Agent(
  role='Senior Research Analyst',
  goal='Discover innovative AI technologies',
  tools=[DuckDuckGoSearchRun()]
)

writer = Agent(
  role='Content Writer',
  goal='Write engaging articles on AI discoveries'
)

# Create tasks for the agents
research_task = Task(description='Identify breakthrough AI technologies', agent=researcher)
write_article_task = Task(description='Draft an article on the latest AI technologies', agent=writer)

# Assemble the crew with a sequential process
my_crew = Crew(
  agents=[researcher, writer],
  tasks=[research_task, write_article_task],
  process=Process.sequential,
  full_output=True,
  verbose=True
)
```

## Crew Execution Process

- **Sequential Process**: Tasks are executed one after another, allowing for a linear flow of work.
- **Hierarchical Process**: A manager agent coordinates the crew, delegating tasks and validating outcomes before proceeding.

### Kicking Off a Crew

Once your crew is assembled, initiate the workflow with the `kickoff()` method. This starts the execution process according to the defined process flow.

```python
# Start the crew's task execution
result = my_crew.kickoff()
print(result)
```
docs/core-concepts/Processes.md (new file): +48 lines

@@ -0,0 +1,48 @@
---
title: Managing Processes in CrewAI
description: An overview of workflow management through processes in CrewAI.
---

## Understanding Processes
!!! note "Core Concept"
    Processes in CrewAI orchestrate how tasks are executed by agents, akin to project management in human teams. They ensure tasks are distributed and completed efficiently, according to a predefined game plan.

## Process Implementations

- **Sequential**: Executes tasks one after another, ensuring a linear and orderly progression.
- **Hierarchical**: Implements a chain of command, where tasks are delegated and executed based on a managerial structure.
- **Consensual (WIP)**: Future process type aiming for collaborative decision-making among agents on task execution.

## The Role of Processes in Teamwork
Processes transform individual agents into a unified team, coordinating their efforts to achieve common goals with efficiency and harmony.

## Assigning Processes to a Crew
Specify the process during crew creation to determine the execution strategy:

```python
from crewai import Crew
from crewai.process import Process

# Example: Creating a crew with a sequential process
crew = Crew(agents=my_agents, tasks=my_tasks, process=Process.sequential)

# Example: Creating a crew with a hierarchical process
crew = Crew(agents=my_agents, tasks=my_tasks, process=Process.hierarchical)
```

## Sequential Process
Ensures a natural flow of work, mirroring human team dynamics by progressing through tasks thoughtfully and systematically.

Tasks need to be pre-assigned to agents, and the order of execution is determined by the order of the tasks in the list.

Tasks are executed one after another, ensuring a linear and orderly progression, and the output of one task is automatically used as context for the next task.

You can also define specific tasks' outputs that should be used as context for another task by using the `context` parameter in the `Task` class.
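A brief illustrative sketch of that wiring (not part of the new file; the tasks and agents are placeholders):

```python
from crewai import Task

gather_task = Task(
  description='Gather the raw figures for the report',
  agent=researcher  # placeholder agent
)

summary_task = Task(
  description='Summarize the gathered figures',
  agent=writer,          # placeholder agent
  context=[gather_task]  # only this task's output is fed in as context
)
```
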
## Hierarchical Process
Mimics a corporate hierarchy, where a manager oversees task execution, planning, delegation, and validation, enhancing task coordination.

In this process, tasks don't need to be pre-assigned to agents; the manager decides which agent will perform each task, reviews the output, and decides whether the task is completed.

## Conclusion
Processes are vital for structured collaboration within CrewAI, enabling agents to work together systematically. Future updates will introduce new processes, further mimicking the adaptability and complexity of human teamwork.
docs/core-concepts/Tasks.md (new file): +215 lines

@@ -0,0 +1,215 @@
---
title: crewAI Tasks
description: Overview and management of tasks within the crewAI framework.
---

## Overview of a Task
!!! note "What is a Task?"
    In the CrewAI framework, tasks are individual assignments that agents complete. They encapsulate necessary information for execution, including a description, assigned agent, and required tools, offering flexibility for various action complexities.

Tasks in CrewAI can be designed to require collaboration between agents. For example, one agent might gather data while another analyzes it. This collaborative approach can be defined within the task properties and managed by the Crew's process.

## Task Attributes

| Attribute | Description |
| :---------- | :----------------------------------- |
| **Description** | A clear, concise statement of what the task entails. |
| **Agent** | Optionally, you can specify which agent is responsible for the task. If not, the crew's process will determine who takes it on. |
| **Expected Output** *(optional)* | Clear and detailed definition of the expected output for the task. |
| **Tools** *(optional)* | These are the functions or capabilities the agent can utilize to perform the task. They can be anything from simple actions like 'search' to more complex interactions with other agents or APIs. |
| **Async Execution** *(optional)* | Whether the task should be executed asynchronously. |
| **Context** *(optional)* | Other tasks whose output will be used as context for this task; if one of them is asynchronous, this task will wait for it to finish. |
| **Output JSON** *(optional)* | Takes a pydantic model and returns the output as a JSON object. **The agent's LLM needs to use the OpenAI client; it could be Ollama, for example, but through the OpenAI wrapper.** |
| **Output Pydantic** *(optional)* | Takes a pydantic model and returns the output as a pydantic object. **The agent's LLM needs to use the OpenAI client; it could be Ollama, for example, but through the OpenAI wrapper.** |
| **Output File** *(optional)* | Takes a file path and saves the output of the task to it. |
| **Callback** *(optional)* | A function to be executed after the task is completed. |
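As a hedged illustration of the three output attributes above (not part of the new file; the snake_case argument names `output_json`, `output_pydantic`, and `output_file` are assumptions inferred from the table):

```python
from pydantic import BaseModel
from crewai import Task

class NewsSummary(BaseModel):
  headline: str
  takeaway: str

summary_task = Task(
  description='Summarize the single most important AI news item',
  agent=research_agent,         # placeholder agent
  output_pydantic=NewsSummary,  # assumed kwarg: parse the result into a NewsSummary
  output_file='summary.md'      # assumed kwarg: also write the result to disk
)
```
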
## Creating a Task

This is the simplest example for creating a task; it involves defining its scope and agent, but there are optional attributes that can provide a lot of flexibility:

```python
from crewai import Task

task = Task(
  description='Find and summarize the latest and most relevant news on AI',
  agent=sales_agent
)
```
!!! note "Task Assignment"
    Tasks can be assigned directly by specifying an `agent` for them, or they can be assigned at run time if you are using the `hierarchical` process, considering roles, availability, or other criteria.

## Integrating Tools with Tasks

Tools from the [crewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools) enhance task performance, allowing agents to interact more effectively with their environment. Assigning specific tools to tasks can tailor agent capabilities to particular needs.

## Creating a Task with Tools

```python
import os
os.environ["OPENAI_API_KEY"] = "Your Key"

from crewai import Agent, Task, Crew
from langchain.agents import Tool
from langchain_community.tools import DuckDuckGoSearchRun

research_agent = Agent(
  role='Researcher',
  goal='Find and summarize the latest AI news',
  backstory="""You're a researcher at a large company.
  You're responsible for analyzing data and providing insights
  to the business.""",
  verbose=True
)

# Install duckduckgo-search for this example:
# !pip install -U duckduckgo-search
search_tool = DuckDuckGoSearchRun()

task = Task(
  description='Find and summarize the latest AI news',
  expected_output='A bullet list summary of the top 5 most important AI news',
  agent=research_agent,
  tools=[search_tool]
)

crew = Crew(
  agents=[research_agent],
  tasks=[task],
  verbose=2
)

result = crew.kickoff()
print(result)
```

This demonstrates how tasks with specific tools can override an agent's default set for tailored task execution.

## Referring to other Tasks

In crewAI the output of one task is automatically relayed into the next one, but you can specifically define which tasks' outputs should be used as context for another task.

This is useful when you have a task that depends on the output of another task that is not performed immediately after it. This is done through the `context` attribute of the task:

```python
# ...

research_task = Task(
  description='Find and summarize the latest AI news',
  expected_output='A bullet list summary of the top 5 most important AI news',
  agent=research_agent,
  tools=[search_tool]
)

write_blog_task = Task(
  description="Write a full blog post about the importance of AI and its latest news",
  expected_output='Full blog post that is 4 paragraphs long',
  agent=writer_agent,
  context=[research_task]
)

#...
```

## Asynchronous Execution

You can define a task to be executed asynchronously. This means that the crew will not wait for it to be completed before continuing with the next task. This is useful for tasks that take a long time to be completed, or that are not crucial for the next tasks to be performed.

You can then use the `context` attribute to define in a future task that it should wait for the output of the asynchronous task to be completed.

```python
#...

list_ideas = Task(
  description="List of 5 interesting ideas to explore for an article about AI.",
  expected_output="Bullet point list of 5 ideas for an article.",
  agent=researcher,
  async_execution=True # Will be executed asynchronously
)

list_important_history = Task(
  description="Research the history of AI and give me the 5 most important events.",
  expected_output="Bullet point list of 5 important events.",
  agent=researcher,
  async_execution=True # Will be executed asynchronously
)

write_article = Task(
  description="Write an article about AI, its history and interesting ideas.",
  expected_output="A 4 paragraph article about AI.",
  agent=writer,
  context=[list_ideas, list_important_history] # Will wait for the output of the two tasks to be completed
)

#...
```

## Callback Mechanism

You can define a callback function that will be executed after the task is completed. This is useful for tasks that need to trigger some side effect after they are completed, while the crew is still running.

```python
# ...

def callback_function(output: TaskOutput):
  # Do something after the task is completed
  # Example: Send an email to the manager
  print(f"""
    Task completed!
    Task: {output.description}
    Output: {output.result}
  """)

research_task = Task(
  description='Find and summarize the latest AI news',
  expected_output='A bullet list summary of the top 5 most important AI news',
  agent=research_agent,
  tools=[search_tool],
  callback=callback_function
)

#...
```

## Accessing a specific Task Output

Once a crew finishes running, you can access the output of a specific task by using the `output` attribute of the task object:

```python
# ...
task1 = Task(
  description='Find and summarize the latest AI news',
  expected_output='A bullet list summary of the top 5 most important AI news',
  agent=research_agent,
  tools=[search_tool]
)

#...

crew = Crew(
  agents=[research_agent],
  tasks=[task1, task2, task3],
  verbose=2
)

result = crew.kickoff()

# Returns a TaskOutput object with the description and results of the task
print(f"""
  Task completed!
  Task: {task1.output.description}
  Output: {task1.output.result}
""")
```

## Tool Override Mechanism

Specifying tools in a task allows for dynamic adaptation of agent capabilities, emphasizing CrewAI's flexibility.

## Conclusion

Tasks are the driving force behind the actions of agents in crewAI. By properly defining tasks and their outcomes, you set the stage for your AI agents to work effectively, either independently or as a collaborative unit.
Equipping tasks with appropriate tools is crucial for maximizing CrewAI's potential, ensuring agents are effectively prepared for their assignments.
docs/core-concepts/Tools.md (new file): +83 lines

@@ -0,0 +1,83 @@
---
title: crewAI Tools
description: Understanding and leveraging tools within the crewAI framework.
---

## What is a Tool?
!!! note "Definition"
    A tool in CrewAI is a skill: something agents can use to perform tasks. Right now these can be tools from the [crewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools): essentially functions that an agent can utilize for various actions, from simple searches to complex interactions with external systems.

## Key Characteristics of Tools

- **Utility**: Designed for specific tasks such as web searching, data analysis, or content generation.
- **Integration**: Enhance agent capabilities by integrating tools directly into their workflow.
- **Customizability**: Offers the flexibility to develop custom tools or use existing ones from LangChain's ecosystem.

## Creating your own Tools
!!! example "Custom Tool Creation"
    Developers can craft custom tools tailored for their agent's needs or utilize pre-built options. Here's how to create one:

```python
import json
import requests
from crewai import Agent
from langchain.tools import tool
from unstructured.partition.html import partition_html

class BrowserTools():

  # Annotate the function with the tool decorator from LangChain
  @tool("Scrape website content")
  def scrape_website(website):
    # Write logic for the tool.
    # In this case a function to scrape website content
    url = f"https://chrome.browserless.io/content?token={config('BROWSERLESS_API_KEY')}"
    payload = json.dumps({"url": website})
    headers = {'cache-control': 'no-cache', 'content-type': 'application/json'}
    response = requests.request("POST", url, headers=headers, data=payload)
    elements = partition_html(text=response.text)
    content = "\n\n".join([str(el) for el in elements])
    return content[:5000]

# Assign the scraping tool to an agent
agent = Agent(
  role='Research Analyst',
  goal='Provide up-to-date market analysis',
  backstory='An expert analyst with a keen eye for market trends.',
  tools=[BrowserTools().scrape_website]
)
```

## Using LangChain Tools
!!! info "LangChain Integration"
    CrewAI seamlessly integrates with LangChain's comprehensive toolkit. Assigning an existing tool to an agent is straightforward:

```python
from crewai import Agent
from langchain.agents import Tool
from langchain.utilities import GoogleSerperAPIWrapper
import os

# Setup API keys
os.environ["OPENAI_API_KEY"] = "Your Key"
os.environ["SERPER_API_KEY"] = "Your Key"

search = GoogleSerperAPIWrapper()

# Create and assign the search tool to an agent
serper_tool = Tool(
  name="Intermediate Answer",
  func=search.run,
  description="Useful for search-based queries",
)

agent = Agent(
  role='Research Analyst',
  goal='Provide up-to-date market analysis',
  backstory='An expert analyst with a keen eye for market trends.',
  tools=[serper_tool]
)
```

## Conclusion
Tools are crucial for extending the capabilities of CrewAI agents, allowing them to undertake a diverse array of tasks and collaborate efficiently. When building your AI solutions with CrewAI, consider both custom and existing tools to empower your agents and foster a dynamic AI ecosystem.
BIN docs/crew_only_logo.png (new file): 94 KiB
docs/how-to/Creating-a-Crew-and-kick-it-off.md (new file): +112 lines

@@ -0,0 +1,112 @@
---
title: Assembling and Activating Your CrewAI Team
description: A step-by-step guide to creating a cohesive CrewAI team for your projects.
---

## Introduction
Embarking on your CrewAI journey involves a few straightforward steps to set up your environment and initiate your AI crew. This guide ensures a seamless start.

## Step 0: Installation
Begin by installing CrewAI and any additional packages required for your project. For instance, the `duckduckgo-search` package is used in this example for enhanced search capabilities.

```shell
pip install crewai
pip install duckduckgo-search
```

## Step 1: Assemble Your Agents
Begin by defining your agents with distinct roles and backstories. These elements not only add depth but also guide their task execution and interaction within the crew.

```python
import os
os.environ["OPENAI_API_KEY"] = "Your Key"

from crewai import Agent

# Topic that will be used in the crew run
topic = 'AI in healthcare'

# Creating a senior researcher agent
researcher = Agent(
  role='Senior Researcher',
  goal=f'Uncover groundbreaking technologies around {topic}',
  verbose=True,
  backstory="""Driven by curiosity, you're at the forefront of
  innovation, eager to explore and share knowledge that could change
  the world."""
)

# Creating a writer agent
writer = Agent(
  role='Writer',
  goal=f'Narrate compelling tech stories around {topic}',
  verbose=True,
  backstory="""With a flair for simplifying complex topics, you craft
  engaging narratives that captivate and educate, bringing new
  discoveries to light in an accessible manner."""
)
```

## Step 2: Define the Tasks
Detail the specific objectives for your agents. These tasks guide their focus and ensure a targeted approach to their roles.

```python
from crewai import Task

# Install duckduckgo-search for this example:
# !pip install -U duckduckgo-search

from langchain_community.tools import DuckDuckGoSearchRun
search_tool = DuckDuckGoSearchRun()

# Research task for identifying AI trends
research_task = Task(
  description=f"""Identify the next big trend in {topic}.
  Focus on identifying pros and cons and the overall narrative.

  Your final report should clearly articulate the key points,
  its market opportunities, and potential risks.
  """,
  expected_output='A comprehensive 3 paragraphs long report on the latest AI trends.',
  max_inter=3,
  tools=[search_tool],
  agent=researcher
)

# Writing task based on research findings
write_task = Task(
  description=f"""Compose an insightful article on {topic}.
  Focus on the latest trends and how it's impacting the industry.
  This article should be easy to understand, engaging and positive.
  """,
  expected_output=f'A 4 paragraph article on {topic} advancements.',
  tools=[search_tool],
  agent=writer
)
```

## Step 3: Form the Crew
Combine your agents into a crew, setting the workflow process they'll follow to accomplish the tasks.

```python
from crewai import Crew, Process

# Forming the tech-focused crew
crew = Crew(
  agents=[researcher, writer],
  tasks=[research_task, write_task],
  process=Process.sequential # Sequential task execution
)
```

## Step 4: Kick It Off
With your crew ready and the stage set, initiate the process. Watch as your agents collaborate, each contributing their expertise to achieve the collective goal.

```python
# Starting the task execution process
result = crew.kickoff()
print(result)
```

## Conclusion
Building and activating a crew in CrewAI is a seamless process. By carefully assigning roles, tasks, and a clear process, your AI team is equipped to tackle challenges efficiently. The depth of agent backstories and the precision of their objectives enrich the collaboration, leading to successful project outcomes.
docs/how-to/Customizing-Agents.md (new file): +65 lines

@@ -0,0 +1,65 @@
---
title: Customizing Agents in CrewAI
description: A guide to tailoring agents for specific roles and tasks within the CrewAI framework.
---

## Customizable Attributes
Tailoring your AI agents is pivotal in crafting an efficient CrewAI team. Customization allows agents to be dynamically adapted to the unique requirements of any project.

### Key Attributes for Customization
- **Role**: Defines the agent's job within the crew, such as 'Analyst' or 'Customer Service Rep'.
- **Goal**: The agent's objective, aligned with its role and the crew's overall goals.
- **Backstory**: Adds depth to the agent's character, enhancing its role and motivations within the crew.
- **Tools**: The capabilities or methods the agent employs to accomplish tasks, ranging from simple functions to complex integrations.

## Understanding Tools in CrewAI
Tools empower agents with functionalities to interact and manipulate their environment, from generic utilities to specialized functions. Integrating with LangChain offers access to a broad range of tools for diverse tasks.

## Customizing Agents and Tools
Agents are customized by defining their attributes during initialization, with tools being a critical aspect of their functionality.

### Example: Assigning Tools to an Agent
```python
from crewai import Agent
from langchain.agents import Tool
from langchain.utilities import GoogleSerperAPIWrapper
import os

# Set API keys for tool initialization
os.environ["OPENAI_API_KEY"] = "Your Key"
os.environ["SERPER_API_KEY"] = "Your Key"

# Initialize a search tool
search_tool = GoogleSerperAPIWrapper()

# Define and assign the tool to an agent
serper_tool = Tool(
  name="Intermediate Answer",
  func=search_tool.run,
  description="Useful for search-based queries"
)

# Initialize the agent with the tool
agent = Agent(
  role='Research Analyst',
  goal='Provide up-to-date market analysis',
  backstory='An expert analyst with a keen eye for market trends.',
  tools=[serper_tool]
)
```

## Delegation and Autonomy
Agents in CrewAI can delegate tasks or ask questions, enhancing the crew's collaborative dynamics. This feature can be disabled to ensure straightforward task execution.

### Example: Disabling Delegation for an Agent
```python
agent = Agent(
  role='Content Writer',
  goal='Write engaging content on market trends',
  backstory='A seasoned writer with expertise in market analysis.',
  allow_delegation=False
)
```

## Conclusion
Customizing agents is key to leveraging the full potential of CrewAI. By thoughtfully setting agents' roles, goals, backstories, and tools, you craft a nuanced and capable AI team ready to tackle complex challenges.
60
docs/how-to/Hierarchical.md
Normal file
@@ -0,0 +1,60 @@
|
||||
---
|
||||
title: Implementing the Hierarchical Process in CrewAI
|
||||
description: Understanding and applying the hierarchical process within your CrewAI projects.
|
||||
---
|
||||
|
||||
## Introduction
|
||||
The hierarchical process in CrewAI introduces a structured approach to task management, mimicking traditional organizational hierarchies for efficient task delegation and execution.
|
||||
|
||||
!!! note "Complexity"
|
||||
The current implementation of the hierarchical process relies on tool usage, which usually requires more capable models like GPT-4 and typically implies higher token usage.
|
||||
|
||||
## Hierarchical Process Overview
|
||||
In this process, tasks are assigned and executed based on a defined hierarchy, where a 'manager' agent coordinates the workflow, delegating tasks to other agents and validating their outcomes before proceeding.
|
||||
|
||||
### Key Features
|
||||
- **Task Delegation**: A manager agent oversees task distribution among crew members.
|
||||
- **Result Validation**: The manager reviews outcomes before passing tasks along, ensuring quality and relevance.
|
||||
- **Efficient Workflow**: Mimics corporate structures for a familiar and organized task management approach.
|
||||
|
||||
## Implementing the Hierarchical Process
|
||||
To utilize the hierarchical process, you must define a crew with a designated manager and a clear chain of command for task execution.
|
||||
|
||||
!!! note "Tools on the hierarchical process"
|
||||
When using tools with the hierarchical process, make sure to assign them to the agents instead of the tasks, as the manager will be delegating the tasks and the agents will be executing them.
|
||||
|
||||
!!! note "Manager LLM"
|
||||
A manager is set automatically for the crew, so you don't need to define one. You do, however, need to set the `manager_llm` parameter on the crew.
|
||||
|
||||
```python
|
||||
from langchain_openai import ChatOpenAI
|
||||
from crewai import Crew, Process, Agent
|
||||
|
||||
# Define your agents, no need to define a manager
|
||||
researcher = Agent(
|
||||
role='Researcher',
|
||||
goal='Conduct in-depth analysis',
|
||||
# tools = [...]
|
||||
)
|
||||
writer = Agent(
|
||||
role='Writer',
|
||||
goal='Create engaging content',
|
||||
# tools = [...]
|
||||
)
|
||||
|
||||
# Form the crew with a hierarchical process
|
||||
project_crew = Crew(
|
||||
tasks=[...],  # Tasks that the manager will figure out how to complete
|
||||
agents=[researcher, writer],
|
||||
manager_llm=ChatOpenAI(temperature=0, model="gpt-4"), # The manager's LLM that will be used internally
|
||||
process=Process.hierarchical # Designating the hierarchical approach
|
||||
)
|
||||
```
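Once formed, the hierarchical crew is kicked off like any other crew. A minimal sketch, assuming the task list above has been filled in:

```python
# Start the hierarchical crew; the manager delegates tasks and validates the results
result = project_crew.kickoff()
print(result)
```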
|
||||
|
||||
### Workflow in Action
|
||||
1. **Task Assignment**: The manager assigns tasks based on agent roles and capabilities.
|
||||
2. **Execution and Review**: Agents perform their tasks, with the manager reviewing outcomes for approval.
|
||||
3. **Sequential Task Progression**: Tasks are completed in a sequence dictated by the manager, ensuring orderly progression.
|
||||
|
||||
## Conclusion
|
||||
The hierarchical process in CrewAI offers a familiar, structured way to manage tasks within a project. By leveraging a chain of command, it enhances efficiency and quality control, making it ideal for complex projects requiring meticulous oversight.
|
||||
76
docs/how-to/Human-Input-on-Execution.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# Human Input on Execution
|
||||
|
||||
Human input is important in many agent execution use cases: humans can be prompted to step in and provide extra details when necessary.
|
||||
Using it with crewAI is pretty straightforward and you can do it through a LangChain Tool.
|
||||
Check [LangChain Integration](https://python.langchain.com/docs/integrations/tools/human_tools) for more details:
|
||||
|
||||
Example:
|
||||
|
||||
```python
|
||||
import os
|
||||
from crewai import Agent, Task, Crew, Process
|
||||
from langchain_community.tools import DuckDuckGoSearchRun
|
||||
from langchain.agents import load_tools
|
||||
|
||||
search_tool = DuckDuckGoSearchRun()
|
||||
|
||||
# Loading Human Tools
|
||||
human_tools = load_tools(["human"])
|
||||
|
||||
# Define your agents with roles and goals
|
||||
researcher = Agent(
|
||||
role='Senior Research Analyst',
|
||||
goal='Uncover cutting-edge developments in AI and data science',
|
||||
backstory="""You are a Senior Research Analyst at a leading tech think tank.
|
||||
Your expertise lies in identifying emerging trends and technologies in AI and
|
||||
data science. You have a knack for dissecting complex data and presenting
|
||||
actionable insights.""",
|
||||
verbose=True,
|
||||
allow_delegation=False,
|
||||
# Passing human tools to the agent
|
||||
tools=[search_tool]+human_tools
|
||||
)
|
||||
writer = Agent(
|
||||
role='Tech Content Strategist',
|
||||
goal='Craft compelling content on tech advancements',
|
||||
backstory="""You are a renowned Tech Content Strategist, known for your insightful
|
||||
and engaging articles on technology and innovation. With a deep understanding of
|
||||
the tech industry, you transform complex concepts into compelling narratives.""",
|
||||
verbose=True,
|
||||
allow_delegation=True
|
||||
)
|
||||
|
||||
# Create tasks for your agents
|
||||
# Being explicit on the task to ask for human feedback.
|
||||
task1 = Task(
|
||||
description="""Conduct a comprehensive analysis of the latest advancements in AI in 2024.
|
||||
Identify key trends, breakthrough technologies, and potential industry impacts.
|
||||
Compile your findings in a detailed report.
|
||||
Make sure to check with the human if the draft is good before returning your Final Answer.
|
||||
Your final answer MUST be a full analysis report""",
|
||||
agent=researcher
|
||||
)
|
||||
|
||||
task2 = Task(
|
||||
description="""Using the insights from the researcher's report, develop an engaging blog
|
||||
post that highlights the most significant AI advancements.
|
||||
Your post should be informative yet accessible, catering to a tech-savvy audience.
|
||||
Aim for a narrative that captures the essence of these breakthroughs and their
|
||||
implications for the future.
|
||||
Your final answer MUST be the full blog post of at least 3 paragraphs.""",
|
||||
agent=writer
|
||||
)
|
||||
|
||||
# Instantiate your crew with a sequential process
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
tasks=[task1, task2],
|
||||
verbose=2
|
||||
)
|
||||
|
||||
# Get your crew to work!
|
||||
result = crew.kickoff()
|
||||
|
||||
print("######################")
|
||||
print(result)
|
||||
```
|
||||
110
docs/how-to/LLM-Connections.md
Normal file
@@ -0,0 +1,110 @@
|
||||
---
|
||||
title: Connect CrewAI to LLMs
|
||||
description: Guide on integrating CrewAI with various Large Language Models (LLMs).
|
||||
---
|
||||
|
||||
## Connect CrewAI to LLMs
|
||||
!!! note "Default LLM"
|
||||
By default, crewAI uses OpenAI's GPT-4 model for language processing. However, you can configure your agents to use a different model or API. This guide shows you how to connect your agents to different LLMs. You can change the specific GPT model by setting the `OPENAI_MODEL_NAME` environment variable.
|
||||
|
||||
CrewAI offers flexibility in connecting to various LLMs, including local models via [Ollama](https://ollama.ai) and different APIs like Azure. It's compatible with all [LangChain LLM](https://python.langchain.com/docs/integrations/llms/) components, enabling diverse integrations for tailored AI solutions.
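Because agents accept any LangChain-compatible LLM through the `llm` parameter, swapping models is a one-line change. A minimal sketch (the model name below is illustrative, not a recommendation):

```python
from crewai import Agent
from langchain_openai import ChatOpenAI

# Any LangChain chat model can be passed to an agent via `llm`
custom_llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)

researcher = Agent(
    role='Researcher',
    goal='Summarize the latest AI developments',
    backstory='A meticulous reader of research papers.',
    llm=custom_llm
)
```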
|
||||
|
||||
|
||||
## Ollama Integration
|
||||
Ollama is preferred for local LLM integration, offering customization and privacy benefits. It requires installation and configuration, including model adjustments via a Modelfile to optimize performance.
|
||||
|
||||
### Setting Up Ollama
|
||||
- **Installation**: Follow Ollama's guide for setup.
|
||||
- **Configuration**: [Adjust your local model with a Modelfile](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md), for example by adding `Result` as a stop word and experimenting with parameters like `top_p` and `temperature`, as sketched below.
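A minimal sketch of that adjustment, assuming Ollama is installed and the `openhermes` model has been pulled (the parameter values are illustrative, not tuned recommendations):

```sh
# Illustrative sketch: create a tuned local model for use with crewAI
cat > Modelfile <<'EOF'
FROM openhermes
PARAMETER stop "Result"
PARAMETER top_p 0.8
PARAMETER temperature 0.6
EOF
ollama create crewai-openhermes -f ./Modelfile
```

The resulting model name can then be referenced via the `OPENAI_MODEL_NAME` environment variable shown below.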
|
||||
|
||||
### Integrating Ollama with CrewAI
|
||||
Point crewAI at your local Ollama server through the environment variables shown below and define your agents as usual; they will then run on the local model's capabilities.
|
||||
|
||||
```python
import os
from crewai import Agent

# Required: point the OpenAI-compatible client at the local Ollama server
os.environ["OPENAI_API_BASE"] = 'http://localhost:11434/v1'
os.environ["OPENAI_MODEL_NAME"] = 'openhermes'  # Whatever model you pulled with Ollama
os.environ["OPENAI_API_KEY"] = ''

local_expert = Agent(
    role='Local Expert',
    goal='Provide insights about the city',
    backstory="A knowledgeable local guide.",
    # Example tools; replace with tools defined in your own project
    tools=[SearchTools.search_internet, BrowserTools.scrape_and_summarize_website],
    verbose=True
)
```
|
||||
|
||||
## OpenAI Compatible API Endpoints
|
||||
You can use environment variables to switch easily between APIs and models, supporting diverse platforms like FastChat, LM Studio, and Mistral AI.
|
||||
|
||||
### Configuration Examples
|
||||
|
||||
### Ollama
|
||||
```sh
|
||||
OPENAI_API_BASE='http://localhost:11434/v1'
|
||||
OPENAI_MODEL_NAME='openhermes' # Depending on the model you have available
|
||||
OPENAI_API_KEY=NA
|
||||
```
|
||||
|
||||
### FastChat
|
||||
```sh
|
||||
|
||||
OPENAI_API_BASE="http://localhost:8001/v1"
|
||||
OPENAI_MODEL_NAME='oh-2.5m7b-q51' # Depending on the model you have available
|
||||
OPENAI_API_KEY=NA
|
||||
```
|
||||
|
||||
### LM Studio
|
||||
```sh
|
||||
OPENAI_API_BASE="http://localhost:8000/v1"
|
||||
OPENAI_MODEL_NAME=NA
|
||||
OPENAI_API_KEY=NA
|
||||
```
|
||||
|
||||
### Mistral API
|
||||
```sh
|
||||
OPENAI_API_KEY=your-mistral-api-key
|
||||
OPENAI_API_BASE=https://api.mistral.ai/v1
|
||||
OPENAI_MODEL_NAME="mistral-small" # Check documentation for available models
|
||||
```
|
||||
|
||||
### text-gen-web-ui
|
||||
```sh
|
||||
OPENAI_API_BASE=http://localhost:5000/v1
|
||||
OPENAI_MODEL_NAME=NA
|
||||
OPENAI_API_KEY=NA
|
||||
```
|
||||
|
||||
### Azure Open AI
|
||||
Azure's OpenAI API needs a distinct setup, utilizing the `langchain_openai` component for Azure-specific configurations.
|
||||
|
||||
Configuration settings:
|
||||
```sh
|
||||
AZURE_OPENAI_VERSION="2022-12-01"
|
||||
AZURE_OPENAI_DEPLOYMENT=""
|
||||
AZURE_OPENAI_ENDPOINT=""
|
||||
AZURE_OPENAI_KEY=""
|
||||
```
|
||||
|
||||
```python
import os

from dotenv import load_dotenv
from crewai import Agent
from langchain_openai import AzureChatOpenAI

load_dotenv()

default_llm = AzureChatOpenAI(
    azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
    api_key=os.environ.get("AZURE_OPENAI_KEY")
)

example_agent = Agent(
    role='Example Agent',
    goal='Demonstrate custom LLM configuration',
    backstory='A diligent explorer of GitHub docs.',
    llm=default_llm
)
```
|
||||
|
||||
## Conclusion
|
||||
Integrating CrewAI with different LLMs expands the framework's versatility, allowing for customized, efficient AI solutions across various domains and platforms.
|
||||
50
docs/how-to/Sequential.md
Normal file
@@ -0,0 +1,50 @@
|
||||
---
|
||||
title: Implementing the Sequential Process in CrewAI
|
||||
description: A guide to utilizing the sequential process for task execution in CrewAI projects.
|
||||
---
|
||||
|
||||
## Introduction
|
||||
The sequential process in CrewAI ensures tasks are executed one after the other, following a linear progression. This approach is akin to a relay race, where each agent completes their task before passing the baton to the next.
|
||||
|
||||
## Sequential Process Overview
|
||||
This process is straightforward and effective, particularly for projects where tasks must be completed in a specific order to achieve the desired outcome.
|
||||
|
||||
### Key Features
|
||||
- **Linear Task Flow**: Tasks are handled in a predetermined sequence, ensuring orderly progression.
|
||||
- **Simplicity**: Ideal for projects with clearly defined, step-by-step tasks.
|
||||
- **Easy Monitoring**: Task completion can be easily tracked, offering clear insights into project progress.
|
||||
|
||||
## Implementing the Sequential Process
|
||||
To apply the sequential process, assemble your crew and define the tasks in the order they need to be executed.
|
||||
|
||||
!!! note "Task assignment"
|
||||
In the sequential process you need to make sure all tasks are assigned to the agents, as the agents will be the ones executing them.
|
||||
|
||||
```python
|
||||
from crewai import Crew, Process, Agent, Task
|
||||
|
||||
# Define your agents
|
||||
researcher = Agent(role='Researcher', goal='Conduct foundational research')
|
||||
analyst = Agent(role='Data Analyst', goal='Analyze research findings')
|
||||
writer = Agent(role='Writer', goal='Draft the final report')
|
||||
|
||||
# Define the tasks in sequence
|
||||
research_task = Task(description='Gather relevant data', agent=researcher)
|
||||
analysis_task = Task(description='Analyze the data', agent=analyst)
|
||||
writing_task = Task(description='Compose the report', agent=writer)
|
||||
|
||||
# Form the crew with a sequential process
|
||||
report_crew = Crew(
|
||||
agents=[researcher, analyst, writer],
|
||||
tasks=[research_task, analysis_task, writing_task],
|
||||
process=Process.sequential
|
||||
)
|
||||
```
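Kicking off the sequential crew follows the same pattern as any other crew. A minimal sketch, assuming the tasks above are defined:

```python
# Execute the tasks in the order they were defined
result = report_crew.kickoff()
print(result)
```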
|
||||
|
||||
### Workflow in Action
|
||||
1. **Initial Task**: The first agent completes their task and signals completion.
|
||||
2. **Subsequent Tasks**: Following agents pick up their tasks in the order defined, using the outcomes of preceding tasks as inputs.
|
||||
3. **Completion**: The process concludes once the final task is executed, culminating in the project's completion.
|
||||
|
||||
## Conclusion
|
||||
The sequential process in CrewAI provides a clear, straightforward path for task execution. It's particularly suited for projects requiring a logical progression of tasks, ensuring each step is completed before the next begins, thereby facilitating a cohesive final product.
|
||||
117
docs/index.md
@@ -1,8 +1,113 @@
|
||||
<img src='./crewai_logo.png' width='250'/>
|
||||
<img src='./crew_only_logo.png' width='250' class='mb-10'/>
|
||||
|
||||
# Welcome to crewAI Documentation
|
||||
🤖 Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks.
|
||||
# crewAI Documentation
|
||||
|
||||
<p align="center">
|
||||
<img src='./crewAI-mindmap.png' />
|
||||
</p>
|
||||
Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks.
|
||||
|
||||
<div style="display:flex; margin:0 auto; justify-content: center;">
|
||||
<div style="width:25%">
|
||||
<h2>Core Concepts</h2>
|
||||
<ul>
|
||||
<li>
|
||||
<a href="./core-concepts/Agents">
|
||||
Agents
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./core-concepts/Tasks">
|
||||
Tasks
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./core-concepts/Tools">
|
||||
Tools
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./core-concepts/Processes">
|
||||
Processes
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./core-concepts/Crews">
|
||||
Crews
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div style="width:30%">
|
||||
<h2>How-To Guides</h2>
|
||||
<ul>
|
||||
<li>
|
||||
<a href="./how-to/Creating-a-Crew-and-kick-it-off">
|
||||
Getting Started
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./how-to/Sequential">
|
||||
Using Sequential Process
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./how-to/Hierarchical">
|
||||
Using Hierarchical Process
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./how-to/LLM-Connections">
|
||||
Connecting to LLMs
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./how-to/Customizing-Agents">
|
||||
Customizing Agents
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a href="./how-to/Human-Input-on-Execution">
|
||||
Human Input on Execution
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div style="width:30%">
|
||||
<h2>Examples</h2>
|
||||
<ul>
|
||||
<li>
|
||||
<a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/prep-for-a-meeting">
|
||||
Prepare for meetings
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner">
|
||||
Trip Planner Crew
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post">
|
||||
Create Instagram Post
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/stock_analysis">
|
||||
Stock Analysis
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/game-builder-crew">
|
||||
Game Generator
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/CrewAI-LangGraph">
|
||||
Drafting emails with LangGraph
|
||||
</a>
|
||||
</li>
|
||||
<li>
|
||||
<a target='_blank' href="https://github.com/joaomdmoura/crewAI-examples/tree/main/landing_page_generator">
|
||||
Landing Page Generator
|
||||
</a>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
3
docs/postcss.config.js
Normal file
@@ -0,0 +1,3 @@
|
||||
module.exports = {
|
||||
plugins: [require('tailwindcss'), require('autoprefixer')]
|
||||
}
|
||||
3
docs/stylesheets/extra.css
Normal file
@@ -0,0 +1,3 @@
|
||||
.md-typeset .admonition-title {
|
||||
margin-bottom: 10px;
|
||||
}
|
||||
565
docs/stylesheets/output.css
Normal file
@@ -0,0 +1,565 @@
|
||||
/*
|
||||
! tailwindcss v3.4.1 | MIT License | https://tailwindcss.com
|
||||
*/
|
||||
|
||||
/*
|
||||
1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4)
|
||||
2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116)
|
||||
*/
|
||||
|
||||
*,
|
||||
::before,
|
||||
::after {
|
||||
box-sizing: border-box;
|
||||
/* 1 */
|
||||
border-width: 0;
|
||||
/* 2 */
|
||||
border-style: solid;
|
||||
/* 2 */
|
||||
border-color: #e5e7eb;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
::before,
|
||||
::after {
|
||||
--tw-content: '';
|
||||
}
|
||||
|
||||
/*
|
||||
1. Use a consistent sensible line-height in all browsers.
|
||||
2. Prevent adjustments of font size after orientation changes in iOS.
|
||||
3. Use a more readable tab size.
|
||||
4. Use the user's configured `sans` font-family by default.
|
||||
5. Use the user's configured `sans` font-feature-settings by default.
|
||||
6. Use the user's configured `sans` font-variation-settings by default.
|
||||
7. Disable tap highlights on iOS
|
||||
*/
|
||||
|
||||
html,
|
||||
:host {
|
||||
line-height: 1.5;
|
||||
/* 1 */
|
||||
-webkit-text-size-adjust: 100%;
|
||||
/* 2 */
|
||||
-moz-tab-size: 4;
|
||||
/* 3 */
|
||||
-o-tab-size: 4;
|
||||
tab-size: 4;
|
||||
/* 3 */
|
||||
font-family: ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
|
||||
/* 4 */
|
||||
font-feature-settings: normal;
|
||||
/* 5 */
|
||||
font-variation-settings: normal;
|
||||
/* 6 */
|
||||
-webkit-tap-highlight-color: transparent;
|
||||
/* 7 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Remove the margin in all browsers.
|
||||
2. Inherit line-height from `html` so users can set them as a class directly on the `html` element.
|
||||
*/
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
/* 1 */
|
||||
line-height: inherit;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Add the correct height in Firefox.
|
||||
2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655)
|
||||
3. Ensure horizontal rules are visible by default.
|
||||
*/
|
||||
|
||||
hr {
|
||||
height: 0;
|
||||
/* 1 */
|
||||
color: inherit;
|
||||
/* 2 */
|
||||
border-top-width: 1px;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct text decoration in Chrome, Edge, and Safari.
|
||||
*/
|
||||
|
||||
abbr:where([title]) {
|
||||
-webkit-text-decoration: underline dotted;
|
||||
text-decoration: underline dotted;
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the default font size and weight for headings.
|
||||
*/
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6 {
|
||||
font-size: inherit;
|
||||
font-weight: inherit;
|
||||
}
|
||||
|
||||
/*
|
||||
Reset links to optimize for opt-in styling instead of opt-out.
|
||||
*/
|
||||
|
||||
a {
|
||||
color: inherit;
|
||||
text-decoration: inherit;
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct font weight in Edge and Safari.
|
||||
*/
|
||||
|
||||
b,
|
||||
strong {
|
||||
font-weight: bolder;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Use the user's configured `mono` font-family by default.
|
||||
2. Use the user's configured `mono` font-feature-settings by default.
|
||||
3. Use the user's configured `mono` font-variation-settings by default.
|
||||
4. Correct the odd `em` font sizing in all browsers.
|
||||
*/
|
||||
|
||||
code,
|
||||
kbd,
|
||||
samp,
|
||||
pre {
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
/* 1 */
|
||||
font-feature-settings: normal;
|
||||
/* 2 */
|
||||
font-variation-settings: normal;
|
||||
/* 3 */
|
||||
font-size: 1em;
|
||||
/* 4 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct font size in all browsers.
|
||||
*/
|
||||
|
||||
small {
|
||||
font-size: 80%;
|
||||
}
|
||||
|
||||
/*
|
||||
Prevent `sub` and `sup` elements from affecting the line height in all browsers.
|
||||
*/
|
||||
|
||||
sub,
|
||||
sup {
|
||||
font-size: 75%;
|
||||
line-height: 0;
|
||||
position: relative;
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
sub {
|
||||
bottom: -0.25em;
|
||||
}
|
||||
|
||||
sup {
|
||||
top: -0.5em;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297)
|
||||
2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016)
|
||||
3. Remove gaps between table borders by default.
|
||||
*/
|
||||
|
||||
table {
|
||||
text-indent: 0;
|
||||
/* 1 */
|
||||
border-color: inherit;
|
||||
/* 2 */
|
||||
border-collapse: collapse;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Change the font styles in all browsers.
|
||||
2. Remove the margin in Firefox and Safari.
|
||||
3. Remove default padding in all browsers.
|
||||
*/
|
||||
|
||||
button,
|
||||
input,
|
||||
optgroup,
|
||||
select,
|
||||
textarea {
|
||||
font-family: inherit;
|
||||
/* 1 */
|
||||
font-feature-settings: inherit;
|
||||
/* 1 */
|
||||
font-variation-settings: inherit;
|
||||
/* 1 */
|
||||
font-size: 100%;
|
||||
/* 1 */
|
||||
font-weight: inherit;
|
||||
/* 1 */
|
||||
line-height: inherit;
|
||||
/* 1 */
|
||||
color: inherit;
|
||||
/* 1 */
|
||||
margin: 0;
|
||||
/* 2 */
|
||||
padding: 0;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the inheritance of text transform in Edge and Firefox.
|
||||
*/
|
||||
|
||||
button,
|
||||
select {
|
||||
text-transform: none;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the inability to style clickable types in iOS and Safari.
|
||||
2. Remove default button styles.
|
||||
*/
|
||||
|
||||
button,
|
||||
[type='button'],
|
||||
[type='reset'],
|
||||
[type='submit'] {
|
||||
-webkit-appearance: button;
|
||||
/* 1 */
|
||||
background-color: transparent;
|
||||
/* 2 */
|
||||
background-image: none;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Use the modern Firefox focus style for all focusable elements.
|
||||
*/
|
||||
|
||||
:-moz-focusring {
|
||||
outline: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737)
|
||||
*/
|
||||
|
||||
:-moz-ui-invalid {
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct vertical alignment in Chrome and Firefox.
|
||||
*/
|
||||
|
||||
progress {
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
/*
|
||||
Correct the cursor style of increment and decrement buttons in Safari.
|
||||
*/
|
||||
|
||||
::-webkit-inner-spin-button,
|
||||
::-webkit-outer-spin-button {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the odd appearance in Chrome and Safari.
|
||||
2. Correct the outline style in Safari.
|
||||
*/
|
||||
|
||||
[type='search'] {
|
||||
-webkit-appearance: textfield;
|
||||
/* 1 */
|
||||
outline-offset: -2px;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the inner padding in Chrome and Safari on macOS.
|
||||
*/
|
||||
|
||||
::-webkit-search-decoration {
|
||||
-webkit-appearance: none;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the inability to style clickable types in iOS and Safari.
|
||||
2. Change font properties to `inherit` in Safari.
|
||||
*/
|
||||
|
||||
::-webkit-file-upload-button {
|
||||
-webkit-appearance: button;
|
||||
/* 1 */
|
||||
font: inherit;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct display in Chrome and Safari.
|
||||
*/
|
||||
|
||||
summary {
|
||||
display: list-item;
|
||||
}
|
||||
|
||||
/*
|
||||
Removes the default spacing and border for appropriate elements.
|
||||
*/
|
||||
|
||||
blockquote,
|
||||
dl,
|
||||
dd,
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6,
|
||||
hr,
|
||||
figure,
|
||||
p,
|
||||
pre {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
fieldset {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
legend {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
ol,
|
||||
ul,
|
||||
menu {
|
||||
list-style: none;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Reset default styling for dialogs.
|
||||
*/
|
||||
|
||||
dialog {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Prevent resizing textareas horizontally by default.
|
||||
*/
|
||||
|
||||
textarea {
|
||||
resize: vertical;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300)
|
||||
2. Set the default placeholder color to the user's configured gray 400 color.
|
||||
*/
|
||||
|
||||
input::-moz-placeholder, textarea::-moz-placeholder {
|
||||
opacity: 1;
|
||||
/* 1 */
|
||||
color: #9ca3af;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
input::placeholder,
|
||||
textarea::placeholder {
|
||||
opacity: 1;
|
||||
/* 1 */
|
||||
color: #9ca3af;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Set the default cursor for buttons.
|
||||
*/
|
||||
|
||||
button,
|
||||
[role="button"] {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
/*
|
||||
Make sure disabled buttons don't get the pointer cursor.
|
||||
*/
|
||||
|
||||
:disabled {
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14)
|
||||
2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210)
|
||||
This can trigger a poorly considered lint error in some tools but is included by design.
|
||||
*/
|
||||
|
||||
img,
|
||||
svg,
|
||||
video,
|
||||
canvas,
|
||||
audio,
|
||||
iframe,
|
||||
embed,
|
||||
object {
|
||||
display: block;
|
||||
/* 1 */
|
||||
vertical-align: middle;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. (https://github.com/mozdevs/cssremedy/issues/14)
|
||||
*/
|
||||
|
||||
img,
|
||||
video {
|
||||
max-width: 100%;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/* Make elements with the HTML hidden attribute stay hidden by default */
|
||||
|
||||
[hidden] {
|
||||
display: none;
|
||||
}
|
||||
|
||||
*, ::before, ::after {
|
||||
--tw-border-spacing-x: 0;
|
||||
--tw-border-spacing-y: 0;
|
||||
--tw-translate-x: 0;
|
||||
--tw-translate-y: 0;
|
||||
--tw-rotate: 0;
|
||||
--tw-skew-x: 0;
|
||||
--tw-skew-y: 0;
|
||||
--tw-scale-x: 1;
|
||||
--tw-scale-y: 1;
|
||||
--tw-pan-x: ;
|
||||
--tw-pan-y: ;
|
||||
--tw-pinch-zoom: ;
|
||||
--tw-scroll-snap-strictness: proximity;
|
||||
--tw-gradient-from-position: ;
|
||||
--tw-gradient-via-position: ;
|
||||
--tw-gradient-to-position: ;
|
||||
--tw-ordinal: ;
|
||||
--tw-slashed-zero: ;
|
||||
--tw-numeric-figure: ;
|
||||
--tw-numeric-spacing: ;
|
||||
--tw-numeric-fraction: ;
|
||||
--tw-ring-inset: ;
|
||||
--tw-ring-offset-width: 0px;
|
||||
--tw-ring-offset-color: #fff;
|
||||
--tw-ring-color: rgb(59 130 246 / 0.5);
|
||||
--tw-ring-offset-shadow: 0 0 #0000;
|
||||
--tw-ring-shadow: 0 0 #0000;
|
||||
--tw-shadow: 0 0 #0000;
|
||||
--tw-shadow-colored: 0 0 #0000;
|
||||
--tw-blur: ;
|
||||
--tw-brightness: ;
|
||||
--tw-contrast: ;
|
||||
--tw-grayscale: ;
|
||||
--tw-hue-rotate: ;
|
||||
--tw-invert: ;
|
||||
--tw-saturate: ;
|
||||
--tw-sepia: ;
|
||||
--tw-drop-shadow: ;
|
||||
--tw-backdrop-blur: ;
|
||||
--tw-backdrop-brightness: ;
|
||||
--tw-backdrop-contrast: ;
|
||||
--tw-backdrop-grayscale: ;
|
||||
--tw-backdrop-hue-rotate: ;
|
||||
--tw-backdrop-invert: ;
|
||||
--tw-backdrop-opacity: ;
|
||||
--tw-backdrop-saturate: ;
|
||||
--tw-backdrop-sepia: ;
|
||||
}
|
||||
|
||||
::backdrop {
|
||||
--tw-border-spacing-x: 0;
|
||||
--tw-border-spacing-y: 0;
|
||||
--tw-translate-x: 0;
|
||||
--tw-translate-y: 0;
|
||||
--tw-rotate: 0;
|
||||
--tw-skew-x: 0;
|
||||
--tw-skew-y: 0;
|
||||
--tw-scale-x: 1;
|
||||
--tw-scale-y: 1;
|
||||
--tw-pan-x: ;
|
||||
--tw-pan-y: ;
|
||||
--tw-pinch-zoom: ;
|
||||
--tw-scroll-snap-strictness: proximity;
|
||||
--tw-gradient-from-position: ;
|
||||
--tw-gradient-via-position: ;
|
||||
--tw-gradient-to-position: ;
|
||||
--tw-ordinal: ;
|
||||
--tw-slashed-zero: ;
|
||||
--tw-numeric-figure: ;
|
||||
--tw-numeric-spacing: ;
|
||||
--tw-numeric-fraction: ;
|
||||
--tw-ring-inset: ;
|
||||
--tw-ring-offset-width: 0px;
|
||||
--tw-ring-offset-color: #fff;
|
||||
--tw-ring-color: rgb(59 130 246 / 0.5);
|
||||
--tw-ring-offset-shadow: 0 0 #0000;
|
||||
--tw-ring-shadow: 0 0 #0000;
|
||||
--tw-shadow: 0 0 #0000;
|
||||
--tw-shadow-colored: 0 0 #0000;
|
||||
--tw-blur: ;
|
||||
--tw-brightness: ;
|
||||
--tw-contrast: ;
|
||||
--tw-grayscale: ;
|
||||
--tw-hue-rotate: ;
|
||||
--tw-invert: ;
|
||||
--tw-saturate: ;
|
||||
--tw-sepia: ;
|
||||
--tw-drop-shadow: ;
|
||||
--tw-backdrop-blur: ;
|
||||
--tw-backdrop-brightness: ;
|
||||
--tw-backdrop-contrast: ;
|
||||
--tw-backdrop-grayscale: ;
|
||||
--tw-backdrop-hue-rotate: ;
|
||||
--tw-backdrop-invert: ;
|
||||
--tw-backdrop-opacity: ;
|
||||
--tw-backdrop-saturate: ;
|
||||
--tw-backdrop-sepia: ;
|
||||
}
|
||||
|
||||
.mb-10 {
|
||||
margin-bottom: 2.5rem;
|
||||
}
|
||||
|
||||
.transform {
|
||||
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
|
||||
}
|
||||
|
||||
.leading-3 {
|
||||
line-height: .75rem;
|
||||
}
|
||||
|
||||
.transition {
|
||||
transition-property: color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, -webkit-backdrop-filter;
|
||||
transition-property: color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, backdrop-filter;
|
||||
transition-property: color, background-color, border-color, text-decoration-color, fill, stroke, opacity, box-shadow, transform, filter, backdrop-filter, -webkit-backdrop-filter;
|
||||
transition-timing-function: cubic-bezier(0.4, 0, 0.2, 1);
|
||||
transition-duration: 150ms;
|
||||
}
|
||||
3
docs/stylesheets/tailwind.css
Normal file
@@ -0,0 +1,3 @@
|
||||
@import 'tailwindcss/base';
|
||||
@import 'tailwindcss/components';
|
||||
@import 'tailwindcss/utilities';
|
||||
9
docs/tailwind.config.js
Normal file
@@ -0,0 +1,9 @@
|
||||
/** @type {import('tailwindcss').Config} */
|
||||
module.exports = {
|
||||
content: ["./**/*.md"],
|
||||
theme: {
|
||||
extend: {},
|
||||
},
|
||||
plugins: [],
|
||||
}
|
||||
|
||||
29
docs/telemetry/Telemetry.md
Normal file
@@ -0,0 +1,29 @@
|
||||
## Telemetry
|
||||
|
||||
CrewAI uses anonymous telemetry to collect usage data with the main purpose of helping us improve the library by focusing our efforts on the most used features, integrations and tools.
|
||||
|
||||
NO data is collected on prompts, task descriptions, agent backstories or goals, tool usage, API calls, responses, any data processed by the agents, or any secrets and environment variables.
|
||||
|
||||
Data collected includes:
|
||||
- Version of crewAI
|
||||
- So we can understand how many users are using the latest version
|
||||
- Version of Python
|
||||
- So we can decide on what versions to better support
|
||||
- General OS (e.g. number of CPUs, macOS/Windows/Linux)
|
||||
- So we know what OS we should focus on and if we could build specific OS related features
|
||||
- Number of agents and tasks in a crew
|
||||
- So we can make sure we are testing internally with similar use cases and educate people on best practices
|
||||
- Crew Process being used
|
||||
- Understand where we should focus our efforts
|
||||
- If Agents are using memory or allowing delegation
|
||||
- Understand if we improved the features or maybe even drop them
|
||||
- If Tasks are being executed in parallel or sequentially
|
||||
- Understand if we should focus more on parallel execution
|
||||
- Language model being used
|
||||
- So we can improve support for the most used language models
|
||||
- Roles of agents in a crew
|
||||
- Understand high level use cases so we can build better tools, integrations and examples about it
|
||||
- Tools names available
|
||||
- Understand which of the publicly available tools are used the most, so we can improve them
|
||||
|
||||
Users can opt in to sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their crews.
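A minimal sketch of opting in, assuming agents and tasks have been defined as in the other guides:

```python
from crewai import Crew

crew = Crew(
    agents=[researcher, writer],  # Agents defined elsewhere
    tasks=[research_task, write_task],  # Tasks defined elsewhere
    share_crew=True  # Opt in to sharing the complete telemetry data
)
```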
|
||||
162
mkdocs.yml
@@ -1,9 +1,161 @@
|
||||
site_name: crewAI Documentation
|
||||
site_name: crewAI
|
||||
site_author: crewAI, Inc
|
||||
site_description: Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks.
|
||||
repo_name: crewAI
|
||||
repo_url: https://github.com/joaomdmoura/crewai/
|
||||
site_url: https://crewai.com
|
||||
edit_uri: edit/main/docs/
|
||||
copyright: Copyright © 2024 crewAI, Inc
|
||||
|
||||
markdown_extensions:
|
||||
- abbr
|
||||
- admonition
|
||||
- pymdownx.details
|
||||
- attr_list
|
||||
- def_list
|
||||
- footnotes
|
||||
- md_in_html
|
||||
- toc:
|
||||
permalink: true
|
||||
- pymdownx.arithmatex:
|
||||
generic: true
|
||||
- pymdownx.betterem:
|
||||
smart_enable: all
|
||||
- pymdownx.caret
|
||||
- pymdownx.emoji:
|
||||
emoji_generator: !!python/name:material.extensions.emoji.to_svg
|
||||
emoji_index: !!python/name:material.extensions.emoji.twemoji
|
||||
- pymdownx.highlight:
|
||||
anchor_linenums: true
|
||||
line_spans: __span
|
||||
pygments_lang_class: true
|
||||
- pymdownx.inlinehilite
|
||||
- pymdownx.keys
|
||||
- pymdownx.magiclink:
|
||||
normalize_issue_symbols: true
|
||||
repo_url_shorthand: true
|
||||
user: joaomdmoura
|
||||
repo: crewAI
|
||||
- pymdownx.mark
|
||||
- pymdownx.smartsymbols
|
||||
- pymdownx.snippets:
|
||||
auto_append:
|
||||
- includes/mkdocs.md
|
||||
- pymdownx.superfences:
|
||||
custom_fences:
|
||||
- name: mermaid
|
||||
class: mermaid
|
||||
format: !!python/name:pymdownx.superfences.fence_code_format
|
||||
- pymdownx.tabbed:
|
||||
alternate_style: true
|
||||
combine_header_slug: true
|
||||
slugify: !!python/object/apply:pymdownx.slugs.slugify
|
||||
kwds:
|
||||
case: lower
|
||||
- pymdownx.tasklist:
|
||||
custom_checkbox: true
|
||||
- pymdownx.tilde
|
||||
theme:
|
||||
name: material
|
||||
language: en
|
||||
icon:
|
||||
repo: fontawesome/brands/github
|
||||
edit: material/pencil
|
||||
view: material/eye
|
||||
admonition:
|
||||
note: octicons/light-bulb-16
|
||||
abstract: octicons/checklist-16
|
||||
info: octicons/info-16
|
||||
tip: octicons/squirrel-16
|
||||
success: octicons/check-16
|
||||
question: octicons/question-16
|
||||
warning: octicons/alert-16
|
||||
failure: octicons/x-circle-16
|
||||
danger: octicons/zap-16
|
||||
bug: octicons/bug-16
|
||||
example: octicons/beaker-16
|
||||
quote: octicons/quote-16
|
||||
|
||||
palette:
|
||||
scheme: default
|
||||
primary: red
|
||||
accent: red
|
||||
- scheme: default
|
||||
primary: red
|
||||
accent: red
|
||||
toggle:
|
||||
icon: material/brightness-7
|
||||
name: Switch to dark mode
|
||||
- scheme: slate
|
||||
primary: red
|
||||
accent: red
|
||||
toggle:
|
||||
icon: material/brightness-4
|
||||
name: Switch to light mode
|
||||
features:
|
||||
- navigation.tabs
|
||||
- announce.dismiss
|
||||
- content.action.edit
|
||||
- content.action.view
|
||||
- content.code.annotate
|
||||
- content.code.copy
|
||||
- content.code.select
|
||||
- content.tabs.link
|
||||
- content.tooltips
|
||||
- header.autohide
|
||||
- navigation.footer
|
||||
- navigation.indexes
|
||||
# - navigation.prune
|
||||
# - navigation.sections
|
||||
# - navigation.tabs
|
||||
- search.suggest
|
||||
- navigation.instant
|
||||
- navigation.instant.progress
|
||||
- navigation.instant.prefetch
|
||||
- navigation.tracking
|
||||
# - navigation.expand
|
||||
- navigation.path
|
||||
- navigation.top
|
||||
- toc.follow
|
||||
- toc.integrate
|
||||
- search.highlight
|
||||
- search.share
|
||||
|
||||
nav:
|
||||
- Home: '/'
|
||||
- Core Concepts:
|
||||
- Agents: 'core-concepts/Agents.md'
|
||||
- Tasks: 'core-concepts/Tasks.md'
|
||||
- Tools: 'core-concepts/Tools.md'
|
||||
- Processes: 'core-concepts/Processes.md'
|
||||
- Crews: 'core-concepts/Crews.md'
|
||||
- Collaboration: 'core-concepts/Collaboration.md'
|
||||
- How to Guides:
|
||||
- Getting Started: 'how-to/Creating-a-Crew-and-kick-it-off.md'
|
||||
- Using Sequential Process: 'how-to/Sequential.md'
|
||||
- Using Hierarchical Process: 'how-to/Hierarchical.md'
|
||||
- Connecting to any LLM: 'how-to/LLM-Connections.md'
|
||||
- Customizing Agents: 'how-to/Customizing-Agents.md'
|
||||
- Human Input on Execution: 'how-to/Human-Input-on-Execution.md'
|
||||
- Examples:
|
||||
- Trip Planner Crew: https://github.com/joaomdmoura/crewAI-examples/tree/main/trip_planner
- Create Instagram Post: https://github.com/joaomdmoura/crewAI-examples/tree/main/instagram_post
- Stock Analysis: https://github.com/joaomdmoura/crewAI-examples/tree/main/stock_analysis
- Game Generator: https://github.com/joaomdmoura/crewAI-examples/tree/main/game-builder-crew
- Drafting emails with LangGraph: https://github.com/joaomdmoura/crewAI-examples/tree/main/CrewAI-LangGraph
- Landing Page Generator: https://github.com/joaomdmoura/crewAI-examples/tree/main/landing_page_generator
- Prepare for meetings: https://github.com/joaomdmoura/crewAI-examples/tree/main/prep-for-a-meeting
|
||||
- Telemetry: 'telemetry/Telemetry.md'
|
||||
|
||||
extra_css:
|
||||
- stylesheets/output.css
|
||||
- stylesheets/extra.css
|
||||
|
||||
plugins:
|
||||
- social
|
||||
|
||||
extra:
|
||||
analytics:
|
||||
provider: google
|
||||
property: G-N3Q505TMQ6
|
||||
social:
|
||||
- icon: fontawesome/brands/twitter
|
||||
link: https://twitter.com/joaomdmoura
|
||||
- icon: fontawesome/brands/github
|
||||
link: https://github.com/joaomdmoura/crewAI
|
||||
4036
poetry.lock
generated
@@ -1,9 +1,9 @@
|
||||
|
||||
[tool.poetry]
|
||||
name = "crewai"
|
||||
version = "0.1.24"
|
||||
version = "0.14.0"
|
||||
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
|
||||
authors = ["Joao Moura <joaomdmoura@gmail.com>"]
|
||||
authors = ["Joao Moura <joao@crewai.com>"]
|
||||
readme = "README.md"
|
||||
packages = [
|
||||
{ include = "crewai", from = "src" },
|
||||
@@ -11,30 +11,45 @@ packages = [
|
||||
|
||||
|
||||
[tool.poetry.urls]
|
||||
Homepage = "https://github.com/joaomdmoura/crewai"
|
||||
Homepage = "https://crewai.io"
|
||||
Documentation = "https://github.com/joaomdmoura/CrewAI/wiki/Index"
|
||||
Repository = "https://github.com/joaomdmoura/crewai"
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.9,<4.0"
|
||||
python = ">=3.10,<=3.13"
|
||||
pydantic = "^2.4.2"
|
||||
langchain = "^0.1.0"
|
||||
openai = "^1.7.1"
|
||||
langchain-openai = "^0.0.2"
|
||||
langchain-openai = "^0.0.5"
|
||||
opentelemetry-api = "^1.22.0"
|
||||
opentelemetry-sdk = "^1.22.0"
|
||||
opentelemetry-exporter-otlp-proto-http = "^1.22.0"
|
||||
instructor = "^0.5.2"
|
||||
regex = "^2023.12.25"
|
||||
crewai-tools = "^0.0.6"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
isort = "^5.13.2"
|
||||
black = "^23.12.1"
|
||||
pyright = ">=1.1.350,<2.0.0"
|
||||
black = {git = "https://github.com/psf/black.git", rev = "stable"}
|
||||
autoflake = "^2.2.1"
|
||||
pre-commit = "^3.6.0"
|
||||
mkdocs-material = "^9.5.3"
|
||||
mkdocs = "^1.4.3"
|
||||
mkdocstrings = "^0.22.0"
|
||||
mkdocstrings-python = "^1.1.2"
|
||||
mkdocs-material = {extras = ["imaging"], version = "^9.5.7"}
|
||||
mkdocs-material-extensions = "^1.3.1"
|
||||
pillow = "^10.2.0"
|
||||
cairosvg = "^2.7.1"
|
||||
|
||||
[tool.isort]
|
||||
profile = "black"
|
||||
known_first_party = ["crewai"]
|
||||
|
||||
|
||||
|
||||
[tool.poetry.group.test.dependencies]
|
||||
pytest = "^7.4"
|
||||
pytest = "^8.0.0"
|
||||
pytest-vcr = "^1.0.2"
|
||||
python-dotenv = "1.0.0"
|
||||
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
import os
|
||||
import uuid
|
||||
from typing import Any, List, Optional
|
||||
from typing import Any, List, Optional, Tuple
|
||||
|
||||
from langchain.agents.format_scratchpad import format_log_to_str
|
||||
from crewai_tools import BaseTool as CrewAITool
|
||||
from langchain.agents.agent import RunnableAgent
|
||||
from langchain.agents.tools import tool as LangChainTool
|
||||
from langchain.memory import ConversationSummaryMemory
|
||||
from langchain.tools.render import render_text_description
|
||||
from langchain_core.runnables.config import RunnableConfig
|
||||
from langchain_core.agents import AgentAction
|
||||
from langchain_openai import ChatOpenAI
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
@@ -18,12 +21,7 @@ from pydantic import (
|
||||
)
|
||||
from pydantic_core import PydanticCustomError
|
||||
|
||||
from crewai.agents import (
|
||||
CacheHandler,
|
||||
CrewAgentExecutor,
|
||||
CrewAgentOutputParser,
|
||||
ToolsHandler,
|
||||
)
|
||||
from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler
|
||||
from crewai.utilities import I18N, Logger, Prompts, RPMController
|
||||
|
||||
|
||||
@@ -39,14 +37,17 @@ class Agent(BaseModel):
|
||||
goal: The objective of the agent.
|
||||
backstory: The backstory of the agent.
|
||||
llm: The language model that will run the agent.
|
||||
function_calling_llm: The language model that will handle the tool calling for this agent; it overrides the crew's function_calling_llm.
|
||||
max_iter: Maximum number of iterations for an agent to execute a task.
|
||||
memory: Whether the agent should have memory or not.
|
||||
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
|
||||
verbose: Whether the agent execution should be in verbose mode.
|
||||
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
|
||||
tools: Tools at agents disposal
|
||||
step_callback: Callback to be executed after each step of the agent execution.
|
||||
"""
|
||||
|
||||
__hash__ = object.__hash__
|
||||
__hash__ = object.__hash__ # type: ignore
|
||||
_logger: Logger = PrivateAttr()
|
||||
_rpm_controller: RPMController = PrivateAttr(default=None)
|
||||
_request_within_rpm_limit: Any = PrivateAttr(default=None)
|
||||
@@ -73,31 +74,35 @@ class Agent(BaseModel):
|
||||
allow_delegation: bool = Field(
|
||||
default=True, description="Allow delegation of tasks to agents"
|
||||
)
|
||||
tools: List[Any] = Field(
|
||||
tools: Optional[List[Any]] = Field(
|
||||
default_factory=list, description="Tools at agents disposal"
|
||||
)
|
||||
max_iter: Optional[int] = Field(
|
||||
default=15, description="Maximum iterations for an agent to execute a task"
|
||||
)
|
||||
agent_executor: Optional[InstanceOf[CrewAgentExecutor]] = Field(
|
||||
agent_executor: InstanceOf[CrewAgentExecutor] = Field(
|
||||
default=None, description="An instance of the CrewAgentExecutor class."
|
||||
)
|
||||
tools_handler: Optional[InstanceOf[ToolsHandler]] = Field(
|
||||
tools_handler: InstanceOf[ToolsHandler] = Field(
|
||||
default=None, description="An instance of the ToolsHandler class."
|
||||
)
|
||||
cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
|
||||
cache_handler: InstanceOf[CacheHandler] = Field(
|
||||
default=CacheHandler(), description="An instance of the CacheHandler class."
|
||||
)
|
||||
i18n: Optional[I18N] = Field(
|
||||
default=I18N(), description="Internationalization settings."
|
||||
step_callback: Optional[Any] = Field(
|
||||
default=None,
|
||||
description="Callback to be executed after each step of the agent execution.",
|
||||
)
|
||||
llm: Optional[Any] = Field(
|
||||
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
|
||||
llm: Any = Field(
|
||||
default_factory=lambda: ChatOpenAI(
|
||||
temperature=0.7,
|
||||
model_name="gpt-4",
|
||||
model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4")
|
||||
),
|
||||
description="Language model that will run the agent.",
|
||||
)
|
||||
function_calling_llm: Optional[Any] = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
@@ -109,6 +114,7 @@ class Agent(BaseModel):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def set_private_attrs(self):
|
||||
"""Set private attributes."""
|
||||
self._logger = Logger(self.verbose)
|
||||
if self.max_rpm and not self._rpm_controller:
|
||||
self._rpm_controller = RPMController(
|
||||
@@ -118,12 +124,16 @@ class Agent(BaseModel):
|
||||
|
||||
@model_validator(mode="after")
|
||||
def check_agent_executor(self) -> "Agent":
|
||||
"""Check if the agent executor is set."""
|
||||
if not self.agent_executor:
|
||||
self.set_cache_handler(self.cache_handler)
|
||||
return self
|
||||
|
||||
def execute_task(
|
||||
self, task: str, context: str = None, tools: List[Any] = None
|
||||
self,
|
||||
task: Any,
|
||||
context: Optional[str] = None,
|
||||
tools: Optional[List[Any]] = None,
|
||||
) -> str:
|
||||
"""Execute a task with the agent.
|
||||
|
||||
@@ -135,21 +145,25 @@ class Agent(BaseModel):
|
||||
Returns:
|
||||
Output of the agent
|
||||
"""
|
||||
task_prompt = task.prompt()
|
||||
|
||||
if context:
|
||||
task = self.i18n.slice("task_with_context").format(
|
||||
task=task, context=context
|
||||
task_prompt = self.i18n.slice("task_with_context").format(
|
||||
task=task_prompt, context=context
|
||||
)
|
||||
|
||||
tools = tools or self.tools
|
||||
tools = self._parse_tools(tools or self.tools)
|
||||
self.agent_executor.tools = tools
|
||||
self.agent_executor.task = task
|
||||
self.agent_executor.tools_description = render_text_description(tools)
|
||||
self.agent_executor.tools_names = self.__tools_names(tools)
|
||||
|
||||
result = self.agent_executor.invoke(
|
||||
{
|
||||
"input": task,
|
||||
"tool_names": self.__tools_names(tools),
|
||||
"tools": render_text_description(tools),
|
||||
},
|
||||
RunnableConfig(callbacks=[self.tools_handler]),
|
||||
"input": task_prompt,
|
||||
"tool_names": self.agent_executor.tools_names,
|
||||
"tools": self.agent_executor.tools_description,
|
||||
}
|
||||
)["output"]
|
||||
|
||||
if self.max_rpm:
|
||||
@@ -157,17 +171,27 @@ class Agent(BaseModel):
|
||||
|
||||
return result
|
||||
|
||||
def set_cache_handler(self, cache_handler) -> None:
|
||||
def set_cache_handler(self, cache_handler: CacheHandler) -> None:
|
||||
"""Set the cache handler for the agent.
|
||||
|
||||
Args:
|
||||
cache_handler: An instance of the CacheHandler class.
|
||||
"""
|
||||
self.cache_handler = cache_handler
|
||||
self.tools_handler = ToolsHandler(cache=self.cache_handler)
|
||||
self.__create_agent_executor()
|
||||
self.create_agent_executor()
|
||||
|
||||
def set_rpm_controller(self, rpm_controller) -> None:
|
||||
def set_rpm_controller(self, rpm_controller: RPMController) -> None:
|
||||
"""Set the rpm controller for the agent.
|
||||
|
||||
Args:
|
||||
rpm_controller: An instance of the RPMController class.
|
||||
"""
|
||||
if not self._rpm_controller:
|
||||
self._rpm_controller = rpm_controller
|
||||
self.__create_agent_executor()
|
||||
self.create_agent_executor()
|
||||
|
||||
def __create_agent_executor(self) -> CrewAgentExecutor:
|
||||
def create_agent_executor(self) -> None:
|
||||
"""Create an agent executor for the agent.
|
||||
|
||||
Returns:
|
||||
@@ -177,14 +201,21 @@ class Agent(BaseModel):
|
||||
"input": lambda x: x["input"],
|
||||
"tools": lambda x: x["tools"],
|
||||
"tool_names": lambda x: x["tool_names"],
|
||||
"agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
|
||||
"agent_scratchpad": lambda x: self.format_log_to_str(
|
||||
x["intermediate_steps"]
|
||||
),
|
||||
}
|
||||
|
||||
executor_args = {
|
||||
"llm": self.llm,
|
||||
"i18n": self.i18n,
|
||||
"tools": self.tools,
|
||||
"tools": self._parse_tools(self.tools),
|
||||
"verbose": self.verbose,
|
||||
"handle_parsing_errors": True,
|
||||
"max_iterations": self.max_iter,
|
||||
"step_callback": self.step_callback,
|
||||
"tools_handler": self.tools_handler,
|
||||
"function_calling_llm": self.function_calling_llm,
|
||||
}
|
||||
|
||||
if self._rpm_controller:
|
||||
@@ -198,9 +229,11 @@ class Agent(BaseModel):
|
||||
)
|
||||
executor_args["memory"] = summary_memory
|
||||
agent_args["chat_history"] = lambda x: x["chat_history"]
|
||||
prompt = Prompts(i18n=self.i18n).task_execution_with_memory()
|
||||
prompt = Prompts(
|
||||
i18n=self.i18n, tools=self.tools
|
||||
).task_execution_with_memory()
|
||||
else:
|
||||
prompt = Prompts(i18n=self.i18n).task_execution()
|
||||
prompt = Prompts(i18n=self.i18n, tools=self.tools).task_execution()
|
||||
|
||||
execution_prompt = prompt.partial(
|
||||
goal=self.goal,
|
||||
@@ -209,17 +242,33 @@ class Agent(BaseModel):
|
||||
)
|
||||
|
||||
bind = self.llm.bind(stop=[self.i18n.slice("observation")])
|
||||
inner_agent = (
|
||||
agent_args
|
||||
| execution_prompt
|
||||
| bind
|
||||
| CrewAgentOutputParser(
|
||||
tools_handler=self.tools_handler,
|
||||
cache=self.cache_handler,
|
||||
i18n=self.i18n,
|
||||
)
|
||||
inner_agent = agent_args | execution_prompt | bind | CrewAgentParser()
|
||||
self.agent_executor = CrewAgentExecutor(
|
||||
agent=RunnableAgent(runnable=inner_agent), **executor_args
|
||||
)
|
||||
self.agent_executor = CrewAgentExecutor(agent=inner_agent, **executor_args)
|
||||
|
||||
def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
|
||||
"""Parse tools to be used for the task."""
|
||||
tools_list = []
|
||||
for tool in tools:
|
||||
if isinstance(tool, CrewAITool):
|
||||
tools_list.append(tool.to_langchain())
|
||||
else:
|
||||
tools_list.append(tool)
|
||||
return tools_list
|
||||
|
||||
def format_log_to_str(
|
||||
self,
|
||||
intermediate_steps: List[Tuple[AgentAction, str]],
|
||||
observation_prefix: str = "Result: ",
|
||||
llm_prefix: str = "",
|
||||
) -> str:
|
||||
"""Construct the scratchpad that lets the agent continue its thought process."""
|
||||
thoughts = ""
|
||||
for action, observation in intermediate_steps:
|
||||
thoughts += action.log
|
||||
thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
|
||||
return thoughts
|
||||
|
||||
@staticmethod
|
||||
def __tools_names(tools) -> str:
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from .cache.cache_handler import CacheHandler
|
||||
from .executor import CrewAgentExecutor
|
||||
from .output_parser import CrewAgentOutputParser
|
||||
from .parser import CrewAgentParser
|
||||
from .tools_handler import ToolsHandler
|
||||
|
||||
1
src/crewai/agents/cache/__init__.py
vendored
@@ -1,2 +1 @@
|
||||
from .cache_handler import CacheHandler
|
||||
from .cache_hit import CacheHit
|
||||
|
||||
6
src/crewai/agents/cache/cache_handler.py
vendored
@@ -1,20 +1,16 @@
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import PrivateAttr
|
||||
|
||||
|
||||
class CacheHandler:
|
||||
"""Callback handler for tool usage."""
|
||||
|
||||
_cache: PrivateAttr = {}
|
||||
_cache: dict = {}
|
||||
|
||||
def __init__(self):
|
||||
self._cache = {}
|
||||
|
||||
def add(self, tool, input, output):
|
||||
input = input.strip()
|
||||
self._cache[f"{tool}-{input}"] = output
|
||||
|
||||
def read(self, tool, input) -> Optional[str]:
|
||||
input = input.strip()
|
||||
return self._cache.get(f"{tool}-{input}")
|
||||
|
||||
18
src/crewai/agents/cache/cache_hit.py
vendored
@@ -1,18 +0,0 @@
from typing import Any

from pydantic import BaseModel, Field

from .cache_handler import CacheHandler


class CacheHit(BaseModel):
    """Cache Hit Object."""

    class Config:
        arbitrary_types_allowed = True

    # Making it Any instead of AgentAction to avoind
    # pydantic v1 vs v2 incompatibility, langchain should
    # soon be updated to pydantic v2
    action: Any = Field(description="Action taken")
    cache: CacheHandler = Field(description="Cache Handler for the tool")
@@ -1,30 +0,0 @@
from langchain_core.exceptions import OutputParserException

from crewai.utilities import I18N


class TaskRepeatedUsageException(OutputParserException):
    """Exception raised when a task is used twice in a roll."""

    i18n: I18N = I18N()
    error: str = "TaskRepeatedUsageException"
    message: str

    def __init__(self, i18n: I18N, tool: str, tool_input: str, text: str):
        self.i18n = i18n
        self.text = text
        self.tool = tool
        self.tool_input = tool_input
        self.message = self.i18n.errors("task_repeated_usage").format(
            tool=tool, tool_input=tool_input
        )

        super().__init__(
            error=self.error,
            observation=self.message,
            send_to_llm=True,
            llm_output=self.text,
        )

    def __str__(self):
        return self.message
@@ -3,25 +3,32 @@ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union

from langchain.agents import AgentExecutor
from langchain.agents.agent import ExceptionTool
from langchain.agents.tools import InvalidTool
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain_core.agents import AgentAction, AgentFinish, AgentStep
from langchain_core.exceptions import OutputParserException
from langchain_core.pydantic_v1 import root_validator
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from pydantic import InstanceOf

from crewai.agents.cache.cache_hit import CacheHit
from crewai.tools.cache_tools import CacheTools
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N


class CrewAgentExecutor(AgentExecutor):
    i18n: I18N = I18N()
    _i18n: I18N = I18N()
    llm: Any = None
    iterations: int = 0
    task: Any = None
    tools_description: str = ""
    tools_names: str = ""
    function_calling_llm: Any = None
    request_within_rpm_limit: Any = None
    tools_handler: InstanceOf[ToolsHandler] = None
    max_iterations: Optional[int] = 15
    force_answer_max_iterations: Optional[int] = None
    step_callback: Optional[Any] = None

    @root_validator()
    def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
@@ -31,11 +38,6 @@ class CrewAgentExecutor(AgentExecutor):
    def _should_force_answer(self) -> bool:
        return True if self.iterations == self.force_answer_max_iterations else False

    def _force_answer(self, output: AgentAction):
        return AgentStep(
            action=output, observation=self.i18n.errors("used_too_many_tools")
        )

    def _call(
        self,
        inputs: Dict[str, str],
@@ -63,6 +65,10 @@ class CrewAgentExecutor(AgentExecutor):
                intermediate_steps,
                run_manager=run_manager,
            )

            if self.step_callback:
                self.step_callback(next_step_output)

            if isinstance(next_step_output, AgentFinish):
                return self._return(
                    next_step_output, intermediate_steps, run_manager=run_manager
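A note on the new `step_callback` hook (illustrative, not from the diff): the hunk above only shows that the callback, when set, is invoked with each `next_step_output`; what it does with that value is up to the caller. A logging callback could be as simple as:

```python
# Hypothetical example: the diff only guarantees that step_callback is called
# with the output of each step; printing it is one possible use.
def log_step(step_output) -> None:
    print(f"[step] {type(step_output).__name__}: {step_output}")

# Passed when building the executor, e.g. (sketch, argument wiring assumed):
# CrewAgentExecutor(agent=inner_agent, step_callback=log_step, **executor_args)
```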
@@ -98,19 +104,28 @@ class CrewAgentExecutor(AgentExecutor):
        """
        try:
            intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)

            # Call the LLM to see what to do.
            output = self.agent.plan(
                intermediate_steps,
                callbacks=run_manager.get_child() if run_manager else None,
                **inputs,
            )

            if self._should_force_answer():
                if isinstance(output, AgentFinish):
                    yield output
                    return

                if isinstance(output, AgentAction):
                    output = output
                else:
                    output = output.action
                    yield self._force_answer(output)
                    raise ValueError(
                        f"Unexpected output type from agent: {type(output)}"
                    )

                yield AgentStep(
                    action=output, observation=self._i18n.errors("force_final_answer")
                )
                return

        except OutputParserException as e:
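As a reading aid (simplified, assumed names): the force-answer path above means that once the iteration counter reaches `force_answer_max_iterations`, the executor stops planning further tool calls and yields an `AgentStep` carrying the `force_final_answer` message instead. Reduced to its core:

```python
# Simplified sketch of the force-answer check; not the executor itself.
def should_force_answer(iterations: int, force_answer_max_iterations: int) -> bool:
    return iterations == force_answer_max_iterations


for i in range(5):
    if should_force_answer(i, force_answer_max_iterations=3):
        print(f"iteration {i}: inject 'force_final_answer' and stop calling tools")
        break
    print(f"iteration {i}: plan and use tools as usual")
```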
@@ -128,14 +143,14 @@ class CrewAgentExecutor(AgentExecutor):
            text = str(e)
            if isinstance(self.handle_parsing_errors, bool):
                if e.send_to_llm:
                    observation = str(e.observation)
                    observation = f"\n{str(e.observation)}"
                    text = str(e.llm_output)
                else:
                    observation = "Invalid or incomplete response"
                    observation = ""
            elif isinstance(self.handle_parsing_errors, str):
                observation = self.handle_parsing_errors
                observation = f"\n{self.handle_parsing_errors}"
            elif callable(self.handle_parsing_errors):
                observation = self.handle_parsing_errors(e)
                observation = f"\n{self.handle_parsing_errors(e)}"
            else:
                raise ValueError("Got unexpected type of `handle_parsing_errors`")
            output = AgentAction("_Exception", observation, text)
@@ -151,7 +166,9 @@ class CrewAgentExecutor(AgentExecutor):
            )

            if self._should_force_answer():
                yield self._force_answer(output)
                yield AgentStep(
                    action=output, observation=self._i18n.errors("force_final_answer")
                )
                return

            yield AgentStep(action=output, observation=observation)
@@ -162,17 +179,6 @@ class CrewAgentExecutor(AgentExecutor):
            yield output
            return

        # Override tool usage to use CacheTools
        if isinstance(output, CacheHit):
            cache = output.cache
            action = output.action
            tool = CacheTools(cache_handler=cache).tool()
            output = action.copy()
            output.tool_input = f"tool:{action.tool}|input:{action.tool_input}"
            output.tool = tool.name
            name_to_tool_map[tool.name] = tool
            color_mapping[tool.name] = color_mapping[action.tool]

        actions: List[AgentAction]
        actions = [output] if isinstance(output, AgentAction) else output
        yield from actions
@@ -180,31 +186,27 @@ class CrewAgentExecutor(AgentExecutor):
            if run_manager:
                run_manager.on_agent_action(agent_action, color="green")
            # Otherwise we lookup the tool
            if agent_action.tool in name_to_tool_map:
                tool = name_to_tool_map[agent_action.tool]
                return_direct = tool.return_direct
                color = color_mapping[agent_action.tool]
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                if return_direct:
                    tool_run_kwargs["llm_prefix"] = ""
                # We then call the tool on the tool input to get an observation
                observation = tool.run(
                    agent_action.tool_input,
                    verbose=self.verbose,
                    color=color,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
            tool_usage = ToolUsage(
                tools_handler=self.tools_handler,
                tools=self.tools,
                tools_description=self.tools_description,
                tools_names=self.tools_names,
                function_calling_llm=self.function_calling_llm,
                llm=self.llm,
                task=self.task,
            )
            tool_calling = tool_usage.parse(agent_action.log)

            if isinstance(tool_calling, ToolUsageErrorException):
                observation = tool_calling.message
            else:
                tool_run_kwargs = self.agent.tool_run_logging_kwargs()
                observation = InvalidTool().run(
                    {
                        "requested_tool_name": agent_action.tool,
                        "available_tool_names": list(name_to_tool_map.keys()),
                    },
                    verbose=self.verbose,
                    color=None,
                    callbacks=run_manager.get_child() if run_manager else None,
                    **tool_run_kwargs,
                )
                if tool_calling.tool_name.lower().strip() in [
                    name.lower().strip() for name in name_to_tool_map
                ]:
                    observation = tool_usage.use(tool_calling, agent_action.log)
                else:
                    observation = self._i18n.errors("wrong_tool_name").format(
                        tool=tool_calling.tool_name,
                        tools=", ".join([tool.name for tool in self.tools]),
                    )
            yield AgentStep(action=agent_action, observation=observation)
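To summarize the new dispatch flow above (sketch only, with hypothetical stand-ins for `ToolUsage`): the action log is parsed into a tool call, the tool name is matched case-insensitively against the known tools, and anything else produces the `wrong_tool_name` error text.

```python
# Illustrative reduction of the dispatch logic in the hunk above; the real code
# goes through ToolUsage.parse(...) and ToolUsage.use(...).
from typing import Callable, Dict


def dispatch(tool_name: str, known_tools: Dict[str, Callable[[], str]]) -> str:
    wanted = tool_name.lower().strip()
    for name, run in known_tools.items():
        if name.lower().strip() == wanted:
            return run()  # stands in for tool_usage.use(tool_calling, agent_action.log)
    return f"Wrong tool name: '{tool_name}'. Available tools: {', '.join(known_tools)}"


tools = {"search": lambda: "search ran"}
print(dispatch(" Search ", tools))  # -> search ran
print(dispatch("browser", tools))   # -> wrong-tool message
```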
||||