Mirror of https://github.com/crewAIInc/crewAI.git
Synced 2025-12-16 12:28:30 +00:00

Compare commits: fix/step-c ... brandon/en (170 commits)
Commit SHA1s in this range (author, date, and message columns did not survive the capture):

e6d33f626c be8e33daf6 efc8323c63 831951efc4 b1960ecb4c dfb2b4b47a b171e82990 d77902c537 90d610ed7e 9f8293349c
fb763885eb 389d5a9982 91144a692a 58a8c442da 0b70b3c8b4 8833ccc123 37ad9f9d9e 3f20ef101c d4afe9fe66 acb0d7e056
9fb98dbf01 219af9baa8 2b976e1f63 c653a2e691 6a4d3eb269 52133ebf99 d6e7750965 2131b94ddb 6ad314af8c 0dc2582ce1
26053c9d1a 2107512e84 b3504e768c 350457b9b8 355bf3b48b 0e94236735 673a38c5d9 8f57753656 a2f839fada 440883e9e8
d3da73136c 7272fd15ac 518800239c 30bd79390a d1e2430aac bfe2c44f55 845951a0db c1172a685a 4bcc3b532d ba89e43b62
4469461b38 a548463fae 45b802a625 ba0965ef87 d85898cf29 73f328860b a0c322a535 86f58c95de 99fe91586d 0c2d23dfe0
2433819c4f 97fc44c930 409892d65f 62f3df7ed5 4cf8913d31 82647358b2 6cc2f510bf 9a65abf6b8 b3185ad90c c887ff1f47
22e5d39884 9ee6824ccd da73865f25 627b9f1abb 1b8001bf98 e59e07e4f7 ee239b1c06 bf459bf983 94eaa6740e 6d7c1b0743
6b864ee21d 1ffa8904db ad916abd76 9702711094 8094754239 bc5e303d5f ec89e003c8 0b0f2d30ab 1df61aba4c da9220fa81
da4f356fab d932b20c6e 2f9a2afd9e c1df7c410e 54ebd6cf90 6b87d22a70 c4f7eaf259 236e42d0bc 8c90db04b5 1261ce513f
b07c51532c d763eefc2e e01c0a0f4c 5a7a323f3a 46be5e8097 bc2a86d66a 11a3d4b840 6930b68484 c7c0647dd2 7b276e6797
3daba0c79e 2c85e8e23a b0f1d1fcf0 611526596a fa373f9660 48bb8ef775 bbea797b0c 066ad73423 0695c26703 4fb3331c6a
b6c6eea6f5 1af95f5146 ed3487aa22 77af733e44 aaf80d1d43 9e9b945a46 308a8dc925 7d9d0ff6f7 f8a8e7b2a5 3285c1b196
4bc23affe0 bca56eea48 588ad3c4a4 c6a6c918e0 366bbbbea3 293305790d 8bc09eb054 db1b678c3a 6f32bf52cc 49d173a02d
4069b621d5 a7147c99c6 6fe308202e 63ecb7395d 8cf1cd5a62 93c0467bba 8f5f67de41 f8ca49d8df c119230fd6 14a36d3f5e
fde1ee45f9 6774bc2c53 94c62263ed 495c3859af 3e003f5e32 1c8b509d7d 58af5c08f9 55e968c9e0 0b9092702b 8376698534
3dc02310b6 e70bc94ab6 9285ebf8a2 4ca785eb15 c57cbd8591 7fb1289205 f02681ae01 c725105b1f 36aa4bcb46 b98f8f9fe1
.github/ISSUE_TEMPLATE/bug_report.yml (3 changes)

````diff
@@ -65,7 +65,6 @@ body:
         - '3.10'
         - '3.11'
         - '3.12'
-        - '3.13'
       validations:
         required: true
   - type: input
@@ -113,4 +112,4 @@ body:
         label: Additional context
         description: Add any other context about the problem here.
       validations:
-        required: true
+        required: true
````
.github/workflows/linter.yml (4 changes)

````diff
@@ -6,11 +6,11 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

      - name: Install Requirements
        run: |
          pip install ruff

      - name: Run Ruff Linter
-        run: ruff check --exclude "templates","__init__.py"
+        run: ruff check
````
.github/workflows/mkdocs.yml (8 changes)

````diff
@@ -13,10 +13,10 @@ jobs:

     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4

       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: '3.10'

@@ -25,7 +25,7 @@ jobs:
         run: echo "::set-output name=hash::$(sha256sum requirements-doc.txt | awk '{print $1}')"

       - name: Setup cache
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           key: mkdocs-material-${{ steps.req-hash.outputs.hash }}
           path: .cache
@@ -42,4 +42,4 @@ jobs:
           GH_TOKEN: ${{ secrets.GH_TOKEN }}

       - name: Build and deploy MkDocs
-        run: mkdocs gh-deploy --force
+        run: mkdocs gh-deploy --force
````
.github/workflows/security-checker.yml (4 changes)

````diff
@@ -11,7 +11,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Set up Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.11.9"

@@ -19,5 +19,5 @@ jobs:
         run: pip install bandit

       - name: Run Bandit
-        run: bandit -c pyproject.toml -r src/ -lll
+        run: bandit -c pyproject.toml -r src/ -ll
````
.github/workflows/stale.yml (8 changes)

````diff
@@ -1,5 +1,10 @@
 name: Mark stale issues and pull requests

+permissions:
+  contents: write
+  issues: write
+  pull-requests: write
+
 on:
   schedule:
     - cron: '10 12 * * *'
@@ -8,9 +13,6 @@ on:
 jobs:
   stale:
     runs-on: ubuntu-latest
-    permissions:
-      issues: write
-      pull-requests: write
     steps:
       - uses: actions/stale@v9
         with:
````
.github/workflows/tests.yml (6 changes)

````diff
@@ -23,10 +23,10 @@ jobs:


       - name: Set up Python
-        run: uv python install 3.11.9
+        run: uv python install 3.12.8

       - name: Install the project
-        run: uv sync --dev
+        run: uv sync --dev --all-extras

       - name: Run tests
-        run: uv run pytest tests
+        run: uv run pytest tests -vv
````
.github/workflows/type-checker.yml (2 changes)

````diff
@@ -14,7 +14,7 @@ jobs:
         uses: actions/checkout@v4

       - name: Setup Python
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
         with:
           python-version: "3.11.9"
````
.gitignore (5 changes)

````diff
@@ -17,3 +17,8 @@ rc-tests/*
 temp/*
 .vscode/*
 crew_tasks_output.json
+.codesight
+.mypy_cache
+.ruff_cache
+.venv
+agentops.log
````
.pre-commit-config.yaml

````diff
@@ -1,9 +1,7 @@
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.4.4
+    rev: v0.8.2
     hooks:
       - id: ruff
         args: ["--fix"]
-        exclude: "templates"
       - id: ruff-format
-        exclude: "templates"
````
.ruff.toml (new file, +9 lines)

````diff
@@ -0,0 +1,9 @@
+exclude = [
+    "templates",
+    "__init__.py",
+]
+
+[lint]
+select = [
+    "I",  # isort rules
+]
````
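The new `.ruff.toml` absorbs the excludes dropped from linter.yml and the pre-commit hooks above, and newly enables ruff's `I` (isort) rules. As a minimal sketch of what those rules enforce, assuming a module with unordered imports:

```python
# before.py: `ruff check` with the "I" rules selected flags this ordering
import os
from crewai import Agent
import json

# After `ruff check --fix`: standard library imports grouped first,
# then third-party, with the groups separated by a blank line.
import json
import os

from crewai import Agent
```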
README.md (189 changes)

````diff
@@ -4,7 +4,7 @@

 # **CrewAI**

-🤖 **CrewAI**: Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks.
+🤖 **CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.

 <h3>
````
````diff
@@ -22,13 +22,17 @@
 - [Why CrewAI?](#why-crewai)
 - [Getting Started](#getting-started)
 - [Key Features](#key-features)
+- [Understanding Flows and Crews](#understanding-flows-and-crews)
+- [CrewAI vs LangGraph](#how-crewai-compares)
 - [Examples](#examples)
   - [Quick Tutorial](#quick-tutorial)
   - [Write Job Descriptions](#write-job-descriptions)
   - [Trip Planner](#trip-planner)
   - [Stock Analysis](#stock-analysis)
+  - [Using Crews and Flows Together](#using-crews-and-flows-together)
 - [Connecting Your Crew to a Model](#connecting-your-crew-to-a-model)
 - [How CrewAI Compares](#how-crewai-compares)
+- [Frequently Asked Questions (FAQ)](#frequently-asked-questions-faq)
 - [Contribution](#contribution)
 - [Telemetry](#telemetry)
 - [License](#license)
````
````diff
@@ -36,22 +40,51 @@
 ## Why CrewAI?

-The power of AI collaboration has too much to offer.
-CrewAI is designed to enable AI agents to assume roles, share goals, and operate in a cohesive unit - much like a well-oiled crew. Whether you're building a smart assistant platform, an automated customer service ensemble, or a multi-agent research team, CrewAI provides the backbone for sophisticated multi-agent interactions.
+CrewAI is a standalone framework, built from the ground up without dependencies on Langchain or other agent frameworks. It's designed to enable AI agents to assume roles, share goals, and operate in a cohesive unit - much like a well-oiled crew. Whether you're building a smart assistant platform, an automated customer service ensemble, or a multi-agent research team, CrewAI provides the backbone for sophisticated multi-agent interactions.

 ## Getting Started

+### Learning Resources
+
+Learn CrewAI through our comprehensive courses:
+- [Multi AI Agent Systems with CrewAI](https://www.deeplearning.ai/short-courses/multi-ai-agent-systems-with-crewai/) - Master the fundamentals of multi-agent systems
+- [Practical Multi AI Agents and Advanced Use Cases](https://www.deeplearning.ai/short-courses/practical-multi-ai-agents-and-advanced-use-cases-with-crewai/) - Deep dive into advanced implementations
+
+### Understanding Flows and Crews
+
+CrewAI offers two powerful, complementary approaches that work seamlessly together to build sophisticated AI applications:
+
+1. **Crews**: Teams of AI agents with true autonomy and agency, working together to accomplish complex tasks through role-based collaboration. Crews enable:
+   - Natural, autonomous decision-making between agents
+   - Dynamic task delegation and collaboration
+   - Specialized roles with defined goals and expertise
+   - Flexible problem-solving approaches
+
+2. **Flows**: Production-ready, event-driven workflows that deliver precise control over complex automations. Flows provide:
+   - Fine-grained control over execution paths for real-world scenarios
+   - Secure, consistent state management between tasks
+   - Clean integration of AI agents with production Python code
+   - Conditional branching for complex business logic
+
+The true power of CrewAI emerges when combining Crews and Flows. This synergy allows you to:
+- Build complex, production-grade applications
+- Balance autonomy with precise control
+- Handle sophisticated real-world scenarios
+- Maintain clean, maintainable code structure
+
+### Getting Started with Installation
+
 To get started with CrewAI, follow these simple steps:

 ### 1. Installation

-Ensure you have Python >=3.10 <=3.13 installed on your system. CrewAI uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
+Ensure you have Python >=3.10 <3.13 installed on your system. CrewAI uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.

 First, install CrewAI:

 ```shell
 pip install crewai
 ```

 If you want to install the 'crewai' package along with its optional features that include additional tools for agents, you can do so by using the following command:

 ```shell
````
````diff
@@ -59,6 +92,22 @@ pip install 'crewai[tools]'
 ```
 The command above installs the basic package and also adds extra components which require more dependencies to function.

+### Troubleshooting Dependencies
+
+If you encounter issues during installation or usage, here are some common solutions:
+
+#### Common Issues
+
+1. **ModuleNotFoundError: No module named 'tiktoken'**
+   - Install tiktoken explicitly: `pip install 'crewai[embeddings]'`
+   - If using embedchain or other tools: `pip install 'crewai[tools]'`
+
+2. **Failed building wheel for tiktoken**
+   - Ensure Rust compiler is installed (see installation steps above)
+   - For Windows: Verify Visual C++ Build Tools are installed
+   - Try upgrading pip: `pip install --upgrade pip`
+   - If issues persist, use a pre-built wheel: `pip install tiktoken --prefer-binary`
+
 ### 2. Setting Up Your Crew with the YAML Configuration

 To create a new CrewAI project, run the following CLI (Command Line Interface) command:
````
````diff
@@ -100,7 +149,7 @@ You can now start developing your crew by editing the files in the `src/my_project` folder

 #### Example of a simple crew with a sequential process:

-Instatiate your crew:
+Instantiate your crew:

 ```shell
 crewai create crew latest-ai-development
````
````diff
@@ -121,7 +170,7 @@ researcher:
     You're a seasoned researcher with a knack for uncovering the latest
     developments in {topic}. Known for your ability to find the most relevant
     information and present it in a clear and concise manner.
-
+
 reporting_analyst:
   role: >
     {topic} Reporting Analyst
````
````diff
@@ -205,7 +254,7 @@ class LatestAiDevelopmentCrew():
       tasks=self.tasks, # Automatically created by the @task decorator
       process=Process.sequential,
       verbose=True,
-    )
+    )
 ```

 **main.py**
````
````diff
@@ -264,13 +313,16 @@ In addition to the sequential process, you can use the hierarchical process
 ## Key Features

-- **Role-Based Agent Design**: Customize agents with specific roles, goals, and tools.
-- **Autonomous Inter-Agent Delegation**: Agents can autonomously delegate tasks and inquire amongst themselves, enhancing problem-solving efficiency.
-- **Flexible Task Management**: Define tasks with customizable tools and assign them to agents dynamically.
-- **Processes Driven**: Currently only supports `sequential` task execution and `hierarchical` processes, but more complex processes like consensual and autonomous are being worked on.
-- **Save output as file**: Save the output of individual tasks as a file, so you can use it later.
-- **Parse output as Pydantic or Json**: Parse the output of individual tasks as a Pydantic model or as a Json if you want to.
-- **Works with Open Source Models**: Run your crew using Open AI or open source models refer to the [Connect CrewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models, even ones running locally!
+**Note**: CrewAI is a standalone framework built from the ground up, without dependencies on Langchain or other agent frameworks.
+
+- **Deep Customization**: Build sophisticated agents with full control over the system - from overriding inner prompts to accessing low-level APIs. Customize roles, goals, tools, and behaviors while maintaining clean abstractions.
+- **Autonomous Inter-Agent Delegation**: Agents can autonomously delegate tasks and inquire amongst themselves, enabling complex problem-solving in real-world scenarios.
+- **Flexible Task Management**: Define and customize tasks with granular control, from simple operations to complex multi-step processes.
+- **Production-Grade Architecture**: Support for both high-level abstractions and low-level customization, with robust error handling and state management.
+- **Predictable Results**: Ensure consistent, accurate outputs through programmatic guardrails, agent training capabilities, and flow-based execution control. See our [documentation on guardrails](https://docs.crewai.com/how-to/guardrails/) for implementation details.
+- **Model Flexibility**: Run your crew using OpenAI or open source models with production-ready integrations. See [Connect CrewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) for detailed configuration options.
+- **Event-Driven Flows**: Build complex, real-world workflows with precise control over execution paths, state management, and conditional logic.
+- **Process Orchestration**: Achieve any workflow pattern through flows - from simple sequential and hierarchical processes to complex, custom orchestration patterns with conditional branching and parallel execution.

 
````
````diff
@@ -305,6 +357,98 @@ You can test different real life examples of AI crews in the CrewAI-examples repository

 [](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")

+### Using Crews and Flows Together
+
+CrewAI's power truly shines when combining Crews with Flows to create sophisticated automation pipelines. Here's how you can orchestrate multiple Crews within a Flow:
+
+```python
+from crewai.flow.flow import Flow, listen, start, router
+from crewai import Crew, Agent, Task, Process  # Process is needed for process=Process.sequential below
+from pydantic import BaseModel
+
+# Define structured state for precise control
+class MarketState(BaseModel):
+    sentiment: str = "neutral"
+    confidence: float = 0.0
+    recommendations: list = []
+
+class AdvancedAnalysisFlow(Flow[MarketState]):
+    @start()
+    def fetch_market_data(self):
+        # Demonstrate low-level control with structured state
+        self.state.sentiment = "analyzing"
+        return {"sector": "tech", "timeframe": "1W"}  # These parameters match the task description template
+
+    @listen(fetch_market_data)
+    def analyze_with_crew(self, market_data):
+        # Show crew agency through specialized roles
+        analyst = Agent(
+            role="Senior Market Analyst",
+            goal="Conduct deep market analysis with expert insight",
+            backstory="You're a veteran analyst known for identifying subtle market patterns"
+        )
+        researcher = Agent(
+            role="Data Researcher",
+            goal="Gather and validate supporting market data",
+            backstory="You excel at finding and correlating multiple data sources"
+        )
+
+        analysis_task = Task(
+            description="Analyze {sector} sector data for the past {timeframe}",
+            expected_output="Detailed market analysis with confidence score",
+            agent=analyst
+        )
+        research_task = Task(
+            description="Find supporting data to validate the analysis",
+            expected_output="Corroborating evidence and potential contradictions",
+            agent=researcher
+        )
+
+        # Demonstrate crew autonomy
+        analysis_crew = Crew(
+            agents=[analyst, researcher],
+            tasks=[analysis_task, research_task],
+            process=Process.sequential,
+            verbose=True
+        )
+        return analysis_crew.kickoff(inputs=market_data)  # Pass market_data as named inputs
+
+    @router(analyze_with_crew)
+    def determine_next_steps(self):
+        # Show flow control with conditional routing
+        if self.state.confidence > 0.8:
+            return "high_confidence"
+        elif self.state.confidence > 0.5:
+            return "medium_confidence"
+        return "low_confidence"
+
+    @listen("high_confidence")
+    def execute_strategy(self):
+        # Demonstrate complex decision making
+        strategy_crew = Crew(
+            agents=[
+                Agent(role="Strategy Expert",
+                      goal="Develop optimal market strategy",
+                      backstory="You are an experienced strategist")  # backstory is a required Agent field
+            ],
+            tasks=[
+                Task(description="Create detailed strategy based on analysis",
+                     expected_output="Step-by-step action plan")
+            ]
+        )
+        return strategy_crew.kickoff()
+
+    @listen("medium_confidence", "low_confidence")
+    def request_additional_analysis(self):
+        self.state.recommendations.append("Gather more data")
+        return "Additional analysis required"
+```
+
+This example demonstrates how to:
+1. Use Python code for basic data operations
+2. Create and execute Crews as steps in your workflow
+3. Use Flow decorators to manage the sequence of operations
+4. Implement conditional branching based on Crew results
+
 ## Connecting Your Crew to a Model

 CrewAI supports using various LLMs through a variety of connection options. By default your agents will use the OpenAI API when querying the model. However, there are several other ways to allow your agents to connect to models. For example, you can configure your agents to use a local model via the Ollama tool.
````
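For context on the example above: once defined, a Flow subclass is driven with `kickoff()`. A minimal usage sketch, assuming `AdvancedAnalysisFlow` from the README snippet is in scope:

```python
# Hypothetical driver for the AdvancedAnalysisFlow example above.
flow = AdvancedAnalysisFlow()
result = flow.kickoff()  # Runs @start, then the listeners and router in order
print(result)
```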
````diff
@@ -313,9 +457,13 @@ Please refer to the Connect CrewAI to LLMs documentation
 ## How CrewAI Compares

-**CrewAI's Advantage**: CrewAI is built with production in mind. It offers the flexibility of Autogen's conversational agents and the structured process approach of ChatDev, but without the rigidity. CrewAI's processes are designed to be dynamic and adaptable, fitting seamlessly into both development and production workflows.
+**CrewAI's Advantage**: CrewAI combines autonomous agent intelligence with precise workflow control through its unique Crews and Flows architecture. The framework excels at both high-level orchestration and low-level customization, enabling complex, production-grade systems with granular control.

-- **Autogen**: While Autogen does good in creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.
+- **LangGraph**: While LangGraph provides a foundation for building agent workflows, its approach requires significant boilerplate code and complex state management patterns. The framework's tight coupling with LangChain can limit flexibility when implementing custom agent behaviors or integrating with external systems.
+
+*P.S. CrewAI demonstrates significant performance advantages over LangGraph, executing 5.76x faster in certain cases like this QA task example ([see comparison](https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/CrewAI%20Flows%20%26%20Langgraph/QA%20Agent)) while achieving higher evaluation scores with faster completion times in certain coding tasks, like in this example ([detailed analysis](https://github.com/crewAIInc/crewAI-examples/blob/main/Notebooks/CrewAI%20Flows%20%26%20Langgraph/Coding%20Assistant/coding_assistant_eval.ipynb)).*
+
+- **Autogen**: While Autogen excels at creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.

 - **ChatDev**: ChatDev introduced the idea of processes into the realm of AI agents, but its implementation is quite rigid. Customizations in ChatDev are limited and not geared towards production environments, which can hinder scalability and flexibility in real-world applications.
````
||||
````diff
@@ -357,7 +505,7 @@ uv run pytest .
 ### Running static type checks

 ```bash
-uvx mypy
+uvx mypy src
 ```

 ### Packaging
````
````diff
@@ -376,7 +524,7 @@ pip install dist/*.tar.gz

 CrewAI uses anonymous telemetry to collect usage data with the main purpose of helping us improve the library by focusing our efforts on the most used features, integrations and tools.

-It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables, with the exception of the conditions mentioned. When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights while respecting user privacy. We don't offer a way to disable it now, but we will in the future.
+It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables, with the exception of the conditions mentioned. When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights while respecting user privacy. Users can disable telemetry by setting the environment variable OTEL_SDK_DISABLED to true.

 Data collected includes:
````
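The updated telemetry paragraph names the standard OpenTelemetry kill switch. A minimal sketch of the opt-out, assuming the variable is read when `crewai` is imported:

```python
import os

# Disable CrewAI's OpenTelemetry-based telemetry, as described above.
os.environ["OTEL_SDK_DISABLED"] = "true"

import crewai  # noqa: E402 - imported after setting the environment on purpose
```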
````diff
@@ -399,7 +547,7 @@ Data collected includes:
 - Roles of agents in a crew
   - Understand high level use cases so we can build better tools, integrations and examples about it
 - Tools names available
-  - Understand out of the publically available tools, which ones are being used the most so we can improve them
+  - Understand out of the publicly available tools, which ones are being used the most so we can improve them

 Users can opt-in to Further Telemetry, sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.
````
````diff
@@ -440,5 +588,8 @@ A: CrewAI uses anonymous telemetry to collect usage data for improvement purposes
 ### Q: Where can I find examples of CrewAI in action?
 A: You can find various real-life examples in the [CrewAI-examples repository](https://github.com/crewAIInc/crewAI-examples), including trip planners, stock analysis tools, and more.

+### Q: What is the difference between Crews and Flows?
+A: Crews and Flows serve different but complementary purposes in CrewAI. Crews are teams of AI agents working together to accomplish specific tasks through role-based collaboration, delivering accurate and predictable results. Flows, on the other hand, are event-driven workflows that can orchestrate both Crews and regular Python code, allowing you to build complex automation pipelines with secure state management and conditional execution paths.
+
 ### Q: How can I contribute to CrewAI?
 A: Contributions are welcome! You can fork the repository, create a new branch for your feature, add your improvement, and send a pull request. Check the Contribution section in the README for more details.
````
docs/concepts/agents.mdx

````diff
@@ -1,161 +1,345 @@
 ---
 title: Agents
-description: What are CrewAI Agents and how to use them.
+description: Detailed guide on creating and managing agents within the CrewAI framework.
 icon: robot
 ---

-## What is an agent?
+## Overview of an Agent

-An agent is an **autonomous unit** programmed to:
-<ul>
-  <li class='leading-3'>Perform tasks</li>
-  <li class='leading-3'>Make decisions</li>
-  <li class='leading-3'>Communicate with other agents</li>
-</ul>
+In the CrewAI framework, an `Agent` is an autonomous unit that can:
+- Perform specific tasks
+- Make decisions based on its role and goal
+- Use tools to accomplish objectives
+- Communicate and collaborate with other agents
+- Maintain memory of interactions
+- Delegate tasks when allowed

 <Tip>
-  Think of an agent as a member of a team, with specific skills and a particular job to do. Agents can have different roles like `Researcher`, `Writer`, or `Customer Support`, each contributing to the overall goal of the crew.
+  Think of an agent as a specialized team member with specific skills, expertise, and responsibilities. For example, a `Researcher` agent might excel at gathering and analyzing information, while a `Writer` agent might be better at creating content.
 </Tip>

-## Agent attributes
+## Agent Attributes

-| Attribute | Parameter | Description |
-| :--- | :--- | :--- |
-| **Role** | `role` | Defines the agent's function within the crew. It determines the kind of tasks the agent is best suited for. |
-| **Goal** | `goal` | The individual objective that the agent aims to achieve. It guides the agent's decision-making process. |
-| **Backstory** | `backstory` | Provides context to the agent's role and goal, enriching the interaction and collaboration dynamics. |
-| **LLM** *(optional)* | `llm` | Represents the language model that will run the agent. It dynamically fetches the model name from the `OPENAI_MODEL_NAME` environment variable, defaulting to "gpt-4" if not specified. |
-| **Tools** *(optional)* | `tools` | Set of capabilities or functions that the agent can use to perform tasks. Expected to be instances of custom classes compatible with the agent's execution environment. Tools are initialized with a default value of an empty list. |
-| **Function Calling LLM** *(optional)* | `function_calling_llm` | Specifies the language model that will handle the tool calling for this agent, overriding the crew function calling LLM if passed. Default is `None`. |
-| **Max Iter** *(optional)* | `max_iter` | Max Iter is the maximum number of iterations the agent can perform before being forced to give its best answer. Default is `25`. |
-| **Max RPM** *(optional)* | `max_rpm` | Max RPM is the maximum number of requests per minute the agent can perform to avoid rate limits. It's optional and can be left unspecified, with a default value of `None`. |
-| **Max Execution Time** *(optional)* | `max_execution_time` | Max Execution Time is the maximum execution time for an agent to execute a task. It's optional and can be left unspecified, with a default value of `None`, meaning no max execution time. |
-| **Verbose** *(optional)* | `verbose` | Setting this to `True` configures the internal logger to provide detailed execution logs, aiding in debugging and monitoring. Default is `False`. |
-| **Allow Delegation** *(optional)* | `allow_delegation` | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. Default is `False`. |
-| **Step Callback** *(optional)* | `step_callback` | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback`. |
-| **Cache** *(optional)* | `cache` | Indicates if the agent should use a cache for tool usage. Default is `True`. |
-| **System Template** *(optional)* | `system_template` | Specifies the system format for the agent. Default is `None`. |
-| **Prompt Template** *(optional)* | `prompt_template` | Specifies the prompt format for the agent. Default is `None`. |
-| **Response Template** *(optional)* | `response_template` | Specifies the response format for the agent. Default is `None`. |
-| **Allow Code Execution** *(optional)* | `allow_code_execution` | Enable code execution for the agent. Default is `False`. |
-| **Max Retry Limit** *(optional)* | `max_retry_limit` | Maximum number of retries for an agent to execute a task when an error occurs. Default is `2`. |
-| **Use System Prompt** *(optional)* | `use_system_prompt` | Adds the ability to not use system prompt (to support o1 models). Default is `True`. |
-| **Respect Context Window** *(optional)* | `respect_context_window` | Summary strategy to avoid overflowing the context window. Default is `True`. |
-| **Code Execution Mode** *(optional)* | `code_execution_mode` | Determines the mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution on the host machine). Default is `safe`. |
+| Attribute | Parameter | Type | Description |
+| :--- | :--- | :--- | :--- |
+| **Role** | `role` | `str` | Defines the agent's function and expertise within the crew. |
+| **Goal** | `goal` | `str` | The individual objective that guides the agent's decision-making. |
+| **Backstory** | `backstory` | `str` | Provides context and personality to the agent, enriching interactions. |
+| **LLM** _(optional)_ | `llm` | `Union[str, LLM, Any]` | Language model that powers the agent. Defaults to the model specified in `OPENAI_MODEL_NAME` or "gpt-4". |
+| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | Capabilities or functions available to the agent. Defaults to an empty list. |
+| **Function Calling LLM** _(optional)_ | `function_calling_llm` | `Optional[Any]` | Language model for tool calling, overrides crew's LLM if specified. |
+| **Max Iterations** _(optional)_ | `max_iter` | `int` | Maximum iterations before the agent must provide its best answer. Default is 20. |
+| **Max RPM** _(optional)_ | `max_rpm` | `Optional[int]` | Maximum requests per minute to avoid rate limits. |
+| **Max Execution Time** _(optional)_ | `max_execution_time` | `Optional[int]` | Maximum time (in seconds) for task execution. |
+| **Memory** _(optional)_ | `memory` | `bool` | Whether the agent should maintain memory of interactions. Default is True. |
+| **Verbose** _(optional)_ | `verbose` | `bool` | Enable detailed execution logs for debugging. Default is False. |
+| **Allow Delegation** _(optional)_ | `allow_delegation` | `bool` | Allow the agent to delegate tasks to other agents. Default is False. |
+| **Step Callback** _(optional)_ | `step_callback` | `Optional[Any]` | Function called after each agent step, overrides crew callback. |
+| **Cache** _(optional)_ | `cache` | `bool` | Enable caching for tool usage. Default is True. |
+| **System Template** _(optional)_ | `system_template` | `Optional[str]` | Custom system prompt template for the agent. |
+| **Prompt Template** _(optional)_ | `prompt_template` | `Optional[str]` | Custom prompt template for the agent. |
+| **Response Template** _(optional)_ | `response_template` | `Optional[str]` | Custom response template for the agent. |
+| **Allow Code Execution** _(optional)_ | `allow_code_execution` | `Optional[bool]` | Enable code execution for the agent. Default is False. |
+| **Max Retry Limit** _(optional)_ | `max_retry_limit` | `int` | Maximum number of retries when an error occurs. Default is 2. |
+| **Respect Context Window** _(optional)_ | `respect_context_window` | `bool` | Keep messages under context window size by summarizing. Default is True. |
+| **Code Execution Mode** _(optional)_ | `code_execution_mode` | `Literal["safe", "unsafe"]` | Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct). Default is 'safe'. |
+| **Embedder Config** _(optional)_ | `embedder_config` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
+| **Knowledge Sources** _(optional)_ | `knowledge_sources` | `Optional[List[BaseKnowledgeSource]]` | Knowledge sources available to the agent. |
+| **Use System Prompt** _(optional)_ | `use_system_prompt` | `Optional[bool]` | Whether to use system prompt (for o1 model support). Default is True. |
````
````diff
-## Creating an agent
+## Creating Agents

+There are two ways to create agents in CrewAI: using **YAML configuration (recommended)** or defining them **directly in code**.
+
+### YAML Configuration (Recommended)
+
+Using YAML configuration provides a cleaner, more maintainable way to define agents. We strongly recommend using this approach in your CrewAI projects.
+
+After creating your CrewAI project as outlined in the [Installation](/installation) section, navigate to the `src/latest_ai_development/config/agents.yaml` file and modify the template to match your requirements.
+
 <Note>
-  **Agent interaction**: Agents can interact with each other using CrewAI's built-in delegation and communication mechanisms. This allows for dynamic task management and problem-solving within the crew.
+  Variables in your YAML files (like `{topic}`) will be replaced with values from your inputs when running the crew:
+  ```python Code
+  crew.kickoff(inputs={'topic': 'AI Agents'})
+  ```
 </Note>

-To create an agent, you would typically initialize an instance of the `Agent` class with the desired properties. Here's a conceptual example including all attributes:
+Here's an example of how to configure agents using YAML:

-```python Code example
+```yaml agents.yaml
+# src/latest_ai_development/config/agents.yaml
+researcher:
+  role: >
+    {topic} Senior Data Researcher
+  goal: >
+    Uncover cutting-edge developments in {topic}
+  backstory: >
+    You're a seasoned researcher with a knack for uncovering the latest
+    developments in {topic}. Known for your ability to find the most relevant
+    information and present it in a clear and concise manner.
+
+reporting_analyst:
+  role: >
+    {topic} Reporting Analyst
+  goal: >
+    Create detailed reports based on {topic} data analysis and research findings
+  backstory: >
+    You're a meticulous analyst with a keen eye for detail. You're known for
+    your ability to turn complex data into clear and concise reports, making
+    it easy for others to understand and act on the information you provide.
+```
+
+To use this YAML configuration in your code, create a crew class that inherits from `CrewBase`:
+
+```python Code
+# src/latest_ai_development/crew.py
+from crewai import Agent, Crew, Process
+from crewai.project import CrewBase, agent, crew
+from crewai_tools import SerperDevTool
+
+@CrewBase
+class LatestAiDevelopmentCrew():
+  """LatestAiDevelopment crew"""
+
+  agents_config = "config/agents.yaml"
+
+  @agent
+  def researcher(self) -> Agent:
+    return Agent(
+      config=self.agents_config['researcher'],
+      verbose=True,
+      tools=[SerperDevTool()]
+    )
+
+  @agent
+  def reporting_analyst(self) -> Agent:
+    return Agent(
+      config=self.agents_config['reporting_analyst'],
+      verbose=True
+    )
+```
+
+<Note>
+  The names you use in your YAML files (`agents.yaml`) should match the method names in your Python code.
+</Note>
````
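A usage sketch for the YAML-configured crew above: `kickoff` interpolates `{topic}` into the role, goal, and backstory templates. This assumes the class also defines a `@crew`-decorated `crew()` method, as the standard project template does:

```python
# Hypothetical main.py driver for the LatestAiDevelopmentCrew example above.
from latest_ai_development.crew import LatestAiDevelopmentCrew

result = LatestAiDevelopmentCrew().crew().kickoff(inputs={"topic": "AI Agents"})
print(result.raw)  # CrewOutput exposes the final text via .raw
```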
````diff
+### Direct Code Definition
+
+You can create agents directly in code by instantiating the `Agent` class. Here's a comprehensive example showing all available parameters:
+
+```python Code
 from crewai import Agent
+from crewai_tools import SerperDevTool

+# Create an agent with all available parameters
 agent = Agent(
-    role='Data Analyst',
-    goal='Extract actionable insights',
-    backstory="""You're a data analyst at a large company.
-    You're responsible for analyzing data and providing insights
-    to the business.
-    You're currently working on a project to analyze the
-    performance of our marketing campaigns.""",
-    tools=[my_tool1, my_tool2],  # Optional, defaults to an empty list
-    llm=my_llm,  # Optional
-    function_calling_llm=my_llm,  # Optional
-    max_iter=15,  # Optional
-    max_rpm=None,  # Optional
-    max_execution_time=None,  # Optional
-    verbose=True,  # Optional
-    allow_delegation=False,  # Optional
-    step_callback=my_intermediate_step_callback,  # Optional
-    cache=True,  # Optional
-    system_template=my_system_template,  # Optional
-    prompt_template=my_prompt_template,  # Optional
-    response_template=my_response_template,  # Optional
-    config=my_config,  # Optional
-    crew=my_crew,  # Optional
-    tools_handler=my_tools_handler,  # Optional
-    cache_handler=my_cache_handler,  # Optional
-    callbacks=[callback1, callback2],  # Optional
-    allow_code_execution=True,  # Optional
-    max_retry_limit=2,  # Optional
-    use_system_prompt=True,  # Optional
-    respect_context_window=True,  # Optional
-    code_execution_mode='safe',  # Optional, defaults to 'safe'
+    role="Senior Data Scientist",
+    goal="Analyze and interpret complex datasets to provide actionable insights",
+    backstory="With over 10 years of experience in data science and machine learning, "
+              "you excel at finding patterns in complex datasets.",
+    llm="gpt-4",  # Default: OPENAI_MODEL_NAME or "gpt-4"
+    function_calling_llm=None,  # Optional: Separate LLM for tool calling
+    memory=True,  # Default: True
+    verbose=False,  # Default: False
+    allow_delegation=False,  # Default: False
+    max_iter=20,  # Default: 20 iterations
+    max_rpm=None,  # Optional: Rate limit for API calls
+    max_execution_time=None,  # Optional: Maximum execution time in seconds
+    max_retry_limit=2,  # Default: 2 retries on error
+    allow_code_execution=False,  # Default: False
+    code_execution_mode="safe",  # Default: "safe" (options: "safe", "unsafe")
+    respect_context_window=True,  # Default: True
+    use_system_prompt=True,  # Default: True
+    tools=[SerperDevTool()],  # Optional: List of tools
+    knowledge_sources=None,  # Optional: List of knowledge sources
+    embedder_config=None,  # Optional: Custom embedder configuration
+    system_template=None,  # Optional: Custom system prompt template
+    prompt_template=None,  # Optional: Custom prompt template
+    response_template=None,  # Optional: Custom response template
+    step_callback=None,  # Optional: Callback function for monitoring
 )
 ```

-## Setting prompt templates
+Let's break down some key parameter combinations for common use cases:

-Prompt templates are used to format the prompt for the agent. You can use to update the system, regular and response templates for the agent. Here's an example of how to set prompt templates:
+#### Basic Research Agent
+```python Code
+research_agent = Agent(
+    role="Research Analyst",
+    goal="Find and summarize information about specific topics",
+    backstory="You are an experienced researcher with attention to detail",
+    tools=[SerperDevTool()],
+    verbose=True  # Enable logging for debugging
+)
+```

-```python Code example
-agent = Agent(
-    role="{topic} specialist",
-    goal="Figure {goal} out",
-    backstory="I am the master of {role}",
-    system_template="""<|start_header_id|>system<|end_header_id|>
+#### Code Development Agent
+```python Code
+dev_agent = Agent(
+    role="Senior Python Developer",
+    goal="Write and debug Python code",
+    backstory="Expert Python developer with 10 years of experience",
+    allow_code_execution=True,
+    code_execution_mode="safe",  # Uses Docker for safety
+    max_execution_time=300,  # 5-minute timeout
+    max_retry_limit=3  # More retries for complex code tasks
+)
+```

+#### Long-Running Analysis Agent
+```python Code
+analysis_agent = Agent(
+    role="Data Analyst",
+    goal="Perform deep analysis of large datasets",
+    backstory="Specialized in big data analysis and pattern recognition",
+    memory=True,
+    respect_context_window=True,
+    max_rpm=10,  # Limit API calls
+    function_calling_llm="gpt-4o-mini"  # Cheaper model for tool calls
+)
+```

+#### Custom Template Agent
+```python Code
+custom_agent = Agent(
+    role="Customer Service Representative",
+    goal="Assist customers with their inquiries",
+    backstory="Experienced in customer support with a focus on satisfaction",
+    system_template="""<|start_header_id|>system<|end_header_id|>
 {{ .System }}<|eot_id|>""",
-    prompt_template="""<|start_header_id|>user<|end_header_id|>
+    prompt_template="""<|start_header_id|>user<|end_header_id|>
 {{ .Prompt }}<|eot_id|>""",
-    response_template="""<|start_header_id|>assistant<|end_header_id|>
+    response_template="""<|start_header_id|>assistant<|end_header_id|>
 {{ .Response }}<|eot_id|>""",
 )
 ```
````
````diff
-## Bring your third-party agents
+### Parameter Details

-Extend your third-party agents like LlamaIndex, Langchain, Autogen or fully custom agents using the the CrewAI's `BaseAgent` class.
+#### Critical Parameters
+- `role`, `goal`, and `backstory` are required and shape the agent's behavior
+- `llm` determines the language model used (default: OpenAI's GPT-4)

-<Note>
-  **BaseAgent** includes attributes and methods required to integrate with your crews to run and delegate tasks to other agents within your own crew.
-</Note>
+#### Memory and Context
+- `memory`: Enable to maintain conversation history
+- `respect_context_window`: Prevents token limit issues
+- `knowledge_sources`: Add domain-specific knowledge bases

+#### Execution Control
+- `max_iter`: Maximum attempts before giving best answer
+- `max_execution_time`: Timeout in seconds
+- `max_rpm`: Rate limiting for API calls
+- `max_retry_limit`: Retries on error

+#### Code Execution
+- `allow_code_execution`: Must be True to run code
+- `code_execution_mode`:
+  - `"safe"`: Uses Docker (recommended for production)
+  - `"unsafe"`: Direct execution (use only in trusted environments)

+#### Templates
+- `system_template`: Defines agent's core behavior
+- `prompt_template`: Structures input format
+- `response_template`: Formats agent responses

+<Note>
+  When using custom templates, you can use variables like `{role}`, `{goal}`, and `{input}` in your templates. These will be automatically populated during execution.
+</Note>

-CrewAI is a universal multi-agent framework that allows for all agents to work together to automate tasks and solve problems.
+## Agent Tools

-```python Code example
-from crewai import Agent, Task, Crew
-from custom_agent import CustomAgent # You need to build and extend your own agent logic with the CrewAI BaseAgent class then import it here.
+Agents can be equipped with various tools to enhance their capabilities. CrewAI supports tools from:
+- [CrewAI Toolkit](https://github.com/joaomdmoura/crewai-tools)
+- [LangChain Tools](https://python.langchain.com/docs/integrations/tools)

-from langchain.agents import load_tools
+Here's how to add tools to an agent:

-langchain_tools = load_tools(["google-serper"], llm=llm)
+```python Code
+from crewai import Agent
+from crewai_tools import SerperDevTool, WikipediaTools

-agent1 = CustomAgent(
-    role="agent role",
-    goal="who is {input}?",
-    backstory="agent backstory",
-    verbose=True,
-)
+# Create tools
+search_tool = SerperDevTool()
+wiki_tool = WikipediaTools()

-task1 = Task(
-    expected_output="a short biography of {input}",
-    description="a short biography of {input}",
-    agent=agent1,
-)
+# Add tools to agent
+researcher = Agent(
+    role="AI Technology Researcher",
+    goal="Research the latest AI developments",
+    tools=[search_tool, wiki_tool],
+    verbose=True
+)

-agent2 = Agent(
-    role="agent role",
-    goal="summarize the short bio for {input} and if needed do more research",
-    backstory="agent backstory",
-    verbose=True,
-)

-task2 = Task(
-    description="a tldr summary of the short biography",
-    expected_output="5 bullet point summary of the biography",
-    agent=agent2,
-    context=[task1],
-)

-my_crew = Crew(agents=[agent1, agent2], tasks=[task1, task2])
-crew = my_crew.kickoff(inputs={"input": "Mark Twain"})
 ```
````
````diff
-## Conclusion
+## Agent Memory and Context

-Agents are the building blocks of the CrewAI framework. By understanding how to define and interact with agents,
-you can create sophisticated AI systems that leverage the power of collaborative intelligence. The `code_execution_mode` attribute provides flexibility in how agents execute code, allowing for both secure and direct execution options.
+Agents can maintain memory of their interactions and use context from previous tasks. This is particularly useful for complex workflows where information needs to be retained across multiple tasks.

+```python Code
+from crewai import Agent
+
+analyst = Agent(
+    role="Data Analyst",
+    goal="Analyze and remember complex data patterns",
+    memory=True,  # Enable memory
+    verbose=True
+)
+```

+<Note>
+  When `memory` is enabled, the agent will maintain context across multiple interactions, improving its ability to handle complex, multi-step tasks.
+</Note>

+## Important Considerations and Best Practices

+### Security and Code Execution
+- When using `allow_code_execution`, be cautious with user input and always validate it
+- Use `code_execution_mode: "safe"` (Docker) in production environments
+- Consider setting appropriate `max_execution_time` limits to prevent infinite loops

+### Performance Optimization
+- Use `respect_context_window: true` to prevent token limit issues
+- Set appropriate `max_rpm` to avoid rate limiting
+- Enable `cache: true` to improve performance for repetitive tasks
+- Adjust `max_iter` and `max_retry_limit` based on task complexity

+### Memory and Context Management
+- Use `memory: true` for tasks requiring historical context
+- Leverage `knowledge_sources` for domain-specific information
+- Configure `embedder_config` when using custom embedding models
+- Use custom templates (`system_template`, `prompt_template`, `response_template`) for fine-grained control over agent behavior

+### Agent Collaboration
+- Enable `allow_delegation: true` when agents need to work together
+- Use `step_callback` to monitor and log agent interactions
+- Consider using different LLMs for different purposes:
+  - Main `llm` for complex reasoning
+  - `function_calling_llm` for efficient tool usage

+### Model Compatibility
+- Set `use_system_prompt: false` for older models that don't support system messages
+- Ensure your chosen `llm` supports the features you need (like function calling)

+## Troubleshooting Common Issues

+1. **Rate Limiting**: If you're hitting API rate limits:
+   - Implement appropriate `max_rpm`
+   - Use caching for repetitive operations
+   - Consider batching requests

+2. **Context Window Errors**: If you're exceeding context limits:
+   - Enable `respect_context_window`
+   - Use more efficient prompts
+   - Clear agent memory periodically

+3. **Code Execution Issues**: If code execution fails:
+   - Verify Docker is installed for safe mode
+   - Check execution permissions
+   - Review code sandbox settings

+4. **Memory Issues**: If agent responses seem inconsistent:
+   - Verify memory is enabled
+   - Check knowledge source configuration
+   - Review conversation history management

+Remember that agents are most effective when configured according to their specific use case. Take time to understand your requirements and adjust these parameters accordingly.
````
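The best-practice lists above map directly onto `Agent` constructor parameters. A minimal sketch combining several of them (role text and limits are illustrative):

```python
from crewai import Agent

# Applies the security, performance, and reliability guidance above.
hardened_agent = Agent(
    role="Report Writer",
    goal="Produce concise summaries of research findings",
    backstory="A careful writer who cites sources",
    allow_code_execution=False,   # keep code execution off unless needed
    respect_context_window=True,  # summarize instead of overflowing the window
    max_rpm=10,                   # stay under provider rate limits
    cache=True,                   # reuse tool results for repeated calls
    max_iter=20,
    max_retry_limit=2,
)
```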
docs/concepts/cli.mdx

````diff
@@ -28,20 +28,19 @@ crewai [COMMAND] [OPTIONS] [ARGUMENTS]

 ### 1. Create

-Create a new crew or pipeline.
+Create a new crew or flow.

 ```shell
 crewai create [OPTIONS] TYPE NAME
 ```

-- `TYPE`: Choose between "crew" or "pipeline"
-- `NAME`: Name of the crew or pipeline
-- `--router`: (Optional) Create a pipeline with router functionality
+- `TYPE`: Choose between "crew" or "flow"
+- `NAME`: Name of the crew or flow

 Example:
 ```shell
 crewai create crew my_new_crew
-crewai create pipeline my_new_pipeline --router
+crewai create flow my_new_flow
 ```

 ### 2. Version
````
````diff
@@ -162,6 +161,7 @@ The CLI will initially prompt for API keys for the following services:
 * Groq
 * Anthropic
 * Google Gemini
+* SambaNova

 When you select a provider, the CLI will prompt you to enter your API key.
````
@@ -22,7 +22,8 @@ A crew in crewAI represents a collaborative group of agents working together to

| **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
| **Language** _(optional)_ | `language` | Language used for the crew; defaults to English. |
| **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Defaults to `{"provider": "openai"}`. |
| **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all task outputs or just the final output. Defaults to `False`. |

@@ -31,7 +32,6 @@ A crew in crewAI represents a collaborative group of agents working together to

| **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better and allow us to train models. |
| **Output Log File** _(optional)_ | `output_log_file` | Whether to save a file with the complete crew output and execution. Set it to `True` to create `logs.txt` in the current folder, or pass a string with the full path and name of the file. |
| **Manager Agent** _(optional)_ | `manager_agent` | `manager` sets a custom agent to be used as the manager. |
| **Manager Callbacks** _(optional)_ | `manager_callbacks` | `manager_callbacks` takes a list of callback handlers to be executed by the manager agent when a hierarchical process is used. |
| **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
| **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated, before each Crew iteration all Crew data is sent to an AgentPlanner that plans the tasks, and this plan is added to each task description. |
| **Planning LLM** *(optional)* | `planning_llm` | The language model used by the AgentPlanner in a planning process. |

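To make the table concrete, here is a minimal sketch of a `Crew` using several of these attributes; the agent and task definitions are illustrative stand-ins:

```python Code
from crewai import Agent, Crew, Process, Task

analyst = Agent(
    role="Analyst",
    goal="Summarize findings",
    backstory="A concise, detail-oriented analyst.",
)
report = Task(
    description="Summarize the provided findings.",
    expected_output="A one-paragraph summary.",
    agent=analyst,
)

# Illustrative attribute values — see the table above for each field's meaning.
crew = Crew(
    agents=[analyst],
    tasks=[report],
    process=Process.sequential,
    cache=True,
    max_rpm=30,              # overrides individual agents' max_rpm settings
    full_output=True,
    output_log_file="logs.txt",
)
```
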
@@ -40,6 +40,155 @@ A crew in crewAI represents a collaborative group of agents working together to

**Crew Max RPM**: The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits, and it will override individual agents' `max_rpm` settings if you set it.
</Tip>

## Creating Crews

There are two ways to create crews in CrewAI: using **YAML configuration (recommended)** or defining them **directly in code**.

### YAML Configuration (Recommended)

Using YAML configuration provides a cleaner, more maintainable way to define crews and is consistent with how agents and tasks are defined in CrewAI projects.

After creating your CrewAI project as outlined in the [Installation](/installation) section, you can define your crew in a class that inherits from `CrewBase` and uses decorators to define agents, tasks, and the crew itself.

#### Example Crew Class with Decorators

```python Code
from crewai import Agent, Crew, Task, Process
from crewai.project import CrewBase, agent, task, crew, before_kickoff, after_kickoff


@CrewBase
class YourCrewName:
    """Description of your crew"""

    # Paths to your YAML configuration files
    # To see an example agent and task defined in YAML, check out the following:
    # - Task: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
    # - Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

    @before_kickoff
    def prepare_inputs(self, inputs):
        # Modify inputs before the crew starts
        inputs['additional_data'] = "Some extra information"
        return inputs

    @after_kickoff
    def process_output(self, output):
        # Modify output after the crew finishes
        output.raw += "\nProcessed after kickoff."
        return output

    @agent
    def agent_one(self) -> Agent:
        return Agent(
            config=self.agents_config['agent_one'],
            verbose=True
        )

    @agent
    def agent_two(self) -> Agent:
        return Agent(
            config=self.agents_config['agent_two'],
            verbose=True
        )

    @task
    def task_one(self) -> Task:
        return Task(
            config=self.tasks_config['task_one']
        )

    @task
    def task_two(self) -> Task:
        return Task(
            config=self.tasks_config['task_two']
        )

    @crew
    def crew(self) -> Crew:
        return Crew(
            agents=self.agents,  # Automatically collected by the @agent decorator
            tasks=self.tasks,    # Automatically collected by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
```

<Note>
Tasks will be executed in the order they are defined.
</Note>

The `CrewBase` class, along with these decorators, automates the collection of agents and tasks, reducing the need for manual management.

#### Decorators overview from `annotations.py`

CrewAI provides several decorators in the `annotations.py` file that are used to mark methods within your crew class for special handling:

- `@CrewBase`: Marks the class as a crew base class.
- `@agent`: Denotes a method that returns an `Agent` object.
- `@task`: Denotes a method that returns a `Task` object.
- `@crew`: Denotes the method that returns the `Crew` object.
- `@before_kickoff`: (Optional) Marks a method to be executed before the crew starts.
- `@after_kickoff`: (Optional) Marks a method to be executed after the crew finishes.

These decorators help in organizing your crew's structure and automatically collecting agents and tasks without manually listing them.

### Direct Code Definition (Alternative)

Alternatively, you can define the crew directly in code without using YAML configuration files.

```python Code
from crewai import Agent, Crew, Task, Process
from crewai_tools import YourCustomTool


class YourCrewName:
    def agent_one(self) -> Agent:
        return Agent(
            role="Data Analyst",
            goal="Analyze data trends in the market",
            backstory="An experienced data analyst with a background in economics",
            verbose=True,
            tools=[YourCustomTool()]
        )

    def agent_two(self) -> Agent:
        return Agent(
            role="Market Researcher",
            goal="Gather information on market dynamics",
            backstory="A diligent researcher with a keen eye for detail",
            verbose=True
        )

    def task_one(self) -> Task:
        return Task(
            description="Collect recent market data and identify trends.",
            expected_output="A report summarizing key trends in the market.",
            agent=self.agent_one()
        )

    def task_two(self) -> Task:
        return Task(
            description="Research factors affecting market dynamics.",
            expected_output="An analysis of factors influencing the market.",
            agent=self.agent_two()
        )

    def crew(self) -> Crew:
        return Crew(
            agents=[self.agent_one(), self.agent_two()],
            tasks=[self.task_one(), self.task_two()],
            process=Process.sequential,
            verbose=True
        )
```

In this example:

- Agents and tasks are defined directly within the class without decorators.
- We manually create and manage the list of agents and tasks.
- This approach provides more control but can be less maintainable for larger projects. A minimal usage sketch follows below.

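To run a crew defined this way, instantiate the class and kick off the crew it builds — a minimal sketch:

```python Code
# Build and run the directly-defined crew from the example above.
my_crew = YourCrewName().crew()
result = my_crew.kickoff()
print(result)
```
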
## Crew Output
@@ -187,4 +336,4 @@ Then, to replay from a specific task, use:

crewai replay -t <task_id>
```

These commands let you replay from your latest kickoff tasks, still retaining context from previously executed tasks.

@@ -18,63 +18,60 @@ Flows allow you to create structured, event-driven workflows. They provide a sea

4. **Flexible Control Flow**: Implement conditional logic, loops, and branching within your workflows.

5. **Input Flexibility**: Flows can accept inputs to initialize or update their state, with different handling for structured and unstructured state management.

## Getting Started

Let's create a simple Flow where you will use OpenAI to generate a random city in one task and then use that city to generate a fun fact in another task.

```python Code
from crewai.flow.flow import Flow, listen, start
from dotenv import load_dotenv
from litellm import completion

# Load environment variables (e.g., OPENAI_API_KEY) from a .env file
load_dotenv()


class ExampleFlow(Flow):
    model = "gpt-4o-mini"

    @start()
    def generate_city(self):
        print("Starting flow")

        response = completion(
            model=self.model,
            messages=[
                {
                    "role": "user",
                    "content": "Return the name of a random city in the world.",
                },
            ],
        )

        random_city = response["choices"][0]["message"]["content"]
        print(f"Random City: {random_city}")

        return random_city

    @listen(generate_city)
    def generate_fun_fact(self, random_city):
        response = completion(
            model=self.model,
            messages=[
                {
                    "role": "user",
                    "content": f"Tell me a fun fact about {random_city}",
                },
            ],
        )

        fun_fact = response["choices"][0]["message"]["content"]
        return fun_fact


flow = ExampleFlow()
result = flow.kickoff()

print(f"Generated fun fact: {result}")
```

In the above example, we have created a simple Flow that generates a random city using OpenAI and then generates a fun fact about that city. The Flow consists of two tasks: `generate_city` and `generate_fun_fact`. The `generate_city` task is the starting point of the Flow, and the `generate_fun_fact` task listens for the output of the `generate_city` task.

### Passing Inputs to Flows

Flows can accept inputs to initialize or update their state before execution. The way inputs are handled depends on whether the flow uses structured or unstructured state management.

#### Structured State Management

In structured state management, the flow's state is defined using a Pydantic `BaseModel`. Inputs must match the model's schema, and any updates will overwrite the default values.

```python Code
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel


class ExampleState(BaseModel):
    counter: int = 0
    message: str = ""


class StructuredExampleFlow(Flow[ExampleState]):
    @start()
    def first_method(self):
        # Implementation
        pass


flow = StructuredExampleFlow()
flow.kickoff(inputs={"counter": 10})
```

In this example, the `counter` is initialized to `10`, while `message` retains its default value.

#### Unstructured State Management

In unstructured state management, the flow's state is a dictionary. You can pass any dictionary to update the state.

```python Code
from crewai.flow.flow import Flow, listen, start


class UnstructuredExampleFlow(Flow):
    @start()
    def first_method(self):
        # Implementation
        pass


flow = UnstructuredExampleFlow()
flow.kickoff(inputs={"counter": 5, "message": "Initial message"})
```

Here, both `counter` and `message` are updated based on the provided inputs.

**Note:** Ensure that inputs for structured state management adhere to the defined schema to avoid validation errors.

@@ -97,14 +94,14 @@ The `@listen()` decorator can be used in several ways:

1. **Listening to a Method by Name**: You can pass the name of the method you want to listen to as a string. When that method completes, the listener method will be triggered.

```python Code
@listen("generate_city")
def generate_fun_fact(self, random_city):
    # Implementation
```

2. **Listening to a Method Directly**: You can pass the method itself. When that method completes, the listener method will be triggered.

```python Code
@listen(generate_city)
def generate_fun_fact(self, random_city):
    # Implementation
```

@@ -121,7 +118,7 @@ When you run a Flow, the final output is determined by the last method that comp

Here's how you can access the final output:

<CodeGroup>
```python Code
from crewai.flow.flow import Flow, listen, start

class OutputExampleFlow(Flow):
@@ -133,17 +130,18 @@ class OutputExampleFlow(Flow):
    def second_method(self, first_output):
        return f"Second method received: {first_output}"


flow = OutputExampleFlow()
final_output = flow.kickoff()

print("---- Final Output ----")
print(final_output)
```

```text Output
---- Final Output ----
Second method received: Output from first_method
```

</CodeGroup>

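Because the hunk above omits `first_method`, here is a complete minimal sketch consistent with the output shown; the method body is an assumption matching that output:

```python Code
from crewai.flow.flow import Flow, listen, start


class OutputExampleFlow(Flow):
    @start()
    def first_method(self):
        return "Output from first_method"

    @listen(first_method)
    def second_method(self, first_output):
        return f"Second method received: {first_output}"


flow = OutputExampleFlow()
final_output = flow.kickoff()  # returns the last completed method's output
print("---- Final Output ----")
print(final_output)
```
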
@@ -158,7 +156,7 @@ Here's an example of how to update and access the state:

<CodeGroup>

```python Code
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel

@@ -186,7 +184,7 @@ print("Final State:")
print(flow.state)
```

```text Output
Final Output: Hello from first_method - updated by second_method
Final State:
counter=2 message='Hello from first_method - updated by second_method'
```

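Since the hunk cuts out the flow's methods, here is a complete minimal sketch consistent with the final state shown (`counter=2` and the twice-updated message); the class and method names are assumptions:

```python Code
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel


class ExampleState(BaseModel):
    counter: int = 0
    message: str = ""


class StateExampleFlow(Flow[ExampleState]):
    @start()
    def first_method(self):
        self.state.message = "Hello from first_method"
        self.state.counter += 1

    @listen(first_method)
    def second_method(self):
        self.state.message += " - updated by second_method"
        self.state.counter += 1
        return self.state.message


flow = StateExampleFlow()
final_output = flow.kickoff()
print(f"Final Output: {final_output}")
print("Final State:")
print(flow.state)
```
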
@@ -210,10 +208,10 @@ allowing developers to choose the approach that best fits their application's ne

In unstructured state management, all state is stored in the `state` attribute of the `Flow` class.
This approach offers flexibility, enabling developers to add or modify state attributes on the fly without defining a strict schema.

```python Code
from crewai.flow.flow import Flow, listen, start


class UnstructuredExampleFlow(Flow):

    @start()
    def first_method(self):
@@ -232,7 +230,8 @@ class UnstructuredExampleFlow(Flow):

        print(f"State after third_method: {self.state}")


flow = UnstructuredExampleFlow()
flow.kickoff()
```

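The hunk above truncates the method bodies, so here is a complete, runnable minimal sketch of the same pattern; the specific keys and messages are illustrative:

```python Code
from crewai.flow.flow import Flow, listen, start


class UnstructuredExampleFlow(Flow):
    @start()
    def first_method(self):
        # State behaves like a dictionary; keys can be added on the fly.
        self.state["counter"] = 0
        self.state["message"] = "Hello from unstructured flow"

    @listen(first_method)
    def second_method(self):
        self.state["counter"] += 1
        self.state["message"] += " - updated"
        print(f"State after second_method: {self.state}")


flow = UnstructuredExampleFlow()
flow.kickoff()
```
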
@@ -246,14 +245,16 @@ flow.kickoff()

Structured state management leverages predefined schemas to ensure consistency and type safety across the workflow.
By using models like Pydantic's `BaseModel`, developers can define the exact shape of the state, enabling better validation and auto-completion in development environments.

```python Code
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel


class ExampleState(BaseModel):
    counter: int = 0
    message: str = ""


class StructuredExampleFlow(Flow[ExampleState]):

    @start()
@@ -272,6 +273,7 @@ class StructuredExampleFlow(Flow[ExampleState]):

        print(f"State after third_method: {self.state}")


flow = StructuredExampleFlow()
flow.kickoff()
```

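Again, the hunk truncates the method bodies; a complete, runnable minimal sketch of the structured variant (field updates are illustrative):

```python Code
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel


class ExampleState(BaseModel):
    counter: int = 0
    message: str = ""


class StructuredExampleFlow(Flow[ExampleState]):
    @start()
    def first_method(self):
        # Attribute access is validated against ExampleState's schema.
        self.state.message = "Hello from structured flow"

    @listen(first_method)
    def second_method(self):
        self.state.counter += 1
        self.state.message += " - updated"
        print(f"State after second_method: {self.state}")


flow = StructuredExampleFlow()
flow.kickoff()
```
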
@@ -305,7 +307,7 @@ The `or_` function in Flows allows you to listen to multiple methods and trigger

<CodeGroup>

```python Code
from crewai.flow.flow import Flow, listen, or_, start

class OrExampleFlow(Flow):
@@ -322,11 +324,13 @@ class OrExampleFlow(Flow):
    def logger(self, result):
        print(f"Logger: {result}")


flow = OrExampleFlow()
flow.kickoff()
```

```text Output
Logger: Hello from the start method
Logger: Hello from the second method
```

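Since the hunk omits the flow's earlier methods, here is a complete minimal sketch consistent with the output shown; the method names and strings are assumptions matching that output:

```python Code
from crewai.flow.flow import Flow, listen, or_, start


class OrExampleFlow(Flow):
    @start()
    def start_method(self):
        return "Hello from the start method"

    @listen(start_method)
    def second_method(self):
        return "Hello from the second method"

    # Fires once for each of the listened methods as it completes.
    @listen(or_(start_method, second_method))
    def logger(self, result):
        print(f"Logger: {result}")


flow = OrExampleFlow()
flow.kickoff()
```
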
@@ -342,7 +346,7 @@ The `and_` function in Flows allows you to listen to multiple methods and trigge

<CodeGroup>

```python Code
from crewai.flow.flow import Flow, and_, listen, start

class AndExampleFlow(Flow):
@@ -364,7 +368,7 @@ flow = AndExampleFlow()
flow.kickoff()
```

```text Output
---- Logger ----
{'greeting': 'Hello from the start method', 'joke': 'What do computers eat? Microchips.'}
```

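As above, the hunk cuts the class body, so here is a complete minimal sketch consistent with the logged state shown; the method names are assumptions:

```python Code
from crewai.flow.flow import Flow, and_, listen, start


class AndExampleFlow(Flow):
    @start()
    def start_method(self):
        self.state["greeting"] = "Hello from the start method"

    @listen(start_method)
    def second_method(self):
        self.state["joke"] = "What do computers eat? Microchips."

    # Fires only after *all* listened methods have completed.
    @listen(and_(start_method, second_method))
    def logger(self):
        print("---- Logger ----")
        print(self.state)


flow = AndExampleFlow()
flow.kickoff()
```
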
@@ -381,7 +385,7 @@ You can specify different routes based on the output of the method, allowing you

<CodeGroup>

```python Code
import random
from crewai.flow.flow import Flow, listen, router, start
from pydantic import BaseModel
@@ -412,11 +416,12 @@ class RouterFlow(Flow[ExampleState]):
    def fourth_method(self):
        print("Fourth method running")


flow = RouterFlow()
flow.kickoff()
```

```text Output
Starting the structured flow
Third method running
Fourth method running
```

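For reference, a complete minimal sketch of a router flow consistent with the fragment and output above; the routing condition and state field are assumptions:

```python Code
import random

from crewai.flow.flow import Flow, listen, router, start
from pydantic import BaseModel


class ExampleState(BaseModel):
    success_flag: bool = False


class RouterFlow(Flow[ExampleState]):
    @start()
    def start_method(self):
        print("Starting the structured flow")
        self.state.success_flag = random.choice([True, False])

    # The router's return value selects which listeners fire next.
    @router(start_method)
    def second_method(self):
        if self.state.success_flag:
            return "success"
        return "failed"

    @listen("success")
    def third_method(self):
        print("Third method running")

    @listen("failed")
    def fourth_method(self):
        print("Fourth method running")


flow = RouterFlow()
flow.kickoff()
```
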
@@ -479,7 +484,7 @@ The `main.py` file is where you create your flow and connect the crews together.

Here's an example of how you can connect the `poem_crew` in the `main.py` file:

```python Code
#!/usr/bin/env python
from random import randint
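# (Continuation sketch — the diff hunk is cut off above. The lines below are
# an illustrative completion based on the generated flow template; the import
# path and method names are assumptions, not the original file's code.)
from pydantic import BaseModel
from crewai.flow.flow import Flow, listen, start

from poem_flow.crews.poem_crew.poem_crew import PoemCrew


class PoemState(BaseModel):
    sentence_count: int = 1
    poem: str = ""


class PoemFlow(Flow[PoemState]):
    @start()
    def generate_sentence_count(self):
        print("Generating sentence count")
        self.state.sentence_count = randint(1, 5)

    @listen(generate_sentence_count)
    def generate_poem(self):
        print("Generating poem")
        result = (
            PoemCrew()
            .crew()
            .kickoff(inputs={"sentence_count": self.state.sentence_count})
        )
        self.state.poem = result.raw

    @listen(generate_poem)
    def save_poem(self):
        with open("poem.txt", "w") as f:
            f.write(self.state.poem)


def kickoff():
    PoemFlow().kickoff()


if __name__ == "__main__":
    kickoff()
```
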
@@ -555,42 +560,6 @@ uv run kickoff

The flow will execute, and you should see the output in the console.

### Adding Additional Crews Using the CLI

Once you have created your initial flow, you can easily add additional crews to your project using the CLI. This allows you to expand your flow's capabilities by integrating new crews without starting from scratch.

To add a new crew to your existing flow, use the following command:

```bash
crewai flow add-crew <crew_name>
```

This command will create a new directory for your crew within the `crews` folder of your flow project. It will include the necessary configuration files and a crew definition file, similar to the initial setup.

#### Folder Structure

After adding a new crew, your folder structure will look like this:

| Directory/File          | Description                                                         |
| :---------------------- | :------------------------------------------------------------------ |
| `name_of_flow/`         | Root directory for the flow.                                        |
| ├── `crews/`            | Contains directories for specific crews.                            |
| │ ├── `poem_crew/`      | Directory for the "poem_crew" with its configurations and scripts.  |
| │ │ ├── `config/`       | Configuration files directory for the "poem_crew".                  |
| │ │ │ ├── `agents.yaml` | YAML file defining the agents for "poem_crew".                      |
| │ │ │ └── `tasks.yaml`  | YAML file defining the tasks for "poem_crew".                       |
| │ │ └── `poem_crew.py`  | Script for "poem_crew" functionality.                               |
| └── `name_of_crew/`     | Directory for the new crew.                                         |
| ├── `config/`           | Configuration files directory for the new crew.                     |
| │ ├── `agents.yaml`     | YAML file defining the agents for the new crew.                     |
| │ └── `tasks.yaml`      | YAML file defining the tasks for the new crew.                      |
| └── `name_of_crew.py`   | Script for the new crew functionality.                              |

You can then customize the `agents.yaml` and `tasks.yaml` files to define the agents and tasks for your new crew. The `name_of_crew.py` file will contain the crew's logic, which you can modify to suit your needs.

By using the CLI to add additional crews, you can efficiently build complex AI workflows that leverage multiple crews working together.

## Plot Flows

Visualizing your AI workflows can provide valuable insights into the structure and execution paths of your flows. CrewAI offers a powerful visualization tool that allows you to generate interactive plots of your flows, making it easier to understand and optimize your AI workflows.

@@ -607,7 +576,7 @@ CrewAI provides two convenient methods to generate plots of your flows:

If you are working directly with a flow instance, you can generate a plot by calling the `plot()` method on your flow object. This method will create an HTML file containing the interactive plot of your flow.

```python Code
# Assuming you have a flow instance
flow.plot("my_flow_plot")
```

@@ -630,114 +599,13 @@ The generated plot will display nodes representing the tasks in your flow, with

By visualizing your flows, you can gain a clearer understanding of the workflow's structure, making it easier to debug, optimize, and communicate your AI processes to others.

## Advanced

In this section, we explore more complex use cases of CrewAI Flows, starting with a self-evaluation loop. This pattern is crucial for developing AI systems that can iteratively improve their outputs through feedback.

### 1) Self-Evaluation Loop

The self-evaluation loop is a powerful pattern that allows AI workflows to automatically assess and refine their outputs. This example demonstrates how to set up a flow that generates content, evaluates it, and iterates based on feedback until the desired quality is achieved.

#### Overview

The self-evaluation loop involves two main Crews:

1. **ShakespeareanXPostCrew**: Generates a Shakespearean-style post on a given topic.
2. **XPostReviewCrew**: Evaluates the generated post, providing feedback on its validity and quality.

The process iterates until the post meets the criteria or a maximum retry limit is reached. This approach ensures high-quality outputs through iterative refinement.

#### Importance

This pattern is essential for building robust AI systems that can adapt and improve over time. By automating the evaluation and feedback loop, developers can ensure that their AI workflows produce reliable and high-quality results.

#### Main Code Highlights

Below is the `main.py` file for the self-evaluation loop flow:

```python Code
from typing import Optional

from crewai.flow.flow import Flow, listen, router, start
from pydantic import BaseModel

from self_evaluation_loop_flow.crews.shakespeare_crew.shakespeare_crew import (
    ShakespeareanXPostCrew,
)
from self_evaluation_loop_flow.crews.x_post_review_crew.x_post_review_crew import (
    XPostReviewCrew,
)


class ShakespeareXPostFlowState(BaseModel):
    x_post: str = ""
    feedback: Optional[str] = None
    valid: bool = False
    retry_count: int = 0


class ShakespeareXPostFlow(Flow[ShakespeareXPostFlowState]):

    @start("retry")
    def generate_shakespeare_x_post(self):
        print("Generating Shakespearean X post")
        topic = "Flying cars"
        result = (
            ShakespeareanXPostCrew()
            .crew()
            .kickoff(inputs={"topic": topic, "feedback": self.state.feedback})
        )
        print("X post generated", result.raw)
        self.state.x_post = result.raw

    @router(generate_shakespeare_x_post)
    def evaluate_x_post(self):
        if self.state.retry_count > 3:
            return "max_retry_exceeded"
        result = XPostReviewCrew().crew().kickoff(inputs={"x_post": self.state.x_post})
        self.state.valid = result["valid"]
        self.state.feedback = result["feedback"]
        print("valid", self.state.valid)
        print("feedback", self.state.feedback)
        self.state.retry_count += 1
        if self.state.valid:
            return "complete"
        return "retry"

    @listen("complete")
    def save_result(self):
        print("X post is valid")
        print("X post:", self.state.x_post)
        with open("x_post.txt", "w") as file:
            file.write(self.state.x_post)

    @listen("max_retry_exceeded")
    def max_retry_exceeded_exit(self):
        print("Max retry count exceeded")
        print("X post:", self.state.x_post)
        print("Feedback:", self.state.feedback)


def kickoff():
    shakespeare_flow = ShakespeareXPostFlow()
    shakespeare_flow.kickoff()


def plot():
    shakespeare_flow = ShakespeareXPostFlow()
    shakespeare_flow.plot()


if __name__ == "__main__":
    kickoff()
```

#### Code Highlights

- **Retry Mechanism**: The flow regenerates the post if it doesn't meet the criteria, stopping once `retry_count` exceeds the configured limit.
- **Feedback Loop**: Feedback from the `XPostReviewCrew` is used to refine the post iteratively.
- **State Management**: The flow maintains state using a Pydantic model, ensuring type safety and clarity.

For a complete example and further details, please refer to the [Self Evaluation Loop Flow repository](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow).

### Conclusion

Plotting your flows is a powerful feature of CrewAI that enhances your ability to design and manage complex AI workflows. Whether you choose to use the `plot()` method or the command line, generating plots will provide you with a visual representation of your workflows, aiding in both development and presentation.

## Next Steps

If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are five specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:

1. **Email Auto Responder Flow**: This example demonstrates an infinite loop where a background job continually runs to automate email responses. It's a great use case for tasks that need to be performed repeatedly without manual intervention. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/email_auto_responder_flow)

@@ -747,8 +615,6 @@ If you're interested in exploring additional examples of flows, we have a variet

4. **Meeting Assistant Flow**: This flow demonstrates how to broadcast one event to trigger multiple follow-up actions. For instance, after a meeting is completed, the flow can update a Trello board, send a Slack message, and save the results. It's a great example of handling multiple outcomes from a single event, making it ideal for comprehensive task management and notification systems. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/meeting_assistant_flow)

5. **Self Evaluation Loop Flow**: This flow demonstrates a self-evaluation loop where AI workflows automatically assess and refine their outputs through feedback. It involves generating content, evaluating it, and iterating until the desired quality is achieved. This pattern is crucial for developing robust AI systems that can adapt and improve over time. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow)

By exploring these examples, you can gain insights into how to leverage CrewAI Flows for various use cases, from automating repetitive tasks to managing complex, multi-step processes with dynamic decision-making and human feedback.

Also, check out our YouTube video on how to use flows in CrewAI below!

@@ -762,4 +628,4 @@ Also, check out our YouTube video on how to use flows in CrewAI below!

  allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
  referrerpolicy="strict-origin-when-cross-origin"
  allowfullscreen
></iframe>

docs/concepts/knowledge.mdx (new file, 610 lines)
@@ -0,0 +1,610 @@

---
title: Knowledge
description: What is knowledge in CrewAI and how to use it.
icon: book
---

## What is Knowledge?

Knowledge in CrewAI is a powerful system that allows AI agents to access and utilize external information sources during their tasks.
Think of it as giving your agents a reference library they can consult while working.

<Info>
Key benefits of using Knowledge:
- Enhance agents with domain-specific information
- Support decisions with real-world data
- Maintain context across conversations
- Ground responses in factual information
</Info>

## Supported Knowledge Sources

CrewAI supports various types of knowledge sources out of the box:

<CardGroup cols={2}>
  <Card title="Text Sources" icon="text">
    - Raw strings
    - Text files (.txt)
    - PDF documents
  </Card>
  <Card title="Structured Data" icon="table">
    - CSV files
    - Excel spreadsheets
    - JSON documents
  </Card>
</CardGroup>

## Supported Knowledge Parameters

| Parameter         | Type                           | Required | Description                                                                                                                                        |
| :---------------- | :----------------------------- | :------- | :------------------------------------------------------------------------------------------------------------------------------------------------ |
| `sources`         | **List[BaseKnowledgeSource]**  | Yes      | List of knowledge sources that provide content to be stored and queried. Can include PDF, CSV, Excel, JSON, text files, or string content.        |
| `collection_name` | **str**                        | No       | Name of the collection where the knowledge will be stored. Used to identify different sets of knowledge. Defaults to "knowledge" if not provided. |
| `storage`         | **Optional[KnowledgeStorage]** | No       | Custom storage configuration for managing how the knowledge is stored and retrieved. If not provided, a default storage will be created.          |

## Quickstart Example

<Tip>
For file-based knowledge sources, make sure to place your files in a `knowledge` directory at the root of your project.
Also, use relative paths from the `knowledge` directory when creating the source.
</Tip>

Here's an example using string-based knowledge:

```python Code
from crewai import Agent, Task, Crew, Process, LLM
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Create a knowledge source
content = "User's name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(
    content=content,
)

# Create an LLM with a temperature of 0 to ensure deterministic outputs
llm = LLM(model="gpt-4o-mini", temperature=0)

# Create an agent with the knowledge store
agent = Agent(
    role="About User",
    goal="You know everything about the user.",
    backstory="""You are a master at understanding people and their preferences.""",
    verbose=True,
    allow_delegation=False,
    llm=llm,
)
task = Task(
    description="Answer the following questions about the user: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    knowledge_sources=[string_source],  # Enable knowledge by adding the sources here. You can also add more sources to the list.
)

result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```

Here's another example using `CrewDoclingSource`, which is quite versatile and can handle multiple file formats including TXT, PDF, DOCX, HTML, and more.

```python Code
from crewai import LLM, Agent, Crew, Process, Task
from crewai.knowledge.source.crew_docling_source import CrewDoclingSource

# Create a knowledge source
content_source = CrewDoclingSource(
    file_paths=[
        "https://lilianweng.github.io/posts/2024-11-28-reward-hacking",
        "https://lilianweng.github.io/posts/2024-07-07-hallucination",
    ],
)

# Create an LLM with a temperature of 0 to ensure deterministic outputs
llm = LLM(model="gpt-4o-mini", temperature=0)

# Create an agent with the knowledge store
agent = Agent(
    role="About papers",
    goal="You know everything about the papers.",
    backstory="""You are a master at understanding papers and their content.""",
    verbose=True,
    allow_delegation=False,
    llm=llm,
)
task = Task(
    description="Answer the following questions about the papers: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    knowledge_sources=[
        content_source
    ],  # Enable knowledge by adding the sources here. You can also add more sources to the list.
)

result = crew.kickoff(
    inputs={
        "question": "What is the reward hacking paper about? Be sure to provide sources."
    }
)
```

## More Examples

Here are examples of how to use different types of knowledge sources:

### Text File Knowledge Source
```python Code
from crewai.knowledge.source.crew_docling_source import CrewDoclingSource

# Create a text file knowledge source
text_source = CrewDoclingSource(
    file_paths=["document.txt", "another.txt"]
)

# Add the source at the agent or crew level
agent = Agent(
    ...
    knowledge_sources=[text_source]
)

crew = Crew(
    ...
    knowledge_sources=[text_source]
)
```

### PDF Knowledge Source
```python Code
from crewai.knowledge.source.pdf_knowledge_source import PDFKnowledgeSource

# Create a PDF knowledge source
pdf_source = PDFKnowledgeSource(
    file_paths=["document.pdf", "another.pdf"]
)

# Add the source at the agent or crew level
agent = Agent(
    ...
    knowledge_sources=[pdf_source]
)

crew = Crew(
    ...
    knowledge_sources=[pdf_source]
)
```

### CSV Knowledge Source
```python Code
from crewai.knowledge.source.csv_knowledge_source import CSVKnowledgeSource

# Create a CSV knowledge source
csv_source = CSVKnowledgeSource(
    file_paths=["data.csv"]
)

# Add the source at the agent or crew level
agent = Agent(
    ...
    knowledge_sources=[csv_source]
)

crew = Crew(
    ...
    knowledge_sources=[csv_source]
)
```

### Excel Knowledge Source
```python Code
from crewai.knowledge.source.excel_knowledge_source import ExcelKnowledgeSource

# Create an Excel knowledge source
excel_source = ExcelKnowledgeSource(
    file_paths=["spreadsheet.xlsx"]
)

# Add the source at the agent or crew level
agent = Agent(
    ...
    knowledge_sources=[excel_source]
)

crew = Crew(
    ...
    knowledge_sources=[excel_source]
)
```

### JSON Knowledge Source
```python Code
from crewai.knowledge.source.json_knowledge_source import JSONKnowledgeSource

# Create a JSON knowledge source
json_source = JSONKnowledgeSource(
    file_paths=["data.json"]
)

# Add the source at the agent or crew level
agent = Agent(
    ...
    knowledge_sources=[json_source]
)

crew = Crew(
    ...
    knowledge_sources=[json_source]
)
```

## Knowledge Configuration

### Chunking Configuration

Knowledge sources automatically chunk content for better processing.
You can configure chunking behavior in your knowledge sources:

```python Code
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

source = StringKnowledgeSource(
    content="Your content here",
    chunk_size=4000,     # Maximum size of each chunk (default: 4000)
    chunk_overlap=200    # Overlap between chunks (default: 200)
)
```

The chunking configuration helps in:
- Breaking down large documents into manageable pieces
- Maintaining context through chunk overlap
- Optimizing retrieval accuracy

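To see how `chunk_size` and `chunk_overlap` interact, here is a small illustrative sketch (not CrewAI's internal implementation) of fixed-size chunking with overlap:

```python Code
def chunk_text(text: str, chunk_size: int = 4000, chunk_overlap: int = 200) -> list[str]:
    """Illustrative fixed-size chunking: each chunk starts
    chunk_size - chunk_overlap characters after the previous one,
    so consecutive chunks share chunk_overlap characters of context."""
    step = chunk_size - chunk_overlap
    return [text[i:i + chunk_size] for i in range(0, len(text), step)]

chunks = chunk_text("some long document " * 500, chunk_size=4000, chunk_overlap=200)
print(len(chunks), len(chunks[0]))  # number of chunks, size of the first chunk
```
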
### Embeddings Configuration

You can also configure the embedder for the knowledge store.
This is useful if you want to use a different embedder for the knowledge store than the one used for the agents.
The `embedder` parameter supports various embedding model providers, including:
- `openai`: OpenAI's embedding models
- `google`: Google's text embedding models
- `azure`: Azure OpenAI embeddings
- `ollama`: Local embeddings with Ollama
- `vertexai`: Google Cloud VertexAI embeddings
- `cohere`: Cohere's embedding models
- `bedrock`: AWS Bedrock embeddings
- `huggingface`: Hugging Face models
- `watson`: IBM Watson embeddings

Here's an example of how to configure the embedder for the knowledge store using Google's `text-embedding-004` model:

<CodeGroup>
```python Example
from crewai import Agent, Task, Crew, Process, LLM
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
import os

# Get the Gemini API key
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")

# Create a knowledge source
content = "User's name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(
    content=content,
)

# Create an LLM with a temperature of 0 to ensure deterministic outputs
gemini_llm = LLM(
    model="gemini/gemini-1.5-pro-002",
    api_key=GEMINI_API_KEY,
    temperature=0,
)

# Create an agent with the knowledge store
agent = Agent(
    role="About User",
    goal="You know everything about the user.",
    backstory="""You are a master at understanding people and their preferences.""",
    verbose=True,
    allow_delegation=False,
    llm=gemini_llm,
)

task = Task(
    description="Answer the following questions about the user: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    knowledge_sources=[string_source],
    embedder={
        "provider": "google",
        "config": {
            "model": "models/text-embedding-004",
            "api_key": GEMINI_API_KEY,
        }
    }
)

result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```
```text Output
# Agent: About User
## Task: Answer the following questions about the user: What city does John live in and how old is he?

# Agent: About User
## Final Answer:
John is 30 years old and lives in San Francisco.
```
</CodeGroup>

## Clearing Knowledge

If you need to clear the knowledge stored in CrewAI, you can use the `crewai reset-memories` command with the `--knowledge` option.

```bash Command
crewai reset-memories --knowledge
```

This is useful when you've updated your knowledge sources and want to ensure that the agents are using the most recent information.

## Agent-Specific Knowledge

While knowledge can be provided at the crew level using `crew.knowledge_sources`, individual agents can also have their own knowledge sources using the `knowledge_sources` parameter:

```python Code
from crewai import Agent, Task, Crew
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Create agent-specific knowledge about a product
product_specs = StringKnowledgeSource(
    content="""The XPS 13 laptop features:
    - 13.4-inch 4K display
    - Intel Core i7 processor
    - 16GB RAM
    - 512GB SSD storage
    - 12-hour battery life""",
    metadata={"category": "product_specs"}
)

# Create a support agent with product knowledge
support_agent = Agent(
    role="Technical Support Specialist",
    goal="Provide accurate product information and support.",
    backstory="You are an expert on our laptop products and specifications.",
    knowledge_sources=[product_specs]  # Agent-specific knowledge
)

# Create a task that requires product knowledge
support_task = Task(
    description="Answer this customer question: {question}",
    expected_output="A detailed answer to the customer's question.",
    agent=support_agent
)

# Create and run the crew
crew = Crew(
    agents=[support_agent],
    tasks=[support_task]
)

# Get answer about the laptop's specifications
result = crew.kickoff(
    inputs={"question": "What is the storage capacity of the XPS 13?"}
)
```

<Info>
Benefits of agent-specific knowledge:
- Give agents specialized information for their roles
- Maintain separation of concerns between agents
- Combine with crew-level knowledge for layered information access
</Info>

## Custom Knowledge Sources

CrewAI allows you to create custom knowledge sources for any type of data by extending the `BaseKnowledgeSource` class. Let's create a practical example that fetches and processes space news articles.

#### Space News Knowledge Source Example

<CodeGroup>

```python Code
from crewai import Agent, Task, Crew, Process, LLM
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
import requests
from typing import Dict, Any
from pydantic import Field

class SpaceNewsKnowledgeSource(BaseKnowledgeSource):
    """Knowledge source that fetches data from the Space News API."""

    api_endpoint: str = Field(description="API endpoint URL")
    limit: int = Field(default=10, description="Number of articles to fetch")

    def load_content(self) -> Dict[Any, str]:
        """Fetch and format space news articles."""
        try:
            response = requests.get(
                f"{self.api_endpoint}?limit={self.limit}"
            )
            response.raise_for_status()

            data = response.json()
            articles = data.get('results', [])

            formatted_data = self._format_articles(articles)
            return {self.api_endpoint: formatted_data}
        except Exception as e:
            raise ValueError(f"Failed to fetch space news: {str(e)}")

    def _format_articles(self, articles: list) -> str:
        """Format articles into readable text."""
        formatted = "Space News Articles:\n\n"
        for article in articles:
            formatted += f"""
                Title: {article['title']}
                Published: {article['published_at']}
                Summary: {article['summary']}
                News Site: {article['news_site']}
                URL: {article['url']}
                -------------------"""
        return formatted

    def add(self) -> None:
        """Process and store the articles."""
        content = self.load_content()
        for _, text in content.items():
            chunks = self._chunk_text(text)
            self.chunks.extend(chunks)

        self._save_documents()

# Create knowledge source
recent_news = SpaceNewsKnowledgeSource(
    api_endpoint="https://api.spaceflightnewsapi.net/v4/articles",
    limit=10,
)

# Create specialized agent
space_analyst = Agent(
    role="Space News Analyst",
    goal="Answer questions about space news accurately and comprehensively",
    backstory="""You are a space industry analyst with expertise in space exploration,
    satellite technology, and space industry trends. You excel at answering questions
    about space news and providing detailed, accurate information.""",
    knowledge_sources=[recent_news],
    llm=LLM(model="gpt-4", temperature=0.0)
)

# Create task that handles user questions
analysis_task = Task(
    description="Answer this question about space news: {user_question}",
    expected_output="A detailed answer based on the recent space news articles",
    agent=space_analyst
)

# Create and run the crew
crew = Crew(
    agents=[space_analyst],
    tasks=[analysis_task],
    verbose=True,
    process=Process.sequential
)

# Example usage
result = crew.kickoff(
    inputs={"user_question": "What are the latest developments in space exploration?"}
)
```

```text Output
# Agent: Space News Analyst
## Task: Answer this question about space news: What are the latest developments in space exploration?

# Agent: Space News Analyst
## Final Answer:
The latest developments in space exploration, based on recent space news articles, include the following:

1. SpaceX has received the final regulatory approvals to proceed with the second integrated Starship/Super Heavy launch, scheduled for as soon as the morning of Nov. 17, 2023. This is a significant step in SpaceX's ambitious plans for space exploration and colonization. [Source: SpaceNews](https://spacenews.com/starship-cleared-for-nov-17-launch/)

2. SpaceX has also informed the US Federal Communications Commission (FCC) that it plans to begin launching its first next-generation Starlink Gen2 satellites. This represents a major upgrade to the Starlink satellite internet service, which aims to provide high-speed internet access worldwide. [Source: Teslarati](https://www.teslarati.com/spacex-first-starlink-gen2-satellite-launch-2022/)

3. AI startup Synthetaic has raised $15 million in Series B funding. The company uses artificial intelligence to analyze data from space and air sensors, which could have significant applications in space exploration and satellite technology. [Source: SpaceNews](https://spacenews.com/ai-startup-synthetaic-raises-15-million-in-series-b-funding/)

4. The Space Force has formally established a unit within the U.S. Indo-Pacific Command, marking a permanent presence in the Indo-Pacific region. This could have significant implications for space security and geopolitics. [Source: SpaceNews](https://spacenews.com/space-force-establishes-permanent-presence-in-indo-pacific-region/)

5. Slingshot Aerospace, a space tracking and data analytics company, is expanding its network of ground-based optical telescopes to increase coverage of low Earth orbit. This could improve our ability to track and analyze objects in low Earth orbit, including satellites and space debris. [Source: SpaceNews](https://spacenews.com/slingshots-space-tracking-network-to-extend-coverage-of-low-earth-orbit/)

6. The National Natural Science Foundation of China has outlined a five-year project for researchers to study the assembly of ultra-large spacecraft. This could lead to significant advancements in spacecraft technology and space exploration capabilities. [Source: SpaceNews](https://spacenews.com/china-researching-challenges-of-kilometer-scale-ultra-large-spacecraft/)

7. The Center for AEroSpace Autonomy Research (CAESAR) at Stanford University is focusing on spacecraft autonomy. The center held a kickoff event on May 22, 2024, to highlight the industry, academia, and government collaboration it seeks to foster. This could lead to significant advancements in autonomous spacecraft technology. [Source: SpaceNews](https://spacenews.com/stanford-center-focuses-on-spacecraft-autonomy/)
```

</CodeGroup>

#### Key Components Explained

1. **Custom Knowledge Source (`SpaceNewsKnowledgeSource`)**:

   - Extends `BaseKnowledgeSource` for integration with CrewAI
   - Configurable API endpoint and article limit
   - Implements three key methods:
     - `load_content()`: Fetches articles from the API
     - `_format_articles()`: Structures the articles into readable text
     - `add()`: Processes and stores the content

2. **Agent Configuration**:

   - Specialized role as a Space News Analyst
   - Uses the knowledge source to access space news

3. **Task Setup**:

   - Takes a user question as input through `{user_question}`
   - Designed to provide detailed answers based on the knowledge source

4. **Crew Orchestration**:
   - Manages the workflow between agent and task
   - Handles input/output through the kickoff method

This example demonstrates how to:

- Create a custom knowledge source that fetches real-time data
- Process and format external data for AI consumption
- Use the knowledge source to answer specific user questions
- Integrate everything seamlessly with CrewAI's agent system

#### About the Spaceflight News API

The example uses the [Spaceflight News API](https://api.spaceflightnewsapi.net/v4/docs/), which:

- Provides free access to space-related news articles
- Requires no authentication
- Returns structured data about space news
- Supports pagination and filtering

You can customize the API query by modifying the endpoint URL:

```python Code
# Fetch more articles
recent_news = SpaceNewsKnowledgeSource(
    api_endpoint="https://api.spaceflightnewsapi.net/v4/articles",
    limit=20,  # Increase the number of articles
)

# Add search parameters
recent_news = SpaceNewsKnowledgeSource(
    api_endpoint="https://api.spaceflightnewsapi.net/v4/articles?search=NASA",  # Search for NASA news
    limit=10,
)
```

## Best Practices

<AccordionGroup>
  <Accordion title="Content Organization">
    - Keep chunk sizes appropriate for your content type
    - Consider content overlap for context preservation
    - Organize related information into separate knowledge sources
  </Accordion>

  <Accordion title="Performance Tips">
    - Adjust chunk sizes based on content complexity
    - Configure appropriate embedding models
    - Consider using local embedding providers for faster processing
  </Accordion>
</AccordionGroup>

@@ -7,32 +7,45 @@ icon: link
## Using LangChain Tools

<Info>
CrewAI seamlessly integrates with LangChain's comprehensive [list of tools](https://python.langchain.com/docs/integrations/tools/), all of which can be used with CrewAI.
</Info>

```python Code
from dotenv import load_dotenv
from crewai import Agent, Task, Crew
from crewai.tools import BaseTool
from pydantic import Field
from langchain_community.utilities import GoogleSerperAPIWrapper

# Set up your SERPER_API_KEY key in an .env file, eg:
# SERPER_API_KEY=<your api key>
load_dotenv()

class SearchTool(BaseTool):
    name: str = "Search"
    description: str = "Useful for search-based queries. Use this to find current information about markets, companies, and trends."
    search: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper)

    def _run(self, query: str) -> str:
        """Execute the search query and return results"""
        try:
            return self.search.run(query)
        except Exception as e:
            return f"Error performing search: {str(e)}"

# Create Agents
researcher = Agent(
    role='Research Analyst',
    goal='Gather current market data and trends',
    backstory="""You are an expert research analyst with years of experience in
    gathering market intelligence. You're known for your ability to find
    relevant and up-to-date market information and present it in a clear,
    actionable format.""",
    tools=[SearchTool()],
    verbose=True
)

# rest of the code ...
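# (Continuation sketch — the hunk ends above. The task and crew wiring below
# are an illustrative way to make the example runnable; the task wording is
# an assumption, not the original file's code.)
research_task = Task(
    description="Summarize the latest trends in the AI agent market.",
    expected_output="A short, sourced summary of current AI agent market trends.",
    agent=researcher,
)

crew = Crew(agents=[researcher], tasks=[research_task], verbose=True)
result = crew.kickoff()
print(result)
```
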
@@ -40,6 +53,6 @@ agent(

## Conclusion

Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively.
When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem. Consider utilizing error handling, caching mechanisms,
and the flexibility of tool arguments to optimize your agents' performance and capabilities.

(File diff suppressed because it is too large)

@@ -18,6 +18,7 @@ reason, and learn from past interactions.

| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory** | Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **User Memory** | Stores user-specific information and preferences, enhancing personalization and user experience. |

## How Memory Systems Empower Agents

@@ -92,6 +93,64 @@ my_crew = Crew(
|
||||
)
|
||||
```
|
||||
|
||||
## Integrating Mem0 for Enhanced User Memory
|
||||
|
||||
[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.
|
||||
|
||||
To include user-specific memory, you can get your API key [here](https://app.mem0.ai/dashboard/api-keys) and refer to the [docs](https://docs.mem0.ai/platform/quickstart#4-1-create-memories) for adding user preferences.
|
||||
|
||||
|
||||
```python Code
|
||||
import os
|
||||
from crewai import Crew, Process
|
||||
from mem0 import MemoryClient
|
||||
|
||||
# Set environment variables for Mem0
|
||||
os.environ["MEM0_API_KEY"] = "m0-xx"
|
||||
|
||||
# Step 1: Record preferences based on past conversation or user input
|
||||
client = MemoryClient()
|
||||
messages = [
|
||||
{"role": "user", "content": "Hi there! I'm planning a vacation and could use some advice."},
|
||||
{"role": "assistant", "content": "Hello! I'd be happy to help with your vacation planning. What kind of destination do you prefer?"},
|
||||
{"role": "user", "content": "I am more of a beach person than a mountain person."},
|
||||
{"role": "assistant", "content": "That's interesting. Do you like hotels or Airbnb?"},
|
||||
{"role": "user", "content": "I like Airbnb more."},
|
||||
]
|
||||
client.add(messages, user_id="john")
|
||||
|
||||
# Step 2: Create a Crew with User Memory
|
||||
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
verbose=True,
|
||||
process=Process.sequential,
|
||||
memory=True,
|
||||
memory_config={
|
||||
"provider": "mem0",
|
||||
"config": {"user_id": "john"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
## Memory Configuration Options
|
||||
If you want to access a specific organization and project, you can set the `org_id` and `project_id` parameters in the memory configuration.
|
||||
|
||||
```python Code
|
||||
from crewai import Crew
|
||||
|
||||
crew = Crew(
|
||||
agents=[...],
|
||||
tasks=[...],
|
||||
verbose=True,
|
||||
memory=True,
|
||||
memory_config={
|
||||
"provider": "mem0",
|
||||
"config": {"user_id": "john", "org_id": "my_org_id", "project_id": "my_project_id"},
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
## Additional Embedding Providers
|
||||
|
||||
|
||||
@@ -31,7 +31,7 @@ From this point on, your crew will have planning enabled, and the tasks will be
|
||||
|
||||
#### Planning LLM
|
||||
|
||||
Now you can define the LLM that will be used to plan the tasks.
|
||||
|
||||
When running the base case example, you will see something like the output below, which represents the output of the `AgentPlanner`
|
||||
responsible for creating the step-by-step logic to add to the Agents' tasks.
|
||||
@@ -39,7 +39,6 @@ responsible for creating the step-by-step logic to add to the Agents' tasks.
|
||||
<CodeGroup>
|
||||
```python Code
|
||||
from crewai import Crew, Agent, Task, Process
|
||||
|
||||
# Assemble your crew with planning capabilities and custom LLM
|
||||
my_crew = Crew(
|
||||
@@ -47,7 +46,7 @@ my_crew = Crew(
|
||||
tasks=self.tasks,
|
||||
process=Process.sequential,
|
||||
planning=True,
|
||||
planning_llm="gpt-4o"
|
||||
)
|
||||
|
||||
# Run the crew
|
||||
|
||||
@@ -23,9 +23,7 @@ Processes enable individual agents to operate as a cohesive unit, streamlining t
|
||||
To assign a process to a crew, specify the process type upon crew creation to set the execution strategy. For a hierarchical process, be sure to define `manager_llm` or `manager_agent` for the manager agent.
|
||||
|
||||
```python
|
||||
from crewai import Crew, Process
|
||||
|
||||
# Example: Creating a crew with a sequential process
|
||||
crew = Crew(
|
||||
@@ -40,7 +38,7 @@ crew = Crew(
|
||||
agents=my_agents,
|
||||
tasks=my_tasks,
|
||||
process=Process.hierarchical,
|
||||
manager_llm="gpt-4o"
|
||||
# or
|
||||
# manager_agent=my_manager_agent
|
||||
)
|
||||
|
||||
@@ -1,48 +1,178 @@
|
||||
---
|
||||
title: Tasks
|
||||
description: Detailed guide on managing and creating tasks within the CrewAI framework.
|
||||
icon: list-check
|
||||
---
|
||||
|
||||
## Overview of a Task
|
||||
|
||||
In the CrewAI framework, a `Task` is a specific assignment completed by an `Agent`.
|
||||
|
||||
Tasks provide all necessary details for execution, such as a description, the agent responsible, required tools, and more, facilitating a wide range of action complexities.
|
||||
|
||||
Tasks within CrewAI can be collaborative, requiring multiple agents to work together. This is managed through the task properties and orchestrated by the Crew's process, enhancing teamwork and efficiency.
|
||||
|
||||
### Task Execution Flow
|
||||
|
||||
Tasks can be executed in two ways:
|
||||
- **Sequential**: Tasks are executed in the order they are defined
|
||||
- **Hierarchical**: Tasks are assigned to agents based on their roles and expertise
|
||||
|
||||
The execution flow is defined when creating the crew:
|
||||
```python Code
|
||||
crew = Crew(
|
||||
agents=[agent1, agent2],
|
||||
tasks=[task1, task2],
|
||||
process=Process.sequential # or Process.hierarchical
|
||||
)
|
||||
```
|
||||
|
||||
## Task Attributes
|
||||
|
||||
| Attribute | Parameters | Type | Description |
|
||||
| :------------------------------- | :---------------- | :---------------------------- | :------------------------------------------------------------------------------------------------------------------- |
|
||||
| **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
|
||||
| **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
| **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
| **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task, assigned either directly or by the crew's process. |
| **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. Defaults to an empty list. |
| **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
| **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. |
| **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. Defaults to None. |
| **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. If used with `output_json` or `output_pydantic`, specifies how the output is saved. |
| **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. Requires an OpenAI client; only one output format can be set. |
| **Output Pydantic** _(optional)_ | `output_pydantic` | `Optional[Type[BaseModel]]` | A Pydantic model for the task output. Requires an OpenAI client; only one output format can be set. |
| **Output** _(optional)_ | `output` | `Optional[TaskOutput]` | An instance of `TaskOutput`, containing the raw, JSON, and Pydantic output plus additional details. |
| **Callback** _(optional)_ | `callback` | `Optional[Any]` | Function/object to be executed after task completion. |
| **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Indicates if the task should involve human review at the end, useful for tasks needing human oversight. Defaults to False. |
| **Converter Class** _(optional)_ | `converter_cls` | `Optional[Type[Converter]]` | A converter class used to export structured output. Defaults to None. |
|
||||
|
||||
## Creating Tasks
|
||||
|
||||
There are two ways to create tasks in CrewAI: using **YAML configuration (recommended)** or defining them **directly in code**.
|
||||
|
||||
### YAML Configuration (Recommended)
|
||||
|
||||
Using YAML configuration provides a cleaner, more maintainable way to define tasks. We strongly recommend using this approach to define tasks in your CrewAI projects.
|
||||
|
||||
After creating your CrewAI project as outlined in the [Installation](/installation) section, navigate to the `src/latest_ai_development/config/tasks.yaml` file and modify the template to match your specific task requirements.
|
||||
|
||||
<Note>
|
||||
Variables in your YAML files (like `{topic}`) will be replaced with values from your inputs when running the crew:
|
||||
```python Code
|
||||
crew.kickoff(inputs={'topic': 'AI Agents'})
|
||||
```
|
||||
</Note>
|
||||
|
||||
Here's an example of how to configure tasks using YAML:
|
||||
|
||||
```yaml tasks.yaml
|
||||
research_task:
|
||||
description: >
|
||||
Conduct a thorough research about {topic}
|
||||
Make sure you find any interesting and relevant information given
|
||||
the current year is 2024.
|
||||
expected_output: >
|
||||
A list with 10 bullet points of the most relevant information about {topic}
|
||||
agent: researcher
|
||||
|
||||
reporting_task:
|
||||
description: >
|
||||
Review the context you got and expand each topic into a full section for a report.
|
||||
Make sure the report is detailed and contains any and all relevant information.
|
||||
expected_output: >
|
||||
A fully fledged report with the main topics, each with a full section of information.
|
||||
Formatted as markdown without '```'
|
||||
agent: reporting_analyst
|
||||
output_file: report.md
|
||||
```
|
||||
|
||||
To use this YAML configuration in your code, create a crew class decorated with `@CrewBase`:
|
||||
|
||||
```python crew.py
|
||||
# src/latest_ai_development/crew.py
|
||||
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
from crewai_tools import SerperDevTool
|
||||
|
||||
@CrewBase
|
||||
class LatestAiDevelopmentCrew():
|
||||
"""LatestAiDevelopment crew"""
|
||||
|
||||
@agent
|
||||
def researcher(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['researcher'],
|
||||
verbose=True,
|
||||
tools=[SerperDevTool()]
|
||||
)
|
||||
|
||||
@agent
|
||||
def reporting_analyst(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['reporting_analyst'],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
@task
|
||||
def research_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['research_task']
|
||||
)
|
||||
|
||||
@task
|
||||
def reporting_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['reporting_task']
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
return Crew(
|
||||
agents=[
|
||||
self.researcher(),
|
||||
self.reporting_analyst()
|
||||
],
|
||||
tasks=[
|
||||
self.research_task(),
|
||||
self.reporting_task()
|
||||
],
|
||||
process=Process.sequential
|
||||
)
|
||||
```
|
||||
|
||||
<Note>
|
||||
The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
|
||||
</Note>
|
||||
|
||||
### Direct Code Definition (Alternative)
|
||||
|
||||
Alternatively, you can define tasks directly in your code without using YAML configuration:
|
||||
|
||||
```python task.py
|
||||
from crewai import Task
|
||||
|
||||
research_task = Task(
|
||||
description="""
|
||||
Conduct a thorough research about AI Agents.
|
||||
Make sure you find any interesting and relevant information given
|
||||
the current year is 2024.
|
||||
""",
|
||||
expected_output="""
|
||||
A list with 10 bullet points of the most relevant information about AI Agents
|
||||
""",
|
||||
agent=researcher
|
||||
)
|
||||
|
||||
reporting_task = Task(
|
||||
description="""
|
||||
Review the context you got and expand each topic into a full section for a report.
|
||||
Make sure the report is detailed and contains any and all relevant information.
|
||||
""",
|
||||
expected_output="""
|
||||
A fully fledged report with the main topics, each with a full section of information.
|
||||
Formatted as markdown without '```'
|
||||
""",
|
||||
agent=reporting_analyst,
|
||||
output_file="report.md"
|
||||
)
|
||||
```
|
||||
|
||||
@@ -52,6 +182,8 @@ task = Task(
|
||||
|
||||
## Task Output
|
||||
|
||||
Understanding task outputs is crucial for building effective AI workflows. CrewAI provides a structured way to handle task results through the `TaskOutput` class, which supports multiple output formats and can be easily passed between tasks.
|
||||
|
||||
The output of a task in the CrewAI framework is encapsulated within the `TaskOutput` class. This class provides a structured way to access the results of a task, including various formats such as raw output, JSON, and Pydantic models.
|
||||
|
||||
By default, the `TaskOutput` will only include the `raw` output. A `TaskOutput` will only include the `pydantic` or `json_dict` output if the original `Task` object was configured with `output_pydantic` or `output_json`, respectively.
|
||||
@@ -112,6 +244,326 @@ if task_output.pydantic:
|
||||
print(f"Pydantic Output: {task_output.pydantic}")
|
||||
```
|
||||
|
||||
## Task Dependencies and Context
|
||||
|
||||
Tasks can depend on the output of other tasks using the `context` attribute. For example:
|
||||
|
||||
```python Code
|
||||
research_task = Task(
|
||||
description="Research the latest developments in AI",
|
||||
expected_output="A list of recent AI developments",
|
||||
agent=researcher
|
||||
)
|
||||
|
||||
analysis_task = Task(
|
||||
description="Analyze the research findings and identify key trends",
|
||||
expected_output="Analysis report of AI trends",
|
||||
agent=analyst,
|
||||
context=[research_task] # This task will wait for research_task to complete
|
||||
)
|
||||
```
|
||||
|
||||
## Task Guardrails
|
||||
|
||||
Task guardrails provide a way to validate and transform task outputs before they
|
||||
are passed to the next task. This feature helps ensure data quality and provides
|
||||
feedback to agents when their output doesn't meet specific criteria.
|
||||
|
||||
### Using Task Guardrails
|
||||
|
||||
To add a guardrail to a task, provide a validation function through the `guardrail` parameter:
|
||||
|
||||
```python Code
|
||||
from typing import Tuple, Union, Dict, Any
|
||||
|
||||
def validate_blog_content(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
|
||||
"""Validate blog content meets requirements."""
|
||||
try:
|
||||
# Check word count
|
||||
word_count = len(result.split())
|
||||
if word_count > 200:
|
||||
return (False, {
|
||||
"error": "Blog content exceeds 200 words",
|
||||
"code": "WORD_COUNT_ERROR",
|
||||
"context": {"word_count": word_count}
|
||||
})
|
||||
|
||||
# Additional validation logic here
|
||||
return (True, result.strip())
|
||||
except Exception as e:
|
||||
return (False, {
|
||||
"error": "Unexpected error during validation",
|
||||
"code": "SYSTEM_ERROR"
|
||||
})
|
||||
|
||||
blog_task = Task(
|
||||
description="Write a blog post about AI",
|
||||
expected_output="A blog post under 200 words",
|
||||
agent=blog_agent,
|
||||
guardrail=validate_blog_content # Add the guardrail function
|
||||
)
|
||||
```
|
||||
|
||||
### Guardrail Function Requirements
|
||||
|
||||
1. **Function Signature**:
|
||||
- Must accept exactly one parameter (the task output)
|
||||
- Should return a tuple of `(bool, Any)`
|
||||
- Type hints are recommended but optional
|
||||
|
||||
2. **Return Values**:
|
||||
- Success: Return `(True, validated_result)`
|
||||
- Failure: Return `(False, error_details)`
|
||||
|
||||
### Error Handling Best Practices
|
||||
|
||||
1. **Structured Error Responses**:
|
||||
```python Code
|
||||
def validate_with_context(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
|
||||
try:
|
||||
# Main validation logic
|
||||
validated_data = perform_validation(result)
|
||||
return (True, validated_data)
|
||||
except ValidationError as e:
|
||||
return (False, {
|
||||
"error": str(e),
|
||||
"code": "VALIDATION_ERROR",
|
||||
"context": {"input": result}
|
||||
})
|
||||
except Exception as e:
|
||||
return (False, {
|
||||
"error": "Unexpected error",
|
||||
"code": "SYSTEM_ERROR"
|
||||
})
|
||||
```
|
||||
|
||||
2. **Error Categories**:
|
||||
- Use specific error codes
|
||||
- Include relevant context
|
||||
- Provide actionable feedback
|
||||
|
||||
3. **Validation Chain**:
|
||||
```python Code
|
||||
from typing import Any, Dict, Tuple, Union
|
||||
|
||||
def complex_validation(result: str) -> Tuple[bool, Union[str, Dict[str, Any]]]:
|
||||
"""Chain multiple validation steps."""
|
||||
# Step 1: Basic validation
|
||||
if not result:
|
||||
return (False, {"error": "Empty result", "code": "EMPTY_INPUT"})
|
||||
|
||||
# Step 2: Content validation
|
||||
try:
|
||||
validated = validate_content(result)
|
||||
if not validated:
|
||||
return (False, {"error": "Invalid content", "code": "CONTENT_ERROR"})
|
||||
|
||||
# Step 3: Format validation
|
||||
formatted = format_output(validated)
|
||||
return (True, formatted)
|
||||
except Exception as e:
|
||||
return (False, {
|
||||
"error": str(e),
|
||||
"code": "VALIDATION_ERROR",
|
||||
"context": {"step": "content_validation"}
|
||||
})
|
||||
```
|
||||
|
||||
### Handling Guardrail Results
|
||||
|
||||
When a guardrail returns `(False, error)`:
|
||||
1. The error is sent back to the agent
|
||||
2. The agent attempts to fix the issue
|
||||
3. The process repeats until:
|
||||
- The guardrail returns `(True, result)`
|
||||
- Maximum retries are reached
|
||||
|
||||
Example with retry handling:
|
||||
```python Code
|
||||
import json
from typing import Any, Dict, Tuple, Union
|
||||
|
||||
def validate_json_output(result: str) -> Tuple[bool, Union[Dict[str, Any], str]]:
|
||||
"""Validate and parse JSON output."""
|
||||
try:
|
||||
# Try to parse as JSON
|
||||
data = json.loads(result)
|
||||
return (True, data)
|
||||
except json.JSONDecodeError as e:
|
||||
return (False, {
|
||||
"error": "Invalid JSON format",
|
||||
"code": "JSON_ERROR",
|
||||
"context": {"line": e.lineno, "column": e.colno}
|
||||
})
|
||||
|
||||
task = Task(
|
||||
description="Generate a JSON report",
|
||||
expected_output="A valid JSON object",
|
||||
agent=analyst,
|
||||
guardrail=validate_json_output,
|
||||
max_retries=3 # Limit retry attempts
|
||||
)
|
||||
```
|
||||
|
||||
## Getting Structured, Consistent Outputs from Tasks
|
||||
|
||||
<Note>
|
||||
It's also important to note that the output of the final task of a crew becomes the final output of the crew itself.
|
||||
</Note>
|
||||
|
||||
### Using `output_pydantic`
|
||||
The `output_pydantic` property allows you to define a Pydantic model that the task output should conform to. This ensures that the output is not only structured but also validated according to the Pydantic model.
|
||||
|
||||
Here’s an example demonstrating how to use `output_pydantic`:
|
||||
|
||||
```python Code
|
||||
import json
|
||||
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class Blog(BaseModel):
|
||||
title: str
|
||||
content: str
|
||||
|
||||
|
||||
blog_agent = Agent(
|
||||
role="Blog Content Generator Agent",
|
||||
goal="Generate a blog title and content",
|
||||
backstory="""You are an expert content creator, skilled in crafting engaging and informative blog posts.""",
|
||||
verbose=False,
|
||||
allow_delegation=False,
|
||||
llm="gpt-4o",
|
||||
)
|
||||
|
||||
task1 = Task(
|
||||
description="""Create a blog title and content on a given topic. Make sure the content is under 200 words.""",
|
||||
expected_output="A compelling blog title and well-written content.",
|
||||
agent=blog_agent,
|
||||
output_pydantic=Blog,
|
||||
)
|
||||
|
||||
# Instantiate your crew with a sequential process
|
||||
crew = Crew(
|
||||
agents=[blog_agent],
|
||||
tasks=[task1],
|
||||
verbose=True,
|
||||
process=Process.sequential,
|
||||
)
|
||||
|
||||
result = crew.kickoff()
|
||||
|
||||
# Option 1: Accessing Properties Using Dictionary-Style Indexing
|
||||
print("Accessing Properties - Option 1")
|
||||
title = result["title"]
|
||||
content = result["content"]
|
||||
print("Title:", title)
|
||||
print("Content:", content)
|
||||
|
||||
# Option 2: Accessing Properties Directly from the Pydantic Model
|
||||
print("Accessing Properties - Option 2")
|
||||
title = result.pydantic.title
|
||||
content = result.pydantic.content
|
||||
print("Title:", title)
|
||||
print("Content:", content)
|
||||
|
||||
# Option 3: Accessing Properties Using the to_dict() Method
|
||||
print("Accessing Properties - Option 3")
|
||||
output_dict = result.to_dict()
|
||||
title = output_dict["title"]
|
||||
content = output_dict["content"]
|
||||
print("Title:", title)
|
||||
print("Content:", content)
|
||||
|
||||
# Option 4: Printing the Entire Blog Object
|
||||
print("Accessing Properties - Option 5")
|
||||
print("Blog:", result)
|
||||
|
||||
```
|
||||
In this example:
* A Pydantic model `Blog` is defined with `title` and `content` fields.
* The task `task1` uses the `output_pydantic` property to specify that its output should conform to the `Blog` model.
* After executing the crew, you can access the structured output in multiple ways as shown.
|
||||
|
||||
#### Explanation of Accessing the Output
|
||||
1. Dictionary-Style Indexing: You can directly access the fields using `result["field_name"]`. This works because the `CrewOutput` class implements the `__getitem__` method.
2. Directly from Pydantic Model: Access the attributes directly from the `result.pydantic` object.
3. Using the `to_dict()` Method: Convert the output to a dictionary and access the fields.
4. Printing the Entire Object: Simply print the `result` object to see the structured output.
|
||||
|
||||
### Using `output_json`
|
||||
The `output_json` property allows you to define the expected output in JSON format. This ensures that the task's output is a valid JSON structure that can be easily parsed and used in your application.
|
||||
|
||||
Here’s an example demonstrating how to use `output_json`:
|
||||
|
||||
```python Code
|
||||
import json
|
||||
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
# Define the Pydantic model for the blog
|
||||
class Blog(BaseModel):
|
||||
title: str
|
||||
content: str
|
||||
|
||||
|
||||
# Define the agent
|
||||
blog_agent = Agent(
|
||||
role="Blog Content Generator Agent",
|
||||
goal="Generate a blog title and content",
|
||||
backstory="""You are an expert content creator, skilled in crafting engaging and informative blog posts.""",
|
||||
verbose=False,
|
||||
allow_delegation=False,
|
||||
llm="gpt-4o",
|
||||
)
|
||||
|
||||
# Define the task with output_json set to the Blog model
|
||||
task1 = Task(
|
||||
description="""Create a blog title and content on a given topic. Make sure the content is under 200 words.""",
|
||||
expected_output="A JSON object with 'title' and 'content' fields.",
|
||||
agent=blog_agent,
|
||||
output_json=Blog,
|
||||
)
|
||||
|
||||
# Instantiate the crew with a sequential process
|
||||
crew = Crew(
|
||||
agents=[blog_agent],
|
||||
tasks=[task1],
|
||||
verbose=True,
|
||||
process=Process.sequential,
|
||||
)
|
||||
|
||||
# Kickoff the crew to execute the task
|
||||
result = crew.kickoff()
|
||||
|
||||
# Option 1: Accessing Properties Using Dictionary-Style Indexing
|
||||
print("Accessing Properties - Option 1")
|
||||
title = result["title"]
|
||||
content = result["content"]
|
||||
print("Title:", title)
|
||||
print("Content:", content)
|
||||
|
||||
# Option 2: Printing the Entire Blog Object
|
||||
print("Accessing Properties - Option 2")
|
||||
print("Blog:", result)
|
||||
```
|
||||
|
||||
In this example:
* A Pydantic model `Blog` is defined with `title` and `content` fields, which is used to specify the structure of the JSON output.
* The task `task1` uses the `output_json` property to indicate that it expects a JSON output conforming to the `Blog` model.
* After executing the crew, you can access the structured JSON output in two ways as shown.
|
||||
|
||||
#### Explanation of Accessing the Output
|
||||
|
||||
1. Accessing Properties Using Dictionary-Style Indexing: You can access the fields directly using `result["field_name"]`. This is possible because the `CrewOutput` class implements the `__getitem__` method, allowing you to treat the output like a dictionary. In this option, we're retrieving the title and content from the result.
2. Printing the Entire Blog Object: By printing `result`, you get the string representation of the `CrewOutput` object. Since the `__str__` method is implemented to return the JSON output, this will display the entire output as a formatted string representing the `Blog` object.
|
||||
|
||||
---
|
||||
|
||||
By using `output_pydantic` or `output_json`, you ensure that your tasks produce outputs in a consistent and structured format, making it easier to process and utilize the data within your application or across multiple tasks.
|
||||
|
||||
## Integrating Tools with Tasks
|
||||
|
||||
Leverage tools from the [CrewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools) for enhanced task performance and agent interaction.
|
||||
@@ -167,16 +619,16 @@ This is useful when you have a task that depends on the output of another task t
|
||||
# ...
|
||||
|
||||
research_ai_task = Task(
|
||||
description="Research the latest developments in AI",
expected_output="A list of recent AI developments",
|
||||
async_execution=True,
|
||||
agent=research_agent,
|
||||
tools=[search_tool]
|
||||
)
|
||||
|
||||
research_ops_task = Task(
|
||||
description="Research the latest developments in AI Ops",
expected_output="A list of recent AI Ops developments",
|
||||
async_execution=True,
|
||||
agent=research_agent,
|
||||
tools=[search_tool]
|
||||
@@ -184,7 +636,7 @@ research_ops_task = Task(
|
||||
|
||||
write_blog_task = Task(
|
||||
description="Write a full blog post about the importance of AI and its latest news",
|
||||
expected_output="Full blog post that is 4 paragraphs long",
|
||||
agent=writer_agent,
|
||||
context=[research_ai_task, research_ops_task]
|
||||
)
|
||||
@@ -296,6 +748,114 @@ While creating and executing tasks, certain validation mechanisms are in place t
|
||||
|
||||
These validations help in maintaining the consistency and reliability of task executions within the crewAI framework.
|
||||
|
||||
## Task Guardrails
|
||||
|
||||
Task guardrails provide a powerful way to validate, transform, or filter task outputs before they are passed to the next task. Guardrails are optional functions that execute before the next task starts, allowing you to ensure that task outputs meet specific requirements or formats.
|
||||
|
||||
### Basic Usage
|
||||
|
||||
```python Code
|
||||
import json
from typing import Tuple, Union
|
||||
from crewai import Task
|
||||
|
||||
def validate_json_output(result: str) -> Tuple[bool, Union[dict, str]]:
|
||||
"""Validate that the output is valid JSON."""
|
||||
try:
|
||||
json_data = json.loads(result)
|
||||
return (True, json_data)
|
||||
except json.JSONDecodeError:
|
||||
return (False, "Output must be valid JSON")
|
||||
|
||||
task = Task(
|
||||
description="Generate JSON data",
|
||||
expected_output="Valid JSON object",
|
||||
guardrail=validate_json_output
|
||||
)
|
||||
```
|
||||
|
||||
### How Guardrails Work
|
||||
|
||||
1. **Optional Attribute**: Guardrails are an optional attribute at the task level, allowing you to add validation only where needed.
|
||||
2. **Execution Timing**: The guardrail function is executed before the next task starts, ensuring valid data flow between tasks.
|
||||
3. **Return Format**: Guardrails must return a tuple of `(success, data)`:
|
||||
- If `success` is `True`, `data` is the validated/transformed result
|
||||
- If `success` is `False`, `data` is the error message
|
||||
4. **Result Routing**:
|
||||
- On success (`True`), the result is automatically passed to the next task
|
||||
- On failure (`False`), the error is sent back to the agent to generate a new answer
|
||||
|
||||
### Common Use Cases
|
||||
|
||||
#### Data Format Validation
|
||||
```python Code
|
||||
def validate_email_format(result: str) -> Tuple[bool, str]:
|
||||
"""Ensure the output contains a valid email address."""
|
||||
import re
|
||||
email_pattern = r'^[\w\.-]+@[\w\.-]+\.\w+$'
|
||||
if re.match(email_pattern, result.strip()):
|
||||
return (True, result.strip())
|
||||
return (False, "Output must be a valid email address")
|
||||
```
|
||||
|
||||
#### Content Filtering
|
||||
```python Code
|
||||
def filter_sensitive_info(result: str) -> Tuple[bool, str]:
|
||||
"""Remove or validate sensitive information."""
|
||||
sensitive_patterns = ['SSN:', 'password:', 'secret:']
|
||||
for pattern in sensitive_patterns:
|
||||
if pattern.lower() in result.lower():
|
||||
return (False, f"Output contains sensitive information ({pattern})")
|
||||
return (True, result)
|
||||
```
|
||||
|
||||
#### Data Transformation
|
||||
```python Code
|
||||
def normalize_phone_number(result: str) -> Tuple[bool, str]:
|
||||
"""Ensure phone numbers are in a consistent format."""
|
||||
import re
|
||||
digits = re.sub(r'\D', '', result)
|
||||
if len(digits) == 10:
|
||||
formatted = f"({digits[:3]}) {digits[3:6]}-{digits[6:]}"
|
||||
return (True, formatted)
|
||||
return (False, "Output must be a 10-digit phone number")
|
||||
```
|
||||
|
||||
### Advanced Features
|
||||
|
||||
#### Chaining Multiple Validations
|
||||
```python Code
|
||||
def chain_validations(*validators):
|
||||
"""Chain multiple validators together."""
|
||||
def combined_validator(result):
|
||||
for validator in validators:
|
||||
success, data = validator(result)
|
||||
if not success:
|
||||
return (False, data)
|
||||
result = data
|
||||
return (True, result)
|
||||
return combined_validator
|
||||
|
||||
# Usage
|
||||
task = Task(
|
||||
description="Get user contact info",
|
||||
expected_output="Email and phone",
|
||||
guardrail=chain_validations(
|
||||
validate_email_format,
|
||||
filter_sensitive_info
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
#### Custom Retry Logic
|
||||
```python Code
|
||||
task = Task(
|
||||
description="Generate data",
|
||||
expected_output="Valid data",
|
||||
guardrail=validate_data,
|
||||
max_retries=5 # Override default retry limit
|
||||
)
|
||||
```
|
||||
|
||||
## Creating Directories when Saving Files
|
||||
|
||||
You can now specify if a task should create directories when saving its output to a file. This is particularly useful for organizing outputs and ensuring that file paths are correctly structured.
|
||||
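
A minimal sketch of what this looks like (the file path is illustrative, and the `create_directory` flag is shown here as an assumption about the current API):

```python Code
save_output_task = Task(
    description="Summarize the latest AI news",
    expected_output="A bullet list summary of the top 5 most important AI news",
    agent=research_agent,
    output_file="outputs/ai_news_summary.txt",
    create_directory=True  # create the outputs/ directory if it doesn't exist
)
```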
@@ -317,7 +877,7 @@ save_output_task = Task(
|
||||
|
||||
## Conclusion
|
||||
|
||||
Tasks are the driving force behind the actions of agents in CrewAI.
|
||||
By properly defining tasks and their outcomes, you set the stage for your AI agents to work effectively, either independently or as a collaborative unit.
|
||||
Equipping tasks with appropriate tools, understanding the execution process, and following robust validation practices are crucial for maximizing CrewAI's potential,
|
||||
ensuring agents are effectively prepared for their assignments and that tasks are executed as intended.
|
||||
|
||||
@@ -172,6 +172,48 @@ def my_tool(question: str) -> str:
|
||||
return "Result from your custom tool"
|
||||
```
|
||||
|
||||
### Structured Tools
|
||||
|
||||
The `StructuredTool` class wraps functions as tools, providing flexibility and validation while reducing boilerplate. It supports custom schemas and dynamic logic for seamless integration of complex functionalities.
|
||||
|
||||
#### Example:
|
||||
Using `StructuredTool.from_function`, you can wrap a function that interacts with an external API or system, providing a structured interface. This enables robust validation and consistent execution, making it easier to integrate complex functionalities into your applications as demonstrated in the following example:
|
||||
|
||||
```python
|
||||
from crewai.tools.structured_tool import CrewStructuredTool
|
||||
from pydantic import BaseModel
|
||||
|
||||
# Define the schema for the tool's input using Pydantic
|
||||
class APICallInput(BaseModel):
|
||||
endpoint: str
|
||||
parameters: dict
|
||||
|
||||
# Wrapper function to execute the API call
|
||||
def tool_wrapper(*args, **kwargs):
|
||||
# Here, you would typically call the API using the parameters
|
||||
# For demonstration, we'll return a placeholder string
|
||||
return f"Call the API at {kwargs['endpoint']} with parameters {kwargs['parameters']}"
|
||||
|
||||
# Create and return the structured tool
|
||||
def create_structured_tool():
|
||||
return CrewStructuredTool.from_function(
|
||||
name='Wrapper API',
|
||||
description="A tool to wrap API calls with structured input.",
|
||||
args_schema=APICallInput,
|
||||
func=tool_wrapper,
|
||||
)
|
||||
|
||||
# Example usage
|
||||
structured_tool = create_structured_tool()
|
||||
|
||||
# Execute the tool with structured input
|
||||
result = structured_tool._run(**{
|
||||
"endpoint": "https://example.com/api",
|
||||
"parameters": {"key1": "value1", "key2": "value2"}
|
||||
})
|
||||
print(result) # Output: Call the API at https://example.com/api with parameters {'key1': 'value1', 'key2': 'value2'}
|
||||
```
|
||||
|
||||
### Custom Caching Mechanism
|
||||
|
||||
<Tip>
|
||||
|
||||
@@ -57,7 +57,7 @@ This feature is useful for debugging and understanding how agents interact with
|
||||
<Step title="Install AgentOps">
|
||||
Install AgentOps with:
|
||||
```bash
|
||||
pip install 'crewai[agentops]'
|
||||
```
|
||||
or
|
||||
```bash
|
||||
|
||||
59
docs/how-to/before-and-after-kickoff-hooks.mdx
Normal file
@@ -0,0 +1,59 @@
|
||||
---
|
||||
title: Before and After Kickoff Hooks
|
||||
description: Learn how to use before and after kickoff hooks in CrewAI
|
||||
---
|
||||
|
||||
CrewAI provides hooks that allow you to execute code before and after a crew's kickoff. These hooks are useful for preprocessing inputs or post-processing results.
|
||||
|
||||
## Before Kickoff Hook
|
||||
|
||||
The before kickoff hook is executed before the crew starts its tasks. It receives the input dictionary and can modify it before passing it to the crew. You can use this hook to set up your environment, load necessary data, or preprocess your inputs. This is useful in scenarios where the input data might need enrichment or validation before being processed by the crew.
|
||||
|
||||
Here's an example of defining a before kickoff function in your `crew.py`:
|
||||
|
||||
```python
|
||||
from crewai.project import CrewBase, before_kickoff
|
||||
|
||||
@CrewBase
|
||||
class MyCrew:
|
||||
@before_kickoff
|
||||
def prepare_data(self, inputs):
|
||||
# Preprocess or modify inputs
|
||||
inputs['processed'] = True
|
||||
return inputs
|
||||
|
||||
#...
|
||||
```
|
||||
|
||||
In this example, the `prepare_data` function modifies the inputs by adding a new key-value pair indicating that the inputs have been processed.
|
||||
|
||||
## After Kickoff Hook
|
||||
|
||||
The after kickoff hook is executed after the crew has completed its tasks. It receives the result object, which contains the outputs of the crew's execution. This hook is ideal for post-processing results, such as logging, data transformation, or further analysis.
|
||||
|
||||
Here's how you can define an after kickoff function in your `crew.py`:
|
||||
|
||||
```python
|
||||
from crewai.project import CrewBase, after_kickoff
|
||||
|
||||
@CrewBase
|
||||
class MyCrew:
|
||||
@after_kickoff
|
||||
def log_results(self, result):
|
||||
# Log or modify the results
|
||||
print("Crew execution completed with result:", result)
|
||||
return result
|
||||
|
||||
# ...
|
||||
```
|
||||
|
||||
|
||||
In the `log_results` function, the results of the crew execution are simply printed out. You can extend this to perform more complex operations such as sending notifications or integrating with other services.
|
||||
|
||||
## Utilizing Both Hooks
|
||||
|
||||
Both hooks can be used together to provide a comprehensive setup and teardown process for your crew's execution. They are particularly useful in maintaining clean code architecture by separating concerns and enhancing the modularity of your CrewAI implementations.
|
||||
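
As a minimal sketch (assuming the `crewai.project` imports shown above), here is how both hooks can live in one crew class:

```python
from crewai.project import CrewBase, before_kickoff, after_kickoff

@CrewBase
class MyCrew:
    @before_kickoff
    def prepare_data(self, inputs):
        # Enrich or validate inputs before the crew runs
        inputs['processed'] = True
        return inputs

    @after_kickoff
    def log_results(self, result):
        # Log or post-process the final result
        print("Crew execution completed with result:", result)
        return result

    # ... agents, tasks, and crew definitions go here
```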
|
||||
## Conclusion
|
||||
|
||||
Before and after kickoff hooks in CrewAI offer powerful ways to interact with the lifecycle of a crew's execution. By understanding and utilizing these hooks, you can greatly enhance the robustness and flexibility of your AI agents.
|
||||
@@ -73,9 +73,9 @@ result = crew.kickoff()
|
||||
If you're using the hierarchical process and don't want to set a custom manager agent, you can specify the language model for the manager:
|
||||
|
||||
```python Code
|
||||
from langchain_openai import ChatOpenAI
|
||||
from crewai import LLM
|
||||
|
||||
manager_llm = ChatOpenAI(model_name="gpt-4")
|
||||
manager_llm = LLM(model="gpt-4o")
|
||||
|
||||
crew = Crew(
|
||||
agents=[researcher, writer],
|
||||
|
||||
@@ -32,6 +32,8 @@ LiteLLM supports a wide range of providers, including but not limited to:
|
||||
- Cloudflare Workers AI
|
||||
- DeepInfra
|
||||
- Groq
|
||||
- SambaNova
|
||||
- [NVIDIA NIMs](https://docs.api.nvidia.com/nim/reference/models-1)
|
||||
- And many more!
|
||||
|
||||
For a complete and up-to-date list of supported providers, please refer to the [LiteLLM Providers documentation](https://docs.litellm.ai/docs/providers).
|
||||
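
Because routing goes through LiteLLM, switching providers is usually just a change of model string. A minimal sketch (the exact model identifiers below are illustrative; check each provider's page in the LiteLLM docs for current names):

```python Code
from crewai import LLM

# LiteLLM model strings follow a "<provider>/<model>" convention
groq_llm = LLM(model="groq/llama-3.1-70b-versatile")
nim_llm = LLM(model="nvidia_nim/meta/llama3-70b-instruct")
```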
@@ -125,10 +127,10 @@ You can connect to OpenAI-compatible LLMs using either environment variables or
|
||||
</Tab>
|
||||
<Tab title="Using LLM Class Attributes">
|
||||
<CodeGroup>
|
||||
```python Code
llm = LLM(
    model="custom-model-name",
    api_key="your-api-key",
    base_url="https://api.your-provider.com/v1"
)
|
||||
agent = Agent(llm=llm, ...)
|
||||
@@ -179,4 +181,4 @@ This is particularly useful when working with OpenAI-compatible APIs or when you
|
||||
|
||||
## Conclusion
|
||||
|
||||
By leveraging LiteLLM, CrewAI offers seamless integration with a vast array of LLMs. This flexibility allows you to choose the most suitable model for your specific needs, whether you prioritize performance, cost-efficiency, or local deployment. Remember to consult the [LiteLLM documentation](https://docs.litellm.ai/docs/) for the most up-to-date information on supported models and configuration options.
|
||||
|
||||
138
docs/how-to/multimodal-agents.mdx
Normal file
@@ -0,0 +1,138 @@
|
||||
---
|
||||
title: Using Multimodal Agents
|
||||
description: Learn how to enable and use multimodal capabilities in your agents for processing images and other non-text content within the CrewAI framework.
|
||||
icon: image
|
||||
---
|
||||
|
||||
# Using Multimodal Agents
|
||||
|
||||
CrewAI supports multimodal agents that can process both text and non-text content like images. This guide will show you how to enable and use multimodal capabilities in your agents.
|
||||
|
||||
## Enabling Multimodal Capabilities
|
||||
|
||||
To create a multimodal agent, simply set the `multimodal` parameter to `True` when initializing your agent:
|
||||
|
||||
```python
|
||||
from crewai import Agent
|
||||
|
||||
agent = Agent(
|
||||
role="Image Analyst",
|
||||
goal="Analyze and extract insights from images",
|
||||
backstory="An expert in visual content interpretation with years of experience in image analysis",
|
||||
multimodal=True # This enables multimodal capabilities
|
||||
)
|
||||
```
|
||||
|
||||
When you set `multimodal=True`, the agent is automatically configured with the necessary tools for handling non-text content, including the `AddImageTool`.
|
||||
|
||||
## Working with Images
|
||||
|
||||
The multimodal agent comes pre-configured with the `AddImageTool`, which allows it to process images. You don't need to manually add this tool - it's automatically included when you enable multimodal capabilities.
|
||||
|
||||
Here's a complete example showing how to use a multimodal agent to analyze an image:
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
|
||||
# Create a multimodal agent
|
||||
image_analyst = Agent(
|
||||
role="Product Analyst",
|
||||
goal="Analyze product images and provide detailed descriptions",
|
||||
backstory="Expert in visual product analysis with deep knowledge of design and features",
|
||||
multimodal=True
|
||||
)
|
||||
|
||||
# Create a task for image analysis
|
||||
task = Task(
|
||||
description="Analyze the product image at https://example.com/product.jpg and provide a detailed description",
|
||||
agent=image_analyst
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(
|
||||
agents=[image_analyst],
|
||||
tasks=[task]
|
||||
)
|
||||
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
### Advanced Usage with Context
|
||||
|
||||
You can provide additional context or specific questions about the image when creating tasks for multimodal agents. The task description can include specific aspects you want the agent to focus on:
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
|
||||
# Create a multimodal agent for detailed analysis
|
||||
expert_analyst = Agent(
|
||||
role="Visual Quality Inspector",
|
||||
goal="Perform detailed quality analysis of product images",
|
||||
backstory="Senior quality control expert with expertise in visual inspection",
|
||||
multimodal=True # AddImageTool is automatically included
|
||||
)
|
||||
|
||||
# Create a task with specific analysis requirements
|
||||
inspection_task = Task(
|
||||
description="""
|
||||
Analyze the product image at https://example.com/product.jpg with focus on:
|
||||
1. Quality of materials
|
||||
2. Manufacturing defects
|
||||
3. Compliance with standards
|
||||
Provide a detailed report highlighting any issues found.
|
||||
""",
|
||||
agent=expert_analyst
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(
|
||||
agents=[expert_analyst],
|
||||
tasks=[inspection_task]
|
||||
)
|
||||
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
### Tool Details
|
||||
|
||||
When working with multimodal agents, the `AddImageTool` is automatically configured with the following schema:
|
||||
|
||||
```python
|
||||
class AddImageToolSchema:
|
||||
image_url: str # Required: The URL or path of the image to process
|
||||
action: Optional[str] = None # Optional: Additional context or specific questions about the image
|
||||
```
|
||||
|
||||
The multimodal agent will automatically handle the image processing through its built-in tools, allowing it to:
|
||||
- Access images via URLs or local file paths
|
||||
- Process image content with optional context or specific questions
|
||||
- Provide analysis and insights based on the visual information and task requirements
|
||||
|
||||
## Best Practices
|
||||
|
||||
When working with multimodal agents, keep these best practices in mind:
|
||||
|
||||
1. **Image Access**
|
||||
- Ensure your images are accessible via URLs that the agent can reach
|
||||
- For local images, consider hosting them temporarily or using absolute file paths
|
||||
- Verify that image URLs are valid and accessible before running tasks
|
||||
|
||||
2. **Task Description**
|
||||
- Be specific about what aspects of the image you want the agent to analyze
|
||||
- Include clear questions or requirements in the task description
|
||||
- Consider using the optional `action` parameter for focused analysis
|
||||
|
||||
3. **Resource Management**
|
||||
- Image processing may require more computational resources than text-only tasks
|
||||
- Some language models may require base64 encoding for image data
|
||||
- Consider batch processing for multiple images to optimize performance
|
||||
|
||||
4. **Environment Setup**
|
||||
- Verify that your environment has the necessary dependencies for image processing
|
||||
- Ensure your language model supports multimodal capabilities
|
||||
- Test with small images first to validate your setup
|
||||
|
||||
5. **Error Handling**
|
||||
- Implement proper error handling for image loading failures
|
||||
- Have fallback strategies for when image processing fails
|
||||
- Monitor and log image processing operations for debugging
|
||||
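
As a minimal illustration of the error-handling advice above (the logging setup and fallback behavior are assumptions, not framework requirements):

```python
import logging

logger = logging.getLogger(__name__)

try:
    result = crew.kickoff()
except Exception as e:
    # Fall back gracefully when image retrieval or processing fails
    logger.error("Image analysis failed: %s", e)
    result = None
```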
181
docs/how-to/openlit-observability.mdx
Normal file
@@ -0,0 +1,181 @@
|
||||
---
|
||||
title: Agent Monitoring with OpenLIT
|
||||
description: Quickly start monitoring your Agents in just a single line of code with OpenTelemetry.
|
||||
icon: magnifying-glass-chart
|
||||
---
|
||||
|
||||
# OpenLIT Overview
|
||||
|
||||
[OpenLIT](https://github.com/openlit/openlit?src=crewai-docs) is an open-source tool that makes it simple to monitor the performance of AI agents, LLMs, VectorDBs, and GPUs with just **one** line of code.
|
||||
|
||||
It provides OpenTelemetry-native tracing and metrics to track important parameters like cost, latency, interactions and task sequences.
|
||||
This setup enables you to track hyperparameters and monitor for performance issues, helping you find ways to enhance and fine-tune your agents over time.
|
||||
|
||||
<Frame caption="OpenLIT Dashboard">
|
||||
<img src="/images/openlit1.png" alt="Overview Agent usage including cost and tokens" />
|
||||
<img src="/images/openlit2.png" alt="Overview of agent otel traces and metrics" />
|
||||
<img src="/images/openlit3.png" alt="Overview of agent traces in details" />
|
||||
</Frame>
|
||||
|
||||
### Features
|
||||
|
||||
- **Analytics Dashboard**: Monitor your Agents' health and performance with detailed dashboards that track metrics, costs, and user interactions.
|
||||
- **OpenTelemetry-native Observability SDK**: Vendor-neutral SDKs to send traces and metrics to your existing observability tools like Grafana, DataDog and more.
|
||||
- **Cost Tracking for Custom and Fine-Tuned Models**: Tailor cost estimations for specific models using custom pricing files for precise budgeting.
|
||||
- **Exceptions Monitoring Dashboard**: Quickly spot and resolve issues by tracking common exceptions and errors with a monitoring dashboard.
|
||||
- **Compliance and Security**: Detect potential threats such as profanity and PII leaks.
|
||||
- **Prompt Injection Detection**: Identify potential code injection and secret leaks.
|
||||
- **API Keys and Secrets Management**: Securely handle your LLM API keys and secrets centrally, avoiding insecure practices.
|
||||
- **Prompt Management**: Manage and version Agent prompts using PromptHub for consistent and easy access across Agents.
|
||||
- **Model Playground**: Test and compare different models for your CrewAI agents before deployment.
|
||||
|
||||
## Setup Instructions
|
||||
|
||||
<Steps>
|
||||
<Step title="Deploy OpenLIT">
|
||||
<Steps>
|
||||
<Step title="Git Clone OpenLIT Repository">
|
||||
```shell
|
||||
git clone git@github.com:openlit/openlit.git
|
||||
```
|
||||
</Step>
|
||||
<Step title="Start Docker Compose">
|
||||
From the root directory of the [OpenLIT Repo](https://github.com/openlit/openlit), run the command below:
|
||||
```shell
|
||||
docker compose up -d
|
||||
```
|
||||
</Step>
|
||||
</Steps>
|
||||
</Step>
|
||||
<Step title="Install OpenLIT SDK">
|
||||
```shell
|
||||
pip install openlit
|
||||
```
|
||||
</Step>
|
||||
<Step title="Initialize OpenLIT in Your Application">
|
||||
Add the following two lines to your application code:
|
||||
<Tabs>
|
||||
<Tab title="Setup using function arguments">
|
||||
```python
|
||||
import openlit
|
||||
openlit.init(otlp_endpoint="http://127.0.0.1:4318")
|
||||
```
|
||||
|
||||
Example Usage for monitoring a CrewAI Agent:
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew, Process
|
||||
import openlit
|
||||
|
||||
openlit.init(disable_metrics=True)
|
||||
# Define your agents
|
||||
researcher = Agent(
|
||||
role="Researcher",
|
||||
goal="Conduct thorough research and analysis on AI and AI agents",
|
||||
backstory="You're an expert researcher, specialized in technology, software engineering, AI, and startups. You work as a freelancer and are currently researching for a new client.",
|
||||
allow_delegation=False,
|
||||
llm='command-r'
|
||||
)
|
||||
|
||||
|
||||
# Define your task
|
||||
task = Task(
|
||||
description="Generate a list of 5 interesting ideas for an article, then write one captivating paragraph for each idea that showcases the potential of a full article on this topic. Return the list of ideas with their paragraphs and your notes.",
|
||||
expected_output="5 bullet points, each with a paragraph and accompanying notes.",
|
||||
)
|
||||
|
||||
# Define the manager agent
|
||||
manager = Agent(
|
||||
role="Project Manager",
|
||||
goal="Efficiently manage the crew and ensure high-quality task completion",
|
||||
backstory="You're an experienced project manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
|
||||
allow_delegation=True,
|
||||
llm='command-r'
|
||||
)
|
||||
|
||||
# Instantiate your crew with a custom manager
|
||||
crew = Crew(
|
||||
agents=[researcher],
|
||||
tasks=[task],
|
||||
manager_agent=manager,
|
||||
process=Process.hierarchical,
|
||||
)
|
||||
|
||||
# Start the crew's work
|
||||
result = crew.kickoff()
|
||||
|
||||
print(result)
|
||||
```
|
||||
</Tab>
|
||||
<Tab title="Setup using Environment Variables">
|
||||
|
||||
Add the following two lines to your application code:
|
||||
```python
|
||||
import openlit
|
||||
|
||||
openlit.init()
|
||||
```
|
||||
|
||||
Run the following command to configure the OTEL export endpoint:
|
||||
```shell
|
||||
export OTEL_EXPORTER_OTLP_ENDPOINT="http://127.0.0.1:4318"
|
||||
```
|
||||
|
||||
Example Usage for monitoring a CrewAI Async Agent:
|
||||
|
||||
```python
|
||||
import asyncio
|
||||
from crewai import Crew, Agent, Task
|
||||
import openlit
|
||||
|
||||
openlit.init(otlp_endpoint="http://127.0.0.1:4318")
|
||||
|
||||
# Create an agent with code execution enabled
|
||||
coding_agent = Agent(
|
||||
role="Python Data Analyst",
|
||||
goal="Analyze data and provide insights using Python",
|
||||
backstory="You are an experienced data analyst with strong Python skills.",
|
||||
allow_code_execution=True,
|
||||
llm="command-r"
|
||||
)
|
||||
|
||||
# Create a task that requires code execution
|
||||
data_analysis_task = Task(
|
||||
description="Analyze the given dataset and calculate the average age of participants. Ages: {ages}",
|
||||
agent=coding_agent,
|
||||
expected_output="5 bullet points, each with a paragraph and accompanying notes.",
|
||||
)
|
||||
|
||||
# Create a crew and add the task
|
||||
analysis_crew = Crew(
|
||||
agents=[coding_agent],
|
||||
tasks=[data_analysis_task]
|
||||
)
|
||||
|
||||
# Async function to kickoff the crew asynchronously
|
||||
async def async_crew_execution():
|
||||
result = await analysis_crew.kickoff_async(inputs={"ages": [25, 30, 35, 40, 45]})
|
||||
print("Crew Result:", result)
|
||||
|
||||
# Run the async function
|
||||
asyncio.run(async_crew_execution())
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
Refer to OpenLIT [Python SDK repository](https://github.com/openlit/openlit/tree/main/sdk/python) for more advanced configurations and use cases.
|
||||
</Step>
|
||||
<Step title="Visualize and Analyze">
|
||||
With the Agent Observability data now being collected and sent to OpenLIT, the next step is to visualize and analyze this data to get insights into your Agent's performance and behavior, and to identify areas for improvement.
|
||||
|
||||
Just head over to OpenLIT at `127.0.0.1:3000` in your browser to start exploring. You can log in using the default credentials:
|
||||
- **Email**: `user@openlit.io`
|
||||
- **Password**: `openlituser`
|
||||
|
||||
<Frame caption="OpenLIT Dashboard">
|
||||
<img src="/images/openlit1.png" alt="Overview Agent usage including cost and tokens" />
|
||||
<img src="/images/openlit2.png" alt="Overview of agent otel traces and metrics" />
|
||||
</Frame>
|
||||
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
211
docs/how-to/portkey-observability-and-guardrails.mdx
Normal file
@@ -0,0 +1,211 @@
|
||||
# Portkey Integration with CrewAI
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-CrewAI.png" alt="Portkey CrewAI Header Image" width="70%" />

[Portkey](https://portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) is a 2-line upgrade that makes your CrewAI agents reliable, cost-efficient, and fast.

Portkey adds 4 core production capabilities to any CrewAI agent:
1. Routing to **250+ LLMs**
2. Making each LLM call more robust
3. Full-stack tracing plus cost and performance analytics
4. Real-time guardrails to enforce behavior
## Getting Started

1. **Install Required Packages:**

```bash
pip install -qU crewai portkey-ai
```

2. **Configure the LLM Client:**

To build CrewAI agents with Portkey, you'll need two keys:
- **Portkey API Key**: Sign up on the [Portkey app](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) and copy your API key
- **Virtual Key**: Virtual Keys manage your LLM provider API keys in one place, stored securely in Portkey's vault

```python
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

gpt_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",  # placeholder; the Virtual Key supplies the real provider key
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_VIRTUAL_KEY",  # enter your Virtual Key from Portkey
    )
)
```
3. **Create and Run Your First Agent:**

```python
from crewai import Agent, Task, Crew

# Define your agents with roles and goals
coder = Agent(
    role='Software developer',
    goal='Write clear, concise code on demand',
    backstory='An expert coder with a keen eye for software trends.',
    llm=gpt_llm
)

# Create tasks for your agents
task1 = Task(
    description="Define the HTML for a simple website with the heading: Hello World! Portkey is working!",
    expected_output="Clear and concise HTML code",
    agent=coder
)

# Instantiate your crew
crew = Crew(
    agents=[coder],
    tasks=[task1],
)

result = crew.kickoff()
print(result)
```
## Key Features

| Feature | Description |
|---------|-------------|
| 🌐 Multi-LLM Support | Access OpenAI, Anthropic, Gemini, Azure, and 250+ providers through a unified interface |
| 🛡️ Production Reliability | Implement retries, timeouts, load balancing, and fallbacks |
| 📊 Advanced Observability | Track 40+ metrics including costs, tokens, latency, and custom metadata |
| 🔍 Comprehensive Logging | Debug with detailed execution traces and function call logs |
| 🚧 Security Controls | Set budget limits and implement role-based access control |
| 🔄 Performance Analytics | Capture and analyze feedback for continuous improvement |
| 💾 Intelligent Caching | Reduce costs and latency with semantic or simple caching |
## Production Features with Portkey Configs

All of the features below are enabled through Portkey's Config system, which lets you define routing strategies using simple JSON objects in your LLM API calls. You can create and manage Configs directly in your code or through the Portkey Dashboard. Each Config has a unique ID for easy reference.

<Frame>
  <img src="https://raw.githubusercontent.com/Portkey-AI/docs-core/refs/heads/main/images/libraries/libraries-3.avif"/>
</Frame>
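For instance, once a Config is saved in the dashboard, you can reference it by ID when building your gateway headers. A minimal sketch (not from the original doc), assuming the `config` parameter of `createHeaders` accepts a saved Config ID; the ID shown is a placeholder:

```python
# Hypothetical Config ID for illustration; replace with your own.
headers = createHeaders(
    api_key="YOUR_PORTKEY_API_KEY",
    config="pc-your-config-id"  # unique ID of a Config saved in the Portkey Dashboard
)
```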
### 1. Use 250+ LLMs
Access various LLMs like Anthropic, Gemini, Mistral, Azure OpenAI, and more with minimal code changes. Switch between providers or use them together seamlessly. [Learn more about Universal API](https://portkey.ai/docs/product/ai-gateway/universal-api)

Easily switch between different LLM providers:

```python
# Anthropic Configuration
anthropic_llm = LLM(
    model="claude-3-5-sonnet-latest",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_ANTHROPIC_VIRTUAL_KEY",  # you don't need a provider when using Virtual Keys
        trace_id="anthropic_agent"
    )
)

# Azure OpenAI Configuration
azure_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_AZURE_VIRTUAL_KEY",  # you don't need a provider when using Virtual Keys
        trace_id="azure_agent"
    )
)
```
### 2. Caching
Improve response times and reduce costs with two powerful caching modes:
- **Simple Cache**: Perfect for exact matches
- **Semantic Cache**: Matches responses for requests that are semantically similar

[Learn more about Caching](https://portkey.ai/docs/product/ai-gateway/cache-simple-and-semantic)

```py
config = {
    "cache": {
        "mode": "semantic"  # or "simple" for exact matching
    }
}
```
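To apply this cache to your CrewAI agents, the Config can be passed when building the gateway headers. A minimal sketch (not from the original doc), assuming `createHeaders` also accepts a `config` dict alongside the keys shown earlier:

```python
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

cached_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",  # placeholder; the Virtual Key supplies the real provider key
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_VIRTUAL_KEY",
        config={"cache": {"mode": "semantic"}},  # the caching Config from above
    )
)
```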
### 3. Production Reliability
Portkey provides comprehensive reliability features:
- **Automatic Retries**: Handle temporary failures gracefully
- **Request Timeouts**: Prevent hanging operations
- **Conditional Routing**: Route requests based on specific conditions
- **Fallbacks**: Set up automatic provider failovers
- **Load Balancing**: Distribute requests efficiently

[Learn more about Reliability Features](https://portkey.ai/docs/product/ai-gateway/)
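As a rough sketch of how these features combine, a Config that retries transient failures and falls back across providers might look like the following. This is an illustrative example, not from the original doc; the Virtual Key names are placeholders:

```py
# Retry each request up to 3 times, then fail over between providers in order.
config = {
    "retry": {"attempts": 3},
    "strategy": {"mode": "fallback"},
    "targets": [
        {"virtual_key": "YOUR_OPENAI_VIRTUAL_KEY"},     # primary
        {"virtual_key": "YOUR_ANTHROPIC_VIRTUAL_KEY"},  # failover
    ]
}
```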
### 4. Metrics

Agent runs are complex. Portkey automatically logs **40+ comprehensive metrics** for your AI agents, including cost, tokens used, and latency. Whether you need a broad overview or granular insights into your agent runs, Portkey's customizable filters provide the metrics you need:

- Cost per agent interaction
- Response times and latency
- Token usage and efficiency
- Success/failure rates
- Cache hit rates

<img src="https://github.com/siddharthsambharia-portkey/Portkey-Product-Images/blob/main/Portkey-Dashboard.png?raw=true" width="70%" alt="Portkey Dashboard" />
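These filters are most useful when requests are tagged. A minimal sketch (not from the original doc), assuming `createHeaders` accepts the `trace_id` and `metadata` parameters for labeling requests:

```python
headers = createHeaders(
    api_key="YOUR_PORTKEY_API_KEY",
    virtual_key="YOUR_VIRTUAL_KEY",
    trace_id="research_crew_run",                # groups related calls into one trace
    metadata={"team": "research", "env": "dev"}  # custom fields to filter on
)
```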
### 5. Detailed Logging
Logs are essential for understanding agent behavior, diagnosing issues, and improving performance. They provide a detailed record of agent activities and tool use, which is crucial for debugging and optimizing processes.

Access a dedicated section to view records of agent executions, including parameters, outcomes, function calls, and errors. Filter logs based on multiple parameters such as trace ID, model, tokens used, and metadata.

<details>
  <summary><b>Traces</b></summary>
  <img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Traces.png" alt="Portkey Traces" width="70%" />
</details>

<details>
  <summary><b>Logs</b></summary>
  <img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Logs.png" alt="Portkey Logs" width="70%" />
</details>
### 6. Enterprise Security Features
- Set budget limits and rate limits per Virtual Key (disposable API keys)
- Implement role-based access control
- Track system changes with audit logs
- Configure data retention policies

For detailed information on creating and managing Configs, visit the [Portkey documentation](https://docs.portkey.ai/product/ai-gateway/configs).

## Resources

- [📘 Portkey Documentation](https://docs.portkey.ai)
- [📊 Portkey Dashboard](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai)
- [🐦 Twitter](https://twitter.com/portkeyai)
- [💬 Discord Community](https://discord.gg/DD7vgKK299)
docs/how-to/portkey-observability.mdx (new file, 202 lines)
@@ -0,0 +1,202 @@
---
title: Portkey Observability and Guardrails
description: How to use Portkey with CrewAI
icon: key
---

<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-CrewAI.png" alt="Portkey CrewAI Header Image" width="70%" />

[Portkey](https://portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) is a 2-line upgrade that makes your CrewAI agents reliable, cost-efficient, and fast.

Portkey adds 4 core production capabilities to any CrewAI agent:
1. Routing to **250+ LLMs**
2. Making each LLM call more robust
3. Full-stack tracing plus cost and performance analytics
4. Real-time guardrails to enforce behavior

## Getting Started

<Steps>
<Step title="Install CrewAI and Portkey">
```bash
pip install -qU crewai portkey-ai
```
</Step>
<Step title="Configure the LLM Client">
|
||||
To build CrewAI Agents with Portkey, you'll need two keys:
|
||||
- **Portkey API Key**: Sign up on the [Portkey app](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) and copy your API key
|
||||
- **Virtual Key**: Virtual Keys securely manage your LLM API keys in one place. Store your LLM provider API keys securely in Portkey's vault
|
||||
|
||||
```python
|
||||
from crewai import LLM
|
||||
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
|
||||
|
||||
gpt_llm = LLM(
|
||||
model="gpt-4",
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
api_key="dummy", # We are using Virtual key
|
||||
extra_headers=createHeaders(
|
||||
api_key="YOUR_PORTKEY_API_KEY",
|
||||
virtual_key="YOUR_VIRTUAL_KEY", # Enter your Virtual key from Portkey
|
||||
)
|
||||
)
|
||||
```
|
||||
</Step>
|
||||
<Step title="Create and Run Your First Agent">
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
|
||||
# Define your agents with roles and goals
|
||||
coder = Agent(
|
||||
role='Software developer',
|
||||
goal='Write clear, concise code on demand',
|
||||
backstory='An expert coder with a keen eye for software trends.',
|
||||
llm=gpt_llm
|
||||
)
|
||||
|
||||
# Create tasks for your agents
|
||||
task1 = Task(
|
||||
description="Define the HTML for making a simple website with heading- Hello World! Portkey is working!",
|
||||
expected_output="A clear and concise HTML code",
|
||||
agent=coder
|
||||
)
|
||||
|
||||
# Instantiate your crew
|
||||
crew = Crew(
|
||||
agents=[coder],
|
||||
tasks=[task1],
|
||||
)
|
||||
|
||||
result = crew.kickoff()
|
||||
print(result)
|
||||
```
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Key Features

| Feature | Description |
|:--------|:------------|
| 🌐 Multi-LLM Support | Access OpenAI, Anthropic, Gemini, Azure, and 250+ providers through a unified interface |
| 🛡️ Production Reliability | Implement retries, timeouts, load balancing, and fallbacks |
| 📊 Advanced Observability | Track 40+ metrics including costs, tokens, latency, and custom metadata |
| 🔍 Comprehensive Logging | Debug with detailed execution traces and function call logs |
| 🚧 Security Controls | Set budget limits and implement role-based access control |
| 🔄 Performance Analytics | Capture and analyze feedback for continuous improvement |
| 💾 Intelligent Caching | Reduce costs and latency with semantic or simple caching |
## Production Features with Portkey Configs

All of the features below are enabled through Portkey's Config system, which lets you define routing strategies using simple JSON objects in your LLM API calls. You can create and manage Configs directly in your code or through the Portkey Dashboard. Each Config has a unique ID for easy reference.

<Frame>
  <img src="https://raw.githubusercontent.com/Portkey-AI/docs-core/refs/heads/main/images/libraries/libraries-3.avif"/>
</Frame>
### 1. Use 250+ LLMs
Access various LLMs like Anthropic, Gemini, Mistral, Azure OpenAI, and more with minimal code changes. Switch between providers or use them together seamlessly. [Learn more about Universal API](https://portkey.ai/docs/product/ai-gateway/universal-api)

Easily switch between different LLM providers:

```python
# Anthropic Configuration
anthropic_llm = LLM(
    model="claude-3-5-sonnet-latest",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_ANTHROPIC_VIRTUAL_KEY",  # you don't need a provider when using Virtual Keys
        trace_id="anthropic_agent"
    )
)

# Azure OpenAI Configuration
azure_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_AZURE_VIRTUAL_KEY",  # you don't need a provider when using Virtual Keys
        trace_id="azure_agent"
    )
)
```
### 2. Caching
Improve response times and reduce costs with two powerful caching modes:
- **Simple Cache**: Perfect for exact matches
- **Semantic Cache**: Matches responses for requests that are semantically similar

[Learn more about Caching](https://portkey.ai/docs/product/ai-gateway/cache-simple-and-semantic)

```py
config = {
    "cache": {
        "mode": "semantic"  # or "simple" for exact matching
    }
}
```
### 3. Production Reliability
Portkey provides comprehensive reliability features:
- **Automatic Retries**: Handle temporary failures gracefully
- **Request Timeouts**: Prevent hanging operations
- **Conditional Routing**: Route requests based on specific conditions
- **Fallbacks**: Set up automatic provider failovers
- **Load Balancing**: Distribute requests efficiently

[Learn more about Reliability Features](https://portkey.ai/docs/product/ai-gateway/)
### 4. Metrics

Agent runs are complex. Portkey automatically logs **40+ comprehensive metrics** for your AI agents, including cost, tokens used, and latency. Whether you need a broad overview or granular insights into your agent runs, Portkey's customizable filters provide the metrics you need:

- Cost per agent interaction
- Response times and latency
- Token usage and efficiency
- Success/failure rates
- Cache hit rates

<img src="https://github.com/siddharthsambharia-portkey/Portkey-Product-Images/blob/main/Portkey-Dashboard.png?raw=true" width="70%" alt="Portkey Dashboard" />
### 5. Detailed Logging
Logs are essential for understanding agent behavior, diagnosing issues, and improving performance. They provide a detailed record of agent activities and tool use, which is crucial for debugging and optimizing processes.

Access a dedicated section to view records of agent executions, including parameters, outcomes, function calls, and errors. Filter logs based on multiple parameters such as trace ID, model, tokens used, and metadata.

<details>
  <summary><b>Traces</b></summary>
  <img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Traces.png" alt="Portkey Traces" width="70%" />
</details>

<details>
  <summary><b>Logs</b></summary>
  <img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Logs.png" alt="Portkey Logs" width="70%" />
</details>
### 6. Enterprise Security Features
- Set budget limits and rate limits per Virtual Key (disposable API keys)
- Implement role-based access control
- Track system changes with audit logs
- Configure data retention policies

For detailed information on creating and managing Configs, visit the [Portkey documentation](https://docs.portkey.ai/product/ai-gateway/configs).

## Resources

- [📘 Portkey Documentation](https://docs.portkey.ai)
- [📊 Portkey Dashboard](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai)
- [🐦 Twitter](https://twitter.com/portkeyai)
- [💬 Discord Community](https://discord.gg/DD7vgKK299)
docs/images/openlit1.png (new binary file, 390 KiB; not shown)
docs/images/openlit2.png (new binary file, 422 KiB; not shown)
docs/images/openlit3.png (new binary file, 799 KiB; not shown)
@@ -1,128 +1,145 @@
---
title: Installation
description:
description: Get started with CrewAI - Install, configure, and build your first AI crew
icon: wrench
---

This guide will walk you through the installation process for CrewAI and its dependencies.
CrewAI is a flexible and powerful AI framework that enables you to create and manage AI agents, tools, and tasks efficiently.
Let's get started! 🚀
<Note>
**Python Version Requirements**

CrewAI requires `Python >=3.10 and <3.13`. Here's how to check your version:
```bash
python3 --version
```

If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
</Note>

<Tip>
Make sure you have `Python >=3.10 <3.13` installed on your system before you proceed.
</Tip>
# Installing CrewAI

CrewAI is a flexible and powerful AI framework that enables you to create and manage AI agents, tools, and tasks efficiently.
Let's get you set up! 🚀
<Steps>
<Step title="Install CrewAI">
Install the main CrewAI package with the following command:
<CodeGroup>
```shell Terminal
pip install crewai
```
</CodeGroup>
You can also install the main CrewAI package and the tools package, which includes a series of helpful tools for your agents:
<CodeGroup>
```shell Terminal
pip install 'crewai[tools]'
```
</CodeGroup>
Alternatively, you can also use:
<CodeGroup>
```shell Terminal
pip install crewai crewai-tools
```
</CodeGroup>
</Step>
<Step title="Upgrade CrewAI">
|
||||
To upgrade CrewAI and CrewAI Tools to the latest version, run the following command
|
||||
<CodeGroup>
|
||||
```shell Terminal
|
||||
pip install --upgrade crewai crewai-tools
|
||||
```
|
||||
</CodeGroup>
|
||||
<Note>
|
||||
1. If you're using an older version of CrewAI, you may receive a warning about using `Poetry` for dependency management.
|
||||

|
||||
|
||||
2. In this case, you'll need to run the command below to update your project.
|
||||
This command will migrate your project to use [UV](https://github.com/astral-sh/uv) and update the necessary files.
|
||||
Install CrewAI with all recommended tools using either method:
|
||||
```shell Terminal
|
||||
crewai update
|
||||
pip install 'crewai[tools]'
|
||||
```
|
||||
or
|
||||
```shell Terminal
|
||||
pip install crewai crewai-tools
|
||||
```
|
||||
3. After running the command above, you should see the following output:
|
||||

|
||||
|
||||
4. You're all set! You can now proceed to the next step! 🎉
|
||||
</Note>
|
||||
<Note>
|
||||
Both methods install the core package and additional tools needed for most use cases.
|
||||
</Note>
|
||||
</Step>
|
||||
<Step title="Verify the installation">
|
||||
To verify that `crewai` and `crewai-tools` are installed correctly, run the following command
|
||||
<CodeGroup>
|
||||
```shell Terminal
|
||||
pip freeze | grep crewai
|
||||
```
|
||||
</CodeGroup>
|
||||
You should see the version number of `crewai` and `crewai-tools`.
|
||||
<CodeGroup>
|
||||
```markdown Version
|
||||
crewai==X.X.X
|
||||
crewai-tools==X.X.X
|
||||
```
|
||||
</CodeGroup>
|
||||
If you see the version number, then the installation was successful! 🎉
|
||||
|
||||
<Step title="Upgrade CrewAI (Existing Installations Only)">
|
||||
If you have an older version of CrewAI installed, you can upgrade it:
|
||||
```shell Terminal
|
||||
pip install --upgrade crewai crewai-tools
|
||||
```
|
||||
|
||||
<Warning>
|
||||
If you see a Poetry-related warning, you'll need to migrate to our new dependency manager:
|
||||
```shell Terminal
|
||||
crewai update
|
||||
```
|
||||
This will update your project to use [UV](https://github.com/astral-sh/uv), our new faster dependency manager.
|
||||
</Warning>
|
||||
|
||||
<Note>
|
||||
Skip this step if you're doing a fresh installation.
|
||||
</Note>
|
||||
</Step>
|
||||
|
||||
<Step title="Verify Installation">
|
||||
Check your installed versions:
|
||||
```shell Terminal
|
||||
pip freeze | grep crewai
|
||||
```
|
||||
|
||||
You should see something like:
|
||||
```markdown Output
|
||||
crewai==X.X.X
|
||||
crewai-tools==X.X.X
|
||||
```
|
||||
<Check>Installation successful! You're ready to create your first crew.</Check>
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Create a new CrewAI project
# Creating a New Project

The next step is to create a new CrewAI project.
We recommend using the YAML Template scaffolding to get started as it provides a structured approach to defining agents and tasks.
<Info>
We recommend using the YAML Template scaffolding for a structured approach to defining agents and tasks.
</Info>

<Steps>
<Step title="Create a new CrewAI project using the YAML Template Configuration">
|
||||
To create a new CrewAI project, run the following CLI (Command Line Interface) command:
|
||||
<CodeGroup>
|
||||
```shell Terminal
|
||||
crewai create crew <project_name>
|
||||
```
|
||||
</CodeGroup>
|
||||
This command creates a new project folder with the following structure:
|
||||
| File/Directory | Description |
|
||||
|:------------------------|:-------------------------------------------------|
|
||||
| `my_project/` | Root directory of the project |
|
||||
| ├── `.gitignore` | Specifies files and directories to ignore in Git |
|
||||
| ├── `pyproject.toml` | Project configuration and dependencies |
|
||||
| ├── `README.md` | Project documentation |
|
||||
| ├── `.env` | Environment variables |
|
||||
| └── `src/` | Source code directory |
|
||||
| └── `my_project/` | Main application package |
|
||||
| ├── `__init__.py` | Marks the directory as a Python package |
|
||||
| ├── `main.py` | Main application script |
|
||||
| ├── `crew.py` | Crew-related functionalities |
|
||||
| ├── `tools/` | Custom tools directory |
|
||||
| │ ├── `custom_tool.py` | Custom tool implementation |
|
||||
| │ └── `__init__.py` | Marks tools directory as a package |
|
||||
| └── `config/` | Configuration files directory |
|
||||
| ├── `agents.yaml` | Agent configurations |
|
||||
| └── `tasks.yaml` | Task configurations |
|
||||
<Step title="Generate Project Structure">
|
||||
Run the CrewAI CLI command:
|
||||
```shell Terminal
|
||||
crewai create crew <project_name>
|
||||
```
|
||||
|
||||
You can now start developing your crew by editing the files in the `src/my_project` folder.
|
||||
The `main.py` file is the entry point of the project, the `crew.py` file is where you define your crew, the `agents.yaml` file is where you define your agents,
|
||||
and the `tasks.yaml` file is where you define your tasks.
|
||||
This creates a new project with the following structure:
|
||||
<Frame>
|
||||
```
|
||||
my_project/
|
||||
├── .gitignore
|
||||
├── pyproject.toml
|
||||
├── README.md
|
||||
├── .env
|
||||
└── src/
|
||||
└── my_project/
|
||||
├── __init__.py
|
||||
├── main.py
|
||||
├── crew.py
|
||||
├── tools/
|
||||
│ ├── custom_tool.py
|
||||
│ └── __init__.py
|
||||
└── config/
|
||||
├── agents.yaml
|
||||
└── tasks.yaml
|
||||
```
|
||||
</Frame>
|
||||
</Step>
|
||||
<Step title="Customize your project">
|
||||
To customize your project, you can:
|
||||
- Modify `src/my_project/config/agents.yaml` to define your agents.
|
||||
- Modify `src/my_project/config/tasks.yaml` to define your tasks.
|
||||
- Modify `src/my_project/crew.py` to add your own logic, tools, and specific arguments.
|
||||
- Modify `src/my_project/main.py` to add custom inputs for your agents and tasks.
|
||||
- Add your environment variables into the `.env` file.
|
||||
|
||||
<Step title="Customize Your Project">
|
||||
Your project will contain these essential files:
|
||||
|
||||
| File | Purpose |
|
||||
| --- | --- |
|
||||
| `agents.yaml` | Define your AI agents and their roles |
|
||||
| `tasks.yaml` | Set up agent tasks and workflows |
|
||||
| `.env` | Store API keys and environment variables |
|
||||
| `main.py` | Project entry point and execution flow |
|
||||
| `crew.py` | Crew orchestration and coordination |
|
||||
| `tools/` | Directory for custom agent tools |
|
||||
|
||||
<Tip>
|
||||
Start by editing `agents.yaml` and `tasks.yaml` to define your crew's behavior.
|
||||
Keep sensitive information like API keys in `.env`.
|
||||
</Tip>
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Next steps
|
||||
## Next Steps
|
||||
|
||||
Now that you have installed `crewai` and `crewai-tools`, you're ready to spin up your first crew!
|
||||
|
||||
- 👨💻 Build your first agent with CrewAI by following the [Quickstart](/quickstart) guide.
|
||||
- 💬 Join the [Community](https://community.crewai.com) to get help and share your feedback.
|
||||
<CardGroup cols={2}>
  <Card
    title="Build Your First Agent"
    icon="code"
    href="/quickstart"
  >
    Follow our quickstart guide to create your first CrewAI agent and get hands-on experience.
  </Card>
  <Card
    title="Join the Community"
    icon="comments"
    href="https://community.crewai.com"
  >
    Connect with other developers, get help, and share your CrewAI experiences.
  </Card>
</CardGroup>
@@ -1,49 +1,85 @@
---
title: Introduction
description: Welcome to CrewAI docs!
description: Build AI agent teams that work together to tackle complex tasks
icon: handshake
---

# What is CrewAI?

**CrewAI is a cutting-edge Python framework for orchestrating role-playing, autonomous AI agents.**
**CrewAI is a cutting-edge framework for orchestrating autonomous AI agents.**

By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks.
CrewAI enables you to create AI teams where each agent has specific roles, tools, and goals, working together to accomplish complex tasks.

<Frame caption="CrewAI Mindmap">
  <img src="crewAI-mindmap.png" alt="CrewAI Mindmap" />
</Frame>
Think of it as assembling your dream team - each member (agent) brings unique skills and expertise, collaborating seamlessly to achieve your objectives.
## Why CrewAI?
- 🤼‍♀️ **Role-Playing Agents**: Agents can take on different roles and personas to better understand and interact with complex systems.
- 🤖 **Autonomous Decision Making**: Agents can make decisions autonomously based on the given context and available tools.
- 🤝 **Seamless Collaboration**: Agents can work together seamlessly, sharing information and resources to achieve common goals.
- 🧠 **Complex Task Tackling**: CrewAI is designed to tackle complex tasks, such as multi-step workflows, decision making, and problem solving.
## How CrewAI Works

# Get Started with CrewAI
<Note>
Just like a company has departments (Sales, Engineering, Marketing) working together under leadership to achieve business goals, CrewAI helps you create an organization of AI agents with specialized roles collaborating to accomplish complex tasks.
</Note>

<Frame caption="CrewAI Framework Overview">
  <img src="crewAI-mindmap.png" alt="CrewAI Framework Overview" />
</Frame>

| Component | Description | Key Features |
|:----------|:-----------:|:------------|
| **Crew** | The top-level organization | • Manages AI agent teams<br/>• Oversees workflows<br/>• Ensures collaboration<br/>• Delivers outcomes |
| **AI Agents** | Specialized team members | • Have specific roles (researcher, writer)<br/>• Use designated tools<br/>• Can delegate tasks<br/>• Make autonomous decisions |
| **Process** | Workflow management system | • Defines collaboration patterns<br/>• Controls task assignments<br/>• Manages interactions<br/>• Ensures efficient execution |
| **Tasks** | Individual assignments | • Have clear objectives<br/>• Use specific tools<br/>• Feed into larger process<br/>• Produce actionable results |
### How It All Works Together

1. The **Crew** organizes the overall operation
2. **AI Agents** work on their specialized tasks
3. The **Process** ensures smooth collaboration
4. **Tasks** get completed to achieve the goal

## Key Features

<CardGroup cols={2}>
  <Card title="Role-Based Agents" icon="users">
    Create specialized agents with defined roles, expertise, and goals - from researchers to analysts to writers
  </Card>
  <Card title="Flexible Tools" icon="screwdriver-wrench">
    Equip agents with custom tools and APIs to interact with external services and data sources
  </Card>
  <Card title="Intelligent Collaboration" icon="people-arrows">
    Agents work together, sharing insights and coordinating tasks to achieve complex objectives
  </Card>
  <Card title="Task Management" icon="list-check">
    Define sequential or parallel workflows, with agents automatically handling task dependencies
  </Card>
</CardGroup>

## Why Choose CrewAI?

- 🧠 **Autonomous Operation**: Agents make intelligent decisions based on their roles and available tools
- 📝 **Natural Interaction**: Agents communicate and collaborate like human team members
- 🛠️ **Extensible Design**: Easy to add new tools, roles, and capabilities
- 🚀 **Production Ready**: Built for reliability and scalability in real-world applications
<CardGroup cols={3}>
  <Card
    title="Quickstart"
    color="#F3A78B"
    href="quickstart"
    icon="terminal"
    iconType="solid"
    title="Install CrewAI"
    icon="wrench"
    href="/installation"
  >
    Getting started with CrewAI
    Get started with CrewAI in your development environment.
  </Card>
  <Card
    title="Quick Start"
    icon="bolt"
    href="/quickstart"
  >
    Follow our quickstart guide to create your first CrewAI agent and get hands-on experience.
  </Card>
  <Card
    title="Join the Community"
    color="#F3A78B"
    icon="comments"
    href="https://community.crewai.com"
    icon="comment-question"
    iconType="duotone"
  >
    Join the CrewAI community and get help with your project!
  </Card>
</CardGroup>

## Next Step

- [Install CrewAI](/installation) to get started with your first agent.

  >
    Connect with other developers, get help, and share your CrewAI experiences.
  </Card>
</CardGroup>
@@ -68,6 +68,7 @@
        "concepts/tasks",
        "concepts/crews",
        "concepts/flows",
        "concepts/knowledge",
        "concepts/llms",
        "concepts/processes",
        "concepts/collaboration",
@@ -98,7 +99,9 @@
        "how-to/replay-tasks-from-latest-crew-kickoff",
        "how-to/conditional-tasks",
        "how-to/agentops-observability",
        "how-to/langtrace-observability"
        "how-to/langtrace-observability",
        "how-to/openlit-observability",
        "how-to/portkey-observability"
      ]
    },
    {
@@ -8,7 +8,7 @@ icon: rocket

Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject.

Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
If you haven't installed them yet, you can do so by following the [installation guide](/installation).

Follow the steps below to get crewing! 🚣‍♂️
@@ -23,7 +23,7 @@ Follow the steps below to get crewing! 🚣‍♂️
```
</CodeGroup>
</Step>
<Step title="Modify your `agents.yaml` file">
<Step title="Modify your `agents.yaml` file">
<Tip>
You can also modify the agents as needed to fit your use case or copy and paste as is to your project.
Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{topic}` will be replaced by the value of the variable in the `main.py` file.
@@ -39,7 +39,7 @@ Follow the steps below to get crewing! 🚣‍♂️
      You're a seasoned researcher with a knack for uncovering the latest
      developments in {topic}. Known for your ability to find the most relevant
      information and present it in a clear and concise manner.

  reporting_analyst:
    role: >
      {topic} Reporting Analyst
@@ -51,7 +51,7 @@ Follow the steps below to get crewing! 🚣‍♂️
      it easy for others to understand and act on the information you provide.
```
</Step>
<Step title="Modify your `tasks.yaml` file">
<Step title="Modify your `tasks.yaml` file">
```yaml tasks.yaml
# src/latest_ai_development/config/tasks.yaml
research_task:
@@ -73,8 +73,8 @@ Follow the steps below to get crewing! 🚣‍♂️
  agent: reporting_analyst
  output_file: report.md
```
</Step>
<Step title="Modify your `crew.py` file">
</Step>
<Step title="Modify your `crew.py` file">
```python crew.py
# src/latest_ai_development/crew.py
from crewai import Agent, Crew, Process, Task
@@ -121,10 +121,34 @@ Follow the steps below to get crewing! 🚣‍♂️
      tasks=self.tasks, # Automatically created by the @task decorator
      process=Process.sequential,
      verbose=True,
    )
    )
```
</Step>
<Step title="Feel free to pass custom inputs to your crew">
<Step title="[Optional] Add before and after crew functions">
```python crew.py
# src/latest_ai_development/crew.py
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
from crewai_tools import SerperDevTool

@CrewBase
class LatestAiDevelopmentCrew():
  """LatestAiDevelopment crew"""

  @before_kickoff
  def before_kickoff_function(self, inputs):
    print(f"Before kickoff function with inputs: {inputs}")
    return inputs  # You can return the inputs or modify them as needed

  @after_kickoff
  def after_kickoff_function(self, result):
    print(f"After kickoff function with result: {result}")
    return result  # You can return the result or modify it as needed

  # ... remaining code
```
</Step>
<Step title="Feel free to pass custom inputs to your crew">
|
||||
For example, you can pass the `topic` input to your crew to customize the research and reporting.
|
||||
```python main.py
|
||||
#!/usr/bin/env python
|
||||
@@ -237,14 +261,14 @@ Follow the steps below to get crewing! 🚣♂️
|
||||
### Note on Consistency in Naming
|
||||
|
||||
The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
|
||||
For example, you can reference the agent for specific tasks from `tasks.yaml` file.
|
||||
For example, you can reference the agent for specific tasks from `tasks.yaml` file.
|
||||
This naming consistency allows CrewAI to automatically link your configurations with your code; otherwise, your task won't recognize the reference properly.
|
||||
|
||||
#### Example References
|
||||
|
||||
<Tip>
|
||||
Note how we use the same name for the agent in the `agents.yaml` (`email_summarizer`) file as the method name in the `crew.py` (`email_summarizer`) file.
|
||||
</Tip>
|
||||
</Tip>
|
||||
|
||||
```yaml agents.yaml
|
||||
email_summarizer:
|
||||
@@ -277,36 +301,166 @@ Use the annotations to properly reference the agent and task in the `crew.py` file.

### Annotations include:

* `@agent`
* `@task`
* `@crew`
* `@tool`
* `@callback`
* `@output_json`
* `@output_pydantic`
* `@cache_handler`
Here are examples of how to use each annotation in your CrewAI project, and when you should use them:

```python crew.py
# ...
#### @agent
Used to define an agent in your crew. Use this when:
- You need to create a specialized AI agent with a specific role
- You want the agent to be automatically collected and managed by the crew
- You need to reuse the same agent configuration across multiple tasks

```python
@agent
def email_summarizer(self) -> Agent:
def research_agent(self) -> Agent:
    return Agent(
        config=self.agents_config["email_summarizer"],
        role="Research Analyst",
        goal="Conduct thorough research on given topics",
        backstory="Expert researcher with years of experience in data analysis",
        tools=[SerperDevTool()],
        verbose=True
    )

@task
def email_summarizer_task(self) -> Task:
    return Task(
        config=self.tasks_config["email_summarizer_task"],
    )
# ...
```

<Tip>
In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
You can learn more about the core concepts [here](/concepts).
</Tip>
#### @task
Used to define a task that can be executed by agents. Use this when:
- You need to define a specific piece of work for an agent
- You want tasks to be automatically sequenced and managed
- You need to establish dependencies between different tasks

```python
@task
def research_task(self) -> Task:
    return Task(
        description="Research the latest developments in AI technology",
        expected_output="A comprehensive report on AI advancements",
        agent=self.research_agent(),
        output_file="output/research.md"
    )
```
#### @crew
Used to define your crew configuration. Use this when:
- You want to automatically collect all @agent and @task definitions
- You need to specify how tasks should be processed (sequential or hierarchical)
- You want to set up crew-wide configurations

```python
@crew
def research_crew(self) -> Crew:
    return Crew(
        agents=self.agents,  # Automatically collected from @agent methods
        tasks=self.tasks,    # Automatically collected from @task methods
        process=Process.sequential,
        verbose=True
    )
```

#### @tool
Used to create custom tools for your agents. Use this when:
- You need to give agents specific capabilities (like web search, data analysis)
- You want to encapsulate external API calls or complex operations
- You need to share functionality across multiple agents

```python
@tool
def web_search_tool(query: str, max_results: int = 5) -> list[str]:
    """
    Search the web for information.

    Args:
        query: The search query
        max_results: Maximum number of results to return

    Returns:
        List of search results
    """
    # Implement your search logic here
    return [f"Result {i} for: {query}" for i in range(max_results)]
```
#### @before_kickoff
Used to execute logic before the crew starts. Use this when:
- You need to validate or preprocess input data
- You want to set up resources or configurations before execution
- You need to perform any initialization logic

```python
@before_kickoff
def validate_inputs(self, inputs: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
    """Validate and preprocess inputs before the crew starts."""
    if inputs is None:
        return None

    if 'topic' not in inputs:
        raise ValueError("Topic is required")

    # Add additional context
    inputs['timestamp'] = datetime.now().isoformat()
    inputs['topic'] = inputs['topic'].strip().lower()
    return inputs
```

#### @after_kickoff
Used to process results after the crew completes. Use this when:
- You need to format or transform the final output
- You want to perform cleanup operations
- You need to save or log the results in a specific way

```python
@after_kickoff
def process_results(self, result: CrewOutput) -> CrewOutput:
    """Process and format the results after the crew completes."""
    result.raw = result.raw.strip()
    result.raw = f"""
# Research Results
Generated on: {datetime.now().isoformat()}

{result.raw}
"""
    return result
```
#### @callback
Used to handle events during crew execution. Use this when:
- You need to monitor task progress
- You want to log intermediate results
- You need to implement custom progress tracking or metrics

```python
@callback
def log_task_completion(self, task: Task, output: str):
    """Log task completion details for monitoring."""
    print(f"Task '{task.description}' completed")
    print(f"Output length: {len(output)} characters")
    print(f"Agent used: {task.agent.role}")
    print("-" * 50)
```

#### @cache_handler
Used to implement custom caching for task results. Use this when:
- You want to avoid redundant expensive operations
- You need to implement custom cache storage or expiration logic
- You want to persist results between runs

```python
@cache_handler
def custom_cache(self, key: str) -> Optional[str]:
    """Custom cache implementation for storing task results."""
    cache_file = f"cache/{key}.json"

    if os.path.exists(cache_file):
        with open(cache_file, 'r') as f:
            data = json.load(f)
            # Check if cache is still valid (e.g., not expired)
            if datetime.fromisoformat(data['timestamp']) > datetime.now() - timedelta(days=1):
                return data['result']
    return None
```
<Note>
These decorators are part of the CrewAI framework and help organize your crew's structure by automatically collecting agents, tasks, and handling various lifecycle events.
They should be used within a class decorated with `@CrewBase`.
</Note>

### Replay Tasks from Latest Crew Kickoff

@@ -323,11 +477,28 @@ Replace `<task_id>` with the ID of the task you want to replay.
If you need to reset the memory of your crew before running it again, you can do so by calling the reset memory feature:

```shell
crewai reset-memory
crewai reset-memories --all
```

This will clear the crew's memory, allowing for a fresh start.
## Deploying Your Project

The easiest way to deploy your crew is through [CrewAI Enterprise](http://app.crewai.com/), where you can deploy your crew in a few clicks.
The easiest way to deploy your crew is through CrewAI Enterprise, where you can deploy your crew in a few clicks.

<CardGroup cols={2}>
  <Card
    title="Deploy on Enterprise"
    icon="rocket"
    href="http://app.crewai.com"
  >
    Get started with CrewAI Enterprise and deploy your crew in a production environment with just a few clicks.
  </Card>
  <Card
    title="Join the Community"
    icon="comments"
    href="https://community.crewai.com"
  >
    Join our open source community to discuss ideas, share your projects, and connect with other CrewAI developers.
  </Card>
</CardGroup>
@@ -34,6 +34,7 @@ from crewai_tools import GithubSearchTool
# Initialize the tool for semantic searches within a specific GitHub repository
tool = GithubSearchTool(
    github_repo='https://github.com/example/repo',
    gh_token='your_github_personal_access_token',
    content_types=['code', 'issue']  # Options: code, repo, pr, issue
)

@@ -41,6 +42,7 @@ tool = GithubSearchTool(

# Initialize the tool for semantic searches within a specific GitHub repository, so the agent can search any repository it learns about during its execution
tool = GithubSearchTool(
    gh_token='your_github_personal_access_token',
    content_types=['code', 'issue']  # Options: code, repo, pr, issue
)
```
@@ -48,6 +50,7 @@ tool = GithubSearchTool(
## Arguments

- `github_repo`: The URL of the GitHub repository where the search will be conducted. This is a mandatory field and specifies the target repository for your search.
- `gh_token`: Your GitHub Personal Access Token (PAT), required for authentication. You can create one in your GitHub account settings under Developer Settings > Personal Access Tokens.
- `content_types`: Specifies the types of content to include in your search. You must provide a list of content types from the following options: `code` for searching within the code, `repo` for searching within the repository's general information, `pr` for searching within pull requests, and `issue` for searching within issues. This field is mandatory and allows tailoring the search to specific content types within the GitHub repository.
@@ -77,5 +80,4 @@ tool = GithubSearchTool(
        ),
    ),
)
)
```
)
@@ -129,7 +129,6 @@ nav:
  - Processes: 'core-concepts/Processes.md'
  - Crews: 'core-concepts/Crews.md'
  - Collaboration: 'core-concepts/Collaboration.md'
  - Pipeline: 'core-concepts/Pipeline.md'
  - Training: 'core-concepts/Training-Crew.md'
  - Memory: 'core-concepts/Memory.md'
  - Planning: 'core-concepts/Planning.md'
@@ -152,6 +151,7 @@ nav:
  - Conditional Tasks: 'how-to/Conditional-Tasks.md'
  - Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
  - Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md'
  - Agent Monitoring with OpenLIT: 'how-to/openlit-Observability.md'
  - Tools Docs:
    - Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
    - Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'
poetry.lock (generated, 7507 lines)
File diff suppressed because it is too large
@@ -1,34 +1,46 @@
[project]
name = "crewai"
version = "0.79.4"
version = "0.95.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<=3.13"
requires-python = ">=3.10,<3.13"
authors = [
    { name = "Joao Moura", email = "joao@crewai.com" }
]
dependencies = [
    # Core Dependencies
    "pydantic>=2.4.2",
    "langchain>=0.2.16",
    "openai>=1.13.3",
    "litellm>=1.44.22",
    "instructor>=1.3.3",

    # Text Processing
    "pdfplumber>=0.11.4",
    "regex>=2024.9.11",

    # Telemetry and Monitoring
    "opentelemetry-api>=1.22.0",
    "opentelemetry-sdk>=1.22.0",
    "opentelemetry-exporter-otlp-proto-http>=1.22.0",
    "instructor>=1.3.3",
    "regex>=2024.9.11",
    "crewai-tools>=0.14.0",
    "click>=8.1.7",

    # Data Handling
    "chromadb>=0.5.23",
    "openpyxl>=3.1.5",
    "pyvis>=0.3.2",

    # Authentication and Security
    "auth0-python>=4.7.1",
    "python-dotenv>=1.0.0",

    # Configuration and Utils
    "click>=8.1.7",
    "appdirs>=1.4.4",
    "jsonref>=1.1.0",
    "json-repair>=0.25.2",
    "auth0-python>=4.7.1",
    "litellm>=1.44.22",
    "pyvis>=0.3.2",
    "uv>=0.4.25",
    "tomli-w>=1.1.0",
    "chromadb>=0.4.24",
    "tomli>=2.0.2",
    "blinker>=1.9.0"
]
[project.urls]
@@ -37,12 +49,29 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = ["crewai-tools>=0.14.0"]
tools = ["crewai-tools>=0.25.5"]
embeddings = [
    "tiktoken~=0.7.0"
]
agentops = ["agentops>=0.3.0"]
fastembed = ["fastembed>=0.4.1"]
pdfplumber = [
    "pdfplumber>=0.11.4",
]
pandas = [
    "pandas>=2.2.3",
]
openpyxl = [
    "openpyxl>=3.1.5",
]
mem0 = ["mem0ai>=0.1.29"]
docling = [
    "docling>=2.12.0",
]

[tool.uv]
dev-dependencies = [
    "ruff>=0.4.10",
    "ruff>=0.8.2",
    "mypy>=1.10.0",
    "pre-commit>=3.6.0",
    "mkdocs>=1.4.3",
@@ -52,7 +81,6 @@ dev-dependencies = [
    "mkdocs-material-extensions>=1.3.1",
    "pillow>=10.2.0",
    "cairosvg>=2.7.1",
    "crewai-tools>=0.14.0",
    "pytest>=8.0.0",
    "pytest-vcr>=1.0.2",
    "python-dotenv>=1.0.0",
@@ -1,11 +1,11 @@
import warnings

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.pipeline import Pipeline
from crewai.process import Process
from crewai.routers import Router
from crewai.task import Task

warnings.filterwarnings(
@@ -14,5 +14,13 @@ warnings.filterwarnings(
    category=UserWarning,
    module="pydantic.main",
)
__version__ = "0.79.4"
__all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router", "LLM", "Flow"]
__version__ = "0.95.0"
__all__ = [
    "Agent",
    "Crew",
    "Process",
    "Task",
    "LLM",
    "Flow",
    "Knowledge",
]
@@ -1,44 +1,43 @@
import os
import shutil
import subprocess
from typing import Any, List, Literal, Optional, Union
from typing import Any, Dict, List, Literal, Optional, Union

from pydantic import Field, InstanceOf, PrivateAttr, model_validator

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS
from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.task import Task
from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.tools.base_tool import Tool
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler

agentops = None

def mock_agent_ops_provider():
    def track_agent(*args, **kwargs):
try:
    import agentops  # type: ignore # Name "agentops" is already defined
    from agentops import track_agent  # type: ignore
except ImportError:

    def track_agent():
        def noop(f):
            return f

        return noop

    return track_agent


agentops = None

if os.environ.get("AGENTOPS_API_KEY"):
    try:
        from agentops import track_agent
    except ImportError:
        track_agent = mock_agent_ops_provider()
else:
    track_agent = mock_agent_ops_provider()


@track_agent()
class Agent(BaseAgent):
@@ -52,6 +51,7 @@ class Agent(BaseAgent):
        role: The role of the agent.
        goal: The objective of the agent.
        backstory: The backstory of the agent.
        knowledge: The knowledge base of the agent.
        config: Dict representation of agent configuration.
        llm: The language model that will run the agent.
        function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
@@ -62,6 +62,7 @@ class Agent(BaseAgent):
        allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
        tools: Tools at agents disposal
        step_callback: Callback to be executed after each step of the agent execution.
        knowledge_sources: Knowledge sources for the agent.
    """

    _times_executed: int = PrivateAttr(default=0)
@@ -85,7 +86,7 @@ class Agent(BaseAgent):
    llm: Union[str, InstanceOf[LLM], Any] = Field(
        description="Language model that will run the agent.", default=None
    )
    function_calling_llm: Optional[Any] = Field(
    function_calling_llm: Optional[Union[str, InstanceOf[LLM], Any]] = Field(
        description="Language model that will run the agent.", default=None
    )
    system_template: Optional[str] = Field(
@@ -115,111 +116,34 @@ class Agent(BaseAgent):
        default=2,
        description="Maximum number of retries for an agent to execute a task when an error occurs.",
    )
    multimodal: bool = Field(
        default=False,
        description="Whether the agent is multimodal.",
    )
    code_execution_mode: Literal["safe", "unsafe"] = Field(
        default="safe",
        description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
    )
    embedder_config: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Embedder configuration for the agent.",
    )
    knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
        default=None,
        description="Knowledge sources for the agent.",
    )
    _knowledge: Optional[Knowledge] = PrivateAttr(
        default=None,
    )

    @model_validator(mode="after")
    def post_init_setup(self):
        self._set_knowledge()
        self.agent_ops_agent_name = self.role
        unnacepted_attributes = [
            "AWS_ACCESS_KEY_ID",
            "AWS_SECRET_ACCESS_KEY",
            "AWS_REGION_NAME",
        ]

        # Handle different cases for self.llm
        if isinstance(self.llm, str):
            # If it's a string, create an LLM instance
            self.llm = LLM(model=self.llm)
        elif isinstance(self.llm, LLM):
            # If it's already an LLM instance, keep it as is
            pass
        elif self.llm is None:
            # Determine the model name from environment variables or use default
            model_name = (
                os.environ.get("OPENAI_MODEL_NAME")
                or os.environ.get("MODEL")
                or "gpt-4o-mini"
            )
            llm_params = {"model": model_name}

            api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get(
                "OPENAI_BASE_URL"
            )
            if api_base:
                llm_params["base_url"] = api_base

            set_provider = model_name.split("/")[0] if "/" in model_name else "openai"

            # Iterate over all environment variables to find matching API keys or use defaults
            for provider, env_vars in ENV_VARS.items():
                if provider == set_provider:
                    for env_var in env_vars:
                        if env_var["key_name"] in unnacepted_attributes:
                            continue
                        # Check if the environment variable is set
                        if "key_name" in env_var:
                            env_value = os.environ.get(env_var["key_name"])
                            if env_value:
                                # Map key names containing "API_KEY" to "api_key"
                                key_name = (
                                    "api_key"
                                    if "API_KEY" in env_var["key_name"]
                                    else env_var["key_name"]
                                )
                                # Map key names containing "API_BASE" to "api_base"
                                key_name = (
                                    "api_base"
                                    if "API_BASE" in env_var["key_name"]
                                    else key_name
                                )
                                # Map key names containing "API_VERSION" to "api_version"
                                key_name = (
                                    "api_version"
                                    if "API_VERSION" in env_var["key_name"]
                                    else key_name
                                )
                                llm_params[key_name] = env_value
                        # Check for default values if the environment variable is not set
                        elif env_var.get("default", False):
                            for key, value in env_var.items():
                                if key not in ["prompt", "key_name", "default"]:
                                    # Only add default if the key is already set in os.environ
                                    if key in os.environ:
                                        llm_params[key] = value

            self.llm = LLM(**llm_params)
        else:
            # For any other type, attempt to extract relevant attributes
            llm_params = {
                "model": getattr(self.llm, "model_name", None)
                or getattr(self.llm, "deployment_name", None)
                or str(self.llm),
                "temperature": getattr(self.llm, "temperature", None),
                "max_tokens": getattr(self.llm, "max_tokens", None),
                "logprobs": getattr(self.llm, "logprobs", None),
                "timeout": getattr(self.llm, "timeout", None),
                "max_retries": getattr(self.llm, "max_retries", None),
                "api_key": getattr(self.llm, "api_key", None),
                "base_url": getattr(self.llm, "base_url", None),
                "organization": getattr(self.llm, "organization", None),
            }
            # Remove None values to avoid passing unnecessary parameters
            llm_params = {k: v for k, v in llm_params.items() if v is not None}
            self.llm = LLM(**llm_params)

        # Similar handling for function_calling_llm
        if self.function_calling_llm:
            if isinstance(self.function_calling_llm, str):
                self.function_calling_llm = LLM(model=self.function_calling_llm)
|
||||
elif not isinstance(self.function_calling_llm, LLM):
|
||||
self.function_calling_llm = LLM(
|
||||
model=getattr(self.function_calling_llm, "model_name", None)
|
||||
or getattr(self.function_calling_llm, "deployment_name", None)
|
||||
or str(self.function_calling_llm)
|
||||
)
|
||||
self.llm = create_llm(self.llm)
|
||||
if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
|
||||
self.function_calling_llm = create_llm(self.function_calling_llm)
|
||||
|
||||
if not self.agent_executor:
|
||||
self._setup_agent_executor()
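The hunk above folds the long inline string/None/duck-typed resolution into `create_llm`. A minimal sketch of the three accepted forms the validator still supports — assumed constructor arguments based on the branches above; running the env-var path still requires provider keys at kickoff time:

    # Sketch (assumption: based on the branches above; crewai exports Agent and LLM).
    from crewai import Agent, LLM

    a1 = Agent(role="r", goal="g", backstory="b", llm="gpt-4o-mini")  # plain model string
    a2 = Agent(role="r", goal="g", backstory="b",
               llm=LLM(model="gpt-4o-mini", temperature=0))           # full LLM instance
    a3 = Agent(role="r", goal="g", backstory="b")                     # None -> resolved from env vars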

@@ -234,9 +158,24 @@ class Agent(BaseAgent):

            self.cache_handler = CacheHandler()
        self.set_cache_handler(self.cache_handler)

    def _set_knowledge(self):
        try:
            if self.knowledge_sources:
                knowledge_agent_name = f"{self.role.replace(' ', '_')}"
                if isinstance(self.knowledge_sources, list) and all(
                    isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
                ):
                    self._knowledge = Knowledge(
                        sources=self.knowledge_sources,
                        embedder_config=self.embedder_config,
                        collection_name=knowledge_agent_name,
                    )
        except (TypeError, ValueError) as e:
            raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")

    def execute_task(
        self,
        task: Any,
        task: Task,
        context: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> str:

@@ -255,6 +194,22 @@ class Agent(BaseAgent):

        task_prompt = task.prompt()

        # If the task requires output in JSON or Pydantic format,
        # append specific instructions to the task prompt to ensure
        # that the final answer does not include any code block markers
        if task.output_json or task.output_pydantic:
            # Generate the schema based on the output format
            if task.output_json:
                # schema = json.dumps(task.output_json, indent=2)
                schema = generate_model_description(task.output_json)

            elif task.output_pydantic:
                schema = generate_model_description(task.output_pydantic)

            task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
                output_format=schema
            )

        if context:
            task_prompt = self.i18n.slice("task_with_context").format(
                task=task_prompt, context=context

@@ -262,14 +217,32 @@ class Agent(BaseAgent):

        if self.crew and self.crew.memory:
            contextual_memory = ContextualMemory(
                self.crew.memory_config,
                self.crew._short_term_memory,
                self.crew._long_term_memory,
                self.crew._entity_memory,
                self.crew._user_memory,
            )
            memory = contextual_memory.build_context_for_task(task, context)
            if memory.strip() != "":
                task_prompt += self.i18n.slice("memory").format(memory=memory)

        if self._knowledge:
            agent_knowledge_snippets = self._knowledge.query([task.prompt()])
            if agent_knowledge_snippets:
                agent_knowledge_context = extract_knowledge_context(
                    agent_knowledge_snippets
                )
                if agent_knowledge_context:
                    task_prompt += agent_knowledge_context

        if self.crew:
            knowledge_snippets = self.crew.query_knowledge([task.prompt()])
            if knowledge_snippets:
                crew_knowledge_context = extract_knowledge_context(knowledge_snippets)
                if crew_knowledge_context:
                    task_prompt += crew_knowledge_context

        tools = tools or self.tools or []
        self.create_agent_executor(tools=tools, task=task)

@@ -360,6 +333,11 @@ class Agent(BaseAgent):

        tools = agent_tools.tools()
        return tools

    def get_multimodal_tools(self) -> List[Tool]:
        from crewai.tools.agent_tools.add_image_tool import AddImageTool

        return [AddImageTool()]

    def get_code_execution_tools(self):
        try:
            from crewai_tools import CodeInterpreterTool

@@ -384,7 +362,7 @@ class Agent(BaseAgent):

        for tool in tools:
            if isinstance(tool, CrewAITool):
                tools_list.append(tool.to_langchain())
                tools_list.append(tool.to_structured_tool())
            else:
                tools_list.append(tool)
    except ModuleNotFoundError:

@@ -19,6 +19,7 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess

from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
from crewai.tools.base_tool import Tool
from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config

@@ -106,7 +107,7 @@ class BaseAgent(ABC, BaseModel):

        default=False,
        description="Enable agent to delegate and ask questions among each other.",
    )
    tools: Optional[List[BaseTool]] = Field(
    tools: Optional[List[Any]] = Field(
        default_factory=list, description="Tools at agents' disposal"
    )
    max_iter: Optional[int] = Field(

@@ -135,6 +136,35 @@ class BaseAgent(ABC, BaseModel):

    def process_model_config(cls, values):
        return process_config(values, cls)

    @field_validator("tools")
    @classmethod
    def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
        """Validate and process the tools provided to the agent.

        This method ensures that each tool is either an instance of BaseTool
        or an object with 'name', 'func', and 'description' attributes. If the
        tool meets these criteria, it is processed and added to the list of
        tools. Otherwise, a ValueError is raised.
        """
        processed_tools = []
        for tool in tools:
            if isinstance(tool, BaseTool):
                processed_tools.append(tool)
            elif (
                hasattr(tool, "name")
                and hasattr(tool, "func")
                and hasattr(tool, "description")
            ):
                # Tool has the required attributes, create a Tool instance
                processed_tools.append(Tool.from_langchain(tool))
            else:
                raise ValueError(
                    f"Invalid tool type: {type(tool)}. "
                    "Tool must be an instance of BaseTool or "
                    "an object with 'name', 'func', and 'description' attributes."
                )
        return processed_tools
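The new validator accepts either BaseTool subclasses or any duck-typed object exposing name, func, and description. A minimal sketch of the duck-typed path — the class is hypothetical, not part of this diff:

    # Sketch: any object with name/func/description gets wrapped via Tool.from_langchain
    # (assumption: illustrative class only).
    class LegacyTool:
        name = "adder"
        description = "Adds two integers."

        @staticmethod
        def func(a: int, b: int) -> int:
            return a + b

    # BaseAgent.validate_tools would call Tool.from_langchain(LegacyTool())
    # and raise ValueError for an object missing any of the three attributes.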

    @model_validator(mode="after")
    def validate_and_set_attributes(self):
        # Validate required fields

@@ -3,16 +3,15 @@ from typing import TYPE_CHECKING, Optional

from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.utilities import I18N
from crewai.utilities.converter import ConverterError
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities import I18N
from crewai.utilities.printer import Printer


if TYPE_CHECKING:
    from crewai.agents.agent_builder.base_agent import BaseAgent
    from crewai.crew import Crew
    from crewai.task import Task
    from crewai.agents.agent_builder.base_agent import BaseAgent


class CrewAgentExecutorMixin:

@@ -20,15 +19,10 @@ class CrewAgentExecutorMixin:

    agent: Optional["BaseAgent"]
    task: Optional["Task"]
    iterations: int
    have_forced_answer: bool
    max_iter: int
    _i18n: I18N
    _printer: Printer = Printer()

    def _should_force_answer(self) -> bool:
        """Determine if a forced answer is required based on iteration count."""
        return (self.iterations >= self.max_iter) and not self.have_forced_answer

    def _create_short_term_memory(self, output) -> None:
        """Create and save a short-term memory item if conditions are met."""
        if (

@@ -100,14 +94,19 @@ class CrewAgentExecutorMixin:

            print(f"Failed to add to long term memory: {e}")
            pass

    def _ask_human_input(self, final_answer: dict) -> str:
    def _ask_human_input(self, final_answer: str) -> str:
        """Prompt human input for final decision making."""
        self._printer.print(
            content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
        )

        self._printer.print(
            content="\n\n=====\n## Please provide feedback on the Final Result and the Agent's actions:",
            content=(
                "\n\n=====\n"
                "## Please provide feedback on the Final Result and the Agent's actions. "
                "Respond with 'looks good' or a similar phrase when you're satisfied.\n"
                "=====\n"
            ),
            color="bold_yellow",
        )
        return input()

@@ -4,6 +4,7 @@ from crewai.types.usage_metrics import UsageMetrics

class TokenProcess:
    total_tokens: int = 0
    prompt_tokens: int = 0
    cached_prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

@@ -15,6 +16,9 @@ class TokenProcess:

        self.completion_tokens = self.completion_tokens + tokens
        self.total_tokens = self.total_tokens + tokens

    def sum_cached_prompt_tokens(self, tokens: int):
        self.cached_prompt_tokens = self.cached_prompt_tokens + tokens

    def sum_successful_requests(self, requests: int):
        self.successful_requests = self.successful_requests + requests

@@ -22,6 +26,7 @@ class TokenProcess:

        return UsageMetrics(
            total_tokens=self.total_tokens,
            prompt_tokens=self.prompt_tokens,
            cached_prompt_tokens=self.cached_prompt_tokens,
            completion_tokens=self.completion_tokens,
            successful_requests=self.successful_requests,
        )
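The diff adds a cached_prompt_tokens counter alongside the existing ones and surfaces it in UsageMetrics. A standalone sketch of the counter behavior — the class below is a stand-in, assuming the shape of TokenProcess shown above:

    # Sketch (assumption: mirrors TokenProcess above; not the real class).
    class Counter:
        def __init__(self):
            self.prompt = self.cached_prompt = self.total = 0

        def add_prompt(self, n: int):         # like sum_prompt_tokens
            self.prompt += n
            self.total += n

        def add_cached_prompt(self, n: int):  # new in this diff
            self.cached_prompt += n           # cached tokens are tracked separately
                                              # and do not bump the total

    c = Counter()
    c.add_prompt(120)
    c.add_cached_prompt(80)
    assert (c.total, c.cached_prompt) == (120, 80)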

@@ -1,6 +1,7 @@

import json
import re
from typing import Any, Dict, List, Union
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union

from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin

@@ -12,9 +13,10 @@ from crewai.agents.parser import (

    OutputParserException,
)
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools.base_tool import BaseTool
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N, Printer
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)

@@ -22,6 +24,12 @@ from crewai.utilities.logger import Logger

from crewai.utilities.training_handler import CrewTrainingHandler


@dataclass
class ToolResult:
    result: Any
    result_as_answer: bool


class CrewAgentExecutor(CrewAgentExecutorMixin):
    _logger: Logger = Logger()

@@ -33,7 +41,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

        agent: BaseAgent,
        prompt: dict[str, str],
        max_iter: int,
        tools: List[Any],
        tools: List[BaseTool],
        tools_names: str,
        stop_words: List[str],
        tools_description: str,

@@ -42,7 +50,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

        original_tools: List[Any] = [],
        function_calling_llm: Any = None,
        respect_context_window: bool = False,
        request_within_rpm_limit: Any = None,
        request_within_rpm_limit: Optional[Callable[[], bool]] = None,
        callbacks: List[Any] = [],
    ):
        self._i18n: I18N = I18N()

@@ -69,8 +77,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

        self.messages: List[Dict[str, str]] = []
        self.iterations = 0
        self.log_error_after = 3
        self.have_forced_answer = False
        self.name_to_tool_map = {tool.name: tool for tool in self.tools}
        self.tool_name_to_tool_map: Dict[str, BaseTool] = {
            tool.name: tool for tool in self.tools
        }
        if self.llm.stop:
            self.llm.stop = list(set(self.llm.stop + self.stop))
        else:

@@ -80,7 +89,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

        if "system" in self.prompt:
            system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
            user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)

            self.messages.append(self._format_msg(system_prompt, role="system"))
            self.messages.append(self._format_msg(user_prompt))
        else:

@@ -93,99 +101,155 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

        formatted_answer = self._invoke_loop()

        if self.ask_for_human_input:
            human_feedback = self._ask_human_input(formatted_answer.output)
            if self.crew and self.crew._train:
                self._handle_crew_training_output(formatted_answer, human_feedback)
            formatted_answer = self._handle_human_feedback(formatted_answer)

            # Making sure we only ask for it once, so disabling for the next thought loop
            self.ask_for_human_input = False
            self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
            formatted_answer = self._invoke_loop()

            if self.crew and self.crew._train:
                self._handle_crew_training_output(formatted_answer)
        self._create_short_term_memory(formatted_answer)
        self._create_long_term_memory(formatted_answer)
        return {"output": formatted_answer.output}

    def _invoke_loop(self, formatted_answer=None):
        try:
            while not isinstance(formatted_answer, AgentFinish):
                if not self.request_within_rpm_limit or self.request_within_rpm_limit():
                    answer = self.llm.call(
                        self.messages,
                        callbacks=self.callbacks,
    def _invoke_loop(self):
        """
        Main loop to invoke the agent's thought process until it reaches a conclusion
        or the maximum number of iterations is reached.
        """
        formatted_answer = None
        while not isinstance(formatted_answer, AgentFinish):
            try:
                if self._has_reached_max_iterations():
                    formatted_answer = self._handle_max_iterations_exceeded(
                        formatted_answer
                    )
                    break

                self._enforce_rpm_limit()

                answer = self._get_llm_response()

                formatted_answer = self._process_llm_response(answer)

                if isinstance(formatted_answer, AgentAction):
                    tool_result = self._execute_tool_and_check_finality(
                        formatted_answer
                    )
                    formatted_answer = self._handle_agent_action(
                        formatted_answer, tool_result
                    )

                    if answer is None or answer == "":
                        self._printer.print(
                            content="Received None or empty response from LLM call.",
                            color="red",
                        )
                        raise ValueError(
                            "Invalid response from LLM call - None or empty."
                        )
                self._invoke_step_callback(formatted_answer)
                self._append_message(formatted_answer.text, role="assistant")

                    if not self.use_stop_words:
                        try:
                            self._format_answer(answer)
                        except OutputParserException as e:
                            if (
                                FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE
                                in e.error
                            ):
                                answer = answer.split("Observation:")[0].strip()
            except OutputParserException as e:
                formatted_answer = self._handle_output_parser_exception(e)

                    self.iterations += 1
                    formatted_answer = self._format_answer(answer)

                    if isinstance(formatted_answer, AgentAction):
                        action_result = self._use_tool(formatted_answer)
                        formatted_answer.text += f"\nObservation: {action_result}"
                        formatted_answer.result = action_result
                        self._show_logs(formatted_answer)

                        if self.step_callback:
                            self.step_callback(formatted_answer)

                        if self._should_force_answer():
                            if self.have_forced_answer:
                                return AgentFinish(
                                    output=self._i18n.errors(
                                        "force_final_answer_error"
                                    ).format(formatted_answer.text),
                                    text=formatted_answer.text,
                                )
                            else:
                                formatted_answer.text += (
                                    f'\n{self._i18n.errors("force_final_answer")}'
                                )
                                self.have_forced_answer = True
                    self.messages.append(
                        self._format_msg(formatted_answer.text, role="assistant")
                    )

        except OutputParserException as e:
            self.messages.append({"role": "user", "content": e.error})
            if self.iterations > self.log_error_after:
                self._printer.print(
                    content=f"Error parsing LLM output, agent will retry: {e.error}",
                    color="red",
                )
            return self._invoke_loop(formatted_answer)

        except Exception as e:
            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
                str(e)
            ):
                self._handle_context_length()
                return self._invoke_loop(formatted_answer)
            else:
                raise e
            except Exception as e:
                if self._is_context_length_exceeded(e):
                    self._handle_context_length()
                    continue

        self._show_logs(formatted_answer)
        return formatted_answer

    def _has_reached_max_iterations(self) -> bool:
        """Check if the maximum number of iterations has been reached."""
        return self.iterations >= self.max_iter

    def _enforce_rpm_limit(self) -> None:
        """Enforce the requests per minute (RPM) limit if applicable."""
        if self.request_within_rpm_limit:
            self.request_within_rpm_limit()

    def _get_llm_response(self) -> str:
        """Call the LLM and return the response, handling any invalid responses."""
        answer = self.llm.call(
            self.messages,
            callbacks=self.callbacks,
        )

        if not answer:
            self._printer.print(
                content="Received None or empty response from LLM call.",
                color="red",
            )
            raise ValueError("Invalid response from LLM call - None or empty.")

        return answer

    def _process_llm_response(self, answer: str) -> Union[AgentAction, AgentFinish]:
        """Process the LLM response and format it into an AgentAction or AgentFinish."""
        if not self.use_stop_words:
            try:
                # Preliminary parsing to check for errors.
                self._format_answer(answer)
            except OutputParserException as e:
                if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
                    answer = answer.split("Observation:")[0].strip()

        self.iterations += 1
        return self._format_answer(answer)

    def _handle_agent_action(
        self, formatted_answer: AgentAction, tool_result: ToolResult
    ) -> Union[AgentAction, AgentFinish]:
        """Handle the AgentAction, execute tools, and process the results."""
        add_image_tool = self._i18n.tools("add_image")
        if (
            isinstance(add_image_tool, dict)
            and formatted_answer.tool.casefold().strip()
            == add_image_tool.get("name", "").casefold().strip()
        ):
            self.messages.append(tool_result.result)
            return formatted_answer  # Continue the loop

        if self.step_callback:
            self.step_callback(tool_result)

        formatted_answer.text += f"\nObservation: {tool_result.result}"
        formatted_answer.result = tool_result.result

        if tool_result.result_as_answer:
            return AgentFinish(
                thought="",
                output=tool_result.result,
                text=formatted_answer.text,
            )

        self._show_logs(formatted_answer)
        return formatted_answer

    def _invoke_step_callback(self, formatted_answer) -> None:
        """Invoke the step callback if it exists."""
        if self.step_callback:
            self.step_callback(formatted_answer)

    def _append_message(self, text: str, role: str = "assistant") -> None:
        """Append a message to the message list with the given role."""
        self.messages.append(self._format_msg(text, role=role))

    def _handle_output_parser_exception(self, e: OutputParserException) -> AgentAction:
        """Handle OutputParserException by updating messages and formatted_answer."""
        self.messages.append({"role": "user", "content": e.error})

        formatted_answer = AgentAction(
            text=e.error,
            tool="",
            tool_input="",
            thought="",
        )

        if self.iterations > self.log_error_after:
            self._printer.print(
                content=f"Error parsing LLM output, agent will retry: {e.error}",
                color="red",
            )

        return formatted_answer

    def _is_context_length_exceeded(self, exception: Exception) -> bool:
        """Check if the exception is due to exceeding the context length."""
        return LLMContextLengthExceededException(
            str(exception)
        )._is_context_limit_error(str(exception))

    def _show_start_logs(self):
        if self.agent is None:
            raise ValueError("Agent cannot be None")

@@ -238,7 +302,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

            content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
        )

    def _use_tool(self, agent_action: AgentAction) -> Any:
    def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
        tool_usage = ToolUsage(
            tools_handler=self.tools_handler,
            tools=self.tools,

@@ -250,23 +314,29 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

            agent=self.agent,
            action=agent_action,
        )
        tool_calling = tool_usage.parse(agent_action.text)
        tool_calling = tool_usage.parse_tool_calling(agent_action.text)

        if isinstance(tool_calling, ToolUsageErrorException):
            tool_result = tool_calling.message
            return ToolResult(result=tool_result, result_as_answer=False)
        else:
            if tool_calling.tool_name.casefold().strip() in [
                name.casefold().strip() for name in self.name_to_tool_map
                name.casefold().strip() for name in self.tool_name_to_tool_map
            ] or tool_calling.tool_name.casefold().replace("_", " ") in [
                name.casefold().strip() for name in self.name_to_tool_map
                name.casefold().strip() for name in self.tool_name_to_tool_map
            ]:
                tool_result = tool_usage.use(tool_calling, agent_action.text)
                tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
                if tool:
                    return ToolResult(
                        result=tool_result, result_as_answer=tool.result_as_answer
                    )
            else:
                tool_result = self._i18n.errors("wrong_tool_name").format(
                    tool=tool_calling.tool_name,
                    tools=", ".join([tool.name.casefold() for tool in self.tools]),
                )
        return tool_result
        return ToolResult(result=tool_result, result_as_answer=False)

    def _summarize_messages(self) -> None:
        messages_groups = []

@@ -284,7 +354,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

                    self._i18n.slice("summarizer_system_message"), role="system"
                ),
                self._format_msg(
                    self._i18n.slice("sumamrize_instruction").format(group=group),
                    self._i18n.slice("summarize_instruction").format(group=group),
                ),
            ],
            callbacks=self.callbacks,

@@ -301,16 +371,14 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

    def _handle_context_length(self) -> None:
        if self.respect_context_window:
            self._logger.log(
                "debug",
                "Context length exceeded. Summarizing content to fit the model context window.",
            self._printer.print(
                content="Context length exceeded. Summarizing content to fit the model context window.",
                color="yellow",
            )
            self._summarize_messages()
        else:
            self._logger.log(
                "debug",
                "Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
            self._printer.print(
                content="Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
                color="red",
            )
            raise SystemExit(

@@ -332,20 +400,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

        if self.crew is not None and hasattr(self.crew, "_train_iteration"):
            train_iteration = self.crew._train_iteration
            if agent_id in training_data and isinstance(train_iteration, int):
                training_data[agent_id][train_iteration]["improved_output"] = (
                    result.output
                )
                training_data[agent_id][train_iteration][
                    "improved_output"
                ] = result.output
                training_handler.save(training_data)
            else:
                self._logger.log(
                    "error",
                    "Invalid train iteration type or agent_id not in training data.",
                self._printer.print(
                    content="Invalid train iteration type or agent_id not in training data.",
                    color="red",
                )
        else:
            self._logger.log(
                "error",
                "Crew is None or does not have _train_iteration attribute.",
            self._printer.print(
                content="Crew is None or does not have _train_iteration attribute.",
                color="red",
            )

@@ -363,15 +429,13 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

                train_iteration, agent_id, training_data
            )
        else:
            self._logger.log(
                "error",
                "Invalid train iteration type. Expected int.",
            self._printer.print(
                content="Invalid train iteration type. Expected int.",
                color="red",
            )
        else:
            self._logger.log(
                "error",
                "Crew is None or does not have _train_iteration attribute.",
            self._printer.print(
                content="Crew is None or does not have _train_iteration attribute.",
                color="red",
            )

@@ -387,3 +451,123 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

    def _format_msg(self, prompt: str, role: str = "user") -> Dict[str, str]:
        prompt = prompt.rstrip()
        return {"role": role, "content": prompt}

    def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
        """
        Handles the human feedback loop, allowing the user to provide feedback
        on the agent's output and determining if additional iterations are needed.

        Parameters:
            formatted_answer (AgentFinish): The initial output from the agent.

        Returns:
            AgentFinish: The final output after incorporating human feedback.
        """
        while self.ask_for_human_input:
            human_feedback = self._ask_human_input(formatted_answer.output)

            if self.crew and self.crew._train:
                self._handle_crew_training_output(formatted_answer, human_feedback)

            # Make an LLM call to verify whether additional changes are requested based on the human feedback
            additional_changes_prompt = self._i18n.slice(
                "human_feedback_classification"
            ).format(feedback=human_feedback)

            retry_count = 0
            llm_call_successful = False
            additional_changes_response = None

            while retry_count < MAX_LLM_RETRY and not llm_call_successful:
                try:
                    additional_changes_response = (
                        self.llm.call(
                            [
                                self._format_msg(
                                    additional_changes_prompt, role="system"
                                )
                            ],
                            callbacks=self.callbacks,
                        )
                        .strip()
                        .lower()
                    )
                    llm_call_successful = True
                except Exception as e:
                    retry_count += 1

                    self._printer.print(
                        content=f"Error during LLM call to classify human feedback: {e}. Retrying... ({retry_count}/{MAX_LLM_RETRY})",
                        color="red",
                    )

            if not llm_call_successful:
                self._printer.print(
                    content="Error processing feedback after multiple attempts.",
                    color="red",
                )
                self.ask_for_human_input = False
                break

            if additional_changes_response == "false":
                self.ask_for_human_input = False
            elif additional_changes_response == "true":
                self.ask_for_human_input = True
                # Add human feedback to messages
                self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
                # Invoke the loop again with updated messages
                formatted_answer = self._invoke_loop()

                if self.crew and self.crew._train:
                    self._handle_crew_training_output(formatted_answer)
            else:
                # Unexpected response
                self._printer.print(
                    content=f"Unexpected response from LLM: '{additional_changes_response}'. Assuming no additional changes requested.",
                    color="red",
                )
                self.ask_for_human_input = False

        return formatted_answer

    def _handle_max_iterations_exceeded(self, formatted_answer):
        """
        Handles the case when the maximum number of iterations is exceeded.
        Performs one more LLM call to get the final answer.

        Parameters:
            formatted_answer: The last formatted answer from the agent.

        Returns:
            The final formatted answer after exceeding max iterations.
        """
        self._printer.print(
            content="Maximum iterations reached. Requesting final answer.",
            color="yellow",
        )

        if formatted_answer and hasattr(formatted_answer, "text"):
            assistant_message = (
                formatted_answer.text + f'\n{self._i18n.errors("force_final_answer")}'
            )
        else:
            assistant_message = self._i18n.errors("force_final_answer")

        self.messages.append(self._format_msg(assistant_message, role="assistant"))

        # Perform one more LLM call to get the final answer
        answer = self.llm.call(
            self.messages,
            callbacks=self.callbacks,
        )

        if answer is None or answer == "":
            self._printer.print(
                content="Received None or empty response from LLM call.",
                color="red",
            )
            raise ValueError("Invalid response from LLM call - None or empty.")

        formatted_answer = self._format_answer(answer)
        # Return the formatted answer, regardless of its type
        return formatted_answer

@@ -1,5 +1,6 @@

import re
from typing import Any, Union

from json_repair import repair_json

from crewai.utilities import I18N

@@ -5,6 +5,8 @@ from typing import Any, Dict

import requests
from rich.console import Console

from crewai.cli.tools.main import ToolCommand

from .constants import AUTH0_AUDIENCE, AUTH0_CLIENT_ID, AUTH0_DOMAIN
from .utils import TokenManager, validate_token

@@ -34,7 +36,9 @@ class AuthenticationCommand:

            "scope": "openid",
            "audience": AUTH0_AUDIENCE,
        }
        response = requests.post(url=self.DEVICE_CODE_URL, data=device_code_payload)
        response = requests.post(
            url=self.DEVICE_CODE_URL, data=device_code_payload, timeout=20
        )
        response.raise_for_status()
        return response.json()

@@ -54,14 +58,31 @@ class AuthenticationCommand:

        attempts = 0
        while True and attempts < 5:
            response = requests.post(self.TOKEN_URL, data=token_payload)
            response = requests.post(self.TOKEN_URL, data=token_payload, timeout=30)
            token_data = response.json()

            if response.status_code == 200:
                validate_token(token_data["id_token"])
                expires_in = 360000  # Token expiration time in seconds
                self.token_manager.save_tokens(token_data["access_token"], expires_in)
                console.print("\nWelcome to CrewAI+ !!", style="green")

                try:
                    ToolCommand().login()
                except Exception:
                    console.print(
                        "\n[bold yellow]Warning:[/bold yellow] Authentication with the Tool Repository failed.",
                        style="yellow",
                    )
                    console.print(
                        "Other features will work normally, but you may experience limitations "
                        "with downloading and publishing tools."
                        "\nRun [bold]crewai login[/bold] to try logging in again.\n",
                        style="yellow",
                    )

                console.print(
                    "\n[bold green]Welcome to CrewAI Enterprise![/bold green]\n"
                )
                return

            if token_data["error"] not in ("authorization_pending", "slow_down"):

src/crewai/cli/authentication/token.py (new file, 9 lines)
@@ -0,0 +1,9 @@

from .utils import TokenManager


def get_auth_token() -> str:
    """Get the authentication token."""
    access_token = TokenManager().get_token()
    if not access_token:
        raise Exception()
    return access_token
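The new helper raises a bare Exception when no token is cached. A minimal sketch of the call-site pattern — an illustrative snippet, not part of the diff:

    # Sketch of consuming get_auth_token (assumption: illustrative only).
    from crewai.cli.authentication.token import get_auth_token

    try:
        token = get_auth_token()
        headers = {"Authorization": f"Bearer {token}"}
    except Exception:
        # No cached token: the CLI expects the user to run `crewai login` first.
        headers = None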

@@ -1,12 +1,13 @@

from typing import Optional
import os
from importlib.metadata import version as get_version
from typing import Optional, Tuple

import click
import pkg_resources

from crewai.cli.add_crew_to_flow import add_crew_to_flow
from crewai.cli.create_crew import create_crew
from crewai.cli.create_flow import create_flow
from crewai.cli.create_pipeline import create_pipeline
from crewai.cli.crew_chat import run_chat
from crewai.memory.storage.kickoff_task_outputs_storage import (
    KickoffTaskOutputsSQLiteStorage,
)

@@ -26,27 +27,24 @@ from .update_crew import update_crew


@click.group()
@click.version_option(get_version("crewai"))
def crewai():
    """Top-level command group for crewai."""


@crewai.command()
@click.argument("type", type=click.Choice(["crew", "pipeline", "flow"]))
@click.argument("type", type=click.Choice(["crew", "flow"]))
@click.argument("name")
@click.option("--provider", type=str, help="The provider to use for the crew")
@click.option("--skip_provider", is_flag=True, help="Skip provider validation")
def create(type, name, provider, skip_provider=False):
    """Create a new crew, pipeline, or flow."""
    """Create a new crew or flow."""
    if type == "crew":
        create_crew(name, provider, skip_provider)
    elif type == "pipeline":
        create_pipeline(name)
    elif type == "flow":
        create_flow(name)
    else:
        click.secho(
            "Error: Invalid type. Must be 'crew', 'pipeline', or 'flow'.", fg="red"
        )
        click.secho("Error: Invalid type. Must be 'crew' or 'flow'.", fg="red")


@crewai.command()

@@ -55,14 +53,17 @@ def create(type, name, provider, skip_provider=False):

)
def version(tools):
    """Show the installed version of crewai."""
    crewai_version = pkg_resources.get_distribution("crewai").version
    try:
        crewai_version = get_version("crewai")
    except Exception:
        crewai_version = "unknown version"
    click.echo(f"crewai version: {crewai_version}")

    if tools:
        try:
            tools_version = pkg_resources.get_distribution("crewai-tools").version
            tools_version = get_version("crewai")
            click.echo(f"crewai tools version: {tools_version}")
        except pkg_resources.DistributionNotFound:
        except Exception:
            click.echo("crewai tools not installed")

@@ -136,6 +137,7 @@ def log_tasks_outputs() -> None:

@click.option("-l", "--long", is_flag=True, help="Reset LONG TERM memory")
@click.option("-s", "--short", is_flag=True, help="Reset SHORT TERM memory")
@click.option("-e", "--entities", is_flag=True, help="Reset ENTITIES memory")
@click.option("-kn", "--knowledge", is_flag=True, help="Reset KNOWLEDGE storage")
@click.option(
    "-k",
    "--kickoff-outputs",

@@ -143,17 +145,24 @@ def log_tasks_outputs() -> None:

    help="Reset LATEST KICKOFF TASK OUTPUTS",
)
@click.option("-a", "--all", is_flag=True, help="Reset ALL memories")
def reset_memories(long, short, entities, kickoff_outputs, all):
def reset_memories(
    long: bool,
    short: bool,
    entities: bool,
    knowledge: bool,
    kickoff_outputs: bool,
    all: bool,
) -> None:
    """
    Reset the crew memories (long, short, entity, latest_crew_kickoff_outputs). This will delete all the data saved.
    """
    try:
        if not all and not (long or short or entities or kickoff_outputs):
        if not all and not (long or short or entities or knowledge or kickoff_outputs):
            click.echo(
                "Please specify at least one memory type to reset using the appropriate flags."
            )
            return
        reset_memories_command(long, short, entities, kickoff_outputs, all)
        reset_memories_command(long, short, entities, knowledge, kickoff_outputs, all)
    except Exception as e:
        click.echo(f"An error occurred while resetting memories: {e}", err=True)

@@ -335,5 +344,15 @@ def flow_add_crew(crew_name):

    add_crew_to_flow(crew_name)


@crewai.command()
def chat():
    """
    Start a conversation with the Crew, collecting user-supplied inputs
    and using the Chat LLM to generate responses.
    """
    click.echo("Starting a conversation with the Crew")
    run_chat()


if __name__ == "__main__":
    crewai()
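The new `-kn/--knowledge` flag threads a sixth boolean through `reset_memories`. A minimal sketch of exercising the command in isolation, assuming click's standard test runner and that the command is importable from crewai.cli.cli — illustrative only:

    # Sketch: invoking the reset command with the new knowledge flag
    # (assumption: uses click.testing.CliRunner; not part of this diff).
    from click.testing import CliRunner
    from crewai.cli.cli import reset_memories

    runner = CliRunner()
    result = runner.invoke(reset_memories, ["--knowledge"])
    print(result.output)  # expects the knowledge-storage reset path to run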

@@ -1,8 +1,9 @@

import requests
from requests.exceptions import JSONDecodeError
from rich.console import Console

from crewai.cli.authentication.token import get_auth_token
from crewai.cli.plus_api import PlusAPI
from crewai.cli.utils import get_auth_token
from crewai.telemetry.telemetry import Telemetry

console = Console()

@@ -1,13 +1,19 @@

import json
from pathlib import Path
from pydantic import BaseModel, Field
from typing import Optional

from pydantic import BaseModel, Field

DEFAULT_CONFIG_PATH = Path.home() / ".config" / "crewai" / "settings.json"


class Settings(BaseModel):
    tool_repository_username: Optional[str] = Field(None, description="Username for interacting with the Tool Repository")
    tool_repository_password: Optional[str] = Field(None, description="Password for interacting with the Tool Repository")
    tool_repository_username: Optional[str] = Field(
        None, description="Username for interacting with the Tool Repository"
    )
    tool_repository_password: Optional[str] = Field(
        None, description="Password for interacting with the Tool Repository"
    )
    config_path: Path = Field(default=DEFAULT_CONFIG_PATH, exclude=True)

    def __init__(self, config_path: Path = DEFAULT_CONFIG_PATH, **data):

@@ -17,6 +17,12 @@ ENV_VARS = {

            "key_name": "GEMINI_API_KEY",
        }
    ],
    "nvidia_nim": [
        {
            "prompt": "Enter your NVIDIA API key (press Enter to skip)",
            "key_name": "NVIDIA_NIM_API_KEY",
        }
    ],
    "groq": [
        {
            "prompt": "Enter your GROQ API key (press Enter to skip)",

@@ -85,6 +91,12 @@ ENV_VARS = {

            "key_name": "CEREBRAS_API_KEY",
        },
    ],
    "sambanova": [
        {
            "prompt": "Enter your SambaNovaCloud API key (press Enter to skip)",
            "key_name": "SAMBANOVA_API_KEY",
        }
    ],
}


@@ -92,12 +104,14 @@ PROVIDERS = [

    "openai",
    "anthropic",
    "gemini",
    "nvidia_nim",
    "groq",
    "ollama",
    "watson",
    "bedrock",
    "azure",
    "cerebras",
    "sambanova",
]

MODELS = {

@@ -114,6 +128,75 @@ MODELS = {

        "gemini/gemini-gemma-2-9b-it",
        "gemini/gemini-gemma-2-27b-it",
    ],
    "nvidia_nim": [
        "nvidia_nim/nvidia/mistral-nemo-minitron-8b-8k-instruct",
        "nvidia_nim/nvidia/nemotron-4-mini-hindi-4b-instruct",
        "nvidia_nim/nvidia/llama-3.1-nemotron-70b-instruct",
        "nvidia_nim/nvidia/llama3-chatqa-1.5-8b",
        "nvidia_nim/nvidia/llama3-chatqa-1.5-70b",
        "nvidia_nim/nvidia/vila",
        "nvidia_nim/nvidia/neva-22",
        "nvidia_nim/nvidia/nemotron-mini-4b-instruct",
        "nvidia_nim/nvidia/usdcode-llama3-70b-instruct",
        "nvidia_nim/nvidia/nemotron-4-340b-instruct",
        "nvidia_nim/meta/codellama-70b",
        "nvidia_nim/meta/llama2-70b",
        "nvidia_nim/meta/llama3-8b-instruct",
        "nvidia_nim/meta/llama3-70b-instruct",
        "nvidia_nim/meta/llama-3.1-8b-instruct",
        "nvidia_nim/meta/llama-3.1-70b-instruct",
        "nvidia_nim/meta/llama-3.1-405b-instruct",
        "nvidia_nim/meta/llama-3.2-1b-instruct",
        "nvidia_nim/meta/llama-3.2-3b-instruct",
        "nvidia_nim/meta/llama-3.2-11b-vision-instruct",
        "nvidia_nim/meta/llama-3.2-90b-vision-instruct",
        "nvidia_nim/meta/llama-3.1-70b-instruct",
        "nvidia_nim/google/gemma-7b",
        "nvidia_nim/google/gemma-2b",
        "nvidia_nim/google/codegemma-7b",
        "nvidia_nim/google/codegemma-1.1-7b",
        "nvidia_nim/google/recurrentgemma-2b",
        "nvidia_nim/google/gemma-2-9b-it",
        "nvidia_nim/google/gemma-2-27b-it",
        "nvidia_nim/google/gemma-2-2b-it",
        "nvidia_nim/google/deplot",
        "nvidia_nim/google/paligemma",
        "nvidia_nim/mistralai/mistral-7b-instruct-v0.2",
        "nvidia_nim/mistralai/mixtral-8x7b-instruct-v0.1",
        "nvidia_nim/mistralai/mistral-large",
        "nvidia_nim/mistralai/mixtral-8x22b-instruct-v0.1",
        "nvidia_nim/mistralai/mistral-7b-instruct-v0.3",
        "nvidia_nim/nv-mistralai/mistral-nemo-12b-instruct",
        "nvidia_nim/mistralai/mamba-codestral-7b-v0.1",
        "nvidia_nim/microsoft/phi-3-mini-128k-instruct",
        "nvidia_nim/microsoft/phi-3-mini-4k-instruct",
        "nvidia_nim/microsoft/phi-3-small-8k-instruct",
        "nvidia_nim/microsoft/phi-3-small-128k-instruct",
        "nvidia_nim/microsoft/phi-3-medium-4k-instruct",
        "nvidia_nim/microsoft/phi-3-medium-128k-instruct",
        "nvidia_nim/microsoft/phi-3.5-mini-instruct",
        "nvidia_nim/microsoft/phi-3.5-moe-instruct",
        "nvidia_nim/microsoft/kosmos-2",
        "nvidia_nim/microsoft/phi-3-vision-128k-instruct",
        "nvidia_nim/microsoft/phi-3.5-vision-instruct",
        "nvidia_nim/databricks/dbrx-instruct",
        "nvidia_nim/snowflake/arctic",
        "nvidia_nim/aisingapore/sea-lion-7b-instruct",
        "nvidia_nim/ibm/granite-8b-code-instruct",
        "nvidia_nim/ibm/granite-34b-code-instruct",
        "nvidia_nim/ibm/granite-3.0-8b-instruct",
        "nvidia_nim/ibm/granite-3.0-3b-a800m-instruct",
        "nvidia_nim/mediatek/breeze-7b-instruct",
        "nvidia_nim/upstage/solar-10.7b-instruct",
        "nvidia_nim/writer/palmyra-med-70b-32k",
        "nvidia_nim/writer/palmyra-med-70b",
        "nvidia_nim/writer/palmyra-fin-70b-32k",
        "nvidia_nim/01-ai/yi-large",
        "nvidia_nim/deepseek-ai/deepseek-coder-6.7b-instruct",
        "nvidia_nim/rakuten/rakutenai-7b-instruct",
        "nvidia_nim/rakuten/rakutenai-7b-chat",
        "nvidia_nim/baichuan-inc/baichuan2-13b-chat",
    ],
    "groq": [
        "groq/llama-3.1-8b-instant",
        "groq/llama-3.1-70b-versatile",

@@ -123,21 +206,14 @@ MODELS = {

    ],
    "ollama": ["ollama/llama3.1", "ollama/mixtral"],
    "watson": [
        "watsonx/google/flan-t5-xxl",
        "watsonx/google/flan-ul2",
        "watsonx/bigscience/mt0-xxl",
        "watsonx/eleutherai/gpt-neox-20b",
        "watsonx/ibm/mpt-7b-instruct2",
        "watsonx/bigcode/starcoder",
        "watsonx/meta-llama/llama-2-70b-chat",
        "watsonx/meta-llama/llama-2-13b-chat",
        "watsonx/ibm/granite-13b-instruct-v1",
        "watsonx/ibm/granite-13b-chat-v1",
        "watsonx/google/flan-t5-xl",
        "watsonx/ibm/granite-13b-chat-v2",
        "watsonx/ibm/granite-13b-instruct-v2",
        "watsonx/elyza/elyza-japanese-llama-2-7b-instruct",
        "watsonx/ibm-mistralai/mixtral-8x7b-instruct-v01-q",
        "watsonx/meta-llama/llama-3-1-70b-instruct",
        "watsonx/meta-llama/llama-3-1-8b-instruct",
        "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
        "watsonx/meta-llama/llama-3-2-1b-instruct",
        "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
        "watsonx/meta-llama/llama-3-405b-instruct",
        "watsonx/mistral/mistral-large",
        "watsonx/ibm/granite-3-8b-instruct",
    ],
    "bedrock": [
        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",

@@ -163,6 +239,24 @@ MODELS = {

        "bedrock/mistral.mistral-7b-instruct-v0:2",
        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
    ],
    "sambanova": [
        "sambanova/Meta-Llama-3.3-70B-Instruct",
        "sambanova/QwQ-32B-Preview",
        "sambanova/Qwen2.5-72B-Instruct",
        "sambanova/Qwen2.5-Coder-32B-Instruct",
        "sambanova/Meta-Llama-3.1-405B-Instruct",
        "sambanova/Meta-Llama-3.1-70B-Instruct",
        "sambanova/Meta-Llama-3.1-8B-Instruct",
        "sambanova/Llama-3.2-90B-Vision-Instruct",
        "sambanova/Llama-3.2-11B-Vision-Instruct",
        "sambanova/Meta-Llama-3.2-3B-Instruct",
        "sambanova/Meta-Llama-3.2-1B-Instruct",
    ],
}

DEFAULT_LLM_MODEL = "gpt-4o-mini"

JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"


LITELLM_PARAMS = ["api_key", "api_base", "api_version"]

@@ -39,6 +39,7 @@ def create_folder_structure(name, parent_folder=None):

    folder_path.mkdir(parents=True)
    (folder_path / "tests").mkdir(exist_ok=True)
    (folder_path / "knowledge").mkdir(exist_ok=True)
    if not parent_folder:
        (folder_path / "src" / folder_name).mkdir(parents=True)
        (folder_path / "src" / folder_name / "tools").mkdir(parents=True)

@@ -52,7 +53,14 @@ def copy_template_files(folder_path, name, class_name, parent_folder):

    templates_dir = package_dir / "templates" / "crew"

    root_template_files = (
        [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
        [
            ".gitignore",
            "pyproject.toml",
            "README.md",
            "knowledge/user_preference.txt",
        ]
        if not parent_folder
        else []
    )
    tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
    config_template_files = ["config/agents.yaml", "config/tasks.yaml"]

@@ -168,7 +176,9 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):

    templates_dir = package_dir / "templates" / "crew"

    root_template_files = (
        [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
        [".gitignore", "pyproject.toml", "README.md", "knowledge/user_preference.txt"]
        if not parent_folder
        else []
    )
    tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
    config_template_files = ["config/agents.yaml", "config/tasks.yaml"]

src/crewai/cli/create_pipeline.py (file removed)
@@ -1,107 +0,0 @@

import shutil
from pathlib import Path

import click


def create_pipeline(name, router=False):
    """Create a new pipeline project."""
    folder_name = name.replace(" ", "_").replace("-", "_").lower()
    class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")

    click.secho(f"Creating pipeline {folder_name}...", fg="green", bold=True)

    project_root = Path(folder_name)
    if project_root.exists():
        click.secho(f"Error: Folder {folder_name} already exists.", fg="red")
        return

    # Create directory structure
    (project_root / "src" / folder_name).mkdir(parents=True)
    (project_root / "src" / folder_name / "pipelines").mkdir(parents=True)
    (project_root / "src" / folder_name / "crews").mkdir(parents=True)
    (project_root / "src" / folder_name / "tools").mkdir(parents=True)
    (project_root / "tests").mkdir(exist_ok=True)

    # Create .env file
    with open(project_root / ".env", "w") as file:
        file.write("OPENAI_API_KEY=YOUR_API_KEY")

    package_dir = Path(__file__).parent
    template_folder = "pipeline_router" if router else "pipeline"
    templates_dir = package_dir / "templates" / template_folder

    # List of template files to copy
    root_template_files = [".gitignore", "pyproject.toml", "README.md"]
    src_template_files = ["__init__.py", "main.py"]
    tools_template_files = ["tools/__init__.py", "tools/custom_tool.py"]

    if router:
        crew_folders = [
            "classifier_crew",
            "normal_crew",
            "urgent_crew",
        ]
        pipelines_folders = [
            "pipelines/__init__.py",
            "pipelines/pipeline_classifier.py",
            "pipelines/pipeline_normal.py",
            "pipelines/pipeline_urgent.py",
        ]
    else:
        crew_folders = [
            "research_crew",
            "write_linkedin_crew",
            "write_x_crew",
        ]
        pipelines_folders = ["pipelines/__init__.py", "pipelines/pipeline.py"]

    def process_file(src_file, dst_file):
        with open(src_file, "r") as file:
            content = file.read()

        content = content.replace("{{name}}", name)
        content = content.replace("{{crew_name}}", class_name)
        content = content.replace("{{folder_name}}", folder_name)
        content = content.replace("{{pipeline_name}}", class_name)

        with open(dst_file, "w") as file:
            file.write(content)

    # Copy and process root template files
    for file_name in root_template_files:
        src_file = templates_dir / file_name
        dst_file = project_root / file_name
        process_file(src_file, dst_file)

    # Copy and process src template files
    for file_name in src_template_files:
        src_file = templates_dir / file_name
        dst_file = project_root / "src" / folder_name / file_name
        process_file(src_file, dst_file)

    # Copy tools files
    for file_name in tools_template_files:
        src_file = templates_dir / file_name
        dst_file = project_root / "src" / folder_name / file_name
        shutil.copy(src_file, dst_file)

    # Copy pipelines folders
    for file_name in pipelines_folders:
        src_file = templates_dir / file_name
        dst_file = project_root / "src" / folder_name / file_name
        process_file(src_file, dst_file)

    # Copy crew folders
    for crew_folder in crew_folders:
        src_crew_folder = templates_dir / "crews" / crew_folder
        dst_crew_folder = project_root / "src" / folder_name / "crews" / crew_folder
        if src_crew_folder.exists():
            shutil.copytree(src_crew_folder, dst_crew_folder)
        else:
            click.secho(
                f"Warning: Crew folder {crew_folder} not found in template.",
                fg="yellow",
            )

    click.secho(f"Pipeline {name} created successfully!", fg="green", bold=True)

src/crewai/cli/crew_chat.py (new file, 413 lines)
@@ -0,0 +1,413 @@

import json
import re
import sys
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple

import click
import tomli

from crewai.crew import Crew
from crewai.llm import LLM
from crewai.types.crew_chat import ChatInputField, ChatInputs
from crewai.utilities.llm_utils import create_llm


def run_chat():
    """
    Runs an interactive chat loop using the Crew's chat LLM with function calling.
    Incorporates crew_name, crew_description, and input fields to build a tool schema.
    Exits if crew_name or crew_description are missing.
    """
    crew, crew_name = load_crew_and_name()
    chat_llm = initialize_chat_llm(crew)
    if not chat_llm:
        return

    crew_chat_inputs = generate_crew_chat_inputs(crew, crew_name, chat_llm)
    crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
    system_message = build_system_message(crew_chat_inputs)

    # Call the LLM to generate the introductory message
    introductory_message = chat_llm.call(
        messages=[{"role": "system", "content": system_message}]
    )
    click.secho(f"\nAssistant: {introductory_message}\n", fg="green")

    messages = [
        {"role": "system", "content": system_message},
        {"role": "assistant", "content": introductory_message},
    ]

    available_functions = {
        crew_chat_inputs.crew_name: create_tool_function(crew, messages),
    }

    click.secho(
        "\nEntering an interactive chat loop with function-calling.\n"
        "Type 'exit' or Ctrl+C to quit.\n",
        fg="cyan",
    )

    chat_loop(chat_llm, messages, crew_tool_schema, available_functions)
|
||||
|
||||
|
||||
def initialize_chat_llm(crew: Crew) -> Optional[LLM]:
    """Initializes the chat LLM and handles exceptions."""
    try:
        return create_llm(crew.chat_llm)
    except Exception as e:
        click.secho(
            f"Unable to find a Chat LLM. Please make sure you set chat_llm on the crew: {e}",
            fg="red",
        )
        return None

def build_system_message(crew_chat_inputs: ChatInputs) -> str:
    """Builds the initial system message for the chat."""
    required_fields_str = (
        ", ".join(
            f"{field.name} (desc: {field.description or 'n/a'})"
            for field in crew_chat_inputs.inputs
        )
        or "(No required fields detected)"
    )

    return (
        "You are a helpful AI assistant for the CrewAI platform. "
        "Your primary purpose is to assist users with the crew's specific tasks. "
        "You can answer general questions, but should guide users back to the crew's purpose afterward. "
        "For example, after answering a general question, remind the user of your main purpose, such as generating a research report, and prompt them to specify a topic or task related to the crew's purpose. "
        "You have a function (tool) you can call by name if you have all required inputs. "
        f"Those required inputs are: {required_fields_str}. "
        "Once you have them, call the function. "
        "Please keep your responses concise and friendly. "
        "If a user asks a question outside the crew's scope, provide a brief answer and remind them of the crew's purpose. "
        "After calling the tool, be prepared to take user feedback and make adjustments as needed. "
        "If you are ever unsure about a user's request or need clarification, ask the user for more information. "
        "Before doing anything else, introduce yourself with a friendly message like: 'Hey! I'm here to help you with [crew's purpose]. Could you please provide me with [inputs] so we can get started?' "
        "For example: 'Hey! I'm here to help you with uncovering and reporting cutting-edge developments through thorough research and detailed analysis. Could you please provide me with a topic you're interested in? This will help us generate a comprehensive research report and detailed analysis.'"
        f"\nCrew Name: {crew_chat_inputs.crew_name}"
        f"\nCrew Description: {crew_chat_inputs.crew_description}"
    )

def create_tool_function(crew: Crew, messages: List[Dict[str, str]]) -> Any:
    """Creates a wrapper function for running the crew tool with messages."""

    def run_crew_tool_with_messages(**kwargs):
        return run_crew_tool(crew, messages, **kwargs)

    return run_crew_tool_with_messages

def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
    """Main chat loop for interacting with the user."""
    while True:
        try:
            user_input = click.prompt("You", type=str)
            if user_input.strip().lower() in ["exit", "quit"]:
                click.echo("Exiting chat. Goodbye!")
                break

            messages.append({"role": "user", "content": user_input})
            final_response = chat_llm.call(
                messages=messages,
                tools=[crew_tool_schema],
                available_functions=available_functions,
            )

            messages.append({"role": "assistant", "content": final_response})
            click.secho(f"\nAssistant: {final_response}\n", fg="green")

        except KeyboardInterrupt:
            click.echo("\nExiting chat. Goodbye!")
            break
        except Exception as e:
            click.secho(f"An error occurred: {e}", fg="red")
            break

def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
    """
    Dynamically builds a LiteLLM 'function' schema for the given crew.

    Args:
        crew_inputs: A ChatInputs object containing the crew_name (used for
            the function 'name'), the crew_description, and a list of input
            fields (each with a name and description).
    """
    properties = {}
    for field in crew_inputs.inputs:
        properties[field.name] = {
            "type": "string",
            "description": field.description or "No description provided",
        }

    required_fields = [field.name for field in crew_inputs.inputs]

    return {
        "type": "function",
        "function": {
            "name": crew_inputs.crew_name,
            "description": crew_inputs.crew_description or "No crew description",
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required_fields,
            },
        },
    }

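For context, a hypothetical crew named `research_crew` with a single `topic` placeholder would yield a tool schema along these lines (the names and description below are illustrative, not taken from this PR):

```python
# Illustrative result of generate_crew_tool_schema for a hypothetical crew
# whose ChatInputs carry crew_name="research_crew" and one "topic" field.
example_schema = {
    "type": "function",
    "function": {
        "name": "research_crew",
        "description": "Researches a topic and produces a detailed report",
        "parameters": {
            "type": "object",
            "properties": {
                "topic": {"type": "string", "description": "Subject to research"},
            },
            "required": ["topic"],
        },
    },
}
```

This is the OpenAI-style function schema that LiteLLM-compatible models accept for tool calls, with every detected placeholder marked as a required string parameter.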
def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
    """
    Runs the crew using crew.kickoff(inputs=kwargs) and returns the output.

    Args:
        crew (Crew): The crew instance to run.
        messages (List[Dict[str, str]]): The chat messages up to this point.
        **kwargs: The inputs collected from the user.

    Returns:
        str: The output from the crew's execution.

    Raises:
        SystemExit: Exits the chat if an error occurs during crew execution.
    """
    try:
        # Serialize 'messages' to JSON string before adding to kwargs
        kwargs["crew_chat_messages"] = json.dumps(messages)

        # Run the crew with the provided inputs
        crew_output = crew.kickoff(inputs=kwargs)

        # Convert CrewOutput to a string to send back to the user
        result = str(crew_output)

        return result
    except Exception as e:
        # Exit the chat and show the error message
        click.secho("An error occurred while running the crew:", fg="red")
        click.secho(str(e), fg="red")
        sys.exit(1)

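In practice, this means every chat-triggered kickoff receives the running transcript as one extra input. A minimal sketch of what the crew ends up seeing (values hypothetical):

```python
import json

# Sketch of the inputs run_crew_tool assembles before calling crew.kickoff
messages = [
    {"role": "system", "content": "You are a helpful AI assistant..."},
    {"role": "user", "content": "Research AI agents, please"},
]
inputs = {
    "topic": "AI agents",                        # collected by the chat LLM
    "crew_chat_messages": json.dumps(messages),  # full transcript as a JSON string
}
# crew.kickoff(inputs=inputs) interpolates {topic} into task prompts, and any
# task that references {crew_chat_messages} sees the serialized chat history.
```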
def load_crew_and_name() -> Tuple[Crew, str]:
    """
    Loads the crew by importing the crew class from the user's project.

    Returns:
        Tuple[Crew, str]: A tuple containing the Crew instance and the name of the crew.
    """
    # Get the current working directory
    cwd = Path.cwd()

    # Path to the pyproject.toml file
    pyproject_path = cwd / "pyproject.toml"
    if not pyproject_path.exists():
        raise FileNotFoundError("pyproject.toml not found in the current directory.")

    # Load the pyproject.toml file using 'tomli'
    with pyproject_path.open("rb") as f:
        pyproject_data = tomli.load(f)

    # Get the project name from the 'project' section
    project_name = pyproject_data["project"]["name"]
    folder_name = project_name

    # Derive the crew class name from the project name
    # E.g., if project_name is 'my_project', crew_class_name is 'MyProject'
    crew_class_name = project_name.replace("_", " ").title().replace(" ", "")

    # Add the 'src' directory to sys.path
    src_path = cwd / "src"
    if str(src_path) not in sys.path:
        sys.path.insert(0, str(src_path))

    # Import the crew module
    crew_module_name = f"{folder_name}.crew"
    try:
        crew_module = __import__(crew_module_name, fromlist=[crew_class_name])
    except ImportError as e:
        raise ImportError(f"Failed to import crew module {crew_module_name}: {e}")

    # Get the crew class from the module
    try:
        crew_class = getattr(crew_module, crew_class_name)
    except AttributeError:
        raise AttributeError(
            f"Crew class {crew_class_name} not found in module {crew_module_name}"
        )

    # Instantiate the crew
    crew_instance = crew_class().crew()
    return crew_instance, crew_class_name

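The class-name derivation is a plain snake_case-to-PascalCase transform; a quick illustration of the exact string pipeline used above:

```python
def derive_class_name(project_name: str) -> str:
    # Same transform as above: "my_project" -> "My Project" -> "MyProject"
    return project_name.replace("_", " ").title().replace(" ", "")

assert derive_class_name("my_project") == "MyProject"
assert derive_class_name("latest_ai_development") == "LatestAiDevelopment"
```

Note that `str.title()` lowercases everything after the first letter of each word, so an acronym like `ai` becomes `Ai`, not `AI`; the generated crew class must follow the same convention or the `getattr` lookup will fail.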
def generate_crew_chat_inputs(crew: Crew, crew_name: str, chat_llm) -> ChatInputs:
    """
    Generates the ChatInputs required for the crew by analyzing the tasks and agents.

    Args:
        crew (Crew): The crew object containing tasks and agents.
        crew_name (str): The name of the crew.
        chat_llm: The chat language model to use for AI calls.

    Returns:
        ChatInputs: An object containing the crew's name, description, and input fields.
    """
    # Extract placeholders from tasks and agents
    required_inputs = fetch_required_inputs(crew)

    # Generate descriptions for each input using AI
    input_fields = []
    for input_name in required_inputs:
        description = generate_input_description_with_ai(input_name, crew, chat_llm)
        input_fields.append(ChatInputField(name=input_name, description=description))

    # Generate crew description using AI
    crew_description = generate_crew_description_with_ai(crew, chat_llm)

    return ChatInputs(
        crew_name=crew_name, crew_description=crew_description, inputs=input_fields
    )

def fetch_required_inputs(crew: Crew) -> Set[str]:
    """
    Extracts placeholders from the crew's tasks and agents.

    Args:
        crew (Crew): The crew object.

    Returns:
        Set[str]: A set of placeholder names.
    """
    placeholder_pattern = re.compile(r"\{(.+?)\}")
    required_inputs: Set[str] = set()

    # Scan tasks
    for task in crew.tasks:
        text = f"{task.description or ''} {task.expected_output or ''}"
        required_inputs.update(placeholder_pattern.findall(text))

    # Scan agents
    for agent in crew.agents:
        text = f"{agent.role or ''} {agent.goal or ''} {agent.backstory or ''}"
        required_inputs.update(placeholder_pattern.findall(text))

    return required_inputs

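The placeholder scan is a simple non-greedy regex over the concatenated task and agent text; for example:

```python
import re

placeholder_pattern = re.compile(r"\{(.+?)\}")
text = "Research {topic} thoroughly given the current year is {current_year}."
print(placeholder_pattern.findall(text))  # ['topic', 'current_year']
```

Collecting the matches into a set deduplicates placeholders that recur across several tasks and agents, so each input is described only once.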
def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) -> str:
    """
    Generates an input description using AI based on the context of the crew.

    Args:
        input_name (str): The name of the input placeholder.
        crew (Crew): The crew object.
        chat_llm: The chat language model to use for AI calls.

    Returns:
        str: A concise description of the input.
    """
    # Gather context from tasks and agents where the input is used
    context_texts = []
    placeholder_pattern = re.compile(r"\{(.+?)\}")

    for task in crew.tasks:
        if (
            f"{{{input_name}}}" in task.description
            or f"{{{input_name}}}" in task.expected_output
        ):
            # Replace placeholders with input names
            task_description = placeholder_pattern.sub(
                lambda m: m.group(1), task.description
            )
            expected_output = placeholder_pattern.sub(
                lambda m: m.group(1), task.expected_output
            )
            context_texts.append(f"Task Description: {task_description}")
            context_texts.append(f"Expected Output: {expected_output}")
    for agent in crew.agents:
        if (
            f"{{{input_name}}}" in agent.role
            or f"{{{input_name}}}" in agent.goal
            or f"{{{input_name}}}" in agent.backstory
        ):
            # Replace placeholders with input names
            agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role)
            agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal)
            agent_backstory = placeholder_pattern.sub(
                lambda m: m.group(1), agent.backstory
            )
            context_texts.append(f"Agent Role: {agent_role}")
            context_texts.append(f"Agent Goal: {agent_goal}")
            context_texts.append(f"Agent Backstory: {agent_backstory}")

    context = "\n".join(context_texts)
    if not context:
        # If no context is found for the input, raise an exception
        raise ValueError(f"No context found for input '{input_name}'.")

    prompt = (
        f"Based on the following context, write a concise description (15 words or less) of the input '{input_name}'.\n"
        "Provide only the description, without any extra text or labels. Do not include placeholders like '{topic}' in the description.\n"
        "Context:\n"
        f"{context}"
    )
    response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
    description = response.strip()

    return description

def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
    """
    Generates a brief description of the crew using AI.

    Args:
        crew (Crew): The crew object.
        chat_llm: The chat language model to use for AI calls.

    Returns:
        str: A concise description of the crew's purpose (15 words or less).
    """
    # Gather context from tasks and agents
    context_texts = []
    placeholder_pattern = re.compile(r"\{(.+?)\}")

    for task in crew.tasks:
        # Replace placeholders with input names
        task_description = placeholder_pattern.sub(
            lambda m: m.group(1), task.description
        )
        expected_output = placeholder_pattern.sub(
            lambda m: m.group(1), task.expected_output
        )
        context_texts.append(f"Task Description: {task_description}")
        context_texts.append(f"Expected Output: {expected_output}")
    for agent in crew.agents:
        # Replace placeholders with input names
        agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role)
        agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal)
        agent_backstory = placeholder_pattern.sub(lambda m: m.group(1), agent.backstory)
        context_texts.append(f"Agent Role: {agent_role}")
        context_texts.append(f"Agent Goal: {agent_goal}")
        context_texts.append(f"Agent Backstory: {agent_backstory}")

    context = "\n".join(context_texts)
    if not context:
        raise ValueError("No context found for generating crew description.")

    prompt = (
        "Based on the following context, write a concise, action-oriented description (15 words or less) of the crew's purpose.\n"
        "Provide only the description, without any extra text or labels. Do not include placeholders like '{topic}' in the description.\n"
        "Context:\n"
        f"{context}"
    )
    response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
    crew_description = response.strip()

    return crew_description
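run_chat is meant to be invoked from the CLI inside a project directory, since it reads pyproject.toml from the current working directory. A minimal sketch of a Click wiring, assuming a `chat` subcommand (the actual command registration lives elsewhere in this changeset):

```python
# Hypothetical wiring sketch -- the real subcommand registration is part of
# the CLI module elsewhere in this PR.
import click

from crewai.cli.crew_chat import run_chat


@click.command()
def chat():
    """Start an interactive chat with your crew using its chat_llm."""
    run_chat()
```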
@@ -1,9 +1,11 @@
-from typing import Optional
-import requests
-from os import getenv
-from crewai.cli.utils import get_crewai_version
+from typing import Optional
+from urllib.parse import urljoin
+
+import requests
+
+from crewai.cli.version import get_crewai_version


class PlusAPI:
    """
@@ -1,13 +1,22 @@
import subprocess

import click

+from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
from crewai.memory.short_term.short_term_memory import ShortTermMemory
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler


-def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
+def reset_memories_command(
+    long,
+    short,
+    entity,
+    knowledge,
+    kickoff_outputs,
+    all,
+) -> None:
    """
    Reset the crew memories.

@@ -17,6 +26,7 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
        entity (bool): Whether to reset the entity memory.
        kickoff_outputs (bool): Whether to reset the latest kickoff task outputs.
        all (bool): Whether to reset all memories.
+        knowledge (bool): Whether to reset the knowledge.
    """

    try:
@@ -25,6 +35,7 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
            EntityMemory().reset()
            LongTermMemory().reset()
            TaskOutputStorageHandler().reset()
+            KnowledgeStorage().reset()
            click.echo("All memories have been reset.")
        else:
            if long:
@@ -40,6 +51,9 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
            if kickoff_outputs:
                TaskOutputStorageHandler().reset()
                click.echo("Latest kickoff task outputs have been reset.")
+            if knowledge:
+                KnowledgeStorage().reset()
+                click.echo("Knowledge has been reset.")

    except subprocess.CalledProcessError as e:
        click.echo(f"An error occurred while resetting the memories: {e}", err=True)
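For the new `knowledge` parameter to be reachable from the command line, the CLI entry point needs a matching flag. A sketch of what that option could look like (the flag name and import path are assumptions; the real option lives in the CLI command module):

```python
# Sketch only: assumes a --knowledge flag that is forwarded to
# reset_memories_command; the actual Click wiring is elsewhere in this PR.
import click

from crewai.cli.reset_memories_command import reset_memories_command  # assumed path


@click.command()
@click.option("-k", "--knowledge", is_flag=True, help="Reset knowledge storage.")
@click.option("-a", "--all", "all_", is_flag=True, help="Reset all memories.")
def reset_memories(knowledge, all_):
    """Forward the parsed flags to the reset handler."""
    reset_memories_command(
        long=False, short=False, entity=False,
        knowledge=knowledge, kickoff_outputs=False, all=all_,
    )
```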
@@ -3,7 +3,8 @@ import subprocess

import click
from packaging import version

-from crewai.cli.utils import get_crewai_version, read_toml
+from crewai.cli.utils import read_toml
+from crewai.cli.version import get_crewai_version


def run_crew() -> None:
@@ -4,7 +4,7 @@ Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.co

## Installation

-Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
+Ensure you have Python >=3.10 <3.13 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.

First, if you haven't already, install uv:
@@ -2,7 +2,7 @@ research_task:
  description: >
    Conduct a thorough research about {topic}
    Make sure you find any interesting and relevant information given
-    the current year is 2024.
+    the current year is {current_year}.
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}
  agent: researcher
@@ -12,6 +12,6 @@ reporting_task:
    Review the context you got and expand each topic into a full section for a report.
    Make sure the report is detailed and contains any and all relevant information.
  expected_output: >
-    A fully fledge reports with the mains topics, each with a full section of information.
+    A fully fledged report with the main topics, each with a full section of information.
    Formatted as markdown without '```'
  agent: reporting_analyst
@@ -1,24 +1,26 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

-# Uncomment the following line to use an example of a custom tool
-# from {{folder_name}}.tools.custom_tool import MyCustomTool
-
-# Check our tools documentations for more information on how to use them
-# from crewai_tools import SerperDevTool
+# If you want to run a snippet of code before or after the crew starts,
+# you can use the @before_kickoff and @after_kickoff decorators
+# https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators

@CrewBase
class {{crew_name}}():
    """{{crew_name}} crew"""

+    # Learn more about YAML configuration files here:
+    # Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
+    # Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

+    # If you would like to add tools to your agents, you can learn more about it here:
+    # https://docs.crewai.com/concepts/agents#agent-tools
    @agent
    def researcher(self) -> Agent:
        return Agent(
            config=self.agents_config['researcher'],
-            # tools=[MyCustomTool()], # Example of custom tool, loaded on the beginning of file
            verbose=True
        )

@@ -29,6 +31,9 @@ class {{crew_name}}():
            verbose=True
        )

+    # To learn more about structured task outputs,
+    # task dependencies, and task callbacks, check out the documentation:
+    # https://docs.crewai.com/concepts/tasks#overview-of-a-task
    @task
    def research_task(self) -> Task:
        return Task(
@@ -45,6 +50,9 @@ class {{crew_name}}():
    @crew
    def crew(self) -> Crew:
        """Creates the {{crew_name}} crew"""
+        # To learn how to add knowledge sources to your crew, check out the documentation:
+        # https://docs.crewai.com/concepts/knowledge#what-is-knowledge

        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
@@ -0,0 +1,4 @@
User name is John Doe.
User is an AI Engineer.
User is interested in AI Agents.
User is based in San Francisco, California.
@@ -2,6 +2,8 @@
import sys
import warnings

+from datetime import datetime
+
from {{folder_name}}.crew import {{crew_name}}

warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
@@ -16,9 +18,14 @@ def run():
    Run the crew.
    """
    inputs = {
-        'topic': 'AI LLMs'
+        'topic': 'AI LLMs',
+        'current_year': str(datetime.now().year)
    }
-    {{crew_name}}().crew().kickoff(inputs=inputs)

+    try:
+        {{crew_name}}().crew().kickoff(inputs=inputs)
+    except Exception as e:
+        raise Exception(f"An error occurred while running the crew: {e}")


def train():
@@ -55,4 +62,4 @@ def test():
        {{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)

    except Exception as e:
-        raise Exception(f"An error occurred while replaying the crew: {e}")
+        raise Exception(f"An error occurred while testing the crew: {e}")
@@ -3,9 +3,9 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
-requires-python = ">=3.10,<=3.13"
+requires-python = ">=3.10,<3.13"
dependencies = [
-    "crewai[tools]>=0.79.4,<1.0.0"
+    "crewai[tools]>=0.95.0,<1.0.0"
]

[project.scripts]
@@ -18,3 +18,6 @@ test = "{{folder_name}}.main:test"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
+
+[tool.crewai]
+type = "crew"
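The requires-python tightening is not cosmetic: under PEP 440 version semantics, `<=3.13` accepts 3.13.0 but rejects 3.13.1, an inconsistent boundary. A quick check with the `packaging` library (illustrative):

```python
from packaging.specifiers import SpecifierSet

old = SpecifierSet(">=3.10,<=3.13")
new = SpecifierSet(">=3.10,<3.13")

print(old.contains("3.13.0"))  # True  -- 3.13 == 3.13.0 under PEP 440
print(old.contains("3.13.1"))  # False -- 3.13.1 > 3.13, an odd cutoff
print(new.contains("3.12.8"))  # True  -- anything below 3.13 is accepted
print(new.contains("3.13.0"))  # False -- 3.13 is now excluded outright
```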
@@ -10,7 +10,7 @@ class MyCustomToolInput(BaseModel):
class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = (
-        "Clear description for what this tool is useful for, you agent will need this information to use it."
+        "Clear description for what this tool is useful for, your agent will need this information to use it."
    )
    args_schema: Type[BaseModel] = MyCustomToolInput

@@ -4,7 +4,7 @@ Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.co

## Installation

-Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.
+Ensure you have Python >=3.10 <3.13 installed on your system. This project uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.

First, if you haven't already, install uv:
@@ -1,31 +1,47 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

+# If you want to run a snippet of code before or after the crew starts,
+# you can use the @before_kickoff and @after_kickoff decorators
+# https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators


@CrewBase
-class PoemCrew():
-    """Poem Crew"""
+class PoemCrew:
+    """Poem Crew"""

-    agents_config = 'config/agents.yaml'
-    tasks_config = 'config/tasks.yaml'
+    # Learn more about YAML configuration files here:
+    # Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
+    # Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
+    agents_config = "config/agents.yaml"
+    tasks_config = "config/tasks.yaml"

-    @agent
-    def poem_writer(self) -> Agent:
-        return Agent(
-            config=self.agents_config['poem_writer'],
-        )
+    # If you would like to add tools to your crew, you can learn more about it here:
+    # https://docs.crewai.com/concepts/agents#agent-tools
+    @agent
+    def poem_writer(self) -> Agent:
+        return Agent(
+            config=self.agents_config["poem_writer"],
+        )

-    @task
-    def write_poem(self) -> Task:
-        return Task(
-            config=self.tasks_config['write_poem'],
-        )
+    # To learn more about structured task outputs,
+    # task dependencies, and task callbacks, check out the documentation:
+    # https://docs.crewai.com/concepts/tasks#overview-of-a-task
+    @task
+    def write_poem(self) -> Task:
+        return Task(
+            config=self.tasks_config["write_poem"],
+        )

-    @crew
-    def crew(self) -> Crew:
-        """Creates the Research Crew"""
-        return Crew(
-            agents=self.agents,  # Automatically created by the @agent decorator
-            tasks=self.tasks,  # Automatically created by the @task decorator
-            process=Process.sequential,
-            verbose=True,
-        )
+    @crew
+    def crew(self) -> Crew:
+        """Creates the Research Crew"""
+        # To learn how to add knowledge sources to your crew, check out the documentation:
+        # https://docs.crewai.com/concepts/knowledge#what-is-knowledge

+        return Crew(
+            agents=self.agents,  # Automatically created by the @agent decorator
+            tasks=self.tasks,  # Automatically created by the @task decorator
+            process=Process.sequential,
+            verbose=True,
+        )
@@ -5,7 +5,7 @@ from pydantic import BaseModel

from crewai.flow.flow import Flow, listen, start

-from .crews.poem_crew.poem_crew import PoemCrew
+from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew


class PoemState(BaseModel):
@@ -3,9 +3,9 @@ name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
-requires-python = ">=3.10,<=3.13"
+requires-python = ">=3.10,<3.13"
dependencies = [
-    "crewai[tools]>=0.79.4,<1.0.0",
+    "crewai[tools]>=0.95.0,<1.0.0",
]

[project.scripts]
@@ -15,3 +15,6 @@ plot = "{{folder_name}}.main:plot"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
+
+[tool.crewai]
+type = "flow"
@@ -13,7 +13,7 @@ class MyCustomToolInput(BaseModel):
class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = (
-        "Clear description for what this tool is useful for, you agent will need this information to use it."
+        "Clear description for what this tool is useful for, your agent will need this information to use it."
    )
    args_schema: Type[BaseModel] = MyCustomToolInput

src/crewai/cli/templates/pipeline/.gitignore (vendored file, deleted)
@@ -1,2 +0,0 @@
.env
__pycache__/
@@ -1,57 +0,0 @@
# {{crew_name}} Crew

Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.

## Installation

Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.

First, if you haven't already, install Poetry:

```bash
pip install poetry
```

Next, navigate to your project directory and install the dependencies:

1. First lock the dependencies and then install them:

```bash
crewai install
```

### Customizing

**Add your `OPENAI_API_KEY` into the `.env` file**

- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools and specific args
- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks

## Running the Project

To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:

```bash
crewai run
```

This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.

This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folder.

## Understanding Your Crew

The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.

## Support

For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI.

- Visit our [documentation](https://docs.crewai.com)
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
- [Chat with our docs](https://chatg.pt/DWjSBZn)

Let's create wonders together with the power and simplicity of crewAI.
@@ -1,16 +0,0 @@
research_task:
  description: >
    Conduct a thorough research about {topic}
    Make sure you find any interesting and relevant information given
    the current year is 2024.
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}
  agent: researcher

reporting_task:
  description: >
    Review the context you got and expand each topic into a full section for a report.
    Make sure the report is detailed and contains any and all relevant information.
  expected_output: >
    A fully fledge reports with a title, mains topics, each with a full section of information.
  agent: reporting_analyst
@@ -1,58 +0,0 @@
from pydantic import BaseModel
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool

# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool


class ResearchReport(BaseModel):
    """Research Report"""
    title: str
    body: str

@CrewBase
class ResearchCrew():
    """Research Crew"""
    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

    @agent
    def researcher(self) -> Agent:
        return Agent(
            config=self.agents_config['researcher'],
            verbose=True
        )

    @agent
    def reporting_analyst(self) -> Agent:
        return Agent(
            config=self.agents_config['reporting_analyst'],
            verbose=True
        )

    @task
    def research_task(self) -> Task:
        return Task(
            config=self.tasks_config['research_task'],
        )

    @task
    def reporting_task(self) -> Task:
        return Task(
            config=self.tasks_config['reporting_task'],
            output_pydantic=ResearchReport
        )

    @crew
    def crew(self) -> Crew:
        """Creates the Research Crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
@@ -1,51 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# Uncomment the following line to use an example of a custom tool
# from {{folder_name}}.tools.custom_tool import MyCustomTool

# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool

@CrewBase
class WriteLinkedInCrew():
    """Research Crew"""
    agents_config = 'config/agents.yaml'
    tasks_config = 'config/tasks.yaml'

    @agent
    def researcher(self) -> Agent:
        return Agent(
            config=self.agents_config['researcher'],
            verbose=True
        )

    @agent
    def reporting_analyst(self) -> Agent:
        return Agent(
            config=self.agents_config['reporting_analyst'],
            verbose=True
        )

    @task
    def research_task(self) -> Task:
        return Task(
            config=self.tasks_config['research_task'],
        )

    @task
    def reporting_task(self) -> Task:
        return Task(
            config=self.tasks_config['reporting_task'],
            output_file='report.md'
        )

    @crew
    def crew(self) -> Crew:
        """Creates the {{crew_name}} crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
@@ -1,14 +0,0 @@
x_writer_agent:
  role: >
    Expert Social Media Content Creator specializing in short form written content
  goal: >
    Create viral-worthy, engaging short form posts that distill complex {topic} information
    into compelling 280-character messages
  backstory: >
    You're a social media virtuoso with a particular talent for short form content. Your posts
    consistently go viral due to your ability to craft hooks that stop users mid-scroll.
    You've studied the techniques of social media masters like Justin Welsh, Dickie Bush,
    Nicolas Cole, and Shaan Puri, incorporating their best practices into your own unique style.
    Your superpower is taking intricate {topic} concepts and transforming them into
    bite-sized, shareable content that resonates with a wide audience. You know exactly
    how to structure a post for maximum impact and engagement.
@@ -1,22 +0,0 @@
write_x_task:
  description: >
    Using the research report provided, create an engaging short form post about {topic}.
    Your post should have a great hook, summarize key points, and be structured for easy
    consumption on a digital platform. The post must be under 280 characters.
    Follow these guidelines:
    1. Start with an attention-grabbing hook
    2. Condense the main insights from the research
    3. Use clear, concise language
    4. Include a call-to-action or thought-provoking question if space allows
    5. Ensure the post flows well and is easy to read quickly

    Here is the title of the research report you will be using

    Title: {title}
    Research:
    {body}

  expected_output: >
    A compelling X post under 280 characters that effectively summarizes the key findings
    about {topic}, starts with a strong hook, and is optimized for engagement on the platform.
  agent: x_writer_agent
@@ -1,36 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool

# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool


@CrewBase
class WriteXCrew:
    """Research Crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def x_writer_agent(self) -> Agent:
        return Agent(config=self.agents_config["x_writer_agent"], verbose=True)

    @task
    def write_x_task(self) -> Task:
        return Task(
            config=self.tasks_config["write_x_task"],
        )

    @crew
    def crew(self) -> Crew:
        """Creates the Write X Crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
@@ -1,26 +0,0 @@
#!/usr/bin/env python
import asyncio
from {{folder_name}}.pipelines.pipeline import {{pipeline_name}}Pipeline

async def run():
    """
    Run the pipeline.
    """
    inputs = [
        {"topic": "AI wearables"},
    ]
    pipeline = {{pipeline_name}}Pipeline()
    results = await pipeline.kickoff(inputs)

    # Process and print results
    for result in results:
        print(f"Raw output: {result.raw}")
        if result.json_dict:
            print(f"JSON output: {result.json_dict}")
        print("\n")

def main():
    asyncio.run(run())

if __name__ == "__main__":
    main()
@@ -1,87 +0,0 @@
"""
This pipeline file includes two different examples to demonstrate the flexibility of crewAI pipelines.

Example 1: Two-Stage Pipeline
-----------------------------
This pipeline consists of two crews:
1. ResearchCrew: Performs research on a given topic.
2. WriteXCrew: Generates an X (Twitter) post based on the research findings.

Key features:
- The ResearchCrew's final task uses output_json to store all research findings in a JSON object.
- This JSON object is then passed to the WriteXCrew, where tasks can access the research findings.

Example 2: Two-Stage Pipeline with Parallel Execution
-------------------------------------------------------
This pipeline consists of three crews:
1. ResearchCrew: Performs research on a given topic.
2. WriteXCrew and WriteLinkedInCrew: Run in parallel, using the research findings to generate posts for X and LinkedIn, respectively.

Key features:
- Demonstrates the ability to run multiple crews in parallel.
- Shows how to structure a pipeline with both sequential and parallel stages.

Usage:
- To switch between examples, comment/uncomment the respective code blocks below.
- Ensure that you have implemented all necessary crew classes (ResearchCrew, WriteXCrew, WriteLinkedInCrew) before running.
"""

# Common imports for both examples
from crewai import Pipeline


# Uncomment the crews you need for your chosen example
from ..crews.research_crew.research_crew import ResearchCrew
from ..crews.write_x_crew.write_x_crew import WriteXCrew
# from .crews.write_linkedin_crew.write_linkedin_crew import WriteLinkedInCrew  # Uncomment for Example 2

# EXAMPLE 1: Two-Stage Pipeline
# -----------------------------
# Uncomment the following code block to use Example 1

class {{pipeline_name}}Pipeline:
    def __init__(self):
        # Initialize crews
        self.research_crew = ResearchCrew().crew()
        self.write_x_crew = WriteXCrew().crew()

    def create_pipeline(self):
        return Pipeline(
            stages=[
                self.research_crew,
                self.write_x_crew
            ]
        )

    async def kickoff(self, inputs):
        pipeline = self.create_pipeline()
        results = await pipeline.kickoff(inputs)
        return results


# EXAMPLE 2: Two-Stage Pipeline with Parallel Execution
# -------------------------------------------------------
# Uncomment the following code block to use Example 2

# @PipelineBase
# class {{pipeline_name}}Pipeline:
#     def __init__(self):
#         # Initialize crews
#         self.research_crew = ResearchCrew().crew()
#         self.write_x_crew = WriteXCrew().crew()
#         self.write_linkedin_crew = WriteLinkedInCrew().crew()

#     @pipeline
#     def create_pipeline(self):
#         return Pipeline(
#             stages=[
#                 self.research_crew,
#                 [self.write_x_crew, self.write_linkedin_crew]  # Parallel execution
#             ]
#         )

#     async def run(self, inputs):
#         pipeline = self.create_pipeline()
#         results = await pipeline.kickoff(inputs)
#         return results
@@ -1,17 +0,0 @@
[tool.poetry]
name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.79.4,<1.0.0" }
asyncio = "*"

[tool.poetry.scripts]
{{folder_name}} = "{{folder_name}}.main:main"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
@@ -1,19 +0,0 @@
from typing import Type
from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyCustomToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")

class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = (
        "Clear description for what this tool is useful for, you agent will need this information to use it."
    )
    args_schema: Type[BaseModel] = MyCustomToolInput

    def _run(self, argument: str) -> str:
        # Implementation goes here
        return "this is an example of a tool output, ignore it and move along."
@@ -1,2 +0,0 @@
.env
__pycache__/
@@ -1,54 +0,0 @@
# {{crew_name}} Crew

Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.

## Installation

Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.

First, if you haven't already, install Poetry:

```bash
pip install poetry
```

Next, navigate to your project directory and install the dependencies:

1. First lock the dependencies and then install them:
```bash
crewai install
```
### Customizing

**Add your `OPENAI_API_KEY` into the `.env` file**

- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools and specific args
- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks

## Running the Project

To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:

```bash
crewai run
```

This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.

This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folder.

## Understanding Your Crew

The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.

## Support

For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI.
- Visit our [documentation](https://docs.crewai.com)
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
- [Chat with our docs](https://chatg.pt/DWjSBZn)

Let's create wonders together with the power and simplicity of crewAI.
@@ -1,19 +0,0 @@
researcher:
  role: >
    {topic} Senior Data Researcher
  goal: >
    Uncover cutting-edge developments in {topic}
  backstory: >
    You're a seasoned researcher with a knack for uncovering the latest
    developments in {topic}. Known for your ability to find the most relevant
    information and present it in a clear and concise manner.

reporting_analyst:
  role: >
    {topic} Reporting Analyst
  goal: >
    Create detailed reports based on {topic} data analysis and research findings
  backstory: >
    You're a meticulous analyst with a keen eye for detail. You're known for
    your ability to turn complex data into clear and concise reports, making
    it easy for others to understand and act on the information you provide.
@@ -1,40 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from pydantic import BaseModel

# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool

# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool

class UrgencyScore(BaseModel):
    urgency_score: int

@CrewBase
class ClassifierCrew:
    """Email Classifier Crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def classifier(self) -> Agent:
        return Agent(config=self.agents_config["classifier"], verbose=True)

    @task
    def urgent_task(self) -> Task:
        return Task(
            config=self.tasks_config["classify_email"],
            output_pydantic=UrgencyScore,
        )

    @crew
    def crew(self) -> Crew:
        """Creates the Email Classifier Crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
@@ -1,7 +0,0 @@
classifier:
  role: >
    Email Classifier
  goal: >
    Classify the email: {email} as urgent or normal from a score of 1 to 10, where 1 is not urgent and 10 is urgent. Return the urgency score only.
  backstory: >
    You are a highly efficient and experienced email classifier, trained to quickly assess and classify emails. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing normal situations and maintaining smooth operations.
@@ -1,7 +0,0 @@
classify_email:
  description: >
    Classify the email: {email}
    as urgent or normal.
  expected_output: >
    Classify the email from a scale of 1 to 10, where 1 is not urgent and 10 is urgent. Return the urgency score only.
  agent: classifier
@@ -1,7 +0,0 @@
normal_handler:
  role: >
    Normal Email Processor
  goal: >
    Process normal emails and create an email to respond to the sender.
  backstory: >
    You are a highly efficient and experienced normal email handler, trained to quickly assess and respond to normal communications. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing normal situations and maintaining smooth operations.
@@ -1,6 +0,0 @@
normal_task:
  description: >
    Process and respond to normal email quickly.
  expected_output: >
    An email response to the normal email.
  agent: normal_handler
@@ -1,36 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool

# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool


@CrewBase
class NormalCrew:
    """Normal Email Crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def normal_handler(self) -> Agent:
        return Agent(config=self.agents_config["normal_handler"], verbose=True)

    @task
    def urgent_task(self) -> Task:
        return Task(
            config=self.tasks_config["normal_task"],
        )

    @crew
    def crew(self) -> Crew:
        """Creates the Normal Email Crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
@@ -1,7 +0,0 @@
urgent_handler:
  role: >
    Urgent Email Processor
  goal: >
    Process urgent emails and create an email to respond to the sender.
  backstory: >
    You are a highly efficient and experienced urgent email handler, trained to quickly assess and respond to time-sensitive communications. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing critical situations and maintaining smooth operations.
@@ -1,6 +0,0 @@
urgent_task:
  description: >
    Process and respond to urgent email quickly.
  expected_output: >
    An email response to the urgent email.
  agent: urgent_handler
@@ -1,36 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool

# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool


@CrewBase
class UrgentCrew:
    """Urgent Email Crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def urgent_handler(self) -> Agent:
        return Agent(config=self.agents_config["urgent_handler"], verbose=True)

    @task
    def urgent_task(self) -> Task:
        return Task(
            config=self.tasks_config["urgent_task"],
        )

    @crew
    def crew(self) -> Crew:
        """Creates the Urgent Email Crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
@@ -1,75 +0,0 @@
#!/usr/bin/env python
import asyncio
from crewai.routers.router import Route
from crewai.routers.router import Router

from {{folder_name}}.pipelines.pipeline_classifier import EmailClassifierPipeline
from {{folder_name}}.pipelines.pipeline_normal import NormalPipeline
from {{folder_name}}.pipelines.pipeline_urgent import UrgentPipeline

async def run():
    """
    Run the pipeline.
    """
    inputs = [
        {
            "email": """
Subject: URGENT: Marketing Campaign Launch - Immediate Action Required
Dear Team,
I'm reaching out regarding our upcoming marketing campaign that requires your immediate attention and swift action. We're facing a critical deadline, and our success hinges on our ability to mobilize quickly.
Key points:

Campaign launch: 48 hours from now
Target audience: 250,000 potential customers
Expected ROI: 35% increase in Q3 sales

What we need from you NOW:

Final approval on creative assets (due in 3 hours)
Confirmation of media placements (due by end of day)
Last-minute budget allocation for paid social media push

Our competitors are poised to launch similar campaigns, and we must act fast to maintain our market advantage. Delays could result in significant lost opportunities and potential revenue.
Please prioritize this campaign above all other tasks. I'll be available for the next 24 hours to address any concerns or roadblocks.
Let's make this happen!
[Your Name]
Marketing Director
P.S. I'll be scheduling an emergency team meeting in 1 hour to discuss our action plan. Attendance is mandatory.
"""
        }
    ]

    pipeline_classifier = EmailClassifierPipeline().create_pipeline()
    pipeline_urgent = UrgentPipeline().create_pipeline()
    pipeline_normal = NormalPipeline().create_pipeline()

    router = Router(
        routes={
            "high_urgency": Route(
                condition=lambda x: x.get("urgency_score", 0) > 7,
                pipeline=pipeline_urgent
            ),
            "low_urgency": Route(
                condition=lambda x: x.get("urgency_score", 0) <= 7,
                pipeline=pipeline_normal
            )
        },
        default=pipeline_normal
    )

    pipeline = pipeline_classifier >> router

    results = await pipeline.kickoff(inputs)

    # Process and print results
    for result in results:
        print(f"Raw output: {result.raw}")
        if result.json_dict:
            print(f"JSON output: {result.json_dict}")
        print("\n")

def main():
    asyncio.run(run())

if __name__ == "__main__":
    main()
Some files were not shown because too many files have changed in this diff.