Mirror of https://github.com/crewAIInc/crewAI.git
Synced 2025-12-16 12:28:30 +00:00

Compare commits (33 commits): devin/1764 ... gl/feat/mo
| Author | SHA1 | Date |
|---|---|---|
|  | 82cb72ea41 |  |
|  | eca9077590 |  |
|  | 2607f5361c |  |
|  | 080a7d753f |  |
|  | c5c07331bb |  |
|  | c960f26601 |  |
|  | 78a68c677c |  |
|  | 4ddab646ce |  |
|  | 61df54273c |  |
|  | 819c06d553 |  |
|  | af012f8f59 |  |
|  | d9edc85e0c |  |
|  | e1c7e6cc67 |  |
|  | 4dc2e48849 |  |
|  | 838e3fd3f9 |  |
|  | 20e1aa46d4 |  |
|  | 3f5cb77383 |  |
|  | b26cec3a80 |  |
|  | 09d9341aa9 |  |
|  | 916c217d4b |  |
|  | 25f83d7a26 |  |
|  | 18750b67c6 |  |
|  | fd75cd10de |  |
|  | 147488b746 |  |
|  | 1a8bf47d20 |  |
|  | d610023527 |  |
|  | 5ee1b35889 |  |
|  | 41d9ee6d15 |  |
|  | a7bb489e9f |  |
|  | e16606672a |  |
|  | 6114dbe557 |  |
|  | a7f7b1bd68 |  |
|  | ff5cbdee07 |  |
.github/workflows/tests.yml (6 changes, vendored)

````
@@ -6,7 +6,7 @@ permissions:
contents: read

env:
OPENAI_API_KEY: fake-api-key
OPENAI_API_KEY: fake-openai-key
PYTHONUNBUFFERED: 1

jobs:
@@ -45,7 +45,7 @@ jobs:
enable-cache: false

- name: Install the project
run: uv sync --all-groups --all-extras
run: uv sync --all-groups --all-extras && uv sync --all-extras --package crewai-core && uv sync --all-extras --package crewai-tools

- name: Restore test durations
uses: actions/cache/restore@v4
@@ -75,7 +75,7 @@ jobs:
#   DURATIONS_ARG="--durations-path=${DURATION_FILE}"
# fi

uv run pytest \
uv run pytest packages/crewai/tests \
--block-network \
--timeout=30 \
-vv \
````
.github/workflows/tools-generate-specs.yml (new file, 84 lines, vendored)

```yaml
name: Generate Tool Specifications

on:
  push:
    branches:
      - main

permissions:
  contents: write
  pull-requests: write

jobs:
  generate-specs:
    if: github.event.head_commit.author.name != 'crewai-tools-spec-generator[bot]'
    runs-on: ubuntu-latest
    outputs:
      specs_changed: ${{ steps.check_changes.outputs.specs_changed }}
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true

      - name: Set up Python
        run: uv python install 3.12.8

      - name: Install the project
        run: uv sync --dev --all-extras

      - name: Generate tool specifications
        run: uv run python packages/tools/generate_tool_specs.py

      - name: Configure Git and add upstream
        run: |
          git config user.name "github-actions[bot]"
          git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
          git remote add upstream https://github.com/crewAIInc/crewAI-tools.git
          git fetch upstream

      - name: Check for changes in tool specifications
        id: check_changes
        run: |
          git add packages/tools/tool.specs.json
          if git diff --quiet --staged; then
            echo "No changes detected in tool.specs.json"
            echo "specs_changed=false" >> $GITHUB_OUTPUT
          else
            echo "Changes detected in tool.specs.json"
            echo "specs_changed=true" >> $GITHUB_OUTPUT
          fi

      - name: Generate GitHub App token
        if: steps.check_changes.outputs.specs_changed == 'true'
        id: app-token
        uses: tibdex/github-app-token@v2
        with:
          app_id: ${{ secrets.CREWAI_RELEASE_TOOL_APP_ID }}
          private_key: ${{ secrets.CREWAI_RELEASE_TOOL_PRIVATE_KEY }}

      - name: Create Pull Request
        if: steps.check_changes.outputs.specs_changed == 'true'
        uses: peter-evans/create-pull-request@v7
        with:
          token: ${{ steps.app-token.outputs.token }}
          title: "Automatically update tool specifications"
          base: main
          branch: update-tool-specs
          commit-message: "Automatically update tool specifications"
          committer: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
          author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
          delete-branch: true

  notify-api:
    if: github.event.head_commit.author.name == 'crewai-tools-spec-generator[bot]'
    runs-on: ubuntu-latest
    steps:
      - name: Notify API about tool specification update
        run: |
          curl -X POST https://app.crewai.com/crewai_plus/api/v1/internal_tool_releases \
            -H "Content-Type: application/json" \
            -H "Authorization: Bearer ${{ secrets.CREWAI_RELEASE_TOOL_API_KEY }}"
```
.github/workflows/tools-tests.yml (new file, 42 lines, vendored)

```yaml
name: Tools Tests

on: [pull_request]

permissions:
  contents: write

env:
  OPENAI_API_KEY: fake-openai-key
  BRAVE_API_KEY: fake-brave-key
  SNOWFLAKE_USER: fake-snowflake-user
  SNOWFLAKE_PASSWORD: fake-snowflake-password
  SNOWFLAKE_ACCOUNT: fake-snowflake-account
  SNOWFLAKE_WAREHOUSE: fake-snowflake-warehouse
  SNOWFLAKE_DATABASE: fake-snowflake-database
  SNOWFLAKE_SCHEMA: fake-snowflake-schema
  EMBEDCHAIN_DB_URI: sqlite:///test.db

jobs:
  tests:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    strategy:
      matrix:
        python-version: ['3.10', '3.11', '3.12', '3.13']
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v3
        with:
          enable-cache: true

      - name: Set up Python ${{ matrix.python-version }}
        run: uv python install ${{ matrix.python-version }}

      - name: Install the project
        run: uv sync --all-groups --package crewai-tools --all-extras

      - name: Run tests
        run: uv run pytest packages/tools/tests -vv
```
````
@@ -6,14 +6,16 @@ repos:
entry: uv run ruff check
language: system
types: [python]
files: ^(packages/crewai/src/|packages/tools/src/).*\.py$
- id: ruff-format
name: ruff-format
entry: uv run ruff format
language: system
types: [python]
files: ^(packages/crewai/src/|packages/tools/src/).*\.py$
- id: mypy
name: mypy
entry: uv run mypy
language: system
types: [python]
exclude: ^tests/
exclude: ^(packages/.*/tests/|packages/crewai/src/crewai/cli/templates/)
````
````
@@ -9,7 +9,7 @@ mode: "wide"

## Description

The `RagTool` is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through CrewAI's native RAG system.
The `RagTool` is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through EmbedChain.
It provides a dynamic knowledge base that can be queried to retrieve relevant information from various data sources.
This tool is particularly useful for applications that require access to a vast array of information and need to provide contextually relevant answers.

@@ -76,8 +76,8 @@ The `RagTool` can be used with a wide variety of data sources, including:
The `RagTool` accepts the following parameters:

- **summarize**: Optional. Whether to summarize the retrieved content. Default is `False`.
- **adapter**: Optional. A custom adapter for the knowledge base. If not provided, a CrewAIRagAdapter will be used.
- **config**: Optional. Configuration for the underlying CrewAI RAG system.
- **adapter**: Optional. A custom adapter for the knowledge base. If not provided, an EmbedchainAdapter will be used.
- **config**: Optional. Configuration for the underlying EmbedChain App.

## Adding Content

@@ -130,23 +130,44 @@ from crewai_tools import RagTool

# Create a RAG tool with custom configuration
config = {
    "vectordb": {
        "provider": "qdrant",
    "app": {
        "name": "custom_app",
    },
    "llm": {
        "provider": "openai",
        "config": {
            "collection_name": "my-collection"
            "model": "gpt-4",
        }
    },
    "embedding_model": {
        "provider": "openai",
        "config": {
            "model": "text-embedding-3-small"
            "model": "text-embedding-ada-002"
        }
    },
    "vectordb": {
        "provider": "elasticsearch",
        "config": {
            "collection_name": "my-collection",
            "cloud_id": "deployment-name:xxxx",
            "api_key": "your-key",
            "verify_certs": False
        }
    },
    "chunker": {
        "chunk_size": 400,
        "chunk_overlap": 100,
        "length_function": "len",
        "min_chunk_size": 0
    }
}

rag_tool = RagTool(config=config, summarize=True)
```

The internal RAG tool utilizes the Embedchain adapter, allowing you to pass any configuration options that are supported by Embedchain.
You can refer to the [Embedchain documentation](https://docs.embedchain.ai/components/introduction) for details.
Make sure to review the configuration options available in the .yaml file.

## Conclusion
The `RagTool` provides a powerful way to create and query knowledge bases from various data sources. By leveraging Retrieval-Augmented Generation, it enables agents to access and retrieve relevant information efficiently, enhancing their ability to provide accurate and contextually appropriate responses.
````
packages/crewai/README.md (new file, 777 lines)

<p align="center">
  <a href="https://github.com/crewAIInc/crewAI">
    <img src="docs/images/crewai_logo.png" width="600px" alt="Open source Multi-AI Agent orchestration framework">
  </a>
</p>

<p align="center" style="display: flex; justify-content: center; gap: 20px; align-items: center;">
  <a href="https://trendshift.io/repositories/11239" target="_blank">
    <img src="https://trendshift.io/api/badge/repositories/11239" alt="crewAIInc%2FcrewAI | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/>
  </a>
</p>

<p align="center">
  <a href="https://crewai.com">Homepage</a>
  ·
  <a href="https://docs.crewai.com">Docs</a>
  ·
  <a href="https://app.crewai.com">Start Cloud Trial</a>
  ·
  <a href="https://blog.crewai.com">Blog</a>
  ·
  <a href="https://community.crewai.com">Forum</a>
</p>

<p align="center">
  <a href="https://github.com/crewAIInc/crewAI">
    <img src="https://img.shields.io/github/stars/crewAIInc/crewAI" alt="GitHub Repo stars">
  </a>
  <a href="https://github.com/crewAIInc/crewAI/network/members">
    <img src="https://img.shields.io/github/forks/crewAIInc/crewAI" alt="GitHub forks">
  </a>
  <a href="https://github.com/crewAIInc/crewAI/issues">
    <img src="https://img.shields.io/github/issues/crewAIInc/crewAI" alt="GitHub issues">
  </a>
  <a href="https://github.com/crewAIInc/crewAI/pulls">
    <img src="https://img.shields.io/github/issues-pr/crewAIInc/crewAI" alt="GitHub pull requests">
  </a>
  <a href="https://opensource.org/licenses/MIT">
    <img src="https://img.shields.io/badge/License-MIT-green.svg" alt="License: MIT">
  </a>
</p>

<p align="center">
  <a href="https://pypi.org/project/crewai/">
    <img src="https://img.shields.io/pypi/v/crewai" alt="PyPI version">
  </a>
  <a href="https://pypi.org/project/crewai/">
    <img src="https://img.shields.io/pypi/dm/crewai" alt="PyPI downloads">
  </a>
  <a href="https://twitter.com/crewAIInc">
    <img src="https://img.shields.io/twitter/follow/crewAIInc?style=social" alt="Twitter Follow">
  </a>
</p>

### Fast and Flexible Multi-Agent Automation Framework

> CrewAI is a lean, lightning-fast Python framework built entirely from scratch—completely **independent of LangChain or other agent frameworks**.
> It empowers developers with both high-level simplicity and precise low-level control, ideal for creating autonomous AI agents tailored to any scenario.

- **CrewAI Crews**: Optimize for autonomy and collaborative intelligence.
- **CrewAI Flows**: Enable granular, event-driven control and single LLM calls for precise task orchestration, with native support for Crews.

With over 100,000 developers certified through our community courses at [learn.crewai.com](https://learn.crewai.com), CrewAI is rapidly becoming the
standard for enterprise-ready AI automation.

# CrewAI Enterprise Suite

CrewAI Enterprise Suite is a comprehensive bundle tailored for organizations that require secure, scalable, and easy-to-manage agent-driven automation.

You can try one part of the suite, the [Crew Control Plane](https://app.crewai.com), for free.

## Crew Control Plane Key Features:

- **Tracing & Observability**: Monitor and track your AI agents and workflows in real-time, including metrics, logs, and traces.
- **Unified Control Plane**: A centralized platform for managing, monitoring, and scaling your AI agents and workflows.
- **Seamless Integrations**: Easily connect with existing enterprise systems, data sources, and cloud infrastructure.
- **Advanced Security**: Built-in robust security and compliance measures ensuring safe deployment and management.
- **Actionable Insights**: Real-time analytics and reporting to optimize performance and decision-making.
- **24/7 Support**: Dedicated enterprise support to ensure uninterrupted operation and quick resolution of issues.
- **On-premise and Cloud Deployment Options**: Deploy CrewAI Enterprise on-premise or in the cloud, depending on your security and compliance requirements.

CrewAI Enterprise is designed for enterprises seeking a powerful, reliable solution to transform complex business processes into efficient,
intelligent automations.

## Table of contents

- [Why CrewAI?](#why-crewai)
- [Getting Started](#getting-started)
- [Key Features](#key-features)
- [Understanding Flows and Crews](#understanding-flows-and-crews)
- [CrewAI vs LangGraph](#how-crewai-compares)
- [Examples](#examples)
  - [Quick Tutorial](#quick-tutorial)
  - [Write Job Descriptions](#write-job-descriptions)
  - [Trip Planner](#trip-planner)
  - [Stock Analysis](#stock-analysis)
  - [Using Crews and Flows Together](#using-crews-and-flows-together)
- [Connecting Your Crew to a Model](#connecting-your-crew-to-a-model)
- [How CrewAI Compares](#how-crewai-compares)
- [Frequently Asked Questions (FAQ)](#frequently-asked-questions-faq)
- [Contribution](#contribution)
- [Telemetry](#telemetry)
- [License](#license)

## Why CrewAI?

<div align="center" style="margin-bottom: 30px;">
  <img src="docs/images/asset.png" alt="CrewAI Logo" width="100%">
</div>

CrewAI unlocks the true potential of multi-agent automation, delivering the best-in-class combination of speed, flexibility, and control with either Crews of AI Agents or Flows of Events:

- **Standalone Framework**: Built from scratch, independent of LangChain or any other agent framework.
- **High Performance**: Optimized for speed and minimal resource usage, enabling faster execution.
- **Flexible Low Level Customization**: Complete freedom to customize at both high and low levels - from overall workflows and system architecture to granular agent behaviors, internal prompts, and execution logic.
- **Ideal for Every Use Case**: Proven effective for both simple tasks and highly complex, real-world, enterprise-grade scenarios.
- **Robust Community**: Backed by a rapidly growing community of over **100,000 certified** developers offering comprehensive support and resources.

CrewAI empowers developers and enterprises to confidently build intelligent automations, bridging the gap between simplicity, flexibility, and performance.

## Getting Started

Set up and run your first CrewAI agents by following this tutorial.

[](https://www.youtube.com/watch?v=-kSOTtYzgEw "CrewAI Getting Started Tutorial")

### Learning Resources

Learn CrewAI through our comprehensive courses:

- [Multi AI Agent Systems with CrewAI](https://www.deeplearning.ai/short-courses/multi-ai-agent-systems-with-crewai/) - Master the fundamentals of multi-agent systems
- [Practical Multi AI Agents and Advanced Use Cases](https://www.deeplearning.ai/short-courses/practical-multi-ai-agents-and-advanced-use-cases-with-crewai/) - Deep dive into advanced implementations

### Understanding Flows and Crews

CrewAI offers two powerful, complementary approaches that work seamlessly together to build sophisticated AI applications:

1. **Crews**: Teams of AI agents with true autonomy and agency, working together to accomplish complex tasks through role-based collaboration. Crews enable:

   - Natural, autonomous decision-making between agents
   - Dynamic task delegation and collaboration
   - Specialized roles with defined goals and expertise
   - Flexible problem-solving approaches

2. **Flows**: Production-ready, event-driven workflows that deliver precise control over complex automations. Flows provide:

   - Fine-grained control over execution paths for real-world scenarios
   - Secure, consistent state management between tasks
   - Clean integration of AI agents with production Python code
   - Conditional branching for complex business logic

The true power of CrewAI emerges when combining Crews and Flows. This synergy allows you to:

- Build complex, production-grade applications
- Balance autonomy with precise control
- Handle sophisticated real-world scenarios
- Maintain clean, maintainable code structure

### Getting Started with Installation

To get started with CrewAI, follow these simple steps:

### 1. Installation

Ensure you have Python >=3.10 <3.14 installed on your system. CrewAI uses [UV](https://docs.astral.sh/uv/) for dependency management and package handling, offering a seamless setup and execution experience.

First, install CrewAI:

```shell
pip install crewai
```

If you want to install the 'crewai' package along with its optional features that include additional tools for agents, you can do so by using the following command:

```shell
pip install 'crewai[tools]'
```

The command above installs the basic package and also adds extra components which require more dependencies to function.

### Troubleshooting Dependencies

If you encounter issues during installation or usage, here are some common solutions:

#### Common Issues

1. **ModuleNotFoundError: No module named 'tiktoken'**

   - Install tiktoken explicitly: `pip install 'crewai[embeddings]'`
   - If using embedchain or other tools: `pip install 'crewai[tools]'`

2. **Failed building wheel for tiktoken**

   - Ensure Rust compiler is installed (see installation steps above)
   - For Windows: Verify Visual C++ Build Tools are installed
   - Try upgrading pip: `pip install --upgrade pip`
   - If issues persist, use a pre-built wheel: `pip install tiktoken --prefer-binary`

### 2. Setting Up Your Crew with the YAML Configuration

To create a new CrewAI project, run the following CLI (Command Line Interface) command:

```shell
crewai create crew <project_name>
```

This command creates a new project folder with the following structure:

```
my_project/
├── .gitignore
├── pyproject.toml
├── README.md
├── .env
└── src/
    └── my_project/
        ├── __init__.py
        ├── main.py
        ├── crew.py
        ├── tools/
        │   ├── custom_tool.py
        │   └── __init__.py
        └── config/
            ├── agents.yaml
            └── tasks.yaml
```

You can now start developing your crew by editing the files in the `src/my_project` folder. The `main.py` file is the entry point of the project, the `crew.py` file is where you define your crew, the `agents.yaml` file is where you define your agents, and the `tasks.yaml` file is where you define your tasks.

#### To customize your project, you can:

- Modify `src/my_project/config/agents.yaml` to define your agents.
- Modify `src/my_project/config/tasks.yaml` to define your tasks.
- Modify `src/my_project/crew.py` to add your own logic, tools, and specific arguments.
- Modify `src/my_project/main.py` to add custom inputs for your agents and tasks.
- Add your environment variables into the `.env` file.

#### Example of a simple crew with a sequential process:

Instantiate your crew:

```shell
crewai create crew latest-ai-development
```

Modify the files as needed to fit your use case:

**agents.yaml**

```yaml
# src/my_project/config/agents.yaml
researcher:
  role: >
    {topic} Senior Data Researcher
  goal: >
    Uncover cutting-edge developments in {topic}
  backstory: >
    You're a seasoned researcher with a knack for uncovering the latest
    developments in {topic}. Known for your ability to find the most relevant
    information and present it in a clear and concise manner.

reporting_analyst:
  role: >
    {topic} Reporting Analyst
  goal: >
    Create detailed reports based on {topic} data analysis and research findings
  backstory: >
    You're a meticulous analyst with a keen eye for detail. You're known for
    your ability to turn complex data into clear and concise reports, making
    it easy for others to understand and act on the information you provide.
```

**tasks.yaml**

```yaml
# src/my_project/config/tasks.yaml
research_task:
  description: >
    Conduct a thorough research about {topic}
    Make sure you find any interesting and relevant information given
    the current year is 2025.
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}
  agent: researcher

reporting_task:
  description: >
    Review the context you got and expand each topic into a full section for a report.
    Make sure the report is detailed and contains any and all relevant information.
  expected_output: >
    A fully fledged report with the main topics, each with a full section of information.
    Formatted as markdown without '```'
  agent: reporting_analyst
  output_file: report.md
```

**crew.py**

```python
# src/my_project/crew.py
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from crewai_tools import SerperDevTool
from crewai.agents.agent_builder.base_agent import BaseAgent
from typing import List

@CrewBase
class LatestAiDevelopmentCrew():
    """LatestAiDevelopment crew"""

    agents: List[BaseAgent]
    tasks: List[Task]

    @agent
    def researcher(self) -> Agent:
        return Agent(
            config=self.agents_config['researcher'],
            verbose=True,
            tools=[SerperDevTool()]
        )

    @agent
    def reporting_analyst(self) -> Agent:
        return Agent(
            config=self.agents_config['reporting_analyst'],
            verbose=True
        )

    @task
    def research_task(self) -> Task:
        return Task(
            config=self.tasks_config['research_task'],
        )

    @task
    def reporting_task(self) -> Task:
        return Task(
            config=self.tasks_config['reporting_task'],
            output_file='report.md'
        )

    @crew
    def crew(self) -> Crew:
        """Creates the LatestAiDevelopment crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
```

**main.py**

```python
#!/usr/bin/env python
# src/my_project/main.py
import sys
from latest_ai_development.crew import LatestAiDevelopmentCrew

def run():
    """
    Run the crew.
    """
    inputs = {
        'topic': 'AI Agents'
    }
    LatestAiDevelopmentCrew().crew().kickoff(inputs=inputs)
```

### 3. Running Your Crew

Before running your crew, make sure you have the following keys set as environment variables in your `.env` file:

- An [OpenAI API key](https://platform.openai.com/account/api-keys) (or other LLM API key): `OPENAI_API_KEY=sk-...`
- A [Serper.dev](https://serper.dev/) API key: `SERPER_API_KEY=YOUR_KEY_HERE`

Lock the dependencies and install them by using the CLI command, but first navigate to your project directory:

```shell
cd my_project
crewai install (Optional)
```

To run your crew, execute the following command in the root of your project:

```bash
crewai run
```

or

```bash
python src/my_project/main.py
```

If an error happens due to the usage of poetry, please run the following command to update your crewai package:

```bash
crewai update
```

You should see the output in the console and the `report.md` file should be created in the root of your project with the full final report.

In addition to the sequential process, you can use the hierarchical process, which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results. [See more about the processes here](https://docs.crewai.com/core-concepts/Processes/).
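As a rough sketch, switching the `crew()` method above to the hierarchical process might look like the following. The `manager_llm` argument and the model name are assumptions for illustration; check the processes documentation linked above for the exact options your installed version supports.

```python
# Hypothetical variation of the crew() method shown earlier: hierarchical instead of sequential.
@crew
def crew(self) -> Crew:
    """Creates the crew with a manager that delegates and validates results."""
    return Crew(
        agents=self.agents,
        tasks=self.tasks,
        process=Process.hierarchical,  # a manager coordinates planning and delegation
        manager_llm="gpt-4o",          # assumed: the manager needs its own LLM
        verbose=True,
    )
```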
## Key Features

CrewAI stands apart as a lean, standalone, high-performance multi-AI Agent framework delivering simplicity, flexibility, and precise control—free from the complexity and limitations found in other agent frameworks.

- **Standalone & Lean**: Completely independent from other frameworks like LangChain, offering faster execution and lighter resource demands.
- **Flexible & Precise**: Easily orchestrate autonomous agents through intuitive [Crews](https://docs.crewai.com/concepts/crews) or precise [Flows](https://docs.crewai.com/concepts/flows), achieving perfect balance for your needs.
- **Seamless Integration**: Effortlessly combine Crews (autonomy) and Flows (precision) to create complex, real-world automations.
- **Deep Customization**: Tailor every aspect—from high-level workflows down to low-level internal prompts and agent behaviors.
- **Reliable Performance**: Consistent results across simple tasks and complex, enterprise-level automations.
- **Thriving Community**: Backed by robust documentation and over 100,000 certified developers, providing exceptional support and guidance.

Choose CrewAI to easily build powerful, adaptable, and production-ready AI automations.

## Examples

You can test different real life examples of AI crews in the [CrewAI-examples repo](https://github.com/crewAIInc/crewAI-examples?tab=readme-ov-file):

- [Landing Page Generator](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/landing_page_generator)
- [Having Human input on the execution](https://docs.crewai.com/how-to/Human-Input-on-Execution)
- [Trip Planner](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/trip_planner)
- [Stock Analysis](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/stock_analysis)

### Quick Tutorial

[](https://www.youtube.com/watch?v=tnejrr-0a94 "CrewAI Tutorial")

### Write Job Descriptions

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/job-posting) or watch a video below:

[](https://www.youtube.com/watch?v=u98wEMz-9to "Jobs postings")

### Trip Planner

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/trip_planner) or watch a video below:

[](https://www.youtube.com/watch?v=xis7rWp-hjs "Trip Planner")

### Stock Analysis

[Check out code for this example](https://github.com/crewAIInc/crewAI-examples/tree/main/crews/stock_analysis) or watch a video below:

[](https://www.youtube.com/watch?v=e0Uj4yWdaAg "Stock Analysis")

### Using Crews and Flows Together

CrewAI's power truly shines when combining Crews with Flows to create sophisticated automation pipelines.
CrewAI flows support logical operators like `or_` and `and_` to combine multiple conditions. These can be used with the `@start`, `@listen`, or `@router` decorators to create complex triggering conditions.

- `or_`: Triggers when any of the specified conditions are met.
- `and_`: Triggers when all of the specified conditions are met.

Here's how you can orchestrate multiple Crews within a Flow:

```python
from crewai.flow.flow import Flow, listen, start, router, or_
from crewai import Crew, Agent, Task, Process
from pydantic import BaseModel

# Define structured state for precise control
class MarketState(BaseModel):
    sentiment: str = "neutral"
    confidence: float = 0.0
    recommendations: list = []

class AdvancedAnalysisFlow(Flow[MarketState]):
    @start()
    def fetch_market_data(self):
        # Demonstrate low-level control with structured state
        self.state.sentiment = "analyzing"
        return {"sector": "tech", "timeframe": "1W"}  # These parameters match the task description template

    @listen(fetch_market_data)
    def analyze_with_crew(self, market_data):
        # Show crew agency through specialized roles
        analyst = Agent(
            role="Senior Market Analyst",
            goal="Conduct deep market analysis with expert insight",
            backstory="You're a veteran analyst known for identifying subtle market patterns"
        )
        researcher = Agent(
            role="Data Researcher",
            goal="Gather and validate supporting market data",
            backstory="You excel at finding and correlating multiple data sources"
        )

        analysis_task = Task(
            description="Analyze {sector} sector data for the past {timeframe}",
            expected_output="Detailed market analysis with confidence score",
            agent=analyst
        )
        research_task = Task(
            description="Find supporting data to validate the analysis",
            expected_output="Corroborating evidence and potential contradictions",
            agent=researcher
        )

        # Demonstrate crew autonomy
        analysis_crew = Crew(
            agents=[analyst, researcher],
            tasks=[analysis_task, research_task],
            process=Process.sequential,
            verbose=True
        )
        return analysis_crew.kickoff(inputs=market_data)  # Pass market_data as named inputs

    @router(analyze_with_crew)
    def determine_next_steps(self):
        # Show flow control with conditional routing
        if self.state.confidence > 0.8:
            return "high_confidence"
        elif self.state.confidence > 0.5:
            return "medium_confidence"
        return "low_confidence"

    @listen("high_confidence")
    def execute_strategy(self):
        # Demonstrate complex decision making
        strategy_crew = Crew(
            agents=[
                Agent(role="Strategy Expert",
                      goal="Develop optimal market strategy")
            ],
            tasks=[
                Task(description="Create detailed strategy based on analysis",
                     expected_output="Step-by-step action plan")
            ]
        )
        return strategy_crew.kickoff()

    @listen(or_("medium_confidence", "low_confidence"))
    def request_additional_analysis(self):
        self.state.recommendations.append("Gather more data")
        return "Additional analysis required"
```

This example demonstrates how to:

1. Use Python code for basic data operations
2. Create and execute Crews as steps in your workflow
3. Use Flow decorators to manage the sequence of operations
4. Implement conditional branching based on Crew results

## Connecting Your Crew to a Model

CrewAI supports using various LLMs through a variety of connection options. By default your agents will use the OpenAI API when querying the model. However, there are several other ways to allow your agents to connect to models. For example, you can configure your agents to use a local model via the Ollama tool.

Please refer to the [Connect CrewAI to LLMs](https://docs.crewai.com/how-to/LLM-Connections/) page for details on configuring your agents' connections to models.
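As an illustration, a minimal sketch of pointing an agent at a local Ollama model could look like the snippet below. The model name and `base_url` are assumptions for a default local Ollama setup; see the LLM Connections page above for the authoritative options.

```python
from crewai import Agent, LLM

# Sketch only: assumes an Ollama server running locally with the llama3.1 model pulled.
local_llm = LLM(
    model="ollama/llama3.1",
    base_url="http://localhost:11434",
)

researcher = Agent(
    role="Researcher",
    goal="Summarize recent findings",
    backstory="A careful analyst.",
    llm=local_llm,  # the agent now queries the local model instead of the OpenAI API
)
```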
## How CrewAI Compares

**CrewAI's Advantage**: CrewAI combines autonomous agent intelligence with precise workflow control through its unique Crews and Flows architecture. The framework excels at both high-level orchestration and low-level customization, enabling complex, production-grade systems with granular control.

- **LangGraph**: While LangGraph provides a foundation for building agent workflows, its approach requires significant boilerplate code and complex state management patterns. The framework's tight coupling with LangChain can limit flexibility when implementing custom agent behaviors or integrating with external systems.

*P.S. CrewAI demonstrates significant performance advantages over LangGraph, executing 5.76x faster in certain cases like this QA task example ([see comparison](https://github.com/crewAIInc/crewAI-examples/tree/main/Notebooks/CrewAI%20Flows%20%26%20Langgraph/QA%20Agent)) while achieving higher evaluation scores with faster completion times in certain coding tasks, like in this example ([detailed analysis](https://github.com/crewAIInc/crewAI-examples/blob/main/Notebooks/CrewAI%20Flows%20%26%20Langgraph/Coding%20Assistant/coding_assistant_eval.ipynb)).*

- **Autogen**: While Autogen excels at creating conversational agents capable of working together, it lacks an inherent concept of process. In Autogen, orchestrating agents' interactions requires additional programming, which can become complex and cumbersome as the scale of tasks grows.
- **ChatDev**: ChatDev introduced the idea of processes into the realm of AI agents, but its implementation is quite rigid. Customizations in ChatDev are limited and not geared towards production environments, which can hinder scalability and flexibility in real-world applications.

## Contribution

CrewAI is open-source and we welcome contributions. If you're looking to contribute, please:

- Fork the repository.
- Create a new branch for your feature.
- Add your feature or improvement.
- Send a pull request.
- We appreciate your input!

### Installing Dependencies

```bash
uv lock
uv sync
```

### Virtual Env

```bash
uv venv
```

### Pre-commit hooks

```bash
pre-commit install
```

### Running Tests

```bash
uv run pytest .
```

### Running static type checks

```bash
uvx mypy src
```

### Packaging

```bash
uv build
```

### Installing Locally

```bash
pip install dist/*.tar.gz
```

## Telemetry

CrewAI uses anonymous telemetry to collect usage data with the main purpose of helping us improve the library by focusing our efforts on the most used features, integrations and tools.

It's pivotal to understand that **NO data is collected** concerning prompts, task descriptions, agents' backstories or goals, usage of tools, API calls, responses, any data processed by the agents, or secrets and environment variables, with the exception of the conditions mentioned. When the `share_crew` feature is enabled, detailed data including task descriptions, agents' backstories or goals, and other specific attributes are collected to provide deeper insights while respecting user privacy. Users can disable telemetry by setting the environment variable OTEL_SDK_DISABLED to true.

Data collected includes:

- Version of CrewAI
  - So we can understand how many users are using the latest version
- Version of Python
  - So we can decide on what versions to better support
- General OS (e.g. number of CPUs, macOS/Windows/Linux)
  - So we know what OS we should focus on and if we could build specific OS related features
- Number of agents and tasks in a crew
  - So we make sure we are testing internally with similar use cases and educate people on the best practices
- Crew Process being used
  - Understand where we should focus our efforts
- If Agents are using memory or allowing delegation
  - Understand if we improved the features or maybe even drop them
- If Tasks are being executed in parallel or sequentially
  - Understand if we should focus more on parallel execution
- Language model being used
  - Improved support on most used languages
- Roles of agents in a crew
  - Understand high level use cases so we can build better tools, integrations and examples about it
- Tools names available
  - Understand out of the publicly available tools, which ones are being used the most so we can improve them

Users can opt-in to Further Telemetry, sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.
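For example, a minimal sketch of opting out before CrewAI is imported (the environment variable name is taken from the paragraph above; you can equally export it in your shell):

```python
import os

# Disable anonymous telemetry; must be set before CrewAI is imported.
os.environ["OTEL_SDK_DISABLED"] = "true"
```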
## License

CrewAI is released under the [MIT License](https://github.com/crewAIInc/crewAI/blob/main/LICENSE).

## Frequently Asked Questions (FAQ)

### General

- [What exactly is CrewAI?](#q-what-exactly-is-crewai)
- [How do I install CrewAI?](#q-how-do-i-install-crewai)
- [Does CrewAI depend on LangChain?](#q-does-crewai-depend-on-langchain)
- [Is CrewAI open-source?](#q-is-crewai-open-source)
- [Does CrewAI collect data from users?](#q-does-crewai-collect-data-from-users)

### Features and Capabilities

- [Can CrewAI handle complex use cases?](#q-can-crewai-handle-complex-use-cases)
- [Can I use CrewAI with local AI models?](#q-can-i-use-crewai-with-local-ai-models)
- [What makes Crews different from Flows?](#q-what-makes-crews-different-from-flows)
- [How is CrewAI better than LangChain?](#q-how-is-crewai-better-than-langchain)
- [Does CrewAI support fine-tuning or training custom models?](#q-does-crewai-support-fine-tuning-or-training-custom-models)

### Resources and Community

- [Where can I find real-world CrewAI examples?](#q-where-can-i-find-real-world-crewai-examples)
- [How can I contribute to CrewAI?](#q-how-can-i-contribute-to-crewai)

### Enterprise Features

- [What additional features does CrewAI Enterprise offer?](#q-what-additional-features-does-crewai-enterprise-offer)
- [Is CrewAI Enterprise available for cloud and on-premise deployments?](#q-is-crewai-enterprise-available-for-cloud-and-on-premise-deployments)
- [Can I try CrewAI Enterprise for free?](#q-can-i-try-crewai-enterprise-for-free)

### Q: What exactly is CrewAI?

A: CrewAI is a standalone, lean, and fast Python framework built specifically for orchestrating autonomous AI agents. Unlike frameworks like LangChain, CrewAI does not rely on external dependencies, making it leaner, faster, and simpler.

### Q: How do I install CrewAI?

A: Install CrewAI using pip:

```shell
pip install crewai
```

For additional tools, use:

```shell
pip install 'crewai[tools]'
```

### Q: Does CrewAI depend on LangChain?

A: No. CrewAI is built entirely from the ground up, with no dependencies on LangChain or other agent frameworks. This ensures a lean, fast, and flexible experience.

### Q: Can CrewAI handle complex use cases?

A: Yes. CrewAI excels at both simple and highly complex real-world scenarios, offering deep customization options at both high and low levels, from internal prompts to sophisticated workflow orchestration.

### Q: Can I use CrewAI with local AI models?

A: Absolutely! CrewAI supports various language models, including local ones. Tools like Ollama and LM Studio allow seamless integration. Check the [LLM Connections documentation](https://docs.crewai.com/how-to/LLM-Connections/) for more details.

### Q: What makes Crews different from Flows?

A: Crews provide autonomous agent collaboration, ideal for tasks requiring flexible decision-making and dynamic interaction. Flows offer precise, event-driven control, ideal for managing detailed execution paths and secure state management. You can seamlessly combine both for maximum effectiveness.

### Q: How is CrewAI better than LangChain?

A: CrewAI provides simpler, more intuitive APIs, faster execution speeds, more reliable and consistent results, robust documentation, and an active community—addressing common criticisms and limitations associated with LangChain.

### Q: Is CrewAI open-source?

A: Yes, CrewAI is open-source and actively encourages community contributions and collaboration.

### Q: Does CrewAI collect data from users?

A: CrewAI collects anonymous telemetry data strictly for improvement purposes. Sensitive data such as prompts, tasks, or API responses are never collected unless explicitly enabled by the user.

### Q: Where can I find real-world CrewAI examples?

A: Check out practical examples in the [CrewAI-examples repository](https://github.com/crewAIInc/crewAI-examples), covering use cases like trip planners, stock analysis, and job postings.

### Q: How can I contribute to CrewAI?

A: Contributions are warmly welcomed! Fork the repository, create your branch, implement your changes, and submit a pull request. See the Contribution section of the README for detailed guidelines.

### Q: What additional features does CrewAI Enterprise offer?

A: CrewAI Enterprise provides advanced features such as a unified control plane, real-time observability, secure integrations, advanced security, actionable insights, and dedicated 24/7 enterprise support.

### Q: Is CrewAI Enterprise available for cloud and on-premise deployments?

A: Yes, CrewAI Enterprise supports both cloud-based and on-premise deployment options, allowing enterprises to meet their specific security and compliance requirements.

### Q: Can I try CrewAI Enterprise for free?

A: Yes, you can explore part of the CrewAI Enterprise Suite by accessing the [Crew Control Plane](https://app.crewai.com) for free.

### Q: Does CrewAI support fine-tuning or training custom models?

A: Yes, CrewAI can integrate with custom-trained or fine-tuned models, allowing you to enhance your agents with domain-specific knowledge and accuracy.

### Q: Can CrewAI agents interact with external tools and APIs?

A: Absolutely! CrewAI agents can easily integrate with external tools, APIs, and databases, empowering them to leverage real-world data and resources.

### Q: Is CrewAI suitable for production environments?

A: Yes, CrewAI is explicitly designed with production-grade standards, ensuring reliability, stability, and scalability for enterprise deployments.

### Q: How scalable is CrewAI?

A: CrewAI is highly scalable, supporting simple automations and large-scale enterprise workflows involving numerous agents and complex tasks simultaneously.

### Q: Does CrewAI offer debugging and monitoring tools?

A: Yes, CrewAI Enterprise includes advanced debugging, tracing, and real-time observability features, simplifying the management and troubleshooting of your automations.

### Q: What programming languages does CrewAI support?

A: CrewAI is primarily Python-based but easily integrates with services and APIs written in any programming language through its flexible API integration capabilities.

### Q: Does CrewAI offer educational resources for beginners?

A: Yes, CrewAI provides extensive beginner-friendly tutorials, courses, and documentation through learn.crewai.com, supporting developers at all skill levels.

### Q: Can CrewAI automate human-in-the-loop workflows?

A: Yes, CrewAI fully supports human-in-the-loop workflows, allowing seamless collaboration between human experts and AI agents for enhanced decision-making.
packages/crewai/docs/en/tools/ai-ml/ragtool.mdx (new file, 152 lines)

---
title: RAG Tool
description: The `RagTool` is a dynamic knowledge base tool for answering questions using Retrieval-Augmented Generation.
icon: vector-square
mode: "wide"
---

# `RagTool`

## Description

The `RagTool` is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through CrewAI's native RAG system.
It provides a dynamic knowledge base that can be queried to retrieve relevant information from various data sources.
This tool is particularly useful for applications that require access to a vast array of information and need to provide contextually relevant answers.

## Example

The following example demonstrates how to initialize the tool and use it with different data sources:

```python Code
from crewai_tools import RagTool

# Create a RAG tool with default settings
rag_tool = RagTool()

# Add content from a file
rag_tool.add(data_type="file", path="path/to/your/document.pdf")

# Add content from a web page
rag_tool.add(data_type="web_page", url="https://example.com")

# Define an agent with the RagTool
@agent
def knowledge_expert(self) -> Agent:
    '''
    This agent uses the RagTool to answer questions about the knowledge base.
    '''
    return Agent(
        config=self.agents_config["knowledge_expert"],
        allow_delegation=False,
        tools=[rag_tool]
    )
```

## Supported Data Sources

The `RagTool` can be used with a wide variety of data sources, including:

- 📰 PDF files
- 📊 CSV files
- 📃 JSON files
- 📝 Text
- 📁 Directories/Folders
- 🌐 HTML Web pages
- 📽️ YouTube Channels
- 📺 YouTube Videos
- 📚 Documentation websites
- 📝 MDX files
- 📄 DOCX files
- 🧾 XML files
- 📬 Gmail
- 📝 GitHub repositories
- 🐘 PostgreSQL databases
- 🐬 MySQL databases
- 🤖 Slack conversations
- 💬 Discord messages
- 🗨️ Discourse forums
- 📝 Substack newsletters
- 🐝 Beehiiv content
- 💾 Dropbox files
- 🖼️ Images
- ⚙️ Custom data sources

## Parameters

The `RagTool` accepts the following parameters (a short sketch combining them follows this list):

- **summarize**: Optional. Whether to summarize the retrieved content. Default is `False`.
- **adapter**: Optional. A custom adapter for the knowledge base. If not provided, a CrewAIRagAdapter will be used.
- **config**: Optional. Configuration for the underlying CrewAI RAG system.
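A minimal sketch tying these parameters together, reusing the configuration keys shown in the Advanced Configuration section below; treat the exact config values as illustrative rather than required defaults:

```python Code
from crewai_tools import RagTool

# Sketch: pass a config dict for the CrewAI RAG system and enable summarization.
rag_tool = RagTool(
    config={
        "embedding_model": {
            "provider": "openai",
            "config": {"model": "text-embedding-3-small"},
        }
    },
    summarize=True,  # summarize retrieved content before returning it to the agent
)
```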
## Adding Content

You can add content to the knowledge base using the `add` method:

```python Code
# Add a PDF file
rag_tool.add(data_type="file", path="path/to/your/document.pdf")

# Add a web page
rag_tool.add(data_type="web_page", url="https://example.com")

# Add a YouTube video
rag_tool.add(data_type="youtube_video", url="https://www.youtube.com/watch?v=VIDEO_ID")

# Add a directory of files
rag_tool.add(data_type="directory", path="path/to/your/directory")
```

## Agent Integration Example

Here's how to integrate the `RagTool` with a CrewAI agent:

```python Code
from crewai import Agent
from crewai.project import agent
from crewai_tools import RagTool

# Initialize the tool and add content
rag_tool = RagTool()
rag_tool.add(data_type="web_page", url="https://docs.crewai.com")
rag_tool.add(data_type="file", path="company_data.pdf")

# Define an agent with the RagTool
@agent
def knowledge_expert(self) -> Agent:
    return Agent(
        config=self.agents_config["knowledge_expert"],
        allow_delegation=False,
        tools=[rag_tool]
    )
```

## Advanced Configuration

You can customize the behavior of the `RagTool` by providing a configuration dictionary:

```python Code
from crewai_tools import RagTool

# Create a RAG tool with custom configuration
config = {
    "vectordb": {
        "provider": "qdrant",
        "config": {
            "collection_name": "my-collection"
        }
    },
    "embedding_model": {
        "provider": "openai",
        "config": {
            "model": "text-embedding-3-small"
        }
    }
}

rag_tool = RagTool(config=config, summarize=True)
```

## Conclusion
The `RagTool` provides a powerful way to create and query knowledge bases from various data sources. By leveraging Retrieval-Augmented Generation, it enables agents to access and retrieve relevant information efficiently, enhancing their ability to provide accurate and contextually appropriate responses.
packages/crewai/pyproject.toml (new file, 131 lines)

```toml
[project]
name = "crewai-core"
dynamic = ["version"]
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<3.14"
authors = [
    { name = "Joao Moura", email = "joao@crewai.com" }
]
dependencies = [
    # Core Dependencies
    "pydantic>=2.6.1",
    "openai>=1.13.3",
    "litellm==1.74.9",
    "instructor>=1.3.3",
    # Text Processing
    "pdfplumber>=0.11.4",
    "regex>=2024.9.11",
    # Telemetry and Monitoring
    "opentelemetry-api>=1.30.0",
    "opentelemetry-sdk>=1.30.0",
    "opentelemetry-exporter-otlp-proto-http>=1.30.0",
    # Data Handling
    "chromadb>=0.5.23",
    "tokenizers>=0.20.3",
    "onnxruntime==1.22.0",
    "openpyxl>=3.1.5",
    "pyvis>=0.3.2",
    # Authentication and Security
    "python-dotenv>=1.0.0",
    "pyjwt>=2.9.0",
    # Configuration and Utils
    "click>=8.1.8",
    "appdirs>=1.4.4",
    "jsonref>=1.1.0",
    "json-repair==0.25.2",
    "uv>=0.4.25",
    "tomli-w>=1.1.0",
    "tomli>=2.0.2",
    "blinker>=1.9.0",
    "json5>=0.10.0",
    "portalocker==2.7.0",
]

[project.optional-dependencies]
tools = ["crewai-tools"]
embeddings = [
    "tiktoken~=0.8.0"
]
pdfplumber = [
    "pdfplumber>=0.11.4",
]
pandas = [
    "pandas>=2.2.3",
]
openpyxl = [
    "openpyxl>=3.1.5",
]
mem0 = ["mem0ai>=0.1.94"]
docling = [
    "docling>=2.12.0",
]
aisuite = [
    "aisuite>=0.1.10",
]
qdrant = [
    "qdrant-client[fastembed]>=1.14.3",
]

[project.scripts]
crewai = "crewai.cli.cli:crewai"

[tool.ruff]
exclude = [
    "src/crewai/cli/templates",
]
fix = true

[tool.ruff.lint]
select = [
    "E",      # pycodestyle errors (style issues)
    "F",      # Pyflakes (code errors)
    "B",      # flake8-bugbear (bug prevention)
    "S",      # bandit (security issues)
    "RUF",    # ruff-specific rules
    "N",      # pep8-naming (naming conventions)
    "W",      # pycodestyle warnings
    "PERF",   # performance issues
    "PIE",    # flake8-pie (unnecessary code)
    "ASYNC",  # async/await best practices
    "RET",    # flake8-return (return improvements)
    "UP006",  # use collections.abc
    "UP007",  # use X | Y for unions
    "UP035",  # use dict/list instead of typing.Dict/List
    "UP037",  # remove quotes from type annotations
    "UP045",  # use X | None instead of Optional[X]
    "UP004",  # use isinstance instead of type
    "UP008",  # use super() instead of super(Class, self)
    "UP010",  # use isinstance for type checks
    "UP018",  # use str() instead of "string"
    "UP031",  # use f-strings for .format()
    "UP032",  # use f-strings for .format() with positional
    "I001",   # sort imports
    "I002",   # remove unused imports
]
ignore = ["E501"]  # ignore line too long globally

[tool.ruff.lint.per-file-ignores]
"tests/**/*.py" = ["S101", "RET504"]  # Allow assert statements and unnecessary assignments before return in tests

[tool.mypy]
exclude = ["src/crewai/cli/templates", "tests/"]

[tool.bandit]
exclude_dirs = ["src/crewai/cli/templates"]

[tool.pytest.ini_options]
testpaths = ["tests"]
markers = [
    "telemetry: mark test as a telemetry test (don't mock telemetry)",
]

[tool.hatch.version]
path = "src/crewai/__init__.py"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["src/crewai"]
```
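To illustrate what the `UP006`/`UP007`/`UP035`/`UP045` rules selected above enforce, here is an illustrative snippet (not code from the repository): annotations move from `typing` aliases to built-in generics and PEP 604 unions, which is also the style the import and field diff below adopts.

```python
from typing import Dict, List, Optional, Union

# Before: typing aliases flagged by UP006/UP007/UP035/UP045
def load_old(paths: List[str], options: Optional[Dict[str, str]] = None) -> Union[str, None]:
    return None

# After: built-in generics and `X | None` unions, no typing aliases needed
def load_new(paths: list[str], options: dict[str, str] | None = None) -> str | None:
    return None
```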
````
@@ -1,17 +1,10 @@
import shutil
import subprocess
import time
from collections.abc import Callable, Sequence
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
)

from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -19,6 +12,24 @@ from pydantic import Field, InstanceOf, PrivateAttr, model_validator

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
from crewai.events.types.knowledge_events import (
KnowledgeQueryCompletedEvent,
KnowledgeQueryFailedEvent,
KnowledgeQueryStartedEvent,
KnowledgeRetrievalCompletedEvent,
KnowledgeRetrievalStartedEvent,
KnowledgeSearchQueryFailedEvent,
)
from crewai.events.types.memory_events import (
MemoryRetrievalCompletedEvent,
MemoryRetrievalStartedEvent,
)
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
@@ -38,24 +49,6 @@ from crewai.utilities.agent_utils import (
)
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.converter import generate_model_description
from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionErrorEvent,
AgentExecutionStartedEvent,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.memory_events import (
MemoryRetrievalStartedEvent,
MemoryRetrievalCompletedEvent,
)
from crewai.events.types.knowledge_events import (
KnowledgeQueryCompletedEvent,
KnowledgeQueryFailedEvent,
KnowledgeQueryStartedEvent,
KnowledgeRetrievalCompletedEvent,
KnowledgeRetrievalStartedEvent,
KnowledgeSearchQueryFailedEvent,
)
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler
@@ -87,36 +80,36 @@ class Agent(BaseAgent):
"""

_times_executed: int = PrivateAttr(default=0)
max_execution_time: Optional[int] = Field(
max_execution_time: int | None = Field(
default=None,
description="Maximum execution time for an agent to execute a task",
)
agent_ops_agent_name: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
agent_ops_agent_id: str = None  # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
step_callback: Optional[Any] = Field(
step_callback: Any | None = Field(
default=None,
description="Callback to be executed after each step of the agent execution.",
)
use_system_prompt: Optional[bool] = Field(
use_system_prompt: bool | None = Field(
default=True,
description="Use system prompt for the agent.",
)
llm: Union[str, InstanceOf[BaseLLM], Any] = Field(
````
|
||||
llm: str | InstanceOf[BaseLLM] | Any = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
function_calling_llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
|
||||
function_calling_llm: str | InstanceOf[BaseLLM] | Any | None = Field(
|
||||
description="Language model that will run the agent.", default=None
|
||||
)
|
||||
system_template: Optional[str] = Field(
|
||||
system_template: str | None = Field(
|
||||
default=None, description="System format for the agent."
|
||||
)
|
||||
prompt_template: Optional[str] = Field(
|
||||
prompt_template: str | None = Field(
|
||||
default=None, description="Prompt format for the agent."
|
||||
)
|
||||
response_template: Optional[str] = Field(
|
||||
response_template: str | None = Field(
|
||||
default=None, description="Response format for the agent."
|
||||
)
|
||||
allow_code_execution: Optional[bool] = Field(
|
||||
allow_code_execution: bool | None = Field(
|
||||
default=False, description="Enable code execution for the agent."
|
||||
)
|
||||
respect_context_window: bool = Field(
|
||||
@@ -147,31 +140,31 @@ class Agent(BaseAgent):
|
||||
default=False,
|
||||
description="Whether the agent should reflect and create a plan before executing a task.",
|
||||
)
|
||||
max_reasoning_attempts: Optional[int] = Field(
|
||||
max_reasoning_attempts: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum number of reasoning attempts before executing the task. If None, will try until ready.",
|
||||
)
|
||||
embedder: Optional[Dict[str, Any]] = Field(
|
||||
embedder: dict[str, Any] | None = Field(
|
||||
default=None,
|
||||
description="Embedder configuration for the agent.",
|
||||
)
|
||||
agent_knowledge_context: Optional[str] = Field(
|
||||
agent_knowledge_context: str | None = Field(
|
||||
default=None,
|
||||
description="Knowledge context for the agent.",
|
||||
)
|
||||
crew_knowledge_context: Optional[str] = Field(
|
||||
crew_knowledge_context: str | None = Field(
|
||||
default=None,
|
||||
description="Knowledge context for the crew.",
|
||||
)
|
||||
knowledge_search_query: Optional[str] = Field(
|
||||
knowledge_search_query: str | None = Field(
|
||||
default=None,
|
||||
description="Knowledge search query for the agent dynamically generated by the agent.",
|
||||
)
|
||||
from_repository: Optional[str] = Field(
|
||||
from_repository: str | None = Field(
|
||||
default=None,
|
||||
description="The Agent's role to be used from your repository.",
|
||||
)
|
||||
guardrail: Optional[Union[Callable[[Any], Tuple[bool, Any]], str]] = Field(
|
||||
guardrail: Callable[[Any], tuple[bool, Any]] | str | None = Field(
|
||||
default=None,
|
||||
description="Function or string description of a guardrail to validate agent output",
|
||||
)
|
||||
@@ -180,7 +173,7 @@ class Agent(BaseAgent):
|
||||
)
|
||||
|
||||
@model_validator(mode="before")
|
||||
def validate_from_repository(cls, v):
|
||||
def validate_from_repository(self, v):
|
||||
if v is not None and (from_repository := v.get("from_repository")):
|
||||
return load_agent_from_repository(from_repository) | v
|
||||
return v
|
||||
@@ -208,7 +201,7 @@ class Agent(BaseAgent):
|
||||
self.cache_handler = CacheHandler()
|
||||
self.set_cache_handler(self.cache_handler)
|
||||
|
||||
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
||||
def set_knowledge(self, crew_embedder: dict[str, Any] | None = None):
|
||||
try:
|
||||
if self.embedder is None and crew_embedder:
|
||||
self.embedder = crew_embedder
|
||||
@@ -224,7 +217,7 @@ class Agent(BaseAgent):
|
||||
)
|
||||
self.knowledge.add_sources()
|
||||
except (TypeError, ValueError) as e:
|
||||
raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
|
||||
raise ValueError(f"Invalid Knowledge Configuration: {e!s}") from e
|
||||
|
||||
def _is_any_available_memory(self) -> bool:
|
||||
"""Check if any memory is available."""
|
||||
@@ -244,8 +237,8 @@ class Agent(BaseAgent):
|
||||
def execute_task(
|
||||
self,
|
||||
task: Task,
|
||||
context: Optional[str] = None,
|
||||
tools: Optional[List[BaseTool]] = None,
|
||||
context: str | None = None,
|
||||
tools: list[BaseTool] | None = None,
|
||||
) -> str:
|
||||
"""Execute a task with the agent.
|
||||
|
||||
@@ -278,11 +271,9 @@ class Agent(BaseAgent):
|
||||
task.description += f"\n\nReasoning Plan:\n{reasoning_output.plan.plan}"
|
||||
except Exception as e:
|
||||
if hasattr(self, "_logger"):
|
||||
self._logger.log(
|
||||
"error", f"Error during reasoning process: {str(e)}"
|
||||
)
|
||||
self._logger.log("error", f"Error during reasoning process: {e!s}")
|
||||
else:
|
||||
print(f"Error during reasoning process: {str(e)}")
|
||||
print(f"Error during reasoning process: {e!s}")
|
||||
|
||||
self._inject_date_to_task(task)
|
||||
|
||||
@@ -525,14 +516,14 @@ class Agent(BaseAgent):
|
||||
|
||||
try:
|
||||
return future.result(timeout=timeout)
|
||||
except concurrent.futures.TimeoutError:
|
||||
except concurrent.futures.TimeoutError as e:
|
||||
future.cancel()
|
||||
raise TimeoutError(
|
||||
f"Task '{task.description}' execution timed out after {timeout} seconds. Consider increasing max_execution_time or optimizing the task."
|
||||
)
|
||||
) from e
|
||||
except Exception as e:
|
||||
future.cancel()
|
||||
raise RuntimeError(f"Task execution failed: {str(e)}")
|
||||
raise RuntimeError(f"Task execution failed: {e!s}") from e
|
||||
|
||||
def _execute_without_timeout(self, task_prompt: str, task: Task) -> str:
|
||||
"""Execute a task without a timeout.
|
||||
@@ -554,14 +545,14 @@ class Agent(BaseAgent):
|
||||
)["output"]
|
||||
|
||||
def create_agent_executor(
|
||||
self, tools: Optional[List[BaseTool]] = None, task=None
|
||||
self, tools: list[BaseTool] | None = None, task=None
|
||||
) -> None:
|
||||
"""Create an agent executor for the agent.
|
||||
|
||||
Returns:
|
||||
An instance of the CrewAgentExecutor class.
|
||||
"""
|
||||
raw_tools: List[BaseTool] = tools or self.tools or []
|
||||
raw_tools: list[BaseTool] = tools or self.tools or []
|
||||
parsed_tools = parse_tools(raw_tools)
|
||||
|
||||
prompt = Prompts(
|
||||
@@ -603,10 +594,9 @@ class Agent(BaseAgent):
|
||||
callbacks=[TokenCalcHandler(self._token_process)],
|
||||
)
|
||||
|
||||
def get_delegation_tools(self, agents: List[BaseAgent]):
|
||||
def get_delegation_tools(self, agents: list[BaseAgent]):
|
||||
agent_tools = AgentTools(agents=agents)
|
||||
tools = agent_tools.tools()
|
||||
return tools
|
||||
return agent_tools.tools()
|
||||
|
||||
def get_multimodal_tools(self) -> Sequence[BaseTool]:
|
||||
from crewai.tools.agent_tools.add_image_tool import AddImageTool
|
||||
@@ -654,7 +644,7 @@ class Agent(BaseAgent):
|
||||
)
|
||||
return task_prompt
|
||||
|
||||
def _render_text_description(self, tools: List[Any]) -> str:
|
||||
def _render_text_description(self, tools: list[Any]) -> str:
|
||||
"""Render the tool name and description in plain text.
|
||||
|
||||
Output will be in the format of:
|
||||
@@ -664,15 +654,13 @@ class Agent(BaseAgent):
|
||||
search: This tool is used for search
|
||||
calculator: This tool is used for math
|
||||
"""
|
||||
description = "\n".join(
|
||||
return "\n".join(
|
||||
[
|
||||
f"Tool name: {tool.name}\nTool description:\n{tool.description}"
|
||||
for tool in tools
|
||||
]
|
||||
)
|
||||
|
||||
return description
|
||||
|
||||
def _inject_date_to_task(self, task):
|
||||
"""Inject the current date into the task description if inject_date is enabled."""
|
||||
if self.inject_date:
|
||||
@@ -700,9 +688,9 @@ class Agent(BaseAgent):
|
||||
task.description += f"\n\nCurrent Date: {current_date}"
|
||||
except Exception as e:
|
||||
if hasattr(self, "_logger"):
|
||||
self._logger.log("warning", f"Failed to inject date: {str(e)}")
|
||||
self._logger.log("warning", f"Failed to inject date: {e!s}")
|
||||
else:
|
||||
print(f"Warning: Failed to inject date: {str(e)}")
|
||||
print(f"Warning: Failed to inject date: {e!s}")
|
||||
|
||||
def _validate_docker_installation(self) -> None:
|
||||
"""Check if Docker is installed and running."""
|
||||
@@ -718,10 +706,10 @@ class Agent(BaseAgent):
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
)
|
||||
except subprocess.CalledProcessError:
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise RuntimeError(
|
||||
f"Docker is not running. Please start Docker to use code execution with agent: {self.role}"
|
||||
)
|
||||
) from e
|
||||
|
||||
def __repr__(self):
|
||||
return f"Agent(role={self.role}, goal={self.goal}, backstory={self.backstory})"
|
||||
@@ -796,8 +784,8 @@ class Agent(BaseAgent):
|
||||
|
||||
def kickoff(
|
||||
self,
|
||||
messages: Union[str, List[Dict[str, str]]],
|
||||
response_format: Optional[Type[Any]] = None,
|
||||
messages: str | list[dict[str, str]],
|
||||
response_format: type[Any] | None = None,
|
||||
) -> LiteAgentOutput:
|
||||
"""
|
||||
Execute the agent with the given messages using a LiteAgent instance.
|
||||
@@ -836,8 +824,8 @@ class Agent(BaseAgent):
|
||||
|
||||
async def kickoff_async(
|
||||
self,
|
||||
messages: Union[str, List[Dict[str, str]]],
|
||||
response_format: Optional[Type[Any]] = None,
|
||||
messages: str | list[dict[str, str]],
|
||||
response_format: type[Any] | None = None,
|
||||
) -> LiteAgentOutput:
|
||||
"""
|
||||
Execute the agent asynchronously with the given messages using a LiteAgent instance.
|
||||
12
packages/crewai/src/crewai/agents/__init__.py
Normal file
@@ -0,0 +1,12 @@
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.parser import AgentAction, AgentFinish, OutputParserException, parse
from crewai.agents.tools_handler import ToolsHandler

__all__ = [
    "AgentAction",
    "AgentFinish",
    "CacheHandler",
    "OutputParserException",
    "ToolsHandler",
    "parse",
]

@@ -1,7 +1,7 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any
|
||||
|
||||
from pydantic import PrivateAttr
|
||||
from pydantic import ConfigDict, PrivateAttr
|
||||
|
||||
from crewai.agent import BaseAgent
|
||||
from crewai.tools import BaseTool
|
||||
@@ -16,22 +16,21 @@ class BaseAgentAdapter(BaseAgent, ABC):
|
||||
"""
|
||||
|
||||
adapted_structured_output: bool = False
|
||||
_agent_config: Optional[Dict[str, Any]] = PrivateAttr(default=None)
|
||||
_agent_config: dict[str, Any] | None = PrivateAttr(default=None)
|
||||
|
||||
model_config = {"arbitrary_types_allowed": True}
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
|
||||
def __init__(self, agent_config: Optional[Dict[str, Any]] = None, **kwargs: Any):
|
||||
def __init__(self, agent_config: dict[str, Any] | None = None, **kwargs: Any):
|
||||
super().__init__(adapted_agent=True, **kwargs)
|
||||
self._agent_config = agent_config
|
||||
|
||||
@abstractmethod
|
||||
def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None:
|
||||
def configure_tools(self, tools: list[BaseTool] | None = None) -> None:
|
||||
"""Configure and adapt tools for the specific agent implementation.
|
||||
|
||||
Args:
|
||||
tools: Optional list of BaseTool instances to be configured
|
||||
"""
|
||||
pass
|
||||
|
||||
def configure_structured_output(self, structured_output: Any) -> None:
|
||||
"""Configure the structured output for the specific agent implementation.
|
||||
@@ -39,4 +38,3 @@ class BaseAgentAdapter(BaseAgent, ABC):
|
||||
Args:
|
||||
structured_output: The structured output to be configured
|
||||
"""
|
||||
pass
|
||||
@@ -1,5 +1,5 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Any, List, Optional
|
||||
from typing import Any
|
||||
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
|
||||
@@ -12,23 +12,22 @@ class BaseToolAdapter(ABC):
|
||||
different frameworks and platforms.
|
||||
"""
|
||||
|
||||
original_tools: List[BaseTool]
|
||||
converted_tools: List[Any]
|
||||
original_tools: list[BaseTool]
|
||||
converted_tools: list[Any]
|
||||
|
||||
def __init__(self, tools: Optional[List[BaseTool]] = None):
|
||||
def __init__(self, tools: list[BaseTool] | None = None):
|
||||
self.original_tools = tools or []
|
||||
self.converted_tools = []
|
||||
|
||||
@abstractmethod
|
||||
def configure_tools(self, tools: List[BaseTool]) -> None:
|
||||
def configure_tools(self, tools: list[BaseTool]) -> None:
|
||||
"""Configure and convert tools for the specific implementation.
|
||||
|
||||
Args:
|
||||
tools: List of BaseTool instances to be configured and converted
|
||||
"""
|
||||
pass
|
||||
|
||||
def tools(self) -> List[Any]:
|
||||
def tools(self) -> list[Any]:
|
||||
"""Return all converted tools."""
|
||||
return self.converted_tools
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
import uuid
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import Callable
|
||||
from copy import copy as shallow_copy
|
||||
from hashlib import md5
|
||||
from typing import Any, Callable, Dict, List, Optional, TypeVar
|
||||
from typing import Any, TypeVar
|
||||
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
@@ -25,7 +26,6 @@ from crewai.security.security_config import SecurityConfig
|
||||
from crewai.tools.base_tool import BaseTool, Tool
|
||||
from crewai.utilities import I18N, Logger, RPMController
|
||||
from crewai.utilities.config import process_config
|
||||
from crewai.utilities.converter import Converter
|
||||
from crewai.utilities.string_utils import interpolate_only
|
||||
|
||||
T = TypeVar("T", bound="BaseAgent")
|
||||
@@ -81,17 +81,17 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
__hash__ = object.__hash__ # type: ignore
|
||||
_logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False))
|
||||
_rpm_controller: Optional[RPMController] = PrivateAttr(default=None)
|
||||
_rpm_controller: RPMController | None = PrivateAttr(default=None)
|
||||
_request_within_rpm_limit: Any = PrivateAttr(default=None)
|
||||
_original_role: Optional[str] = PrivateAttr(default=None)
|
||||
_original_goal: Optional[str] = PrivateAttr(default=None)
|
||||
_original_backstory: Optional[str] = PrivateAttr(default=None)
|
||||
_original_role: str | None = PrivateAttr(default=None)
|
||||
_original_goal: str | None = PrivateAttr(default=None)
|
||||
_original_backstory: str | None = PrivateAttr(default=None)
|
||||
_token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
|
||||
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
|
||||
role: str = Field(description="Role of the agent")
|
||||
goal: str = Field(description="Objective of the agent")
|
||||
backstory: str = Field(description="Backstory of the agent")
|
||||
config: Optional[Dict[str, Any]] = Field(
|
||||
config: dict[str, Any] | None = Field(
|
||||
description="Configuration for the agent", default=None, exclude=True
|
||||
)
|
||||
cache: bool = Field(
|
||||
@@ -100,7 +100,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
verbose: bool = Field(
|
||||
default=False, description="Verbose mode for the Agent Execution"
|
||||
)
|
||||
max_rpm: Optional[int] = Field(
|
||||
max_rpm: int | None = Field(
|
||||
default=None,
|
||||
description="Maximum number of requests per minute for the agent execution to be respected.",
|
||||
)
|
||||
@@ -108,7 +108,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
default=False,
|
||||
description="Enable agent to delegate and ask questions among each other.",
|
||||
)
|
||||
tools: Optional[List[BaseTool]] = Field(
|
||||
tools: list[BaseTool] | None = Field(
|
||||
default_factory=list, description="Tools at agents' disposal"
|
||||
)
|
||||
max_iter: int = Field(
|
||||
@@ -122,27 +122,27 @@ class BaseAgent(ABC, BaseModel):
|
||||
)
|
||||
crew: Any = Field(default=None, description="Crew to which the agent belongs.")
|
||||
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
|
||||
cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
|
||||
cache_handler: InstanceOf[CacheHandler] | None = Field(
|
||||
default=None, description="An instance of the CacheHandler class."
|
||||
)
|
||||
tools_handler: InstanceOf[ToolsHandler] = Field(
|
||||
default_factory=ToolsHandler,
|
||||
description="An instance of the ToolsHandler class.",
|
||||
)
|
||||
tools_results: List[Dict[str, Any]] = Field(
|
||||
tools_results: list[dict[str, Any]] = Field(
|
||||
default=[], description="Results of the tools used by the agent."
|
||||
)
|
||||
max_tokens: Optional[int] = Field(
|
||||
max_tokens: int | None = Field(
|
||||
default=None, description="Maximum number of tokens for the agent's execution."
|
||||
)
|
||||
knowledge: Optional[Knowledge] = Field(
|
||||
knowledge: Knowledge | None = Field(
|
||||
default=None, description="Knowledge for the agent."
|
||||
)
|
||||
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
|
||||
knowledge_sources: list[BaseKnowledgeSource] | None = Field(
|
||||
default=None,
|
||||
description="Knowledge sources for the agent.",
|
||||
)
|
||||
knowledge_storage: Optional[Any] = Field(
|
||||
knowledge_storage: Any | None = Field(
|
||||
default=None,
|
||||
description="Custom knowledge storage for the agent.",
|
||||
)
|
||||
@@ -150,13 +150,13 @@ class BaseAgent(ABC, BaseModel):
|
||||
default_factory=SecurityConfig,
|
||||
description="Security configuration for the agent, including fingerprinting.",
|
||||
)
|
||||
callbacks: List[Callable] = Field(
|
||||
callbacks: list[Callable] = Field(
|
||||
default=[], description="Callbacks to be used for the agent"
|
||||
)
|
||||
adapted_agent: bool = Field(
|
||||
default=False, description="Whether the agent is adapted"
|
||||
)
|
||||
knowledge_config: Optional[KnowledgeConfig] = Field(
|
||||
knowledge_config: KnowledgeConfig | None = Field(
|
||||
default=None,
|
||||
description="Knowledge configuration for the agent such as limits and threshold",
|
||||
)
|
||||
@@ -168,7 +168,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
@field_validator("tools")
|
||||
@classmethod
|
||||
def validate_tools(cls, tools: List[Any]) -> List[BaseTool]:
|
||||
def validate_tools(cls, tools: list[Any]) -> list[BaseTool]:
|
||||
"""Validate and process the tools provided to the agent.
|
||||
|
||||
This method ensures that each tool is either an instance of BaseTool
|
||||
@@ -221,7 +221,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
|
||||
def _deny_user_set_id(cls, v: UUID4 | None) -> None:
|
||||
if v:
|
||||
raise PydanticCustomError(
|
||||
"may_not_set_field", "This field is not to be set by the user.", {}
|
||||
@@ -252,8 +252,8 @@ class BaseAgent(ABC, BaseModel):
|
||||
def execute_task(
|
||||
self,
|
||||
task: Any,
|
||||
context: Optional[str] = None,
|
||||
tools: Optional[List[BaseTool]] = None,
|
||||
context: str | None = None,
|
||||
tools: list[BaseTool] | None = None,
|
||||
) -> str:
|
||||
pass
|
||||
|
||||
@@ -262,9 +262,8 @@ class BaseAgent(ABC, BaseModel):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
|
||||
def get_delegation_tools(self, agents: list["BaseAgent"]) -> list[BaseTool]:
|
||||
"""Set the task tools that init BaseAgenTools class."""
|
||||
pass
|
||||
|
||||
def copy(self: T) -> T: # type: ignore # Signature of "copy" incompatible with supertype "BaseModel"
|
||||
"""Create a deep copy of the Agent."""
|
||||
@@ -309,7 +308,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
copied_data = self.model_dump(exclude=exclude)
|
||||
copied_data = {k: v for k, v in copied_data.items() if v is not None}
|
||||
copied_agent = type(self)(
|
||||
return type(self)(
|
||||
**copied_data,
|
||||
llm=existing_llm,
|
||||
tools=self.tools,
|
||||
@@ -318,9 +317,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
knowledge_storage=copied_knowledge_storage,
|
||||
)
|
||||
|
||||
return copied_agent
|
||||
|
||||
def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
|
||||
def interpolate_inputs(self, inputs: dict[str, Any]) -> None:
|
||||
"""Interpolate inputs into the agent description and backstory."""
|
||||
if self._original_role is None:
|
||||
self._original_role = self.role
|
||||
@@ -362,5 +359,5 @@ class BaseAgent(ABC, BaseModel):
|
||||
self._rpm_controller = rpm_controller
|
||||
self.create_agent_executor()
|
||||
|
||||
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
||||
def set_knowledge(self, crew_embedder: dict[str, Any] | None = None):
|
||||
pass
|
||||
@@ -1,13 +1,13 @@
|
||||
import time
|
||||
from typing import TYPE_CHECKING, Dict, List
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from crewai.events.event_listener import event_listener
|
||||
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
|
||||
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
|
||||
from crewai.utilities import I18N
|
||||
from crewai.utilities.converter import ConverterError
|
||||
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
||||
from crewai.utilities.printer import Printer
|
||||
from crewai.events.event_listener import event_listener
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
@@ -21,7 +21,7 @@ class CrewAgentExecutorMixin:
|
||||
task: "Task"
|
||||
iterations: int
|
||||
max_iter: int
|
||||
messages: List[Dict[str, str]]
|
||||
messages: list[dict[str, str]]
|
||||
_i18n: I18N
|
||||
_printer: Printer = Printer()
|
||||
|
||||
@@ -46,7 +46,6 @@ class CrewAgentExecutorMixin:
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Failed to add to short term memory: {e}")
|
||||
pass
|
||||
|
||||
def _create_external_memory(self, output) -> None:
|
||||
"""Create and save a external-term memory item if conditions are met."""
|
||||
@@ -67,7 +66,6 @@ class CrewAgentExecutorMixin:
|
||||
)
|
||||
except Exception as e:
|
||||
print(f"Failed to add to external memory: {e}")
|
||||
pass
|
||||
|
||||
def _create_long_term_memory(self, output) -> None:
|
||||
"""Create and save long-term and entity memory items based on evaluation."""
|
||||
@@ -113,10 +111,8 @@ class CrewAgentExecutorMixin:
|
||||
self.crew._entity_memory.save(entity_memories)
|
||||
except AttributeError as e:
|
||||
print(f"Missing attributes for long term memory: {e}")
|
||||
pass
|
||||
except Exception as e:
|
||||
print(f"Failed to add to long term memory: {e}")
|
||||
pass
|
||||
elif (
|
||||
self.crew
|
||||
and self.crew._long_term_memory
|
||||
@@ -251,9 +251,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
i18n=self._i18n,
|
||||
)
|
||||
continue
|
||||
else:
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise e
|
||||
handle_unknown_error(self._printer, e)
|
||||
raise e
|
||||
finally:
|
||||
self.iterations += 1
|
||||
|
||||
@@ -324,9 +323,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self.agent,
|
||||
AgentLogsStartedEvent(
|
||||
agent_role=self.agent.role,
|
||||
task_description=(
|
||||
getattr(self.task, "description") if self.task else "Not Found"
|
||||
),
|
||||
task_description=(self.task.description if self.task else "Not Found"),
|
||||
verbose=self.agent.verbose
|
||||
or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)),
|
||||
),
|
||||
@@ -415,8 +412,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
"""
|
||||
prompt = prompt.replace("{input}", inputs["input"])
|
||||
prompt = prompt.replace("{tool_names}", inputs["tool_names"])
|
||||
prompt = prompt.replace("{tools}", inputs["tools"])
|
||||
return prompt
|
||||
return prompt.replace("{tools}", inputs["tools"])
|
||||
|
||||
def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
|
||||
"""Process human feedback.
|
||||
@@ -10,9 +10,9 @@ from dataclasses import dataclass
|
||||
from json_repair import repair_json
|
||||
|
||||
from crewai.agents.constants import (
|
||||
ACTION_INPUT_ONLY_REGEX,
|
||||
ACTION_INPUT_REGEX,
|
||||
ACTION_REGEX,
|
||||
ACTION_INPUT_ONLY_REGEX,
|
||||
FINAL_ANSWER_ACTION,
|
||||
MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE,
|
||||
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
|
||||
@@ -104,7 +104,7 @@ def parse(text: str) -> AgentAction | AgentFinish:
|
||||
final_answer = final_answer[:-3].rstrip()
|
||||
return AgentFinish(thought=thought, output=final_answer, text=text)
|
||||
|
||||
elif action_match:
|
||||
if action_match:
|
||||
action = action_match.group(1)
|
||||
clean_action = _clean_action(action)
|
||||
|
||||
@@ -121,16 +121,15 @@ def parse(text: str) -> AgentAction | AgentFinish:
|
||||
raise OutputParserException(
|
||||
f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{_I18N.slice('final_answer_format')}",
|
||||
)
|
||||
elif not ACTION_INPUT_ONLY_REGEX.search(text):
|
||||
if not ACTION_INPUT_ONLY_REGEX.search(text):
|
||||
raise OutputParserException(
|
||||
MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE,
|
||||
)
|
||||
else:
|
||||
err_format = _I18N.slice("format_without_tools")
|
||||
error = f"{err_format}"
|
||||
raise OutputParserException(
|
||||
error,
|
||||
)
|
||||
err_format = _I18N.slice("format_without_tools")
|
||||
error = f"{err_format}"
|
||||
raise OutputParserException(
|
||||
error,
|
||||
)
|
||||
|
||||
|
||||
def _extract_thought(text: str) -> str:
|
||||
@@ -149,8 +148,7 @@ def _extract_thought(text: str) -> str:
|
||||
return ""
|
||||
thought = text[:thought_index].strip()
|
||||
# Remove any triple backticks from the thought string
|
||||
thought = thought.replace("```", "").strip()
|
||||
return thought
|
||||
return thought.replace("```", "").strip()
|
||||
|
||||
|
||||
def _clean_action(text: str) -> str:
|
||||
@@ -1,8 +1,8 @@
"""Tools handler for managing tool execution and caching."""

from crewai.agents.cache.cache_handler import CacheHandler
from crewai.tools.cache_tools.cache_tools import CacheTools
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.agents.cache.cache_handler import CacheHandler


class ToolsHandler:
@@ -1,5 +1,6 @@
from crewai.cli.authentication.providers.base_provider import BaseProvider


class Auth0Provider(BaseProvider):
    def get_authorize_url(self) -> str:
        return f"https://{self._get_domain()}/oauth/device/code"
@@ -1,30 +1,26 @@
from abc import ABC, abstractmethod

from crewai.cli.authentication.main import Oauth2Settings


class BaseProvider(ABC):
    def __init__(self, settings: Oauth2Settings):
        self.settings = settings

    @abstractmethod
    def get_authorize_url(self) -> str:
        ...
    def get_authorize_url(self) -> str: ...

    @abstractmethod
    def get_token_url(self) -> str:
        ...
    def get_token_url(self) -> str: ...

    @abstractmethod
    def get_jwks_url(self) -> str:
        ...
    def get_jwks_url(self) -> str: ...

    @abstractmethod
    def get_issuer(self) -> str:
        ...
    def get_issuer(self) -> str: ...

    @abstractmethod
    def get_audience(self) -> str:
        ...
    def get_audience(self) -> str: ...

    @abstractmethod
    def get_client_id(self) -> str:
        ...
    def get_client_id(self) -> str: ...
@@ -1,5 +1,6 @@
from crewai.cli.authentication.providers.base_provider import BaseProvider


class OktaProvider(BaseProvider):
    def get_authorize_url(self) -> str:
        return f"https://{self.settings.domain}/oauth2/default/v1/device/authorize"
@@ -1,5 +1,6 @@
from crewai.cli.authentication.providers.base_provider import BaseProvider


class WorkosProvider(BaseProvider):
    def get_authorize_url(self) -> str:
        return f"https://{self._get_domain()}/oauth2/device_authorization"
@@ -17,9 +18,11 @@ class WorkosProvider(BaseProvider):
        return self.settings.audience or ""

    def get_client_id(self) -> str:
        assert self.settings.client_id is not None, "Client ID is required"
        if self.settings.client_id is None:
            raise RuntimeError("Client ID is required")
        return self.settings.client_id

    def _get_domain(self) -> str:
        assert self.settings.domain is not None, "Domain is required"
        if self.settings.domain is None:
            raise RuntimeError("Domain is required")
        return self.settings.domain
@@ -17,8 +17,6 @@ def validate_jwt_token(
        missing required claims).
    """

    decoded_token = None

    try:
        jwk_client = PyJWKClient(jwks_url)
        signing_key = jwk_client.get_signing_key_from_jwt(jwt_token)
@@ -26,7 +24,7 @@ def validate_jwt_token(
        _unverified_decoded_token = jwt.decode(
            jwt_token, options={"verify_signature": False}
        )
        decoded_token = jwt.decode(
        return jwt.decode(
            jwt_token,
            signing_key.key,
            algorithms=["RS256"],
@@ -40,7 +38,6 @@ def validate_jwt_token(
                "require": ["exp", "iat", "iss", "aud", "sub"],
            },
        )
        return decoded_token

    except jwt.ExpiredSignatureError:
        raise Exception("Token has expired.")
@@ -55,8 +52,8 @@ def validate_jwt_token(
            f"Invalid token issuer. Got: '{actual_issuer}'. Expected: '{issuer}'"
        )
    except jwt.MissingRequiredClaimError as e:
        raise Exception(f"Token is missing required claims: {str(e)}")
        raise Exception(f"Token is missing required claims: {e!s}")
    except jwt.exceptions.PyJWKClientError as e:
        raise Exception(f"JWKS or key processing error: {str(e)}")
        raise Exception(f"JWKS or key processing error: {e!s}")
    except jwt.InvalidTokenError as e:
        raise Exception(f"Invalid token: {str(e)}")
        raise Exception(f"Invalid token: {e!s}")
@@ -1,13 +1,13 @@
|
||||
from importlib.metadata import version as get_version
|
||||
from typing import Optional
|
||||
|
||||
import click
|
||||
from crewai.cli.config import Settings
|
||||
from crewai.cli.settings.main import SettingsCommand
|
||||
|
||||
from crewai.cli.add_crew_to_flow import add_crew_to_flow
|
||||
from crewai.cli.config import Settings
|
||||
from crewai.cli.create_crew import create_crew
|
||||
from crewai.cli.create_flow import create_flow
|
||||
from crewai.cli.crew_chat import run_chat
|
||||
from crewai.cli.settings.main import SettingsCommand
|
||||
from crewai.memory.storage.kickoff_task_outputs_storage import (
|
||||
KickoffTaskOutputsSQLiteStorage,
|
||||
)
|
||||
@@ -237,13 +237,11 @@ def login():
|
||||
@crewai.group()
|
||||
def deploy():
|
||||
"""Deploy the Crew CLI group."""
|
||||
pass
|
||||
|
||||
|
||||
@crewai.group()
|
||||
def tool():
|
||||
"""Tool Repository related commands."""
|
||||
pass
|
||||
|
||||
|
||||
@deploy.command(name="create")
|
||||
@@ -263,7 +261,7 @@ def deploy_list():
|
||||
|
||||
@deploy.command(name="push")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
def deploy_push(uuid: Optional[str]):
|
||||
def deploy_push(uuid: str | None):
|
||||
"""Deploy the Crew."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.deploy(uuid=uuid)
|
||||
@@ -271,7 +269,7 @@ def deploy_push(uuid: Optional[str]):
|
||||
|
||||
@deploy.command(name="status")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
def deply_status(uuid: Optional[str]):
|
||||
def deply_status(uuid: str | None):
|
||||
"""Get the status of a deployment."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.get_crew_status(uuid=uuid)
|
||||
@@ -279,7 +277,7 @@ def deply_status(uuid: Optional[str]):
|
||||
|
||||
@deploy.command(name="logs")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
def deploy_logs(uuid: Optional[str]):
|
||||
def deploy_logs(uuid: str | None):
|
||||
"""Get the logs of a deployment."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.get_crew_logs(uuid=uuid)
|
||||
@@ -287,7 +285,7 @@ def deploy_logs(uuid: Optional[str]):
|
||||
|
||||
@deploy.command(name="remove")
|
||||
@click.option("-u", "--uuid", type=str, help="Crew UUID parameter")
|
||||
def deploy_remove(uuid: Optional[str]):
|
||||
def deploy_remove(uuid: str | None):
|
||||
"""Remove a deployment."""
|
||||
deploy_cmd = DeployCommand()
|
||||
deploy_cmd.remove_crew(uuid=uuid)
|
||||
@@ -327,7 +325,6 @@ def tool_publish(is_public: bool, force: bool):
|
||||
@crewai.group()
|
||||
def flow():
|
||||
"""Flow related commands."""
|
||||
pass
|
||||
|
||||
|
||||
@flow.command(name="kickoff")
|
||||
@@ -359,7 +356,7 @@ def chat():
|
||||
and using the Chat LLM to generate responses.
|
||||
"""
|
||||
click.secho(
|
||||
"\nStarting a conversation with the Crew\n" "Type 'exit' or Ctrl+C to quit.\n",
|
||||
"\nStarting a conversation with the Crew\nType 'exit' or Ctrl+C to quit.\n",
|
||||
)
|
||||
|
||||
run_chat()
|
||||
@@ -368,7 +365,6 @@ def chat():
|
||||
@crewai.group(invoke_without_command=True)
|
||||
def org():
|
||||
"""Organization management commands."""
|
||||
pass
|
||||
|
||||
|
||||
@org.command("list")
|
||||
@@ -396,7 +392,6 @@ def current():
|
||||
@crewai.group()
|
||||
def enterprise():
|
||||
"""Enterprise Configuration commands."""
|
||||
pass
|
||||
|
||||
|
||||
@enterprise.command("configure")
|
||||
@@ -410,7 +405,6 @@ def enterprise_configure(enterprise_url: str):
|
||||
@crewai.group()
|
||||
def config():
|
||||
"""CLI Configuration commands."""
|
||||
pass
|
||||
|
||||
|
||||
@config.command("list")
|
||||
@@ -1,15 +1,14 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from crewai.cli.constants import (
|
||||
DEFAULT_CREWAI_ENTERPRISE_URL,
|
||||
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_PROVIDER,
|
||||
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_AUDIENCE,
|
||||
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_CLIENT_ID,
|
||||
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_DOMAIN,
|
||||
CREWAI_ENTERPRISE_DEFAULT_OAUTH2_PROVIDER,
|
||||
DEFAULT_CREWAI_ENTERPRISE_URL,
|
||||
)
|
||||
from crewai.cli.shared.token_manager import TokenManager
|
||||
|
||||
@@ -56,20 +55,20 @@ HIDDEN_SETTINGS_KEYS = [
|
||||
|
||||
|
||||
class Settings(BaseModel):
|
||||
enterprise_base_url: Optional[str] = Field(
|
||||
enterprise_base_url: str | None = Field(
|
||||
default=DEFAULT_CLI_SETTINGS["enterprise_base_url"],
|
||||
description="Base URL of the CrewAI Enterprise instance",
|
||||
)
|
||||
tool_repository_username: Optional[str] = Field(
|
||||
tool_repository_username: str | None = Field(
|
||||
None, description="Username for interacting with the Tool Repository"
|
||||
)
|
||||
tool_repository_password: Optional[str] = Field(
|
||||
tool_repository_password: str | None = Field(
|
||||
None, description="Password for interacting with the Tool Repository"
|
||||
)
|
||||
org_name: Optional[str] = Field(
|
||||
org_name: str | None = Field(
|
||||
None, description="Name of the currently active organization"
|
||||
)
|
||||
org_uuid: Optional[str] = Field(
|
||||
org_uuid: str | None = Field(
|
||||
None, description="UUID of the currently active organization"
|
||||
)
|
||||
config_path: Path = Field(default=DEFAULT_CONFIG_PATH, frozen=True, exclude=True)
|
||||
@@ -79,7 +78,7 @@ class Settings(BaseModel):
|
||||
default=DEFAULT_CLI_SETTINGS["oauth2_provider"],
|
||||
)
|
||||
|
||||
oauth2_audience: Optional[str] = Field(
|
||||
oauth2_audience: str | None = Field(
|
||||
description="OAuth2 audience value, typically used to identify the target API or resource.",
|
||||
default=DEFAULT_CLI_SETTINGS["oauth2_audience"],
|
||||
)
|
||||
@@ -16,48 +16,72 @@ from crewai.cli.utils import copy_template, load_env_vars, write_env_file
|
||||
def create_folder_structure(name, parent_folder=None):
|
||||
import keyword
|
||||
import re
|
||||
|
||||
name = name.rstrip('/')
|
||||
|
||||
|
||||
name = name.rstrip("/")
|
||||
|
||||
if not name.strip():
|
||||
raise ValueError("Project name cannot be empty or contain only whitespace")
|
||||
|
||||
|
||||
folder_name = name.replace(" ", "_").replace("-", "_").lower()
|
||||
folder_name = re.sub(r'[^a-zA-Z0-9_]', '', folder_name)
|
||||
|
||||
folder_name = re.sub(r"[^a-zA-Z0-9_]", "", folder_name)
|
||||
|
||||
# Check if the name starts with invalid characters or is primarily invalid
|
||||
if re.match(r'^[^a-zA-Z0-9_-]+', name):
|
||||
raise ValueError(f"Project name '{name}' contains no valid characters for a Python module name")
|
||||
|
||||
if re.match(r"^[^a-zA-Z0-9_-]+", name):
|
||||
raise ValueError(
|
||||
f"Project name '{name}' contains no valid characters for a Python module name"
|
||||
)
|
||||
|
||||
if not folder_name:
|
||||
raise ValueError(f"Project name '{name}' contains no valid characters for a Python module name")
|
||||
|
||||
raise ValueError(
|
||||
f"Project name '{name}' contains no valid characters for a Python module name"
|
||||
)
|
||||
|
||||
if folder_name[0].isdigit():
|
||||
raise ValueError(f"Project name '{name}' would generate folder name '{folder_name}' which cannot start with a digit (invalid Python module name)")
|
||||
|
||||
raise ValueError(
|
||||
f"Project name '{name}' would generate folder name '{folder_name}' which cannot start with a digit (invalid Python module name)"
|
||||
)
|
||||
|
||||
if keyword.iskeyword(folder_name):
|
||||
raise ValueError(f"Project name '{name}' would generate folder name '{folder_name}' which is a reserved Python keyword")
|
||||
|
||||
raise ValueError(
|
||||
f"Project name '{name}' would generate folder name '{folder_name}' which is a reserved Python keyword"
|
||||
)
|
||||
|
||||
if not folder_name.isidentifier():
|
||||
raise ValueError(f"Project name '{name}' would generate invalid Python module name '{folder_name}'")
|
||||
|
||||
raise ValueError(
|
||||
f"Project name '{name}' would generate invalid Python module name '{folder_name}'"
|
||||
)
|
||||
|
||||
class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")
|
||||
|
||||
class_name = re.sub(r'[^a-zA-Z0-9_]', '', class_name)
|
||||
|
||||
|
||||
class_name = re.sub(r"[^a-zA-Z0-9_]", "", class_name)
|
||||
|
||||
if not class_name:
|
||||
raise ValueError(f"Project name '{name}' contains no valid characters for a Python class name")
|
||||
|
||||
raise ValueError(
|
||||
f"Project name '{name}' contains no valid characters for a Python class name"
|
||||
)
|
||||
|
||||
if class_name[0].isdigit():
|
||||
raise ValueError(f"Project name '{name}' would generate class name '{class_name}' which cannot start with a digit")
|
||||
|
||||
raise ValueError(
|
||||
f"Project name '{name}' would generate class name '{class_name}' which cannot start with a digit"
|
||||
)
|
||||
|
||||
# Check if the original name (before title casing) is a keyword
|
||||
original_name_clean = re.sub(r'[^a-zA-Z0-9_]', '', name.replace("_", "").replace("-", "").lower())
|
||||
if keyword.iskeyword(original_name_clean) or keyword.iskeyword(class_name) or class_name in ('True', 'False', 'None'):
|
||||
raise ValueError(f"Project name '{name}' would generate class name '{class_name}' which is a reserved Python keyword")
|
||||
|
||||
original_name_clean = re.sub(
|
||||
r"[^a-zA-Z0-9_]", "", name.replace("_", "").replace("-", "").lower()
|
||||
)
|
||||
if (
|
||||
keyword.iskeyword(original_name_clean)
|
||||
or keyword.iskeyword(class_name)
|
||||
or class_name in ("True", "False", "None")
|
||||
):
|
||||
raise ValueError(
|
||||
f"Project name '{name}' would generate class name '{class_name}' which is a reserved Python keyword"
|
||||
)
|
||||
|
||||
if not class_name.isidentifier():
|
||||
raise ValueError(f"Project name '{name}' would generate invalid Python class name '{class_name}'")
|
||||
raise ValueError(
|
||||
f"Project name '{name}' would generate invalid Python class name '{class_name}'"
|
||||
)
|
||||
|
||||
if parent_folder:
|
||||
folder_path = Path(parent_folder) / folder_name
|
||||
@@ -172,7 +196,7 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
|
||||
)
|
||||
|
||||
# Check if the selected provider has predefined models
|
||||
if selected_provider in MODELS and MODELS[selected_provider]:
|
||||
if MODELS.get(selected_provider):
|
||||
while True:
|
||||
selected_model = select_model(selected_provider, provider_models)
|
||||
if selected_model is None: # User typed 'q'
|
||||
@@ -5,7 +5,7 @@ import sys
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple
|
||||
from typing import Any
|
||||
|
||||
import click
|
||||
import tomli
|
||||
@@ -116,7 +116,7 @@ def show_loading(event: threading.Event):
|
||||
print()
|
||||
|
||||
|
||||
def initialize_chat_llm(crew: Crew) -> Optional[LLM | BaseLLM]:
|
||||
def initialize_chat_llm(crew: Crew) -> LLM | BaseLLM | None:
|
||||
"""Initializes the chat LLM and handles exceptions."""
|
||||
try:
|
||||
return create_llm(crew.chat_llm)
|
||||
@@ -157,7 +157,7 @@ def build_system_message(crew_chat_inputs: ChatInputs) -> str:
|
||||
)
|
||||
|
||||
|
||||
def create_tool_function(crew: Crew, messages: List[Dict[str, str]]) -> Any:
|
||||
def create_tool_function(crew: Crew, messages: list[dict[str, str]]) -> Any:
|
||||
"""Creates a wrapper function for running the crew tool with messages."""
|
||||
|
||||
def run_crew_tool_with_messages(**kwargs):
|
||||
@@ -221,9 +221,9 @@ def get_user_input() -> str:
|
||||
def handle_user_input(
|
||||
user_input: str,
|
||||
chat_llm: LLM,
|
||||
messages: List[Dict[str, str]],
|
||||
crew_tool_schema: Dict[str, Any],
|
||||
available_functions: Dict[str, Any],
|
||||
messages: list[dict[str, str]],
|
||||
crew_tool_schema: dict[str, Any],
|
||||
available_functions: dict[str, Any],
|
||||
) -> None:
|
||||
if user_input.strip().lower() == "exit":
|
||||
click.echo("Exiting chat. Goodbye!")
|
||||
@@ -281,7 +281,7 @@ def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
|
||||
}
|
||||
|
||||
|
||||
def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
|
||||
def run_crew_tool(crew: Crew, messages: list[dict[str, str]], **kwargs):
|
||||
"""
|
||||
Runs the crew using crew.kickoff(inputs=kwargs) and returns the output.
|
||||
|
||||
@@ -304,9 +304,8 @@ def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
|
||||
crew_output = crew.kickoff(inputs=kwargs)
|
||||
|
||||
# Convert CrewOutput to a string to send back to the user
|
||||
result = str(crew_output)
|
||||
return str(crew_output)
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
# Exit the chat and show the error message
|
||||
click.secho("An error occurred while running the crew:", fg="red")
|
||||
@@ -314,7 +313,7 @@ def run_crew_tool(crew: Crew, messages: List[Dict[str, str]], **kwargs):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def load_crew_and_name() -> Tuple[Crew, str]:
|
||||
def load_crew_and_name() -> tuple[Crew, str]:
|
||||
"""
|
||||
Loads the crew by importing the crew class from the user's project.
|
||||
|
||||
@@ -395,7 +394,7 @@ def generate_crew_chat_inputs(crew: Crew, crew_name: str, chat_llm) -> ChatInput
|
||||
)
|
||||
|
||||
|
||||
def fetch_required_inputs(crew: Crew) -> Set[str]:
|
||||
def fetch_required_inputs(crew: Crew) -> set[str]:
|
||||
"""
|
||||
Extracts placeholders from the crew's tasks and agents.
|
||||
|
||||
@@ -406,7 +405,7 @@ def fetch_required_inputs(crew: Crew) -> Set[str]:
|
||||
Set[str]: A set of placeholder names.
|
||||
"""
|
||||
placeholder_pattern = re.compile(r"\{(.+?)\}")
|
||||
required_inputs: Set[str] = set()
|
||||
required_inputs: set[str] = set()
|
||||
|
||||
# Scan tasks
|
||||
for task in crew.tasks:
|
||||
@@ -479,9 +478,7 @@ def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) ->
|
||||
f"{context}"
|
||||
)
|
||||
response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
|
||||
description = response.strip()
|
||||
|
||||
return description
|
||||
return response.strip()
|
||||
|
||||
|
||||
def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
|
||||
@@ -531,6 +528,4 @@ def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
|
||||
f"{context}"
|
||||
)
|
||||
response = chat_llm.call(messages=[{"role": "user", "content": prompt}])
|
||||
crew_description = response.strip()
|
||||
|
||||
return crew_description
|
||||
return response.strip()
|
||||
@@ -64,8 +64,7 @@ class Repository:
|
||||
"""Return True if the Git repository is fully synced with the remote, False otherwise."""
|
||||
if self.has_uncommitted_changes() or self.is_ahead_or_behind():
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
return True
|
||||
|
||||
def origin_url(self) -> str | None:
|
||||
"""Get the Git repository's remote URL."""
|
||||
@@ -12,7 +12,7 @@ def install_crew(proxy_options: list[str]) -> None:
|
||||
Install the crew by running the UV command to lock and install.
|
||||
"""
|
||||
try:
|
||||
command = ["uv", "sync"] + proxy_options
|
||||
command = ["uv", "sync", *proxy_options]
|
||||
subprocess.run(command, check=True, capture_output=False, text=True)
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
@@ -1,11 +1,10 @@
|
||||
from typing import List, Optional
|
||||
from urllib.parse import urljoin
|
||||
|
||||
import requests
|
||||
|
||||
from crewai.cli.config import Settings
|
||||
from crewai.cli.version import get_crewai_version
|
||||
from crewai.cli.constants import DEFAULT_CREWAI_ENTERPRISE_URL
|
||||
from crewai.cli.version import get_crewai_version
|
||||
|
||||
|
||||
class PlusAPI:
|
||||
@@ -56,9 +55,9 @@ class PlusAPI:
|
||||
handle: str,
|
||||
is_public: bool,
|
||||
version: str,
|
||||
description: Optional[str],
|
||||
description: str | None,
|
||||
encoded_file: str,
|
||||
available_exports: Optional[List[str]] = None,
|
||||
available_exports: list[str] | None = None,
|
||||
):
|
||||
params = {
|
||||
"handle": handle,
|
||||
@@ -1,10 +1,10 @@
|
||||
import os
|
||||
import certifi
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
|
||||
import certifi
|
||||
import click
|
||||
import requests
|
||||
|
||||
@@ -25,7 +25,7 @@ def select_choice(prompt_message, choices):
|
||||
|
||||
provider_models = get_provider_data()
|
||||
if not provider_models:
|
||||
return
|
||||
return None
|
||||
click.secho(prompt_message, fg="cyan")
|
||||
for idx, choice in enumerate(choices, start=1):
|
||||
click.secho(f"{idx}. {choice}", fg="cyan")
|
||||
@@ -67,7 +67,7 @@ def select_provider(provider_models):
|
||||
all_providers = sorted(set(predefined_providers + list(provider_models.keys())))
|
||||
|
||||
provider = select_choice(
|
||||
"Select a provider to set up:", predefined_providers + ["other"]
|
||||
"Select a provider to set up:", [*predefined_providers, "other"]
|
||||
)
|
||||
if provider is None: # User typed 'q'
|
||||
return None
|
||||
@@ -102,10 +102,9 @@ def select_model(provider, provider_models):
|
||||
click.secho(f"No models available for provider '{provider}'.", fg="red")
|
||||
return None
|
||||
|
||||
selected_model = select_choice(
|
||||
return select_choice(
|
||||
f"Select a model to use for {provider.capitalize()}:", available_models
|
||||
)
|
||||
return selected_model
|
||||
|
||||
|
||||
def load_provider_data(cache_file, cache_expiry):
|
||||
@@ -165,7 +164,7 @@ def fetch_provider_data(cache_file):
|
||||
Returns:
|
||||
- dict or None: The fetched provider data or None if the operation fails.
|
||||
"""
|
||||
ssl_config = os.environ['SSL_CERT_FILE'] = certifi.where()
|
||||
ssl_config = os.environ["SSL_CERT_FILE"] = certifi.where()
|
||||
|
||||
try:
|
||||
response = requests.get(JSON_URL, stream=True, timeout=60, verify=ssl_config)
|
||||
@@ -1,6 +1,5 @@
|
||||
import subprocess
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
import click
|
||||
from packaging import version
|
||||
@@ -3,7 +3,7 @@ import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from cryptography.fernet import Fernet
|
||||
|
||||
|
||||
@@ -49,7 +49,7 @@ class TokenManager:
|
||||
encrypted_data = self.fernet.encrypt(json.dumps(data).encode())
|
||||
self.save_secure_file(self.file_path, encrypted_data)
|
||||
|
||||
def get_token(self) -> Optional[str]:
|
||||
def get_token(self) -> str | None:
|
||||
"""
|
||||
Get the access token if it is valid and not expired.
|
||||
|
||||
@@ -113,7 +113,7 @@ class TokenManager:
|
||||
# Set appropriate permissions (read/write for owner only)
|
||||
os.chmod(file_path, 0o600)
|
||||
|
||||
def read_secure_file(self, filename: str) -> Optional[bytes]:
|
||||
def read_secure_file(self, filename: str) -> bytes | None:
|
||||
"""
|
||||
Read the content of a secure file.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.