Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-08 07:38:29 +00:00)
Compare commits
19 Commits
brandon/cr... → feat/add-c...
| Author | SHA1 | Date |
|---|---|---|
|  | 4b8df71ca1 |  |
|  | 62f5b2fb2e |  |
|  | cc187a23d7 |  |
|  | 540e328f06 |  |
|  | 87d4c5f092 |  |
|  | 45c16cfa6b |  |
|  | 6583f31459 |  |
|  | 9232ac3e3f |  |
|  | aa8640c086 |  |
|  | 217f5fc5ac |  |
|  | 297dc93fb4 |  |
|  | 86c6760f58 |  |
|  | 498e96a419 |  |
|  | c0c59dc932 |  |
|  | f3b3d321e5 |  |
|  | 67e4433dc2 |  |
|  | 4a7ae8df71 |  |
|  | 09f92122d5 |  |
|  | 8118b7b7d6 |  |
35 .github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
@@ -0,0 +1,35 @@
+---
+name: Bug report
+about: Create a report to help us improve CrewAI
+title: "[BUG]"
+labels: bug
+assignees: ''
+
+---
+
+**Description**
+Provide a clear and concise description of what the bug is.
+
+**Steps to Reproduce**
+Provide a step-by-step process to reproduce the behavior:
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots/Code snippets**
+If applicable, add screenshots or code snippets to help explain your problem.
+
+**Environment Details:**
+- **Operating System**: [e.g., Ubuntu 20.04, macOS Catalina, Windows 10]
+- **Python Version**: [e.g., 3.8, 3.9, 3.10]
+- **crewAI Version**: [e.g., 0.30.11]
+- **crewAI Tools Version**: [e.g., 0.2.6]
+
+**Logs**
+Include relevant logs or error messages if applicable.
+
+**Possible Solution**
+Have a solution in mind? Please suggest it here, or write "None".
+
+**Additional context**
+Add any other context about the problem here.
24 .github/ISSUE_TEMPLATE/custom.md vendored Normal file
@@ -0,0 +1,24 @@
+---
+name: Custom issue template
+about: Describe this issue template's purpose here.
+title: "[DOCS]"
+labels: documentation
+assignees: ''
+
+---
+
+## Documentation Page
+<!-- Provide a link to the documentation page that needs improvement -->
+
+## Description
+<!-- Describe what needs to be changed or improved in the documentation -->
+
+## Suggested Changes
+<!-- If possible, provide specific suggestions for how to improve the documentation -->
+
+## Additional Context
+<!-- Add any other context about the documentation issue here -->
+
+## Checklist
+- [ ] I have searched the existing issues to make sure this is not a duplicate
+- [ ] I have checked the latest version of the documentation to ensure this hasn't been addressed
26 .github/workflows/stale.yml vendored Normal file
@@ -0,0 +1,26 @@
+name: Mark stale issues and pull requests
+
+on:
+  schedule:
+    - cron: '10 12 * * *'
+  workflow_dispatch:
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    permissions:
+      issues: write
+      pull-requests: write
+    steps:
+      - uses: actions/stale@v9
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          stale-issue-label: 'no-issue-activity'
+          stale-issue-message: 'This issue is stale because it has been open for 30 days with no activity. Remove stale label or comment or this will be closed in 5 days.'
+          close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.'
+          days-before-issue-stale: 30
+          days-before-issue-close: 5
+          stale-pr-label: 'no-pr-activity'
+          stale-pr-message: 'This PR is stale because it has been open for 45 days with no activity.'
+          days-before-pr-stale: 45
+          days-before-pr-close: -1
@@ -126,7 +126,7 @@ task2 = Task(
 crew = Crew(
   agents=[researcher, writer],
   tasks=[task1, task2],
-  verbose=2, # You can set it to 1 or 2 to different logging levels
+  verbose=True,
   process = Process.sequential
 )
@@ -134,7 +134,7 @@ Once a crew has been executed, its output can be accessed through the `output` a
 crew = Crew(
   agents=[research_agent, writer_agent],
   tasks=[research_task, write_article_task],
-  verbose=2
+  verbose=True
 )

 crew_output = crew.kickoff()
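The hunk above also shows how a crew's result is captured; a self-contained sketch of inspecting the returned `CrewOutput` (the `raw` and `json_dict` accessors follow the pipeline template's `main.py` later in this compare; the agent and task definitions here are illustrative, not taken from the diff):

```python
from crewai import Agent, Crew, Task

# Illustrative agent/task, not from the diff
research_agent = Agent(
    role="Researcher",
    goal="Summarize a topic",
    backstory="An analyst who writes concise summaries.",
)
research_task = Task(
    description="Summarize the state of AI agents in one paragraph.",
    expected_output="A single-paragraph summary.",
    agent=research_agent,
)
crew = Crew(agents=[research_agent], tasks=[research_task], verbose=True)

crew_output = crew.kickoff()
print(crew_output.raw)       # raw text of the final task output
if crew_output.json_dict:    # populated when a task defines output_json
    print(crew_output.json_dict)
```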
@@ -224,8 +224,8 @@ urgent_crew = Crew(agents=[urgent_handler], tasks=[urgent_task])
 normal_crew = Crew(agents=[normal_handler], tasks=[normal_task])

 # Create pipelines for different urgency levels
-urgent_pipeline = Pipeline(stages=[classification_crew, urgent_crew])
-normal_pipeline = Pipeline(stages=[classification_crew, normal_crew])
+urgent_pipeline = Pipeline(stages=[urgent_crew])
+normal_pipeline = Pipeline(stages=[normal_crew])

 # Create a router
 email_router = Router(
@@ -243,7 +243,7 @@ email_router = Router(
 )

 # Use the router in a main pipeline
-main_pipeline = Pipeline(stages=[email_router])
+main_pipeline = Pipeline(stages=[classification_crew, email_router])

 inputs = [{"email": "..."}, {"email": "..."}]  # List of email data
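Read together, the two hunks above move classification out of the per-urgency pipelines and into the main pipeline, so each email is classified once before the router dispatches it. A rough sketch of the resulting wiring, assuming the `Router`/`Route` interface implied by the surrounding docs (the `routes` mapping and `condition` lambdas are illustrative):

```python
# Per-urgency pipelines no longer repeat the classification stage
urgent_pipeline = Pipeline(stages=[urgent_crew])
normal_pipeline = Pipeline(stages=[normal_crew])

email_router = Router(
    routes={
        "urgent": Route(
            condition=lambda output: output.get("urgency") == "high",  # illustrative
            pipeline=urgent_pipeline,
        ),
        "normal": Route(
            condition=lambda output: output.get("urgency") == "normal",  # illustrative
            pipeline=normal_pipeline,
        ),
    },
    default=normal_pipeline,
)

# Classification runs once, up front, in the main pipeline
main_pipeline = Pipeline(stages=[classification_crew, email_router])
```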
@@ -90,7 +90,7 @@ task = Task(
 crew = Crew(
   agents=[research_agent],
   tasks=[task],
-  verbose=2
+  verbose=True
 )

 result = crew.kickoff()
@@ -142,7 +142,7 @@ task = Task(
 crew = Crew(
   agents=[research_agent],
   tasks=[task],
-  verbose=2
+  verbose=True
 )

 result = crew.kickoff()
@@ -264,7 +264,7 @@ task1 = Task(
 crew = Crew(
   agents=[research_agent],
   tasks=[task1, task2, task3],
-  verbose=2
+  verbose=True
 )

 result = crew.kickoff()
@@ -84,7 +84,7 @@ write = Task(
 crew = Crew(
   agents=[researcher, writer],
   tasks=[research, write],
-  verbose=2
+  verbose=True
 )

 # Execute tasks
@@ -20,7 +20,7 @@ Before getting started with CrewAI, make sure that you have installed it via pip
 $ pip install crewai crewai-tools
 ```

-### Virtual Environemnts
+### Virtual Environments
 It is highly recommended that you use virtual environments to ensure that your CrewAI project is isolated from other projects and dependencies. Virtual environments provide a clean, separate workspace for each project, preventing conflicts between different versions of packages and libraries. This isolation is crucial for maintaining consistency and reproducibility in your development process. You have multiple options for setting up virtual environments depending on your operating system and Python version:

 1. Use venv (Python's built-in virtual environment tool):
@@ -244,6 +244,10 @@ def run():
 To run your project, use the following command:

 ```shell
 $ crewai run
 ```
+or
+```shell
+$ poetry run my_project
+```
@@ -79,7 +79,7 @@ task3 = Task(
 crew = Crew(
   agents=[data_fetcher_agent, data_processor_agent, summary_generator_agent],
   tasks=[task1, conditional_task, task3],
-  verbose=2,
+  verbose=True,
 )

 result = crew.kickoff()
@@ -7,6 +7,7 @@ description: Comprehensive guide on crafting, using, and managing custom tools w
 This guide provides detailed instructions on creating custom tools for the crewAI framework and how to efficiently manage and utilize these tools, incorporating the latest functionalities such as tool delegation, error handling, and dynamic tool calling. It also highlights the importance of collaboration tools, enabling agents to perform a wide range of actions.

+### Prerequisites

 Before creating your own tools, ensure you have the crewAI extra tools package installed:

 ```bash
@@ -31,7 +32,7 @@ class MyCustomTool(BaseTool):
 ### Using the `tool` Decorator

-Alternatively, use the `tool` decorator for a direct approach to create tools. This requires specifying attributes and the tool's logic within a function.
+Alternatively, you can use the tool decorator `@tool`. This approach allows you to define the tool's attributes and functionality directly within a function, offering a concise and efficient way to create specialized tools tailored to your needs.

 ```python
 from crewai_tools import tool
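The hunk cuts off at the start of the decorator example; a minimal, self-contained sketch of the `@tool` pattern it describes (the tool name and function body are illustrative):

```python
from crewai_tools import tool

@tool("Count Words")
def count_words(text: str) -> int:
    """Count the number of words in the given text."""
    return len(text.split())
```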
@@ -16,7 +16,7 @@ Here's an example of how to force the tool output as the result of an agent's ta
 # Define a custom tool that returns the result as the answer
 coding_agent =Agent(
     role="Data Scientist",
-    goal="Product amazing resports on AI",
+    goal="Product amazing reports on AI",
     backstory="You work with data and AI",
     tools=[MyCustomTool(result_as_answer=True)],
 )
@@ -81,7 +81,7 @@ task2 = Task(
 crew = Crew(
   agents=[researcher, writer],
   tasks=[task1, task2],
-  verbose=2,
+  verbose=True,
   memory=True,
 )
@@ -6,33 +6,25 @@ description: Comprehensive guide on integrating CrewAI with various Large Langua
 ## Connect CrewAI to LLMs

 !!! note "Default LLM"
-    By default, CrewAI uses OpenAI's GPT-4 model (specifically, the model specified by the OPENAI_MODEL_NAME environment variable, defaulting to "gpt-4o") for language processing. You can configure your agents to use a different model or API as described in this guide.
-    By default, CrewAI uses OpenAI's GPT-4 model (specifically, the model specified by the OPENAI_MODEL_NAME environment variable, defaulting to "gpt-4") for language processing. You can configure your agents to use a different model or API as described in this guide.
+    By default, CrewAI uses OpenAI's GPT-4o model (specifically, the model specified by the OPENAI_MODEL_NAME environment variable, defaulting to "gpt-4o") for language processing. You can configure your agents to use a different model or API as described in this guide.

-CrewAI offers flexibility in connecting to various LLMs, including local models via [Ollama](https://ollama.ai) and different APIs like Azure. It's compatible with all [LangChain LLM](https://python.langchain.com/docs/integrations/llms/) components, enabling diverse integrations for tailored AI solutions.
+CrewAI provides extensive versatility in integrating with various Language Models (LLMs), including local options through Ollama such as Llama and Mixtral to cloud-based solutions like Azure. Its compatibility extends to all [LangChain LLM components](https://python.langchain.com/v0.2/docs/integrations/llms/), offering a wide range of integration possibilities for customized AI applications.

-## CrewAI Agent Overview
-
-The `Agent` class is the cornerstone for implementing AI solutions in CrewAI. Here's a comprehensive overview of the Agent class attributes and methods:
-
-- **Attributes**:
-  - `role`: Defines the agent's role within the solution.
-  - `goal`: Specifies the agent's objective.
-  - `backstory`: Provides a background story to the agent.
-  - `cache` *Optional*: Determines whether the agent should use a cache for tool usage. Default is `True`.
-  - `max_rpm` *Optional*: Maximum number of requests per minute the agent's execution should respect. Optional.
-  - `verbose` *Optional*: Enables detailed logging of the agent's execution. Default is `False`.
-  - `allow_delegation` *Optional*: Allows the agent to delegate tasks to other agents, default is `True`.
-  - `tools`: Specifies the tools available to the agent for task execution. Optional.
-  - `max_iter` *Optional*: Maximum number of iterations for an agent to execute a task, default is 25.
-  - `max_execution_time` *Optional*: Maximum execution time for an agent to execute a task. Optional.
-  - `step_callback` *Optional*: Provides a callback function to be executed after each step. Optional.
-  - `llm` *Optional*: Indicates the Large Language Model the agent uses. By default, it uses the GPT-4 model defined in the environment variable "OPENAI_MODEL_NAME".
-  - `function_calling_llm` *Optional*: Will turn the ReAct CrewAI agent into a function-calling agent.
-  - `callbacks` *Optional*: A list of callback functions from the LangChain library that are triggered during the agent's execution process.
-  - `system_template` *Optional*: Optional string to define the system format for the agent.
-  - `prompt_template` *Optional*: Optional string to define the prompt format for the agent.
-  - `response_template` *Optional*: Optional string to define the response format for the agent.
+The platform supports connections to an array of Generative AI models, including:
+
+- OpenAI's suite of advanced language models
+- Anthropic's cutting-edge AI offerings
+- Ollama's diverse range of locally-hosted generative model & embeddings
+- LM Studio's diverse range of locally hosted generative models & embeddings
+- Groq's Super Fast LLM offerings
+- Azures' generative AI offerings
+- HuggingFace's generative AI offerings
+
+This broad spectrum of LLM options enables users to select the most suitable model for their specific needs, whether prioritizing local deployment, specialized capabilities, or cloud-based scalability.

 ## Changing the default LLM
 The default LLM is provided through the `langchain openai` package, which is installed by default when you install CrewAI. You can change this default LLM to a different model or API by setting the `OPENAI_MODEL_NAME` environment variable. This straightforward process allows you to harness the power of different OpenAI models, enhancing the flexibility and capabilities of your CrewAI implementation.
 ```python
 # Required
 os.environ["OPENAI_MODEL_NAME"]="gpt-4-0125-preview"
@@ -45,30 +37,27 @@ example_agent = Agent(
   verbose=True
 )
 ```
-## Ollama Local Integration
-Ollama is preferred for local LLM integration, offering customization and privacy benefits. To integrate Ollama with CrewAI, you will need the `langchain-ollama` package. You can then set the following environment variables to connect to your Ollama instance running locally on port 11434.
+## Ollama Integration
+Ollama is preferred for local LLM integration, offering customization and privacy benefits. To integrate Ollama with CrewAI, set the appropriate environment variables as shown below.

 ### Setting Up Ollama
 - **Environment Variables Configuration**: To integrate Ollama, set the following environment variables:
 ```sh
-OPENAI_API_BASE='http://localhost:11434'
-OPENAI_MODEL_NAME='llama2'  # Adjust based on available model
-OPENAI_API_KEY=''
+os.environ[OPENAI_API_BASE]='http://localhost:11434'
+os.environ[OPENAI_MODEL_NAME]='llama2'  # Adjust based on available model
+os.environ[OPENAI_API_KEY]=''  # No API Key required for Ollama
 ```

-## Ollama Integration (ex. for using Llama 2 locally)
-1. [Download Ollama](https://ollama.com/download).
-2. After setting up the Ollama, Pull the Llama2 by typing following lines into the terminal ```ollama pull llama2```.
-3. Enjoy your free Llama2 model that powered up by excellent agents from crewai.
+## Ollama Integration Step by Step (ex. for using Llama 3.1 8B locally)
+1. [Download and install Ollama](https://ollama.com/download).
+2. After setting up the Ollama, Pull the Llama3.1 8B model by typing following lines into your terminal ```ollama run llama3.1```.
+3. Llama3.1 should now be served locally on `http://localhost:11434`
 ```
 from crewai import Agent, Task, Crew
-from langchain.llms import Ollama
+from langchain_ollama import ChatOllama
+import os
+os.environ["OPENAI_API_KEY"] = "NA"

 llm = Ollama(
-    model = "llama2",
+    model = "llama3.1",
     base_url = "http://localhost:11434")

 general_agent = Agent(role = "Math Professor",
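Note that the replacement lines import `ChatOllama` while the unchanged call site still reads `Ollama(`; a version of the snippet with the import and constructor aligned, assuming `ChatOllama` accepts the same `model`/`base_url` parameters:

```python
import os

from langchain_ollama import ChatOllama

os.environ["OPENAI_API_KEY"] = "NA"  # Ollama needs no real key

# Same parameters as the Ollama class used before
llm = ChatOllama(model="llama3.1", base_url="http://localhost:11434")
```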
@@ -85,7 +74,7 @@ task = Task(description="""what is 3 + 5""",
 crew = Crew(
     agents=[general_agent],
     tasks=[task],
-    verbose=2
+    verbose=True
 )

 result = crew.kickoff()
@@ -98,13 +87,14 @@ There are a couple of different ways you can use HuggingFace to host your LLM.
 ### Your own HuggingFace endpoint
 ```python
-from langchain_community.llms import HuggingFaceEndpoint
+from langchain_huggingface import HuggingFaceEndpoint,

 llm = HuggingFaceEndpoint(
     endpoint_url="<YOUR_ENDPOINT_URL_HERE>",
     huggingfacehub_api_token="<HF_TOKEN_HERE>",
+    repo_id="microsoft/Phi-3-mini-4k-instruct",
     task="text-generation",
-    max_new_tokens=512
+    max_new_tokens=512,
+    do_sample=False,
+    repetition_penalty=1.03,
 )

 agent = Agent(
@@ -115,66 +105,50 @@ agent = Agent(
 )
 ```

-### From HuggingFaceHub endpoint
-```python
-from langchain_community.llms import HuggingFaceHub
-
-llm = HuggingFaceHub(
-    repo_id="HuggingFaceH4/zephyr-7b-beta",
-    huggingfacehub_api_token="<HF_TOKEN_HERE>",
-    task="text-generation",
-)
-```
-
 ## OpenAI Compatible API Endpoints
 Switch between APIs and models seamlessly using environment variables, supporting platforms like FastChat, LM Studio, Groq, and Mistral AI.

 ### Configuration Examples
 #### FastChat
 ```sh
-OPENAI_API_BASE="http://localhost:8001/v1"
-OPENAI_MODEL_NAME='oh-2.5m7b-q51'
-OPENAI_API_KEY=NA
+os.environ[OPENAI_API_BASE]="http://localhost:8001/v1"
+os.environ[OPENAI_MODEL_NAME]='oh-2.5m7b-q51'
+os.environ[OPENAI_API_KEY]=NA
 ```

 #### LM Studio
 Launch [LM Studio](https://lmstudio.ai) and go to the Server tab. Then select a model from the dropdown menu and wait for it to load. Once it's loaded, click the green Start Server button and use the URL, port, and API key that's shown (you can modify them). Below is an example of the default settings as of LM Studio 0.2.19:
 ```sh
-OPENAI_API_BASE="http://localhost:1234/v1"
-OPENAI_API_KEY="lm-studio"
+os.environ[OPENAI_API_BASE]="http://localhost:1234/v1"
+os.environ[OPENAI_API_KEY]="lm-studio"
 ```

 #### Groq API
 ```sh
-OPENAI_API_KEY=your-groq-api-key
-OPENAI_MODEL_NAME='llama3-8b-8192'
-OPENAI_API_BASE=https://api.groq.com/openai/v1
+os.environ[OPENAI_API_KEY]=your-groq-api-key
+os.environ[OPENAI_MODEL_NAME]='llama3-8b-8192'
+os.environ[OPENAI_API_BASE]=https://api.groq.com/openai/v1
 ```

 #### Mistral API
 ```sh
-OPENAI_API_KEY=your-mistral-api-key
-OPENAI_API_BASE=https://api.mistral.ai/v1
-OPENAI_MODEL_NAME="mistral-small"
+os.environ[OPENAI_API_KEY]=your-mistral-api-key
+os.environ[OPENAI_API_BASE]=https://api.mistral.ai/v1
+os.environ[OPENAI_MODEL_NAME]="mistral-small"
 ```

 ### Solar
-```python
-from langchain_community.chat_models.solar import SolarChat
-# Initialize language model
-os.environ["SOLAR_API_KEY"] = "your-solar-api-key"
-llm = SolarChat(max_tokens=1024)
-```
+```sh
+os.environ[SOLAR_API_BASE]="https://api.upstage.ai/v1/solar"
+os.environ[SOLAR_API_KEY]="your-solar-api-key"
+```

 # Free developer API key available here: https://console.upstage.ai/services/solar
 # Langchain Example: https://github.com/langchain-ai/langchain/pull/18556
 ```

 ### text-gen-web-ui
 ```sh
 OPENAI_API_BASE=http://localhost:5000/v1
 OPENAI_MODEL_NAME=NA
 OPENAI_API_KEY=NA
 ```

 ### Cohere
 ```python
@@ -190,10 +164,11 @@ llm = ChatCohere()
 ### Azure Open AI Configuration
 For Azure OpenAI API integration, set the following environment variables:
 ```sh
-AZURE_OPENAI_VERSION="2022-12-01"
-AZURE_OPENAI_DEPLOYMENT=""
-AZURE_OPENAI_ENDPOINT=""
-AZURE_OPENAI_KEY=""
+os.environ[AZURE_OPENAI_DEPLOYMENT] = "You deployment"
+os.environ["OPENAI_API_VERSION"] = "2023-12-01-preview"
+os.environ["AZURE_OPENAI_ENDPOINT"] = "Your Endpoint"
+os.environ["AZURE_OPENAI_API_KEY"] = "<Your API Key>"
 ```

 ### Example Agent with Azure LLM
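A sketch tying the new environment variables to the `azure_llm` object used in the next hunk; the `AzureChatOpenAI` import path and `azure_deployment` parameter are assumptions, not taken from this diff:

```python
import os

from langchain_openai import AzureChatOpenAI  # assumed import path

os.environ["OPENAI_API_VERSION"] = "2023-12-01-preview"
os.environ["AZURE_OPENAI_ENDPOINT"] = "Your Endpoint"
os.environ["AZURE_OPENAI_API_KEY"] = "<Your API Key>"

# Deployment name is illustrative
azure_llm = AzureChatOpenAI(azure_deployment="your-deployment-name")
```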
@@ -216,6 +191,5 @@ azure_agent = Agent(
     llm=azure_llm
 )
 ```
-
 ## Conclusion
 Integrating CrewAI with different LLMs expands the framework's versatility, allowing for customized, efficient AI solutions across various domains and platforms.
@@ -1,9 +1,9 @@
 # CodeInterpreterTool

 ## Description
-This tool is used to give the Agent the ability to run code (Python3) from the code generated by the Agent itself. The code is executed in a sandboxed environment, so it is safe to run any code.
+This tool enables the Agent to execute Python 3 code that it has generated autonomously. The code is run in a secure, isolated environment, ensuring safety regardless of the content.

-It is incredible useful since it allows the Agent to generate code, run it in the same environment, get the result and use it to make decisions.
+This functionality is particularly valuable as it allows the Agent to create code, execute it within the same ecosystem, obtain the results, and utilize that information to inform subsequent decisions and actions.

 ## Requirements
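For context, a minimal sketch of attaching the tool this page describes to an agent (`CodeInterpreterTool` is the class documented here; the agent fields are illustrative):

```python
from crewai import Agent
from crewai_tools import CodeInterpreterTool

coder = Agent(
    role="Data Analyst",
    goal="Answer questions by writing and running small Python snippets",
    backstory="A pragmatic analyst who verifies every claim with code.",
    tools=[CodeInterpreterTool()],  # lets the agent execute the code it generates
)
```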
@@ -2,7 +2,7 @@
 ## Description

-This tools is a wrapper around the composio toolset and gives your agent access to a wide variety of tools from the composio SDK.
+This tools is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the composio SDK.

 ## Installation
@@ -19,7 +19,7 @@ after the installation is complete, either run `composio login` or export your c
 The following example demonstrates how to initialize the tool and execute a github action:

-1. Initialize toolset
+1. Initialize Composio tools

 ```python
 from composio import App
@@ -40,10 +40,9 @@ The `SerperDevTool` comes with several parameters that will be passed to the API
 - **locale**: Optional. Specify the locale for the search results.
 - **n_results**: Number of search results to return. Default is `10`.

-The values for `country`, `location`, `lovale` and `search_url` can be found on the [Serper Playground](https://serper.dev/playground).
+The values for `country`, `location`, `locale` and `search_url` can be found on the [Serper Playground](https://serper.dev/playground).

 ## Example with Parameters

 Here is an example demonstrating how to use the tool with additional parameters:

 ```python
@@ -60,7 +60,7 @@ crewai = "crewai.cli.cli:crewai"
 [tool.mypy]
 ignore_missing_imports = true
 disable_error_code = 'import-untyped'
-exclude = ["cli/templates/main.py", "cli/templates/crew.py"]
+exclude = ["cli/templates"]

 [build-system]
 requires = ["poetry-core"]
@@ -1,6 +1,7 @@
 from crewai.agent import Agent
 from crewai.crew import Crew
+from crewai.pipeline import Pipeline
 from crewai.process import Process
 from crewai.task import Task

-__all__ = ["Agent", "Crew", "Process", "Task"]
+__all__ = ["Agent", "Crew", "Process", "Task", "Pipeline"]
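With this export in place, `Pipeline` can be imported from the package root alongside the existing names:

```python
from crewai import Agent, Crew, Pipeline, Process, Task
```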
@@ -158,7 +158,7 @@ class BaseAgent(ABC, BaseModel):
     @model_validator(mode="after")
     def set_private_attrs(self):
         """Set private attributes."""
-        self._logger = Logger(self.verbose)
+        self._logger = Logger(verbose=self.verbose)
         if self.max_rpm and not self._rpm_controller:
             self._rpm_controller = RPMController(
                 max_rpm=self.max_rpm, logger=self._logger
@@ -1,6 +1,8 @@
 import threading
 import time
-from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
+from typing import Any, Dict, Iterator, List, Literal, Optional, Tuple, Union
+
+import click

 from langchain.agents import AgentExecutor
 from langchain.agents.agent import ExceptionTool
@@ -11,12 +13,21 @@ from langchain_core.tools import BaseTool
 from langchain_core.utils.input import get_color_mapping
 from pydantic import InstanceOf

+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.chains.summarize import load_summarize_chain
+
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.tools_handler import ToolsHandler
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
 from crewai.utilities import I18N
+from crewai.utilities.constants import TRAINING_DATA_FILE
+from crewai.utilities.exceptions.context_window_exceeding_exception import (
+    LLMContextLengthExceededException,
+)
 from crewai.utilities.training_handler import CrewTrainingHandler
+from crewai.utilities.logger import Logger


 class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
@@ -40,6 +51,8 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
     system_template: Optional[str] = None
     prompt_template: Optional[str] = None
     response_template: Optional[str] = None
+    _logger: Logger = Logger()
+    _fit_context_window_strategy: Optional[Literal["summarize"]] = "summarize"

     def _call(
         self,
@@ -131,7 +144,7 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
             intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)

             # Call the LLM to see what to do.
-            output = self.agent.plan(  # type: ignore # Incompatible types in assignment (expression has type "AgentAction | AgentFinish | list[AgentAction]", variable has type "AgentAction")
+            output = self.agent.plan(
                 intermediate_steps,
                 callbacks=run_manager.get_child() if run_manager else None,
                 **inputs,
@@ -185,6 +198,27 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
                 yield AgentStep(action=output, observation=observation)
             return

+        except Exception as e:
+            if LLMContextLengthExceededException(str(e))._is_context_limit_error(
+                str(e)
+            ):
+                output = self._handle_context_length_error(
+                    intermediate_steps, run_manager, inputs
+                )
+
+                if isinstance(output, AgentFinish):
+                    yield output
+                elif isinstance(output, list):
+                    for step in output:
+                        yield step
+                return
+
+            yield AgentStep(
+                action=AgentAction("_Exception", str(e), str(e)),
+                observation=str(e),
+            )
+            return
+
         # If the tool chosen is the finishing tool, then we end and return.
         if isinstance(output, AgentFinish):
             if self.should_ask_for_human_input:
@@ -235,6 +269,7 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
             agent=self.crew_agent,
             action=agent_action,
         )
+
         tool_calling = tool_usage.parse(agent_action.log)

         if isinstance(tool_calling, ToolUsageErrorException):
@@ -280,3 +315,91 @@ class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
             CrewTrainingHandler(TRAINING_DATA_FILE).append(
                 self.crew._train_iteration, agent_id, training_data
             )
+
+    def _handle_context_length(
+        self, intermediate_steps: List[Tuple[AgentAction, str]]
+    ) -> List[Tuple[AgentAction, str]]:
+        text = intermediate_steps[0][1]
+        original_action = intermediate_steps[0][0]
+
+        text_splitter = RecursiveCharacterTextSplitter(
+            separators=["\n\n", "\n"],
+            chunk_size=8000,
+            chunk_overlap=500,
+        )
+
+        if self._fit_context_window_strategy == "summarize":
+            docs = text_splitter.create_documents([text])
+            self._logger.log(
+                "debug",
+                "Summarizing Content, it is recommended to use a RAG tool",
+                color="bold_blue",
+            )
+            summarize_chain = load_summarize_chain(
+                self.llm, chain_type="map_reduce", verbose=True
+            )
+            summarized_docs = []
+            for doc in docs:
+                summary = summarize_chain.invoke(
+                    {"input_documents": [doc]}, return_only_outputs=True
+                )
+
+                summarized_docs.append(summary["output_text"])
+
+            formatted_results = "\n\n".join(summarized_docs)
+            summary_step = AgentStep(
+                action=AgentAction(
+                    tool=original_action.tool,
+                    tool_input=original_action.tool_input,
+                    log=original_action.log,
+                ),
+                observation=formatted_results,
+            )
+            summary_tuple = (summary_step.action, summary_step.observation)
+            return [summary_tuple]
+
+        return intermediate_steps
+
+    def _handle_context_length_error(
+        self,
+        intermediate_steps: List[Tuple[AgentAction, str]],
+        run_manager: Optional[CallbackManagerForChainRun],
+        inputs: Dict[str, str],
+    ) -> Union[AgentFinish, List[AgentStep]]:
+        self._logger.log(
+            "debug",
+            "Context length exceeded. Asking user if they want to use summarize prompt to fit, this will reduce context length.",
+            color="yellow",
+        )
+        user_choice = click.confirm(
+            "Context length exceeded. Do you want to summarize the text to fit models context window?"
+        )
+        if user_choice:
+            self._logger.log(
+                "debug",
+                "Context length exceeded. Using summarize prompt to fit, this will reduce context length.",
+                color="bold_blue",
+            )
+            intermediate_steps = self._handle_context_length(intermediate_steps)
+
+            output = self.agent.plan(
+                intermediate_steps,
+                callbacks=run_manager.get_child() if run_manager else None,
+                **inputs,
+            )
+
+            if isinstance(output, AgentFinish):
+                return output
+            elif isinstance(output, AgentAction):
+                return [AgentStep(action=output, observation=None)]
+            else:
+                return [AgentStep(action=action, observation=None) for action in output]
+        else:
+            self._logger.log(
+                "debug",
+                "Context length exceeded. Consider using smaller text or RAG tools from crewai_tools.",
+                color="red",
+            )
+            raise SystemExit(
+                "Context length exceeded and user opted not to summarize. Consider using smaller text or RAG tools from crewai_tools."
+            )
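A standalone sketch of the summarization strategy the new `_handle_context_length` method applies, using the same LangChain pieces (splitter settings copied from the code above; the `llm` argument is whatever model object the executor holds):

```python
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter


def summarize_long_text(llm, text: str) -> str:
    # Split the oversized observation into overlapping chunks
    splitter = RecursiveCharacterTextSplitter(
        separators=["\n\n", "\n"], chunk_size=8000, chunk_overlap=500
    )
    docs = splitter.create_documents([text])

    # Summarize each chunk with a map_reduce chain and rejoin the results
    chain = load_summarize_chain(llm, chain_type="map_reduce")
    parts = [
        chain.invoke({"input_documents": [doc]}, return_only_outputs=True)["output_text"]
        for doc in docs
    ]
    return "\n\n".join(parts)
```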
@@ -1,14 +1,16 @@
 import click
 import pkg_resources

+from crewai.cli.create_crew import create_crew
+from crewai.cli.create_pipeline import create_pipeline
 from crewai.memory.storage.kickoff_task_outputs_storage import (
     KickoffTaskOutputsSQLiteStorage,
 )

-from .create_crew import create_crew
 from .evaluate_crew import evaluate_crew
 from .replay_from_task import replay_task_command
 from .reset_memories_command import reset_memories_command
+from .run_crew import run_crew
 from .train_crew import train_crew
@@ -18,10 +20,19 @@ def crewai():
 @crewai.command()
-@click.argument("project_name")
-def create(project_name):
-    """Create a new crew."""
-    create_crew(project_name)
+@click.argument("type", type=click.Choice(["crew", "pipeline"]))
+@click.argument("name")
+@click.option(
+    "--router", is_flag=True, help="Create a pipeline with router functionality"
+)
+def create(type, name, router):
+    """Create a new crew or pipeline."""
+    if type == "crew":
+        create_crew(name)
+    elif type == "pipeline":
+        create_pipeline(name, router)
+    else:
+        click.secho("Error: Invalid type. Must be 'crew' or 'pipeline'.", fg="red")


 @crewai.command()
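A quick way to exercise the reworked command without installing the CLI is Click's test runner; the entry point `crewai.cli.cli:crewai` matches the `[tool.poetry.scripts]` line shown earlier in this compare (the project name is illustrative):

```python
from click.testing import CliRunner

from crewai.cli.cli import crewai

runner = CliRunner()
result = runner.invoke(crewai, ["create", "pipeline", "demo_pipeline", "--router"])
print(result.output)
```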
@@ -49,10 +60,17 @@ def version(tools):
     default=5,
     help="Number of iterations to train the crew",
 )
-def train(n_iterations: int):
+@click.option(
+    "-f",
+    "--filename",
+    type=str,
+    default="trained_agents_data.pkl",
+    help="Path to a custom file for training",
+)
+def train(n_iterations: int, filename: str):
     """Train the crew."""
-    click.echo(f"Training the crew for {n_iterations} iterations")
-    train_crew(n_iterations)
+    click.echo(f"Training the Crew for {n_iterations} iterations")
+    train_crew(n_iterations, filename)


 @crewai.command()
@@ -147,5 +165,12 @@ def test(n_iterations: int, model: str):
     evaluate_crew(n_iterations, model)


+@crewai.command()
+def run():
+    """Run the crew."""
+    click.echo("Running the crew")
+    run_crew()
+
+
 if __name__ == "__main__":
     crewai()
@@ -1,25 +1,35 @@
 import os
 from pathlib import Path

 import click

+from crewai.cli.utils import copy_template
+

-def create_crew(name):
+def create_crew(name, parent_folder=None):
     """Create a new crew."""
     folder_name = name.replace(" ", "_").replace("-", "_").lower()
     class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")

-    click.secho(f"Creating folder {folder_name}...", fg="green", bold=True)
+    if parent_folder:
+        folder_path = Path(parent_folder) / folder_name
+    else:
+        folder_path = Path(folder_name)
+
+    click.secho(
+        f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
+        fg="green",
+        bold=True,
+    )

-    if not os.path.exists(folder_name):
-        os.mkdir(folder_name)
-        os.mkdir(folder_name + "/tests")
-        os.mkdir(folder_name + "/src")
-        os.mkdir(folder_name + f"/src/{folder_name}")
-        os.mkdir(folder_name + f"/src/{folder_name}/tools")
-        os.mkdir(folder_name + f"/src/{folder_name}/config")
-        with open(folder_name + "/.env", "w") as file:
-            file.write("OPENAI_API_KEY=YOUR_API_KEY")
+    if not folder_path.exists():
+        folder_path.mkdir(parents=True)
+        (folder_path / "tests").mkdir(exist_ok=True)
+        if not parent_folder:
+            (folder_path / "src" / folder_name).mkdir(parents=True)
+            (folder_path / "src" / folder_name / "tools").mkdir(parents=True)
+            (folder_path / "src" / folder_name / "config").mkdir(parents=True)
+        with open(folder_path / ".env", "w") as file:
+            file.write("OPENAI_API_KEY=YOUR_API_KEY")
     else:
         click.secho(
             f"\tFolder {folder_name} already exists. Please choose a different name.",
@@ -28,53 +38,34 @@ def create_crew(name):
         return

     package_dir = Path(__file__).parent
-    templates_dir = package_dir / "templates"
+    templates_dir = package_dir / "templates" / "crew"

     # List of template files to copy
-    root_template_files = [
-        ".gitignore",
-        "pyproject.toml",
-        "README.md",
-    ]
+    root_template_files = (
+        [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
+    )
     tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
     config_template_files = ["config/agents.yaml", "config/tasks.yaml"]
-    src_template_files = ["__init__.py", "main.py", "crew.py"]
+    src_template_files = (
+        ["__init__.py", "main.py", "crew.py"] if not parent_folder else ["crew.py"]
+    )

     for file_name in root_template_files:
         src_file = templates_dir / file_name
-        dst_file = Path(folder_name) / file_name
+        dst_file = folder_path / file_name
         copy_template(src_file, dst_file, name, class_name, folder_name)

+    src_folder = folder_path / "src" / folder_name if not parent_folder else folder_path
+
     for file_name in src_template_files:
         src_file = templates_dir / file_name
-        dst_file = Path(folder_name) / "src" / folder_name / file_name
+        dst_file = src_folder / file_name
         copy_template(src_file, dst_file, name, class_name, folder_name)

-    for file_name in tools_template_files:
-        src_file = templates_dir / file_name
-        dst_file = Path(folder_name) / "src" / folder_name / file_name
-        copy_template(src_file, dst_file, name, class_name, folder_name)
-
-    for file_name in config_template_files:
-        src_file = templates_dir / file_name
-        dst_file = Path(folder_name) / "src" / folder_name / file_name
-        copy_template(src_file, dst_file, name, class_name, folder_name)
+    if not parent_folder:
+        for file_name in tools_template_files + config_template_files:
+            src_file = templates_dir / file_name
+            dst_file = src_folder / file_name
+            copy_template(src_file, dst_file, name, class_name, folder_name)

     click.secho(f"Crew {name} created successfully!", fg="green", bold=True)

-
-def copy_template(src, dst, name, class_name, folder_name):
-    """Copy a file from src to dst."""
-    with open(src, "r") as file:
-        content = file.read()
-
-    # Interpolate the content
-    content = content.replace("{{name}}", name)
-    content = content.replace("{{crew_name}}", class_name)
-    content = content.replace("{{folder_name}}", folder_name)
-
-    # Write the interpolated content to the new file
-    with open(dst, "w") as file:
-        file.write(content)
-
-    click.secho(f" - Created {dst}", fg="green")
107 src/crewai/cli/create_pipeline.py Normal file
@@ -0,0 +1,107 @@
+import shutil
+from pathlib import Path
+
+import click
+
+
+def create_pipeline(name, router=False):
+    """Create a new pipeline project."""
+    folder_name = name.replace(" ", "_").replace("-", "_").lower()
+    class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")
+
+    click.secho(f"Creating pipeline {folder_name}...", fg="green", bold=True)
+
+    project_root = Path(folder_name)
+    if project_root.exists():
+        click.secho(f"Error: Folder {folder_name} already exists.", fg="red")
+        return
+
+    # Create directory structure
+    (project_root / "src" / folder_name).mkdir(parents=True)
+    (project_root / "src" / folder_name / "pipelines").mkdir(parents=True)
+    (project_root / "src" / folder_name / "crews").mkdir(parents=True)
+    (project_root / "src" / folder_name / "tools").mkdir(parents=True)
+    (project_root / "tests").mkdir(exist_ok=True)
+
+    # Create .env file
+    with open(project_root / ".env", "w") as file:
+        file.write("OPENAI_API_KEY=YOUR_API_KEY")
+
+    package_dir = Path(__file__).parent
+    template_folder = "pipeline_router" if router else "pipeline"
+    templates_dir = package_dir / "templates" / template_folder
+
+    # List of template files to copy
+    root_template_files = [".gitignore", "pyproject.toml", "README.md"]
+    src_template_files = ["__init__.py", "main.py"]
+    tools_template_files = ["tools/__init__.py", "tools/custom_tool.py"]
+
+    if router:
+        crew_folders = [
+            "classifier_crew",
+            "normal_crew",
+            "urgent_crew",
+        ]
+        pipelines_folders = [
+            "pipelines/__init__.py",
+            "pipelines/pipeline_classifier.py",
+            "pipelines/pipeline_normal.py",
+            "pipelines/pipeline_urgent.py",
+        ]
+    else:
+        crew_folders = [
+            "research_crew",
+            "write_linkedin_crew",
+            "write_x_crew",
+        ]
+        pipelines_folders = ["pipelines/__init__.py", "pipelines/pipeline.py"]
+
+    def process_file(src_file, dst_file):
+        with open(src_file, "r") as file:
+            content = file.read()
+
+        content = content.replace("{{name}}", name)
+        content = content.replace("{{crew_name}}", class_name)
+        content = content.replace("{{folder_name}}", folder_name)
+        content = content.replace("{{pipeline_name}}", class_name)
+
+        with open(dst_file, "w") as file:
+            file.write(content)
+
+    # Copy and process root template files
+    for file_name in root_template_files:
+        src_file = templates_dir / file_name
+        dst_file = project_root / file_name
+        process_file(src_file, dst_file)
+
+    # Copy and process src template files
+    for file_name in src_template_files:
+        src_file = templates_dir / file_name
+        dst_file = project_root / "src" / folder_name / file_name
+        process_file(src_file, dst_file)
+
+    # Copy tools files
+    for file_name in tools_template_files:
+        src_file = templates_dir / file_name
+        dst_file = project_root / "src" / folder_name / file_name
+        shutil.copy(src_file, dst_file)
+
+    # Copy pipelines folders
+    for file_name in pipelines_folders:
+        src_file = templates_dir / file_name
+        dst_file = project_root / "src" / folder_name / file_name
+        process_file(src_file, dst_file)
+
+    # Copy crew folders
+    for crew_folder in crew_folders:
+        src_crew_folder = templates_dir / "crews" / crew_folder
+        dst_crew_folder = project_root / "src" / folder_name / "crews" / crew_folder
+        if src_crew_folder.exists():
+            shutil.copytree(src_crew_folder, dst_crew_folder)
+        else:
+            click.secho(
+                f"Warning: Crew folder {crew_folder} not found in template.",
+                fg="yellow",
+            )
+
+    click.secho(f"Pipeline {name} created successfully!", fg="green", bold=True)
23 src/crewai/cli/run_crew.py Normal file
@@ -0,0 +1,23 @@
+import subprocess
+
+import click
+
+
+def run_crew() -> None:
+    """
+    Run the crew by running a command in the Poetry environment.
+    """
+    command = ["poetry", "run", "run_crew"]
+
+    try:
+        result = subprocess.run(command, capture_output=False, text=True, check=True)
+
+        if result.stderr:
+            click.echo(result.stderr, err=True)
+
+    except subprocess.CalledProcessError as e:
+        click.echo(f"An error occurred while running the crew: {e}", err=True)
+        click.echo(e.output, err=True)
+
+    except Exception as e:
+        click.echo(f"An unexpected error occurred: {e}", err=True)
61 src/crewai/cli/templates/crew/README.md Normal file
@@ -0,0 +1,61 @@
+# {{crew_name}} Crew
+
+Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.
+
+## Installation
+
+Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.
+
+First, if you haven't already, install Poetry:
+
+```bash
+pip install poetry
+```
+
+Next, navigate to your project directory and install the dependencies:
+
+1. First lock the dependencies and then install them:
+```bash
+poetry lock
+```
+```bash
+poetry install
+```
+### Customizing
+
+**Add your `OPENAI_API_KEY` into the `.env` file**
+
+- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
+- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
+- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools and specific args
+- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks
+
+## Running the Project
+
+To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
+
+```bash
+$ crewai run
+```
+or
+```bash
+poetry run {{folder_name}}
+```
+
+This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
+
+This example, unmodified, will run the create a `report.md` file with the output of a research on LLMs in the root folder.
+
+## Understanding Your Crew
+
+The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.
+
+## Support
+
+For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI.
+- Visit our [documentation](https://docs.crewai.com)
+- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
+- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
+- [Chat with our docs](https://chatg.pt/DWjSBZn)
+
+Let's create wonders together with the power and simplicity of crewAI.
@@ -48,6 +48,6 @@ class {{crew_name}}Crew():
             agents=self.agents, # Automatically created by the @agent decorator
             tasks=self.tasks, # Automatically created by the @task decorator
             process=Process.sequential,
-            verbose=2,
+            verbose=True,
             # process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
         )
@@ -25,7 +25,7 @@ def train():
         "topic": "AI LLMs"
     }
     try:
-        {{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), inputs=inputs)
+        {{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)

     except Exception as e:
         raise Exception(f"An error occurred while training the crew: {e}")
@@ -10,6 +10,7 @@ crewai = { extras = ["tools"], version = "^0.46.0" }
 [tool.poetry.scripts]
 {{folder_name}} = "{{folder_name}}.main:run"
+run_crew = "{{folder_name}}.main:run"
 train = "{{folder_name}}.main:train"
 replay = "{{folder_name}}.main:replay"
 test = "{{folder_name}}.main:test"
0 src/crewai/cli/templates/crew/tools/__init__.py Normal file
2 src/crewai/cli/templates/pipeline/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
+.env
+__pycache__/
0 src/crewai/cli/templates/pipeline/__init__.py Normal file
@@ -0,0 +1,19 @@
+researcher:
+  role: >
+    {topic} Senior Data Researcher
+  goal: >
+    Uncover cutting-edge developments in {topic}
+  backstory: >
+    You're a seasoned researcher with a knack for uncovering the latest
+    developments in {topic}. Known for your ability to find the most relevant
+    information and present it in a clear and concise manner.
+
+reporting_analyst:
+  role: >
+    {topic} Reporting Analyst
+  goal: >
+    Create detailed reports based on {topic} data analysis and research findings
+  backstory: >
+    You're a meticulous analyst with a keen eye for detail. You're known for
+    your ability to turn complex data into clear and concise reports, making
+    it easy for others to understand and act on the information you provide.
@@ -0,0 +1,16 @@
+research_task:
+  description: >
+    Conduct a thorough research about {topic}
+    Make sure you find any interesting and relevant information given
+    the current year is 2024.
+  expected_output: >
+    A list with 10 bullet points of the most relevant information about {topic}
+  agent: researcher
+
+reporting_task:
+  description: >
+    Review the context you got and expand each topic into a full section for a report.
+    Make sure the report is detailed and contains any and all relevant information.
+  expected_output: >
+    A fully fledge reports with a title, mains topics, each with a full section of information.
+  agent: reporting_analyst
@@ -0,0 +1,58 @@
+from pydantic import BaseModel
+
+from crewai import Agent, Crew, Process, Task
+from crewai.project import CrewBase, agent, crew, task
+
+# Uncomment the following line to use an example of a custom tool
+# from demo_pipeline.tools.custom_tool import MyCustomTool
+
+# Check our tools documentations for more information on how to use them
+# from crewai_tools import SerperDevTool
+
+
+class ResearchReport(BaseModel):
+    """Research Report"""
+
+    title: str
+    body: str
+
+
+@CrewBase
+class ResearchCrew():
+    """Research Crew"""
+
+    agents_config = 'config/agents.yaml'
+    tasks_config = 'config/tasks.yaml'
+
+    @agent
+    def researcher(self) -> Agent:
+        return Agent(
+            config=self.agents_config['researcher'],
+            verbose=True
+        )
+
+    @agent
+    def reporting_analyst(self) -> Agent:
+        return Agent(
+            config=self.agents_config['reporting_analyst'],
+            verbose=True
+        )
+
+    @task
+    def research_task(self) -> Task:
+        return Task(
+            config=self.tasks_config['research_task'],
+        )
+
+    @task
+    def reporting_task(self) -> Task:
+        return Task(
+            config=self.tasks_config['reporting_task'],
+            output_pydantic=ResearchReport
+        )
+
+    @crew
+    def crew(self) -> Crew:
+        """Creates the Research Crew"""
+        return Crew(
+            agents=self.agents,  # Automatically created by the @agent decorator
+            tasks=self.tasks,  # Automatically created by the @task decorator
+            process=Process.sequential,
+            verbose=True,
+        )
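A hedged sketch of consuming this template's typed output directly, using the `pydantic` accessor crewAI exposes on crew results (the input dict mirrors the pipeline template's `main.py` later in this compare):

```python
# Run the research crew on its own and read the typed report
result = ResearchCrew().crew().kickoff(inputs={"topic": "AI wearables"})
report = result.pydantic  # a ResearchReport instance
print(report.title)
print(report.body)
```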
@@ -0,0 +1,51 @@
+from crewai import Agent, Crew, Process, Task
+from crewai.project import CrewBase, agent, crew, task
+
+# Uncomment the following line to use an example of a custom tool
+# from {{folder_name}}.tools.custom_tool import MyCustomTool
+
+# Check our tools documentations for more information on how to use them
+# from crewai_tools import SerperDevTool
+
+
+@CrewBase
+class WriteLinkedInCrew():
+    """Research Crew"""
+
+    agents_config = 'config/agents.yaml'
+    tasks_config = 'config/tasks.yaml'
+
+    @agent
+    def researcher(self) -> Agent:
+        return Agent(
+            config=self.agents_config['researcher'],
+            verbose=True
+        )
+
+    @agent
+    def reporting_analyst(self) -> Agent:
+        return Agent(
+            config=self.agents_config['reporting_analyst'],
+            verbose=True
+        )
+
+    @task
+    def research_task(self) -> Task:
+        return Task(
+            config=self.tasks_config['research_task'],
+        )
+
+    @task
+    def reporting_task(self) -> Task:
+        return Task(
+            config=self.tasks_config['reporting_task'],
+            output_file='report.md'
+        )
+
+    @crew
+    def crew(self) -> Crew:
+        """Creates the {{crew_name}} crew"""
+        return Crew(
+            agents=self.agents,  # Automatically created by the @agent decorator
+            tasks=self.tasks,  # Automatically created by the @task decorator
+            process=Process.sequential,
+            verbose=True,
+        )
@@ -0,0 +1,14 @@
+x_writer_agent:
+  role: >
+    Expert Social Media Content Creator specializing in short form written content
+  goal: >
+    Create viral-worthy, engaging short form posts that distill complex {topic} information
+    into compelling 280-character messages
+  backstory: >
+    You're a social media virtuoso with a particular talent for short form content. Your posts
+    consistently go viral due to your ability to craft hooks that stop users mid-scroll.
+    You've studied the techniques of social media masters like Justin Welsh, Dickie Bush,
+    Nicolas Cole, and Shaan Puri, incorporating their best practices into your own unique style.
+    Your superpower is taking intricate {topic} concepts and transforming them into
+    bite-sized, shareable content that resonates with a wide audience. You know exactly
+    how to structure a post for maximum impact and engagement.
@@ -0,0 +1,22 @@
+write_x_task:
+  description: >
+    Using the research report provided, create an engaging short form post about {topic}.
+    Your post should have a great hook, summarize key points, and be structured for easy
+    consumption on a digital platform. The post must be under 280 characters.
+    Follow these guidelines:
+    1. Start with an attention-grabbing hook
+    2. Condense the main insights from the research
+    3. Use clear, concise language
+    4. Include a call-to-action or thought-provoking question if space allows
+    5. Ensure the post flows well and is easy to read quickly
+
+    Here is the title of the research report you will be using
+
+    Title: {title}
+    Research:
+    {body}
+
+  expected_output: >
+    A compelling X post under 280 characters that effectively summarizes the key findings
+    about {topic}, starts with a strong hook, and is optimized for engagement on the platform.
+  agent: x_writer_agent
@@ -0,0 +1,36 @@
+from crewai import Agent, Crew, Process, Task
+from crewai.project import CrewBase, agent, crew, task
+
+# Uncomment the following line to use an example of a custom tool
+# from demo_pipeline.tools.custom_tool import MyCustomTool
+
+# Check our tools documentations for more information on how to use them
+# from crewai_tools import SerperDevTool
+
+
+@CrewBase
+class WriteXCrew:
+    """Research Crew"""
+
+    agents_config = "config/agents.yaml"
+    tasks_config = "config/tasks.yaml"
+
+    @agent
+    def x_writer_agent(self) -> Agent:
+        return Agent(config=self.agents_config["x_writer_agent"], verbose=True)
+
+    @task
+    def write_x_task(self) -> Task:
+        return Task(
+            config=self.tasks_config["write_x_task"],
+        )
+
+    @crew
+    def crew(self) -> Crew:
+        """Creates the Write X Crew"""
+        return Crew(
+            agents=self.agents,  # Automatically created by the @agent decorator
+            tasks=self.tasks,  # Automatically created by the @task decorator
+            process=Process.sequential,
+            verbose=True,
+        )
26 src/crewai/cli/templates/pipeline/main.py Normal file
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+import asyncio
+
+from {{folder_name}}.pipelines.pipeline import {{pipeline_name}}Pipeline
+
+
+async def run():
+    """
+    Run the pipeline.
+    """
+    inputs = [
+        {"topic": "AI wearables"},
+    ]
+    pipeline = {{pipeline_name}}Pipeline()
+    results = await pipeline.kickoff(inputs)
+
+    # Process and print results
+    for result in results:
+        print(f"Raw output: {result.raw}")
+        if result.json_dict:
+            print(f"JSON output: {result.json_dict}")
+        print("\n")
+
+
+def main():
+    asyncio.run(run())
+
+
+if __name__ == "__main__":
+    main()
87  src/crewai/cli/templates/pipeline/pipelines/pipeline.py  Normal file
@@ -0,0 +1,87 @@
"""
This pipeline file includes two different examples to demonstrate the flexibility of crewAI pipelines.

Example 1: Two-Stage Pipeline
-----------------------------
This pipeline consists of two crews:
1. ResearchCrew: Performs research on a given topic.
2. WriteXCrew: Generates an X (Twitter) post based on the research findings.

Key features:
- The ResearchCrew's final task uses output_json to store all research findings in a JSON object.
- This JSON object is then passed to the WriteXCrew, where tasks can access the research findings.

Example 2: Two-Stage Pipeline with Parallel Execution
-----------------------------------------------------
This pipeline consists of three crews:
1. ResearchCrew: Performs research on a given topic.
2. WriteXCrew and WriteLinkedInCrew: Run in parallel, using the research findings to generate posts for X and LinkedIn, respectively.

Key features:
- Demonstrates the ability to run multiple crews in parallel.
- Shows how to structure a pipeline with both sequential and parallel stages.

Usage:
- To switch between examples, comment/uncomment the respective code blocks below.
- Ensure that you have implemented all necessary crew classes (ResearchCrew, WriteXCrew, WriteLinkedInCrew) before running.
"""

# Common imports for both examples
from crewai import Pipeline

# Uncomment the crews you need for your chosen example
from ..crews.research_crew.research_crew import ResearchCrew
from ..crews.write_x_crew.write_x_crew import WriteXCrew

# from ..crews.write_linkedin_crew.write_linkedin_crew import WriteLinkedInCrew  # Uncomment for Example 2


# EXAMPLE 1: Two-Stage Pipeline
# -----------------------------
# Uncomment the following code block to use Example 1


class {{pipeline_name}}Pipeline:
    def __init__(self):
        # Initialize crews
        self.research_crew = ResearchCrew().crew()
        self.write_x_crew = WriteXCrew().crew()

    def create_pipeline(self):
        return Pipeline(
            stages=[
                self.research_crew,
                self.write_x_crew,
            ]
        )

    async def kickoff(self, inputs):
        pipeline = self.create_pipeline()
        results = await pipeline.kickoff(inputs)
        return results


# EXAMPLE 2: Two-Stage Pipeline with Parallel Execution
# -----------------------------------------------------
# Uncomment the following code block to use Example 2

# @PipelineBase
# class {{pipeline_name}}Pipeline:
#     def __init__(self):
#         # Initialize crews
#         self.research_crew = ResearchCrew().crew()
#         self.write_x_crew = WriteXCrew().crew()
#         self.write_linkedin_crew = WriteLinkedInCrew().crew()
#
#     @pipeline
#     def create_pipeline(self):
#         return Pipeline(
#             stages=[
#                 self.research_crew,
#                 [self.write_x_crew, self.write_linkedin_crew],  # Parallel execution
#             ]
#         )
#
#     async def run(self, inputs):
#         pipeline = self.create_pipeline()
#         results = await pipeline.kickoff(inputs)
#         return results
17  src/crewai/cli/templates/pipeline/pyproject.toml  Normal file
@@ -0,0 +1,17 @@
[tool.poetry]
name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = "^0.46.0" }
asyncio = "*"

[tool.poetry.scripts]
{{folder_name}} = "{{folder_name}}.main:main"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
0  src/crewai/cli/templates/pipeline/tools/__init__.py  Normal file
12  src/crewai/cli/templates/pipeline/tools/custom_tool.py  Normal file
@@ -0,0 +1,12 @@
from crewai_tools import BaseTool


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = (
        "Clear description for what this tool is useful for; your agent will need this information to use it."
    )

    def _run(self, argument: str) -> str:
        # Implementation goes here
        return "this is an example of a tool output, ignore it and move along."
2  src/crewai/cli/templates/pipeline_router/.gitignore  vendored  Normal file
@@ -0,0 +1,2 @@
.env
__pycache__/
57  src/crewai/cli/templates/pipeline_router/README.md  Normal file
@@ -0,0 +1,57 @@
# {{crew_name}} Crew

Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.

## Installation

Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.

First, if you haven't already, install Poetry:

```bash
pip install poetry
```

Next, navigate to your project directory, lock the dependencies, and then install them:

```bash
poetry lock
```
```bash
poetry install
```

### Customizing

**Add your `OPENAI_API_KEY` into the `.env` file**

- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools, and specific args
- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks

## Running the Project

To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:

```bash
poetry run {{folder_name}}
```

This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.

This example, unmodified, will create a `report.md` file in the root folder containing the output of a research run on LLMs.

## Understanding Your Crew

The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.

## Support

For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI:
- Visit our [documentation](https://docs.crewai.com)
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
- [Chat with our docs](https://chatg.pt/DWjSBZn)

Let's create wonders together with the power and simplicity of crewAI.
19  src/crewai/cli/templates/pipeline_router/config/agents.yaml  Normal file
@@ -0,0 +1,19 @@
researcher:
  role: >
    {topic} Senior Data Researcher
  goal: >
    Uncover cutting-edge developments in {topic}
  backstory: >
    You're a seasoned researcher with a knack for uncovering the latest
    developments in {topic}. Known for your ability to find the most relevant
    information and present it in a clear and concise manner.

reporting_analyst:
  role: >
    {topic} Reporting Analyst
  goal: >
    Create detailed reports based on {topic} data analysis and research findings
  backstory: >
    You're a meticulous analyst with a keen eye for detail. You're known for
    your ability to turn complex data into clear and concise reports, making
    it easy for others to understand and act on the information you provide.
17  src/crewai/cli/templates/pipeline_router/config/tasks.yaml  Normal file
@@ -0,0 +1,17 @@
research_task:
  description: >
    Conduct thorough research about {topic}.
    Make sure you find any interesting and relevant information given
    the current year is 2024.
  expected_output: >
    A list with 10 bullet points of the most relevant information about {topic}
  agent: researcher

reporting_task:
  description: >
    Review the context you got and expand each topic into a full section for a report.
    Make sure the report is detailed and contains any and all relevant information.
  expected_output: >
    A fully fledged report with the main topics, each with a full section of information.
    Formatted as markdown without '```'
  agent: reporting_analyst
@@ -0,0 +1,40 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from pydantic import BaseModel

# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool

# Check our tools documentation for more information on how to use them
# from crewai_tools import SerperDevTool


class UrgencyScore(BaseModel):
    urgency_score: int


@CrewBase
class ClassifierCrew:
    """Email Classifier Crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def classifier(self) -> Agent:
        return Agent(config=self.agents_config["classifier"], verbose=True)

    @task
    def classify_email(self) -> Task:
        return Task(
            config=self.tasks_config["classify_email"],
            output_pydantic=UrgencyScore,
        )

    @crew
    def crew(self) -> Crew:
        """Creates the Email Classifier Crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
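Because the task above sets `output_pydantic=UrgencyScore`, the crew's output carries a structured `urgency_score` that downstream routing can read. A sketch of that hand-off, assuming the standard `CrewOutput.pydantic` accessor:

```python
# Sketch (accessor assumed): read the structured classifier output.
result = ClassifierCrew().crew().kickoff(inputs={"email": "Server is down!"})
score = result.pydantic.urgency_score  # parsed into the UrgencyScore model
print("urgent" if score > 7 else "normal")
```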
@@ -0,0 +1,7 @@
classifier:
  role: >
    Email Classifier
  goal: >
    Classify the email: {email} as urgent or normal on a scale of 1 to 10, where 1 is not urgent and 10 is urgent. Return the urgency score only.
  backstory: >
    You are a highly efficient and experienced email classifier, trained to quickly assess and classify emails. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in triaging communications and maintaining smooth operations.
@@ -0,0 +1,7 @@
classify_email:
  description: >
    Classify the email: {email}
    as urgent or normal.
  expected_output: >
    Classify the email on a scale of 1 to 10, where 1 is not urgent and 10 is urgent. Return the urgency score only.
  agent: classifier
@@ -0,0 +1,7 @@
normal_handler:
  role: >
    Normal Email Processor
  goal: >
    Process normal emails and create an email to respond to the sender.
  backstory: >
    You are a highly efficient and experienced normal email handler, trained to quickly assess and respond to normal communications. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing normal situations and maintaining smooth operations.
@@ -0,0 +1,6 @@
normal_task:
  description: >
    Process and respond to a normal email quickly.
  expected_output: >
    An email response to the normal email.
  agent: normal_handler
@@ -0,0 +1,36 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool

# Check our tools documentation for more information on how to use them
# from crewai_tools import SerperDevTool


@CrewBase
class NormalCrew:
    """Normal Email Crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def normal_handler(self) -> Agent:
        return Agent(config=self.agents_config["normal_handler"], verbose=True)

    @task
    def normal_task(self) -> Task:
        return Task(
            config=self.tasks_config["normal_task"],
        )

    @crew
    def crew(self) -> Crew:
        """Creates the Normal Email Crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
@@ -0,0 +1,7 @@
urgent_handler:
  role: >
    Urgent Email Processor
  goal: >
    Process urgent emails and create an email to respond to the sender.
  backstory: >
    You are a highly efficient and experienced urgent email handler, trained to quickly assess and respond to time-sensitive communications. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing critical situations and maintaining smooth operations.
@@ -0,0 +1,6 @@
urgent_task:
  description: >
    Process and respond to an urgent email quickly.
  expected_output: >
    An email response to the urgent email.
  agent: urgent_handler
@@ -0,0 +1,36 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task

# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool

# Check our tools documentation for more information on how to use them
# from crewai_tools import SerperDevTool


@CrewBase
class UrgentCrew:
    """Urgent Email Crew"""

    agents_config = "config/agents.yaml"
    tasks_config = "config/tasks.yaml"

    @agent
    def urgent_handler(self) -> Agent:
        return Agent(config=self.agents_config["urgent_handler"], verbose=True)

    @task
    def urgent_task(self) -> Task:
        return Task(
            config=self.tasks_config["urgent_task"],
        )

    @crew
    def crew(self) -> Crew:
        """Creates the Urgent Email Crew"""
        return Crew(
            agents=self.agents,  # Automatically created by the @agent decorator
            tasks=self.tasks,  # Automatically created by the @task decorator
            process=Process.sequential,
            verbose=True,
        )
75  src/crewai/cli/templates/pipeline_router/main.py  Normal file
@@ -0,0 +1,75 @@
#!/usr/bin/env python
import asyncio

from crewai.routers.router import Route, Router

from {{folder_name}}.pipelines.pipeline_classifier import EmailClassifierPipeline
from {{folder_name}}.pipelines.pipeline_normal import NormalPipeline
from {{folder_name}}.pipelines.pipeline_urgent import UrgentPipeline


async def run():
    """
    Run the pipeline.
    """
    inputs = [
        {
            "email": """
            Subject: URGENT: Marketing Campaign Launch - Immediate Action Required

            Dear Team,

            I'm reaching out regarding our upcoming marketing campaign that requires your immediate attention and swift action. We're facing a critical deadline, and our success hinges on our ability to mobilize quickly.

            Key points:

            Campaign launch: 48 hours from now
            Target audience: 250,000 potential customers
            Expected ROI: 35% increase in Q3 sales

            What we need from you NOW:

            Final approval on creative assets (due in 3 hours)
            Confirmation of media placements (due by end of day)
            Last-minute budget allocation for paid social media push

            Our competitors are poised to launch similar campaigns, and we must act fast to maintain our market advantage. Delays could result in significant lost opportunities and potential revenue.

            Please prioritize this campaign above all other tasks. I'll be available for the next 24 hours to address any concerns or roadblocks.

            Let's make this happen!

            [Your Name]
            Marketing Director

            P.S. I'll be scheduling an emergency team meeting in 1 hour to discuss our action plan. Attendance is mandatory.
            """
        }
    ]

    pipeline_classifier = EmailClassifierPipeline().create_pipeline()
    pipeline_urgent = UrgentPipeline().create_pipeline()
    pipeline_normal = NormalPipeline().create_pipeline()

    router = Router(
        routes={
            "high_urgency": Route(
                condition=lambda x: x.get("urgency_score", 0) > 7,
                pipeline=pipeline_urgent,
            ),
            "low_urgency": Route(
                condition=lambda x: x.get("urgency_score", 0) <= 7,
                pipeline=pipeline_normal,
            ),
        },
        default=pipeline_normal,
    )

    pipeline = pipeline_classifier >> router

    results = await pipeline.kickoff(inputs)

    # Process and print results
    for result in results:
        print(f"Raw output: {result.raw}")
        if result.json_dict:
            print(f"JSON output: {result.json_dict}")
        print("\n")


def main():
    asyncio.run(run())


if __name__ == "__main__":
    main()
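Since `Router.route` (see the router diff further below) returns the matched pipeline together with the route name, the routing rules above can be smoke-tested in isolation; the input dict here is an assumed stand-in for the classifier's JSON output:

```python
# Sketch: exercising the routing rules directly (input dict assumed).
chosen_pipeline, route_name = router.route({"urgency_score": 9})
assert route_name == "high_urgency"
```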
@@ -0,0 +1,24 @@
from crewai import Pipeline
from crewai.project import PipelineBase
from ..crews.classifier_crew.classifier_crew import ClassifierCrew


@PipelineBase
class EmailClassifierPipeline:
    def __init__(self):
        # Initialize crews
        self.classifier_crew = ClassifierCrew().crew()

    def create_pipeline(self):
        return Pipeline(
            stages=[
                self.classifier_crew,
            ]
        )

    async def kickoff(self, inputs):
        pipeline = self.create_pipeline()
        results = await pipeline.kickoff(inputs)
        return results
@@ -0,0 +1,24 @@
from crewai import Pipeline
from crewai.project import PipelineBase
from ..crews.normal_crew.normal_crew import NormalCrew


@PipelineBase
class NormalPipeline:
    def __init__(self):
        # Initialize crews
        self.normal_crew = NormalCrew().crew()

    def create_pipeline(self):
        return Pipeline(
            stages=[
                self.normal_crew,
            ]
        )

    async def kickoff(self, inputs):
        pipeline = self.create_pipeline()
        results = await pipeline.kickoff(inputs)
        return results
@@ -0,0 +1,23 @@
from crewai import Pipeline
from crewai.project import PipelineBase
from ..crews.urgent_crew.urgent_crew import UrgentCrew


@PipelineBase
class UrgentPipeline:
    def __init__(self):
        # Initialize crews
        self.urgent_crew = UrgentCrew().crew()

    def create_pipeline(self):
        return Pipeline(
            stages=[
                self.urgent_crew,
            ]
        )

    async def kickoff(self, inputs):
        pipeline = self.create_pipeline()
        results = await pipeline.kickoff(inputs)
        return results
19  src/crewai/cli/templates/pipeline_router/pyproject.toml  Normal file
@@ -0,0 +1,19 @@
[tool.poetry]
name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = "^0.46.0" }

[tool.poetry.scripts]
{{folder_name}} = "{{folder_name}}.main:main"
train = "{{folder_name}}.main:train"
replay = "{{folder_name}}.main:replay"
test = "{{folder_name}}.main:test"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
@@ -0,0 +1,12 @@
from crewai_tools import BaseTool


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = (
        "Clear description for what this tool is useful for; your agent will need this information to use it."
    )

    def _run(self, argument: str) -> str:
        # Implementation goes here
        return "this is an example of a tool output, ignore it and move along."
@@ -3,19 +3,22 @@ import subprocess
 import click


-def train_crew(n_iterations: int) -> None:
+def train_crew(n_iterations: int, filename: str) -> None:
     """
     Train the crew by running a command in the Poetry environment.

     Args:
         n_iterations (int): The number of iterations to train the crew.
     """
-    command = ["poetry", "run", "train", str(n_iterations)]
+    command = ["poetry", "run", "train", str(n_iterations), filename]

     try:
         if n_iterations <= 0:
             raise ValueError("The number of iterations must be a positive integer.")

+        if not filename.endswith(".pkl"):
+            raise ValueError("The filename must end with .pkl")
+
         result = subprocess.run(command, capture_output=False, text=True, check=True)

         if result.stderr:
18  src/crewai/cli/utils.py  Normal file
@@ -0,0 +1,18 @@
import click


def copy_template(src, dst, name, class_name, folder_name):
    """Copy a template file from src to dst, interpolating placeholders."""
    with open(src, "r") as file:
        content = file.read()

    # Interpolate the content
    content = content.replace("{{name}}", name)
    content = content.replace("{{crew_name}}", class_name)
    content = content.replace("{{folder_name}}", folder_name)

    # Write the interpolated content to the new file
    with open(dst, "w") as file:
        file.write(content)

    click.secho(f"  - Created {dst}", fg="green")
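A sketch of how this helper is presumably called while scaffolding a project; all paths and names below are illustrative, not the CLI's actual values:

```python
# Illustrative call only; src/dst paths are assumptions.
copy_template(
    src="templates/pipeline/main.py",
    dst="my_project/src/my_project/main.py",
    name="My Project",
    class_name="MyProject",
    folder_name="my_project",
)
```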
@@ -34,7 +34,9 @@ from crewai.telemetry import Telemetry
 from crewai.tools.agent_tools import AgentTools
 from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import I18N, FileHandler, Logger, RPMController
-from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
+from crewai.utilities.constants import (
+    TRAINING_DATA_FILE,
+)
 from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
 from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
 from crewai.utilities.formatter import (
@@ -104,7 +106,7 @@ class Crew(BaseModel):
     tasks: List[Task] = Field(default_factory=list)
     agents: List[BaseAgent] = Field(default_factory=list)
     process: Process = Field(default=Process.sequential)
-    verbose: int = Field(default=0)
+    verbose: bool = Field(default=False)
     memory: bool = Field(
         default=False,
         description="Whether the crew should use memory to store memories of it's execution",
@@ -198,7 +200,7 @@ class Crew(BaseModel):
     def set_private_attrs(self) -> "Crew":
         """Set private attributes."""
         self._cache_handler = CacheHandler()
-        self._logger = Logger(self.verbose)
+        self._logger = Logger(verbose=self.verbose)
         if self.output_log_file:
             self._file_handler = FileHandler(self.output_log_file)
         self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
@@ -388,7 +390,7 @@ class Crew(BaseModel):
         del task_config["agent"]
         return Task(**task_config, agent=task_agent)

-    def _setup_for_training(self) -> None:
+    def _setup_for_training(self, filename: str) -> None:
         """Sets up the crew for training."""
         self._train = True
@@ -399,11 +401,13 @@ class Crew(BaseModel):
             agent.allow_delegation = False

         CrewTrainingHandler(TRAINING_DATA_FILE).initialize_file()
-        CrewTrainingHandler(TRAINED_AGENTS_DATA_FILE).initialize_file()
+        CrewTrainingHandler(filename).initialize_file()

-    def train(self, n_iterations: int, inputs: Optional[Dict[str, Any]] = {}) -> None:
+    def train(
+        self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
+    ) -> None:
         """Trains the crew for a given number of iterations."""
-        self._setup_for_training()
+        self._setup_for_training(filename)

         for n_iteration in range(n_iterations):
             self._train_iteration = n_iteration

@@ -416,7 +420,7 @@ class Crew(BaseModel):
                     training_data=training_data, agent_id=str(agent.id)
                 )

-                CrewTrainingHandler(TRAINED_AGENTS_DATA_FILE).save_trained_data(
+                CrewTrainingHandler(filename).save_trained_data(
                     agent_id=str(agent.role), trained_data=result.model_dump()
                 )
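With the filename threaded through `train`, a caller would now invoke it along these lines (the inputs dict is assumed):

```python
# Sketch of the new call signature (inputs assumed).
crew.train(n_iterations=2, filename="trained_agents_data.pkl", inputs={"topic": "AI"})
```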
@@ -142,7 +142,7 @@ class Pipeline(BaseModel):
         """
         initial_input = copy.deepcopy(kickoff_input)
         current_input = copy.deepcopy(kickoff_input)
-        stages = copy.deepcopy(self.stages)
+        stages = self._copy_stages()
         pipeline_usage_metrics: Dict[str, UsageMetrics] = {}
         all_stage_outputs: List[List[CrewOutput]] = []
         traces: List[List[Union[str, Dict[str, Any]]]] = [[initial_input]]
@@ -367,6 +367,24 @@ class Pipeline(BaseModel):
         ]
         return [crew_outputs + [output] for output in all_stage_outputs[-1]]

+    def _copy_stages(self):
+        """Create a deep copy of the Pipeline's stages."""
+        new_stages = []
+        for stage in self.stages:
+            if isinstance(stage, list):
+                new_stages.append(
+                    [
+                        crew.copy() if hasattr(crew, "copy") else copy.deepcopy(crew)
+                        for crew in stage
+                    ]
+                )
+            elif hasattr(stage, "copy"):
+                new_stages.append(stage.copy())
+            else:
+                new_stages.append(copy.deepcopy(stage))
+
+        return new_stages
+
     def __rshift__(self, other: PipelineStage) -> "Pipeline":
         """
         Implements the >> operator to add another Stage (Crew or List[Crew]) to an existing Pipeline.
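Given `__rshift__`, stages can be appended inline, which is exactly what the router template's `pipeline_classifier >> router` line relies on; a minimal sketch with assumed crew objects:

```python
# Sketch (crew objects assumed): sequential stage, then a parallel pair.
pipeline = Pipeline(stages=[research_crew]) >> write_x_crew
pipeline = pipeline >> [write_x_crew, write_linkedin_crew]  # parallel stage
```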
@@ -1,15 +1,17 @@
 from .annotations import (
     agent,
+    cache_handler,
+    callback,
     crew,
+    llm,
     output_json,
     output_pydantic,
+    pipeline,
     task,
     tool,
-    callback,
-    llm,
-    cache_handler,
 )
 from .crew_base import CrewBase
+from .pipeline_base import PipelineBase

 __all__ = [
     "agent",
@@ -20,6 +22,8 @@ __all__ = [
     "tool",
     "callback",
     "CrewBase",
+    "PipelineBase",
     "llm",
     "cache_handler",
+    "pipeline",
 ]
@@ -1,14 +1,4 @@
-def memoize(func):
-    cache = {}
-
-    def memoized_func(*args, **kwargs):
-        key = (args, tuple(kwargs.items()))
-        if key not in cache:
-            cache[key] = func(*args, **kwargs)
-        return cache[key]
-
-    memoized_func.__dict__.update(func.__dict__)
-    return memoized_func
+from crewai.project.utils import memoize


 def task(func):
@@ -61,6 +51,21 @@ def cache_handler(func):
     return memoize(func)


+def stage(func):
+    func.is_stage = True
+    return memoize(func)
+
+
+def router(func):
+    func.is_router = True
+    return memoize(func)
+
+
+def pipeline(func):
+    func.is_pipeline = True
+    return memoize(func)
+
+
 def crew(func):
     def wrapper(self, *args, **kwargs):
         instantiated_tasks = []
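Each new decorator just tags the function and memoizes it, so repeated calls return the same object; a small illustration under that reading:

```python
# Illustration of the marker-plus-memoize pattern above.
@pipeline
def build_pipeline():
    return Pipeline(stages=[])

assert build_pipeline.is_pipeline  # marker copied onto the memoized wrapper
assert build_pipeline() is build_pipeline()  # memoized: same instance each call
```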
@@ -24,6 +24,7 @@ def CrewBase(cls):
     original_agents_config_path = getattr(
         cls, "agents_config", "config/agents.yaml"
     )
+
     original_tasks_config_path = getattr(cls, "tasks_config", "config/tasks.yaml")

     def __init__(self, *args, **kwargs):
@@ -37,9 +38,11 @@ def CrewBase(cls):
         self.agents_config = self.load_yaml(
             os.path.join(self.base_directory, self.original_agents_config_path)
         )
+
         self.tasks_config = self.load_yaml(
             os.path.join(self.base_directory, self.original_tasks_config_path)
         )
+
         self.map_all_agent_variables()
         self.map_all_task_variables()
58  src/crewai/project/pipeline_base.py  Normal file
@@ -0,0 +1,58 @@
from typing import Callable, Dict

from pydantic import ConfigDict

from crewai.crew import Crew
from crewai.pipeline.pipeline import Pipeline
from crewai.routers.router import Router


# TODO: Could potentially remove. Need to check with @joao and @gui if this is needed for CrewAI+
def PipelineBase(cls):
    class WrappedClass(cls):
        model_config = ConfigDict(arbitrary_types_allowed=True)
        is_pipeline_class: bool = True

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.stages = []
            self._map_pipeline_components()

        def _get_all_functions(self):
            return {
                name: getattr(self, name)
                for name in dir(self)
                if callable(getattr(self, name))
            }

        def _filter_functions(
            self, functions: Dict[str, Callable], attribute: str
        ) -> Dict[str, Callable]:
            return {
                name: func
                for name, func in functions.items()
                if hasattr(func, attribute)
            }

        def _map_pipeline_components(self):
            all_functions = self._get_all_functions()
            crew_functions = self._filter_functions(all_functions, "is_crew")
            router_functions = self._filter_functions(all_functions, "is_router")

            for stage_attr in dir(self):
                stage = getattr(self, stage_attr)
                if isinstance(stage, (Crew, Router)):
                    self.stages.append(stage)
                elif callable(stage) and hasattr(stage, "is_crew"):
                    self.stages.append(crew_functions[stage_attr]())
                elif callable(stage) and hasattr(stage, "is_router"):
                    self.stages.append(router_functions[stage_attr]())
                elif isinstance(stage, list) and all(
                    isinstance(item, Crew) for item in stage
                ):
                    self.stages.append(stage)

        def build_pipeline(self) -> Pipeline:
            return Pipeline(stages=self.stages)

    return WrappedClass
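Applied to a plain class, the decorator collects `Crew` and `Router` attributes (and lists of crews) into `self.stages`; a minimal sketch reusing the classifier crew from the templates above:

```python
# Sketch: PipelineBase picks up the Crew attribute set in __init__.
@PipelineBase
class ClassifierOnly:
    def __init__(self):
        self.classifier_crew = ClassifierCrew().crew()

pipeline = ClassifierOnly().build_pipeline()  # builds a Pipeline from the collected stages
```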
11  src/crewai/project/utils.py  Normal file
@@ -0,0 +1,11 @@
def memoize(func):
    cache = {}

    def memoized_func(*args, **kwargs):
        key = (args, tuple(kwargs.items()))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]

    memoized_func.__dict__.update(func.__dict__)
    return memoized_func
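A quick illustration of the cache behavior (example function only, not part of the codebase):

```python
# Example only: calls with the same arguments are computed once.
@memoize
def slow_square(n):
    print(f"computing {n}...")  # printed once per distinct n
    return n * n

slow_square(4)  # computes and returns 16
slow_square(4)  # returns 16 from the cache, no print
```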
@@ -1,17 +1,20 @@
-from dataclasses import dataclass
+from copy import deepcopy
 from typing import Any, Callable, Dict, Generic, Tuple, TypeVar

-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, PrivateAttr

 T = TypeVar("T", bound=Dict[str, Any])
 U = TypeVar("U")


-@dataclass
 class Route(Generic[T, U]):
     condition: Callable[[T], bool]
     pipeline: U

+    def __init__(self, condition: Callable[[T], bool], pipeline: U):
+        self.condition = condition
+        self.pipeline = pipeline
+

 class Router(BaseModel, Generic[T, U]):
     routes: Dict[str, Route[T, U]] = Field(
@@ -19,9 +22,21 @@ class Router(BaseModel, Generic[T, U]):
         description="Dictionary of route names to (condition, pipeline) tuples",
     )
     default: U = Field(..., description="Default pipeline if no conditions are met")
+    _route_types: Dict[str, type] = PrivateAttr(default_factory=dict)

     model_config = {"arbitrary_types_allowed": True}

+    def __init__(self, routes: Dict[str, Route[T, U]], default: U, **data):
+        super().__init__(routes=routes, default=default, **data)
+        self._check_copyable(default)
+        for name, route in routes.items():
+            self._check_copyable(route.pipeline)
+            self._route_types[name] = type(route.pipeline)
+
+    @staticmethod
+    def _check_copyable(obj):
+        if not hasattr(obj, "copy") or not callable(getattr(obj, "copy")):
+            raise ValueError(f"Object of type {type(obj)} must have a 'copy' method")
+
     def add_route(
         self,
@@ -40,7 +55,9 @@ class Router(BaseModel, Generic[T, U]):
         Returns:
             The Router instance for method chaining
         """
+        self._check_copyable(pipeline)
         self.routes[name] = Route(condition=condition, pipeline=pipeline)
+        self._route_types[name] = type(pipeline)
         return self

     def route(self, input_data: T) -> Tuple[U, str]:
@@ -58,3 +75,16 @@ class Router(BaseModel, Generic[T, U]):
             return route.pipeline, name

         return self.default, "default"
+
+    def copy(self) -> "Router[T, U]":
+        """Create a deep copy of the Router."""
+        new_routes = {
+            name: Route(
+                condition=deepcopy(route.condition),
+                pipeline=route.pipeline.copy(),  # type: ignore
+            )
+            for name, route in self.routes.items()
+        }
+        new_default = self.default.copy()  # type: ignore
+
+        return Router(routes=new_routes, default=new_default)
@@ -16,7 +16,7 @@ try:
 except ImportError:
     agentops = None

-OPENAI_BIGGER_MODELS = ["gpt-4"]
+OPENAI_BIGGER_MODELS = ["gpt-4o"]


 class ToolUsageErrorException(Exception):
@@ -7,6 +7,9 @@ from .parser import YamlParser
 from .printer import Printer
 from .prompts import Prompts
 from .rpm_controller import RPMController
+from .exceptions.context_window_exceeding_exception import (
+    LLMContextLengthExceededException,
+)

 __all__ = [
     "Converter",
@@ -19,4 +22,5 @@ __all__ = [
     "Prompts",
     "RPMController",
     "YamlParser",
+    "LLMContextLengthExceededException",
 ]
@@ -0,0 +1,26 @@
class LLMContextLengthExceededException(Exception):
    CONTEXT_LIMIT_ERRORS = [
        "maximum context length",
        "context length exceeded",
        "context_length_exceeded",
        "context window full",
        "too many tokens",
        "input is too long",
        "exceeds token limit",
    ]

    def __init__(self, error_message: str):
        self.original_error_message = error_message
        super().__init__(self._get_error_message(error_message))

    def _is_context_limit_error(self, error_message: str) -> bool:
        return any(
            phrase.lower() in error_message.lower()
            for phrase in self.CONTEXT_LIMIT_ERRORS
        )

    def _get_error_message(self, error_message: str):
        return (
            f"LLM context length exceeded. Original error: {error_message}\n"
            "Consider using a smaller input or implementing a text splitting strategy."
        )
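Call sites can presumably wrap model invocations with this exception; a sketch under that assumption, where `call_llm` is a placeholder for whatever issues the request:

```python
from crewai.utilities import LLMContextLengthExceededException

try:
    response = call_llm(very_long_prompt)  # placeholder for the real LLM call
except LLMContextLengthExceededException as exc:
    # the exception already carries the provider's original error plus a hint
    print(exc)
```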
@@ -1,7 +1,5 @@
 import os
 import pickle

 from datetime import datetime
@@ -32,14 +30,16 @@ class PickleHandler:
         Parameters:
         - file_name (str): The name of the file for saving and loading data.
         """
+        if not file_name.endswith(".pkl"):
+            file_name += ".pkl"
+
         self.file_path = os.path.join(os.getcwd(), file_name)

     def initialize_file(self) -> None:
         """
-        Initialize the file with an empty dictionary if it does not exist or is empty.
+        Initialize the file with an empty dictionary and overwrite any existing data.
         """
-        if not os.path.exists(self.file_path) or os.path.getsize(self.file_path) == 0:
-            self.save({})  # Save an empty dictionary to initialize the file
+        self.save({})

     def save(self, data) -> None:
         """
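Two behavioral changes are worth calling out: a `.pkl` suffix is now appended automatically, and `initialize_file` unconditionally overwrites. A sketch of the resulting semantics, assumed from the diff above:

```python
# Assumed semantics per the diff; not an excerpt from the codebase.
handler = PickleHandler("trained_agents_data")  # saved as ./trained_agents_data.pkl
handler.initialize_file()  # resets the file to an empty dict, even if it had data
```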
@@ -6,15 +6,11 @@ from crewai.utilities.printer import Printer
 class Logger:
     _printer = Printer()

-    def __init__(self, verbose_level=0):
-        verbose_level = (
-            2 if isinstance(verbose_level, bool) and verbose_level else verbose_level
-        )
-        self.verbose_level = verbose_level
+    def __init__(self, verbose=False):
+        self.verbose = verbose

     def log(self, level, message, color="bold_green"):
-        level_map = {"debug": 1, "info": 2}
-        if self.verbose_level and level_map.get(level, 0) <= self.verbose_level:
+        if self.verbose:
             timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
             self._printer.print(
                 f"[{timestamp}][{level.upper()}]: {message}", color=color
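The verbosity knob collapses to a plain boolean; a sketch of the simplified usage (message text assumed):

```python
logger = Logger(verbose=True)
logger.log("info", "Crew execution started")  # timestamped, colored output
```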
@@ -10,24 +10,24 @@ from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
 class TokenCalcHandler(BaseCallbackHandler):
     model_name: str = ""
     token_cost_process: TokenProcess
+    encoding: tiktoken.Encoding

     def __init__(self, model_name, token_cost_process):
         self.model_name = model_name
         self.token_cost_process = token_cost_process
+        try:
+            self.encoding = tiktoken.encoding_for_model(self.model_name)
+        except KeyError:
+            self.encoding = tiktoken.get_encoding("cl100k_base")

     def on_llm_start(
         self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
     ) -> None:
-        try:
-            encoding = tiktoken.encoding_for_model(self.model_name)
-        except KeyError:
-            encoding = tiktoken.get_encoding("cl100k_base")
-
         if self.token_cost_process is None:
             return

         for prompt in prompts:
-            self.token_cost_process.sum_prompt_tokens(len(encoding.encode(prompt)))
+            self.token_cost_process.sum_prompt_tokens(len(self.encoding.encode(prompt)))

     async def on_llm_new_token(self, token: str, **kwargs) -> None:
         self.token_cost_process.sum_completion_tokens(1)
@@ -7,6 +7,7 @@ import pytest
 from langchain.tools import tool
 from langchain_core.exceptions import OutputParserException
 from langchain_openai import ChatOpenAI
+from langchain.schema import AgentAction

 from crewai import Agent, Crew, Task
 from crewai.agents.cache import CacheHandler
@@ -469,7 +470,7 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
         agent=agent,
     )

-    crew = Crew(agents=[agent], tasks=[task], max_rpm=1, verbose=2)
+    crew = Crew(agents=[agent], tasks=[task], max_rpm=1, verbose=True)

     with patch.object(RPMController, "_wait_for_next_minute") as moveon:
         moveon.return_value = True
@@ -521,7 +522,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
         ),
     ]

-    crew = Crew(agents=[agent1, agent2], tasks=tasks, max_rpm=1, verbose=2)
+    crew = Crew(agents=[agent1, agent2], tasks=tasks, max_rpm=1, verbose=True)

     with patch.object(RPMController, "_wait_for_next_minute") as moveon:
         moveon.return_value = True
@@ -562,7 +563,7 @@ def test_agent_error_on_parsing_tool(capsys):
     crew = Crew(
         agents=[agent1],
         tasks=tasks,
-        verbose=2,
+        verbose=True,
         function_calling_llm=ChatOpenAI(model="gpt-4-0125-preview"),
     )

@@ -601,7 +602,7 @@ def test_agent_remembers_output_format_after_using_tools_too_many_times():
         )
     ]

-    crew = Crew(agents=[agent1], tasks=tasks, verbose=2)
+    crew = Crew(agents=[agent1], tasks=tasks, verbose=True)

     with patch.object(ToolUsage, "_remember_format") as remember_format:
         crew.kickoff()
@@ -1014,3 +1015,75 @@ def test_agent_max_retry_limit():
            ),
        ]
    )


@pytest.mark.vcr(filter_headers=["authorization"])
def test_handle_context_length_exceeds_limit():
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )
    original_action = AgentAction(
        tool="test_tool", tool_input="test_input", log="test_log"
    )

    with patch.object(
        CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
    ) as private_mock:
        task = Task(
            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
            expected_output="The final answer",
        )
        agent.execute_task(
            task=task,
        )
        private_mock.assert_called_once()
        with patch("crewai.agents.executor.click") as mock_prompt:
            mock_prompt.return_value = "y"
            with patch.object(
                CrewAgentExecutor, "_handle_context_length"
            ) as mock_handle_context:
                mock_handle_context.side_effect = ValueError(
                    "Context length limit exceeded"
                )

                long_input = "This is a very long input. " * 10000

                # Attempt to handle context length, expecting the mocked error
                with pytest.raises(ValueError) as excinfo:
                    agent.agent_executor._handle_context_length(
                        [(original_action, long_input)]
                    )

                assert "Context length limit exceeded" in str(excinfo.value)
                mock_handle_context.assert_called_once()


@pytest.mark.vcr(filter_headers=["authorization"])
def test_handle_context_length_exceeds_limit_cli_no():
    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )
    task = Task(description="test task", agent=agent, expected_output="test output")

    with patch.object(
        CrewAgentExecutor, "_iter_next_step", wraps=agent.agent_executor._iter_next_step
    ) as private_mock:
        task = Task(
            description="The final answer is 42. But don't give it yet, instead keep using the `get_final_answer` tool.",
            expected_output="The final answer",
        )
        agent.execute_task(
            task=task,
        )
        private_mock.assert_called_once()
        with patch("crewai.agents.executor.click") as mock_prompt:
            mock_prompt.return_value = "n"
            pytest.raises(SystemExit)
            with patch.object(
                CrewAgentExecutor, "_handle_context_length"
            ) as mock_handle_context:
                mock_handle_context.assert_not_called()
File diff suppressed because it is too large
181  tests/cassettes/test_handle_context_length_exceeds_limit.yaml  Normal file
@@ -0,0 +1,181 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are test role. test backstory\nYour personal
      goal is: test goalTo give my best complete final answer to the task use the
      exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
      my best complete final answer to the task.\nYour final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!\nCurrent Task: The final answer is 42.
      But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis
      is the expect criteria for your final answer: The final answer \n you MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '938'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.35.10
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.35.10
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.5
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" The"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" final"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" is"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" "},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rDmhDy41qR9B2jdH1zkXoxv4LMn6","object":"chat.completion.chunk","created":1722471399,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}

        data: [DONE]

        '
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 8ac1a40879b87d1f-LAX
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Thu, 01 Aug 2024 00:16:40 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=MHUl15YVi607cmuLtQ84ESiH30IyJiIW1a40fopQ81w-1722471400-1.0.1.1-OGpq5Ezj6iE0ToM1diQllGb70.J3O_K2De9NbwZPWmW2qN07U20adJ_0yd6PKUNqMdL.xEnLcNAOWVmsfrLUrQ;
        path=/; expires=Thu, 01-Aug-24 00:46:40 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=G2ZVNvfNfFk4DeKyZ7jMYetG7wOasINAGHstrOnuAY8-1722471400129-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      alt-svc:
      - h3=":443"; ma=86400
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '131'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15552000; includeSubDomains; preload
      x-ratelimit-limit-requests:
      - '10000'
      x-ratelimit-limit-tokens:
      - '30000000'
      x-ratelimit-remaining-requests:
      - '9999'
      x-ratelimit-remaining-tokens:
      - '29999786'
      x-ratelimit-reset-requests:
      - 6ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_b68b417b3fe1c67244279551e411b37a
    status:
      code: 200
      message: OK
version: 1
@@ -0,0 +1,162 @@
interactions:
- request:
    body: '{"messages": [{"content": "You are test role. test backstory\nYour personal
      goal is: test goalTo give my best complete final answer to the task use the
      exact following format:\n\nThought: I now can give a great answer\nFinal Answer:
      my best complete final answer to the task.\nYour final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!\nCurrent Task: The final answer is 42.
      But don''t give it yet, instead keep using the `get_final_answer` tool.\n\nThis
      is the expect criteria for your final answer: The final answer \n you MUST return
      the actual complete content as the final answer, not a summary.\n\nBegin! This
      is VERY important to you, use the tools available and give your best Final Answer,
      your job depends on it!\n\nThought:\n", "role": "user"}], "model": "gpt-4o",
      "n": 1, "stop": ["\nObservation"], "stream": true, "temperature": 0.7}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, br
      connection:
      - keep-alive
      content-length:
      - '938'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.35.10
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.35.10
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.5
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" I"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" now"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" can"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" give"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" a"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" great"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" answer"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"\n"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":" Answer"},"logprobs":null,"finish_reason":null}]}

        data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"
|
||||
"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{"content":"42"},"logprobs":null,"finish_reason":null}]}
|
||||
|
||||
|
||||
data: {"id":"chatcmpl-9rI1RAFocnugKoDvAxLndHW5uBeU9","object":"chat.completion.chunk","created":1722487689,"model":"gpt-4o-2024-05-13","system_fingerprint":"fp_4e2b2da518","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
|
||||
|
||||
|
||||
data: [DONE]
|
||||
|
||||
|
||||
'
|
||||
headers:
|
||||
CF-Cache-Status:
|
||||
- DYNAMIC
|
||||
CF-RAY:
|
||||
- 8ac331b9eaee2b7f-LAX
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Type:
|
||||
- text/event-stream; charset=utf-8
|
||||
Date:
|
||||
- Thu, 01 Aug 2024 04:48:09 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=OXht5zC71vWYFW_z_933m3sZfFS2xBez0DHv93FvT5s-1722487689-1.0.1.1-wE8JTR7MnwUgiiTDppYg8A7zLEiidth.MB0zrwONeAtNWRjKC1tuGf8LZYDlYIHUhqG73syYExpZ.5pZhzJkcg;
|
||||
path=/; expires=Thu, 01-Aug-24 05:18:09 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=PAR7y4xRe4VzRT.7GK34Tq5r8vevY6xq0E.i.R40xnU-1722487689562-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '84'
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
strict-transport-security:
|
||||
- max-age=15552000; includeSubDomains; preload
|
||||
x-ratelimit-limit-requests:
|
||||
- '10000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '30000000'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '9999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '29999786'
|
||||
x-ratelimit-reset-requests:
|
||||
- 6ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_105dcfc53c9672dea0437249c12c3319
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
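The YAML above is a vcrpy cassette: a recorded OpenAI streaming exchange (note the text/event-stream response replayed chunk by chunk) that the test suite plays back instead of calling api.openai.com. A minimal sketch of how such a cassette is consumed, assuming the pytest-vcr/pytest-recording plugin that the tests further down rely on; the test name and body here are illustrative, not taken from this diff:

import pytest

from crewai import Agent, Crew, Task


@pytest.mark.vcr(filter_headers=["authorization"])
def test_agent_gives_final_answer():
    # The plugin resolves tests/cassettes/test_agent_gives_final_answer.yaml;
    # on replay no request reaches api.openai.com, and filter_headers keeps
    # the Authorization header (the API key) out of any new recording.
    agent = Agent(role="test role", goal="test goal", backstory="test backstory")
    task = Task(
        description="State the final answer.",
        expected_output="The final answer",
        agent=agent,
    )
    result = Crew(agents=[agent], tasks=[task]).kickoff()
    assert result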
5474 tests/cassettes/test_hierarchical_verbose_false_manager_agent.yaml Normal file (file diff suppressed because it is too large)
10311 tests/cassettes/test_hierarchical_verbose_manager_agent.yaml Normal file (file diff suppressed because it is too large)
@@ -15,18 +15,18 @@ def runner():
 def test_train_default_iterations(train_crew, runner):
     result = runner.invoke(train)
 
-    train_crew.assert_called_once_with(5)
+    train_crew.assert_called_once_with(5, "trained_agents_data.pkl")
     assert result.exit_code == 0
-    assert "Training the crew for 5 iterations" in result.output
+    assert "Training the Crew for 5 iterations" in result.output
 
 
 @mock.patch("crewai.cli.cli.train_crew")
 def test_train_custom_iterations(train_crew, runner):
     result = runner.invoke(train, ["--n_iterations", "10"])
 
-    train_crew.assert_called_once_with(10)
+    train_crew.assert_called_once_with(10, "trained_agents_data.pkl")
     assert result.exit_code == 0
-    assert "Training the crew for 10 iterations" in result.output
+    assert "Training the Crew for 10 iterations" in result.output
 
 
 @mock.patch("crewai.cli.cli.train_crew")
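These assertions pin down the new CLI contract: `train` must forward a pickle filename (defaulting to `trained_agents_data.pkl`) along with the iteration count, and the progress message now capitalizes "Crew". A sketch of the click command they imply; the `--n_iterations` option, both defaults, and the echoed message come straight from the assertions, while the `-f/--filename` spelling and the help texts are assumptions:

import click

from crewai.cli.train_crew import train_crew


@click.command()
@click.option("-n", "--n_iterations", type=int, default=5,
              help="Number of iterations to train the crew")
@click.option("-f", "--filename", type=str, default="trained_agents_data.pkl",
              help="Pickle file to store trained agent data")  # flag name assumed
def train(n_iterations: int, filename: str):
    """Train the crew."""
    click.echo(f"Training the Crew for {n_iterations} iterations")
    train_crew(n_iterations, filename)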
@@ -6,7 +6,6 @@ from crewai.cli.train_crew import train_crew
 
 @mock.patch("crewai.cli.train_crew.subprocess.run")
 def test_train_crew_positive_iterations(mock_subprocess_run):
-    # Arrange
     n_iterations = 5
     mock_subprocess_run.return_value = subprocess.CompletedProcess(
         args=["poetry", "run", "train", str(n_iterations)],
@@ -15,12 +14,10 @@ def test_train_crew_positive_iterations(mock_subprocess_run):
         stderr="",
     )
 
-    # Act
-    train_crew(n_iterations)
+    train_crew(n_iterations, "trained_agents_data.pkl")
 
-    # Assert
     mock_subprocess_run.assert_called_once_with(
-        ["poetry", "run", "train", str(n_iterations)],
+        ["poetry", "run", "train", str(n_iterations), "trained_agents_data.pkl"],
         capture_output=False,
         text=True,
         check=True,
@@ -29,7 +26,7 @@ def test_train_crew_positive_iterations(mock_subprocess_run):
 
 @mock.patch("crewai.cli.train_crew.click")
 def test_train_crew_zero_iterations(click):
-    train_crew(0)
+    train_crew(0, "trained_agents_data.pkl")
     click.echo.assert_called_once_with(
         "An unexpected error occurred: The number of iterations must be a positive integer.",
         err=True,
@@ -38,7 +35,7 @@ def test_train_crew_zero_iterations(click):
 
 @mock.patch("crewai.cli.train_crew.click")
 def test_train_crew_negative_iterations(click):
-    train_crew(-2)
+    train_crew(-2, "trained_agents_data.pkl")
     click.echo.assert_called_once_with(
         "An unexpected error occurred: The number of iterations must be a positive integer.",
         err=True,
@@ -55,10 +52,13 @@ def test_train_crew_called_process_error(mock_subprocess_run, click):
         output="Error",
         stderr="Some error occurred",
     )
-    train_crew(n_iterations)
+    train_crew(n_iterations, "trained_agents_data.pkl")
 
     mock_subprocess_run.assert_called_once_with(
-        ["poetry", "run", "train", "5"], capture_output=False, text=True, check=True
+        ["poetry", "run", "train", str(n_iterations), "trained_agents_data.pkl"],
+        capture_output=False,
+        text=True,
+        check=True,
     )
     click.echo.assert_has_calls(
         [
@@ -74,13 +74,15 @@ def test_train_crew_called_process_error(mock_subprocess_run, click):
 @mock.patch("crewai.cli.train_crew.click")
 @mock.patch("crewai.cli.train_crew.subprocess.run")
 def test_train_crew_unexpected_exception(mock_subprocess_run, click):
-    # Arrange
     n_iterations = 5
     mock_subprocess_run.side_effect = Exception("Unexpected error")
-    train_crew(n_iterations)
+    train_crew(n_iterations, "trained_agents_data.pkl")
 
     mock_subprocess_run.assert_called_once_with(
-        ["poetry", "run", "train", "5"], capture_output=False, text=True, check=True
+        ["poetry", "run", "train", str(n_iterations), "trained_agents_data.pkl"],
+        capture_output=False,
+        text=True,
+        check=True,
     )
     click.echo.assert_called_once_with(
         "An unexpected error occurred: Unexpected error", err=True
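The hunks above fix the expected subprocess invocation to `["poetry", "run", "train", str(n_iterations), filename]` and keep the "An unexpected error occurred: ..." messages, which is enough to sketch the updated helper. The `CalledProcessError` branch is hedged: the diff only shows that `click.echo` is called several times there, not the exact wording:

import subprocess

import click


def train_crew(n_iterations: int, filename: str) -> None:
    """Run the project's training entry point via Poetry."""
    try:
        if n_iterations <= 0:
            raise ValueError("The number of iterations must be a positive integer.")
        command = ["poetry", "run", "train", str(n_iterations), filename]
        subprocess.run(command, capture_output=False, text=True, check=True)
    except subprocess.CalledProcessError as e:
        # Wording assumed; the diff only shows click.echo.assert_has_calls([...]).
        click.echo(f"An error occurred while training the crew: {e}", err=True)
        click.echo(e.output, err=True)
    except Exception as e:
        click.echo(f"An unexpected error occurred: {e}", err=True)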
@@ -8,6 +8,7 @@ from unittest.mock import MagicMock, patch
 
 import pydantic_core
 import pytest
 
 from crewai.agent import Agent
 from crewai.agents.cache import CacheHandler
 from crewai.crew import Crew
@@ -450,45 +451,13 @@ def test_crew_verbose_output(capsys):
         assert expected_string in captured.out
 
     # Now test with verbose set to False
-    crew._logger = Logger(verbose_level=False)
+    crew.verbose = False
+    crew._logger = Logger(verbose=False)
     crew.kickoff()
     captured = capsys.readouterr()
     assert captured.out == ""
 
 
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_crew_verbose_levels_output(capsys):
-    tasks = [
-        Task(
-            description="Write about AI advancements.",
-            expected_output="A 4 paragraph article about AI.",
-            agent=researcher,
-        )
-    ]
-
-    crew = Crew(agents=[researcher], tasks=tasks, process=Process.sequential, verbose=1)
-
-    crew.kickoff()
-    captured = capsys.readouterr()
-    expected_strings = ["Working Agent: Researcher", "[Researcher] Task output:"]
-
-    for expected_string in expected_strings:
-        assert expected_string in captured.out
-
-    # Now test with verbose set to 2
-    crew._logger = Logger(verbose_level=2)
-    crew.kickoff()
-    captured = capsys.readouterr()
-    expected_strings = [
-        "Working Agent: Researcher",
-        "Starting Task: Write about AI advancements.",
-        "[Researcher] Task output:",
-    ]
-
-    for expected_string in expected_strings:
-        assert expected_string in captured.out
-
-
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_cache_hitting_between_agents():
     from unittest.mock import call, patch
@@ -562,7 +531,7 @@ def test_api_calls_throttling(capsys):
         agent=agent,
     )
 
-    crew = Crew(agents=[agent], tasks=[task], max_rpm=2, verbose=2)
+    crew = Crew(agents=[agent], tasks=[task], max_rpm=2, verbose=True)
 
     with patch.object(RPMController, "_wait_for_next_minute") as moveon:
         moveon.return_value = True
@@ -619,7 +588,7 @@ def test_agents_rpm_is_never_set_if_crew_max_RPM_is_not_set():
         agent=agent,
     )
 
-    Crew(agents=[agent], tasks=[task], verbose=2)
+    Crew(agents=[agent], tasks=[task], verbose=True)
 
     assert agent._rpm_controller is None
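The three hunks above record a simplification of the verbosity API: `verbose` stops being an integer level fed to `Logger(verbose_level=...)` and becomes a plain boolean consumed by `Logger(verbose=...)`, which is why the level-based test `test_crew_verbose_levels_output` is deleted outright. In user code the migration looks like this (a before/after sketch using the same kwargs as the tests above):

# Before: integer verbosity levels, where 2 meant extra per-task logging
crew = Crew(agents=[researcher], tasks=tasks, process=Process.sequential, verbose=2)

# After: a single boolean switch
crew = Crew(agents=[researcher], tasks=tasks, process=Process.sequential, verbose=True)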
@@ -740,7 +709,7 @@ async def test_crew_async_kickoff():
     ]
 
     agent = Agent(
-        role="{topic} Researcher",
+        role="mock agent",
         goal="Express hot takes on {topic}.",
         backstory="You have a lot of experience with {topic}.",
     )
@@ -752,19 +721,30 @@ async def test_crew_async_kickoff():
     )
 
     crew = Crew(agents=[agent], tasks=[task])
-    results = await crew.kickoff_for_each_async(inputs=inputs)
+    mock_task_output = (
+        CrewOutput(
+            raw="Test output from Crew 1",
+            tasks_output=[],
+            token_usage=UsageMetrics(
+                total_tokens=100,
+                prompt_tokens=10,
+                completion_tokens=90,
+                successful_requests=1,
+            ),
+            json_dict={"output": "crew1"},
+            pydantic=None,
+        ),
+    )
+    with patch.object(Crew, "kickoff_async", return_value=mock_task_output):
+        results = await crew.kickoff_for_each_async(inputs=inputs)
 
-    assert len(results) == len(inputs)
-    for result in results:
-        # Assert that all required keys are in usage_metrics and their values are not None
-        for key in [
-            "total_tokens",
-            "prompt_tokens",
-            "completion_tokens",
-            "successful_requests",
-        ]:
-            assert key in result.token_usage
-            assert result.token_usage[key] > 0
+        assert len(results) == len(inputs)
+        for result in results:
+            # Assert that all required keys are in usage_metrics and their values are not None
+            assert result[0].token_usage.total_tokens > 0  # type: ignore
+            assert result[0].token_usage.prompt_tokens > 0  # type: ignore
+            assert result[0].token_usage.completion_tokens > 0  # type: ignore
+            assert result[0].token_usage.successful_requests > 0  # type: ignore
 
 
 @pytest.mark.vcr(filter_headers=["authorization"])
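The reworked async test no longer depends on live completions: `Crew.kickoff_async` is patched to return a canned `CrewOutput`, so `kickoff_for_each_async` can be asserted deterministically. A self-contained sketch of that pattern follows; the import paths for `CrewOutput` and `UsageMetrics` are assumptions inferred from their use in the hunk, not confirmed by this diff:

from unittest.mock import patch

from crewai.crew import Crew
from crewai.crews.crew_output import CrewOutput  # assumed import path
from crewai.types.usage_metrics import UsageMetrics  # assumed import path

canned_output = (
    CrewOutput(
        raw="Test output from Crew 1",
        tasks_output=[],
        token_usage=UsageMetrics(
            total_tokens=100,
            prompt_tokens=10,
            completion_tokens=90,
            successful_requests=1,
        ),
        json_dict={"output": "crew1"},
        pydantic=None,
    ),
)


async def run_all(crew: Crew, inputs: list) -> None:
    # patch.object substitutes an AsyncMock because kickoff_async is a
    # coroutine function, so every crew copy spawned by
    # kickoff_for_each_async immediately yields the canned tuple.
    with patch.object(Crew, "kickoff_async", return_value=canned_output):
        results = await crew.kickoff_for_each_async(inputs=inputs)
        assert all(r[0].token_usage.total_tokens > 0 for r in results)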
@@ -1827,7 +1807,9 @@ def test_crew_train_success(task_evaluator, crew_training_handler, kickoff):
         agents=[researcher, writer],
         tasks=[task],
     )
-    crew.train(n_iterations=2, inputs={"topic": "AI"})
+    crew.train(
+        n_iterations=2, inputs={"topic": "AI"}, filename="trained_agents_data.pkl"
+    )
     task_evaluator.assert_has_calls(
         [
             mock.call(researcher),
@@ -1911,7 +1893,7 @@ def test__setup_for_training():
     for agent in agents:
         assert agent.allow_delegation is True
 
-    crew._setup_for_training()
+    crew._setup_for_training("trained_agents_data.pkl")
 
     assert crew._train is True
     assert task.human_input is True
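Read together with the CLI hunks earlier, the two changes above trace the new filename through the whole call chain: the `train` command passes it to `train_crew`, whose subprocess ultimately reaches `Crew.train(...)` and `Crew._setup_for_training(filename)`. At the public API surface the usage implied by the test looks like this (the agent and task definitions are illustrative stand-ins; only the `train` arguments come from the diff):

from crewai import Agent, Crew, Task

researcher = Agent(role="Researcher", goal="Research {topic}", backstory="Veteran analyst")
writer = Agent(role="Writer", goal="Write about {topic}", backstory="Seasoned tech writer")
task = Task(description="Write about {topic}.", expected_output="A short article", agent=writer)

crew = Crew(agents=[researcher, writer], tasks=[task])
# Train for two iterations and persist the learned agent data to the
# same pickle file the CLI uses by default.
crew.train(n_iterations=2, inputs={"topic": "AI"}, filename="trained_agents_data.pkl")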
@@ -2565,3 +2547,49 @@ def test_crew_testing_function(mock_kickoff, crew_evaluator):
             mock.call().print_crew_evaluation_result(),
         ]
     )
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_hierarchical_verbose_manager_agent():
+    from langchain_openai import ChatOpenAI
+
+    task = Task(
+        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
+        expected_output="5 bullet points with a paragraph for each idea.",
+    )
+
+    crew = Crew(
+        agents=[researcher, writer],
+        tasks=[task],
+        process=Process.hierarchical,
+        manager_llm=ChatOpenAI(temperature=0, model="gpt-4o"),
+        verbose=True,
+    )
+
+    crew.kickoff()
+
+    assert crew.manager_agent is not None
+    assert crew.manager_agent.verbose
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_hierarchical_verbose_false_manager_agent():
+    from langchain_openai import ChatOpenAI
+
+    task = Task(
+        description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
+        expected_output="5 bullet points with a paragraph for each idea.",
+    )
+
+    crew = Crew(
+        agents=[researcher, writer],
+        tasks=[task],
+        process=Process.hierarchical,
+        manager_llm=ChatOpenAI(temperature=0, model="gpt-4o"),
+        verbose=False,
+    )
+
+    crew.kickoff()
+
+    assert crew.manager_agent is not None
+    assert not crew.manager_agent.verbose
Some files were not shown because too many files have changed in this diff.