remove all references to pipeline and pipeline router (#1661)

* remove all references to pipeline and router

* fix linting

* drop poetry.lock
Author: Brandon Hancock (bhancock_ai)
Date: 2024-12-04 12:39:34 -05:00
Committed by: GitHub
Parent: 066ad73423
Commit: bbea797b0c
57 changed files with 10 additions and 10079 deletions


@@ -6,7 +6,6 @@ import pkg_resources
 from crewai.cli.add_crew_to_flow import add_crew_to_flow
 from crewai.cli.create_crew import create_crew
 from crewai.cli.create_flow import create_flow
-from crewai.cli.create_pipeline import create_pipeline
 from crewai.memory.storage.kickoff_task_outputs_storage import (
     KickoffTaskOutputsSQLiteStorage,
 )
@@ -31,22 +30,18 @@ def crewai():
 @crewai.command()
-@click.argument("type", type=click.Choice(["crew", "pipeline", "flow"]))
+@click.argument("type", type=click.Choice(["crew", "flow"]))
 @click.argument("name")
 @click.option("--provider", type=str, help="The provider to use for the crew")
 @click.option("--skip_provider", is_flag=True, help="Skip provider validation")
 def create(type, name, provider, skip_provider=False):
-    """Create a new crew, pipeline, or flow."""
+    """Create a new crew, or flow."""
     if type == "crew":
         create_crew(name, provider, skip_provider)
-    elif type == "pipeline":
-        create_pipeline(name)
     elif type == "flow":
         create_flow(name)
     else:
-        click.secho(
-            "Error: Invalid type. Must be 'crew', 'pipeline', or 'flow'.", fg="red"
-        )
+        click.secho("Error: Invalid type. Must be 'crew' or 'flow'.", fg="red")
 @crewai.command()


@@ -1,107 +0,0 @@
import shutil
from pathlib import Path
import click
def create_pipeline(name, router=False):
"""Create a new pipeline project."""
folder_name = name.replace(" ", "_").replace("-", "_").lower()
class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")
click.secho(f"Creating pipeline {folder_name}...", fg="green", bold=True)
project_root = Path(folder_name)
if project_root.exists():
click.secho(f"Error: Folder {folder_name} already exists.", fg="red")
return
# Create directory structure
(project_root / "src" / folder_name).mkdir(parents=True)
(project_root / "src" / folder_name / "pipelines").mkdir(parents=True)
(project_root / "src" / folder_name / "crews").mkdir(parents=True)
(project_root / "src" / folder_name / "tools").mkdir(parents=True)
(project_root / "tests").mkdir(exist_ok=True)
# Create .env file
with open(project_root / ".env", "w") as file:
file.write("OPENAI_API_KEY=YOUR_API_KEY")
package_dir = Path(__file__).parent
template_folder = "pipeline_router" if router else "pipeline"
templates_dir = package_dir / "templates" / template_folder
# List of template files to copy
root_template_files = [".gitignore", "pyproject.toml", "README.md"]
src_template_files = ["__init__.py", "main.py"]
tools_template_files = ["tools/__init__.py", "tools/custom_tool.py"]
if router:
crew_folders = [
"classifier_crew",
"normal_crew",
"urgent_crew",
]
pipelines_folders = [
"pipelines/__init__.py",
"pipelines/pipeline_classifier.py",
"pipelines/pipeline_normal.py",
"pipelines/pipeline_urgent.py",
]
else:
crew_folders = [
"research_crew",
"write_linkedin_crew",
"write_x_crew",
]
pipelines_folders = ["pipelines/__init__.py", "pipelines/pipeline.py"]
def process_file(src_file, dst_file):
with open(src_file, "r") as file:
content = file.read()
content = content.replace("{{name}}", name)
content = content.replace("{{crew_name}}", class_name)
content = content.replace("{{folder_name}}", folder_name)
content = content.replace("{{pipeline_name}}", class_name)
with open(dst_file, "w") as file:
file.write(content)
# Copy and process root template files
for file_name in root_template_files:
src_file = templates_dir / file_name
dst_file = project_root / file_name
process_file(src_file, dst_file)
# Copy and process src template files
for file_name in src_template_files:
src_file = templates_dir / file_name
dst_file = project_root / "src" / folder_name / file_name
process_file(src_file, dst_file)
# Copy tools files
for file_name in tools_template_files:
src_file = templates_dir / file_name
dst_file = project_root / "src" / folder_name / file_name
shutil.copy(src_file, dst_file)
# Copy pipelines folders
for file_name in pipelines_folders:
src_file = templates_dir / file_name
dst_file = project_root / "src" / folder_name / file_name
process_file(src_file, dst_file)
# Copy crew folders
for crew_folder in crew_folders:
src_crew_folder = templates_dir / "crews" / crew_folder
dst_crew_folder = project_root / "src" / folder_name / "crews" / crew_folder
if src_crew_folder.exists():
shutil.copytree(src_crew_folder, dst_crew_folder)
else:
click.secho(
f"Warning: Crew folder {crew_folder} not found in template.",
fg="yellow",
)
click.secho(f"Pipeline {name} created successfully!", fg="green", bold=True)


@@ -1,2 +0,0 @@
.env
__pycache__/


@@ -1,57 +0,0 @@
# {{crew_name}} Crew
Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.
## Installation
Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.
First, if you haven't already, install Poetry:
```bash
pip install poetry
```
Next, navigate to your project directory and install the dependencies:
1. First lock the dependencies and then install them:
```bash
crewai install
```
### Customizing
**Add your `OPENAI_API_KEY` into the `.env` file**
- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools and specific args
- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks
## Running the Project
To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
```bash
crewai run
```
This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
This example, unmodified, will create a `report.md` file in the root folder with the output of research on LLMs.
## Understanding Your Crew
The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.
## Support
For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI:
- Visit our [documentation](https://docs.crewai.com)
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
- [Chat with our docs](https://chatg.pt/DWjSBZn)
Let's create wonders together with the power and simplicity of crewAI.


@@ -1,19 +0,0 @@
researcher:
role: >
{topic} Senior Data Researcher
goal: >
Uncover cutting-edge developments in {topic}
backstory: >
You're a seasoned researcher with a knack for uncovering the latest
developments in {topic}. Known for your ability to find the most relevant
information and present it in a clear and concise manner.
reporting_analyst:
role: >
{topic} Reporting Analyst
goal: >
Create detailed reports based on {topic} data analysis and research findings
backstory: >
You're a meticulous analyst with a keen eye for detail. You're known for
your ability to turn complex data into clear and concise reports, making
it easy for others to understand and act on the information you provide.


@@ -1,16 +0,0 @@
research_task:
description: >
Conduct thorough research about {topic}
Make sure you find any interesting and relevant information given
the current year is 2024.
expected_output: >
A list with 10 bullet points of the most relevant information about {topic}
agent: researcher
reporting_task:
description: >
Review the context you got and expand each topic into a full section for a report.
Make sure the report is detailed and contains any and all relevant information.
expected_output: >
A fully fledged report with a title and main topics, each with a full section of information.
agent: reporting_analyst


@@ -1,58 +0,0 @@
from pydantic import BaseModel
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool
# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool
class ResearchReport(BaseModel):
"""Research Report"""
title: str
body: str
@CrewBase
class ResearchCrew():
"""Research Crew"""
agents_config = 'config/agents.yaml'
tasks_config = 'config/tasks.yaml'
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config['researcher'],
verbose=True
)
@agent
def reporting_analyst(self) -> Agent:
return Agent(
config=self.agents_config['reporting_analyst'],
verbose=True
)
@task
def research_task(self) -> Task:
return Task(
config=self.tasks_config['research_task'],
)
@task
def reporting_task(self) -> Task:
return Task(
config=self.tasks_config['reporting_task'],
output_pydantic=ResearchReport
)
@crew
def crew(self) -> Crew:
"""Creates the Research Crew"""
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)


@@ -1,51 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
# Uncomment the following line to use an example of a custom tool
# from {{folder_name}}.tools.custom_tool import MyCustomTool
# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool
@CrewBase
class WriteLinkedInCrew():
"""Research Crew"""
agents_config = 'config/agents.yaml'
tasks_config = 'config/tasks.yaml'
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config['researcher'],
verbose=True
)
@agent
def reporting_analyst(self) -> Agent:
return Agent(
config=self.agents_config['reporting_analyst'],
verbose=True
)
@task
def research_task(self) -> Task:
return Task(
config=self.tasks_config['research_task'],
)
@task
def reporting_task(self) -> Task:
return Task(
config=self.tasks_config['reporting_task'],
output_file='report.md'
)
@crew
def crew(self) -> Crew:
"""Creates the {{crew_name}} crew"""
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)


@@ -1,14 +0,0 @@
x_writer_agent:
role: >
Expert Social Media Content Creator specializing in short form written content
goal: >
Create viral-worthy, engaging short form posts that distill complex {topic} information
into compelling 280-character messages
backstory: >
You're a social media virtuoso with a particular talent for short form content. Your posts
consistently go viral due to your ability to craft hooks that stop users mid-scroll.
You've studied the techniques of social media masters like Justin Welsh, Dickie Bush,
Nicolas Cole, and Shaan Puri, incorporating their best practices into your own unique style.
Your superpower is taking intricate {topic} concepts and transforming them into
bite-sized, shareable content that resonates with a wide audience. You know exactly
how to structure a post for maximum impact and engagement.


@@ -1,22 +0,0 @@
write_x_task:
description: >
Using the research report provided, create an engaging short form post about {topic}.
Your post should have a great hook, summarize key points, and be structured for easy
consumption on a digital platform. The post must be under 280 characters.
Follow these guidelines:
1. Start with an attention-grabbing hook
2. Condense the main insights from the research
3. Use clear, concise language
4. Include a call-to-action or thought-provoking question if space allows
5. Ensure the post flows well and is easy to read quickly
Here is the title of the research report you will be using:
Title: {title}
Research:
{body}
expected_output: >
A compelling X post under 280 characters that effectively summarizes the key findings
about {topic}, starts with a strong hook, and is optimized for engagement on the platform.
agent: x_writer_agent


@@ -1,36 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool
# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool
@CrewBase
class WriteXCrew:
"""Research Crew"""
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@agent
def x_writer_agent(self) -> Agent:
return Agent(config=self.agents_config["x_writer_agent"], verbose=True)
@task
def write_x_task(self) -> Task:
return Task(
config=self.tasks_config["write_x_task"],
)
@crew
def crew(self) -> Crew:
"""Creates the Write X Crew"""
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)


@@ -1,26 +0,0 @@
#!/usr/bin/env python
import asyncio
from {{folder_name}}.pipelines.pipeline import {{pipeline_name}}Pipeline
async def run():
"""
Run the pipeline.
"""
inputs = [
{"topic": "AI wearables"},
]
pipeline = {{pipeline_name}}Pipeline()
results = await pipeline.kickoff(inputs)
# Process and print results
for result in results:
print(f"Raw output: {result.raw}")
if result.json_dict:
print(f"JSON output: {result.json_dict}")
print("\n")
def main():
asyncio.run(run())
if __name__ == "__main__":
main()


@@ -1,87 +0,0 @@
"""
This pipeline file includes two different examples to demonstrate the flexibility of crewAI pipelines.
Example 1: Two-Stage Pipeline
-----------------------------
This pipeline consists of two crews:
1. ResearchCrew: Performs research on a given topic.
2. WriteXCrew: Generates an X (Twitter) post based on the research findings.
Key features:
- The ResearchCrew's final task uses output_json to store all research findings in a JSON object.
- This JSON object is then passed to the WriteXCrew, where tasks can access the research findings.
Example 2: Two-Stage Pipeline with Parallel Execution
-------------------------------------------------------
This pipeline consists of three crews:
1. ResearchCrew: Performs research on a given topic.
2. WriteXCrew and WriteLinkedInCrew: Run in parallel, using the research findings to generate posts for X and LinkedIn, respectively.
Key features:
- Demonstrates the ability to run multiple crews in parallel.
- Shows how to structure a pipeline with both sequential and parallel stages.
Usage:
- To switch between examples, comment/uncomment the respective code blocks below.
- Ensure that you have implemented all necessary crew classes (ResearchCrew, WriteXCrew, WriteLinkedInCrew) before running.
"""
# Common imports for both examples
from crewai import Pipeline
# Uncomment the crews you need for your chosen example
from ..crews.research_crew.research_crew import ResearchCrew
from ..crews.write_x_crew.write_x_crew import WriteXCrew
# from ..crews.write_linkedin_crew.write_linkedin_crew import WriteLinkedInCrew  # Uncomment for Example 2
# EXAMPLE 1: Two-Stage Pipeline
# -----------------------------
# Uncomment the following code block to use Example 1
class {{pipeline_name}}Pipeline:
def __init__(self):
# Initialize crews
self.research_crew = ResearchCrew().crew()
self.write_x_crew = WriteXCrew().crew()
def create_pipeline(self):
return Pipeline(
stages=[
self.research_crew,
self.write_x_crew
]
)
async def kickoff(self, inputs):
pipeline = self.create_pipeline()
results = await pipeline.kickoff(inputs)
return results
# EXAMPLE 2: Two-Stage Pipeline with Parallel Execution
# -------------------------------------------------------
# Uncomment the following code block to use Example 2
# @PipelineBase
# class {{pipeline_name}}Pipeline:
# def __init__(self):
# # Initialize crews
# self.research_crew = ResearchCrew().crew()
# self.write_x_crew = WriteXCrew().crew()
# self.write_linkedin_crew = WriteLinkedInCrew().crew()
# @pipeline
# def create_pipeline(self):
# return Pipeline(
# stages=[
# self.research_crew,
# [self.write_x_crew, self.write_linkedin_crew] # Parallel execution
# ]
# )
# async def run(self, inputs):
# pipeline = self.create_pipeline()
# results = await pipeline.kickoff(inputs)
# return results


@@ -1,17 +0,0 @@
[tool.poetry]
name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]
[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.85.0,<1.0.0" }
asyncio = "*"
[tool.poetry.scripts]
{{folder_name}} = "{{folder_name}}.main:main"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"


@@ -1,19 +0,0 @@
from typing import Type
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class MyCustomToolInput(BaseModel):
"""Input schema for MyCustomTool."""
argument: str = Field(..., description="Description of the argument.")
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
args_schema: Type[BaseModel] = MyCustomToolInput
def _run(self, argument: str) -> str:
# Implementation goes here
return "this is an example of a tool output, ignore it and move along."


@@ -1,2 +0,0 @@
.env
__pycache__/


@@ -1,54 +0,0 @@
# {{crew_name}} Crew
Welcome to the {{crew_name}} Crew project, powered by [crewAI](https://crewai.com). This template is designed to help you set up a multi-agent AI system with ease, leveraging the powerful and flexible framework provided by crewAI. Our goal is to enable your agents to collaborate effectively on complex tasks, maximizing their collective intelligence and capabilities.
## Installation
Ensure you have Python >=3.10 <=3.13 installed on your system. This project uses [Poetry](https://python-poetry.org/) for dependency management and package handling, offering a seamless setup and execution experience.
First, if you haven't already, install Poetry:
```bash
pip install poetry
```
Next, navigate to your project directory and install the dependencies:
1. First lock the dependencies and then install them:
```bash
crewai install
```
### Customizing
**Add your `OPENAI_API_KEY` into the `.env` file**
- Modify `src/{{folder_name}}/config/agents.yaml` to define your agents
- Modify `src/{{folder_name}}/config/tasks.yaml` to define your tasks
- Modify `src/{{folder_name}}/crew.py` to add your own logic, tools and specific args
- Modify `src/{{folder_name}}/main.py` to add custom inputs for your agents and tasks
## Running the Project
To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
```bash
crewai run
```
This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
This example, unmodified, will create a `report.md` file in the root folder with the output of research on LLMs.
## Understanding Your Crew
The {{name}} Crew is composed of multiple AI agents, each with unique roles, goals, and tools. These agents collaborate on a series of tasks, defined in `config/tasks.yaml`, leveraging their collective skills to achieve complex objectives. The `config/agents.yaml` file outlines the capabilities and configurations of each agent in your crew.
## Support
For support, questions, or feedback regarding the {{crew_name}} Crew or crewAI:
- Visit our [documentation](https://docs.crewai.com)
- Reach out to us through our [GitHub repository](https://github.com/joaomdmoura/crewai)
- [Join our Discord](https://discord.com/invite/X4JWnZnxPb)
- [Chat with our docs](https://chatg.pt/DWjSBZn)
Let's create wonders together with the power and simplicity of crewAI.


@@ -1,19 +0,0 @@
researcher:
role: >
{topic} Senior Data Researcher
goal: >
Uncover cutting-edge developments in {topic}
backstory: >
You're a seasoned researcher with a knack for uncovering the latest
developments in {topic}. Known for your ability to find the most relevant
information and present it in a clear and concise manner.
reporting_analyst:
role: >
{topic} Reporting Analyst
goal: >
Create detailed reports based on {topic} data analysis and research findings
backstory: >
You're a meticulous analyst with a keen eye for detail. You're known for
your ability to turn complex data into clear and concise reports, making
it easy for others to understand and act on the information you provide.


@@ -1,17 +0,0 @@
research_task:
description: >
Conduct thorough research about {topic}
Make sure you find any interesting and relevant information given
the current year is 2024.
expected_output: >
A list with 10 bullet points of the most relevant information about {topic}
agent: researcher
reporting_task:
description: >
Review the context you got and expand each topic into a full section for a report.
Make sure the report is detailed and contains any and all relevant information.
expected_output: >
A fully fledged report with the main topics, each with a full section of information.
Formatted as markdown without '```'
agent: reporting_analyst


@@ -1,40 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
from pydantic import BaseModel
# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool
# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool
class UrgencyScore(BaseModel):
urgency_score: int
@CrewBase
class ClassifierCrew:
"""Email Classifier Crew"""
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@agent
def classifier(self) -> Agent:
return Agent(config=self.agents_config["classifier"], verbose=True)
@task
def classify_email_task(self) -> Task:
return Task(
config=self.tasks_config["classify_email"],
output_pydantic=UrgencyScore,
)
@crew
def crew(self) -> Crew:
"""Creates the Email Classifier Crew"""
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)


@@ -1,7 +0,0 @@
classifier:
role: >
Email Classifier
goal: >
Classify the email: {email} as urgent or normal on a scale of 1 to 10, where 1 is not urgent and 10 is urgent. Return the urgency score only.
backstory: >
You are a highly efficient and experienced email classifier, trained to quickly assess and classify emails. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing normal situations and maintaining smooth operations.


@@ -1,7 +0,0 @@
classify_email:
description: >
Classify the email: {email}
as urgent or normal.
expected_output: >
The urgency score of the email, on a scale of 1 to 10, where 1 is not urgent and 10 is urgent. Return the urgency score only.
agent: classifier


@@ -1,7 +0,0 @@
normal_handler:
role: >
Normal Email Processor
goal: >
Process normal emails and create an email to respond to the sender.
backstory: >
You are a highly efficient and experienced normal email handler, trained to quickly assess and respond to normal communications. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing normal situations and maintaining smooth operations.


@@ -1,6 +0,0 @@
normal_task:
description: >
Process and respond to normal email quickly.
expected_output: >
An email response to the normal email.
agent: normal_handler


@@ -1,36 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool
# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool
@CrewBase
class NormalCrew:
"""Normal Email Crew"""
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@agent
def normal_handler(self) -> Agent:
return Agent(config=self.agents_config["normal_handler"], verbose=True)
@task
def normal_task(self) -> Task:
return Task(
config=self.tasks_config["normal_task"],
)
@crew
def crew(self) -> Crew:
"""Creates the Normal Email Crew"""
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)


@@ -1,7 +0,0 @@
urgent_handler:
role: >
Urgent Email Processor
goal: >
Process urgent emails and create an email to respond to the sender.
backstory: >
You are a highly efficient and experienced urgent email handler, trained to quickly assess and respond to time-sensitive communications. Your ability to remain calm under pressure and provide concise, actionable responses has made you an invaluable asset in managing critical situations and maintaining smooth operations.


@@ -1,6 +0,0 @@
urgent_task:
description: >
Process and respond to urgent email quickly.
expected_output: >
An email response to the urgent email.
agent: urgent_handler


@@ -1,36 +0,0 @@
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
# Uncomment the following line to use an example of a custom tool
# from demo_pipeline.tools.custom_tool import MyCustomTool
# Check our tools documentations for more information on how to use them
# from crewai_tools import SerperDevTool
@CrewBase
class UrgentCrew:
"""Urgent Email Crew"""
agents_config = "config/agents.yaml"
tasks_config = "config/tasks.yaml"
@agent
def urgent_handler(self) -> Agent:
return Agent(config=self.agents_config["urgent_handler"], verbose=True)
@task
def urgent_task(self) -> Task:
return Task(
config=self.tasks_config["urgent_task"],
)
@crew
def crew(self) -> Crew:
"""Creates the Urgent Email Crew"""
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=True,
)


@@ -1,75 +0,0 @@
#!/usr/bin/env python
import asyncio
from crewai.routers.router import Route
from crewai.routers.router import Router
from {{folder_name}}.pipelines.pipeline_classifier import EmailClassifierPipeline
from {{folder_name}}.pipelines.pipeline_normal import NormalPipeline
from {{folder_name}}.pipelines.pipeline_urgent import UrgentPipeline
async def run():
"""
Run the pipeline.
"""
inputs = [
{
"email": """
Subject: URGENT: Marketing Campaign Launch - Immediate Action Required
Dear Team,
I'm reaching out regarding our upcoming marketing campaign that requires your immediate attention and swift action. We're facing a critical deadline, and our success hinges on our ability to mobilize quickly.
Key points:
Campaign launch: 48 hours from now
Target audience: 250,000 potential customers
Expected ROI: 35% increase in Q3 sales
What we need from you NOW:
Final approval on creative assets (due in 3 hours)
Confirmation of media placements (due by end of day)
Last-minute budget allocation for paid social media push
Our competitors are poised to launch similar campaigns, and we must act fast to maintain our market advantage. Delays could result in significant lost opportunities and potential revenue.
Please prioritize this campaign above all other tasks. I'll be available for the next 24 hours to address any concerns or roadblocks.
Let's make this happen!
[Your Name]
Marketing Director
P.S. I'll be scheduling an emergency team meeting in 1 hour to discuss our action plan. Attendance is mandatory.
"""
}
]
pipeline_classifier = EmailClassifierPipeline().create_pipeline()
pipeline_urgent = UrgentPipeline().create_pipeline()
pipeline_normal = NormalPipeline().create_pipeline()
router = Router(
routes={
"high_urgency": Route(
condition=lambda x: x.get("urgency_score", 0) > 7,
pipeline=pipeline_urgent
),
"low_urgency": Route(
condition=lambda x: x.get("urgency_score", 0) <= 7,
pipeline=pipeline_normal
)
},
default=pipeline_normal
)
pipeline = pipeline_classifier >> router
results = await pipeline.kickoff(inputs)
# Process and print results
for result in results:
print(f"Raw output: {result.raw}")
if result.json_dict:
print(f"JSON output: {result.json_dict}")
print("\n")
def main():
asyncio.run(run())
if __name__ == "__main__":
main()


@@ -1,24 +0,0 @@
from crewai import Pipeline
from crewai.project import PipelineBase
from ..crews.classifier_crew.classifier_crew import ClassifierCrew
@PipelineBase
class EmailClassifierPipeline:
def __init__(self):
# Initialize crews
self.classifier_crew = ClassifierCrew().crew()
def create_pipeline(self):
return Pipeline(
stages=[
self.classifier_crew
]
)
async def kickoff(self, inputs):
pipeline = self.create_pipeline()
results = await pipeline.kickoff(inputs)
return results


@@ -1,24 +0,0 @@
from crewai import Pipeline
from crewai.project import PipelineBase
from ..crews.normal_crew.normal_crew import NormalCrew
@PipelineBase
class NormalPipeline:
def __init__(self):
# Initialize crews
self.normal_crew = NormalCrew().crew()
def create_pipeline(self):
return Pipeline(
stages=[
self.normal_crew
]
)
async def kickoff(self, inputs):
pipeline = self.create_pipeline()
results = await pipeline.kickoff(inputs)
return results


@@ -1,23 +0,0 @@
from crewai import Pipeline
from crewai.project import PipelineBase
from ..crews.urgent_crew.urgent_crew import UrgentCrew
@PipelineBase
class UrgentPipeline:
def __init__(self):
# Initialize crews
self.urgent_crew = UrgentCrew().crew()
def create_pipeline(self):
return Pipeline(
stages=[
self.urgent_crew
]
)
async def kickoff(self, inputs):
pipeline = self.create_pipeline()
results = await pipeline.kickoff(inputs)
return results


@@ -1,21 +0,0 @@
[project]
name = "{{folder_name}}"
version = "0.1.0"
description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]
requires-python = ">=3.10,<=3.13"
dependencies = [
"crewai[tools]>=0.85.0,<1.0.0"
]
[project.scripts]
{{folder_name}} = "{{folder_name}}.main:main"
run_crew = "{{folder_name}}.main:main"
train = "{{folder_name}}.main:train"
replay = "{{folder_name}}.main:replay"
test = "{{folder_name}}.main:test"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"


@@ -1,19 +0,0 @@
from typing import Type
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class MyCustomToolInput(BaseModel):
"""Input schema for MyCustomTool."""
argument: str = Field(..., description="Description of the argument.")
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
args_schema: Type[BaseModel] = MyCustomToolInput
def _run(self, argument: str) -> str:
# Implementation goes here
return "this is an example of a tool output, ignore it and move along."