diff --git a/docs/how-to/Conditional-Tasks.md b/docs/how-to/Conditional-Tasks.md new file mode 100644 index 000000000..45b6b480e --- /dev/null +++ b/docs/how-to/Conditional-Tasks.md @@ -0,0 +1,88 @@ +--- +title: Conditional Tasks +description: Learn how to use conditional tasks in a crewAI kickoff +--- + +## Introduction +Conditional Tasks in crewAI allow for dynamic workflow adaptation based on the outcomes of previous tasks. This powerful feature enables crews to make decisions and execute tasks selectively, enhancing the flexibility and efficiency of your AI-driven processes. + + +```python +from typing import List + +from pydantic import BaseModel +from crewai import Agent, Crew +from crewai.tasks.conditional_task import ConditionalTask +from crewai.tasks.task_output import TaskOutput +from crewai.task import Task +from crewai_tools import SerperDevTool + + +# Define a condition function for the conditional task - if false task will be skipped, true, then execute task +def is_data_fetched(output: TaskOutput) -> bool: + if len(output.pydantic.events) >= 10: # this will skip this task + return False + return True + + +# Define the agents +data_fetcher_agent = Agent( + role="Data Fetcher", + goal="Fetch data online using Serper tool", + backstory="Backstory 1", + verbose=True, + tools=[SerperDevTool()], +) + +data_processor_agent = Agent( + role="Data Processor", + goal="Process fetched data", + backstory="Backstory 2", + verbose=True, +) + +summary_generator_agent = Agent( + role="Summary Generator", + goal="Generate summary from fetched data", + backstory="Backstory 3", + verbose=True, +) + + +class EventOutput(BaseModel): + events: List[str] + + +task1 = Task( + name="Data Fetching Task", + description="Fetch data about events in San Francisco using Serper tool", + expected_output="List of 10 things to do in SF this week", + agent=data_fetcher_agent, + output_pydantic=EventOutput, +) + +conditional_task = ConditionalTask( + name="Data Processing Task", + 
description="Process data if data fetching is successful", + expected_output="List of 11 things to do in SF this week", + condition=is_data_fetched, + agent=data_processor_agent, +) + +task3 = Task( + name="Summary Generation Task", + description="Generate summary of events in San Francisco from fetched data", + expected_output="summary_generated", + agent=summary_generator_agent, +) + +# Create a crew with the tasks +crew = Crew( + agents=[data_fetcher_agent, data_processor_agent, summary_generator_agent], + tasks=[task1, conditional_task, task3], + verbose=2, +) + +result = crew.kickoff() +print("results", result) +``` \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index 061ee9027..b21c20681 100644 --- a/docs/index.md +++ b/docs/index.md @@ -118,6 +118,11 @@ Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By Replay from a Task +
  • + + Conditional Tasks + +
  • Agent Monitoring with AgentOps diff --git a/mkdocs.yml b/mkdocs.yml index 622340b39..6e8cae1c8 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -146,6 +146,7 @@ nav: - Kickoff a Crew Asynchronously: 'how-to/Kickoff-async.md' - Kickoff a Crew for a List: 'how-to/Kickoff-for-each.md' - Replay from a specific task from a kickoff: 'how-to/Replay-tasks-from-latest-Crew-Kickoff.md' + - Conditional Tasks: 'how-to/Conditional-Tasks.md' - Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md' - Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md' - Tools Docs: diff --git a/src/crewai/crew.py b/src/crewai/crew.py index 5edc25c0b..f025145fc 100644 --- a/src/crewai/crew.py +++ b/src/crewai/crew.py @@ -660,7 +660,7 @@ class Crew(BaseModel): previous_output ): self._logger.log( - "info", + "debug", f"Skipping conditional task: {task.description}", color="yellow", ) @@ -670,6 +670,7 @@ class Crew(BaseModel): agent=task.agent.role if task.agent else "", output_format=OutputFormat.RAW, ) + if not was_replayed: self._store_execution_log( task, diff --git a/src/crewai/tasks/conditional_task.py b/src/crewai/tasks/conditional_task.py index e11e1097d..97b75c1f7 100644 --- a/src/crewai/tasks/conditional_task.py +++ b/src/crewai/tasks/conditional_task.py @@ -18,11 +18,10 @@ class ConditionalTask(Task): def __init__( self, - *args, condition: Callable[[Any], bool], **kwargs, ): - super().__init__(*args, **kwargs) + super().__init__(**kwargs) self.condition = condition def should_execute(self, context: TaskOutput) -> bool: diff --git a/tests/task_test.py b/tests/task_test.py index 9e98ecbad..7bf74866f 100644 --- a/tests/task_test.py +++ b/tests/task_test.py @@ -6,6 +6,7 @@ from unittest.mock import MagicMock, patch import pytest from crewai import Agent, Crew, Process, Task +from crewai.tasks.conditional_task import ConditionalTask from crewai.tasks.task_output import TaskOutput from crewai.utilities.converter import Converter from pydantic import BaseModel @@ 
-695,6 +696,19 @@ def test_task_definition_based_on_dict(): assert task.agent is None +def test_conditional_task_definition_based_on_dict(): + config = { + "description": "Give me an integer score between 1-5 for the following title: 'The impact of AI in the future of work', check examples to based your evaluation.", + "expected_output": "The score of the title.", + } + + task = ConditionalTask(config=config, condition=lambda x: True) + + assert task.description == config["description"] + assert task.expected_output == config["expected_output"] + assert task.agent is None + + def test_interpolate_inputs(): task = Task( description="Give me a list of 5 interesting ideas about {topic} to explore for an article, what makes them unique and interesting.",