From 144e6d203f5016f41d5f3f2403311981adb84d90 Mon Sep 17 00:00:00 2001 From: Eduardo Chiarotti Date: Fri, 26 Jul 2024 14:24:29 -0300 Subject: [PATCH] feat: add ability to set LLM for AgentPlanner on Crew (#1001) * feat: add ability to set LLM for AgentPlanner on Crew * feat: fixes issue on instantiating the ChatOpenAI on the crew * docs: add docs for the planning_llm new parameter * docs: change message to ChatOpenAI llm * feat: add tests --- docs/core-concepts/Crews.md | 1 + docs/core-concepts/Planning.md | 19 ++++++++++++ src/crewai/crew.py | 17 ++++++----- src/crewai/utilities/planning_handler.py | 20 ++++++++++--- tests/utilities/test_planning_handler.py | 38 ++++++++++++++++++++++-- 5 files changed, 80 insertions(+), 15 deletions(-) diff --git a/docs/core-concepts/Crews.md b/docs/core-concepts/Crews.md index 1896c6a38..62c3da657 100644 --- a/docs/core-concepts/Crews.md +++ b/docs/core-concepts/Crews.md @@ -33,6 +33,7 @@ A crew in crewAI represents a collaborative group of agents working together to | **Manager Callbacks** _(optional)_ | `manager_callbacks` | `manager_callbacks` takes a list of callback handlers to be executed by the manager agent when a hierarchical process is used. | | **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. | | **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. +| **Planning LLM** *(optional)* | `planning_llm` | The language model used by the AgentPlanner in a planning process. | !!! note "Crew Max RPM" The `max_rpm` attribute sets the maximum number of requests per minute the crew can perform to avoid rate limits and will override individual agents' `max_rpm` settings if you set it. 
diff --git a/docs/core-concepts/Planning.md b/docs/core-concepts/Planning.md index 810309703..36ae34437 100644 --- a/docs/core-concepts/Planning.md +++ b/docs/core-concepts/Planning.md @@ -23,6 +23,25 @@ my_crew = Crew( From this point on, your crew will have planning enabled, and the tasks will be planned before each iteration. +#### Planning LLM + +Now you can define the LLM that will be used to plan the tasks. You can use any ChatOpenAI LLM model available. + +```python +from crewai import Crew, Agent, Task, Process +from langchain_openai import ChatOpenAI + +# Assemble your crew with planning capabilities and custom LLM +my_crew = Crew( + agents=self.agents, + tasks=self.tasks, + process=Process.sequential, + planning=True, + planning_llm=ChatOpenAI(model="gpt-4o") +) +``` + + ### Example When running the base case example, you will see something like the following output, which represents the output of the AgentPlanner responsible for creating the step-by-step logic to add to the Agents tasks. 
diff --git a/src/crewai/crew.py b/src/crewai/crew.py index f1f064a3a..d133c2f47 100644 --- a/src/crewai/crew.py +++ b/src/crewai/crew.py @@ -155,6 +155,10 @@ class Crew(BaseModel): default=False, description="Plan the crew execution and add the plan to the crew.", ) + planning_llm: Optional[Any] = Field( + default=None, + description="Language model that will run the AgentPlanner if planning is True.", + ) task_execution_output_json_files: Optional[List[str]] = Field( default=None, description="List of file paths for task execution JSON files.", @@ -560,15 +564,12 @@ class Crew(BaseModel): def _handle_crew_planning(self): """Handles the Crew planning.""" self._logger.log("info", "Planning the crew execution") - result = CrewPlanner(self.tasks)._handle_crew_planning() + result = CrewPlanner( + tasks=self.tasks, planning_agent_llm=self.planning_llm + )._handle_crew_planning() - if result is not None and hasattr(result, "list_of_plans_per_task"): - for task, step_plan in zip(self.tasks, result.list_of_plans_per_task): - task.description += step_plan - else: - self._logger.log( - "info", "Something went wrong with the planning process of the Crew" - ) + for task, step_plan in zip(self.tasks, result.list_of_plans_per_task): + task.description += step_plan def _store_execution_log( self, diff --git a/src/crewai/utilities/planning_handler.py b/src/crewai/utilities/planning_handler.py index cba1727b9..29b89667e 100644 --- a/src/crewai/utilities/planning_handler.py +++ b/src/crewai/utilities/planning_handler.py @@ -1,5 +1,6 @@ -from typing import List, Optional +from typing import Any, List, Optional +from langchain_openai import ChatOpenAI from pydantic import BaseModel from crewai.agent import Agent @@ -11,17 +12,27 @@ class PlannerTaskPydanticOutput(BaseModel): class CrewPlanner: - def __init__(self, tasks: List[Task]): + def __init__(self, tasks: List[Task], planning_agent_llm: Optional[Any] = None): self.tasks = tasks - def _handle_crew_planning(self) -> 
Optional[BaseModel]: + if planning_agent_llm is None: + self.planning_agent_llm = ChatOpenAI(model="gpt-4o-mini") + else: + self.planning_agent_llm = planning_agent_llm + + def _handle_crew_planning(self) -> PlannerTaskPydanticOutput: """Handles the Crew planning by creating detailed step-by-step plans for each task.""" planning_agent = self._create_planning_agent() tasks_summary = self._create_tasks_summary() planner_task = self._create_planner_task(planning_agent, tasks_summary) - return planner_task.execute_sync().pydantic + result = planner_task.execute_sync() + + if isinstance(result.pydantic, PlannerTaskPydanticOutput): + return result.pydantic + + raise ValueError("Failed to get the Planning output") def _create_planning_agent(self) -> Agent: """Creates the planning agent for the crew planning.""" @@ -32,6 +43,7 @@ class CrewPlanner: "available to each agent so that they can perform the tasks in an exemplary manner" ), backstory="Planner agent for crew planning", + llm=self.planning_agent_llm, ) def _create_planner_task(self, planning_agent: Agent, tasks_summary: str) -> Task: diff --git a/tests/utilities/test_planning_handler.py b/tests/utilities/test_planning_handler.py index 75bc3e033..502398fab 100644 --- a/tests/utilities/test_planning_handler.py +++ b/tests/utilities/test_planning_handler.py @@ -1,10 +1,11 @@ from unittest.mock import patch -from crewai.tasks.task_output import TaskOutput import pytest +from langchain_openai import ChatOpenAI from crewai.agent import Agent from crewai.task import Task +from crewai.tasks.task_output import TaskOutput from crewai.utilities.planning_handler import CrewPlanner, PlannerTaskPydanticOutput @@ -28,7 +29,19 @@ class TestCrewPlanner: agent=Agent(role="Agent 3", goal="Goal 3", backstory="Backstory 3"), ), ] - return CrewPlanner(tasks) + return CrewPlanner(tasks, None) + + @pytest.fixture + def crew_planner_different_llm(self): + tasks = [ + Task( + description="Task 1", + expected_output="Output 1", + 
agent=Agent(role="Agent 1", goal="Goal 1", backstory="Backstory 1"), + ) + ] + planning_agent_llm = ChatOpenAI(model="gpt-3.5-turbo") + return CrewPlanner(tasks, planning_agent_llm) def test_handle_crew_planning(self, crew_planner): with patch.object(Task, "execute_sync") as execute: @@ -40,7 +53,7 @@ class TestCrewPlanner: ), ) result = crew_planner._handle_crew_planning() - + assert crew_planner.planning_agent_llm.model_name == "gpt-4o-mini" assert isinstance(result, PlannerTaskPydanticOutput) assert len(result.list_of_plans_per_task) == len(crew_planner.tasks) execute.assert_called_once() @@ -72,3 +85,22 @@ class TestCrewPlanner: assert isinstance(tasks_summary, str) assert tasks_summary.startswith("\n Task Number 1 - Task 1") assert tasks_summary.endswith('"agent_tools": []\n ') + + def test_handle_crew_planning_different_llm(self, crew_planner_different_llm): + with patch.object(Task, "execute_sync") as execute: + execute.return_value = TaskOutput( + description="Description", + agent="agent", + pydantic=PlannerTaskPydanticOutput(list_of_plans_per_task=["Plan 1"]), + ) + result = crew_planner_different_llm._handle_crew_planning() + + assert ( + crew_planner_different_llm.planning_agent_llm.model_name + == "gpt-3.5-turbo" + ) + assert isinstance(result, PlannerTaskPydanticOutput) + assert len(result.list_of_plans_per_task) == len( + crew_planner_different_llm.tasks + ) + execute.assert_called_once()