Add RPM control to both agents and crews (#133)

* Move file into utilities
* Create Logger and RPMController
* Add RPM support to agents and crews (see the usage sketch below)
Author: João Moura
Date: 2024-01-14 00:22:11 -03:00
Committed by: GitHub
Parent: b0c5e24507
Commit: e27dd53c78
16 changed files with 2343 additions and 85 deletions
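
As a quick illustration of the new field, an agent capped at 10 requests per minute could be declared as below. This example is not part of the commit; the top-level import and the sample values are assumptions for illustration, while role, goal, backstory, verbose, and max_rpm are the fields shown in the diff.

from crewai import Agent  # top-level import assumed for illustration

# max_rpm (new in this commit) caps the agent's LLM requests per minute.
researcher = Agent(
    role="Researcher",
    goal="Summarize rate-limiting strategies for LLM pipelines",
    backstory="An analyst who is careful with API quotas.",
    max_rpm=10,  # at most 10 requests per minute for this agent
    verbose=True,
)

When max_rpm is set, the validator added below wires up a Logger and an RPMController, and the agent executor consults the controller before each request.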


@@ -24,8 +24,7 @@ from crewai.agents import (
CrewAgentOutputParser,
ToolsHandler,
)
from crewai.i18n import I18N
from crewai.prompts import Prompts
from crewai.utilities import I18N, Logger, Prompts, RPMController
class Agent(BaseModel):
@@ -42,11 +41,14 @@ class Agent(BaseModel):
llm: The language model that will run the agent.
max_iter: Maximum number of iterations for an agent to execute a task.
memory: Whether the agent should have memory or not.
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
verbose: Whether the agent execution should be in verbose mode.
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
"""
__hash__ = object.__hash__
_logger: Logger = PrivateAttr()
_rpm_controller: RPMController = PrivateAttr(default=None)
_request_within_rpm_limit: Any = PrivateAttr(default=None)
model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -58,6 +60,10 @@ class Agent(BaseModel):
role: str = Field(description="Role of the agent")
goal: str = Field(description="Objective of the agent")
backstory: str = Field(description="Backstory of the agent")
max_rpm: Optional[int] = Field(
default=None,
description="Maximum number of requests per minute for the agent execution to be respected.",
)
memory: bool = Field(
default=True, description="Whether the agent should have memory or not"
)
@@ -101,6 +107,15 @@ class Agent(BaseModel):
"may_not_set_field", "This field is not to be set by the user.", {}
)
@model_validator(mode="after")
def set_private_attrs(self):
self._logger = Logger(self.verbose)
if self.max_rpm and not self._rpm_controller:
self._rpm_controller = RPMController(
max_rpm=self.max_rpm, logger=self._logger
)
return self
@model_validator(mode="after")
def check_agent_executor(self) -> "Agent":
if not self.agent_executor:
@@ -128,7 +143,7 @@ class Agent(BaseModel):
tools = tools or self.tools
self.agent_executor.tools = tools
return self.agent_executor.invoke(
result = self.agent_executor.invoke(
{
"input": task,
"tool_names": self.__tools_names(tools),
@@ -137,14 +152,20 @@ class Agent(BaseModel):
RunnableConfig(callbacks=[self.tools_handler]),
)["output"]
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
return result
def set_cache_handler(self, cache_handler) -> None:
self.cache_handler = cache_handler
self.tools_handler = ToolsHandler(cache=self.cache_handler)
self.__create_agent_executor()
def set_request_within_rpm_limit(self, ensure_function) -> None:
self._request_within_rpm_limit = ensure_function
self.__create_agent_executor()
def set_rpm_controller(self, rpm_controller) -> None:
if not self._rpm_controller:
self._rpm_controller = rpm_controller
self.__create_agent_executor()
def __create_agent_executor(self) -> CrewAgentExecutor:
"""Create an agent executor for the agent.
@@ -164,9 +185,13 @@ class Agent(BaseModel):
"verbose": self.verbose,
"handle_parsing_errors": True,
"max_iterations": self.max_iter,
"request_within_rpm_limit": self._request_within_rpm_limit,
}
if self._rpm_controller:
executor_args[
"request_within_rpm_limit"
] = self._rpm_controller.check_or_wait
if self.memory:
summary_memory = ConversationSummaryMemory(
llm=self.llm, input_key="input", memory_key="chat_history"
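
For context on the check_or_wait callback plugged into request_within_rpm_limit above, here is a minimal sketch of an RPM gate with the same interface (max_rpm, check_or_wait, stop_rpm_counter). The internals below, a lock plus a timer that resets the counter every minute, are assumptions for illustration, not the actual crewai.utilities.RPMController code.

import threading
import time


class SimpleRPMController:
    """Sketch of an RPM gate exposing the interface used in the diff above
    (max_rpm, check_or_wait, stop_rpm_counter). Internals are assumed."""

    def __init__(self, max_rpm: int):
        self.max_rpm = max_rpm
        self._current_rpm = 0
        self._lock = threading.Lock()
        self._shutdown = False
        self._timer = None
        self._start_reset_timer()

    def _start_reset_timer(self) -> None:
        # Re-arm a daemon timer that clears the counter every 60 seconds.
        if self._shutdown:
            return
        self._timer = threading.Timer(60.0, self._reset_counter)
        self._timer.daemon = True
        self._timer.start()

    def _reset_counter(self) -> None:
        with self._lock:
            self._current_rpm = 0
        self._start_reset_timer()

    def check_or_wait(self) -> bool:
        # Count the request if under the limit; otherwise block until the
        # next reset, then count it and let the caller proceed.
        while True:
            with self._lock:
                if self._current_rpm < self.max_rpm:
                    self._current_rpm += 1
                    return True
            time.sleep(1)  # waiting for the per-minute counter to reset

    def stop_rpm_counter(self) -> None:
        # Called when the task finishes so the reset timer does not linger.
        self._shutdown = True
        if self._timer is not None:
            self._timer.cancel()

The executor calls check_or_wait() before each request; once the per-minute budget is spent, the call blocks until the counter resets. The stop_rpm_counter() call added in execute_task above then cancels the reset timer when the task finishes.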