Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 16:48:30 +00:00
Add RPM control to both agents and crews (#133)
* moving file into utilities
* creating Logger and RPMController
* Adding support for RPM to agents and crew
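A minimal usage sketch of the new knob, assuming the public Agent/Task constructor arguments of the time; only max_rpm, verbose, agents, tasks and kickoff() are taken from the diff below:

from crewai import Agent, Crew, Task

# Illustrative agent/task values; only max_rpm, verbose, agents, tasks and
# kickoff() come from the diff in this commit.
researcher = Agent(
    role="Researcher",
    goal="Collect background material",
    backstory="An analyst who is quick to find sources.",
)
summary = Task(description="Summarize the collected material.", agent=researcher)

crew = Crew(
    agents=[researcher],
    tasks=[summary],
    verbose=2,
    max_rpm=10,  # the whole crew is held to 10 requests per minute
)
result = crew.kickoff()

The hunks below cover the crew module; the matching agent-side changes are part of the same commit but are not included in this excerpt.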
@@ -1,8 +1,6 @@
 import json
-import threading
-import time
 import uuid
-from typing import Any, ClassVar, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union
 
 from pydantic import (
     UUID4,
@@ -19,10 +17,10 @@ from pydantic_core import PydanticCustomError
 
 from crewai.agent import Agent
 from crewai.agents.cache import CacheHandler
-from crewai.i18n import I18N
 from crewai.process import Process
 from crewai.task import Task
 from crewai.tools.agent_tools import AgentTools
+from crewai.utilities import I18N, Logger, RPMController
 
 
 class Crew(BaseModel):
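The new crewai.utilities import also re-exports I18N, replacing the direct crewai.i18n import. The module itself is not part of this excerpt; judging only from the call sites in this file, its surface might look roughly like the stubs below (check_or_wait is an assumed name for the agent-facing hook; everything beyond the constructor arguments, Logger.log and stop_rpm_counter is guesswork):

# Rough interface of crewai.utilities as implied by the call sites in this file.
# Only the constructor arguments, Logger.log and RPMController.stop_rpm_counter
# are visible in the diff; everything else here is an assumption.
from typing import Optional, Union


class Logger:
    def __init__(self, verbose_level: Union[int, bool] = 0) -> None: ...

    def log(self, level: str, message: str) -> None: ...


class RPMController:
    def __init__(self, max_rpm: Optional[int] = None, logger: Optional[Logger] = None) -> None: ...

    def check_or_wait(self) -> bool: ...  # assumed name for the agent-facing throttle hook

    def stop_rpm_counter(self) -> None: ...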
@@ -37,23 +35,26 @@ class Crew(BaseModel):
         config: Configuration settings for the crew.
         cache_handler: Handles caching for the crew's operations.
         max_rpm: Maximum number of requests per minute for the crew execution to be respected.
-        rpm: Current number of requests per minute for the crew execution.
         id: A unique identifier for the crew instance.
     """
 
     __hash__ = object.__hash__
-    _timer: Optional[threading.Timer] = PrivateAttr(default=None)
-    lock: ClassVar[threading.Lock] = threading.Lock()
-    rpm: ClassVar[int] = 0
-    max_rpm: Optional[int] = Field(default=None)
+    _rpm_controller: RPMController = PrivateAttr()
+    _logger: Logger = PrivateAttr()
+    _cache_handler: Optional[InstanceOf[CacheHandler]] = PrivateAttr(
+        default=CacheHandler()
+    )
     model_config = ConfigDict(arbitrary_types_allowed=True)
     tasks: List[Task] = Field(default_factory=list)
     agents: List[Agent] = Field(default_factory=list)
     process: Process = Field(default=Process.sequential)
     verbose: Union[int, bool] = Field(default=0)
     config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
-    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(default=CacheHandler())
     id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
+    max_rpm: Optional[int] = Field(
+        default=None,
+        description="Maximum number of requests per minute for the crew execution to be respected.",
+    )
     language: str = Field(
         default="en",
         description="Language used for the crew, defaults to English.",
@@ -74,9 +75,10 @@ class Crew(BaseModel):
         return json.loads(v) if isinstance(v, Json) else v
 
     @model_validator(mode="after")
-    def set_reset_counter(self):
-        if self.max_rpm:
-            self._reset_request_count()
+    def set_private_attrs(self):
+        self._cache_handler = CacheHandler()
+        self._logger = Logger(self.verbose)
+        self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
         return self
 
     @model_validator(mode="after")
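The replacement validator relies on standard Pydantic v2 behaviour: a model_validator with mode="after" runs on the fully constructed instance, so it can derive PrivateAttr values from public fields such as verbose and max_rpm. A self-contained sketch of the same pattern (Throttled and the dict stand-in are illustrative only, not crewAI code):

from typing import Optional

from pydantic import BaseModel, PrivateAttr, model_validator


class Throttled(BaseModel):
    max_rpm: Optional[int] = None
    _controller: Optional[dict] = PrivateAttr(default=None)

    @model_validator(mode="after")
    def set_private_attrs(self):
        # Runs after field validation, so public fields are already populated.
        self._controller = {"max_rpm": self.max_rpm}  # stand-in for RPMController
        return self


print(Throttled(max_rpm=5)._controller)  # {'max_rpm': 5}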
@@ -94,8 +96,8 @@ class Crew(BaseModel):
 
         if self.agents:
             for agent in self.agents:
-                agent.set_cache_handler(self.cache_handler)
-                agent.set_request_within_rpm_limit(self.ensure_request_within_rpm_limit)
+                agent.set_cache_handler(self._cache_handler)
+                agent.set_rpm_controller(self._rpm_controller)
         return self
 
     def _setup_from_config(self):
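The agent-side setter lives in crewai.agent, which this excerpt does not show; presumably it stores the shared controller and consults it before each model call. A hypothetical stand-in, with only the name set_rpm_controller taken from the call above:

# Hypothetical counterpart in crewai.agent (changed by the same commit but not
# shown in this excerpt); everything inside the methods is an assumption.
class Agent:  # simplified stand-in, not the real class
    _rpm_controller = None

    def set_rpm_controller(self, rpm_controller) -> None:
        if self._rpm_controller is None:
            self._rpm_controller = rpm_controller

    def _before_model_call(self) -> None:
        # Wait on the shared crew-level controller before each LLM request.
        if self._rpm_controller is not None:
            self._rpm_controller.check_or_wait()  # assumed name, see the stub sketch above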
@@ -116,28 +118,9 @@ class Crew(BaseModel):
         del task_config["agent"]
         return Task(**task_config, agent=task_agent)
 
-    def ensure_request_within_rpm_limit(self):
-        if not self.max_rpm:
-            return True
-
-        with Crew.lock:
-            if Crew.rpm < self.max_rpm:
-                Crew.rpm += 1
-                return True
-            self._log("info", "Max RPM reached, waiting for next minute to start.")
-
-        return self._wait_for_next_minute()
-
-    def _wait_for_next_minute(self):
-        time.sleep(60)
-        with Crew.lock:
-            Crew.rpm = 0
-        return True
-
     def kickoff(self) -> str:
         """Starts the crew to work on its assigned tasks."""
         for agent in self.agents:
-            agent.cache_handler = self.cache_handler
             agent.i18n = I18N(language=self.language)
 
         if self.process == Process.sequential:
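The deleted ensure_request_within_rpm_limit / _wait_for_next_minute pair is the logic that presumably moved into RPMController, tracked per instance rather than in class-level state on Crew. A sketch of the equivalent method under that assumption (attribute and method names are guesses):

# Sketch of where the deleted logic plausibly went, adapted from the removed
# Crew methods above; the per-instance attribute names are assumptions.
import threading
import time


class RPMController:
    def __init__(self, max_rpm=None, logger=None):
        self.max_rpm = max_rpm
        self.logger = logger
        self._current_rpm = 0
        self._lock = threading.Lock()

    def check_or_wait(self) -> bool:
        if not self.max_rpm:
            return True

        with self._lock:
            if self._current_rpm < self.max_rpm:
                self._current_rpm += 1
                return True
            if self.logger:
                self.logger.log("info", "Max RPM reached, waiting for next minute to start.")

        # Equivalent of the removed _wait_for_next_minute, kept per instance
        # instead of on class-level Crew state.
        time.sleep(60)
        with self._lock:
            self._current_rpm = 0
        return True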
@@ -149,8 +132,12 @@ class Crew(BaseModel):
         for task in self.tasks:
             self._prepare_and_execute_task(task)
             task_output = task.execute(task_output)
-            self._log("debug", f"\n[{task.agent.role}] Task output: {task_output}\n\n")
-        self._stop_timer()
+            self._logger.log(
+                "debug", f"[{task.agent.role}] Task output: {task_output}\n\n"
+            )
+
+        if self.max_rpm:
+            self._rpm_controller.stop_rpm_counter()
         return task_output
 
     def _prepare_and_execute_task(self, task):
@@ -158,24 +145,5 @@ class Crew(BaseModel):
         if task.agent.allow_delegation:
             task.tools += AgentTools(agents=self.agents).tools()
 
-        self._log("debug", f"Working Agent: {task.agent.role}")
-        self._log("info", f"Starting Task: {task.description}")
-
-    def _log(self, level, message):
-        """Logs a message at the specified verbosity level."""
-        level_map = {"debug": 1, "info": 2}
-        verbose_level = (
-            2 if isinstance(self.verbose, bool) and self.verbose else self.verbose
-        )
-        if verbose_level and level_map[level] <= verbose_level:
-            print(f"\n{message}")
-
-    def _stop_timer(self):
-        if self._timer:
-            self._timer.cancel()
-
-    def _reset_request_count(self):
-        self._stop_timer()
-        self._timer = threading.Timer(60.0, self._reset_request_count)
-        self._timer.start()
-        Crew.rpm = 0
+        self._logger.log("debug", f"Working Agent: {task.agent.role}")
+        self._logger.log("info", f"Starting Task: {task.description}")
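The removed _log helper maps directly onto the new Logger utility. A sketch that preserves the old verbosity semantics; only Logger(self.verbose) and .log(level, message) are confirmed by this diff, the class body is an assumption:

# Sketch of a Logger keeping the removed _log behaviour; the body is assumed.
from typing import Union


class Logger:
    def __init__(self, verbose_level: Union[int, bool] = 0) -> None:
        # A bare verbose=True maps to level 2, exactly as the removed _log did.
        if isinstance(verbose_level, bool) and verbose_level:
            verbose_level = 2
        self.verbose_level = verbose_level

    def log(self, level: str, message: str) -> None:
        level_map = {"debug": 1, "info": 2}
        if self.verbose_level and level_map[level] <= self.verbose_level:
            print(f"\n{message}")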