Add RPM control to both agents and crews (#133)

* moving file into utilities
* creating Logger and RPMController
* Adding support for RPM to agents and crew
This commit is contained in:
João Moura
2024-01-14 00:22:11 -03:00
committed by GitHub
parent b0c5e24507
commit e27dd53c78
16 changed files with 2343 additions and 85 deletions

View File

@@ -24,8 +24,7 @@ from crewai.agents import (
CrewAgentOutputParser,
ToolsHandler,
)
from crewai.i18n import I18N
from crewai.prompts import Prompts
from crewai.utilities import I18N, Logger, Prompts, RPMController
class Agent(BaseModel):
@@ -42,11 +41,14 @@ class Agent(BaseModel):
llm: The language model that will run the agent.
max_iter: Maximum number of iterations for an agent to execute a task.
memory: Whether the agent should have memory or not.
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
verbose: Whether the agent execution should be in verbose mode.
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
"""
__hash__ = object.__hash__
_logger: Logger = PrivateAttr()
_rpm_controller: RPMController = PrivateAttr(default=None)
_request_within_rpm_limit: Any = PrivateAttr(default=None)
model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -58,6 +60,10 @@ class Agent(BaseModel):
role: str = Field(description="Role of the agent")
goal: str = Field(description="Objective of the agent")
backstory: str = Field(description="Backstory of the agent")
max_rpm: Optional[int] = Field(
default=None,
description="Maximum number of requests per minute for the agent execution to be respected.",
)
memory: bool = Field(
default=True, description="Whether the agent should have memory or not"
)
@@ -101,6 +107,15 @@ class Agent(BaseModel):
"may_not_set_field", "This field is not to be set by the user.", {}
)
@model_validator(mode="after")
def set_private_attrs(self):
self._logger = Logger(self.verbose)
if self.max_rpm and not self._rpm_controller:
self._rpm_controller = RPMController(
max_rpm=self.max_rpm, logger=self._logger
)
return self
@model_validator(mode="after")
def check_agent_executor(self) -> "Agent":
if not self.agent_executor:
@@ -128,7 +143,7 @@ class Agent(BaseModel):
tools = tools or self.tools
self.agent_executor.tools = tools
return self.agent_executor.invoke(
result = self.agent_executor.invoke(
{
"input": task,
"tool_names": self.__tools_names(tools),
@@ -137,14 +152,20 @@ class Agent(BaseModel):
RunnableConfig(callbacks=[self.tools_handler]),
)["output"]
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
return result
def set_cache_handler(self, cache_handler) -> None:
self.cache_handler = cache_handler
self.tools_handler = ToolsHandler(cache=self.cache_handler)
self.__create_agent_executor()
def set_request_within_rpm_limit(self, ensure_function) -> None:
self._request_within_rpm_limit = ensure_function
self.__create_agent_executor()
def set_rpm_controller(self, rpm_controller) -> None:
if not self._rpm_controller:
self._rpm_controller = rpm_controller
self.__create_agent_executor()
def __create_agent_executor(self) -> CrewAgentExecutor:
"""Create an agent executor for the agent.
@@ -164,9 +185,13 @@ class Agent(BaseModel):
"verbose": self.verbose,
"handle_parsing_errors": True,
"max_iterations": self.max_iter,
"request_within_rpm_limit": self._request_within_rpm_limit,
}
if self._rpm_controller:
executor_args[
"request_within_rpm_limit"
] = self._rpm_controller.check_or_wait
if self.memory:
summary_memory = ConversationSummaryMemory(
llm=self.llm, input_key="input", memory_key="chat_history"

View File

@@ -1,6 +1,6 @@
from langchain_core.exceptions import OutputParserException
from crewai.i18n import I18N
from crewai.utilities import I18N
class TaskRepeatedUsageException(OutputParserException):

View File

@@ -12,8 +12,8 @@ from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from crewai.agents.cache.cache_hit import CacheHit
from crewai.i18n import I18N
from crewai.tools.cache_tools import CacheTools
from crewai.utilities import I18N
class CrewAgentExecutor(AgentExecutor):

View File

@@ -7,7 +7,7 @@ from langchain_core.agents import AgentAction, AgentFinish
from crewai.agents.cache import CacheHandler, CacheHit
from crewai.agents.exceptions import TaskRepeatedUsageException
from crewai.agents.tools_handler import ToolsHandler
from crewai.i18n import I18N
from crewai.utilities import I18N
FINAL_ANSWER_ACTION = "Final Answer:"
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE = (

View File

@@ -1,8 +1,6 @@
import json
import threading
import time
import uuid
from typing import Any, ClassVar, Dict, List, Optional, Union
from typing import Any, Dict, List, Optional, Union
from pydantic import (
UUID4,
@@ -19,10 +17,10 @@ from pydantic_core import PydanticCustomError
from crewai.agent import Agent
from crewai.agents.cache import CacheHandler
from crewai.i18n import I18N
from crewai.process import Process
from crewai.task import Task
from crewai.tools.agent_tools import AgentTools
from crewai.utilities import I18N, Logger, RPMController
class Crew(BaseModel):
@@ -37,23 +35,26 @@ class Crew(BaseModel):
config: Configuration settings for the crew.
cache_handler: Handles caching for the crew's operations.
max_rpm: Maximum number of requests per minute for the crew execution to be respected.
rpm: Current number of requests per minute for the crew execution.
id: A unique identifier for the crew instance.
"""
__hash__ = object.__hash__
_timer: Optional[threading.Timer] = PrivateAttr(default=None)
lock: ClassVar[threading.Lock] = threading.Lock()
rpm: ClassVar[int] = 0
max_rpm: Optional[int] = Field(default=None)
_rpm_controller: RPMController = PrivateAttr()
_logger: Logger = PrivateAttr()
_cache_handler: Optional[InstanceOf[CacheHandler]] = PrivateAttr(
default=CacheHandler()
)
model_config = ConfigDict(arbitrary_types_allowed=True)
tasks: List[Task] = Field(default_factory=list)
agents: List[Agent] = Field(default_factory=list)
process: Process = Field(default=Process.sequential)
verbose: Union[int, bool] = Field(default=0)
config: Optional[Union[Json, Dict[str, Any]]] = Field(default=None)
cache_handler: Optional[InstanceOf[CacheHandler]] = Field(default=CacheHandler())
id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
max_rpm: Optional[int] = Field(
default=None,
description="Maximum number of requests per minute for the crew execution to be respected.",
)
language: str = Field(
default="en",
description="Language used for the crew, defaults to English.",
@@ -74,9 +75,10 @@ class Crew(BaseModel):
return json.loads(v) if isinstance(v, Json) else v
@model_validator(mode="after")
def set_reset_counter(self):
if self.max_rpm:
self._reset_request_count()
def set_private_attrs(self):
self._cache_handler = CacheHandler()
self._logger = Logger(self.verbose)
self._rpm_controller = RPMController(max_rpm=self.max_rpm, logger=self._logger)
return self
@model_validator(mode="after")
@@ -94,8 +96,8 @@ class Crew(BaseModel):
if self.agents:
for agent in self.agents:
agent.set_cache_handler(self.cache_handler)
agent.set_request_within_rpm_limit(self.ensure_request_within_rpm_limit)
agent.set_cache_handler(self._cache_handler)
agent.set_rpm_controller(self._rpm_controller)
return self
def _setup_from_config(self):
@@ -116,28 +118,9 @@ class Crew(BaseModel):
del task_config["agent"]
return Task(**task_config, agent=task_agent)
def ensure_request_within_rpm_limit(self):
if not self.max_rpm:
return True
with Crew.lock:
if Crew.rpm < self.max_rpm:
Crew.rpm += 1
return True
self._log("info", "Max RPM reached, waiting for next minute to start.")
return self._wait_for_next_minute()
def _wait_for_next_minute(self):
time.sleep(60)
with Crew.lock:
Crew.rpm = 0
return True
def kickoff(self) -> str:
"""Starts the crew to work on its assigned tasks."""
for agent in self.agents:
agent.cache_handler = self.cache_handler
agent.i18n = I18N(language=self.language)
if self.process == Process.sequential:
@@ -149,8 +132,12 @@ class Crew(BaseModel):
for task in self.tasks:
self._prepare_and_execute_task(task)
task_output = task.execute(task_output)
self._log("debug", f"\n[{task.agent.role}] Task output: {task_output}\n\n")
self._stop_timer()
self._logger.log(
"debug", f"[{task.agent.role}] Task output: {task_output}\n\n"
)
if self.max_rpm:
self._rpm_controller.stop_rpm_counter()
return task_output
def _prepare_and_execute_task(self, task):
@@ -158,24 +145,5 @@ class Crew(BaseModel):
if task.agent.allow_delegation:
task.tools += AgentTools(agents=self.agents).tools()
self._log("debug", f"Working Agent: {task.agent.role}")
self._log("info", f"Starting Task: {task.description}")
def _log(self, level, message):
"""Logs a message at the specified verbosity level."""
level_map = {"debug": 1, "info": 2}
verbose_level = (
2 if isinstance(self.verbose, bool) and self.verbose else self.verbose
)
if verbose_level and level_map[level] <= verbose_level:
print(f"\n{message}")
def _stop_timer(self):
if self._timer:
self._timer.cancel()
def _reset_request_count(self):
self._stop_timer()
self._timer = threading.Timer(60.0, self._reset_request_count)
self._timer.start()
Crew.rpm = 0
self._logger.log("debug", f"Working Agent: {task.agent.role}")
self._logger.log("info", f"Starting Task: {task.description}")

View File

@@ -4,7 +4,7 @@ from langchain.tools import Tool
from pydantic import BaseModel, Field
from crewai.agent import Agent
from crewai.i18n import I18N
from crewai.utilities import I18N
class AgentTools(BaseModel):

View File

@@ -0,0 +1,4 @@
from .i18n import I18N
from .logger import Logger
from .prompts import Prompts
from .rpm_controller import RPMController

View File

@@ -17,7 +17,9 @@ class I18N(BaseModel):
"""Load translations from a JSON file based on the specified language."""
try:
dir_path = os.path.dirname(os.path.realpath(__file__))
prompts_path = os.path.join(dir_path, f"translations/{self.language}.json")
prompts_path = os.path.join(
dir_path, f"../translations/{self.language}.json"
)
with open(prompts_path, "r") as f:
self._translations = json.load(f)

View File

@@ -0,0 +1,11 @@
class Logger:
    """Console logger that prints messages at or below a verbosity threshold.

    Verbosity is numeric: 1 enables "debug" and 2 enables both "debug"
    and "info". A boolean ``True`` is treated as maximum verbosity (2).
    """

    def __init__(self, verbose_level=0):
        # Promote a bare `True` to the most verbose level (2); `False`
        # and plain integers pass through unchanged.
        if isinstance(verbose_level, bool) and verbose_level:
            verbose_level = 2
        self.verbose_level = verbose_level

    def log(self, level, message):
        """Print *message* when *level* is within the configured verbosity."""
        thresholds = {"debug": 1, "info": 2}
        # Unknown level names map to 0 and therefore always print once
        # any verbosity is enabled.
        required = thresholds.get(level, 0)
        if self.verbose_level and required <= self.verbose_level:
            print(f"\n[{level.upper()}]: {message}")

View File

@@ -3,7 +3,7 @@ from typing import ClassVar
from langchain.prompts import PromptTemplate
from pydantic import BaseModel, Field
from .i18n import I18N
from crewai.utilities import I18N
class Prompts(BaseModel):

View File

@@ -0,0 +1,57 @@
import threading
import time
from typing import Union
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, model_validator
from crewai.utilities.logger import Logger
class RPMController(BaseModel):
    """Rate limiter capping requests per rolling 60-second window.

    When ``max_rpm`` is set, a background ``threading.Timer`` resets the
    request counter every minute, and ``check_or_wait`` blocks callers
    that exceed the cap until the window restarts. With ``max_rpm=None``
    the controller is a no-op.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    # Maximum requests allowed per minute; None disables limiting entirely.
    max_rpm: Union[int, None] = Field(default=None)
    logger: Logger = Field(default=None)
    _current_rpm: int = PrivateAttr(default=0)
    _timer: threading.Timer = PrivateAttr(default=None)
    _lock: threading.Lock = PrivateAttr(default=None)

    @model_validator(mode="after")
    def reset_counter(self):
        """Create the lock and start the minute-reset timer when limiting is on."""
        if self.max_rpm:
            self._lock = threading.Lock()
            self._reset_request_count()
        return self

    def check_or_wait(self):
        """Consume one request slot, sleeping until the next minute if the cap is hit.

        Always returns True; when ``max_rpm`` is exhausted the call simply
        delays before counting the request against the fresh window.
        """
        if not self.max_rpm:
            return True
        with self._lock:
            if self._current_rpm < self.max_rpm:
                self._current_rpm += 1
                return True
            else:
                self.logger.log(
                    "info", "Max RPM reached, waiting for next minute to start."
                )
                # The lock is already held here; the helper must NOT
                # re-acquire it (threading.Lock is not reentrant and doing
                # so would self-deadlock on the first rate-limit hit).
                self._wait_for_next_minute()
                self._current_rpm = 1
                return True

    def stop_rpm_counter(self):
        """Cancel the pending reset timer so no further resets are scheduled."""
        if self._timer:
            self._timer.cancel()
            self._timer = None

    def _wait_for_next_minute(self):
        # Called only from check_or_wait, which already holds self._lock,
        # so the counter is reset without re-locking (re-acquiring the
        # non-reentrant lock here was a guaranteed deadlock).
        time.sleep(60)
        self._current_rpm = 0

    def _reset_request_count(self):
        # Runs first from the model validator, then from the timer thread
        # every 60 seconds; takes the lock because it races with
        # check_or_wait on _current_rpm.
        with self._lock:
            self._current_rpm = 0
        if self._timer:
            self._timer.cancel()
        self._timer = threading.Timer(60.0, self._reset_request_count)
        # Daemon timer so a forgotten stop_rpm_counter() cannot keep the
        # interpreter alive after the main thread exits.
        self._timer.daemon = True
        self._timer.start()