Merge in main to bugfix/kickoff-for-each-usage-metrics
@@ -2,3 +2,5 @@ from crewai.agent import Agent
from crewai.crew import Crew
from crewai.process import Process
from crewai.task import Task

__all__ = ["Agent", "Crew", "Process", "Task"]
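# A quick smoke test of the public API exported above (a minimal sketch; the
# role/goal/backstory/description values are illustrative, not from this diff):
from crewai import Agent, Crew, Process, Task

agent = Agent(role="Researcher", goal="Find facts", backstory="Loves sources")
task = Task(description="Summarize findings", expected_output="A short summary", agent=agent)
crew = Crew(agents=[agent], tasks=[task], process=Process.sequential)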
@@ -1,7 +1,6 @@

import os
import uuid
from copy import copy, deepcopy
from typing import Any, Dict, List, Optional, Tuple
from copy import copy
from typing import Any, List, Optional, Tuple

from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
@@ -9,25 +8,19 @@ from langchain.tools.render import render_text_description
from langchain_core.agents import AgentAction
from langchain_core.callbacks import BaseCallbackHandler
from langchain_openai import ChatOpenAI
from pydantic import (
    UUID4,
    BaseModel,
    ConfigDict,
    Field,
    InstanceOf,
    PrivateAttr,
    field_validator,
    model_validator,
)
from pydantic_core import PydanticCustomError
from pydantic import Field, InstanceOf, model_validator

from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser, ToolsHandler
from crewai.agents import CacheHandler, CrewAgentExecutor, CrewAgentParser
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.utilities import I18N, Logger, Prompts, RPMController
from crewai.utilities.token_counter_callback import TokenCalcHandler, TokenProcess
from crewai.tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler


class Agent(BaseModel):
class Agent(BaseAgent):
    """Represents an agent in a system.

    Each agent has a role, a goal, a backstory, and an optional language model (llm).
@@ -51,57 +44,10 @@ class Agent(BaseModel):
            callbacks: A list of callback functions from the langchain library that are triggered during the agent's execution process
    """

    __hash__ = object.__hash__  # type: ignore
    _logger: Logger = PrivateAttr()
    _rpm_controller: RPMController = PrivateAttr(default=None)
    _request_within_rpm_limit: Any = PrivateAttr(default=None)
    _token_process: TokenProcess = PrivateAttr(default=TokenProcess())

    formatting_errors: int = 0
    model_config = ConfigDict(arbitrary_types_allowed=True)
    id: UUID4 = Field(
        default_factory=uuid.uuid4,
        frozen=True,
        description="Unique identifier for the object, not set by user.",
    )
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Objective of the agent")
    backstory: str = Field(description="Backstory of the agent")
    cache: bool = Field(
        default=True,
        description="Whether the agent should use a cache for tool usage.",
    )
    config: Optional[Dict[str, Any]] = Field(
        description="Configuration for the agent",
        default=None,
    )
    max_rpm: Optional[int] = Field(
        default=None,
        description="Maximum number of requests per minute for the agent execution to be respected.",
    )
    verbose: bool = Field(
        default=False, description="Verbose mode for the Agent Execution"
    )
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to agents"
    )
    tools: Optional[List[Any]] = Field(
        default_factory=list, description="Tools at agents disposal"
    )
    max_iter: Optional[int] = Field(
        default=25, description="Maximum iterations for an agent to execute a task"
    )
    max_execution_time: Optional[int] = Field(
        default=None,
        description="Maximum execution time for an agent to execute a task",
    )
    agent_executor: InstanceOf[CrewAgentExecutor] = Field(
        default=None, description="An instance of the CrewAgentExecutor class."
    )
    crew: Any = Field(default=None, description="Crew to which the agent belongs.")
    tools_handler: InstanceOf[ToolsHandler] = Field(
        default=None, description="An instance of the ToolsHandler class."
    )
    cache_handler: InstanceOf[CacheHandler] = Field(
        default=None, description="An instance of the CacheHandler class."
    )
@@ -109,7 +55,6 @@ class Agent(BaseModel):
        default=None,
        description="Callback to be executed after each step of the agent execution.",
    )
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
    llm: Any = Field(
        default_factory=lambda: ChatOpenAI(
            model=os.environ.get("OPENAI_MODEL_NAME", "gpt-4o")
@@ -132,46 +77,17 @@ class Agent(BaseModel):
        default=None, description="Response format for the agent."
    )

    _original_role: str | None = None
    _original_goal: str | None = None
    _original_backstory: str | None = None
    allow_code_execution: Optional[bool] = Field(
        default=False, description="Enable code execution for the agent."
    )

    def __init__(__pydantic_self__, **data):
        config = data.pop("config", {})
        super().__init__(**config, **data)

    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "This field is not to be set by the user.", {}
            )

    @model_validator(mode="after")
    def set_attributes_based_on_config(self) -> "Agent":
        """Set attributes based on the agent configuration."""
        if self.config:
            for key, value in self.config.items():
                setattr(self, key, value)
        return self

    @model_validator(mode="after")
    def set_private_attrs(self):
        """Set private attributes."""
        self._logger = Logger(self.verbose)
        if self.max_rpm and not self._rpm_controller:
            self._rpm_controller = RPMController(
                max_rpm=self.max_rpm, logger=self._logger
            )
        return self

    @model_validator(mode="after")
    def set_agent_executor(self) -> "Agent":
        """set agent executor is set."""
        print(
            f"CREW ID: {self.id} - SET AGENT EXECUTOR model name", self.llm.model_name
        )
        """Ensure agent executor and token process are set."""
        if hasattr(self.llm, "model_name"):
            token_handler = TokenCalcHandler(self.llm.model_name, self._token_process)

@@ -180,7 +96,6 @@ class Agent(BaseModel):
                self.llm.callbacks = []

            # Check if an instance of TokenCalcHandler already exists in the list
            print(f"CREW ID : {self.id} - self.llm.callbacks", self.llm.callbacks)
            if not any(
                isinstance(handler, TokenCalcHandler) for handler in self.llm.callbacks
            ):
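# The guard above is what makes executor setup idempotent: re-running it must
# not stack duplicate token-counting callbacks on the LLM. A standalone sketch
# of the same pattern (illustrative handler class, not part of this diff):
class _TokenHandler:
    pass

callbacks: list = []
handler = _TokenHandler()
if not any(isinstance(h, _TokenHandler) for h in callbacks):
    callbacks.append(handler)  # appended once, no matter how often this runs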
@@ -231,8 +146,7 @@ class Agent(BaseModel):

        tools = tools or self.tools
        # type: ignore # Argument 1 to "_parse_tools" of "Agent" has incompatible type "list[Any] | None"; expected "list[Any]"
        parsed_tools = self._parse_tools(tools)

        parsed_tools = self._parse_tools(tools or [])
        self.create_agent_executor(tools=tools)
        self.agent_executor.tools = parsed_tools
        self.agent_executor.task = task
@@ -240,6 +154,11 @@ class Agent(BaseModel):
        self.agent_executor.tools_description = render_text_description(parsed_tools)
        self.agent_executor.tools_names = self.__tools_names(parsed_tools)

        if self.crew and self.crew._train:
            task_prompt = self._training_handler(task_prompt=task_prompt)
        else:
            task_prompt = self._use_trained_data(task_prompt=task_prompt)

        result = self.agent_executor.invoke(
            {
                "input": task_prompt,
@@ -247,33 +166,22 @@ class Agent(BaseModel):
                "tools": self.agent_executor.tools_description,
            }
        )["output"]

        if self.max_rpm:
            self._rpm_controller.stop_rpm_counter()

        return result

    def set_cache_handler(self, cache_handler: CacheHandler) -> None:
        """Set the cache handler for the agent.

        Args:
            cache_handler: An instance of the CacheHandler class.
        """
        self.tools_handler = ToolsHandler()
        if self.cache:
            self.cache_handler = cache_handler
            self.tools_handler.cache = cache_handler
        self.create_agent_executor()

    def set_rpm_controller(self, rpm_controller: RPMController) -> None:
        """Set the rpm controller for the agent.

        Args:
            rpm_controller: An instance of the RPMController class.
        """
        if not self._rpm_controller:
            self._rpm_controller = rpm_controller
            self.create_agent_executor()
    def format_log_to_str(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        observation_prefix: str = "Observation: ",
        llm_prefix: str = "",
    ) -> str:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
        return thoughts

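# Worked example of the scratchpad built by format_log_to_str above
# (hypothetical step; AgentAction is the langchain_core type imported earlier):
from langchain_core.agents import AgentAction

step = (
    AgentAction(tool="search", tool_input="crewAI", log="Thought: I should search."),
    "3 results found",
)
# format_log_to_str([step]) returns:
#   "Thought: I should search.\nObservation: 3 results found\n"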
    def create_agent_executor(self, tools=None) -> None:
        """Create an agent executor for the agent.
@@ -329,47 +237,36 @@ class Agent(BaseModel):
        )

        stop_words = [self.i18n.slice("observation")]

        if self.response_template:
            stop_words.append(
                self.response_template.split("{{ .Response }}")[1].strip()
            )

        bind = self.llm.bind(stop=stop_words)

        inner_agent = agent_args | execution_prompt | bind | CrewAgentParser(agent=self)
        self.agent_executor = CrewAgentExecutor(
            agent=RunnableAgent(runnable=inner_agent), **executor_args
        )

    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """Interpolate inputs into the agent description and backstory."""
        if self._original_role is None:
            self._original_role = self.role
        if self._original_goal is None:
            self._original_goal = self.goal
        if self._original_backstory is None:
            self._original_backstory = self.backstory
    def get_delegation_tools(self, agents: List[BaseAgent]):
        agent_tools = AgentTools(agents=agents)
        tools = agent_tools.tools()
        return tools

        if inputs:
            self.role = self._original_role.format(**inputs)
            self.goal = self._original_goal.format(**inputs)
            self.backstory = self._original_backstory.format(**inputs)
    def get_code_execution_tools(self):
        try:
            from crewai_tools import CodeInterpreterTool

    def increment_formatting_errors(self) -> None:
        """Count the formatting errors of the agent."""
        self.formatting_errors += 1
            return [CodeInterpreterTool()]
        except ModuleNotFoundError:
            self._logger.log(
                "info", "Coding tools not available. Install crewai_tools. "
            )

    def format_log_to_str(
        self,
        intermediate_steps: List[Tuple[AgentAction, str]],
        observation_prefix: str = "Observation: ",
        llm_prefix: str = "",
    ) -> str:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{observation_prefix}{observation}\n{llm_prefix}"
        return thoughts
    def get_output_converter(self, llm, text, model, instructions):
        return Converter(llm=llm, text=text, model=model, instructions=instructions)

    def copy(self):
        """Create a deep copy of the Agent."""
@@ -386,27 +283,21 @@ class Agent(BaseModel):
            "llm",
        }

        print("EXISTING LLM", self.llm)
        # TODO: TEST REMOVING THIS AND SEE IF ANYTHING CHANGES
        existing_llm = copy(self.llm)
        print("COPIED LLM", existing_llm)
        # TODO: EXPAND ON WHY THIS IS NEEDED
        # RESET LLM CALLBACKS
        existing_llm.callbacks = []
        print("RESET LLM CALLBACKS", existing_llm)
        copied_data = self.model_dump(exclude=exclude)
        print("COPIED DATA FOR AGENT", copied_data)
        copied_data = {k: v for k, v in copied_data.items() if v is not None}

        copied_agent = Agent(**copied_data, llm=existing_llm, tools=self.tools)

        return copied_agent

    # type: ignore # Function "langchain_core.tools.tool" is not valid as a type
    def _parse_tools(self, tools: List[Any]) -> List[LangChainTool]:
        """Parse tools to be used for the task."""
        # tentatively try to import from crewai_tools import BaseTool as CrewAITool
        tools_list = []
        try:
            # tentatively try to import from crewai_tools import BaseTool as CrewAITool
            from crewai_tools import BaseTool as CrewAITool

            for tool in tools:
@@ -415,10 +306,35 @@ class Agent(BaseModel):
                else:
                    tools_list.append(tool)
        except ModuleNotFoundError:
            tools_list = []
            for tool in tools:
                tools_list.append(tool)
        return tools_list

    def _training_handler(self, task_prompt: str) -> str:
        """Handle training data for the agent task prompt to improve output on Training."""
        if data := CrewTrainingHandler(TRAINING_DATA_FILE).load():
            agent_id = str(self.id)

            if data.get(agent_id):
                human_feedbacks = [
                    i["human_feedback"] for i in data.get(agent_id, {}).values()
                ]
                task_prompt += "You MUST follow these feedbacks: \n " + "\n - ".join(
                    human_feedbacks
                )

        return task_prompt

    def _use_trained_data(self, task_prompt: str) -> str:
        """Use trained data for the agent task prompt to improve output."""
        if data := CrewTrainingHandler(TRAINED_AGENTS_DATA_FILE).load():
            if trained_data_output := data.get(self.role):
                task_prompt += "You MUST follow these feedbacks: \n " + "\n - ".join(
                    trained_data_output["suggestions"]
                )
        return task_prompt

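# Shape of the trained-agents data consumed by _use_trained_data above, keyed
# by agent role (a sketch inferred from the lookup; values are illustrative):
trained_data = {
    "Researcher": {"suggestions": ["Cite sources", "Prefer primary data"]},
}
# For an agent whose role is "Researcher", the task prompt gains:
#   "You MUST follow these feedbacks: \n Cite sources\n - Prefer primary data"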
    @staticmethod
    def __tools_names(tools) -> str:
        return ", ".join([t.name for t in tools])

0 src/crewai/agents/agent_builder/__init__.py Normal file
233 src/crewai/agents/agent_builder/base_agent.py Normal file
@@ -0,0 +1,233 @@
import uuid
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional

from pydantic import (
    UUID4,
    BaseModel,
    ConfigDict,
    Field,
    InstanceOf,
    PrivateAttr,
    field_validator,
    model_validator,
)
from pydantic_core import PydanticCustomError

from crewai.agents import CacheHandler, ToolsHandler
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.utilities import I18N, Logger, RPMController


class BaseAgent(ABC, BaseModel):
    """Abstract Base Class for all third party agents compatible with CrewAI.

    Attributes:
        id (UUID4): Unique identifier for the agent.
        role (str): Role of the agent.
        goal (str): Objective of the agent.
        backstory (str): Backstory of the agent.
        cache (bool): Whether the agent should use a cache for tool usage.
        config (Optional[Dict[str, Any]]): Configuration for the agent.
        verbose (bool): Verbose mode for the Agent Execution.
        max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
        allow_delegation (bool): Allow delegation of tasks to agents.
        tools (Optional[List[Any]]): Tools at the agent's disposal.
        max_iter (Optional[int]): Maximum iterations for an agent to execute a task.
        agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
        llm (Any): Language model that will run the agent.
        crew (Any): Crew to which the agent belongs.
        i18n (I18N): Internationalization settings.
        cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class.
        tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class.


    Methods:
        execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> str:
            Abstract method to execute a task.
        create_agent_executor(tools=None) -> None:
            Abstract method to create an agent executor.
        _parse_tools(tools: List[Any]) -> List[Any]:
            Abstract method to parse tools.
        get_delegation_tools(agents: List["BaseAgent"]):
            Abstract method to set the agent's task tools for handling delegation and asking questions of other agents in the crew.
        get_output_converter(llm, text, model, instructions):
            Abstract method to get the converter class for the agent to create json/pydantic outputs.
        interpolate_inputs(inputs: Dict[str, Any]) -> None:
            Interpolate inputs into the agent description and backstory.
        set_cache_handler(cache_handler: CacheHandler) -> None:
            Set the cache handler for the agent.
        increment_formatting_errors() -> None:
            Increment formatting errors.
        copy() -> "BaseAgent":
            Create a copy of the agent.
        set_rpm_controller(rpm_controller: RPMController) -> None:
            Set the rpm controller for the agent.
        set_private_attrs() -> "BaseAgent":
            Set private attributes.
    """

    __hash__ = object.__hash__  # type: ignore
    _logger: Logger = PrivateAttr()
    _rpm_controller: RPMController = PrivateAttr(default=None)
    _request_within_rpm_limit: Any = PrivateAttr(default=None)
    formatting_errors: int = 0
    model_config = ConfigDict(arbitrary_types_allowed=True)
    id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Objective of the agent")
    backstory: str = Field(description="Backstory of the agent")
    cache: bool = Field(
        default=True, description="Whether the agent should use a cache for tool usage."
    )
    config: Optional[Dict[str, Any]] = Field(
        description="Configuration for the agent", default=None
    )
    verbose: bool = Field(
        default=False, description="Verbose mode for the Agent Execution"
    )
    max_rpm: Optional[int] = Field(
        default=None,
        description="Maximum number of requests per minute for the agent execution to be respected.",
    )
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to agents"
    )
    tools: Optional[List[Any]] = Field(
        default_factory=list, description="Tools at agents' disposal"
    )
    max_iter: Optional[int] = Field(
        default=25, description="Maximum iterations for an agent to execute a task"
    )
    agent_executor: InstanceOf = Field(
        default=None, description="An instance of the CrewAgentExecutor class."
    )
    llm: Any = Field(
        default=None, description="Language model that will run the agent."
    )
    crew: Any = Field(default=None, description="Crew to which the agent belongs.")
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
    cache_handler: InstanceOf[CacheHandler] = Field(
        default=None, description="An instance of the CacheHandler class."
    )
    tools_handler: InstanceOf[ToolsHandler] = Field(
        default=None, description="An instance of the ToolsHandler class."
    )

    _original_role: str | None = None
    _original_goal: str | None = None
    _original_backstory: str | None = None
    _token_process: TokenProcess = TokenProcess()

    def __init__(__pydantic_self__, **data):
        config = data.pop("config", {})
        super().__init__(**config, **data)

    @model_validator(mode="after")
    def set_config_attributes(self):
        if self.config:
            for key, value in self.config.items():
                setattr(self, key, value)
        return self

    @field_validator("id", mode="before")
    @classmethod
    def _deny_user_set_id(cls, v: Optional[UUID4]) -> None:
        if v:
            raise PydanticCustomError(
                "may_not_set_field", "This field is not to be set by the user.", {}
            )

    @model_validator(mode="after")
    def set_attributes_based_on_config(self) -> "BaseAgent":
        """Set attributes based on the agent configuration."""
        if self.config:
            for key, value in self.config.items():
                setattr(self, key, value)
        return self

    @model_validator(mode="after")
    def set_private_attrs(self):
        """Set private attributes."""
        self._logger = Logger(self.verbose)
        if self.max_rpm and not self._rpm_controller:
            self._rpm_controller = RPMController(
                max_rpm=self.max_rpm, logger=self._logger
            )
        if not self._token_process:
            self._token_process = TokenProcess()
        return self

    @abstractmethod
    def execute_task(
        self,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
        pass

    @abstractmethod
    def create_agent_executor(self, tools=None) -> None:
        pass

    @abstractmethod
    def _parse_tools(self, tools: List[Any]) -> List[Any]:
        pass

    @abstractmethod
    def get_delegation_tools(self, agents: List["BaseAgent"]):
        """Set the task tools that init the BaseAgentTools class."""
        pass

    @abstractmethod
    def get_output_converter(
        self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
    ):
        """Get the converter class for the agent to create json/pydantic outputs."""
        pass

    # TODO: HAVE LORENZE REVIEW THIS WITH OTHER AGENTS
    @abstractmethod
    def copy(self):
        """Create a copy of the agent."""
        pass

    def interpolate_inputs(self, inputs: Dict[str, Any]) -> None:
        """Interpolate inputs into the agent description and backstory."""
        if self._original_role is None:
            self._original_role = self.role
        if self._original_goal is None:
            self._original_goal = self.goal
        if self._original_backstory is None:
            self._original_backstory = self.backstory

        if inputs:
            self.role = self._original_role.format(**inputs)
            self.goal = self._original_goal.format(**inputs)
            self.backstory = self._original_backstory.format(**inputs)

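# A self-contained sketch of the caching pattern used above (illustrative
# class; same str.format-based templating):
class _Demo:
    role = "Senior {topic} Researcher"
    _original_role = None

    def interpolate_inputs(self, inputs):
        if self._original_role is None:
            self._original_role = self.role  # cache the pristine template once
        self.role = self._original_role.format(**inputs)

d = _Demo()
d.interpolate_inputs({"topic": "AI"})  # role -> "Senior AI Researcher"
d.interpolate_inputs({"topic": "ML"})  # role -> "Senior ML Researcher", not double-formatted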
    def set_cache_handler(self, cache_handler: CacheHandler) -> None:
        """Set the cache handler for the agent.

        Args:
            cache_handler: An instance of the CacheHandler class.
        """
        self.tools_handler = ToolsHandler()
        if self.cache:
            self.cache_handler = cache_handler
            self.tools_handler.cache = cache_handler
        self.create_agent_executor()

    def increment_formatting_errors(self) -> None:
        self.formatting_errors += 1

    def set_rpm_controller(self, rpm_controller: RPMController) -> None:
        """Set the rpm controller for the agent.

        Args:
            rpm_controller: An instance of the RPMController class.
        """
        if not self._rpm_controller:
            self._rpm_controller = rpm_controller
            self.create_agent_executor()
65 src/crewai/agents/agent_builder/base_agent_executor_mixin.py Normal file
@@ -0,0 +1,65 @@
import time

from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.utilities.converter import ConverterError
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator


class CrewAgentExecutorMixin:
    def _should_force_answer(self) -> bool:
        return (
            self.iterations == self.force_answer_max_iterations
        ) and not self.have_forced_answer

    def _create_short_term_memory(self, output) -> None:
        if (
            self.crew
            and self.crew.memory
            and "Action: Delegate work to coworker" not in output.log
        ):
            memory = ShortTermMemoryItem(
                data=output.log,
                agent=self.crew_agent.role,
                metadata={
                    "observation": self.task.description,
                },
            )
            self.crew._short_term_memory.save(memory)

    def _create_long_term_memory(self, output) -> None:
        if self.crew and self.crew.memory:
            ltm_agent = TaskEvaluator(self.crew_agent)
            evaluation = ltm_agent.evaluate(self.task, output.log)

            if isinstance(evaluation, ConverterError):
                return

            long_term_memory = LongTermMemoryItem(
                task=self.task.description,
                agent=self.crew_agent.role,
                quality=evaluation.quality,
                datetime=str(time.time()),
                expected_output=self.task.expected_output,
                metadata={
                    "suggestions": evaluation.suggestions,
                    "quality": evaluation.quality,
                },
            )
            self.crew._long_term_memory.save(long_term_memory)

            for entity in evaluation.entities:
                entity_memory = EntityMemoryItem(
                    name=entity.name,
                    type=entity.type,
                    description=entity.description,
                    relationships="\n".join([f"- {r}" for r in entity.relationships]),
                )
                self.crew._entity_memory.save(entity_memory)

    def _ask_human_input(self, final_answer: dict) -> str:
        """Get human input."""
        return input(
            self._i18n.slice("getting_input").format(final_answer=final_answer)
        )
81 src/crewai/agents/agent_builder/utilities/base_agent_tool.py Normal file
@@ -0,0 +1,81 @@
from abc import ABC, abstractmethod
from typing import List, Optional, Union
from pydantic import BaseModel, Field
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.task import Task
from crewai.utilities import I18N


class BaseAgentTools(BaseModel, ABC):
    """Default tools around agent delegation"""

    agents: List[BaseAgent] = Field(description="List of agents in this crew.")
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")

    @abstractmethod
    def tools(self):
        pass

    def _get_coworker(self, coworker: Optional[str], **kwargs) -> Optional[str]:
        coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker")
        if coworker:
            is_list = coworker.startswith("[") and coworker.endswith("]")
            if is_list:
                coworker = coworker[1:-1].split(",")[0]
        return coworker

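# Less-capable LLMs sometimes emit a stringified list instead of one coworker
# name; _get_coworker above keeps the first entry. A standalone sketch of that
# normalization (same logic, outside the class):
def _normalize(coworker, **kwargs):
    coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker")
    if coworker and coworker.startswith("[") and coworker.endswith("]"):
        coworker = coworker[1:-1].split(",")[0]  # keep the first listed name
    return coworker

assert _normalize("[Researcher, Writer]") == "Researcher"
assert _normalize(None, co_worker="Writer") == "Writer"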
    def delegate_work(
        self, task: str, context: str, coworker: Optional[str] = None, **kwargs
    ):
        """Useful to delegate a specific task to a coworker passing all necessary context and names."""
        coworker = self._get_coworker(coworker, **kwargs)
        return self._execute(coworker, task, context)

    def ask_question(
        self, question: str, context: str, coworker: Optional[str] = None, **kwargs
    ):
        """Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
        coworker = self._get_coworker(coworker, **kwargs)
        return self._execute(coworker, question, context)

    def _execute(self, agent: Union[str, None], task: str, context: Union[str, None]):
        """Execute the command."""
        try:
            if agent is None:
                agent = ""

            # It is important to remove the quotes from the agent name.
            # The reason we have to do this is because less-powerful LLMs
            # have difficulty producing valid JSON.
            # As a result, we end up with invalid JSON that is truncated like this:
            # {"task": "....", "coworker": "....
            # when it should look like this:
            # {"task": "....", "coworker": "...."}
            agent_name = agent.casefold().replace('"', "").replace("\n", "")

            agent = [
                available_agent
                for available_agent in self.agents
                if available_agent.role.casefold().replace("\n", "") == agent_name
            ]
        except Exception as _:
            return self.i18n.errors("agent_tool_unexsiting_coworker").format(
                coworkers="\n".join(
                    [f"- {agent.role.casefold()}" for agent in self.agents]
                )
            )

        if not agent:
            return self.i18n.errors("agent_tool_unexsiting_coworker").format(
                coworkers="\n".join(
                    [f"- {agent.role.casefold()}" for agent in self.agents]
                )
            )

        agent = agent[0]
        task = Task(
            description=task,
            agent=agent,
            expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
        )
        return agent.execute_task(task, context)
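# How a delegation round-trips through the class above (illustrative call;
# assumes a concrete BaseAgentTools subclass with a "Writer" agent registered):
#   tools.delegate_work(
#       task="Draft the intro section",
#       context="Audience: practitioners",
#       coworker='"Writer"',   # stray quotes are stripped inside _execute
#   )
# _execute matches the name against agents[].role case-insensitively, wraps the
# request in a Task, and returns writer_agent.execute_task(task, context).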
@@ -0,0 +1,48 @@
from abc import ABC, abstractmethod
from typing import Any, Optional


from pydantic import BaseModel, Field, PrivateAttr


class OutputConverter(BaseModel, ABC):
    """
    Abstract base class for converting task results into structured formats.

    This class provides a framework for converting unstructured text into
    either Pydantic models or JSON, tailored for specific agent requirements.
    It uses a language model to interpret and structure the input text based
    on given instructions.

    Attributes:
        text (str): The input text to be converted.
        llm (Any): The language model used for conversion.
        model (Any): The target model for structuring the output.
        instructions (str): Specific instructions for the conversion process.
        max_attempts (int): Maximum number of conversion attempts (default: 3).
    """

    _is_gpt: bool = PrivateAttr(default=True)
    text: str = Field(description="Text to be converted.")
    llm: Any = Field(description="The language model to be used to convert the text.")
    model: Any = Field(description="The model to be used to convert the text.")
    instructions: str = Field(description="Conversion instructions to the LLM.")
    max_attemps: Optional[int] = Field(
        description="Max number of attempts to try to get the output formatted.",
        default=3,
    )

    @abstractmethod
    def to_pydantic(self, current_attempt=1):
        """Convert text to pydantic."""
        pass

    @abstractmethod
    def to_json(self, current_attempt=1):
        """Convert text to json."""
        pass

    @abstractmethod
    def _is_gpt(self, llm):
        """Return whether the provided llm is a GPT model from OpenAI."""
        pass
@@ -0,0 +1,27 @@
from typing import Any, Dict


class TokenProcess:
    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

    def sum_prompt_tokens(self, tokens: int):
        self.prompt_tokens = self.prompt_tokens + tokens
        self.total_tokens = self.total_tokens + tokens

    def sum_completion_tokens(self, tokens: int):
        self.completion_tokens = self.completion_tokens + tokens
        self.total_tokens = self.total_tokens + tokens

    def sum_successful_requests(self, requests: int):
        self.successful_requests = self.successful_requests + requests

    def get_summary(self) -> Dict[str, Any]:
        return {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "successful_requests": self.successful_requests,
        }
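# Usage sketch for the accumulator above, mirroring how TokenCalcHandler feeds
# it during a run (the token counts here are made-up):
tp = TokenProcess()
tp.sum_prompt_tokens(120)
tp.sum_completion_tokens(45)
tp.sum_successful_requests(1)
assert tp.get_summary() == {
    "total_tokens": 165,
    "prompt_tokens": 120,
    "completion_tokens": 45,
    "successful_requests": 1,
}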
@@ -7,22 +7,20 @@ from langchain.agents.agent import ExceptionTool
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain_core.agents import AgentAction, AgentFinish, AgentStep
from langchain_core.exceptions import OutputParserException
from langchain_core.pydantic_v1 import root_validator

from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from pydantic import InstanceOf
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin

from crewai.agents.tools_handler import ToolsHandler
from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
from crewai.utilities import I18N
from crewai.utilities.converter import ConverterError
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.training_handler import CrewTrainingHandler


class CrewAgentExecutor(AgentExecutor):
class CrewAgentExecutor(AgentExecutor, CrewAgentExecutorMixin):
    _i18n: I18N = I18N()
    should_ask_for_human_input: bool = False
    llm: Any = None
@@ -44,61 +42,6 @@ class CrewAgentExecutor(AgentExecutor):
    prompt_template: Optional[str] = None
    response_template: Optional[str] = None

    @root_validator()
    def set_force_answer_max_iterations(cls, values: Dict) -> Dict:
        values["force_answer_max_iterations"] = values["max_iterations"] - 2
        return values

    def _should_force_answer(self) -> bool:
        return (
            self.iterations == self.force_answer_max_iterations
        ) and not self.have_forced_answer

    def _create_short_term_memory(self, output) -> None:
        if (
            self.crew
            and self.crew.memory
            and "Action: Delegate work to coworker" not in output.log
        ):
            memory = ShortTermMemoryItem(
                data=output.log,
                agent=self.crew_agent.role,
                metadata={
                    "observation": self.task.description,
                },
            )
            self.crew._short_term_memory.save(memory)

    def _create_long_term_memory(self, output) -> None:
        if self.crew and self.crew.memory:
            ltm_agent = TaskEvaluator(self.crew_agent)
            evaluation = ltm_agent.evaluate(self.task, output.log)

            if isinstance(evaluation, ConverterError):
                return

            long_term_memory = LongTermMemoryItem(
                task=self.task.description,
                agent=self.crew_agent.role,
                quality=evaluation.quality,
                datetime=str(time.time()),
                expected_output=self.task.expected_output,
                metadata={
                    "suggestions": evaluation.suggestions,
                    "quality": evaluation.quality,
                },
            )
            self.crew._long_term_memory.save(long_term_memory)

            for entity in evaluation.entities:
                entity_memory = EntityMemoryItem(
                    name=entity.name,
                    type=entity.type,
                    description=entity.description,
                    relationships="\n".join([f"- {r}" for r in entity.relationships]),
                )
                self.crew._entity_memory.save(entity_memory)

    def _call(
        self,
        inputs: Dict[str, str],
@@ -246,12 +189,17 @@ class CrewAgentExecutor(AgentExecutor):
        # If the tool chosen is the finishing tool, then we end and return.
        if isinstance(output, AgentFinish):
            if self.should_ask_for_human_input:
                human_feedback = self._ask_human_input(output.return_values["output"])

                if self.crew and self.crew._train:
                    self._handle_crew_training_output(output, human_feedback)

                # Making sure we only ask for it once, so disabling for the next thought loop
                self.should_ask_for_human_input = False
                human_feedback = self._ask_human_input(output.return_values["output"])
                action = AgentAction(
                    tool="Human Input", tool_input=human_feedback, log=output.log
                )

                yield AgentStep(
                    action=action,
                    observation=self._i18n.slice("human_feedback").format(
@@ -261,6 +209,9 @@ class CrewAgentExecutor(AgentExecutor):
                return

            else:
                if self.crew and self.crew._train:
                    self._handle_crew_training_output(output)

                yield output
                return

@@ -300,8 +251,30 @@ class CrewAgentExecutor(AgentExecutor):
            )
            yield AgentStep(action=agent_action, observation=observation)

    def _ask_human_input(self, final_answer: dict) -> str:
        """Get human input."""
        return input(
            self._i18n.slice("getting_input").format(final_answer=final_answer)
        )
    def _handle_crew_training_output(
        self, output: AgentFinish, human_feedback: str | None = None
    ) -> None:
        """Function to handle the processing of the training data."""
        agent_id = str(self.crew_agent.id)

        if (
            CrewTrainingHandler(TRAINING_DATA_FILE).load()
            and not self.should_ask_for_human_input
        ):
            training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
            if training_data.get(agent_id):
                training_data[agent_id][self.crew._train_iteration][
                    "improved_output"
                ] = output.return_values["output"]
                CrewTrainingHandler(TRAINING_DATA_FILE).save(training_data)

        if self.should_ask_for_human_input and human_feedback is not None:
            training_data = {
                "initial_output": output.return_values["output"],
                "human_feedback": human_feedback,
                "agent": agent_id,
                "agent_role": self.crew_agent.role,
            }
            CrewTrainingHandler(TRAINING_DATA_FILE).append(
                self.crew._train_iteration, agent_id, training_data
            )

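# Shape of one training record assembled above (values are illustrative);
# records are appended under the current train iteration and agent id:
record = {
    "initial_output": "First draft of the answer",
    "human_feedback": "Tighten the summary",
    "agent": "agent-uuid",        # str(self.crew_agent.id)
    "agent_role": "Researcher",
}
# A later non-interactive training pass adds an "improved_output" key to the
# same iteration's entry before saving the file again.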
@@ -15,8 +15,9 @@ def train():
    """
    Train the crew for a given number of iterations.
    """
    inputs = {"topic": "AI LLMs"}
    try:
        {{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]))
        {{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), inputs=inputs)

    except Exception as e:
        raise Exception(f"An error occurred while training the crew: {e}")

@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]

[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = "^0.32.2" }
crewai = { extras = ["tools"], version = "^0.35.4" }

[tool.poetry.scripts]
{{folder_name}} = "{{folder_name}}.main:run"

@@ -5,19 +5,20 @@ from typing import Any, Dict, List, Optional, Union

from langchain_core.callbacks import BaseCallbackHandler
from pydantic import (
    UUID4,
    BaseModel,
    ConfigDict,
    Field,
    InstanceOf,
    Json,
    PrivateAttr,
    field_validator,
    model_validator,
    UUID4,
    BaseModel,
    ConfigDict,
    Field,
    InstanceOf,
    Json,
    PrivateAttr,
    field_validator,
    model_validator,
)
from pydantic_core import PydanticCustomError

from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.cache import CacheHandler
from crewai.memory.entity.entity_memory import EntityMemory
from crewai.memory.long_term.long_term_memory import LongTermMemory
@@ -27,6 +28,8 @@ from crewai.task import Task
from crewai.telemetry import Telemetry
from crewai.tools.agent_tools import AgentTools
from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
from crewai.utilities.training_handler import CrewTrainingHandler


class Crew(BaseModel):
@@ -63,11 +66,13 @@ class Crew(BaseModel):
    _short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
    _long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
    _entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
    _train: Optional[bool] = PrivateAttr(default=False)
    _train_iteration: Optional[int] = PrivateAttr()

    cache: bool = Field(default=False)
    model_config = ConfigDict(arbitrary_types_allowed=True)
    tasks: List[Task] = Field(default_factory=list)
    agents: List[Agent] = Field(default_factory=list)
    agents: List[BaseAgent] = Field(default_factory=list)
    process: Process = Field(default=Process.sequential)
    verbose: Union[int, bool] = Field(default=0)
    memory: bool = Field(
@@ -89,7 +94,7 @@ class Crew(BaseModel):
    manager_llm: Optional[Any] = Field(
        description="Language model that will run the agent.", default=None
    )
    manager_agent: Optional[Any] = Field(
    manager_agent: Optional[BaseAgent] = Field(
        description="Custom agent that will be used as manager.", default=None
    )
    manager_callbacks: Optional[List[InstanceOf[BaseCallbackHandler]]] = Field(
@@ -242,17 +247,41 @@ class Crew(BaseModel):
            del task_config["agent"]
        return Task(**task_config, agent=task_agent)

    def _setup_for_training(self) -> None:
        """Sets up the crew for training."""
        self._train = True

        for task in self.tasks:
            task.human_input = True

        for agent in self.agents:
            agent.allow_delegation = False

    def train(self, n_iterations: int, inputs: Optional[Dict[str, Any]] = {}) -> None:
        """Trains the crew for a given number of iterations."""
        self._setup_for_training()

        for n_iteration in range(n_iterations):
            self._train_iteration = n_iteration
            self.kickoff(inputs=inputs)

        training_data = CrewTrainingHandler("training_data.pkl").load()

        for agent in self.agents:
            result = TaskEvaluator(agent).evaluate_training_data(
                training_data=training_data, agent_id=str(agent.id)
            )

            CrewTrainingHandler("trained_agents_data.pkl").save_trained_data(
                agent_id=str(agent.role), trained_data=result.model_dump()
            )

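# Usage sketch for the training loop above (assumes `crew` is an already
# configured Crew instance; the argument values are illustrative):
#   crew.train(n_iterations=2, inputs={"topic": "AI LLMs"})
# Each iteration kicks off with human input enabled; evaluated suggestions are
# then persisted per agent role in trained_agents_data.pkl.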
    def kickoff(
        self,
        inputs: Optional[Dict[str, Any]] = {},
    ) -> Union[str, Dict[str, Any]]:
        """Starts the crew to work on its assigned tasks."""
        print(f"CREW ID {self.id} - KICKING OFF CREW")
        print(
            f"CREW ID {self.id} - callbacks",
            [agent.llm.callbacks for agent in self.agents],
        )
        self._execution_span = self._telemetry.crew_execution_span(self)
        self._execution_span = self._telemetry.crew_execution_span(self, inputs)
        # type: ignore # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
        self._interpolate_inputs(inputs)
        self._set_tasks_callbacks()
@@ -260,12 +289,21 @@ class Crew(BaseModel):
        i18n = I18N(prompt_file=self.prompt_file)

        for agent in self.agents:
            # type: ignore # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
            agent.i18n = i18n
            agent.crew = self

            if not agent.function_calling_llm:
                # type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
            agent.crew = self  # type: ignore[attr-defined]
            # TODO: Create an AgentFunctionCalling protocol for future refactoring
            if (
                hasattr(agent, "function_calling_llm")
                and not agent.function_calling_llm
            ):
                agent.function_calling_llm = self.function_calling_llm
            if not agent.step_callback:

            if hasattr(agent, "allow_code_execution") and agent.allow_code_execution:
                agent.tools += agent.get_code_execution_tools()

            if hasattr(agent, "step_callback") and not agent.step_callback:
                agent.step_callback = self.step_callback

            agent.create_agent_executor()
@@ -283,10 +321,10 @@ class Crew(BaseModel):
            raise NotImplementedError(
                f"The process '{self.process}' is not implemented yet."
            )

        metrics = metrics + [
            agent._token_process.get_summary() for agent in self.agents
        ]

        self.usage_metrics = {
            key: sum([m[key] for m in metrics if m is not None]) for key in metrics[0]
        }
@@ -327,7 +365,6 @@ class Crew(BaseModel):
        """Asynchronous kickoff method to start the crew execution."""
        return await asyncio.to_thread(self.kickoff, inputs)

    # TODO: IF THERE ARE MULTIPLE INPUTS, THE USAGE METRICS FOR FIRST ONE COMES BACK AS 0.
    async def kickoff_for_each_async(self, inputs: List[Dict]) -> List[Any]:
        crew_copies = [self.copy() for _ in inputs]

@@ -356,28 +393,17 @@ class Crew(BaseModel):

        return results

    def train(self, n_iterations: int) -> None:
        # TODO: Implement training
        pass

    def _run_sequential_process(self) -> Union[str, Dict[str, Any]]:
    def _run_sequential_process(self) -> str:
        """Executes tasks sequentially and returns the final output."""
        task_output = ""
        total_token_usage = {
            "total_tokens": 0,
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "successful_requests": 0,
        }

        for task in self.tasks:
            print("TASK DESCRIPTION", task.description)
            if task.agent.allow_delegation:  # type: ignore # Item "None" of "Agent | None" has no attribute "allow_delegation"
                agents_for_delegation = [
                    agent for agent in self.agents if agent != task.agent
                ]
                if len(self.agents) > 1 and len(agents_for_delegation) > 0:
                    task.tools += AgentTools(agents=agents_for_delegation).tools()
                    task.tools += task.agent.get_delegation_tools(agents_for_delegation)

            role = task.agent.role if task.agent is not None else "None"
            self._logger.log("debug", f"== Working Agent: {role}", color="bold_purple")
@@ -389,7 +415,6 @@ class Crew(BaseModel):
                self._file_handler.log(
                    agent=role, task=task.description, status="started"
                )

            output = task.execute(context=task_output)

            if not task.async_execution:
@@ -401,18 +426,12 @@ class Crew(BaseModel):
        if self.output_log_file:
            self._file_handler.log(agent=role, task=task_output, status="completed")

        for agent in self.agents:
            print("INSPECTING AGENT", agent.role)
            agent_token_usage = agent._token_process.get_summary()
            print("AGENT TOKEN USAGE", agent_token_usage)
            for key in total_token_usage:
                total_token_usage[key] += agent_token_usage.get(key, 0)

        self._finish_execution(task_output)
        # type: ignore # Item "None" of "Agent | None" has no attribute "_token_process")

        token_usage = self._calculate_usage_metrics()

        # type: ignore # Incompatible return value type (got "tuple[str, Any]", expected "str")
        return self._format_output(task_output, total_token_usage)
        return self._format_output(task_output, token_usage)

    def _run_hierarchical_process(self) -> Union[str, Dict[str, Any]]:
        """Creates and assigns a manager agent to make sure the crew completes the tasks."""
@@ -423,7 +442,7 @@ class Crew(BaseModel):
            manager = self.manager_agent
            if len(manager.tools) > 0:
                raise Exception("Manager agent should not have tools")
            manager.tools = AgentTools(agents=self.agents).tools()
            manager.tools = self.manager_agent.get_delegation_tools(self.agents)
        else:
            manager = Agent(
                role=i18n.retrieve("hierarchical_manager_agent", "role"),
@@ -431,10 +450,12 @@ class Crew(BaseModel):
                backstory=i18n.retrieve("hierarchical_manager_agent", "backstory"),
                tools=AgentTools(agents=self.agents).tools(),
                llm=self.manager_llm,
                verbose=True,
                verbose=self.verbose,
            )
            self.manager_agent = manager

        task_output = ""

        for task in self.tasks:
            self._logger.log("debug", f"Working Agent: {manager.role}")
            self._logger.log("info", f"Starting Task: {task.description}")
@@ -449,19 +470,19 @@ class Crew(BaseModel):
            )

            self._logger.log("debug", f"[{manager.role}] Task output: {task_output}")

            if self.output_log_file:
                self._file_handler.log(
                    agent=manager.role, task=task_output, status="completed"
                )

        # TODO: GET TOKENS USAGE CALCULATED INCLUDING MANAGER
        self._finish_execution(task_output)

        # type: ignore # Incompatible return value type (got "tuple[str, Any]", expected "str")
        manager_token_usage = manager._token_process.get_summary()
        token_usage = self._calculate_usage_metrics()

        return (
            self._format_output(task_output, manager_token_usage),
            manager_token_usage,
            self._format_output(task_output, token_usage),
            token_usage,
        )

    def copy(self):
@@ -511,17 +532,17 @@ class Crew(BaseModel):
            for task in self.tasks
        ]
        # type: ignore # "interpolate_inputs" of "Agent" does not return a value (it only ever returns None)
        [agent.interpolate_inputs(inputs) for agent in self.agents]
        for agent in self.agents:
            agent.interpolate_inputs(inputs)

    def _format_output(
        self, output: str, token_usage: Optional[Dict[str, Any]]
        self, output: str, token_usage: Optional[Dict[str, Any]] = None
    ) -> Union[str, Dict[str, Any]]:
        """
        Formats the output of the crew execution.
        If full_output is True, the returned value is a dictionary; otherwise it is a string.
        """

        print("token_usage passed to _format_output", token_usage)
        if self.full_output:
            return {  # type: ignore # Incompatible return value type (got "dict[str, Sequence[str | TaskOutput | None]]", expected "str")
                "final_output": output,
@@ -536,5 +557,38 @@ class Crew(BaseModel):
            self._rpm_controller.stop_rpm_counter()
        self._telemetry.end_crew(self, output)

    def _calculate_usage_metrics(
        self,
    ) -> Dict[str, int]:
        total_usage_metrics = {
            "total_tokens": 0,
            "prompt_tokens": 0,
            "completion_tokens": 0,
            "successful_requests": 0,
        }

        for agent in self.agents:
            if hasattr(agent, "_token_process"):
                token_sum = agent._token_process.get_summary()
                total_usage_metrics = {
                    key: total_usage_metrics[key] + token_sum[key]
                    for key in total_usage_metrics
                }

        if self.manager_agent:
            token_sum = self.manager_agent._token_process.get_summary()
            total_usage_metrics = {
                key: total_usage_metrics[key] + token_sum[key]
                for key in total_usage_metrics
            }

        return total_usage_metrics

    def __repr__(self):
        return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"

    def aggregate_token_usage(self, token_usage_list: List[Dict[str, Any]]):
        return {
            key: sum([m[key] for m in token_usage_list if m is not None])
            for key in token_usage_list[0]
        }

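# Semantics of the aggregation helpers above (made-up numbers): per-agent
# summaries share the same keys, so aggregation is a key-wise sum.
summaries = [
    {"total_tokens": 165, "prompt_tokens": 120, "completion_tokens": 45, "successful_requests": 1},
    {"total_tokens": 80, "prompt_tokens": 60, "completion_tokens": 20, "successful_requests": 1},
]
totals = {key: sum(m[key] for m in summaries if m is not None) for key in summaries[0]}
assert totals == {"total_tokens": 245, "prompt_tokens": 180, "completion_tokens": 65, "successful_requests": 2}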
@@ -3,17 +3,22 @@ import re
import threading
import uuid
from copy import copy
from typing import Any, Dict, List, Optional, Type
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union

from langchain_openai import ChatOpenAI
from opentelemetry.trace import Span
from pydantic import UUID4, BaseModel, Field, field_validator, model_validator
from pydantic_core import PydanticCustomError

from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tasks.task_output import TaskOutput
from crewai.utilities import I18N, Converter, ConverterError, Printer
from crewai.telemetry.telemetry import Telemetry
from crewai.utilities import I18N, ConverterError, Printer
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser

if TYPE_CHECKING:
    from crewai.agents import Agent


class Task(BaseModel):
    """Class that represents a task to be executed.
@@ -42,7 +47,6 @@ class Task(BaseModel):
    tools_errors: int = 0
    delegations: int = 0
    i18n: I18N = I18N()
    thread: Optional[threading.Thread] = None
    prompt_context: Optional[str] = None
    description: str = Field(description="Description of the actual task.")
    expected_output: str = Field(
@@ -55,7 +59,7 @@ class Task(BaseModel):
    callback: Optional[Any] = Field(
        description="Callback to be executed after the task is completed.", default=None
    )
    agent: Optional[Agent] = Field(
    agent: Optional[BaseAgent] = Field(
        description="Agent responsible for executing the task.", default=None
    )
    context: Optional[List["Task"]] = Field(
@@ -95,8 +99,11 @@ class Task(BaseModel):
        default=False,
    )

    _telemetry: Telemetry
    _execution_span: Span | None = None
    _original_description: str | None = None
    _original_expected_output: str | None = None
    _thread: threading.Thread | None = None

    def __init__(__pydantic_self__, **data):
        config = data.pop("config", {})
@@ -118,6 +125,12 @@ class Task(BaseModel):
            return value[1:]
        return value

    @model_validator(mode="after")
    def set_private_attrs(self) -> "Task":
        """Set private attributes."""
        self._telemetry = Telemetry()
        return self

    @model_validator(mode="after")
    def set_attributes_based_on_config(self) -> "Task":
        """Set attributes based on the agent configuration."""
@@ -145,9 +158,21 @@ class Task(BaseModel):
        )
        return self

    def wait_for_completion(self) -> str | BaseModel:
        """Wait for asynchronous task completion and return the output."""
        assert self.async_execution, "Task is not set to be executed asynchronously."

        if self._thread:
            self._thread.join()
            self._thread = None

        assert self.output, "Task output is not set."

        return self.output.exported_output

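# Usage sketch for the async path above (assumes a Task constructed with
# async_execution=True and an assigned agent):
#   task.execute()                        # spawns a worker thread, returns immediately
#   result = task.wait_for_completion()   # joins the thread, returns exported output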
    def execute(  # type: ignore # Missing return statement
        self,
        agent: Agent | None = None,
        agent: BaseAgent | None = None,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
@@ -157,6 +182,8 @@ class Task(BaseModel):
            Output of the task.
        """

        self._execution_span = self._telemetry.task_started(self)

        agent = agent or self.agent
        if not agent:
            raise Exception(
@@ -168,8 +195,8 @@ class Task(BaseModel):
            context = []
            for task in self.context:
                if task.async_execution:
                    task.thread.join()  # type: ignore # Item "None" of "Thread | None" has no attribute "join"
                if task and task.output:
                    task.wait_for_completion()
                if task.output:
                    # type: ignore # Item "str" of "str | None" has no attribute "append"
                    context.append(task.output.raw_output)
            # type: ignore # Argument 1 to "join" of "str" has incompatible type "str | None"; expected "Iterable[str]"
@@ -179,10 +206,10 @@ class Task(BaseModel):
        tools = tools or self.tools

        if self.async_execution:
            self.thread = threading.Thread(
            self._thread = threading.Thread(
                target=self._execute, args=(agent, self, context, tools)
            )
            self.thread.start()
            self._thread.start()
        else:
            result = self._execute(
                task=self,
@@ -192,15 +219,15 @@ class Task(BaseModel):
            )
            return result

    def _execute(self, agent: Agent, task, context, tools):
    def _execute(self, agent: "Agent", task, context, tools):
        result = agent.execute_task(
            task=task,
            context=context,
            tools=tools,
        )

        exported_output = self._export_output(result)

        # type: ignore # the responses are usually str but need to figure out a more elegant solution here
        self.output = TaskOutput(
            description=self.description,
            exported_output=exported_output,
@@ -211,6 +238,10 @@ class Task(BaseModel):
        if self.callback:
            self.callback(self.output)

        if self._execution_span:
            self._telemetry.task_ended(self._execution_span, self)
            self._execution_span = None

        return exported_output

    def prompt(self) -> str:
@@ -246,7 +277,7 @@ class Task(BaseModel):
        """Increment the delegations counter."""
        self.delegations += 1

    def copy(self, agents: Optional[List[Agent]] = None) -> "Task":
    def copy(self, agents: Optional[List["Agent"]] = None) -> "Task":
        """Create a deep copy of the Task."""
        exclude = {
            "id",
@@ -262,15 +293,11 @@ class Task(BaseModel):
            [task.copy() for task in self.context] if self.context else None
        )

        # TODO: Make sure this clone approach is correct.
        def get_agent_by_role(role: str) -> Agent | None:
        def get_agent_by_role(role: str) -> Union["Agent", None]:
            return next((agent for agent in agents if agent.role == role), None)

        cloned_agent = get_agent_by_role(self.agent.role) if self.agent else None
        # cloned_agent = self.agent.copy() if self.agent else None
        print("TOOLS BEFORE COPY", self.tools)
        cloned_tools = copy(self.tools) if self.tools else []
        print("TOOLS AFTER COPY", cloned_tools)

        copied_task = Task(
            **copied_data,
@@ -311,14 +338,13 @@ class Task(BaseModel):
            pass

        # type: ignore # Item "None" of "Agent | None" has no attribute "function_calling_llm"
        llm = self.agent.function_calling_llm or self.agent.llm

        llm = getattr(self.agent, "function_calling_llm", None) or self.agent.llm
        if not self._is_gpt(llm):
            # type: ignore # Argument "model" to "PydanticSchemaParser" has incompatible type "type[BaseModel] | None"; expected "type[BaseModel]"
            model_schema = PydanticSchemaParser(model=model).get_schema()
            instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"

        converter = Converter(
        converter = self.agent.get_output_converter(
            llm=llm, text=result, model=model, instructions=instructions
        )

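For context on the `_export_output` change above, a hedged sketch of the structured-output path it serves, assuming the task's `output_pydantic` field behaves as elsewhere in the codebase (the `Summary` model is illustrative):

from pydantic import BaseModel

class Summary(BaseModel):
    title: str
    body: str

task = Task(
    description="Summarize the article.",
    expected_output="A title and a body.",
    output_pydantic=Summary,  # _export_output feeds the raw result through the converter
    agent=researcher,         # illustrative agent from the earlier sketch
)
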
@@ -1,8 +1,10 @@
from __future__ import annotations

import asyncio
import json
import os
import platform
from typing import Any
from typing import TYPE_CHECKING, Any

import pkg_resources
from opentelemetry import trace
@@ -10,7 +12,11 @@ from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExport
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.trace import Status, StatusCode
from opentelemetry.trace import Span, Status, StatusCode

if TYPE_CHECKING:
    from crewai.crew import Crew
    from crewai.task import Task


class Telemetry:
@@ -88,9 +94,6 @@ class Telemetry:
                self._add_attribute(span, "python_version", platform.python_version())
                self._add_attribute(span, "crew_id", str(crew.id))
                self._add_attribute(span, "crew_process", crew.process)
                self._add_attribute(
                    span, "crew_language", crew.prompt_file if crew.i18n else "None"
                )
                self._add_attribute(span, "crew_memory", crew.memory)
                self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
                self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
@@ -102,6 +105,8 @@ class Telemetry:
                    {
                        "id": str(agent.id),
                        "role": agent.role,
                        "goal": agent.goal,
                        "backstory": agent.backstory,
                        "verbose?": agent.verbose,
                        "max_iter": agent.max_iter,
                        "max_rpm": agent.max_rpm,
@@ -123,8 +128,16 @@ class Telemetry:
                [
                    {
                        "id": str(task.id),
                        "description": task.description,
                        "expected_output": task.expected_output,
                        "async_execution?": task.async_execution,
                        "human_input?": task.human_input,
                        "agent_role": task.agent.role if task.agent else "None",
                        "context": (
                            [task.description for task in task.context]
                            if task.context
                            else None
                        ),
                        "tools_names": [
                            tool.name.casefold() for tool in task.tools
                        ],
@@ -143,6 +156,38 @@ class Telemetry:
            except Exception:
                pass

    def task_started(self, task: Task) -> Span | None:
        """Records task started in a crew."""
        if self.ready:
            try:
                tracer = trace.get_tracer("crewai.telemetry")
                span = tracer.start_span("Task Execution")

                self._add_attribute(span, "task_id", str(task.id))
                self._add_attribute(span, "formatted_description", task.description)
                self._add_attribute(
                    span, "formatted_expected_output", task.expected_output
                )

                return span
            except Exception:
                pass

        return None

    def task_ended(self, span: Span, task: Task):
        """Records task execution in a crew."""
        if self.ready:
            try:
                self._add_attribute(
                    span, "output", task.output.raw_output if task.output else ""
                )

                span.set_status(Status(StatusCode.OK))
                span.end()
            except Exception:
                pass

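The two new hooks above are designed to bracket a task's lifetime: Task.execute() stores the span and closes it once the output is set. A minimal sketch of the intended pairing:

telemetry = Telemetry()
span = telemetry.task_started(task)   # None when telemetry is not ready
# ... the task runs ...
if span:
    telemetry.task_ended(span, task)  # records the output, sets OK status, ends the span
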
    def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
        """Records the repeated usage 'error' of a tool by an agent."""
        if self.ready:
@@ -207,7 +252,7 @@ class Telemetry:
            except Exception:
                pass

    def crew_execution_span(self, crew):
    def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None):
        """Records the complete execution of a crew.
        This is only collected if the user has opted-in to share the crew.
        """
@@ -221,6 +266,7 @@ class Telemetry:
                pkg_resources.get_distribution("crewai").version,
            )
            self._add_attribute(span, "crew_id", str(crew.id))
            self._add_attribute(span, "inputs", json.dumps(inputs))
            self._add_attribute(
                span,
                "crew_agents",
@@ -238,7 +284,7 @@ class Telemetry:
                        "llm": json.dumps(self._safe_llm_attributes(agent.llm)),
                        "delegation_enabled?": agent.allow_delegation,
                        "tools_names": [
                            tool.name.casefold() for tool in agent.tools
                            tool.name.casefold() for tool in agent.tools or []
                        ],
                    }
                    for agent in crew.agents
@@ -253,16 +299,17 @@ class Telemetry:
                    {
                        "id": str(task.id),
                        "description": task.description,
                        "expected_output": task.expected_output,
                        "async_execution?": task.async_execution,
                        "output": task.expected_output,
                        "human_input?": task.human_input,
                        "agent_role": task.agent.role if task.agent else "None",
                        "context": (
                            [task.description for task in task.context]
                            if task.context
                            else "None"
                            else None
                        ),
                        "tools_names": [
                            tool.name.casefold() for tool in task.tools
                            tool.name.casefold() for tool in task.tools or []
                        ],
                    }
                    for task in crew.tasks

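The new `inputs` parameter is serialized with json.dumps, so callers should pass a JSON-serializable dict (or None). A hedged sketch of the call site, with illustrative kickoff inputs:

crew_inputs = {"topic": "AI agents"}
telemetry.crew_execution_span(crew, crew_inputs)
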
@@ -1,106 +1,25 @@
from typing import List, Union

from langchain.tools import StructuredTool
from pydantic import BaseModel, Field

from crewai.agent import Agent
from crewai.task import Task
from crewai.utilities import I18N
from crewai.agents.agent_builder.utilities.base_agent_tool import BaseAgentTools


class AgentTools(BaseModel):
class AgentTools(BaseAgentTools):
    """Default tools around agent delegation"""

    agents: List[Agent] = Field(description="List of agents in this crew.")
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")

    def tools(self):
        coworkers = f"[{', '.join([f'{agent.role}' for agent in self.agents])}]"
        tools = [
            StructuredTool.from_function(
                func=self.delegate_work,
                name="Delegate work to coworker",
                description=self.i18n.tools("delegate_work").format(
                    coworkers=f"[{', '.join([f'{agent.role}' for agent in self.agents])}]"
                    coworkers=coworkers
                ),
            ),
            StructuredTool.from_function(
                func=self.ask_question,
                name="Ask question to coworker",
                description=self.i18n.tools("ask_question").format(
                    coworkers=f"[{', '.join([f'{agent.role}' for agent in self.agents])}]"
                ),
                description=self.i18n.tools("ask_question").format(coworkers=coworkers),
            ),
        ]
        return tools

    def delegate_work(
        self,
        task: str,
        context: Union[str, None] = None,
        coworker: Union[str, None] = None,
        **kwargs,
    ):
        """Useful to delegate a specific task to a coworker passing all necessary context and names."""
        coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker")
        if coworker:
            is_list = coworker.startswith("[") and coworker.endswith("]")
            if is_list:
                coworker = coworker[1:-1].split(",")[0]
        return self._execute(coworker, task, context)

    def ask_question(
        self,
        question: str,
        context: Union[str, None] = None,
        coworker: Union[str, None] = None,
        **kwargs,
    ):
        """Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
        coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker")
        if coworker:
            is_list = coworker.startswith("[") and coworker.endswith("]")
            if is_list:
                coworker = coworker[1:-1].split(",")[0]
        return self._execute(coworker, question, context)

    def _execute(self, agent: Union[str, None], task: str, context: Union[str, None]):
        """Execute the command."""
        try:
            if agent is None:
                agent = ""

            # It is important to remove the quotes from the agent name.
            # The reason we have to do this is because less-powerful LLM's
            # have difficulty producing valid JSON.
            # As a result, we end up with invalid JSON that is truncated like this:
            # {"task": "....", "coworker": "....
            # when it should look like this:
            # {"task": "....", "coworker": "...."}
            agent_name = agent.casefold().replace('"', "").replace("\n", "")

            agent = [
                available_agent
                for available_agent in self.agents
                if available_agent.role.casefold().replace("\n", "") == agent_name
            ]
        except Exception as _:
            return self.i18n.errors("agent_tool_unexsiting_coworker").format(
                coworkers="\n".join(
                    [f"- {agent.role.casefold()}" for agent in self.agents]
                )
            )

        if not agent:
            return self.i18n.errors("agent_tool_unexsiting_coworker").format(
                coworkers="\n".join(
                    [f"- {agent.role.casefold()}" for agent in self.agents]
                )
            )

        agent = agent[0]
        task = Task(
            description=task,
            agent=agent,
            expected_output="Your best answer to your coworker asking you this, accounting for the context shared.",
        )
        return agent.execute_task(task, context)

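How the coworker normalization above plays out, with illustrative agents and roles (a bracketed list collapses to its first entry before the case-insensitive role match in `_execute`):

tools = AgentTools(agents=[researcher, writer])  # illustrative agents
tools.delegate_work(
    task="Draft the introduction",
    context="Use the research notes.",
    coworker='["Researcher", "Writer"]',
)
# startswith("[")/endswith("]") detects the list, [1:-1].split(",")[0] keeps
# '"Researcher"', and _execute strips quotes and newlines, then casefolds it
# before matching against the agents' roles.
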
@@ -98,7 +98,7 @@ class ToolUsage:
        tool_string: str,
        tool: BaseTool,
        calling: Union[ToolCalling, InstructorToolCalling],
    ) -> None:  # TODO: Fix this return type
    ) -> str:  # TODO: Fix this return type --> finecwg : I updated return type to str
        if self._check_tool_repeated_usage(calling=calling):  # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None)
            try:
                result = self._i18n.errors("task_repeated_usage").format(
@@ -123,7 +123,7 @@ class ToolUsage:
                    tool=calling.tool_name, input=calling.arguments
                )

            if not result:
            if result is None:  #! finecwg: if not result --> if result is None
                try:
                    if calling.tool_name in [
                        "Delegate work to coworker",

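The `result is None` change above matters because falsy-but-valid tool results would otherwise be treated as missing. A two-line illustration:

result = ""             # a tool legitimately returned an empty string
if not result: ...      # old check: wrongly falls through to re-running the tool
if result is None: ...  # new check: only a genuinely absent result does
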
@@ -1,9 +1,22 @@
from .converter import Converter, ConverterError
from .file_handler import FileHandler
from .i18n import I18N
from .instructor import Instructor
from .logger import Logger
from .parser import YamlParser
from .printer import Printer
from .prompts import Prompts
from .rpm_controller import RPMController
from .fileHandler import FileHandler
from .parser import YamlParser

__all__ = [
    "Converter",
    "ConverterError",
    "FileHandler",
    "I18N",
    "Instructor",
    "Logger",
    "Printer",
    "Prompts",
    "RPMController",
    "YamlParser",
]

src/crewai/utilities/constants.py (new file, +2)
@@ -0,0 +1,2 @@
TRAINING_DATA_FILE = "training_data.pkl"
TRAINED_AGENTS_DATA_FILE = "trained_agents_data.pkl"

@@ -1,9 +1,11 @@
import json
from typing import Any, Optional

from langchain.schema import HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field, PrivateAttr, model_validator
from pydantic import model_validator
from crewai.agents.agent_builder.utilities.base_output_converter_base import (
    OutputConverter,
)


class ConverterError(Exception):
@@ -14,19 +16,9 @@ class ConverterError(Exception):
        self.message = message


class Converter(BaseModel):
class Converter(OutputConverter):
    """Class that converts text into either pydantic or json."""

    _is_gpt: bool = PrivateAttr(default=True)
    text: str = Field(description="Text to be converted.")
    llm: Any = Field(description="The language model to be used to convert the text.")
    model: Any = Field(description="The model to be used to convert the text.")
    instructions: str = Field(description="Conversion instructions to the LLM.")
    max_attemps: Optional[int] = Field(
        description="Max number of attemps to try to get the output formated.",
        default=3,
    )

    @model_validator(mode="after")
    def check_llm_provider(self):
        if not self._is_gpt(self.llm):

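Usage is unchanged by the refactor; the fields (`llm`, `text`, `model`, `instructions`, `max_attemps`) now come from the `OutputConverter` base class. A sketch, where `Person` stands in for any pydantic model and `llm` for any supported chat model:

converter = Converter(
    llm=llm,
    text="name: Ada, age: 36",
    model=Person,
    instructions="Convert this raw text into valid JSON.",
)
person = converter.to_pydantic()  # retries on failure, bounded by max_attemps
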
@@ -26,6 +26,18 @@ class TaskEvaluation(BaseModel):
    )


class TrainingTaskEvaluation(BaseModel):
    suggestions: List[str] = Field(
        description="Based on the Human Feedbacks and the comparison between Initial Outputs and Improved outputs provide action items based on human_feedback for future tasks."
    )
    quality: float = Field(
        description="A score from 0 to 10 evaluating on completion, quality, and overall performance from the improved output to the initial output based on the human feedback."
    )
    final_summary: str = Field(
        description="A step by step action items to improve the next Agent based on the human-feedback and improved output."
    )


class TaskEvaluator:
    def __init__(self, original_agent):
        self.llm = original_agent.llm
@@ -59,3 +71,49 @@ class TaskEvaluator:

    def _is_gpt(self, llm) -> bool:
        return isinstance(llm, ChatOpenAI) and llm.openai_api_base is None

    def evaluate_training_data(
        self, training_data: dict, agent_id: str
    ) -> TrainingTaskEvaluation:
        """
        Evaluate the training data based on the llm output, human feedback, and improved output.

        Parameters:
        - training_data (dict): The training data to be evaluated.
        - agent_id (str): The ID of the agent.
        """

        output_training_data = training_data[agent_id]

        final_aggregated_data = ""
        for _, data in output_training_data.items():
            final_aggregated_data += (
                f"Initial Output:\n{data['initial_output']}\n\n"
                f"Human Feedback:\n{data['human_feedback']}\n\n"
                f"Improved Output:\n{data['improved_output']}\n\n"
            )

        evaluation_query = (
            "Assess the quality of the training data based on the llm output, human feedback , and llm output improved result.\n\n"
            f"{final_aggregated_data}"
            "Please provide:\n"
            "- Based on the Human Feedbacks and the comparison between Initial Outputs and Improved outputs provide action items based on human_feedback for future tasks\n"
            "- A score from 0 to 10 evaluating on completion, quality, and overall performance from the improved output to the initial output based on the human feedback\n"
        )
        instructions = "I'm gonna convert this raw text into valid JSON."

        if not self._is_gpt(self.llm):
            model_schema = PydanticSchemaParser(
                model=TrainingTaskEvaluation
            ).get_schema()
            instructions = f"{instructions}\n\nThe json should have the following structure, with the following keys:\n{model_schema}"

        converter = Converter(
            llm=self.llm,
            text=evaluation_query,
            model=TrainingTaskEvaluation,
            instructions=instructions,
        )

        pydantic_result = converter.to_pydantic()
        return pydantic_result

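The shape `evaluate_training_data` expects, inferred from the loop above and from `CrewTrainingHandler.append` below (keyed per agent, then per training iteration):

training_data = {
    "agent-uuid": {                      # agent_id
        0: {                             # train_iteration
            "initial_output": "...",
            "human_feedback": "...",
            "improved_output": "...",
        },
    },
}
evaluation = TaskEvaluator(agent).evaluate_training_data(training_data, "agent-uuid")
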
@@ -1,20 +0,0 @@
import os
from datetime import datetime


class FileHandler:
    """take care of file operations, currently it only logs messages to a file"""

    def __init__(self, file_path):
        if isinstance(file_path, bool):
            self._path = os.path.join(os.curdir, "logs.txt")
        elif isinstance(file_path, str):
            self._path = file_path
        else:
            raise ValueError("file_path must be either a boolean or a string.")

    def log(self, **kwargs):
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        message = f"{now}: ".join([f"{key}={value}" for key, value in kwargs.items()])
        with open(self._path, "a", encoding = 'utf-8') as file:
            file.write(message + "\n")
src/crewai/utilities/file_handler.py (new file, +69)
@@ -0,0 +1,69 @@
import os
import pickle
from datetime import datetime


class FileHandler:
    """take care of file operations, currently it only logs messages to a file"""

    def __init__(self, file_path):
        if isinstance(file_path, bool):
            self._path = os.path.join(os.curdir, "logs.txt")
        elif isinstance(file_path, str):
            self._path = file_path
        else:
            raise ValueError("file_path must be either a boolean or a string.")

    def log(self, **kwargs):
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        message = f"{now}: ".join([f"{key}={value}" for key, value in kwargs.items()])
        with open(self._path, "a", encoding="utf-8") as file:
            file.write(message + "\n")


class PickleHandler:
    def __init__(self, file_name: str) -> None:
        """
        Initialize the PickleHandler with the name of the file where data will be stored.
        The file will be saved in the current directory.

        Parameters:
        - file_name (str): The name of the file for saving and loading data.
        """
        self.file_path = os.path.join(os.getcwd(), file_name)
        self._initialize_file()

    def _initialize_file(self) -> None:
        """
        Initialize the file with an empty dictionary if it does not exist or is empty.
        """
        if not os.path.exists(self.file_path) or os.path.getsize(self.file_path) == 0:
            self.save({})  # Save an empty dictionary to initialize the file

    def save(self, data) -> None:
        """
        Save the data to the specified file using pickle.

        Parameters:
        - data (object): The data to be saved.
        """
        with open(self.file_path, "wb") as file:
            pickle.dump(data, file)

    def load(self) -> dict:
        """
        Load the data from the specified file using pickle.

        Returns:
        - dict: The data loaded from the file.
        """
        if not os.path.exists(self.file_path) or os.path.getsize(self.file_path) == 0:
            return {}  # Return an empty dictionary if the file does not exist or is empty

        with open(self.file_path, "rb") as file:
            try:
                return pickle.load(file)
            except EOFError:
                return {}  # Return an empty dictionary if the file is empty or corrupted
            except Exception:
                raise  # Raise any other exceptions that occur during loading
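A round-trip sketch for the new PickleHandler (the file lands in the current working directory):

handler = PickleHandler("training_data.pkl")
data = handler.load()                # {} on first use, via _initialize_file()
data["agent-1"] = {"iteration": 0}
handler.save(data)
assert handler.load() == {"agent-1": {"iteration": 0}}
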
@@ -5,36 +5,10 @@ import tiktoken
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import LLMResult


class TokenProcess:
    id = uuid.uuid4()  # TODO: REMOVE THIS
    total_tokens: int = 0
    prompt_tokens: int = 0
    completion_tokens: int = 0
    successful_requests: int = 0

    def sum_prompt_tokens(self, tokens: int):
        self.prompt_tokens = self.prompt_tokens + tokens
        self.total_tokens = self.total_tokens + tokens

    def sum_completion_tokens(self, tokens: int):
        self.completion_tokens = self.completion_tokens + tokens
        self.total_tokens = self.total_tokens + tokens

    def sum_successful_requests(self, requests: int):
        self.successful_requests = self.successful_requests + requests

    def get_summary(self) -> Dict[str, Any]:
        return {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "successful_requests": self.successful_requests,
        }
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess


class TokenCalcHandler(BaseCallbackHandler):
    id = uuid.uuid4()  # TODO: REMOVE THIS
    model_name: str = ""
    token_cost_process: TokenProcess

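TokenProcess now lives under crewai.agents.agent_builder.utilities; assuming the relocated class keeps the interface shown in the removed block, the accounting works as before:

process = TokenProcess()
process.sum_prompt_tokens(120)
process.sum_completion_tokens(30)
process.sum_successful_requests(1)
process.get_summary()
# {"total_tokens": 150, "prompt_tokens": 120,
#  "completion_tokens": 30, "successful_requests": 1}
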
src/crewai/utilities/training_handler.py (new file, +31)
@@ -0,0 +1,31 @@
from crewai.utilities.file_handler import PickleHandler


class CrewTrainingHandler(PickleHandler):
    def save_trained_data(self, agent_id: str, trained_data: dict) -> None:
        """
        Save the trained data for a specific agent.

        Parameters:
        - agent_id (str): The ID of the agent.
        - trained_data (dict): The trained data to be saved.
        """
        data = self.load()
        data[agent_id] = trained_data
        self.save(data)

    def append(self, train_iteration: int, agent_id: str, new_data) -> None:
        """
        Append new data to the existing pickle file.

        Parameters:
        - new_data (object): The new data to be appended.
        """
        data = self.load()

        if agent_id in data:
            data[agent_id][train_iteration] = new_data
        else:
            data[agent_id] = {train_iteration: new_data}

        self.save(data)
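
Putting the new training pieces together, a sketch of how the handler and the constants file combine (the training loop is assumed to call this internally):

from crewai.utilities.constants import TRAINING_DATA_FILE
from crewai.utilities.training_handler import CrewTrainingHandler

handler = CrewTrainingHandler(TRAINING_DATA_FILE)
handler.append(
    train_iteration=0,
    agent_id="agent-1",
    new_data={
        "initial_output": "...",
        "human_feedback": "...",
        "improved_output": "...",
    },
)
handler.save_trained_data("agent-1", {"suggestions": ["..."], "quality": 8.5})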