mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-09 16:18:30 +00:00
* Adding tool caching and loop-execution prevention. This adds guardrails that both prevent the same tool from being used consecutively and cache tool results across the entire crew, cutting down execution time and eventual LLM calls. This matters most for smaller open-source models, which tend to fall into these behavior patterns. It also includes smaller improvements to the tool prompt and agent tools, all with the same intention: guiding models to conform better to agent instructions.
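A hedged sketch of how the crew-wide cache described above is meant to be used. Only `Agent`, `CacheHandler`, and `set_cache_handler` come from this module; the role/goal/backstory values are placeholders:

cache = CacheHandler()
researcher = Agent(role="Researcher", goal="Gather sources", backstory="An analyst")
writer = Agent(role="Writer", goal="Draft the report", backstory="An editor")
for agent in (researcher, writer):
    agent.set_cache_handler(cache)  # both agents now share one tool-result cache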
from typing import Any, List, Optional

from langchain.agents import AgentExecutor
from langchain.agents.format_scratchpad import format_log_to_str
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.runnables.config import RunnableConfig
from pydantic import BaseModel, Field, InstanceOf, model_validator

from .agents import CacheHandler, CrewAgentOutputParser, ToolsHandler
from .prompts import Prompts


class Agent(BaseModel):
    """Represents an agent in a system.

    Each agent has a role, a goal, a backstory, and an optional language model (llm).
    The agent can also have memory, can operate in verbose mode, and can delegate
    tasks to other agents.

    Attributes:
        agent_executor: An instance of the AgentExecutor class.
        role: The role of the agent.
        goal: The objective of the agent.
        backstory: The backstory of the agent.
        llm: The language model that will run the agent.
        memory: Whether the agent should have memory or not.
        verbose: Whether the agent execution should be in verbose mode.
        allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
        tools: Tools at the agent's disposal.
        tools_handler: An instance of the ToolsHandler class.
        cache_handler: An instance of the CacheHandler class.
    """

    class Config:
        arbitrary_types_allowed = True

    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Objective of the agent")
    backstory: str = Field(description="Backstory of the agent")
    llm: Optional[Any] = Field(
        default_factory=lambda: ChatOpenAI(
            temperature=0.7,
            model_name="gpt-4",
        ),
        description="Language model that will run the agent.",
    )
    memory: bool = Field(
        default=True, description="Whether the agent should have memory or not"
    )
    verbose: bool = Field(
        default=False, description="Verbose mode for the Agent Execution"
    )
    allow_delegation: bool = Field(
        default=True, description="Allow delegation of tasks to other agents"
    )
    tools: List[Any] = Field(
        default_factory=list, description="Tools at the agent's disposal"
    )
    agent_executor: Optional[InstanceOf[AgentExecutor]] = Field(
        default=None, description="An instance of the AgentExecutor class."
    )
    tools_handler: Optional[InstanceOf[ToolsHandler]] = Field(
        default=None, description="An instance of the ToolsHandler class."
    )
    cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
        # default_factory avoids silently sharing one mutable CacheHandler
        # across every Agent that is not given an explicit cache.
        default_factory=CacheHandler,
        description="An instance of the CacheHandler class.",
    )

    @model_validator(mode="after")
    def check_agent_executor(self) -> "Agent":
        # Build the executor lazily so a cache handler is always wired in
        # before the agent runs.
        if not self.agent_executor:
            self.set_cache_handler(self.cache_handler)
        return self

    def execute_task(
        self,
        task: str,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    ) -> str:
        """Execute a task with the agent.

        Args:
            task: Task to execute.
            context: Context to execute the task in.
            tools: Tools to use for the task.

        Returns:
            Output of the agent.
        """
        if context:
            task = "\n".join(
                [task, "\nThis is the context you are working with:", context]
            )

        tools = tools or self.tools
        self.agent_executor.tools = tools

        return self.agent_executor.invoke(
            {
                "input": task,
                "tool_names": self.__tools_names(tools),
                "tools": render_text_description(tools),
            },
            # Register the tools handler as a callback so tool usage can be
            # tracked and cached across the crew.
            RunnableConfig(callbacks=[self.tools_handler]),
        )["output"]

    def set_cache_handler(self, cache_handler: CacheHandler) -> None:
        # Swap in a (possibly shared) cache so repeated tool calls are served
        # from cache, then rebuild the executor with the new handlers.
        self.cache_handler = cache_handler
        self.tools_handler = ToolsHandler(cache=self.cache_handler)
        self.__create_agent_executor()

    def __create_agent_executor(self) -> None:
        """Create an agent executor for the agent.

        Sets `self.agent_executor` to a new instance of the AgentExecutor class.
        """
        agent_args = {
            "input": lambda x: x["input"],
            "tools": lambda x: x["tools"],
            "tool_names": lambda x: x["tool_names"],
            "agent_scratchpad": lambda x: format_log_to_str(x["intermediate_steps"]),
        }
        executor_args = {
            "tools": self.tools,
            "verbose": self.verbose,
            "handle_parsing_errors": True,
        }

        if self.memory:
            summary_memory = ConversationSummaryMemory(
                llm=self.llm, memory_key="chat_history", input_key="input"
            )
            executor_args["memory"] = summary_memory
            agent_args["chat_history"] = lambda x: x["chat_history"]
            prompt = Prompts.TASK_EXECUTION_WITH_MEMORY_PROMPT
        else:
            prompt = Prompts.TASK_EXECUTION_PROMPT

        execution_prompt = prompt.partial(
            goal=self.goal,
            role=self.role,
            backstory=self.backstory,
        )

        # Stop generation as soon as the model starts writing an
        # "Observation"; filling that in is the executor's job.
        bind = self.llm.bind(stop=["\nObservation"])
        inner_agent = (
            agent_args
            | execution_prompt
            | bind
            | CrewAgentOutputParser(
                tools_handler=self.tools_handler, cache=self.cache_handler
            )
        )
        self.agent_executor = AgentExecutor(agent=inner_agent, **executor_args)
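
    # Hedged sketch of the scratchpad wiring above: format_log_to_str renders
    # each (action, observation) step roughly as
    #     <the model's Thought/Action text>
    #     Observation: <tool output>
    #     Thought:
    # which is what the "\nObservation" stop sequence pairs with.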

    @staticmethod
    def __tools_names(tools) -> str:
        return ", ".join([t.name for t in tools])