updating agent with long term memory

Joao Moura
2023-11-05 17:43:23 -03:00
parent c4f86a60bb
commit c936181cc1


@@ -8,13 +8,19 @@ from langchain.agents import AgentExecutor
 from langchain.chat_models import ChatOpenAI as OpenAI
 from langchain.tools.render import render_text_description
 from langchain.agents.format_scratchpad import format_log_to_str
-from langchain.agents.output_parsers import ReActSingleInputOutputParser, PydanticOutputParser
+from langchain.agents.output_parsers import ReActSingleInputOutputParser
+from langchain.memory import (
+    ConversationSummaryMemory,
+    ConversationEntityMemory,
+    CombinedMemory
+)
 from .prompts import Prompts
-from .agent.agent_vote import AgentVote
+from .agents.agent_vote import AgentVote
 
 
 class Agent(BaseModel):
     """Generic agent implementation."""
+
+    agent_executor: AgentExecutor = None
     role: str = Field(description="Role of the agent")
     goal: str = Field(description="Objective of the agent")
     backstory: str = Field(description="Backstory of the agent")
@@ -27,32 +33,43 @@ class Agent(BaseModel):
         default=Prompts
     )
     llm: str = Field(
-        description="LLM of the agent",
+        description="LLM that will run the agent",
         default=OpenAI(
             temperature=0.7,
             model="gpt-4",
             verbose=True
         )
     )
 
-    def vote_agent_for_task(self, task: str) -> AgentVote:
-        """
-        Execute a task with the agent.
-        Parameters:
-            task (str): Task to execute
-        Returns:
-            output (AgentVote): The agent voted to execute the task
-        """
-        parser = PydanticOutputParser(pydantic_object=AgentVote)
-        prompt = Prompts.AGENT_EXECUTION_PROMPT.partial(
-            tools=render_text_description(self.tools),
-            tool_names=self.__tools_names(),
-            backstory=self.backstory,
-            role=self.role,
-            goal=self.goal,
-            format_instructions=parser.get_format_instructions()
-        )
-        return self.__function_calling(task, prompt, parser)
+    def __init__(self, **data):
+        super().__init__(**data)
+        execution_prompt = Prompts.TASK_EXECUTION_PROMPT.partial(
+            goal=self.goal,
+            role=self.role,
+            backstory=self.backstory,
+        )
+        llm_with_bind = self.llm.bind(stop=["\nObservation"])
+        inner_agent = {
+            "input": lambda x: x["input"],
+            "tools": lambda x: x["tools"],
+            "entities": lambda x: x["entities"],
+            "tool_names": lambda x: x["tool_names"],
+            "chat_history": lambda x: x["chat_history"],
+            "agent_scratchpad": lambda x: format_log_to_str(x['intermediate_steps']),
+        } | execution_prompt | llm_with_bind | ReActSingleInputOutputParser()
+        summary_memory = ConversationSummaryMemory(llm=self.llm, memory_key='chat_history', input_key="input")
+        entity_memory = ConversationEntityMemory(llm=self.llm, input_key="input")
+        memory = CombinedMemory(memories=[entity_memory, summary_memory])
+        self.agent_executor = AgentExecutor(
+            agent=inner_agent,
+            tools=self.tools,
+            memory=memory,
+            verbose=True,
+            handle_parsing_errors=True
+        )
 
     def execute_task(self, task: str) -> str:
         """
@@ -62,41 +79,11 @@ class Agent(BaseModel):
         Returns:
             output (str): Output of the agent
         """
-        prompt = Prompts.AGENT_EXECUTION_PROMPT.partial(
-            tools=render_text_description(self.tools),
-            tool_names=self.__tools_names(),
-            backstory=self.backstory,
-            role=self.role,
-            goal=self.goal,
-        )
-        return self.__execute_task(task, prompt)
-
-    def __function_calling(self, input: str, prompt: str, parser: str) -> str:
-        inner_agent = {
-            "input": lambda x: x["input"],
-            "agent_scratchpad": lambda x: format_log_to_str(x['intermediate_steps'])
-        } | prompt | parser
-        return self.__execute(inner_agent, input)
-
-    def __execute_task(self, input: str, prompt: str) -> str:
-        chat_with_bind = self.llm.bind(stop=["\nObservation"])
-        inner_agent = {
-            "input": lambda x: x["input"],
-            "agent_scratchpad": lambda x: format_log_to_str(x['intermediate_steps'])
-        } | prompt | chat_with_bind | ReActSingleInputOutputParser()
-        return self.__execute(inner_agent, input)
-
-    def __execute(self, inner_agent, input):
-        agent_executor = AgentExecutor(
-            agent=inner_agent,
-            tools=self.tools,
-            verbose=True,
-            handle_parsing_errors=True
-        )
-        return agent_executor.invoke({"input": input})['output']
+        return self.agent_executor.invoke({
+            "input": task,
+            "tool_names": self.__tools_names(),
+            "tools": render_text_description(self.tools),
+        })['output']
 
     def __tools_names(self) -> str:
         return ", ".join([t.name for t in self.tools])