From c8f5bdc19f40b691df97755028be6fff8f8e1fd8 Mon Sep 17 00:00:00 2001 From: GabeKoga Date: Thu, 28 Mar 2024 17:48:11 -0300 Subject: [PATCH] feature: human input per task --- src/crewai/agents/executor.py | 28 ++++++++++++++++++++++++++-- src/crewai/task.py | 4 ++++ src/crewai/translations/en.json | 4 +++- 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/src/crewai/agents/executor.py b/src/crewai/agents/executor.py index 156c125fd..5521dffed 100644 --- a/src/crewai/agents/executor.py +++ b/src/crewai/agents/executor.py @@ -18,6 +18,7 @@ from crewai.utilities import I18N class CrewAgentExecutor(AgentExecutor): _i18n: I18N = I18N() + human_input: bool = False llm: Any = None iterations: int = 0 task: Any = None @@ -54,6 +55,8 @@ class CrewAgentExecutor(AgentExecutor): [tool.name for tool in self.tools], excluded_colors=["green", "red"] ) intermediate_steps: List[Tuple[AgentAction, str]] = [] + # Get info about human input from Task + self.human_input = self.task.human_input # Let's start tracking the number of iterations and time elapsed self.iterations = 0 time_elapsed = 0.0 @@ -169,8 +172,23 @@ class CrewAgentExecutor(AgentExecutor): # If the tool chosen is the finishing tool, then we end and return. 
if isinstance(output, AgentFinish): - yield output - return + if self.human_input: + self.human_input = False + human_feedback = self._human_input(output.return_values["output"]) + action = AgentAction( + tool="Human Input", tool_input=human_feedback, log=output.log + ) + yield AgentStep( + action=action, + observation=self._i18n.slice("human_feedback").format( + human_feedback=human_feedback + ), + ) + return + + else: + yield output + return actions: List[AgentAction] actions = [output] if isinstance(output, AgentAction) else output @@ -203,3 +221,9 @@ class CrewAgentExecutor(AgentExecutor): tools=", ".join([tool.name for tool in self.tools]), ) yield AgentStep(action=agent_action, observation=observation) + + def _human_input(self, final_answer: str) -> str: + """Get human input.""" + return input( + self._i18n.slice("getting_input").format(final_answer=final_answer) + ) diff --git a/src/crewai/task.py b/src/crewai/task.py index ff7af1b89..55972ef06 100644 --- a/src/crewai/task.py +++ b/src/crewai/task.py @@ -70,6 +70,10 @@ class Task(BaseModel): frozen=True, description="Unique identifier for the object, not set by user.", ) + human_input: Optional[bool] = Field( + description="Whether the task should have a human review the final answer of the agent", + default=False, + ) def __init__(__pydantic_self__, **data): config = data.pop("config", {}) diff --git a/src/crewai/translations/en.json b/src/crewai/translations/en.json index cbe08dc44..99524e9e6 100644 --- a/src/crewai/translations/en.json +++ b/src/crewai/translations/en.json @@ -15,7 +15,9 @@ "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n", "format_without_tools": "\nSorry, I didn't use the right format. 
I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n", "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}", - "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary." + "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary.", + "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}", + "getting_input": "This is the agent final answer: {final_answer}\nPlease provide feedback: " }, "errors": { "unexpected_format": "\nSorry, I didn't use the expected format, I MUST either use a tool (use one at time) OR give my best final answer.\n",