feature: human input per task (#395)

* feature: human input per task

* Update executor.py

* Update executor.py

* Update executor.py

* Update executor.py

* Update executor.py

* feat: rework human input to support unit testing
added documentation and a unit test

* Create test_agent_human_input.yaml

add the YAML file for the test

---------

Co-authored-by: João Moura <joaomdmoura@gmail.com>
Authored by GabeKoga on 2024-04-01 10:04:56 -03:00, committed by GitHub
parent 22ab99cbd6
commit bcf701b287
7 changed files with 449 additions and 12 deletions
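In practice, the new flag is opt-in per task. A minimal usage sketch, assuming the crewAI public API of this era (the agent and task contents are illustrative, not taken from the commit):

    from crewai import Agent, Task, Crew

    writer = Agent(
        role="Writer",
        goal="Draft a short product announcement",
        backstory="An experienced copywriter.",
    )

    task = Task(
        description="Write a one-paragraph announcement for the new release.",
        expected_output="A single, punchy paragraph.",
        human_input=True,  # the per-task flag added by this commit
        agent=writer,
    )

    crew = Crew(agents=[writer], tasks=[task])
    result = crew.kickoff()  # pauses to prompt for feedback before accepting the final answer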

src/crewai/agents/executor.py

@@ -18,6 +18,7 @@ from crewai.utilities import I18N
 class CrewAgentExecutor(AgentExecutor):
     _i18n: I18N = I18N()
+    should_ask_for_human_input: bool = False
     llm: Any = None
     iterations: int = 0
     task: Any = None
@@ -54,6 +55,9 @@ class CrewAgentExecutor(AgentExecutor):
             [tool.name for tool in self.tools], excluded_colors=["green", "red"]
         )
         intermediate_steps: List[Tuple[AgentAction, str]] = []
+        # Allowing human input given task setting
+        if self.task.human_input:
+            self.should_ask_for_human_input = True
         # Let's start tracking the number of iterations and time elapsed
         self.iterations = 0
         time_elapsed = 0.0
@@ -169,8 +173,24 @@
         # If the tool chosen is the finishing tool, then we end and return.
         if isinstance(output, AgentFinish):
-            yield output
-            return
+            if self.should_ask_for_human_input:
+                # Making sure we only ask for it once, so disabling for the next thought loop
+                self.should_ask_for_human_input = False
+                human_feedback = self._ask_human_input(output.return_values["output"])
+                action = AgentAction(
+                    tool="Human Input", tool_input=human_feedback, log=output.log
+                )
+                yield AgentStep(
+                    action=action,
+                    observation=self._i18n.slice("human_feedback").format(
+                        human_feedback=human_feedback
+                    ),
+                )
+                return
+            else:
+                yield output
+                return
         actions: List[AgentAction]
         actions = [output] if isinstance(output, AgentAction) else output
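This block is the core of the feature: the first AgentFinish is intercepted, the human's reply is wrapped in an AgentStep observation, and the agent runs one more thought loop to produce a revised final answer. The same ask-once pattern in isolation (a toy sketch with hypothetical names, not the project's API):

    def run_with_human_review(generate, ask=input):
        # First pass: the agent produces a draft final answer.
        draft = generate(None)
        # Ask exactly once, mirroring the should_ask_for_human_input flag flip.
        feedback = ask("This is the agent final answer: %s\nPlease provide a feedback: " % draft)
        # Second pass: the feedback is fed back in, yielding a revised answer.
        return generate(feedback)

    final = run_with_human_review(lambda fb: "revised answer" if fb else "draft answer")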
@@ -203,3 +223,9 @@
                     tools=", ".join([tool.name for tool in self.tools]),
                 )
             yield AgentStep(action=agent_action, observation=observation)
+
+    def _ask_human_input(self, final_answer: dict) -> str:
+        """Get human input."""
+        return input(
+            self._i18n.slice("getting_input").format(final_answer=final_answer)
+        )
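Routing the prompt through the built-in input() is what makes the flow unit-testable: a test can patch input() and supply a canned reply instead of blocking on a terminal. A plausible sketch of that technique (the commit's actual test may differ):

    from unittest import mock

    # Simulate the human reply so code that calls input() never blocks.
    with mock.patch("builtins.input", return_value="Please make it shorter."):
        feedback = input("This is the agent final answer: ...\nPlease provide a feedback: ")

    assert feedback == "Please make it shorter."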

src/crewai/task.py

@@ -70,6 +70,10 @@ class Task(BaseModel):
         frozen=True,
         description="Unique identifier for the object, not set by user.",
     )
+    human_input: Optional[bool] = Field(
+        description="Whether the task should have a human review the final answer of the agent",
+        default=False,
+    )
     _original_description: str | None = None
     _original_expected_output: str | None = None
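Because the field defaults to False, existing task definitions keep their behavior and human review is strictly opt-in. A quick check (assuming description and expected_output are the only other fields needed here):

    t = Task(description="Summarize the report", expected_output="A short summary")
    assert t.human_input is False  # default: no human review

    t = Task(description="Summarize the report", expected_output="A short summary", human_input=True)
    assert t.human_input is True   # opted in for this task only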

src/crewai/translations/en.json

@@ -15,7 +15,9 @@
     "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
     "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
     "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
-    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary."
+    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary.",
+    "human_feedback": "You got human feedback on your work, re-avaluate it and give a new Final Answer when ready.\n {human_feedback}",
+    "getting_input": "This is the agent final answer: {final_answer}\nPlease provide a feedback: "
   },
   "errors": {
     "unexpected_format": "\nSorry, I didn't use the expected format, I MUST either use a tool (use one at time) OR give my best final answer.\n",