Mirror of https://github.com/crewAIInc/crewAI.git
feature: human input per task
@@ -18,6 +18,7 @@ from crewai.utilities import I18N
 
 class CrewAgentExecutor(AgentExecutor):
     _i18n: I18N = I18N()
+    human_input: bool = False
     llm: Any = None
     iterations: int = 0
     task: Any = None
@@ -54,6 +55,8 @@ class CrewAgentExecutor(AgentExecutor):
             [tool.name for tool in self.tools], excluded_colors=["green", "red"]
         )
         intermediate_steps: List[Tuple[AgentAction, str]] = []
+        # Get info about human input from Task
+        self.human_input = self.task.human_input
         # Let's start tracking the number of iterations and time elapsed
         self.iterations = 0
         time_elapsed = 0.0
@@ -169,8 +172,23 @@ class CrewAgentExecutor(AgentExecutor):
 
         # If the tool chosen is the finishing tool, then we end and return.
         if isinstance(output, AgentFinish):
-            yield output
-            return
+            if self.human_input:
+                self.human_input = False
+                human_feedback = self._human_input(output.return_values["output"])
+                action = AgentAction(
+                    tool="Human Input", tool_input=human_feedback, log=output.log
+                )
+                yield AgentStep(
+                    action=action,
+                    observation=self._i18n.slice("human_feedback").format(
+                        human_feedback=human_feedback
+                    ),
+                )
+                return
+
+            else:
+                yield output
+                return
 
         actions: List[AgentAction]
         actions = [output] if isinstance(output, AgentAction) else output
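The hunk above reroutes AgentFinish: instead of ending immediately, the executor asks for feedback once, replays it as a "Human Input" step, and lets the agent produce a revised final answer. A minimal, self-contained sketch of that control flow, with a fake agent step standing in for the LLM (fake_agent_step and run are hypothetical names for illustration, not crewAI APIs):

# Sketch of the intercept-on-finish loop, assuming a fake agent.
def fake_agent_step(feedback_log):
    """Stand-in for the LLM: folds any feedback into its final answer."""
    if feedback_log:
        return "final answer, revised after: " + feedback_log[-1]
    return "final answer"

def run(human_input=True):
    feedback_log = []
    while True:
        output = fake_agent_step(feedback_log)
        if human_input:
            human_input = False  # ask exactly once, as in the diff
            feedback = input(
                "This is the agent final answer: %s\nPlease provide feedback: " % output
            )
            feedback_log.append(feedback)  # becomes the next observation
            continue
        return output

print(run())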
@@ -203,3 +221,9 @@ class CrewAgentExecutor(AgentExecutor):
                 tools=", ".join([tool.name for tool in self.tools]),
             )
             yield AgentStep(action=agent_action, observation=observation)
+
+    def _human_input(self, final_answer: dict) -> str:
+        """Get human input."""
+        return input(
+            self._i18n.slice("getting_input").format(final_answer=final_answer)
+        )
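_human_input blocks on stdin via the builtin input(), so it can be exercised non-interactively by swapping that builtin out. A hedged test sketch; DummyExecutor is a stand-in, not the real class (note the diff annotates final_answer as dict, although output.return_values["output"] is typically a string):

import builtins

class DummyExecutor:
    """Stand-in mirroring the _human_input shape from the diff above."""

    def _human_input(self, final_answer):
        return input(
            "This is the agent final answer: %s\nPlease provide feedback: " % final_answer
        )

# Simulate a user typing feedback by temporarily replacing builtins.input.
original_input = builtins.input
builtins.input = lambda prompt="": "Looks good, but cite your sources."
try:
    feedback = DummyExecutor()._human_input("The sky is blue.")
    assert feedback == "Looks good, but cite your sources."
finally:
    builtins.input = original_input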
@@ -70,6 +70,10 @@ class Task(BaseModel):
         frozen=True,
         description="Unique identifier for the object, not set by user.",
     )
+    human_input: Optional[bool] = Field(
+        description="Whether the task should have a human review the final answer of the agent",
+        default=False,
+    )
 
     def __init__(__pydantic_self__, **data):
         config = data.pop("config", {})
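With the field in place, human review becomes a per-task switch. A hedged usage sketch; the Agent and Crew arguments here are illustrative and may not match this exact version of the crewAI API:

from crewai import Agent, Crew, Task

researcher = Agent(
    role="Researcher",
    goal="Summarize the latest AI news",
    backstory="An analyst who writes crisp, sourced briefings.",
)

# human_input=True pauses after this task's final answer and asks the
# operator for feedback before the result is accepted.
briefing = Task(
    description="Write a one-paragraph briefing on today's AI news.",
    agent=researcher,
    human_input=True,
)

crew = Crew(agents=[researcher], tasks=[briefing])
print(crew.kickoff())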
@@ -15,7 +15,9 @@
     "final_answer_format": "If you don't need to use any more tools, you must give your best complete final answer, make sure it satisfy the expect criteria, use the EXACT format below:\n\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task.\n\n",
     "format_without_tools": "\nSorry, I didn't use the right format. I MUST either use a tool (among the available ones), OR give my best final answer.\nI just remembered the expected format I must follow:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now can give a great answer\nFinal Answer: my best complete final answer to the task\nYour final answer must be the great and the most complete as possible, it must be outcome described\n\n",
     "task_with_context": "{task}\n\nThis is the context you're working with:\n{context}",
-    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary."
+    "expected_output": "\nThis is the expect criteria for your final answer: {expected_output} \n you MUST return the actual complete content as the final answer, not a summary.",
+    "human_feedback": "You got human feedback on your work, re-evaluate it and give a new Final Answer when ready.\n {human_feedback}",
+    "getting_input": "This is the agent final answer: {final_answer}\nPlease provide feedback: "
   },
   "errors": {
     "unexpected_format": "\nSorry, I didn't use the expected format, I MUST either use a tool (use one at time) OR give my best final answer.\n",
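Both new slices are plain str.format templates, so their rendering can be checked without the executor. A quick sketch using the strings as added above:

getting_input = "This is the agent final answer: {final_answer}\nPlease provide feedback: "
human_feedback = (
    "You got human feedback on your work, re-evaluate it and give a new "
    "Final Answer when ready.\n {human_feedback}"
)

print(getting_input.format(final_answer="The sky is blue."))
print(human_feedback.format(human_feedback="Add a citation."))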