diff --git a/docs/how-to/human-input-on-execution.mdx b/docs/how-to/human-input-on-execution.mdx
index bf243981d..06b3ba588 100644
--- a/docs/how-to/human-input-on-execution.mdx
+++ b/docs/how-to/human-input-on-execution.mdx
@@ -14,6 +14,31 @@ This feature is especially useful in complex decision-making processes or when a
 
 To integrate human input into agent execution, set the `human_input` flag in the task definition. When enabled, the agent prompts the user for input before delivering its final answer. This input can provide extra context, clarify ambiguities, or validate the agent's output.
 
+## Customizing human input sources
+
+By default, human input is collected via the command line using the `input()` function. However, you can override this behavior by providing a custom function to handle human input from different sources:
+
+```python
+def get_input_from_api(final_answer: str) -> str:
+    """Get human feedback from an API instead of CLI."""
+    # Make an API call to get feedback
+    response = requests.post("https://your-api.com/feedback", json={"answer": final_answer})
+    return response.json()["feedback"]
+
+task = Task(
+    description="Analyze the latest market trends",
+    expected_output="A detailed analysis of market trends",
+    agent=analyst,
+    human_input=True,
+    ask_human_input=get_input_from_api  # Use the custom function
+)
+```
+
+The custom function should:
+- Accept a string parameter (the agent's final answer)
+- Return a string (the human feedback)
+- Return an empty string if the answer is acceptable and no further iterations are needed
+
 ### Example:
 
 ```shell
@@ -95,4 +120,4 @@ result = crew.kickoff()
 
 print("######################")
 print(result)
-```
\ No newline at end of file
+```
diff --git a/src/crewai/agent.py b/src/crewai/agent.py
index d10b768d4..900a6d5ae 100644
--- a/src/crewai/agent.py
+++ b/src/crewai/agent.py
@@ -250,6 +250,7 @@ class Agent(BaseAgent):
                     "tool_names": self.agent_executor.tools_names,
                     "tools": self.agent_executor.tools_description,
                     "ask_for_human_input": task.human_input,
+                    "ask_human_input_function": task.ask_human_input,
                 }
             )["output"]
         except Exception as e:
diff --git a/src/crewai/agents/crew_agent_executor.py b/src/crewai/agents/crew_agent_executor.py
index 452b343c8..1a0670340 100644
--- a/src/crewai/agents/crew_agent_executor.py
+++ b/src/crewai/agents/crew_agent_executor.py
@@ -81,6 +81,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.respect_context_window = respect_context_window
         self.request_within_rpm_limit = request_within_rpm_limit
         self.ask_for_human_input = False
+        self.ask_human_input_function = None
         self.messages: List[Dict[str, str]] = []
         self.iterations = 0
         self.log_error_after = 3
@@ -103,6 +104,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self._show_start_logs()
 
         self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
+        self.ask_human_input_function = inputs.get("ask_human_input_function", None)
 
         try:
             formatted_answer = self._invoke_loop()
@@ -533,7 +535,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         Returns:
             AgentFinish: The final answer after processing feedback
         """
-        human_feedback = self._ask_human_input(formatted_answer.output)
+        # Get output from either return_values dict or output attribute
+        output = ""
+        if hasattr(formatted_answer, "return_values") and formatted_answer.return_values:
+            output = formatted_answer.return_values.get("output", "")
+        elif hasattr(formatted_answer, "output"):
+            output = formatted_answer.output
+
+        # Use custom function if provided, otherwise use default
+        if self.ask_human_input_function and callable(self.ask_human_input_function):
+            human_feedback = self.ask_human_input_function(output)
+        else:
+            human_feedback = self._ask_human_input(output)
 
         if self._is_training_mode():
             return self._handle_training_feedback(formatted_answer, human_feedback)
@@ -572,7 +585,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 self.ask_for_human_input = False
             else:
                 answer = self._process_feedback_iteration(feedback)
-                feedback = self._ask_human_input(answer.output)
+                # Get output from either return_values dict or output attribute
+                output = ""
+                if hasattr(answer, "return_values") and answer.return_values:
+                    output = answer.return_values.get("output", "")
+                elif hasattr(answer, "output"):
+                    output = answer.output
+
+                # Use custom function if provided, otherwise use default
+                if self.ask_human_input_function and callable(self.ask_human_input_function):
+                    feedback = self.ask_human_input_function(output)
+                else:
+                    feedback = self._ask_human_input(output)
 
         return answer
 
diff --git a/src/crewai/task.py b/src/crewai/task.py
index be400e99a..973abada1 100644
--- a/src/crewai/task.py
+++ b/src/crewai/task.py
@@ -131,6 +131,10 @@ class Task(BaseModel):
         description="Whether the task should have a human review the final answer of the agent",
         default=False,
     )
+    ask_human_input: Optional[Callable[[str], str]] = Field(
+        description="Function to override the default human input method. Should accept a string (final_answer) and return a string (human feedback)",
+        default=None,
+    )
     converter_cls: Optional[Type[Converter]] = Field(
         description="A converter class used to export structured output",
         default=None,
diff --git a/tests/agent_test.py b/tests/agent_test.py
index b5b3aae93..a9f358240 100644
--- a/tests/agent_test.py
+++ b/tests/agent_test.py
@@ -1205,6 +1205,7 @@ def test_agent_max_retry_limit():
                    "tool_names": "",
                    "tools": "",
                    "ask_for_human_input": True,
+                    "ask_human_input_function": None,
                }
            ),
            mock.call(
@@ -1213,6 +1214,7 @@ def test_agent_max_retry_limit():
                    "tool_names": "",
                    "tools": "",
                    "ask_for_human_input": True,
+                    "ask_human_input_function": None,
                }
            ),
        ]
diff --git a/tests/test_custom_human_input.py b/tests/test_custom_human_input.py
new file mode 100644
index 000000000..3cf66b09b
--- /dev/null
+++ b/tests/test_custom_human_input.py
@@ -0,0 +1,23 @@
+import pytest
+from unittest.mock import patch, MagicMock
+
+from crewai.task import Task
+
+
+def test_task_custom_human_input_parameter():
+    """Test that the Task class accepts the ask_human_input parameter."""
+    # Custom human input function
+    def custom_input_func(final_answer):
+        return "Custom feedback"
+
+    # Create a task with the custom function
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        human_input=True,
+        ask_human_input=custom_input_func
+    )
+
+    # Verify the parameter was stored correctly
+    assert task.ask_human_input == custom_input_func
+    assert callable(task.ask_human_input)
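
Usage sketch (not part of the patch): the docs hunk above defines the contract for `ask_human_input` — the callable receives the agent's final answer as a string, returns the human feedback as a string, and returns an empty string to accept the answer with no further iterations. Below is a minimal illustration of that contract using a file-based reviewer; the file paths and the `file_based_input` helper are hypothetical, and only `Task`, `human_input`, and `ask_human_input` come from the changes in this diff.

```python
from pathlib import Path

from crewai import Task

# Hypothetical feedback channel: a local file a reviewer edits out-of-band.
FEEDBACK_FILE = Path("feedback.txt")


def file_based_input(final_answer: str) -> str:
    """Return feedback for `final_answer`, or an empty string to accept it."""
    # Persist the answer so the reviewer can read it outside the process.
    Path("last_answer.txt").write_text(final_answer, encoding="utf-8")

    if FEEDBACK_FILE.exists():
        feedback = FEEDBACK_FILE.read_text(encoding="utf-8").strip()
        FEEDBACK_FILE.unlink()  # consume the feedback so it is applied only once
        return feedback

    # Empty string tells the executor the answer is acceptable as-is.
    return ""


task = Task(
    description="Summarize this week's support tickets",
    expected_output="A short summary grouped by topic",
    human_input=True,
    ask_human_input=file_based_input,
)
```

Because the executor falls back to `self._ask_human_input` whenever no callable is provided, existing CLI-based `human_input` workflows are unaffected by this change.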