Add feature to override _ask_human_input function in Task (#2419)

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date: 2025-03-20 08:44:15 +00:00
parent fe0813e831
commit 0ff73d22d7
6 changed files with 82 additions and 3 deletions

View File

@@ -14,6 +14,31 @@ This feature is especially useful in complex decision-making processes or when a
To integrate human input into agent execution, set the `human_input` flag in the task definition. When enabled, the agent prompts the user for input before delivering its final answer.
This input can provide extra context, clarify ambiguities, or validate the agent's output.

## Customizing human input sources

By default, human input is collected via the command line using the `input()` function. However, you can override this behavior by providing a custom function to handle human input from different sources:

```python
import requests

def get_input_from_api(final_answer: str) -> str:
    """Get human feedback from an API instead of the CLI."""
    # Make an API call to get feedback on the agent's final answer
    response = requests.post(
        "https://your-api.com/feedback",
        json={"answer": final_answer},
    )
    return response.json()["feedback"]

task = Task(
    description="Analyze the latest market trends",
    expected_output="A detailed analysis of market trends",
    agent=analyst,
    human_input=True,
    ask_human_input=get_input_from_api  # Use the custom function
)
```

The custom function should:

- Accept a string parameter (the agent's final answer)
- Return a string (the human feedback)
- Return an empty string if the answer is acceptable and no further iterations are needed (see the sketch below)

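
For example, a minimal sketch of a function that satisfies this contract (the function name and prompt wording are illustrative only):

```python
def approve_or_revise(final_answer: str) -> str:
    """Collect feedback on the command line; an empty reply approves the answer."""
    print(f"Agent's final answer:\n{final_answer}\n")
    # Returning "" signals that the answer is acceptable and stops further iterations;
    # any other text is passed back to the agent as revision feedback.
    return input("Press Enter to approve, or type feedback to request changes: ").strip()
```
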
### Example:
```shell
@@ -95,4 +120,4 @@ result = crew.kickoff()
print("######################")
print(result)
```

View File

@@ -250,6 +250,7 @@ class Agent(BaseAgent):
"tool_names": self.agent_executor.tools_names,
"tools": self.agent_executor.tools_description,
"ask_for_human_input": task.human_input,
"ask_human_input_function": task.ask_human_input,
}
)["output"]
except Exception as e:

View File

@@ -81,6 +81,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self.respect_context_window = respect_context_window
        self.request_within_rpm_limit = request_within_rpm_limit
        self.ask_for_human_input = False
+       self.ask_human_input_function = None
        self.messages: List[Dict[str, str]] = []
        self.iterations = 0
        self.log_error_after = 3
@@ -103,6 +104,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        self._show_start_logs()
        self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
+       self.ask_human_input_function = inputs.get("ask_human_input_function", None)

        try:
            formatted_answer = self._invoke_loop()
@@ -533,7 +535,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
        Returns:
            AgentFinish: The final answer after processing feedback
        """
-       human_feedback = self._ask_human_input(formatted_answer.output)
+       # Get output from either the return_values dict or the output attribute
+       output = ""
+       if hasattr(formatted_answer, "return_values") and formatted_answer.return_values:
+           output = formatted_answer.return_values.get("output", "")
+       elif hasattr(formatted_answer, "output"):
+           output = formatted_answer.output
+
+       # Use the custom function if provided, otherwise use the default
+       if self.ask_human_input_function and callable(self.ask_human_input_function):
+           human_feedback = self.ask_human_input_function(output)
+       else:
+           human_feedback = self._ask_human_input(output)

        if self._is_training_mode():
            return self._handle_training_feedback(formatted_answer, human_feedback)
@@ -572,7 +585,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                self.ask_for_human_input = False
            else:
                answer = self._process_feedback_iteration(feedback)
-               feedback = self._ask_human_input(answer.output)
+               # Get output from either the return_values dict or the output attribute
+               output = ""
+               if hasattr(answer, "return_values") and answer.return_values:
+                   output = answer.return_values.get("output", "")
+               elif hasattr(answer, "output"):
+                   output = answer.output
+
+               # Use the custom function if provided, otherwise use the default
+               if self.ask_human_input_function and callable(self.ask_human_input_function):
+                   feedback = self.ask_human_input_function(output)
+               else:
+                   feedback = self._ask_human_input(output)

        return answer

View File

@@ -131,6 +131,10 @@ class Task(BaseModel):
description="Whether the task should have a human review the final answer of the agent",
default=False,
)
ask_human_input: Optional[Callable[[str], str]] = Field(
description="Function to override the default human input method. Should accept a string (final_answer) and return a string (human feedback)",
default=None,
)
converter_cls: Optional[Type[Converter]] = Field(
description="A converter class used to export structured output",
default=None,

View File

@@ -1205,6 +1205,7 @@ def test_agent_max_retry_limit():
"tool_names": "",
"tools": "",
"ask_for_human_input": True,
"ask_human_input_function": None,
}
),
mock.call(
@@ -1213,6 +1214,7 @@ def test_agent_max_retry_limit():
"tool_names": "",
"tools": "",
"ask_for_human_input": True,
"ask_human_input_function": None,
}
),
]

View File

@@ -0,0 +1,23 @@
import pytest
from unittest.mock import patch, MagicMock

from crewai.task import Task


def test_task_custom_human_input_parameter():
    """Test that the Task class accepts the ask_human_input parameter."""
    # Custom human input function
    def custom_input_func(final_answer):
        return "Custom feedback"

    # Create a task with the custom function
    task = Task(
        description="Test task",
        expected_output="Test output",
        human_input=True,
        ask_human_input=custom_input_func
    )

    # Verify the parameter was stored correctly
    assert task.ask_human_input == custom_input_func
    assert callable(task.ask_human_input)