mirror of https://github.com/crewAIInc/crewAI.git
synced 2026-01-29 10:08:13 +00:00
Add feature to override _ask_human_input function in Task (#2419)
Co-Authored-By: Joe Moura <joao@crewai.com>
@@ -14,6 +14,31 @@ This feature is especially useful in complex decision-making processes or when a
 To integrate human input into agent execution, set the `human_input` flag in the task definition. When enabled, the agent prompts the user for input before delivering its final answer.
 This input can provide extra context, clarify ambiguities, or validate the agent's output.
 
+## Customizing human input sources
+
+By default, human input is collected via the command line using the `input()` function. However, you can override this behavior by providing a custom function to handle human input from different sources:
+
+```python
+def get_input_from_api(final_answer: str) -> str:
+    """Get human feedback from an API instead of the CLI."""
+    # Make an API call to get feedback (assumes `requests` is imported)
+    response = requests.post("https://your-api.com/feedback", json={"answer": final_answer})
+    return response.json()["feedback"]
+
+task = Task(
+    description="Analyze the latest market trends",
+    expected_output="A detailed analysis of market trends",
+    agent=analyst,
+    human_input=True,
+    ask_human_input=get_input_from_api  # Use the custom function
+)
+```
+
+The custom function should:
+- Accept a string parameter (the agent's final answer)
+- Return a string (the human feedback)
+- Return an empty string if the answer is acceptable and no further iterations are needed
+
 ### Example:
 
 ```shell
@@ -95,4 +120,4 @@ result = crew.kickoff()
 
 print("######################")
 print(result)
 ```
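The contract documented above (string in, string out, empty string to accept) is small enough to back with sources other than an HTTP API. Below is a minimal sketch of a file-based variant; the drop-file paths `answer.txt` and `feedback.txt` are illustrative assumptions, not part of this commit:

```python
import os
import time

def get_input_from_file(final_answer: str) -> str:
    """Hypothetical example: collect human feedback via a drop file."""
    # Publish the agent's final answer for a reviewer to read
    with open("answer.txt", "w") as f:
        f.write(final_answer)
    # Poll until the reviewer writes feedback; an empty file accepts the answer
    while not os.path.exists("feedback.txt"):
        time.sleep(1)
    with open("feedback.txt") as f:
        return f.read().strip()

# Wired up exactly like the API example: ask_human_input=get_input_from_file
```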
@@ -250,6 +250,7 @@ class Agent(BaseAgent):
                     "tool_names": self.agent_executor.tools_names,
                     "tools": self.agent_executor.tools_description,
                     "ask_for_human_input": task.human_input,
+                    "ask_human_input_function": task.ask_human_input,
                 }
             )["output"]
         except Exception as e:
@@ -81,6 +81,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.respect_context_window = respect_context_window
         self.request_within_rpm_limit = request_within_rpm_limit
         self.ask_for_human_input = False
+        self.ask_human_input_function = None
         self.messages: List[Dict[str, str]] = []
         self.iterations = 0
         self.log_error_after = 3
@@ -103,6 +104,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self._show_start_logs()
 
         self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
+        self.ask_human_input_function = inputs.get("ask_human_input_function", None)
 
         try:
             formatted_answer = self._invoke_loop()
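Because the executor pulls the callable with `inputs.get("ask_human_input_function", None)`, payloads that predate this commit keep working unchanged. A trivial illustration of that fallback, using plain dicts with no crewAI imports:

```python
# Legacy payloads omit the new key entirely and fall back to None,
# which later routes to the default CLI prompt.
legacy_inputs = {"ask_for_human_input": True}
assert legacy_inputs.get("ask_human_input_function", None) is None

# New payloads may carry any str -> str callable.
new_inputs = {"ask_human_input_function": lambda answer: ""}
assert callable(new_inputs["ask_human_input_function"])
```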
@@ -533,7 +535,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         Returns:
             AgentFinish: The final answer after processing feedback
         """
-        human_feedback = self._ask_human_input(formatted_answer.output)
+        # Get output from either return_values dict or output attribute
+        output = ""
+        if hasattr(formatted_answer, "return_values") and formatted_answer.return_values:
+            output = formatted_answer.return_values.get("output", "")
+        elif hasattr(formatted_answer, "output"):
+            output = formatted_answer.output
+
+        # Use custom function if provided, otherwise use default
+        if self.ask_human_input_function and callable(self.ask_human_input_function):
+            human_feedback = self.ask_human_input_function(output)
+        else:
+            human_feedback = self._ask_human_input(output)
 
         if self._is_training_mode():
             return self._handle_training_feedback(formatted_answer, human_feedback)
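The extraction block above tolerates two answer shapes: objects exposing a `return_values` dict and objects exposing a plain `output` attribute. A standalone sketch of the same logic against two dummy classes; the class names are illustrative stand-ins, not crewAI types:

```python
from dataclasses import dataclass, field
from typing import Dict

@dataclass
class DictStyleAnswer:
    # Stands in for an AgentFinish-like object carrying return_values
    return_values: Dict[str, str] = field(default_factory=lambda: {"output": "from dict"})

@dataclass
class AttrStyleAnswer:
    # Stands in for an answer exposing a plain output attribute
    output: str = "from attr"

def extract_output(answer) -> str:
    """Mirrors the hasattr-based extraction in the hunk above."""
    if hasattr(answer, "return_values") and answer.return_values:
        return answer.return_values.get("output", "")
    if hasattr(answer, "output"):
        return answer.output
    return ""

assert extract_output(DictStyleAnswer()) == "from dict"
assert extract_output(AttrStyleAnswer()) == "from attr"
```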
@@ -572,7 +585,18 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             self.ask_for_human_input = False
         else:
             answer = self._process_feedback_iteration(feedback)
-            feedback = self._ask_human_input(answer.output)
+            # Get output from either return_values dict or output attribute
+            output = ""
+            if hasattr(answer, "return_values") and answer.return_values:
+                output = answer.return_values.get("output", "")
+            elif hasattr(answer, "output"):
+                output = answer.output
+
+            # Use custom function if provided, otherwise use default
+            if self.ask_human_input_function and callable(self.ask_human_input_function):
+                feedback = self.ask_human_input_function(output)
+            else:
+                feedback = self._ask_human_input(output)
 
         return answer
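The extract-then-dispatch block is now duplicated across both feedback paths. A possible follow-up (a hypothetical helper sketch, not part of this commit) would hoist it into one method:

```python
def _get_human_feedback(self, answer) -> str:
    """Hypothetical consolidation of the duplicated logic above."""
    # Get output from either return_values dict or output attribute
    output = ""
    if hasattr(answer, "return_values") and answer.return_values:
        output = answer.return_values.get("output", "")
    elif hasattr(answer, "output"):
        output = answer.output

    # Use the custom function if provided, otherwise the default CLI prompt
    if self.ask_human_input_function and callable(self.ask_human_input_function):
        return self.ask_human_input_function(output)
    return self._ask_human_input(output)
```

Both call sites would then shrink to a single `self._get_human_feedback(...)` call.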
@@ -131,6 +131,10 @@ class Task(BaseModel):
         description="Whether the task should have a human review the final answer of the agent",
         default=False,
     )
+    ask_human_input: Optional[Callable[[str], str]] = Field(
+        description="Function to override the default human input method. Should accept a string (final_answer) and return a string (human feedback)",
+        default=None,
+    )
     converter_cls: Optional[Type[Converter]] = Field(
         description="A converter class used to export structured output",
         default=None,
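Since the field is typed `Optional[Callable[[str], str]]`, any matching callable is accepted, including a lambda. A short illustration of an auto-accepting override (the task text is made up for the example):

```python
from crewai.task import Task

# An empty string tells the feedback loop no further iteration is needed,
# so this override accepts every answer without prompting anyone.
task = Task(
    description="Summarize the quarterly report",
    expected_output="A short summary",
    human_input=True,
    ask_human_input=lambda final_answer: "",
)
```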
@@ -1205,6 +1205,7 @@ def test_agent_max_retry_limit():
                     "tool_names": "",
                     "tools": "",
                     "ask_for_human_input": True,
+                    "ask_human_input_function": None,
                 }
             ),
             mock.call(
@@ -1213,6 +1214,7 @@ def test_agent_max_retry_limit():
                     "tool_names": "",
                     "tools": "",
                     "ask_for_human_input": True,
+                    "ask_human_input_function": None,
                 }
             ),
         ]
23  tests/test_custom_human_input.py  Normal file
@@ -0,0 +1,23 @@
+import pytest
+from unittest.mock import patch, MagicMock
+
+from crewai.task import Task
+
+
+def test_task_custom_human_input_parameter():
+    """Test that the Task class accepts the ask_human_input parameter."""
+    # Custom human input function
+    def custom_input_func(final_answer):
+        return "Custom feedback"
+
+    # Create a task with the custom function
+    task = Task(
+        description="Test task",
+        expected_output="Test output",
+        human_input=True,
+        ask_human_input=custom_input_func
+    )
+
+    # Verify the parameter was stored correctly
+    assert task.ask_human_input == custom_input_func
+    assert callable(task.ask_human_input)
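A companion test one might add (not included in this PR) to pin down the default value the new field declares:

```python
def test_task_ask_human_input_defaults_to_none():
    """Without an override, the new field should default to None."""
    task = Task(description="Test task", expected_output="Test output")
    assert task.ask_human_input is None
```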