feat: Add multi-round dialogue functionality (fixes #2250)

- Add max_dialogue_rounds parameter to the Task class (default: 10)
- Update _handle_regular_feedback to track the current round and stop once max_dialogue_rounds is reached
- Update _ask_human_input to display the current round and the round limit
- Pass max_dialogue_rounds through the agent execution flow into the executor
- Add tests for the Task.max_dialogue_rounds parameter

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date: 2025-02-28 10:13:52 +00:00
commit 1225071e00 (parent 86825e1769)
5 changed files with 76 additions and 12 deletions
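For orientation, here is a minimal sketch of how the new parameter is meant to be used from the public API. The agent configuration is illustrative and assumes an LLM is already configured via environment variables; only `human_input` and `max_dialogue_rounds` relate to this commit.

```python
from crewai import Agent, Crew, Task

# Illustrative agent; any role/goal/LLM works here.
writer = Agent(
    role="Release note writer",
    goal="Draft concise release notes",
    backstory="An experienced technical writer.",
)

task = Task(
    description="Summarize the changes in this release",
    expected_output="A short, reviewer-approved summary",
    human_input=True,        # ask a human to review the result
    max_dialogue_rounds=3,   # new: stop asking for feedback after 3 rounds
)

crew = Crew(agents=[writer], tasks=[task])
result = crew.kickoff()
```

With the default of 10, existing crews keep their current flow except that the previously unbounded feedback loop is now capped.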


@@ -249,6 +249,7 @@ class Agent(BaseAgent):
                     "tool_names": self.agent_executor.tools_names,
                     "tools": self.agent_executor.tools_description,
                     "ask_for_human_input": task.human_input,
+                    "max_dialogue_rounds": task.max_dialogue_rounds,
                 }
             )["output"]
         except Exception as e:


@@ -94,10 +94,19 @@ class CrewAgentExecutorMixin:
                 print(f"Failed to add to long term memory: {e}")
                 pass

-    def _ask_human_input(self, final_answer: str) -> str:
-        """Prompt human input with mode-appropriate messaging."""
+    def _ask_human_input(self, final_answer: str, current_round: int = 1, max_rounds: int = 10) -> str:
+        """Prompt human input with mode-appropriate messaging.
+
+        Args:
+            final_answer: The final answer from the agent
+            current_round: The current dialogue round (default: 1)
+            max_rounds: Maximum number of dialogue rounds (default: 10)
+
+        Returns:
+            str: The user's feedback
+        """
         self._printer.print(
-            content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
+            content=f"\033[1m\033[95m ## Result (Round {current_round}/{max_rounds}):\033[00m \033[92m{final_answer}\033[00m"
         )

         # Training mode prompt (single iteration)
@@ -113,7 +122,7 @@ class CrewAgentExecutorMixin:
         else:
             prompt = (
                 "\n\n=====\n"
-                "## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
+                f"## HUMAN FEEDBACK (Round {current_round}/{max_rounds}): Provide feedback on the Result and Agent's actions.\n"
                 "Please follow these guidelines:\n"
                 " - If you are happy with the result, simply hit Enter without typing anything.\n"
                 " - Otherwise, provide specific improvement requests.\n"


@@ -103,6 +103,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self._show_start_logs()

         self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
+        max_rounds = int(inputs.get("max_dialogue_rounds", 10))

         try:
             formatted_answer = self._invoke_loop()
@@ -121,7 +122,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 raise e

         if self.ask_for_human_input:
-            formatted_answer = self._handle_human_feedback(formatted_answer)
+            formatted_answer = self._handle_human_feedback(formatted_answer, max_rounds)

         self._create_short_term_memory(formatted_answer)
         self._create_long_term_memory(formatted_answer)
@@ -524,21 +525,22 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         prompt = prompt.rstrip()
         return {"role": role, "content": prompt}

-    def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
+    def _handle_human_feedback(self, formatted_answer: AgentFinish, max_rounds: int = 10) -> AgentFinish:
         """Handle human feedback with different flows for training vs regular use.

         Args:
             formatted_answer: The initial AgentFinish result to get feedback on
+            max_rounds: Maximum number of dialogue rounds (default: 10)

         Returns:
             AgentFinish: The final answer after processing feedback
         """
-        human_feedback = self._ask_human_input(formatted_answer.output)
+        human_feedback = self._ask_human_input(formatted_answer.output, 1, max_rounds)

         if self._is_training_mode():
             return self._handle_training_feedback(formatted_answer, human_feedback)

-        return self._handle_regular_feedback(formatted_answer, human_feedback)
+        return self._handle_regular_feedback(formatted_answer, human_feedback, max_rounds)

     def _is_training_mode(self) -> bool:
         """Check if crew is in training mode."""
@@ -560,19 +562,30 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         return improved_answer

     def _handle_regular_feedback(
-        self, current_answer: AgentFinish, initial_feedback: str
+        self, current_answer: AgentFinish, initial_feedback: str, max_rounds: int = 10
     ) -> AgentFinish:
-        """Process feedback for regular use with potential multiple iterations."""
+        """Process feedback for regular use with potential multiple iterations.
+
+        Args:
+            current_answer: The initial AgentFinish result to get feedback on
+            initial_feedback: The initial feedback from the user
+            max_rounds: Maximum number of dialogue rounds (default: 10)
+
+        Returns:
+            AgentFinish: The final answer after processing feedback
+        """
         feedback = initial_feedback
         answer = current_answer
+        current_round = 1

-        while self.ask_for_human_input:
+        while self.ask_for_human_input and current_round <= max_rounds:
             # If the user provides a blank response, assume they are happy with the result
             if feedback.strip() == "":
                 self.ask_for_human_input = False
             else:
                 answer = self._process_feedback_iteration(feedback)
-                feedback = self._ask_human_input(answer.output)
+                feedback = self._ask_human_input(answer.output, current_round, max_rounds)
+                current_round += 1

         return answer
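The termination logic introduced in this hunk, isolated from the executor's state, is sketched below. The callables stand in for `_ask_human_input` and `_process_feedback_iteration`; this is a sketch of the control flow, not the class itself.

```python
from typing import Callable

def bounded_feedback_loop(
    answer: str,
    ask: Callable[[str, int, int], str],   # stand-in for _ask_human_input
    revise: Callable[[str], str],          # stand-in for _process_feedback_iteration
    max_rounds: int = 10,
) -> str:
    """Iterate on an answer until the reviewer sends a blank reply or
    the round budget is exhausted, whichever comes first."""
    feedback = ask(answer, 1, max_rounds)  # initial prompt, as in _handle_human_feedback
    current_round = 1
    while current_round <= max_rounds:
        if feedback.strip() == "":
            break  # blank reply: reviewer is satisfied
        answer = revise(feedback)
        feedback = ask(answer, current_round, max_rounds)
        current_round += 1
    return answer
```

Note that the round counter only advances on non-blank feedback, so a blank reply ends the dialogue immediately instead of consuming a round.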


@@ -125,6 +125,10 @@ class Task(BaseModel):
         description="Whether the task should have a human review the final answer of the agent",
         default=False,
     )
+    max_dialogue_rounds: int = Field(
+        default=10,
+        description="Maximum number of dialogue rounds for human input",
+    )
     converter_cls: Optional[Type[Converter]] = Field(
         description="A converter class used to export structured output",
         default=None,


@@ -0,0 +1,37 @@
+import unittest
+from unittest.mock import patch
+
+from crewai.task import Task
+
+
+class TestMultiRoundDialogue(unittest.TestCase):
+    """Test the multi-round dialogue functionality."""
+
+    def test_task_max_dialogue_rounds_default(self):
+        """Test that Task has a default max_dialogue_rounds of 10."""
+        # Create a task with default max_dialogue_rounds
+        task = Task(
+            description="Test task",
+            expected_output="Test output",
+            human_input=True
+        )
+
+        # Verify the default value
+        self.assertEqual(task.max_dialogue_rounds, 10)
+
+    def test_task_max_dialogue_rounds_custom(self):
+        """Test that Task accepts a custom max_dialogue_rounds."""
+        # Create a task with custom max_dialogue_rounds
+        task = Task(
+            description="Test task",
+            expected_output="Test output",
+            human_input=True,
+            max_dialogue_rounds=5
+        )
+
+        # Verify the custom value
+        self.assertEqual(task.max_dialogue_rounds, 5)
+
+
+if __name__ == "__main__":
+    unittest.main()
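A test one might add on top of these for the cap itself, exercising `_handle_regular_feedback` directly with mocks. The import path and the mock wiring are assumptions about the executor's internals not confirmed by this diff, so treat it as a sketch rather than a drop-in test:

```python
import unittest
from unittest.mock import MagicMock

# Assumed module path for the executor; adjust if the package layout differs.
from crewai.agents.crew_agent_executor import CrewAgentExecutor


class TestRoundCapSketch(unittest.TestCase):
    """Sketch: the feedback loop stops after max_rounds even if the reviewer never accepts."""

    def test_regular_feedback_stops_at_max_rounds(self):
        executor = MagicMock()
        executor.ask_for_human_input = True
        answer = MagicMock(output="draft")
        executor._process_feedback_iteration.return_value = answer
        executor._ask_human_input.return_value = "keep improving"  # never blank

        result = CrewAgentExecutor._handle_regular_feedback(
            executor, answer, "keep improving", max_rounds=3
        )

        self.assertEqual(result, answer)
        # One follow-up prompt per completed round, bounded by max_rounds
        self.assertEqual(executor._ask_human_input.call_count, 3)


if __name__ == "__main__":
    unittest.main()
```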