From a651d7ddd31654e6a7651400b0b6c325d678371f Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Fri, 28 Feb 2025 10:25:50 +0000
Subject: [PATCH] fix: Address CI failures and code review feedback for
 multi-round dialogue

Co-Authored-By: Joe Moura
---
 .../base_agent_executor_mixin.py              |  3 +-
 src/crewai/agents/crew_agent_executor.py      |  3 ++
 src/crewai/task.py                            |  2 +
 tests/agent_test.py                           |  2 +
 tests/agents/test_multi_round_dialogue.py     | 41 ++++++++++++++++++-
 5 files changed, 49 insertions(+), 2 deletions(-)

diff --git a/src/crewai/agents/agent_builder/base_agent_executor_mixin.py b/src/crewai/agents/agent_builder/base_agent_executor_mixin.py
index 4313a68ea..8ae977b14 100644
--- a/src/crewai/agents/agent_builder/base_agent_executor_mixin.py
+++ b/src/crewai/agents/agent_builder/base_agent_executor_mixin.py
@@ -105,8 +105,9 @@ class CrewAgentExecutorMixin:
         Returns:
             str: The user's feedback
         """
+        round_info = f"\033[1m\033[93mRound {current_round}/{max_rounds}\033[00m"
         self._printer.print(
-            content=f"\033[1m\033[95m ## Result (Round {current_round}/{max_rounds}):\033[00m \033[92m{final_answer}\033[00m"
+            content=f"\033[1m\033[95m ## Result {round_info}:\033[00m \033[92m{final_answer}\033[00m"
         )
 
         # Training mode prompt (single iteration)
diff --git a/src/crewai/agents/crew_agent_executor.py b/src/crewai/agents/crew_agent_executor.py
index 606b99975..bde8a8cc1 100644
--- a/src/crewai/agents/crew_agent_executor.py
+++ b/src/crewai/agents/crew_agent_executor.py
@@ -574,6 +574,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         Returns:
             AgentFinish: The final answer after processing feedback
         """
+        if max_rounds < 1:
+            raise ValueError("max_rounds must be positive")
+
         feedback = initial_feedback
         answer = current_answer
         current_round = 1
diff --git a/src/crewai/task.py b/src/crewai/task.py
index 97e18dd8e..e704896de 100644
--- a/src/crewai/task.py
+++ b/src/crewai/task.py
@@ -128,6 +128,8 @@ class Task(BaseModel):
     max_dialogue_rounds: int = Field(
         default=10,
         description="Maximum number of dialogue rounds for human input",
+        ge=1,  # Ensures positive integer
+        examples=[5, 10, 15],
     )
     converter_cls: Optional[Type[Converter]] = Field(
         description="A converter class used to export structured output",
diff --git a/tests/agent_test.py b/tests/agent_test.py
index 5e1083c4b..147116892 100644
--- a/tests/agent_test.py
+++ b/tests/agent_test.py
@@ -1206,6 +1206,7 @@ def test_agent_max_retry_limit():
                 "tool_names": "",
                 "tools": "",
                 "ask_for_human_input": True,
+                "max_dialogue_rounds": 10,
             }
         ),
         mock.call(
@@ -1214,6 +1215,7 @@
                 "tool_names": "",
                 "tools": "",
                 "ask_for_human_input": True,
+                "max_dialogue_rounds": 10,
             }
         ),
     ]
diff --git a/tests/agents/test_multi_round_dialogue.py b/tests/agents/test_multi_round_dialogue.py
index fcb5544a3..2ddf90e6e 100644
--- a/tests/agents/test_multi_round_dialogue.py
+++ b/tests/agents/test_multi_round_dialogue.py
@@ -1,7 +1,8 @@
 import unittest
-from unittest.mock import patch
+from unittest.mock import patch, MagicMock
 
 from crewai.task import Task
+from langchain_core.agents import AgentFinish
 
 
 class TestMultiRoundDialogue(unittest.TestCase):
@@ -31,6 +32,44 @@ class TestMultiRoundDialogue(unittest.TestCase):
         # Verify the custom value
         self.assertEqual(task.max_dialogue_rounds, 5)
 
+
+    def test_task_max_dialogue_rounds_validation(self):
+        """Test that Task validates max_dialogue_rounds as a positive integer."""
+        # Create a task with invalid max_dialogue_rounds
+        with self.assertRaises(ValueError):
+            task = Task(
+                description="Test task",
+                expected_output="Test output",
+                human_input=True,
+                max_dialogue_rounds=0
+            )
+
+    def test_handle_regular_feedback_rounds(self):
+        """Test that _handle_regular_feedback correctly handles multiple rounds."""
+        from crewai.agents.crew_agent_executor import CrewAgentExecutor
+
+        # Create a simple mock executor
+        executor = MagicMock()
+        executor.ask_for_human_input = True
+        executor._ask_human_input = MagicMock(side_effect=["Feedback", ""])
+        executor._process_feedback_iteration = MagicMock(return_value=MagicMock())
+
+        # Create a sample initial answer
+        initial_answer = MagicMock()
+
+        # Call the method directly
+        CrewAgentExecutor._handle_regular_feedback(
+            executor,
+            initial_answer,
+            "Initial feedback",
+            max_rounds=3
+        )
+
+        # Verify the expected number of interactions:
+        # _ask_human_input returns "Feedback" once, then "" to end the loop
+        self.assertEqual(executor._ask_human_input.call_count, 2)
+        # _process_feedback_iteration handles the initial feedback and the follow-up round
+        self.assertEqual(executor._process_feedback_iteration.call_count, 2)
 
 
 if __name__ == "__main__":
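
Note (not part of the commit): a minimal sketch of the dialogue-loop contract
that test_handle_regular_feedback_rounds asserts. The names mirror the patched
methods, but the body is illustrative only; the actual
CrewAgentExecutor._handle_regular_feedback implementation may differ.

    def handle_regular_feedback_sketch(answer, initial_feedback, ask, process, max_rounds=10):
        # Guard mirrors the patched check in crew_agent_executor.py
        if max_rounds < 1:
            raise ValueError("max_rounds must be positive")
        feedback = initial_feedback
        current_round = 1
        while feedback and current_round <= max_rounds:
            answer = process(answer, feedback)  # one feedback iteration
            feedback = ask(answer)              # empty string ends the dialogue
            current_round += 1
        return answer

With the mocks above (ask yielding "Feedback" then ""), this loop calls
process twice and ask twice, matching the call_count assertions in the test.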