fix: Address CI failures and code review feedback for multi-round dialogue

Co-Authored-By: Joe Moura <joao@crewai.com>
Author: Devin AI
Date:   2025-02-28 10:25:50 +00:00
parent 1225071e00
commit a651d7ddd3
5 changed files with 49 additions and 2 deletions


@@ -105,8 +105,9 @@ class CrewAgentExecutorMixin:
         Returns:
             str: The user's feedback
         """
+        round_info = f"\033[1m\033[93mRound {current_round}/{max_rounds}\033[00m"
         self._printer.print(
-            content=f"\033[1m\033[95m ## Result (Round {current_round}/{max_rounds}):\033[00m \033[92m{final_answer}\033[00m"
+            content=f"\033[1m\033[95m ## Result {round_info}:\033[00m \033[92m{final_answer}\033[00m"
         )
 
         # Training mode prompt (single iteration)

@@ -574,6 +574,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         Returns:
             AgentFinish: The final answer after processing feedback
         """
+        if max_rounds < 1:
+            raise ValueError("max_rounds must be positive")
+
         feedback = initial_feedback
         answer = current_answer
         current_round = 1
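
For context, a hedged sketch of the loop this guard protects, using only the names visible above; the loop body is an assumption reconstructed from the new tests (empty feedback ends the dialogue), not crewAI's actual implementation:

def _handle_regular_feedback(self, current_answer, initial_feedback, max_rounds=10):
    # Guard added in this commit: reject non-positive limits up front.
    if max_rounds < 1:
        raise ValueError("max_rounds must be positive")
    feedback = initial_feedback
    answer = current_answer
    current_round = 1
    # Iterate until the human submits empty feedback or rounds run out.
    while feedback and current_round <= max_rounds:
        answer = self._process_feedback_iteration(feedback)
        feedback = self._ask_human_input(answer)
        current_round += 1
    return answer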


@@ -128,6 +128,8 @@ class Task(BaseModel):
     max_dialogue_rounds: int = Field(
         default=10,
         description="Maximum number of dialogue rounds for human input",
+        ge=1,  # Ensures positive integer
+        examples=[5, 10, 15],
     )
     converter_cls: Optional[Type[Converter]] = Field(
         description="A converter class used to export structured output",


@@ -1206,6 +1206,7 @@ def test_agent_max_retry_limit():
                     "tool_names": "",
                     "tools": "",
                     "ask_for_human_input": True,
+                    "max_dialogue_rounds": 10,
                 }
             ),
             mock.call(
@@ -1214,6 +1215,7 @@
                     "tool_names": "",
                     "tools": "",
                     "ask_for_human_input": True,
+                    "max_dialogue_rounds": 10,
                 }
             ),
         ]
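
Both expectations gain the new key because mock.call equality is purely structural: every keyword the code under test passes must also appear in the expected call. A minimal illustration of that comparison:

from unittest import mock

m = mock.MagicMock()
m(tool_names="", tools="", ask_for_human_input=True, max_dialogue_rounds=10)
assert m.call_args == mock.call(
    tool_names="", tools="", ask_for_human_input=True, max_dialogue_rounds=10
)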


@@ -1,7 +1,8 @@
 import unittest
-from unittest.mock import patch
+from unittest.mock import patch, MagicMock
+
 from crewai.task import Task
 from langchain_core.agents import AgentFinish
 
 
 class TestMultiRoundDialogue(unittest.TestCase):
@@ -31,6 +32,44 @@ class TestMultiRoundDialogue(unittest.TestCase):
         # Verify the custom value
         self.assertEqual(task.max_dialogue_rounds, 5)
 
+    def test_task_max_dialogue_rounds_validation(self):
+        """Test that Task validates max_dialogue_rounds as a positive integer."""
+        # Constructing a task with an invalid max_dialogue_rounds must raise
+        with self.assertRaises(ValueError):
+            Task(
+                description="Test task",
+                expected_output="Test output",
+                human_input=True,
+                max_dialogue_rounds=0,
+            )
+
+    def test_handle_regular_feedback_rounds(self):
+        """Test that _handle_regular_feedback correctly handles multiple rounds."""
+        from crewai.agents.crew_agent_executor import CrewAgentExecutor
+
+        # Create a simple mock executor
+        executor = MagicMock()
+        executor.ask_for_human_input = True
+        executor._ask_human_input = MagicMock(side_effect=["Feedback", ""])
+        executor._process_feedback_iteration = MagicMock(return_value=MagicMock())
+
+        # Create a sample initial answer
+        initial_answer = MagicMock()
+
+        # Call the method directly
+        CrewAgentExecutor._handle_regular_feedback(
+            executor,
+            initial_answer,
+            "Initial feedback",
+            max_rounds=3,
+        )
+
+        # First call returns feedback, second returns "" to end the loop
+        self.assertEqual(executor._ask_human_input.call_count, 2)
+
+        # _process_feedback_iteration runs for the initial feedback and the first round
+        self.assertEqual(executor._process_feedback_iteration.call_count, 2)
 
 
 if __name__ == "__main__":
     unittest.main()
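
Since the module ends with the standard unittest entry point, it can also be run directly; the file name below is a placeholder, as the actual path is not shown in this view:

python path/to/test_multi_round_dialogue.py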