diff --git a/src/crewai/agents/crew_agent_executor.py b/src/crewai/agents/crew_agent_executor.py
index 2c880a634..626e5815e 100644
--- a/src/crewai/agents/crew_agent_executor.py
+++ b/src/crewai/agents/crew_agent_executor.py
@@ -87,35 +87,62 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self.llm.stop = self.stop
 
     def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
         if "system" in self.prompt:
             system_prompt = self._format_prompt(self.prompt.get("system", ""), inputs)
             user_prompt = self._format_prompt(self.prompt.get("user", ""), inputs)
             self.messages.append(self._format_msg(system_prompt, role="system"))
             self.messages.append(self._format_msg(user_prompt))
         else:
             user_prompt = self._format_prompt(self.prompt.get("prompt", ""), inputs)
             self.messages.append(self._format_msg(user_prompt))
 
         self._show_start_logs()
 
         self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
 
         formatted_answer = self._invoke_loop()
 
-        if self.ask_for_human_input:
+        # Keep iterating until the LLM classifies the human feedback as
+        # "satisfied"; each round feeds the feedback back into the agent loop.
+        while self.ask_for_human_input:
             human_feedback = self._ask_human_input(formatted_answer.output)
+
             if self.crew and self.crew._train:
                 self._handle_crew_training_output(formatted_answer, human_feedback)
 
-            # Making sure we only ask for it once, so disabling for the next thought loop
-            self.ask_for_human_input = False
-            self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
-            formatted_answer = self._invoke_loop()
+            # Ask the LLM to classify whether the feedback requests further
+            # changes ("true") or signals satisfaction ("false").
+            additional_changes_prompt = self._i18n.slice(
+                "human_feedback_classification"
+            ).format(feedback=human_feedback)
+
+            additional_changes_response = (
+                self.llm.call(
+                    [
+                        self._format_msg(
+                            additional_changes_prompt, role="system"
+                        )
+                    ],
+                    callbacks=self.callbacks,
+                )
+                .strip()
+                .lower()
+            )
+
+            if additional_changes_response == "false":
+                # Human is satisfied; stop asking for feedback.
+                self.ask_for_human_input = False
+            else:
+                # Further changes requested: feed the feedback back into the
+                # agent loop and produce a revised answer.
+                self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
+                formatted_answer = self._invoke_loop()
 
         if self.crew and self.crew._train:
             self._handle_crew_training_output(formatted_answer)
 
+        self._create_short_term_memory(formatted_answer)
         self._create_long_term_memory(formatted_answer)
         return {"output": formatted_answer.output}
 
     def _invoke_loop(self, formatted_answer=None):
diff --git a/src/crewai/translations/en.json b/src/crewai/translations/en.json
index d79076bb7..d30606dc0 100644
--- a/src/crewai/translations/en.json
+++ b/src/crewai/translations/en.json
@@ -22,7 +22,8 @@
     "sumamrize_instruction": "Summarize the following text, make sure to include all the important information: {group}",
     "summary": "This is a summary of our conversation so far:\n{merged_summary}",
     "manager_request": "Your best answer to your coworker asking you this, accounting for the context shared.",
-    "formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python."
+    "formatted_task_instructions": "Ensure your final answer contains only the content in the following format: {output_format}\n\nEnsure the final output does not include any code block markers like ```json or ```python.",
+    "human_feedback_classification": "Determine if the following feedback indicates that the user is satisfied or if further changes are needed. Respond with 'True' if further changes are needed, or 'False' if the user is satisfied. **Important** Do not include any additional commentary outside of your 'True' or 'False' response.\n\nFeedback: \"{feedback}\""
   },
   "errors": {
     "force_final_answer_error": "You can't keep going, this was the best you could do.\n {formatted_answer.text}",