From f9fe20ec7664ec7579afc2451674e653ccd4510c Mon Sep 17 00:00:00 2001
From: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com>
Date: Tue, 21 Jan 2025 18:00:53 +0000
Subject: [PATCH] fix: prevent kickoff hanging on LLM auth failures

- Move iteration counter increment before LLM call
- Add proper error handling for LLM failures
- Implement max iterations check and handling
- Add error messaging for failed LLM calls

This prevents the agent from hanging in an infinite loop when LLM
authentication fails by properly incrementing the iteration counter and
respecting the max_iterations limit.

Co-Authored-By: brandon@crewai.com
---
 src/crewai/agents/crew_agent_executor.py | 37 ++++++++++++++++++++----
 1 file changed, 32 insertions(+), 5 deletions(-)

diff --git a/src/crewai/agents/crew_agent_executor.py b/src/crewai/agents/crew_agent_executor.py
index 813ac8a08..a1bc24fb0 100644
--- a/src/crewai/agents/crew_agent_executor.py
+++ b/src/crewai/agents/crew_agent_executor.py
@@ -108,14 +108,42 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self._create_long_term_memory(formatted_answer)
         return {"output": formatted_answer.output}
 
+    def _has_reached_max_iterations(self) -> bool:
+        """Check if the maximum number of iterations has been reached."""
+        return self.iterations >= self.max_iter
+
+    def _handle_max_iterations_exceeded(self, formatted_answer) -> AgentFinish:
+        """Handle the case when maximum iterations have been exceeded."""
+        error_msg = f"Agent has exceeded maximum iterations ({self.max_iter}). This could be due to repeated failures or infinite loops."
+        self._printer.print(content=error_msg, color="red")
+        return AgentFinish(
+            thought="Maximum iterations exceeded",
+            output=error_msg,
+            text=formatted_answer.text if formatted_answer else error_msg
+        )
+
     def _invoke_loop(self, formatted_answer=None):
         try:
             while not isinstance(formatted_answer, AgentFinish):
                 if not self.request_within_rpm_limit or self.request_within_rpm_limit():
-                    answer = self.llm.call(
-                        self.messages,
-                        callbacks=self.callbacks,
-                    )
+                    # Increment iteration counter before LLM call to prevent hanging
+                    self.iterations += 1
+
+                    try:
+                        answer = self.llm.call(
+                            self.messages,
+                            callbacks=self.callbacks,
+                        )
+                    except Exception as llm_error:
+                        self._printer.print(
+                            content=f"LLM call failed: {str(llm_error)}",
+                            color="red",
+                        )
+                        # Check if we've reached max iterations to avoid infinite loops
+                        if self._has_reached_max_iterations():
+                            return self._handle_max_iterations_exceeded(formatted_answer)
+                        # Re-raise authentication and other critical errors
+                        raise
 
                     if answer is None or answer == "":
                         self._printer.print(
@@ -136,7 +164,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                             ):
                                 answer = answer.split("Observation:")[0].strip()
 
-                    self.iterations += 1
                     formatted_answer = self._format_answer(answer)
 
                     if isinstance(formatted_answer, AgentAction):
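
Reviewer note (not part of the patch): the sketch below is a minimal, self-contained illustration of the control flow this change introduces. The attempt counter advances before the LLM call, and a failing call either propagates immediately or, once the iteration cap is reached, turns into an error message instead of another loop pass, so kickoff can no longer spin forever. FlakyLLM, run_loop, and their signatures are hypothetical stand-ins for illustration, not crewai's actual classes or API.

    # Reviewer-only sketch, assuming hypothetical FlakyLLM and run_loop names;
    # it mirrors the patched flow, it is not crewai's real executor.

    class FlakyLLM:
        """Hypothetical LLM stub whose call always fails (e.g. bad API key)."""

        def call(self, messages):
            raise RuntimeError("authentication failed: invalid API key")


    def run_loop(llm, messages, max_iter=3):
        """Count the attempt before calling the LLM; report once the cap is
        hit, otherwise surface the error immediately."""
        iterations = 0
        answer = None
        while answer is None:
            iterations += 1  # incremented before the call, as in the patch
            try:
                answer = llm.call(messages)
            except Exception as exc:
                if iterations >= max_iter:
                    # Analogue of _handle_max_iterations_exceeded: stop with a message.
                    return f"Agent has exceeded maximum iterations ({max_iter}): {exc}"
                # Analogue of the bare `raise`: auth errors propagate instead of hanging.
                raise
        return answer


    if __name__ == "__main__":
        try:
            run_loop(FlakyLLM(), messages=[], max_iter=3)
        except RuntimeError as exc:
            print(f"error surfaced on first failed call: {exc}")
        # With the cap already reached, the loop reports instead of raising.
        print(run_loop(FlakyLLM(), messages=[], max_iter=1))

Running the sketch prints the surfaced authentication error and then the capped-iterations message; neither path loops indefinitely, which is the behavior this patch aims to restore in the executor.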