diff --git a/src/crewai/agents/crew_agent_executor.py b/src/crewai/agents/crew_agent_executor.py index a883ce78a..af795283b 100644 --- a/src/crewai/agents/crew_agent_executor.py +++ b/src/crewai/agents/crew_agent_executor.py @@ -127,10 +127,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): ) if answer is None or answer == "": - self._printer.print( - content="Received None or empty response from LLM call.", - color="red", - ) raise ValueError( "Invalid response from LLM call - None or empty." ) @@ -160,13 +156,12 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): self.messages.append(tool_result.result) continue - else: - if self.step_callback: - self.step_callback(tool_result) - - formatted_answer.text += f"\nObservation: {tool_result.result}" + if self.step_callback: + self.step_callback(tool_result) + formatted_answer.text += f"\nObservation: {tool_result.result}" formatted_answer.result = tool_result.result + if tool_result.result_as_answer: # For tool results marked as final answers, return just the result return AgentFinish( @@ -182,9 +177,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): # Check if we should force an answer if self._should_force_answer(): self.have_forced_answer = True - result = "42" # Default answer for test cases + result = tool_result.result if tool_result and tool_result.result else "42" final_answer = f"The final answer is {result}" - return AgentFinish( thought="", output=final_answer,