diff --git a/src/crewai/agent.py b/src/crewai/agent.py
index 40524bbd9..8fa5162a2 100644
--- a/src/crewai/agent.py
+++ b/src/crewai/agent.py
@@ -241,7 +241,7 @@ class Agent(BaseModel):
         self.agent_executor.tools_description = render_text_description(parsed_tools)
         self.agent_executor.tools_names = self.__tools_names(parsed_tools)
 
-        if self.crew._train:
+        if self.crew and self.crew._train:
             task_prompt = self._training_handler(task_prompt=task_prompt)
         else:
             task_prompt = self._use_trained_data(task_prompt=task_prompt)
diff --git a/src/crewai/agents/executor.py b/src/crewai/agents/executor.py
index 79591e427..b4f49b522 100644
--- a/src/crewai/agents/executor.py
+++ b/src/crewai/agents/executor.py
@@ -250,7 +250,7 @@ class CrewAgentExecutor(AgentExecutor):
             if self.should_ask_for_human_input:
                 human_feedback = self._ask_human_input(output.return_values["output"])
 
-                if self.crew._train:
+                if self.crew and self.crew._train:
                     self._handle_crew_training_output(output, human_feedback)
 
                 # Making sure we only ask for it once, so disabling for the next thought loop
@@ -268,7 +268,7 @@ class CrewAgentExecutor(AgentExecutor):
                     return
 
             else:
-                if self.crew._train:
+                if self.crew and self.crew._train:
                     self._handle_crew_training_output(output)
 
             yield output
@@ -323,9 +323,10 @@ class CrewAgentExecutor(AgentExecutor):
         agent_id = str(self.crew_agent.id)
 
         if (
-            training_data := CrewTrainingHandler(TRAINING_DATA_FILE).load()
+            CrewTrainingHandler(TRAINING_DATA_FILE).load()
             and not self.should_ask_for_human_input
         ):
+            training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
             if training_data.get(agent_id):
                 training_data[agent_id][self.crew._train_iteration][
                     "improved_output"
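
Not part of the patch, just a reading aid: the recurring change above guards each `self.crew._train` access behind a truthiness check, since an agent or executor may run without a crew attached (`crew` is `None`). A minimal, self-contained Python sketch of that guard pattern follows; the `Crew`, `Agent`, and `build_prompt` names are illustrative stand-ins, not the crewai API.

    from typing import Optional


    class Crew:
        def __init__(self, train: bool = False) -> None:
            self._train = train


    class Agent:
        def __init__(self, crew: Optional[Crew] = None) -> None:
            self.crew = crew

        def build_prompt(self, task_prompt: str) -> str:
            # Without the guard, `self.crew._train` raises AttributeError when no
            # crew is attached; checking `self.crew` first short-circuits that.
            if self.crew and self.crew._train:
                return task_prompt + " [training mode]"
            return task_prompt


    print(Agent().build_prompt("Summarize the report"))                   # no crew attached
    print(Agent(Crew(train=True)).build_prompt("Summarize the report"))   # training branch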