diff --git a/src/crewai/crews/crew_output.py b/src/crewai/crews/crew_output.py
index cb4cd6c79..0ffc154f7 100644
--- a/src/crewai/crews/crew_output.py
+++ b/src/crewai/crews/crew_output.py
@@ -24,8 +24,6 @@ class CrewOutput(BaseModel):
         description="Processed token summary", default={}
     )

-    # TODO: Joao - Adding this safety check breakes when people want to see
-    # The full output of a CrewOutput.
     # @property
     # def pydantic(self) -> Optional[BaseModel]:
     #     # Check if the final task output included a pydantic model
diff --git a/src/crewai/tasks/task_output.py b/src/crewai/tasks/task_output.py
index b56334180..1d01e9806 100644
--- a/src/crewai/tasks/task_output.py
+++ b/src/crewai/tasks/task_output.py
@@ -11,9 +11,7 @@ class TaskOutput(BaseModel):
     description: str = Field(description="Description of the task")
     summary: Optional[str] = Field(description="Summary of the task", default=None)
-    raw: str = Field(
-        description="Raw output of the task", default=""
-    )  # TODO: @joao: breaking change, by renaming raw_output to raw, but now consistent with CrewOutput
+    raw: str = Field(description="Raw output of the task", default="")
     pydantic: Optional[BaseModel] = Field(
         description="Pydantic output of task", default=None
     )
@@ -32,8 +30,6 @@ class TaskOutput(BaseModel):
         self.summary = f"{excerpt}..."
         return self

-    # TODO: Joao - Adding this safety check breakes when people want to see
-    # The full output of a TaskOutput or CrewOutput.
     # @property
     # def pydantic(self) -> Optional[BaseModel]:
     #     # Check if the final task output included a pydantic model
diff --git a/tests/crew_test.py b/tests/crew_test.py
index 39a449e0d..de9971503 100644
--- a/tests/crew_test.py
+++ b/tests/crew_test.py
@@ -1379,7 +1379,6 @@ def test_crew_does_not_interpolate_without_inputs():
     interpolate_task_inputs.assert_not_called()


-# TODO: Ask @joao if we want to start throwing errors if inputs are not provided
 # def test_crew_partial_inputs():
 #     agent = Agent(
 #         role="{topic} Researcher",
@@ -1403,7 +1402,6 @@ def test_crew_does_not_interpolate_without_inputs():
 #     assert crew.agents[0].backstory == "You have a lot of experience with AI."


-# TODO: If we do want ot throw errors if we are missing inputs. Add in this test.
 # def test_crew_invalid_inputs():
 #     agent = Agent(
 #         role="{topic} Researcher",
diff --git a/tests/task_test.py b/tests/task_test.py
index 99a3df0a1..95f201c7c 100644
--- a/tests/task_test.py
+++ b/tests/task_test.py
@@ -81,7 +81,7 @@ def test_task_prompt_includes_expected_output():
     with patch.object(Agent, "execute_task") as execute:
         execute.return_value = "ok"
-        task.execute_sync()
+        task.execute_sync(agent=researcher)
         execute.assert_called_once_with(task=task, context=None, tools=[])
@@ -104,7 +104,7 @@ def test_task_callback():
     with patch.object(Agent, "execute_task") as execute:
         execute.return_value = "ok"
-        task.execute_sync()
+        task.execute_sync(agent=researcher)
         task_completed.assert_called_once_with(task.output)
@@ -129,7 +129,7 @@ def test_task_callback_returns_task_ouput():
     with patch.object(Agent, "execute_task") as execute:
         execute.return_value = "exported_ok"
-        task.execute_sync()
+        task.execute_sync(agent=researcher)
         # Ensure the callback is called with a TaskOutput object serialized to JSON
         task_completed.assert_called_once()
         callback_data = task_completed.call_args[0][0]
@@ -521,9 +521,7 @@ def test_save_task_json_output():
     with patch.object(Task, "_save_file") as save_file:
         save_file.return_value = None
         crew.kickoff()
-        save_file.assert_called_once_with(
-            {"score": 4}
-        )  # TODO: @Joao, should this be a dict or a json string?
+        save_file.assert_called_once_with({"score": 4})


 @pytest.mark.vcr(filter_headers=["authorization"])
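The test updates in tests/task_test.py reflect that Task.execute_sync is now invoked with an explicit agent argument. Below is a minimal sketch of the updated calling pattern, mirroring the patched-mock style of these tests; the Agent/Task field values are illustrative assumptions, not copied from the test suite.

from unittest.mock import patch

from crewai import Agent, Task

# Illustrative fixtures; role/goal/backstory and the task fields are assumptions.
researcher = Agent(
    role="Researcher",
    goal="Summarize recent findings",
    backstory="An experienced AI researcher.",
)
task = Task(
    description="Summarize the latest AI research",
    expected_output="A short bullet-point summary",
    agent=researcher,
)

# execute_sync is now called with the agent passed explicitly,
# matching the task.execute_sync(agent=researcher) calls in the diff above.
with patch.object(Agent, "execute_task") as execute:
    execute.return_value = "ok"
    result = task.execute_sync(agent=researcher)

execute.assert_called_once_with(task=task, context=None, tools=[])
# Per the TaskOutput change above, the raw string result is expected on `.raw`
# (renamed from raw_output, consistent with CrewOutput).
print(result.raw)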