removed todo comments and fixed some tests

Author: Lorenze Jay
Date: 2024-07-11 12:44:30 -07:00
parent a55a835d54
commit 1cf4b47404
4 changed files with 5 additions and 15 deletions

View File

@@ -24,8 +24,6 @@ class CrewOutput(BaseModel):
         description="Processed token summary", default={}
     )
-    # TODO: Joao - Adding this safety check breakes when people want to see
-    # The full output of a CrewOutput.
     # @property
     # def pydantic(self) -> Optional[BaseModel]:
     #     # Check if the final task output included a pydantic model
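For context, the removed TODO pointed at the commented-out safety check kept below it. A hypothetical sketch of what that check would do if enabled, with the class and field names assumed since the property never shipped:

```python
from typing import Optional

from pydantic import BaseModel


class CrewOutputSketch(BaseModel):
    """Stand-in for CrewOutput; `final_pydantic` is an assumed field name."""

    final_pydantic: Optional[BaseModel] = None

    @property
    def pydantic(self) -> Optional[BaseModel]:
        # Check if the final task output included a pydantic model;
        # raising here is the strictness the removed TODO says breaks
        # callers who want to see the full CrewOutput.
        if self.final_pydantic is None:
            raise ValueError("Final task output did not include a pydantic model")
        return self.final_pydantic
```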

View File

@@ -11,9 +11,7 @@ class TaskOutput(BaseModel):
     description: str = Field(description="Description of the task")
     summary: Optional[str] = Field(description="Summary of the task", default=None)
-    raw: str = Field(
-        description="Raw output of the task", default=""
-    )  # TODO: @joao: breaking change, by renaming raw_output to raw, but now consistent with CrewOutput
+    raw: str = Field(description="Raw output of the task", default="")
     pydantic: Optional[BaseModel] = Field(
         description="Pydantic output of task", default=None
     )
@@ -32,8 +30,6 @@ class TaskOutput(BaseModel):
         self.summary = f"{excerpt}..."
         return self
-    # TODO: Joao - Adding this safety check breakes when people want to see
-    # The full output of a TaskOutput or CrewOutput.
     # @property
     # def pydantic(self) -> Optional[BaseModel]:
     #     # Check if the final task output included a pydantic model
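The net change to TaskOutput: the multi-line `raw` definition and its rename note (`raw_output` to `raw`) collapse into one line, matching CrewOutput. A self-contained sketch of the fields exactly as the diff shows them:

```python
from typing import Optional

from pydantic import BaseModel, Field


class TaskOutput(BaseModel):
    description: str = Field(description="Description of the task")
    summary: Optional[str] = Field(description="Summary of the task", default=None)
    raw: str = Field(description="Raw output of the task", default="")
    pydantic: Optional[BaseModel] = Field(
        description="Pydantic output of task", default=None
    )


out = TaskOutput(description="Summarize the report", raw="Revenue grew 12%.")
print(out.raw)  # formerly raw_output, per the removed TODO
```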

View File

@@ -1379,7 +1379,6 @@ def test_crew_does_not_interpolate_without_inputs():
     interpolate_task_inputs.assert_not_called()
-# TODO: Ask @joao if we want to start throwing errors if inputs are not provided
 # def test_crew_partial_inputs():
 #     agent = Agent(
 #         role="{topic} Researcher",
@@ -1403,7 +1402,6 @@ def test_crew_does_not_interpolate_without_inputs():
 #     assert crew.agents[0].backstory == "You have a lot of experience with AI."
-# TODO: If we do want ot throw errors if we are missing inputs. Add in this test.
 # def test_crew_invalid_inputs():
 #     agent = Agent(
 #         role="{topic} Researcher",
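The deleted TODOs asked whether a missing input should raise; the commented-out tests around them exercise placeholder interpolation. For reference, the behavior under test, assuming the public `crewai` API these tests import:

```python
from crewai import Agent, Crew, Task

agent = Agent(
    role="{topic} Researcher",
    goal="Express hot takes on {topic}.",
    backstory="You have a lot of experience with {topic}.",
)
task = Task(
    description="Give me an analysis around {topic}.",
    expected_output="1 bullet point about {topic}.",
    agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])

# With inputs, "{topic}" is interpolated into roles, backstories, and
# descriptions before execution; test_crew_does_not_interpolate_without_inputs
# asserts that a bare kickoff() skips interpolation rather than raising.
crew.kickoff(inputs={"topic": "AI"})
```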

View File

@@ -81,7 +81,7 @@ def test_task_prompt_includes_expected_output():
     with patch.object(Agent, "execute_task") as execute:
         execute.return_value = "ok"
-        task.execute_sync()
+        task.execute_sync(agent=researcher)
         execute.assert_called_once_with(task=task, context=None, tools=[])
@@ -104,7 +104,7 @@ def test_task_callback():
     with patch.object(Agent, "execute_task") as execute:
         execute.return_value = "ok"
-        task.execute_sync()
+        task.execute_sync(agent=researcher)
         task_completed.assert_called_once_with(task.output)
@@ -129,7 +129,7 @@ def test_task_callback_returns_task_ouput():
     with patch.object(Agent, "execute_task") as execute:
         execute.return_value = "exported_ok"
-        task.execute_sync()
+        task.execute_sync(agent=researcher)
         # Ensure the callback is called with a TaskOutput object serialized to JSON
         task_completed.assert_called_once()
         callback_data = task_completed.call_args[0][0]
@@ -521,9 +521,7 @@ def test_save_task_json_output():
     with patch.object(Task, "_save_file") as save_file:
         save_file.return_value = None
         crew.kickoff()
-        save_file.assert_called_once_with(
-            {"score": 4}
-        )  # TODO: @Joao, should this be a dict or a json string?
+        save_file.assert_called_once_with({"score": 4})
 @pytest.mark.vcr(filter_headers=["authorization"])
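Every test fix in this file follows the same pattern: `execute_sync` is now called with the executing agent passed explicitly. Pulled out of the diff into a standalone sketch; the `researcher` agent and the task fields are assumed stand-ins for the fixtures these tests define:

```python
from unittest.mock import patch

from crewai import Agent, Task

researcher = Agent(
    role="Researcher",
    goal="Analyze interesting topics",
    backstory="You research AI.",
)
task = Task(
    description="Give me a list of 5 interesting ideas to explore.",
    expected_output="Bullet point list of 5 ideas.",
    agent=researcher,
)

with patch.object(Agent, "execute_task") as execute:
    execute.return_value = "ok"
    task.execute_sync(agent=researcher)  # agent now passed explicitly
    execute.assert_called_once_with(task=task, context=None, tools=[])
```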