style: fix lint issues in test_task_output_json_overrides_llm_response_format

Applied ruff auto-fixes to remove trailing whitespace from the docstring
and from blank lines in the new test function.

Co-Authored-By: João <joao@crewai.com>
Author: Devin AI
Date: 2025-10-03 06:09:45 +00:00
Parent: 0452c0af47
Commit: 7d236d42fb


@@ -1639,26 +1639,26 @@ def test_task_interpolation_with_hyphens():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_output_json_overrides_llm_response_format():
    """Test that task.output_json takes priority over llm.response_format when both are set.

    This test addresses issue #3639: when both a task's output_json and an agent's LLM
    response_format are set with pydantic models, the task-level setting should take
    precedence over the agent-level setting.
    """
    from crewai.llm import LLM

    class TaskOutputModel(BaseModel):
        """Expected output model for the task."""

        task_result: str
        task_confidence: float

    class LLMOutputModel(BaseModel):
        """Different output model set on the LLM."""

        llm_answer: str
        llm_score: int

    llm = LLM(model="gpt-4o-mini", response_format=LLMOutputModel)

    agent = Agent(
        role="Test Agent",
        goal="Test goal for priority testing",
@@ -1666,17 +1666,17 @@ def test_task_output_json_overrides_llm_response_format():
        llm=llm,
        allow_delegation=False,
    )

    task = Task(
        description="Analyze the priority system and provide a result",
        expected_output="A structured result with task_result and task_confidence fields",
        agent=agent,
        output_json=TaskOutputModel,
    )

    crew = Crew(agents=[agent], tasks=[task], process=Process.sequential)
    result = crew.kickoff()

    assert result.json_dict is not None, "Result should have json_dict output"
    assert "task_result" in result.json_dict, (
        "Should have task_result field from TaskOutputModel. "