diff --git a/tests/test_task.py b/tests/test_task.py
index ec9556f7b..831929003 100644
--- a/tests/test_task.py
+++ b/tests/test_task.py
@@ -1639,26 +1639,26 @@ def test_task_interpolation_with_hyphens():
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_task_output_json_overrides_llm_response_format():
     """Test that task.output_json takes priority over llm.response_format when both are set.
-    
+
     This test addresses issue #3639: when both a task's output_json and an agent's LLM
     response_format are set with pydantic models, the task-level setting should take
     precedence over the agent-level setting.
     """
-    
+
     from crewai.llm import LLM
-    
+
     class TaskOutputModel(BaseModel):
         """Expected output model for the task."""
         task_result: str
         task_confidence: float
-    
+
     class LLMOutputModel(BaseModel):
         """Different output model set on the LLM."""
         llm_answer: str
         llm_score: int
-    
+
     llm = LLM(model="gpt-4o-mini", response_format=LLMOutputModel)
-    
+
     agent = Agent(
         role="Test Agent",
         goal="Test goal for priority testing",
@@ -1666,17 +1666,17 @@ def test_task_output_json_overrides_llm_response_format():
         llm=llm,
         allow_delegation=False,
     )
-    
+
     task = Task(
         description="Analyze the priority system and provide a result",
         expected_output="A structured result with task_result and task_confidence fields",
         agent=agent,
         output_json=TaskOutputModel,
     )
-    
+
     crew = Crew(agents=[agent], tasks=[task], process=Process.sequential)
     result = crew.kickoff()
-    
+
     assert result.json_dict is not None, "Result should have json_dict output"
     assert "task_result" in result.json_dict, (
         "Should have task_result field from TaskOutputModel. "