Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
style: fix lint issues in test_task_output_json_overrides_llm_response_format
Applied ruff auto-fixes to remove trailing whitespace from the docstring and blank lines in the new test function.

Co-Authored-By: João <joao@crewai.com>
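For reference, a ruff invocation along these lines would produce the same cleanup; the rule selection (W291 for trailing whitespace, W293 for whitespace on blank lines) and the test file path are assumptions, since the repository's ruff configuration is not shown here:

    # Hypothetical command; rule selection and module path assumed
    ruff check --select W291,W293 --fix tests/task_test.py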
@@ -1639,26 +1639,26 @@ def test_task_interpolation_with_hyphens():
@pytest.mark.vcr(filter_headers=["authorization"])
def test_task_output_json_overrides_llm_response_format():
    """Test that task.output_json takes priority over llm.response_format when both are set.

    This test addresses issue #3639: when both a task's output_json and an agent's LLM
    response_format are set with pydantic models, the task-level setting should take
    precedence over the agent-level setting.
    """

    from crewai.llm import LLM

    class TaskOutputModel(BaseModel):
        """Expected output model for the task."""
        task_result: str
        task_confidence: float

    class LLMOutputModel(BaseModel):
        """Different output model set on the LLM."""
        llm_answer: str
        llm_score: int

    llm = LLM(model="gpt-4o-mini", response_format=LLMOutputModel)

    agent = Agent(
        role="Test Agent",
        goal="Test goal for priority testing",
@@ -1666,17 +1666,17 @@ def test_task_output_json_overrides_llm_response_format():
        llm=llm,
        allow_delegation=False,
    )

    task = Task(
        description="Analyze the priority system and provide a result",
        expected_output="A structured result with task_result and task_confidence fields",
        agent=agent,
        output_json=TaskOutputModel,
    )

    crew = Crew(agents=[agent], tasks=[task], process=Process.sequential)
    result = crew.kickoff()

    assert result.json_dict is not None, "Result should have json_dict output"
    assert "task_result" in result.json_dict, (
        "Should have task_result field from TaskOutputModel. "
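To run just this test locally, a pytest invocation like the one below should work; the test file path is assumed, and the @pytest.mark.vcr marker means a recorded cassette is replayed rather than calling the live API once the cassette exists:

    # Hypothetical command; the test module path is assumed
    pytest tests/task_test.py -k test_task_output_json_overrides_llm_response_format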