fix: apply ruff linting fixes to tasks module

This commit is contained in:
Greyson LaLonde
2025-09-22 13:09:53 -04:00
committed by GitHub
parent 58413b663a
commit db5f565dea
2 changed files with 11 additions and 15 deletions

View File

@@ -6,7 +6,7 @@ Classes:
HallucinationGuardrail: Placeholder guardrail that validates task outputs.
"""
-from typing import Any, Optional, Tuple
+from typing import Any
from crewai.llm import LLM
from crewai.tasks.task_output import TaskOutput
@@ -48,7 +48,7 @@ class HallucinationGuardrail:
self,
context: str,
llm: LLM,
-threshold: Optional[float] = None,
+threshold: float | None = None,
tool_response: str = "",
):
"""Initialize the HallucinationGuardrail placeholder.
@@ -75,7 +75,7 @@ class HallucinationGuardrail:
"""Generate a description of this guardrail for event logging."""
return "HallucinationGuardrail (no-op)"
-def __call__(self, task_output: TaskOutput) -> Tuple[bool, Any]:
+def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]:
"""Validate a task output against hallucination criteria.
In the open source, this method always returns that the output is valid.

View File

@@ -1,4 +1,4 @@
-from typing import Any, Tuple
+from typing import Any
from pydantic import BaseModel, Field
@@ -53,7 +53,7 @@ class LLMGuardrail:
Guardrail:
{self.description}
Your task:
- Confirm if the Task result complies with the guardrail.
- If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).
@@ -61,11 +61,9 @@ class LLMGuardrail:
- If the Task result complies with the guardrail, saying that is valid
"""
-result = agent.kickoff(query, response_format=LLMGuardrailResult)
+return agent.kickoff(query, response_format=LLMGuardrailResult)
-return result
-def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]:
+def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]:
"""Validates the output of a task based on specified criteria.
Args:
@@ -79,13 +77,11 @@ class LLMGuardrail:
try:
result = self._validate_output(task_output)
-assert isinstance(
-    result.pydantic, LLMGuardrailResult
-), "The guardrail result is not a valid pydantic model"
+if not isinstance(result.pydantic, LLMGuardrailResult):
+    raise ValueError("The guardrail result is not a valid pydantic model")
 if result.pydantic.valid:
     return True, task_output.raw
-else:
-    return False, result.pydantic.feedback
+return False, result.pydantic.feedback
except Exception as e:
-return False, f"Error while validating the task output: {str(e)}"
+return False, f"Error while validating the task output: {e!s}"