From db5f565deac9f73a171af70d390211c4d3dac45b Mon Sep 17 00:00:00 2001
From: Greyson LaLonde
Date: Mon, 22 Sep 2025 13:09:53 -0400
Subject: [PATCH] fix: apply ruff linting fixes to tasks module

Modernize typing annotations to the PEP 585/604 builtins (`float | None`,
`tuple[bool, Any]`) and drop the now-unused `Optional`/`Tuple` imports;
return the `agent.kickoff` result directly instead of assigning it first;
replace an `assert` with an explicit `ValueError` so the check survives
`python -O`; drop a redundant `else` after `return`; use the `!s`
conversion instead of `str()` inside an f-string; and strip trailing
whitespace from a blank line.
---
 src/crewai/tasks/hallucination_guardrail.py |  6 +++---
 src/crewai/tasks/llm_guardrail.py           | 20 ++++++++------------
 2 files changed, 11 insertions(+), 15 deletions(-)

diff --git a/src/crewai/tasks/hallucination_guardrail.py b/src/crewai/tasks/hallucination_guardrail.py
index 3079bc243..682209e51 100644
--- a/src/crewai/tasks/hallucination_guardrail.py
+++ b/src/crewai/tasks/hallucination_guardrail.py
@@ -6,7 +6,7 @@
 Classes:
     HallucinationGuardrail: Placeholder guardrail that validates task outputs.
 """
-from typing import Any, Optional, Tuple
+from typing import Any
 
 from crewai.llm import LLM
 from crewai.tasks.task_output import TaskOutput
@@ -48,7 +48,7 @@ class HallucinationGuardrail:
         self,
         context: str,
         llm: LLM,
-        threshold: Optional[float] = None,
+        threshold: float | None = None,
         tool_response: str = "",
     ):
         """Initialize the HallucinationGuardrail placeholder.
@@ -75,7 +75,7 @@ class HallucinationGuardrail:
         """Generate a description of this guardrail for event logging."""
         return "HallucinationGuardrail (no-op)"
 
-    def __call__(self, task_output: TaskOutput) -> Tuple[bool, Any]:
+    def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]:
         """Validate a task output against hallucination criteria.
 
         In the open source, this method always returns that the output is valid.
diff --git a/src/crewai/tasks/llm_guardrail.py b/src/crewai/tasks/llm_guardrail.py
index 65aea7cec..79c0c6b2a 100644
--- a/src/crewai/tasks/llm_guardrail.py
+++ b/src/crewai/tasks/llm_guardrail.py
@@ -1,4 +1,4 @@
-from typing import Any, Tuple
+from typing import Any
 
 from pydantic import BaseModel, Field
 
@@ -53,7 +53,7 @@
 
         Guardrail: {self.description}
 
-        
+
         Your task:
         - Confirm if the Task result complies with the guardrail.
         - If not, provide clear feedback explaining what is wrong (e.g., by how much it violates the rule, or what specific part fails).
@@ -61,11 +61,9 @@
         - If the Task result complies with the guardrail, saying that is valid
         """
 
-        result = agent.kickoff(query, response_format=LLMGuardrailResult)
+        return agent.kickoff(query, response_format=LLMGuardrailResult)
 
-        return result
-
-    def __call__(self, task_output: TaskOutput) -> Tuple[bool, Any]:
+    def __call__(self, task_output: TaskOutput) -> tuple[bool, Any]:
         """Validates the output of a task based on specified criteria.
 
         Args:
@@ -79,13 +77,11 @@
         try:
             result = self._validate_output(task_output)
 
-            assert isinstance(
-                result.pydantic, LLMGuardrailResult
-            ), "The guardrail result is not a valid pydantic model"
+            if not isinstance(result.pydantic, LLMGuardrailResult):
+                raise ValueError("The guardrail result is not a valid pydantic model")
 
             if result.pydantic.valid:
                 return True, task_output.raw
-            else:
-                return False, result.pydantic.feedback
+            return False, result.pydantic.feedback
         except Exception as e:
-            return False, f"Error while validating the task output: {str(e)}"
+            return False, f"Error while validating the task output: {e!s}"
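
Note on the assert change, with a runnable standalone sketch (the class and
function names below are illustrative, not CrewAI code): Python removes
`assert` statements when run with `-O`, so the old guardrail check could be
skipped entirely under optimized bytecode, while the explicit `raise` is
always enforced.

    class FakeResult:
        """Stand-in for a kickoff result whose .pydantic failed to parse."""
        pydantic = None

    def check_with_assert(result: FakeResult) -> bool:
        # Compiled out under `python -O`: the invalid result passes silently.
        assert result.pydantic is not None, "The guardrail result is not a valid pydantic model"
        return True

    def check_with_raise(result: FakeResult) -> bool:
        # Mirrors the patched code: enforced regardless of interpreter flags.
        if result.pydantic is None:
            raise ValueError("The guardrail result is not a valid pydantic model")
        return True

    if __name__ == "__main__":
        try:
            check_with_raise(FakeResult())
        except ValueError as e:
            # `!s` is the explicit-conversion spelling the patch adopts.
            print(f"Error while validating the task output: {e!s}")

Run the assert variant under `python -O` and the check disappears, returning
True for an invalid result; the raise variant fails loudly either way.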