fix: resolve remaining mypy type errors

- Fix tool_usage.py: rename result variable to avoid redefinition
- Fix lite_agent.py: import TaskOutput from correct module and add type casts
- Add explicit type annotation for data dict in tool_usage.py
This commit is contained in:
Greyson LaLonde
2025-09-04 10:40:33 -04:00
parent eed2ffde5f
commit 0bab041531
2 changed files with 13 additions and 7 deletions

View File

@@ -40,7 +40,7 @@ from crewai.events.types.logging_events import AgentLogsExecutionEvent
 from crewai.flow.flow_trackable import FlowTrackable
 from crewai.llm import LLM
 from crewai.llms.base_llm import BaseLLM
-from crewai.task import TaskOutput
+from crewai.tasks import TaskOutput
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.utilities import I18N
@@ -217,7 +217,10 @@ class LiteAgent(FlowTrackable, BaseModel):
     @model_validator(mode="after")
     def ensure_guardrail_is_callable(self) -> Self:
         if callable(self.guardrail):
-            self._guardrail = self.guardrail
+            self._guardrail = cast(
+                Callable[[LiteAgentOutput | TaskOutput], tuple[bool, Any]],
+                self.guardrail,
+            )
         elif isinstance(self.guardrail, str):
             from crewai.tasks.llm_guardrail import LLMGuardrail
@@ -226,7 +229,10 @@ class LiteAgent(FlowTrackable, BaseModel):
                     f"Guardrail requires LLM instance of type BaseLLM, got {type(self.llm).__name__}"
                 )
-            self._guardrail = LLMGuardrail(description=self.guardrail, llm=self.llm)
+            self._guardrail = cast(
+                Callable[[LiteAgentOutput | TaskOutput], tuple[bool, Any]],
+                LLMGuardrail(description=self.guardrail, llm=self.llm),
+            )
         return self

View File

@@ -165,7 +165,7 @@ class ToolUsage:
         """
         if self._check_tool_repeated_usage(calling=calling):
             try:
-                result = self._i18n.errors("task_repeated_usage").format(
+                repeated_usage_msg = self._i18n.errors("task_repeated_usage").format(
                     tool_names=self.tools_names
                 )
                 self._telemetry.tool_repeated_usage(
@@ -173,8 +173,8 @@ class ToolUsage:
                     tool_name=tool.name,
                     attempts=self._run_attempts,
                 )
-                result = self._format_result(result=result)
-                return result
+                repeated_usage_result = self._format_result(result=repeated_usage_msg)
+                return repeated_usage_result
             except Exception:
                 if self.task:
@@ -303,7 +303,7 @@ class ToolUsage:
                     attempts=self._run_attempts,
                 )
                 result = self._format_result(result=result)
-                data = {
+                data: dict[str, Any] = {
                     "result": result,
                     "tool_name": tool.name,
                     "tool_args": calling.arguments,