fix: resolve lint and type checking issues

- Add per-file ignores for S101 (assert statements) in test files
- Add per-file ignore for PERF203 (try-except in loop) in lite_agent.py
- Replace assert with proper error handling in lite_agent.py
- Replace eval() with ast.literal_eval() for security compliance

Co-Authored-By: João <joao@crewai.com>
Devin AI
2025-09-09 13:23:43 +00:00
parent 934c63ede1
commit a5617cbfff
3 changed files with 64 additions and 70 deletions
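A note on the eval() -> ast.literal_eval() swap mentioned in the last bullet: ast.literal_eval() only accepts Python literal syntax (numbers, strings, tuples, lists, dicts, sets, booleans, None), so input that tries to call functions or import modules raises an error instead of executing. A minimal sketch of the idea (illustrative only, not code from this commit):

    import ast

    def parse_literal(text: str) -> object:
        """Parse a Python literal safely; reject everything else."""
        try:
            return ast.literal_eval(text)
        except (ValueError, SyntaxError) as exc:
            raise ValueError(f"not a Python literal: {text!r}") from exc

    print(parse_literal("[1, 2, 3]"))        # [1, 2, 3]
    print(parse_literal("{'a': 1}"))         # {'a': 1}
    # parse_literal("__import__('os')")      # raises ValueError instead of executing

One behavioural caveat: non-literal arithmetic such as "2 + 3" is also rejected on current Python versions, whereas the old eval() call would have computed it.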

pyproject.toml

@@ -133,6 +133,10 @@ select = [
]
ignore = ["E501"] # ignore line too long
+ [tool.ruff.lint.per-file-ignores]
+ "tests/**/*.py" = ["S101"] # Allow assert statements in tests
+ "src/crewai/lite_agent.py" = ["PERF203"] # Allow try-except in loop for LLM parsing
[tool.mypy]
exclude = ["src/crewai/cli/templates", "tests"]
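For readers who don't have the rule codes memorized: S101 flags assert statements (they are stripped under python -O, so they should not carry runtime validation in library code, but they are exactly what you want in tests), and PERF203 flags try/except blocks inside loop bodies because exception handling adds per-iteration overhead. The lite agent's invoke loop has to catch and recover from LLM parsing errors on each iteration, so the rule is ignored for that one file rather than restructuring the loop. The shape PERF203 complains about looks roughly like this (hypothetical code, not from the repository):

    def parse_numbers(raw_values: list[str]) -> list[int]:
        parsed: list[int] = []
        for raw in raw_values:
            try:                   # PERF203: try/except inside a loop body
                parsed.append(int(raw))
            except ValueError:     # recover and continue with the next item
                parsed.append(0)
        return parsed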

src/crewai/lite_agent.py

@@ -1,21 +1,14 @@
import asyncio
import inspect
import uuid
+ from collections.abc import Callable
from typing import (
Any,
- Callable,
- Dict,
- List,
- Optional,
- Tuple,
- Type,
- Union,
cast,
get_args,
get_origin,
)
try:
from typing import Self
except ImportError:
@@ -27,8 +20,8 @@ from pydantic import (
Field,
InstanceOf,
PrivateAttr,
- model_validator,
field_validator,
+ model_validator,
)
from crewai.agents.agent_builder.base_agent import BaseAgent
@@ -39,13 +32,18 @@ from crewai.agents.parser import (
AgentFinish,
OutputParserException,
)
+ from crewai.events.event_bus import crewai_event_bus
+ from crewai.events.types.agent_events import (
+ LiteAgentExecutionCompletedEvent,
+ LiteAgentExecutionErrorEvent,
+ LiteAgentExecutionStartedEvent,
+ )
+ from crewai.events.types.logging_events import AgentLogsExecutionEvent
from crewai.flow.flow_trackable import FlowTrackable
from crewai.llm import LLM, BaseLLM
from crewai.tools.base_tool import BaseTool
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.utilities import I18N
- from crewai.utilities.converter import convert_to_model
- from crewai.utilities.guardrail import process_guardrail
from crewai.utilities.agent_utils import (
enforce_rpm_limit,
format_message_for_llm,
@@ -62,15 +60,8 @@ from crewai.utilities.agent_utils import (
process_llm_response,
render_text_description_and_args,
)
- from crewai.utilities.converter import generate_model_description
- from crewai.events.types.logging_events import AgentLogsExecutionEvent
- from crewai.events.types.agent_events import (
- LiteAgentExecutionCompletedEvent,
- LiteAgentExecutionErrorEvent,
- LiteAgentExecutionStartedEvent,
- )
- from crewai.events.event_bus import crewai_event_bus
+ from crewai.utilities.converter import convert_to_model, generate_model_description
+ from crewai.utilities.guardrail import process_guardrail
from crewai.utilities.llm_utils import create_llm
from crewai.utilities.printer import Printer
from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -83,15 +74,15 @@ class LiteAgentOutput(BaseModel):
model_config = {"arbitrary_types_allowed": True}
raw: str = Field(description="Raw output of the agent", default="")
- pydantic: Optional[BaseModel] = Field(
+ pydantic: BaseModel | None = Field(
description="Pydantic output of the agent", default=None
)
agent_role: str = Field(description="Role of the agent that produced this output")
- usage_metrics: Optional[Dict[str, Any]] = Field(
+ usage_metrics: dict[str, Any] | None = Field(
description="Token usage metrics for this execution", default=None
)
- def to_dict(self) -> Dict[str, Any]:
+ def to_dict(self) -> dict[str, Any]:
"""Convert pydantic_output to a dictionary."""
if self.pydantic:
return self.pydantic.model_dump()
@@ -131,10 +122,10 @@ class LiteAgent(FlowTrackable, BaseModel):
role: str = Field(description="Role of the agent")
goal: str = Field(description="Goal of the agent")
backstory: str = Field(description="Backstory of the agent")
- llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field(
+ llm: str | InstanceOf[BaseLLM] | Any | None = Field(
default=None, description="Language model that will run the agent"
)
- tools: List[BaseTool] = Field(
+ tools: list[BaseTool] = Field(
default_factory=list, description="Tools at agent's disposal"
)
@@ -142,7 +133,7 @@ class LiteAgent(FlowTrackable, BaseModel):
max_iterations: int = Field(
default=15, description="Maximum number of iterations for tool usage"
)
- max_execution_time: Optional[int] = Field(
+ max_execution_time: int | None = Field(
default=None, description=". Maximum execution time in seconds"
)
respect_context_window: bool = Field(
@@ -153,25 +144,25 @@ class LiteAgent(FlowTrackable, BaseModel):
default=True,
description="Whether to use stop words to prevent the LLM from using tools",
)
- request_within_rpm_limit: Optional[Callable[[], bool]] = Field(
+ request_within_rpm_limit: Callable[[], bool] | None = Field(
default=None,
description="Callback to check if the request is within the RPM limit",
)
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
# Output and Formatting Properties
- response_format: Optional[Type[BaseModel]] = Field(
+ response_format: type[BaseModel] | None = Field(
default=None, description="Pydantic model for structured output"
)
verbose: bool = Field(
default=False, description="Whether to print execution details"
)
- callbacks: List[Callable] = Field(
+ callbacks: list[Callable] = Field(
default=[], description="Callbacks to be used for the agent"
)
# Guardrail Properties
- guardrail: Optional[Union[Callable[[LiteAgentOutput], Tuple[bool, Any]], str]] = (
+ guardrail: Callable[[LiteAgentOutput], tuple[bool, Any]] | str | None = (
Field(
default=None,
description="Function or string description of a guardrail to validate agent output",
@@ -182,23 +173,23 @@ class LiteAgent(FlowTrackable, BaseModel):
)
# State and Results
- tools_results: List[Dict[str, Any]] = Field(
+ tools_results: list[dict[str, Any]] = Field(
default=[], description="Results of the tools used by the agent."
)
# Reference of Agent
- original_agent: Optional[BaseAgent] = Field(
+ original_agent: BaseAgent | None = Field(
default=None, description="Reference to the agent that created this LiteAgent"
)
# Private Attributes
- _parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list)
+ _parsed_tools: list[CrewStructuredTool] = PrivateAttr(default_factory=list)
_token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
_cache_handler: CacheHandler = PrivateAttr(default_factory=CacheHandler)
_key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4()))
- _messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
+ _messages: list[dict[str, str]] = PrivateAttr(default_factory=list)
_iterations: int = PrivateAttr(default=0)
_printer: Printer = PrivateAttr(default_factory=Printer)
- _guardrail: Optional[Callable] = PrivateAttr(default=None)
+ _guardrail: Callable | None = PrivateAttr(default=None)
_guardrail_retry_count: int = PrivateAttr(default=0)
@model_validator(mode="after")
@@ -242,8 +233,8 @@ class LiteAgent(FlowTrackable, BaseModel):
@field_validator("guardrail", mode="before")
@classmethod
def validate_guardrail_function(
- cls, v: Optional[Union[Callable, str]]
- ) -> Optional[Union[Callable, str]]:
+ cls, v: Callable | str | None
+ ) -> Callable | str | None:
"""Validate that the guardrail function has the correct signature.
If v is a callable, validate that it has the correct signature.
@@ -268,7 +259,7 @@ class LiteAgent(FlowTrackable, BaseModel):
# Check return annotation if present
if sig.return_annotation is not sig.empty:
- if sig.return_annotation == Tuple[bool, Any]:
+ if sig.return_annotation == tuple[bool, Any]:
return v
origin = get_origin(sig.return_annotation)
@@ -291,7 +282,7 @@ class LiteAgent(FlowTrackable, BaseModel):
"""Return the original role for compatibility with tool interfaces."""
return self.role
- def kickoff(self, messages: Union[str, List[Dict[str, str]]]) -> LiteAgentOutput:
+ def kickoff(self, messages: str | list[dict[str, str]]) -> LiteAgentOutput:
"""
Execute the agent with the given messages.
@@ -339,7 +330,7 @@ class LiteAgent(FlowTrackable, BaseModel):
)
raise e
- def _execute_core(self, agent_info: Dict[str, Any]) -> LiteAgentOutput:
+ def _execute_core(self, agent_info: dict[str, Any]) -> LiteAgentOutput:
# Emit event for agent execution start
crewai_event_bus.emit(
self,
@@ -352,7 +343,7 @@ class LiteAgent(FlowTrackable, BaseModel):
# Execute the agent using invoke loop
agent_finish = self._invoke_loop()
- formatted_result: Optional[BaseModel] = None
+ formatted_result: BaseModel | None = None
if self.response_format:
try:
converted_result = convert_to_model(
@@ -366,7 +357,7 @@ class LiteAgent(FlowTrackable, BaseModel):
formatted_result = converted_result
except Exception as e:
self._printer.print(
content=f"Failed to parse output into response format: {str(e)}",
content=f"Failed to parse output into response format: {e!s}",
color="yellow",
)
@@ -434,7 +425,7 @@ class LiteAgent(FlowTrackable, BaseModel):
return output
async def kickoff_async(
- self, messages: Union[str, List[Dict[str, str]]]
+ self, messages: str | list[dict[str, str]]
) -> LiteAgentOutput:
"""
Execute the agent asynchronously with the given messages.
@@ -481,8 +472,8 @@ class LiteAgent(FlowTrackable, BaseModel):
return base_prompt
def _format_messages(
- self, messages: Union[str, List[Dict[str, str]]]
- ) -> List[Dict[str, str]]:
+ self, messages: str | list[dict[str, str]]
+ ) -> list[dict[str, str]]:
"""Format messages for the LLM."""
if isinstance(messages, str):
messages = [{"role": "user", "content": messages}]
@@ -577,18 +568,18 @@ class LiteAgent(FlowTrackable, BaseModel):
i18n=self.i18n,
)
continue
- else:
- handle_unknown_error(self._printer, e)
- raise e
+ handle_unknown_error(self._printer, e)
+ raise e
finally:
self._iterations += 1
- assert isinstance(formatted_answer, AgentFinish)
+ if not isinstance(formatted_answer, AgentFinish):
+ raise ValueError(f"Expected AgentFinish, got {type(formatted_answer)}")
self._show_logs(formatted_answer)
return formatted_answer
- def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
+ def _show_logs(self, formatted_answer: AgentAction | AgentFinish):
"""Show logs for the agent's execution."""
crewai_event_bus.emit(
self,
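Two things worth calling out in this file beyond the annotation modernization. First, the assert near the end of the invoke loop becomes an explicit isinstance check that raises ValueError; that matches the S101 reasoning above, since an assert would silently disappear under python -O. Second, the guardrail validator compares the callable's return annotation against tuple[bool, Any] and falls back to get_origin()/get_args() for other spellings. A standalone sketch of that kind of check (names here are illustrative, not the actual lite_agent.py implementation):

    import inspect
    from typing import Any, Tuple, get_args, get_origin

    def returns_bool_any_tuple(func) -> bool:
        """Best-effort check that func is annotated to return tuple[bool, Any]."""
        annotation = inspect.signature(func).return_annotation
        if annotation is inspect.Signature.empty:
            return True  # unannotated: nothing to verify
        if annotation == tuple[bool, Any]:
            return True  # exact modern-syntax annotation
        # Structural fallback, which also covers typing.Tuple[bool, Any]
        if get_origin(annotation) is tuple:
            args = get_args(annotation)
            return len(args) == 2 and args[0] is bool
        return False

    def my_guardrail(output: str) -> Tuple[bool, Any]:
        return (len(output) > 0, output)

    print(returns_bool_any_tuple(my_guardrail))  # True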

tests/… (lite agent tests)

@@ -1,19 +1,18 @@
from collections import defaultdict
from typing import cast
- from unittest.mock import Mock
+ from unittest.mock import Mock, patch
import pytest
from pydantic import BaseModel, Field
from crewai import LLM, Agent
- from crewai.flow import Flow, start
- from crewai.lite_agent import LiteAgent, LiteAgentOutput
- from crewai.tools import BaseTool
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import LiteAgentExecutionStartedEvent
from crewai.events.types.tool_usage_events import ToolUsageStartedEvent
+ from crewai.flow import Flow, start
+ from crewai.lite_agent import LiteAgent, LiteAgentOutput
from crewai.llms.base_llm import BaseLLM
- from unittest.mock import patch
+ from crewai.tools import BaseTool
# A simple test tool
@@ -37,10 +36,9 @@ class WebSearchTool(BaseTool):
# This is a mock implementation
if "tokyo" in query.lower():
return "Tokyo's population in 2023 was approximately 21 million people in the city proper, and 37 million in the greater metropolitan area."
elif "climate change" in query.lower() and "coral" in query.lower():
if "climate change" in query.lower() and "coral" in query.lower():
return "Climate change severely impacts coral reefs through: 1) Ocean warming causing coral bleaching, 2) Ocean acidification reducing calcification, 3) Sea level rise affecting light availability, 4) Increased storm frequency damaging reef structures. Sources: NOAA Coral Reef Conservation Program, Global Coral Reef Alliance."
else:
return f"Found information about {query}: This is a simulated search result for demonstration purposes."
return f"Found information about {query}: This is a simulated search result for demonstration purposes."
# Define Mock Calculator Tool
@@ -52,11 +50,12 @@ class CalculatorTool(BaseTool):
def _run(self, expression: str) -> str:
"""Calculate the result of a mathematical expression."""
+ import ast
try:
- result = eval(expression, {"__builtins__": {}})
+ result = ast.literal_eval(expression)
return f"The result of {expression} is {result}"
except Exception as e:
return f"Error calculating {expression}: {str(e)}"
return f"Error calculating {expression}: {e!s}"
# Define a custom response format using Pydantic
@@ -522,14 +521,14 @@ def test_lite_agent_with_custom_llm_and_guardrails():
def test_lite_agent_structured_output_with_malformed_json():
"""Test that LiteAgent can handle malformed JSON wrapped in markdown blocks."""
class FounderNames(BaseModel):
names: list[str] = Field(description="List of founder names")
class MockLLMWithMalformedJSON(BaseLLM):
def __init__(self):
super().__init__(model="mock-model")
def call(self, messages, **kwargs):
return '''Thought: I need to extract the founder names
Final Answer: ```json
@@ -537,18 +536,18 @@ Final Answer: ```json
"names": ["John Smith", "Jane Doe"]
}
```'''
def supports_function_calling(self):
return False
def supports_stop_words(self):
return False
def get_context_window_size(self):
return 4096
mock_llm = MockLLMWithMalformedJSON()
agent = Agent(
role="Data Extraction Specialist",
goal="Extract founder names from text",
@@ -556,12 +555,12 @@ Final Answer: ```json
llm=mock_llm,
verbose=True,
)
result = agent.kickoff(
messages="Extract founder names from: 'The company was founded by John Smith and Jane Doe.'",
response_format=FounderNames
)
assert result.pydantic is not None, "Should successfully parse malformed JSON"
assert isinstance(result.pydantic, FounderNames), "Should return correct Pydantic model"
assert result.pydantic.names == ["John Smith", "Jane Doe"], "Should extract correct founder names"
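The stub-LLM pattern this test relies on is split across the hunks above; pulled together, it looks roughly like the following (blank lines and some omitted lines are approximated, so treat this as a sketch rather than the exact test code):

    from crewai.llms.base_llm import BaseLLM

    class StubLLM(BaseLLM):
        """Returns a canned 'Final Answer' wrapped in a markdown fence."""

        def __init__(self):
            super().__init__(model="mock-model")

        def call(self, messages, **kwargs):
            return (
                "Thought: I need to extract the founder names\n"
                "Final Answer: ```json\n"
                '{"names": ["John Smith", "Jane Doe"]}\n'
                "```"
            )

        def supports_function_calling(self):
            return False

        def supports_stop_words(self):
            return False

        def get_context_window_size(self):
            return 4096

Passing an instance of such a stub as llm= to an Agent keeps the test deterministic and offline, while still exercising the JSON-extraction path that response_format depends on.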