From d879be8b66c5331104a552a701e0235d982ec0b5 Mon Sep 17 00:00:00 2001 From: Greyson LaLonde Date: Fri, 19 Sep 2025 22:11:21 -0400 Subject: [PATCH] chore: fix ruff linting issues in agents module fix(agents): linting, import paths, cache key alignment, and static method --- src/crewai/agents/__init__.py | 11 +- .../agent_adapters/base_agent_adapter.py | 14 +- .../agent_adapters/base_tool_adapter.py | 13 +- src/crewai/agents/agent_builder/base_agent.py | 53 ++++--- .../base_agent_executor_mixin.py | 10 +- src/crewai/agents/crew_agent_executor.py | 24 ++-- src/crewai/agents/parser.py | 32 ++--- src/crewai/agents/tools_handler.py | 14 +- src/crewai/lite_agent.py | 107 +++++++-------- src/crewai/tools/tool_usage.py | 129 +++++++++--------- src/crewai/utilities/agent_utils.py | 100 +++++++------- .../utilities/crew_pydantic_output_parser.py | 11 +- src/crewai/utilities/tool_utils.py | 24 ++-- tests/agents/test_agent.py | 22 +-- tests/agents/test_crew_agent_parser.py | 12 +- tests/test_crew.py | 4 +- 16 files changed, 286 insertions(+), 294 deletions(-) diff --git a/src/crewai/agents/__init__.py b/src/crewai/agents/__init__.py index 9e400098c..541d4ebaf 100644 --- a/src/crewai/agents/__init__.py +++ b/src/crewai/agents/__init__.py @@ -1,5 +1,12 @@ from crewai.agents.cache.cache_handler import CacheHandler -from crewai.agents.parser import parse, AgentAction, AgentFinish, OutputParserException +from crewai.agents.parser import AgentAction, AgentFinish, OutputParserError, parse from crewai.agents.tools_handler import ToolsHandler -__all__ = ["CacheHandler", "parse", "AgentAction", "AgentFinish", "OutputParserException", "ToolsHandler"] +__all__ = [ + "AgentAction", + "AgentFinish", + "CacheHandler", + "OutputParserError", + "ToolsHandler", + "parse", +] diff --git a/src/crewai/agents/agent_adapters/base_agent_adapter.py b/src/crewai/agents/agent_adapters/base_agent_adapter.py index 6b8a151d6..4bfcf2ab7 100644 --- a/src/crewai/agents/agent_adapters/base_agent_adapter.py +++ b/src/crewai/agents/agent_adapters/base_agent_adapter.py @@ -1,7 +1,7 @@ from abc import ABC, abstractmethod -from typing import Any, Dict, List, Optional +from typing import Any -from pydantic import PrivateAttr +from pydantic import ConfigDict, PrivateAttr from crewai.agent import BaseAgent from crewai.tools import BaseTool @@ -16,22 +16,21 @@ class BaseAgentAdapter(BaseAgent, ABC): """ adapted_structured_output: bool = False - _agent_config: Optional[Dict[str, Any]] = PrivateAttr(default=None) + _agent_config: dict[str, Any] | None = PrivateAttr(default=None) - model_config = {"arbitrary_types_allowed": True} + model_config = ConfigDict(arbitrary_types_allowed=True) - def __init__(self, agent_config: Optional[Dict[str, Any]] = None, **kwargs: Any): + def __init__(self, agent_config: dict[str, Any] | None = None, **kwargs: Any): super().__init__(adapted_agent=True, **kwargs) self._agent_config = agent_config @abstractmethod - def configure_tools(self, tools: Optional[List[BaseTool]] = None) -> None: + def configure_tools(self, tools: list[BaseTool] | None = None) -> None: """Configure and adapt tools for the specific agent implementation. Args: tools: Optional list of BaseTool instances to be configured """ - pass def configure_structured_output(self, structured_output: Any) -> None: """Configure the structured output for the specific agent implementation. 
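Aside from the typing cleanup, the adapter hunk above replaces the bare dict assigned to model_config with pydantic's ConfigDict, which type checkers can validate key by key. A minimal sketch of that pattern on a hypothetical standalone adapter (the class name and private field below are invented for illustration):

    from typing import Any

    from pydantic import BaseModel, ConfigDict, PrivateAttr

    class StandaloneAdapter(BaseModel):
        # ConfigDict is a typed mapping, so unknown or misspelled options are
        # flagged statically; a plain dict literal assigned to model_config is not.
        model_config = ConfigDict(arbitrary_types_allowed=True)

        _agent_config: dict[str, Any] | None = PrivateAttr(default=None)
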
@@ -39,4 +38,3 @@ class BaseAgentAdapter(BaseAgent, ABC): Args: structured_output: The structured output to be configured """ - pass diff --git a/src/crewai/agents/agent_adapters/base_tool_adapter.py b/src/crewai/agents/agent_adapters/base_tool_adapter.py index f1ee438a8..513090d64 100644 --- a/src/crewai/agents/agent_adapters/base_tool_adapter.py +++ b/src/crewai/agents/agent_adapters/base_tool_adapter.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from typing import Any, List, Optional +from typing import Any from crewai.tools.base_tool import BaseTool @@ -12,23 +12,22 @@ class BaseToolAdapter(ABC): different frameworks and platforms. """ - original_tools: List[BaseTool] - converted_tools: List[Any] + original_tools: list[BaseTool] + converted_tools: list[Any] - def __init__(self, tools: Optional[List[BaseTool]] = None): + def __init__(self, tools: list[BaseTool] | None = None): self.original_tools = tools or [] self.converted_tools = [] @abstractmethod - def configure_tools(self, tools: List[BaseTool]) -> None: + def configure_tools(self, tools: list[BaseTool]) -> None: """Configure and convert tools for the specific implementation. Args: tools: List of BaseTool instances to be configured and converted """ - pass - def tools(self) -> List[Any]: + def tools(self) -> list[Any]: """Return all converted tools.""" return self.converted_tools diff --git a/src/crewai/agents/agent_builder/base_agent.py b/src/crewai/agents/agent_builder/base_agent.py index ba2596f63..41344aea2 100644 --- a/src/crewai/agents/agent_builder/base_agent.py +++ b/src/crewai/agents/agent_builder/base_agent.py @@ -1,8 +1,9 @@ import uuid from abc import ABC, abstractmethod +from collections.abc import Callable from copy import copy as shallow_copy from hashlib import md5 -from typing import Any, Callable, Dict, List, Optional, TypeVar +from typing import Any, TypeVar from pydantic import ( UUID4, @@ -25,7 +26,6 @@ from crewai.security.security_config import SecurityConfig from crewai.tools.base_tool import BaseTool, Tool from crewai.utilities import I18N, Logger, RPMController from crewai.utilities.config import process_config -from crewai.utilities.converter import Converter from crewai.utilities.string_utils import interpolate_only T = TypeVar("T", bound="BaseAgent") @@ -81,17 +81,17 @@ class BaseAgent(ABC, BaseModel): __hash__ = object.__hash__ # type: ignore _logger: Logger = PrivateAttr(default_factory=lambda: Logger(verbose=False)) - _rpm_controller: Optional[RPMController] = PrivateAttr(default=None) + _rpm_controller: RPMController | None = PrivateAttr(default=None) _request_within_rpm_limit: Any = PrivateAttr(default=None) - _original_role: Optional[str] = PrivateAttr(default=None) - _original_goal: Optional[str] = PrivateAttr(default=None) - _original_backstory: Optional[str] = PrivateAttr(default=None) + _original_role: str | None = PrivateAttr(default=None) + _original_goal: str | None = PrivateAttr(default=None) + _original_backstory: str | None = PrivateAttr(default=None) _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess) id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True) role: str = Field(description="Role of the agent") goal: str = Field(description="Objective of the agent") backstory: str = Field(description="Backstory of the agent") - config: Optional[Dict[str, Any]] = Field( + config: dict[str, Any] | None = Field( description="Configuration for the agent", default=None, exclude=True ) cache: bool = Field( @@ -100,7 +100,7 @@ class BaseAgent(ABC, BaseModel): 
verbose: bool = Field( default=False, description="Verbose mode for the Agent Execution" ) - max_rpm: Optional[int] = Field( + max_rpm: int | None = Field( default=None, description="Maximum number of requests per minute for the agent execution to be respected.", ) @@ -108,7 +108,7 @@ class BaseAgent(ABC, BaseModel): default=False, description="Enable agent to delegate and ask questions among each other.", ) - tools: Optional[List[BaseTool]] = Field( + tools: list[BaseTool] | None = Field( default_factory=list, description="Tools at agents' disposal" ) max_iter: int = Field( @@ -122,27 +122,27 @@ class BaseAgent(ABC, BaseModel): ) crew: Any = Field(default=None, description="Crew to which the agent belongs.") i18n: I18N = Field(default=I18N(), description="Internationalization settings.") - cache_handler: Optional[InstanceOf[CacheHandler]] = Field( + cache_handler: InstanceOf[CacheHandler] | None = Field( default=None, description="An instance of the CacheHandler class." ) tools_handler: InstanceOf[ToolsHandler] = Field( default_factory=ToolsHandler, description="An instance of the ToolsHandler class.", ) - tools_results: List[Dict[str, Any]] = Field( + tools_results: list[dict[str, Any]] = Field( default=[], description="Results of the tools used by the agent." ) - max_tokens: Optional[int] = Field( + max_tokens: int | None = Field( default=None, description="Maximum number of tokens for the agent's execution." ) - knowledge: Optional[Knowledge] = Field( + knowledge: Knowledge | None = Field( default=None, description="Knowledge for the agent." ) - knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field( + knowledge_sources: list[BaseKnowledgeSource] | None = Field( default=None, description="Knowledge sources for the agent.", ) - knowledge_storage: Optional[Any] = Field( + knowledge_storage: Any | None = Field( default=None, description="Custom knowledge storage for the agent.", ) @@ -150,13 +150,13 @@ class BaseAgent(ABC, BaseModel): default_factory=SecurityConfig, description="Security configuration for the agent, including fingerprinting.", ) - callbacks: List[Callable] = Field( + callbacks: list[Callable] = Field( default=[], description="Callbacks to be used for the agent" ) adapted_agent: bool = Field( default=False, description="Whether the agent is adapted" ) - knowledge_config: Optional[KnowledgeConfig] = Field( + knowledge_config: KnowledgeConfig | None = Field( default=None, description="Knowledge configuration for the agent such as limits and threshold", ) @@ -168,7 +168,7 @@ class BaseAgent(ABC, BaseModel): @field_validator("tools") @classmethod - def validate_tools(cls, tools: List[Any]) -> List[BaseTool]: + def validate_tools(cls, tools: list[Any]) -> list[BaseTool]: """Validate and process the tools provided to the agent. 
This method ensures that each tool is either an instance of BaseTool @@ -221,7 +221,7 @@ class BaseAgent(ABC, BaseModel): @field_validator("id", mode="before") @classmethod - def _deny_user_set_id(cls, v: Optional[UUID4]) -> None: + def _deny_user_set_id(cls, v: UUID4 | None) -> None: if v: raise PydanticCustomError( "may_not_set_field", "This field is not to be set by the user.", {} @@ -252,8 +252,8 @@ class BaseAgent(ABC, BaseModel): def execute_task( self, task: Any, - context: Optional[str] = None, - tools: Optional[List[BaseTool]] = None, + context: str | None = None, + tools: list[BaseTool] | None = None, ) -> str: pass @@ -262,9 +262,8 @@ class BaseAgent(ABC, BaseModel): pass @abstractmethod - def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]: + def get_delegation_tools(self, agents: list["BaseAgent"]) -> list[BaseTool]: """Set the task tools that init BaseAgenTools class.""" - pass def copy(self: T) -> T: # type: ignore # Signature of "copy" incompatible with supertype "BaseModel" """Create a deep copy of the Agent.""" @@ -309,7 +308,7 @@ class BaseAgent(ABC, BaseModel): copied_data = self.model_dump(exclude=exclude) copied_data = {k: v for k, v in copied_data.items() if v is not None} - copied_agent = type(self)( + return type(self)( **copied_data, llm=existing_llm, tools=self.tools, @@ -318,9 +317,7 @@ class BaseAgent(ABC, BaseModel): knowledge_storage=copied_knowledge_storage, ) - return copied_agent - - def interpolate_inputs(self, inputs: Dict[str, Any]) -> None: + def interpolate_inputs(self, inputs: dict[str, Any]) -> None: """Interpolate inputs into the agent description and backstory.""" if self._original_role is None: self._original_role = self.role @@ -362,5 +359,5 @@ class BaseAgent(ABC, BaseModel): self._rpm_controller = rpm_controller self.create_agent_executor() - def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None): + def set_knowledge(self, crew_embedder: dict[str, Any] | None = None): pass diff --git a/src/crewai/agents/agent_builder/base_agent_executor_mixin.py b/src/crewai/agents/agent_builder/base_agent_executor_mixin.py index 344a526de..60de79dcc 100644 --- a/src/crewai/agents/agent_builder/base_agent_executor_mixin.py +++ b/src/crewai/agents/agent_builder/base_agent_executor_mixin.py @@ -1,13 +1,13 @@ import time -from typing import TYPE_CHECKING, Dict, List +from typing import TYPE_CHECKING +from crewai.events.event_listener import event_listener from crewai.memory.entity.entity_memory_item import EntityMemoryItem from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem from crewai.utilities import I18N from crewai.utilities.converter import ConverterError from crewai.utilities.evaluators.task_evaluator import TaskEvaluator from crewai.utilities.printer import Printer -from crewai.events.event_listener import event_listener if TYPE_CHECKING: from crewai.agents.agent_builder.base_agent import BaseAgent @@ -21,7 +21,7 @@ class CrewAgentExecutorMixin: task: "Task" iterations: int max_iter: int - messages: List[Dict[str, str]] + messages: list[dict[str, str]] _i18n: I18N _printer: Printer = Printer() @@ -46,7 +46,6 @@ class CrewAgentExecutorMixin: ) except Exception as e: print(f"Failed to add to short term memory: {e}") - pass def _create_external_memory(self, output) -> None: """Create and save a external-term memory item if conditions are met.""" @@ -67,7 +66,6 @@ class CrewAgentExecutorMixin: ) except Exception as e: print(f"Failed to add to external memory: {e}") - pass def 
_create_long_term_memory(self, output) -> None: """Create and save long-term and entity memory items based on evaluation.""" @@ -113,10 +111,8 @@ class CrewAgentExecutorMixin: self.crew._entity_memory.save(entity_memories) except AttributeError as e: print(f"Missing attributes for long term memory: {e}") - pass except Exception as e: print(f"Failed to add to long term memory: {e}") - pass elif ( self.crew and self.crew._long_term_memory diff --git a/src/crewai/agents/crew_agent_executor.py b/src/crewai/agents/crew_agent_executor.py index f4a5cebe3..d912bdf3c 100644 --- a/src/crewai/agents/crew_agent_executor.py +++ b/src/crewai/agents/crew_agent_executor.py @@ -12,7 +12,7 @@ from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecu from crewai.agents.parser import ( AgentAction, AgentFinish, - OutputParserException, + OutputParserError, ) from crewai.agents.tools_handler import ToolsHandler from crewai.events.event_bus import crewai_event_bus @@ -228,7 +228,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): self._invoke_step_callback(formatted_answer) self._append_message(formatted_answer.text) - except OutputParserException as e: + except OutputParserError as e: # noqa: PERF203 formatted_answer = handle_output_parser_exception( e=e, messages=self.messages, @@ -251,17 +251,20 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): i18n=self._i18n, ) continue - else: - handle_unknown_error(self._printer, e) - raise e + handle_unknown_error(self._printer, e) + raise e finally: self.iterations += 1 # During the invoke loop, formatted_answer alternates between AgentAction # (when the agent is using tools) and eventually becomes AgentFinish - # (when the agent reaches a final answer). This assertion confirms we've + # (when the agent reaches a final answer). This check confirms we've # reached a final answer and helps type checking understand this transition. - assert isinstance(formatted_answer, AgentFinish) + if not isinstance(formatted_answer, AgentFinish): + raise RuntimeError( + "Agent execution ended without reaching a final answer. " + f"Got {type(formatted_answer).__name__} instead of AgentFinish." + ) self._show_logs(formatted_answer) return formatted_answer @@ -324,9 +327,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): self.agent, AgentLogsStartedEvent( agent_role=self.agent.role, - task_description=( - getattr(self.task, "description") if self.task else "Not Found" - ), + task_description=(self.task.description if self.task else "Not Found"), verbose=self.agent.verbose or (hasattr(self, "crew") and getattr(self.crew, "verbose", False)), ), @@ -415,8 +416,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): """ prompt = prompt.replace("{input}", inputs["input"]) prompt = prompt.replace("{tool_names}", inputs["tool_names"]) - prompt = prompt.replace("{tools}", inputs["tools"]) - return prompt + return prompt.replace("{tools}", inputs["tools"]) def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish: """Process human feedback. diff --git a/src/crewai/agents/parser.py b/src/crewai/agents/parser.py index 03479e4c3..912983b11 100644 --- a/src/crewai/agents/parser.py +++ b/src/crewai/agents/parser.py @@ -7,12 +7,12 @@ AgentAction or AgentFinish objects. 
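The executor hunk above swaps the type-narrowing assert for an explicit RuntimeError, so the guard still fires under python -O and reports which type actually came out of the invoke loop. A rough self-contained sketch of the pattern, with Action and Finish standing in for AgentAction and AgentFinish:

    from dataclasses import dataclass

    @dataclass
    class Action:
        tool: str

    @dataclass
    class Finish:
        output: str

    def run_loop(steps: list[Action | Finish]) -> Finish:
        answer: Action | Finish | None = None
        for step in steps:
            answer = step
            if isinstance(answer, Finish):
                break
        # assert isinstance(answer, Finish) would vanish under -O; raising keeps
        # the invariant enforced and names the offending type in the message.
        if not isinstance(answer, Finish):
            raise RuntimeError(
                "loop ended without a final answer; "
                f"got {type(answer).__name__} instead of Finish"
            )
        return answer
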
from dataclasses import dataclass -from json_repair import repair_json +from json_repair import repair_json # type: ignore[import-untyped] from crewai.agents.constants import ( + ACTION_INPUT_ONLY_REGEX, ACTION_INPUT_REGEX, ACTION_REGEX, - ACTION_INPUT_ONLY_REGEX, FINAL_ANSWER_ACTION, MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE, MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, @@ -43,7 +43,7 @@ class AgentFinish: text: str -class OutputParserException(Exception): +class OutputParserError(Exception): """Exception raised when output parsing fails. Attributes: @@ -51,7 +51,7 @@ class OutputParserException(Exception): """ def __init__(self, error: str) -> None: - """Initialize OutputParserException. + """Initialize OutputParserError. Args: error: The error message. @@ -87,7 +87,7 @@ def parse(text: str) -> AgentAction | AgentFinish: AgentAction or AgentFinish based on the content. Raises: - OutputParserException: If the text format is invalid. + OutputParserError: If the text format is invalid. """ thought = _extract_thought(text) includes_answer = FINAL_ANSWER_ACTION in text @@ -104,7 +104,7 @@ def parse(text: str) -> AgentAction | AgentFinish: final_answer = final_answer[:-3].rstrip() return AgentFinish(thought=thought, output=final_answer, text=text) - elif action_match: + if action_match: action = action_match.group(1) clean_action = _clean_action(action) @@ -118,19 +118,18 @@ def parse(text: str) -> AgentAction | AgentFinish: ) if not ACTION_REGEX.search(text): - raise OutputParserException( + raise OutputParserError( f"{MISSING_ACTION_AFTER_THOUGHT_ERROR_MESSAGE}\n{_I18N.slice('final_answer_format')}", ) - elif not ACTION_INPUT_ONLY_REGEX.search(text): - raise OutputParserException( + if not ACTION_INPUT_ONLY_REGEX.search(text): + raise OutputParserError( MISSING_ACTION_INPUT_AFTER_ACTION_ERROR_MESSAGE, ) - else: - err_format = _I18N.slice("format_without_tools") - error = f"{err_format}" - raise OutputParserException( - error, - ) + err_format = _I18N.slice("format_without_tools") + error = f"{err_format}" + raise OutputParserError( + error, + ) def _extract_thought(text: str) -> str: @@ -149,8 +148,7 @@ def _extract_thought(text: str) -> str: return "" thought = text[:thought_index].strip() # Remove any triple backticks from the thought string - thought = thought.replace("```", "").strip() - return thought + return thought.replace("```", "").strip() def _clean_action(text: str) -> str: diff --git a/src/crewai/agents/tools_handler.py b/src/crewai/agents/tools_handler.py index e3fd7d336..ac7e0799b 100644 --- a/src/crewai/agents/tools_handler.py +++ b/src/crewai/agents/tools_handler.py @@ -1,8 +1,10 @@ """Tools handler for managing tool execution and caching.""" +import json + +from crewai.agents.cache.cache_handler import CacheHandler from crewai.tools.cache_tools.cache_tools import CacheTools from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling -from crewai.agents.cache.cache_handler import CacheHandler class ToolsHandler: @@ -37,8 +39,16 @@ class ToolsHandler: """ self.last_used_tool = calling if self.cache and should_cache and calling.tool_name != CacheTools().name: + # Convert arguments to string for cache + input_str = "" + if calling.arguments: + if isinstance(calling.arguments, dict): + input_str = json.dumps(calling.arguments) + else: + input_str = str(calling.arguments) + self.cache.add( tool=calling.tool_name, - input=calling.arguments, + input=input_str, output=output, ) diff --git a/src/crewai/lite_agent.py b/src/crewai/lite_agent.py index 
d1c9e1dc1..b2ffc322f 100644 --- a/src/crewai/lite_agent.py +++ b/src/crewai/lite_agent.py @@ -1,35 +1,24 @@ import asyncio import inspect import uuid +from collections.abc import Callable from typing import ( Any, - Callable, - Dict, - List, - Optional, - Tuple, - Type, - Union, cast, get_args, get_origin, ) - -try: - from typing import Self -except ImportError: - from typing_extensions import Self - from pydantic import ( UUID4, BaseModel, Field, InstanceOf, PrivateAttr, - model_validator, field_validator, + model_validator, ) +from typing_extensions import Self from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess @@ -37,14 +26,20 @@ from crewai.agents.cache import CacheHandler from crewai.agents.parser import ( AgentAction, AgentFinish, - OutputParserException, + OutputParserError, ) +from crewai.events.event_bus import crewai_event_bus +from crewai.events.types.agent_events import ( + LiteAgentExecutionCompletedEvent, + LiteAgentExecutionErrorEvent, + LiteAgentExecutionStartedEvent, +) +from crewai.events.types.logging_events import AgentLogsExecutionEvent from crewai.flow.flow_trackable import FlowTrackable from crewai.llm import LLM, BaseLLM from crewai.tools.base_tool import BaseTool from crewai.tools.structured_tool import CrewStructuredTool from crewai.utilities import I18N -from crewai.utilities.guardrail import process_guardrail from crewai.utilities.agent_utils import ( enforce_rpm_limit, format_message_for_llm, @@ -62,14 +57,7 @@ from crewai.utilities.agent_utils import ( render_text_description_and_args, ) from crewai.utilities.converter import generate_model_description -from crewai.events.types.logging_events import AgentLogsExecutionEvent -from crewai.events.types.agent_events import ( - LiteAgentExecutionCompletedEvent, - LiteAgentExecutionErrorEvent, - LiteAgentExecutionStartedEvent, -) -from crewai.events.event_bus import crewai_event_bus - +from crewai.utilities.guardrail import process_guardrail from crewai.utilities.llm_utils import create_llm from crewai.utilities.printer import Printer from crewai.utilities.token_counter_callback import TokenCalcHandler @@ -82,15 +70,15 @@ class LiteAgentOutput(BaseModel): model_config = {"arbitrary_types_allowed": True} raw: str = Field(description="Raw output of the agent", default="") - pydantic: Optional[BaseModel] = Field( + pydantic: BaseModel | None = Field( description="Pydantic output of the agent", default=None ) agent_role: str = Field(description="Role of the agent that produced this output") - usage_metrics: Optional[Dict[str, Any]] = Field( + usage_metrics: dict[str, Any] | None = Field( description="Token usage metrics for this execution", default=None ) - def to_dict(self) -> Dict[str, Any]: + def to_dict(self) -> dict[str, Any]: """Convert pydantic_output to a dictionary.""" if self.pydantic: return self.pydantic.model_dump() @@ -130,10 +118,10 @@ class LiteAgent(FlowTrackable, BaseModel): role: str = Field(description="Role of the agent") goal: str = Field(description="Goal of the agent") backstory: str = Field(description="Backstory of the agent") - llm: Optional[Union[str, InstanceOf[BaseLLM], Any]] = Field( + llm: str | InstanceOf[BaseLLM] | Any | None = Field( default=None, description="Language model that will run the agent" ) - tools: List[BaseTool] = Field( + tools: list[BaseTool] = Field( default_factory=list, description="Tools at agent's disposal" ) @@ -141,7 +129,7 @@ class LiteAgent(FlowTrackable, BaseModel): 
max_iterations: int = Field( default=15, description="Maximum number of iterations for tool usage" ) - max_execution_time: Optional[int] = Field( + max_execution_time: int | None = Field( default=None, description=". Maximum execution time in seconds" ) respect_context_window: bool = Field( @@ -152,52 +140,50 @@ class LiteAgent(FlowTrackable, BaseModel): default=True, description="Whether to use stop words to prevent the LLM from using tools", ) - request_within_rpm_limit: Optional[Callable[[], bool]] = Field( + request_within_rpm_limit: Callable[[], bool] | None = Field( default=None, description="Callback to check if the request is within the RPM limit", ) i18n: I18N = Field(default=I18N(), description="Internationalization settings.") # Output and Formatting Properties - response_format: Optional[Type[BaseModel]] = Field( + response_format: type[BaseModel] | None = Field( default=None, description="Pydantic model for structured output" ) verbose: bool = Field( default=False, description="Whether to print execution details" ) - callbacks: List[Callable] = Field( + callbacks: list[Callable] = Field( default=[], description="Callbacks to be used for the agent" ) # Guardrail Properties - guardrail: Optional[Union[Callable[[LiteAgentOutput], Tuple[bool, Any]], str]] = ( - Field( - default=None, - description="Function or string description of a guardrail to validate agent output", - ) + guardrail: Callable[[LiteAgentOutput], tuple[bool, Any]] | str | None = Field( + default=None, + description="Function or string description of a guardrail to validate agent output", ) guardrail_max_retries: int = Field( default=3, description="Maximum number of retries when guardrail fails" ) # State and Results - tools_results: List[Dict[str, Any]] = Field( + tools_results: list[dict[str, Any]] = Field( default=[], description="Results of the tools used by the agent." ) # Reference of Agent - original_agent: Optional[BaseAgent] = Field( + original_agent: BaseAgent | None = Field( default=None, description="Reference to the agent that created this LiteAgent" ) # Private Attributes - _parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list) + _parsed_tools: list[CrewStructuredTool] = PrivateAttr(default_factory=list) _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess) _cache_handler: CacheHandler = PrivateAttr(default_factory=CacheHandler) _key: str = PrivateAttr(default_factory=lambda: str(uuid.uuid4())) - _messages: List[Dict[str, str]] = PrivateAttr(default_factory=list) + _messages: list[dict[str, str]] = PrivateAttr(default_factory=list) _iterations: int = PrivateAttr(default=0) _printer: Printer = PrivateAttr(default_factory=Printer) - _guardrail: Optional[Callable] = PrivateAttr(default=None) + _guardrail: Callable | None = PrivateAttr(default=None) _guardrail_retry_count: int = PrivateAttr(default=0) @model_validator(mode="after") @@ -241,8 +227,8 @@ class LiteAgent(FlowTrackable, BaseModel): @field_validator("guardrail", mode="before") @classmethod def validate_guardrail_function( - cls, v: Optional[Union[Callable, str]] - ) -> Optional[Union[Callable, str]]: + cls, v: Callable | str | None + ) -> Callable | str | None: """Validate that the guardrail function has the correct signature. If v is a callable, validate that it has the correct signature. 
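The validator above only inspects the callable's shape: one positional parameter and, if annotated, a tuple[bool, Any] return. A hedged example of a guardrail that should satisfy that check (the 50-word threshold and feedback wording are invented; output is assumed to be a LiteAgentOutput carrying a raw string):

    from typing import Any

    def minimum_length_guardrail(output) -> tuple[bool, Any]:
        """Return (passed, feedback_or_result) in the shape the executor expects."""
        word_count = len(output.raw.split())
        if word_count < 50:
            return False, f"Answer too short ({word_count} words); please expand."
        return True, output.raw

    # agent = LiteAgent(..., guardrail=minimum_length_guardrail, guardrail_max_retries=3)
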
@@ -267,7 +253,7 @@ class LiteAgent(FlowTrackable, BaseModel): # Check return annotation if present if sig.return_annotation is not sig.empty: - if sig.return_annotation == Tuple[bool, Any]: + if sig.return_annotation == tuple[bool, Any]: return v origin = get_origin(sig.return_annotation) @@ -290,7 +276,7 @@ class LiteAgent(FlowTrackable, BaseModel): """Return the original role for compatibility with tool interfaces.""" return self.role - def kickoff(self, messages: Union[str, List[Dict[str, str]]]) -> LiteAgentOutput: + def kickoff(self, messages: str | list[dict[str, str]]) -> LiteAgentOutput: """ Execute the agent with the given messages. @@ -338,7 +324,7 @@ class LiteAgent(FlowTrackable, BaseModel): ) raise e - def _execute_core(self, agent_info: Dict[str, Any]) -> LiteAgentOutput: + def _execute_core(self, agent_info: dict[str, Any]) -> LiteAgentOutput: # Emit event for agent execution start crewai_event_bus.emit( self, @@ -351,7 +337,7 @@ class LiteAgent(FlowTrackable, BaseModel): # Execute the agent using invoke loop agent_finish = self._invoke_loop() - formatted_result: Optional[BaseModel] = None + formatted_result: BaseModel | None = None if self.response_format: try: # Cast to BaseModel to ensure type safety @@ -360,7 +346,7 @@ class LiteAgent(FlowTrackable, BaseModel): formatted_result = result except Exception as e: self._printer.print( - content=f"Failed to parse output into response format: {str(e)}", + content=f"Failed to parse output into response format: {e!s}", color="yellow", ) @@ -428,7 +414,7 @@ class LiteAgent(FlowTrackable, BaseModel): return output async def kickoff_async( - self, messages: Union[str, List[Dict[str, str]]] + self, messages: str | list[dict[str, str]] ) -> LiteAgentOutput: """ Execute the agent asynchronously with the given messages. @@ -475,8 +461,8 @@ class LiteAgent(FlowTrackable, BaseModel): return base_prompt def _format_messages( - self, messages: Union[str, List[Dict[str, str]]] - ) -> List[Dict[str, str]]: + self, messages: str | list[dict[str, str]] + ) -> list[dict[str, str]]: """Format messages for the LLM.""" if isinstance(messages, str): messages = [{"role": "user", "content": messages}] @@ -548,7 +534,7 @@ class LiteAgent(FlowTrackable, BaseModel): ) self._append_message(formatted_answer.text, role="assistant") - except OutputParserException as e: + except OutputParserError as e: # noqa: PERF203 formatted_answer = handle_output_parser_exception( e=e, messages=self._messages, @@ -571,18 +557,21 @@ class LiteAgent(FlowTrackable, BaseModel): i18n=self.i18n, ) continue - else: - handle_unknown_error(self._printer, e) - raise e + handle_unknown_error(self._printer, e) + raise e finally: self._iterations += 1 - assert isinstance(formatted_answer, AgentFinish) + if not isinstance(formatted_answer, AgentFinish): + raise RuntimeError( + "Agent execution ended without reaching a final answer. " + f"Got {type(formatted_answer).__name__} instead of AgentFinish." 
+ ) self._show_logs(formatted_answer) return formatted_answer - def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]): + def _show_logs(self, formatted_answer: AgentAction | AgentFinish): """Show logs for the agent's execution.""" crewai_event_bus.emit( self, diff --git a/src/crewai/tools/tool_usage.py b/src/crewai/tools/tool_usage.py index 5b64ae76a..7ef05f347 100644 --- a/src/crewai/tools/tool_usage.py +++ b/src/crewai/tools/tool_usage.py @@ -5,12 +5,20 @@ import time from difflib import SequenceMatcher from json import JSONDecodeError from textwrap import dedent -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Union import json5 -from json_repair import repair_json +from json_repair import repair_json # type: ignore[import-untyped,import-error] from crewai.agents.tools_handler import ToolsHandler +from crewai.events.event_bus import crewai_event_bus +from crewai.events.types.tool_usage_events import ( + ToolSelectionErrorEvent, + ToolUsageErrorEvent, + ToolUsageFinishedEvent, + ToolUsageStartedEvent, + ToolValidateInputErrorEvent, +) from crewai.task import Task from crewai.telemetry import Telemetry from crewai.tools.structured_tool import CrewStructuredTool @@ -20,14 +28,6 @@ from crewai.utilities.agent_utils import ( get_tool_names, render_text_description_and_args, ) -from crewai.events.event_bus import crewai_event_bus -from crewai.events.types.tool_usage_events import ( - ToolSelectionErrorEvent, - ToolUsageErrorEvent, - ToolUsageFinishedEvent, - ToolUsageStartedEvent, - ToolValidateInputErrorEvent, -) if TYPE_CHECKING: from crewai.agents.agent_builder.base_agent import BaseAgent @@ -44,7 +44,7 @@ OPENAI_BIGGER_MODELS = [ ] -class ToolUsageErrorException(Exception): +class ToolUsageError(Exception): """Exception raised for errors in the tool usage.""" def __init__(self, message: str) -> None: @@ -60,7 +60,6 @@ class ToolUsage: task: Task being executed. tools_handler: Tools handler that will manage the tool usage. tools: List of tools available for the agent. - original_tools: Original tools available for the agent before being converted to BaseTool. tools_description: Description of the tools available for the agent. tools_names: Names of the tools available for the agent. function_calling_llm: Language model to be used for the tool usage. 
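ToolUsageErrorException and OutputParserException are renamed here to ToolUsageError and OutputParserError to satisfy ruff's N818 suffix rule; the parser tests further down keep compiling by aliasing the new name on import, and external callers can use the same bridge. A short sketch of that approach (it assumes no other module still re-exports the old names):

    # Old call sites keep their spelling by aliasing the renamed exception.
    from crewai.agents.parser import OutputParserError as OutputParserException
    from crewai.agents.parser import parse

    def parse_or_report(text: str) -> str:
        try:
            return str(parse(text))
        except OutputParserException as e:  # still the legacy name locally
            return f"parse failed: {e}"
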
@@ -68,13 +67,13 @@ class ToolUsage: def __init__( self, - tools_handler: Optional[ToolsHandler], - tools: List[CrewStructuredTool], - task: Optional[Task], + tools_handler: ToolsHandler | None, + tools: list[CrewStructuredTool], + task: Task | None, function_calling_llm: Any, - agent: Optional[Union["BaseAgent", "LiteAgent"]] = None, + agent: Union["BaseAgent", "LiteAgent"] | None = None, action: Any = None, - fingerprint_context: Optional[Dict[str, str]] = None, + fingerprint_context: dict[str, str] | None = None, ) -> None: self._i18n: I18N = agent.i18n if agent else I18N() self._printer: Printer = Printer() @@ -105,9 +104,9 @@ class ToolUsage: return self._tool_calling(tool_string) def use( - self, calling: Union[ToolCalling, InstructorToolCalling], tool_string: str + self, calling: ToolCalling | InstructorToolCalling, tool_string: str ) -> str: - if isinstance(calling, ToolUsageErrorException): + if isinstance(calling, ToolUsageError): error = calling.message if self.agent and self.agent.verbose: self._printer.print(content=f"\n\n{error}\n", color="red") @@ -130,8 +129,7 @@ class ToolUsage: and tool.name == self._i18n.tools("add_image")["name"] # type: ignore ): try: - result = self._use(tool_string=tool_string, tool=tool, calling=calling) - return result + return self._use(tool_string=tool_string, tool=tool, calling=calling) except Exception as e: error = getattr(e, "message", str(e)) @@ -147,7 +145,7 @@ class ToolUsage: self, tool_string: str, tool: CrewStructuredTool, - calling: Union[ToolCalling, InstructorToolCalling], + calling: ToolCalling | InstructorToolCalling, ) -> str: if self._check_tool_repeated_usage(calling=calling): # type: ignore # _check_tool_repeated_usage of "ToolUsage" does not return a value (it only ever returns None) try: @@ -159,8 +157,7 @@ class ToolUsage: tool_name=tool.name, attempts=self._run_attempts, ) - result = self._format_result(result=result) # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None) - return result # type: ignore # Fix the return type of this function + return self._format_result(result=result) # type: ignore # "_format_result" of "ToolUsage" does not return a value (it only ever returns None) except Exception: if self.task: @@ -176,8 +173,9 @@ class ToolUsage: "agent": self.agent, } - if self.agent.fingerprint: - event_data.update(self.agent.fingerprint) + # TODO: Investigate fingerprint attribute availability on BaseAgent/LiteAgent + if self.agent.fingerprint: # type: ignore + event_data.update(self.agent.fingerprint) # type: ignore if self.task: event_data["task_name"] = self.task.name or self.task.description event_data["task_id"] = str(self.task.id) @@ -188,8 +186,17 @@ class ToolUsage: result = None # type: ignore if self.tools_handler and self.tools_handler.cache: + input_str = "" + if calling.arguments: + if isinstance(calling.arguments, dict): + import json + + input_str = json.dumps(calling.arguments) + else: + input_str = str(calling.arguments) + result = self.tools_handler.cache.read( - tool=calling.tool_name, input=calling.arguments + tool=calling.tool_name, input=input_str ) # type: ignore from_cache = result is not None @@ -207,8 +214,7 @@ class ToolUsage: try: result = usage_limit_error self._telemetry.tool_usage_error(llm=self.function_calling_llm) - result = self._format_result(result=result) - return result + return self._format_result(result=result) except Exception: if self.task: self.task.increment_tools_errors() @@ -255,7 +261,7 @@ class ToolUsage: error_message = 
self._i18n.errors("tool_usage_exception").format( error=e, tool=tool.name, tool_inputs=tool.description ) - error = ToolUsageErrorException( + error = ToolUsageError( f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}" ).message if self.task: @@ -346,7 +352,7 @@ class ToolUsage: return result def _check_tool_repeated_usage( - self, calling: Union[ToolCalling, InstructorToolCalling] + self, calling: ToolCalling | InstructorToolCalling ) -> bool: if not self.tools_handler: return False @@ -356,7 +362,8 @@ class ToolUsage: ) return False - def _check_usage_limit(self, tool: Any, tool_name: str) -> str | None: + @staticmethod + def _check_usage_limit(tool: Any, tool_name: str) -> str | None: """Check if tool has reached its usage limit. Args: @@ -393,7 +400,7 @@ class ToolUsage: return tool if self.task: self.task.increment_tools_errors() - tool_selection_data: Dict[str, Any] = { + tool_selection_data: dict[str, Any] = { "agent_key": getattr(self.agent, "key", None) if self.agent else None, "agent_role": getattr(self.agent, "role", None) if self.agent else None, "tool_name": tool_name, @@ -410,27 +417,24 @@ class ToolUsage: ), ) raise Exception(error) - else: - error = f"I forgot the Action name, these are the only available Actions: {self.tools_description}" - crewai_event_bus.emit( - self, - ToolSelectionErrorEvent( - **tool_selection_data, - error=error, - ), - ) - raise Exception(error) + error = f"I forgot the Action name, these are the only available Actions: {self.tools_description}" + crewai_event_bus.emit( + self, + ToolSelectionErrorEvent( + **tool_selection_data, + error=error, + ), + ) + raise Exception(error) def _render(self) -> str: """Render the tool name and description in plain text.""" - descriptions = [] - for tool in self.tools: - descriptions.append(tool.description) + descriptions = [tool.description for tool in self.tools] return "\n--\n".join(descriptions) def _function_calling( self, tool_string: str - ) -> Union[ToolCalling, InstructorToolCalling]: + ) -> ToolCalling | InstructorToolCalling: model = ( InstructorToolCalling if self.function_calling_llm.supports_function_calling() @@ -453,13 +457,13 @@ class ToolUsage: ) tool_object = converter.to_pydantic() if not isinstance(tool_object, (ToolCalling, InstructorToolCalling)): - raise ToolUsageErrorException("Failed to parse tool calling") + raise ToolUsageError("Failed to parse tool calling") return tool_object def _original_tool_calling( self, tool_string: str, raise_error: bool = False - ) -> Union[ToolCalling, InstructorToolCalling, ToolUsageErrorException]: + ) -> ToolCalling | InstructorToolCalling | ToolUsageError: tool_name = self.action.tool tool = self._select_tool(tool_name) try: @@ -468,18 +472,12 @@ class ToolUsage: except Exception: if raise_error: raise - else: - return ToolUsageErrorException( - f"{self._i18n.errors('tool_arguments_error')}" - ) + return ToolUsageError(f"{self._i18n.errors('tool_arguments_error')}") if not isinstance(arguments, dict): if raise_error: raise - else: - return ToolUsageErrorException( - f"{self._i18n.errors('tool_arguments_error')}" - ) + return ToolUsageError(f"{self._i18n.errors('tool_arguments_error')}") return ToolCalling( tool_name=tool.name, @@ -488,15 +486,14 @@ class ToolUsage: def _tool_calling( self, tool_string: str - ) -> Union[ToolCalling, InstructorToolCalling, ToolUsageErrorException]: + ) -> ToolCalling | InstructorToolCalling | ToolUsageError: try: try: return self._original_tool_calling(tool_string, 
raise_error=True) except Exception: if self.function_calling_llm: return self._function_calling(tool_string) - else: - return self._original_tool_calling(tool_string) + return self._original_tool_calling(tool_string) except Exception as e: self._run_attempts += 1 if self._run_attempts > self._max_parsing_attempts: @@ -505,12 +502,12 @@ class ToolUsage: self.task.increment_tools_errors() if self.agent and self.agent.verbose: self._printer.print(content=f"\n\n{e}\n", color="red") - return ToolUsageErrorException( # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling") + return ToolUsageError( # type: ignore # Incompatible return value type (got "ToolUsageError", expected "ToolCalling | InstructorToolCalling") f"{self._i18n.errors('tool_usage_error').format(error=e)}\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}" ) return self._tool_calling(tool_string) - def _validate_tool_input(self, tool_input: Optional[str]) -> Dict[str, Any]: + def _validate_tool_input(self, tool_input: str | None) -> dict[str, Any]: if tool_input is None: return {} @@ -534,7 +531,7 @@ class ToolUsage: return arguments except (ValueError, SyntaxError): repaired_input = repair_json(tool_input) - pass # Continue to the next parsing attempt + # Continue to the next parsing attempt # Attempt 3: Parse as JSON5 try: @@ -586,7 +583,7 @@ class ToolUsage: def on_tool_error( self, tool: Any, - tool_calling: Union[ToolCalling, InstructorToolCalling], + tool_calling: ToolCalling | InstructorToolCalling, e: Exception, ) -> None: event_data = self._prepare_event_data(tool, tool_calling) @@ -595,7 +592,7 @@ class ToolUsage: def on_tool_use_finished( self, tool: Any, - tool_calling: Union[ToolCalling, InstructorToolCalling], + tool_calling: ToolCalling | InstructorToolCalling, from_cache: bool, started_at: float, result: Any, @@ -616,7 +613,7 @@ class ToolUsage: crewai_event_bus.emit(self, ToolUsageFinishedEvent(**event_data)) def _prepare_event_data( - self, tool: Any, tool_calling: Union[ToolCalling, InstructorToolCalling] + self, tool: Any, tool_calling: ToolCalling | InstructorToolCalling ) -> dict: event_data = { "run_attempts": self._run_attempts, diff --git a/src/crewai/utilities/agent_utils.py b/src/crewai/utilities/agent_utils.py index d5be00e8b..9b2d1df15 100644 --- a/src/crewai/utilities/agent_utils.py +++ b/src/crewai/utilities/agent_utils.py @@ -1,14 +1,18 @@ import json import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Union +from collections.abc import Callable, Sequence +from typing import Any + +from rich.console import Console from crewai.agents.constants import FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE from crewai.agents.parser import ( AgentAction, AgentFinish, - OutputParserException, + OutputParserError, parse, ) +from crewai.cli.config import Settings from crewai.llm import LLM from crewai.llms.base_llm import BaseLLM from crewai.tools import BaseTool as CrewAITool @@ -20,13 +24,11 @@ from crewai.utilities.errors import AgentRepositoryError from crewai.utilities.exceptions.context_window_exceeding_exception import ( LLMContextLengthExceededException, ) -from rich.console import Console -from crewai.cli.config import Settings console = Console() -def parse_tools(tools: List[BaseTool]) -> List[CrewStructuredTool]: +def parse_tools(tools: list[BaseTool]) -> list[CrewStructuredTool]: """Parse tools to be used for the task.""" tools_list = [] @@ -39,13 +41,13 @@ def 
parse_tools(tools: List[BaseTool]) -> List[CrewStructuredTool]: return tools_list -def get_tool_names(tools: Sequence[Union[CrewStructuredTool, BaseTool]]) -> str: +def get_tool_names(tools: Sequence[CrewStructuredTool | BaseTool]) -> str: """Get the names of the tools.""" return ", ".join([t.name for t in tools]) def render_text_description_and_args( - tools: Sequence[Union[CrewStructuredTool, BaseTool]], + tools: Sequence[CrewStructuredTool | BaseTool], ) -> str: """Render the tool name, description, and args in plain text. @@ -53,10 +55,7 @@ def render_text_description_and_args( calculator: This tool is used for math, \ args: {"expression": {"type": "string"}} """ - tool_strings = [] - for tool in tools: - tool_strings.append(tool.description) - + tool_strings = [tool.description for tool in tools] return "\n".join(tool_strings) @@ -66,13 +65,13 @@ def has_reached_max_iterations(iterations: int, max_iterations: int) -> bool: def handle_max_iterations_exceeded( - formatted_answer: Union[AgentAction, AgentFinish, None], + formatted_answer: AgentAction | AgentFinish | None, printer: Printer, i18n: I18N, - messages: List[Dict[str, str]], - llm: Union[LLM, BaseLLM], - callbacks: List[Any], -) -> Union[AgentAction, AgentFinish]: + messages: list[dict[str, str]], + llm: LLM | BaseLLM, + callbacks: list[Any], +) -> AgentAction | AgentFinish: """ Handles the case when the maximum number of iterations is exceeded. Performs one more LLM call to get the final answer. @@ -90,7 +89,7 @@ def handle_max_iterations_exceeded( if formatted_answer and hasattr(formatted_answer, "text"): assistant_message = ( - formatted_answer.text + f'\n{i18n.errors("force_final_answer")}' + formatted_answer.text + f"\n{i18n.errors('force_final_answer')}" ) else: assistant_message = i18n.errors("force_final_answer") @@ -110,17 +109,16 @@ def handle_max_iterations_exceeded( ) raise ValueError("Invalid response from LLM call - None or empty.") - formatted_answer = format_answer(answer) # Return the formatted answer, regardless of its type - return formatted_answer + return format_answer(answer) -def format_message_for_llm(prompt: str, role: str = "user") -> Dict[str, str]: +def format_message_for_llm(prompt: str, role: str = "user") -> dict[str, str]: prompt = prompt.rstrip() return {"role": role, "content": prompt} -def format_answer(answer: str) -> Union[AgentAction, AgentFinish]: +def format_answer(answer: str) -> AgentAction | AgentFinish: """Format a response from the LLM into an AgentAction or AgentFinish.""" try: return parse(answer) @@ -134,7 +132,7 @@ def format_answer(answer: str) -> Union[AgentAction, AgentFinish]: def enforce_rpm_limit( - request_within_rpm_limit: Optional[Callable[[], bool]] = None, + request_within_rpm_limit: Callable[[], bool] | None = None, ) -> None: """Enforce the requests per minute (RPM) limit if applicable.""" if request_within_rpm_limit: @@ -142,12 +140,12 @@ def enforce_rpm_limit( def get_llm_response( - llm: Union[LLM, BaseLLM], - messages: List[Dict[str, str]], - callbacks: List[Any], + llm: LLM | BaseLLM, + messages: list[dict[str, str]], + callbacks: list[Any], printer: Printer, - from_task: Optional[Any] = None, - from_agent: Optional[Any] = None, + from_task: Any | None = None, + from_agent: Any | None = None, ) -> str: """Call the LLM and return the response, handling any invalid responses.""" try: @@ -171,13 +169,13 @@ def get_llm_response( def process_llm_response( answer: str, use_stop_words: bool -) -> Union[AgentAction, AgentFinish]: +) -> AgentAction | AgentFinish: 
"""Process the LLM response and format it into an AgentAction or AgentFinish.""" if not use_stop_words: try: # Preliminary parsing to check for errors. format_answer(answer) - except OutputParserException as e: + except OutputParserError as e: if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error: answer = answer.split("Observation:")[0].strip() @@ -187,10 +185,10 @@ def process_llm_response( def handle_agent_action_core( formatted_answer: AgentAction, tool_result: ToolResult, - messages: Optional[List[Dict[str, str]]] = None, - step_callback: Optional[Callable] = None, - show_logs: Optional[Callable] = None, -) -> Union[AgentAction, AgentFinish]: + messages: list[dict[str, str]] | None = None, + step_callback: Callable | None = None, + show_logs: Callable | None = None, +) -> AgentAction | AgentFinish: """Core logic for handling agent actions and tool results. Args: @@ -245,16 +243,16 @@ def handle_unknown_error(printer: Any, exception: Exception) -> None: def handle_output_parser_exception( - e: OutputParserException, - messages: List[Dict[str, str]], + e: OutputParserError, + messages: list[dict[str, str]], iterations: int, log_error_after: int = 3, - printer: Optional[Any] = None, + printer: Any | None = None, ) -> AgentAction: - """Handle OutputParserException by updating messages and formatted_answer. + """Handle OutputParserError by updating messages and formatted_answer. Args: - e: The OutputParserException that occurred + e: The OutputParserError that occurred messages: List of messages to append to iterations: Current iteration count log_error_after: Number of iterations after which to log errors @@ -298,9 +296,9 @@ def is_context_length_exceeded(exception: Exception) -> bool: def handle_context_length( respect_context_window: bool, printer: Any, - messages: List[Dict[str, str]], + messages: list[dict[str, str]], llm: Any, - callbacks: List[Any], + callbacks: list[Any], i18n: Any, ) -> None: """Handle context length exceeded by either summarizing or raising an error. @@ -330,9 +328,9 @@ def handle_context_length( def summarize_messages( - messages: List[Dict[str, str]], + messages: list[dict[str, str]], llm: Any, - callbacks: List[Any], + callbacks: list[Any], i18n: Any, ) -> None: """Summarize messages to fit within context window. @@ -344,12 +342,12 @@ def summarize_messages( i18n: I18N instance for messages """ messages_string = " ".join([message["content"] for message in messages]) - messages_groups = [] - cut_size = llm.get_context_window_size() - for i in range(0, len(messages_string), cut_size): - messages_groups.append({"content": messages_string[i : i + cut_size]}) + messages_groups = [ + {"content": messages_string[i : i + cut_size]} + for i in range(0, len(messages_string), cut_size) + ] summarized_contents = [] @@ -385,8 +383,8 @@ def summarize_messages( def show_agent_logs( printer: Printer, agent_role: str, - formatted_answer: Optional[Union[AgentAction, AgentFinish]] = None, - task_description: Optional[str] = None, + formatted_answer: AgentAction | AgentFinish | None = None, + task_description: str | None = None, verbose: bool = False, ) -> None: """Show agent logs for both start and execution states. 
@@ -458,8 +456,8 @@ def _print_current_organization(): ) -def load_agent_from_repository(from_repository: str) -> Dict[str, Any]: - attributes: Dict[str, Any] = {} +def load_agent_from_repository(from_repository: str) -> dict[str, Any]: + attributes: dict[str, Any] = {} if from_repository: import importlib @@ -497,7 +495,7 @@ def load_agent_from_repository(from_repository: str) -> Dict[str, Any]: else: attributes[key].append(tool_value) - except Exception as e: + except Exception as e: # noqa: PERF203 raise AgentRepositoryError( f"Tool {tool['name']} could not be loaded: {e}" ) from e diff --git a/src/crewai/utilities/crew_pydantic_output_parser.py b/src/crewai/utilities/crew_pydantic_output_parser.py index d0dbfae06..c40bf679b 100644 --- a/src/crewai/utilities/crew_pydantic_output_parser.py +++ b/src/crewai/utilities/crew_pydantic_output_parser.py @@ -1,17 +1,18 @@ import json -from typing import Any, Type +from typing import Any import regex from pydantic import BaseModel, ValidationError -from crewai.agents.parser import OutputParserException +from crewai.agents.parser import OutputParserError """Parser for converting text outputs into Pydantic models.""" + class CrewPydanticOutputParser: """Parses text outputs into specified Pydantic models.""" - pydantic_object: Type[BaseModel] + pydantic_object: type[BaseModel] def parse_result(self, result: str) -> Any: result = self._transform_in_valid_json(result) @@ -27,7 +28,7 @@ class CrewPydanticOutputParser: except ValidationError as e: name = self.pydantic_object.__name__ msg = f"Failed to parse {name} from completion {json_object}. Got: {e}" - raise OutputParserException(error=msg) + raise OutputParserError(error=msg) from e def _transform_in_valid_json(self, text) -> str: text = text.replace("```", "").replace("json", "") @@ -41,7 +42,7 @@ class CrewPydanticOutputParser: # Return the first successfully parsed JSON object json_obj = json.dumps(json_obj) return str(json_obj) - except json.JSONDecodeError: + except json.JSONDecodeError: # noqa: PERF203 # If parsing fails, skip to the next match continue return text diff --git a/src/crewai/utilities/tool_utils.py b/src/crewai/utilities/tool_utils.py index eaf065477..c1c20bc66 100644 --- a/src/crewai/utilities/tool_utils.py +++ b/src/crewai/utilities/tool_utils.py @@ -1,24 +1,24 @@ -from typing import Any, Dict, List, Optional +from typing import Any from crewai.agents.parser import AgentAction from crewai.security import Fingerprint from crewai.tools.structured_tool import CrewStructuredTool from crewai.tools.tool_types import ToolResult -from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException +from crewai.tools.tool_usage import ToolUsage, ToolUsageError from crewai.utilities.i18n import I18N def execute_tool_and_check_finality( agent_action: AgentAction, - tools: List[CrewStructuredTool], + tools: list[CrewStructuredTool], i18n: I18N, - agent_key: Optional[str] = None, - agent_role: Optional[str] = None, - tools_handler: Optional[Any] = None, - task: Optional[Any] = None, - agent: Optional[Any] = None, - function_calling_llm: Optional[Any] = None, - fingerprint_context: Optional[Dict[str, str]] = None, + agent_key: str | None = None, + agent_role: str | None = None, + tools_handler: Any | None = None, + task: Any | None = None, + agent: Any | None = None, + function_calling_llm: Any | None = None, + fingerprint_context: dict[str, str] | None = None, ) -> ToolResult: """Execute a tool and check if the result should be treated as a final answer. 
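The fingerprint handling above now re-raises with `from e` (ruff B904), so the ValueError is explicitly chained to the original failure instead of surfacing as an unrelated error raised during exception handling. A minimal illustration with a made-up loader function:

    import json

    def load_fingerprint(raw: str) -> dict:
        try:
            return json.loads(raw)
        except json.JSONDecodeError as e:
            # "from e" records the JSON error as the direct cause of the ValueError.
            raise ValueError(f"Failed to set fingerprint: {e}") from e
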
@@ -50,7 +50,7 @@ def execute_tool_and_check_finality( fingerprint_obj = Fingerprint.from_dict(fingerprint_context) agent.set_fingerprint(fingerprint_obj) except Exception as e: - raise ValueError(f"Failed to set fingerprint: {e}") + raise ValueError(f"Failed to set fingerprint: {e}") from e # Create tool usage instance tool_usage = ToolUsage( @@ -65,7 +65,7 @@ def execute_tool_and_check_finality( # Parse tool calling tool_calling = tool_usage.parse_tool_calling(agent_action.text) - if isinstance(tool_calling, ToolUsageErrorException): + if isinstance(tool_calling, ToolUsageError): return ToolResult(tool_calling.message, False) # Check if tool name matches diff --git a/tests/agents/test_agent.py b/tests/agents/test_agent.py index 9e40ca015..e7d0526c8 100644 --- a/tests/agents/test_agent.py +++ b/tests/agents/test_agent.py @@ -258,8 +258,8 @@ def test_cache_hitting(): output = agent.execute_task(task1) output = agent.execute_task(task2) assert cache_handler._cache == { - "multiplier-{'first_number': 2, 'second_number': 6}": 12, - "multiplier-{'first_number': 3, 'second_number': 3}": 9, + 'multiplier-{"first_number": 2, "second_number": 6}': 12, + 'multiplier-{"first_number": 3, "second_number": 3}': 9, } task = Task( @@ -271,9 +271,9 @@ def test_cache_hitting(): assert output == "36" assert cache_handler._cache == { - "multiplier-{'first_number': 2, 'second_number': 6}": 12, - "multiplier-{'first_number': 3, 'second_number': 3}": 9, - "multiplier-{'first_number': 12, 'second_number': 3}": 36, + 'multiplier-{"first_number": 2, "second_number": 6}': 12, + 'multiplier-{"first_number": 3, "second_number": 3}': 9, + 'multiplier-{"first_number": 12, "second_number": 3}': 36, } received_events = [] @@ -293,7 +293,7 @@ def test_cache_hitting(): output = agent.execute_task(task) assert output == "0" read.assert_called_with( - tool="multiplier", input={"first_number": 2, "second_number": 6} + tool="multiplier", input='{"first_number": 2, "second_number": 6}' ) assert len(received_events) == 1 assert isinstance(received_events[0], ToolUsageFinishedEvent) @@ -334,8 +334,8 @@ def test_disabling_cache_for_agent(): output = agent.execute_task(task1) output = agent.execute_task(task2) assert cache_handler._cache != { - "multiplier-{'first_number': 2, 'second_number': 6}": 12, - "multiplier-{'first_number': 3, 'second_number': 3}": 9, + 'multiplier-{"first_number": 2, "second_number": 6}': 12, + 'multiplier-{"first_number": 3, "second_number": 3}': 9, } task = Task( @@ -347,9 +347,9 @@ def test_disabling_cache_for_agent(): assert output == "36" assert cache_handler._cache != { - "multiplier-{'first_number': 2, 'second_number': 6}": 12, - "multiplier-{'first_number': 3, 'second_number': 3}": 9, - "multiplier-{'first_number': 12, 'second_number': 3}": 36, + 'multiplier-{"first_number": 2, "second_number": 6}': 12, + 'multiplier-{"first_number": 3, "second_number": 3}': 9, + 'multiplier-{"first_number": 12, "second_number": 3}': 36, } with patch.object(CacheHandler, "read") as read: diff --git a/tests/agents/test_crew_agent_parser.py b/tests/agents/test_crew_agent_parser.py index 92563e8fd..72e44487c 100644 --- a/tests/agents/test_crew_agent_parser.py +++ b/tests/agents/test_crew_agent_parser.py @@ -1,11 +1,13 @@ import pytest -from crewai.agents.crew_agent_executor import ( +from crewai.agents import parser +from crewai.agents.parser import ( AgentAction, AgentFinish, - OutputParserException, ) -from crewai.agents import parser +from crewai.agents.parser import ( + OutputParserError as OutputParserException, 
+) def test_valid_action_parsing_special_characters(): @@ -348,9 +350,9 @@ def test_integration_valid_and_invalid(): for part in parts: try: result = parser.parse(part.strip()) - results.append(result) except OutputParserException as e: - results.append(e) + result = e + results.append(result) assert isinstance(results[0], AgentAction) assert isinstance(results[1], AgentFinish) diff --git a/tests/test_crew.py b/tests/test_crew.py index 3c8db8371..0a9b94695 100644 --- a/tests/test_crew.py +++ b/tests/test_crew.py @@ -918,7 +918,7 @@ def test_cache_hitting_between_agents(researcher, writer, ceo): # Check if both calls were made with the expected arguments expected_call = call( - tool="multiplier", input={"first_number": 2, "second_number": 6} + tool="multiplier", input='{"first_number": 2, "second_number": 6}' ) assert cache_calls[0] == expected_call, f"First call mismatch: {cache_calls[0]}" assert cache_calls[1] == expected_call, ( @@ -2229,7 +2229,7 @@ def test_tools_with_custom_caching(): # Verify that one of those calls was with the even number that should be cached add_to_cache.assert_any_call( tool="multiplcation_tool", - input={"first_number": 2, "second_number": 6}, + input='{"first_number": 2, "second_number": 6}', output=12, )
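
The updated test expectations pin down the "cache key alignment" part of the commit: ToolsHandler.on_tool_use now serializes dict arguments with json.dumps before writing to the cache, and ToolUsage applies the same conversion before reading, so both sides derive the same key. A condensed sketch of the shared normalization (the key layout mirrors the multiplier entries asserted above; CacheHandler internals are simplified):

    import json
    from typing import Any

    def cache_input(arguments: Any) -> str:
        """Normalize tool arguments the way both the write and read paths now do."""
        if not arguments:
            return ""
        if isinstance(arguments, dict):
            return json.dumps(arguments)
        return str(arguments)

    # Both sides compute the same key, so a prior write is found on read:
    # f"multiplier-{cache_input({'first_number': 2, 'second_number': 6})}"
    # -> 'multiplier-{"first_number": 2, "second_number": 6}'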