diff --git a/src/crewai/agent.py b/src/crewai/agent.py
index a265f4d52..4ddd20e21 100644
--- a/src/crewai/agent.py
+++ b/src/crewai/agent.py
@@ -482,6 +482,7 @@ class Agent(BaseAgent):
             verbose=self.verbose,
             response_format=response_format,
             i18n=self.i18n,
+            original_agent=self,
         )
 
         return lite_agent.kickoff(messages)
diff --git a/src/crewai/lite_agent.py b/src/crewai/lite_agent.py
index e63a2320d..d458e6de0 100644
--- a/src/crewai/lite_agent.py
+++ b/src/crewai/lite_agent.py
@@ -47,11 +47,6 @@ from crewai.utilities.events.llm_events import (
     LLMCallStartedEvent,
     LLMCallType,
 )
-from crewai.utilities.events.tool_usage_events import (
-    ToolUsageErrorEvent,
-    ToolUsageFinishedEvent,
-    ToolUsageStartedEvent,
-)
 from crewai.utilities.llm_utils import create_llm
 from crewai.utilities.printer import Printer
 from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -155,6 +150,10 @@ class LiteAgent(BaseModel):
         default=[], description="Results of the tools used by the agent."
     )
 
+    # Reference to the original Agent
+    original_agent: Optional[BaseAgent] = Field(
+        default=None, description="Reference to the agent that created this LiteAgent"
+    )
     # Private Attributes
     _parsed_tools: List[CrewStructuredTool] = PrivateAttr(default_factory=list)
     _token_process: TokenProcess = PrivateAttr(default_factory=TokenProcess)
@@ -163,7 +162,7 @@ class LiteAgent(BaseModel):
     _messages: List[Dict[str, str]] = PrivateAttr(default_factory=list)
     _iterations: int = PrivateAttr(default=0)
     _printer: Printer = PrivateAttr(default_factory=Printer)
-    
+
     @model_validator(mode="after")
     def setup_llm(self):
         """Set up the LLM and other components after initialization."""
@@ -412,18 +411,6 @@ class LiteAgent(BaseModel):
         formatted_answer = process_llm_response(answer, self.use_stop_words)
 
         if isinstance(formatted_answer, AgentAction):
-            # Emit tool usage started event
-            crewai_event_bus.emit(
-                self,
-                event=ToolUsageStartedEvent(
-                    agent_key=self.key,
-                    agent_role=self.role,
-                    tool_name=formatted_answer.tool,
-                    tool_args=formatted_answer.tool_input,
-                    tool_class=formatted_answer.tool,
-                ),
-            )
-
             try:
                 tool_result = execute_tool_and_check_finality(
                     agent_action=formatted_answer,
@@ -431,34 +418,9 @@ class LiteAgent(BaseModel):
                     i18n=self.i18n,
                     agent_key=self.key,
                     agent_role=self.role,
-                )
-                # Emit tool usage finished event
-                crewai_event_bus.emit(
-                    self,
-                    event=ToolUsageFinishedEvent(
-                        agent_key=self.key,
-                        agent_role=self.role,
-                        tool_name=formatted_answer.tool,
-                        tool_args=formatted_answer.tool_input,
-                        tool_class=formatted_answer.tool,
-                        started_at=datetime.now(),
-                        finished_at=datetime.now(),
-                        output=tool_result.result,
-                    ),
+                    agent=self.original_agent,
                 )
             except Exception as e:
-                # Emit tool usage error event
-                crewai_event_bus.emit(
-                    self,
-                    event=ToolUsageErrorEvent(
-                        agent_key=self.key,
-                        agent_role=self.role,
-                        tool_name=formatted_answer.tool,
-                        tool_args=formatted_answer.tool_input,
-                        tool_class=formatted_answer.tool,
-                        error=str(e),
-                    ),
-                )
                 raise e
 
         formatted_answer = handle_agent_action_core(
diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index 741544662..25b798a6d 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -707,15 +707,6 @@ class LLM(BaseLLM):
                 function_name, lambda: None
             )  # Ensure fn is always a callable
             logging.error(f"Error executing function '{function_name}': {e}")
-            crewai_event_bus.emit(
-                self,
-                event=ToolExecutionErrorEvent(
-                    tool_name=function_name,
-                    tool_args=function_args,
-                    tool_class=fn,
-                    error=str(e),
-                ),
-            )
             crewai_event_bus.emit(
                 self,
                 event=LLMCallFailedEvent(error=f"Tool execution error: {str(e)}"),
diff --git a/src/crewai/tools/tool_usage.py b/src/crewai/tools/tool_usage.py
index 8c6862e0d..dc5f8f29a 100644
--- a/src/crewai/tools/tool_usage.py
+++ b/src/crewai/tools/tool_usage.py
@@ -2,7 +2,6 @@ import ast
 import datetime
 import json
 import time
-from dataclasses import dataclass
 from difflib import SequenceMatcher
 from json import JSONDecodeError
 from textwrap import dedent
@@ -26,6 +25,7 @@ from crewai.utilities.events.tool_usage_events import (
     ToolSelectionErrorEvent,
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
+    ToolUsageStartedEvent,
     ToolValidateInputErrorEvent,
 )
 
@@ -166,6 +166,21 @@ class ToolUsage:
         if self.task:
             self.task.increment_tools_errors()
 
+        if self.agent:
+            event_data = {
+                "agent_key": self.agent.key,
+                "agent_role": self.agent.role,
+                "tool_name": self.action.tool,
+                "tool_args": self.action.tool_input,
+                "tool_class": self.action.tool,
+                "agent": self.agent,
+            }
+
+            if self.agent.fingerprint:
+                event_data.update(self.agent.fingerprint)
+
+            crewai_event_bus.emit(self, event=ToolUsageStartedEvent(**event_data))
+
         started_at = time.time()
         from_cache = False
         result = None  # type: ignore
diff --git a/src/crewai/utilities/agent_utils.py b/src/crewai/utilities/agent_utils.py
index e9389eb0e..8af665140 100644
--- a/src/crewai/utilities/agent_utils.py
+++ b/src/crewai/utilities/agent_utils.py
@@ -16,7 +16,6 @@ from crewai.tools.base_tool import BaseTool
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_types import ToolResult
 from crewai.utilities import I18N, Printer
-from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
 from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededException,
 )
diff --git a/src/crewai/utilities/tool_utils.py b/src/crewai/utilities/tool_utils.py
index 2b26ca83b..eaf065477 100644
--- a/src/crewai/utilities/tool_utils.py
+++ b/src/crewai/utilities/tool_utils.py
@@ -5,11 +5,6 @@ from crewai.security import Fingerprint
 from crewai.tools.structured_tool import CrewStructuredTool
 from crewai.tools.tool_types import ToolResult
 from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
-from crewai.utilities.events import crewai_event_bus
-from crewai.utilities.events.tool_usage_events import (
-    ToolUsageErrorEvent,
-    ToolUsageStartedEvent,
-)
 from crewai.utilities.i18n import I18N
 
 
@@ -42,10 +37,8 @@ def execute_tool_and_check_finality(
         ToolResult containing the execution result and whether it should be treated as a final answer
     """
     try:
-        # Create tool name to tool map
         tool_name_to_tool_map = {tool.name: tool for tool in tools}
 
-        # Emit tool usage event if agent info is available
         if agent_key and agent_role and agent:
             fingerprint_context = fingerprint_context or {}
             if agent:
@@ -59,22 +52,6 @@ def execute_tool_and_check_finality(
             except Exception as e:
                 raise ValueError(f"Failed to set fingerprint: {e}")
 
-            event_data = {
-                "agent_key": agent_key,
-                "agent_role": agent_role,
-                "tool_name": agent_action.tool,
-                "tool_args": agent_action.tool_input,
-                "tool_class": agent_action.tool,
-                "agent": agent,
-            }
-            event_data.update(fingerprint_context)
-            crewai_event_bus.emit(
-                agent,
-                event=ToolUsageStartedEvent(
-                    **event_data,
-                ),
-            )
-
         # Create tool usage instance
         tool_usage = ToolUsage(
             tools_handler=tools_handler,
@@ -110,17 +87,4 @@ def execute_tool_and_check_finality(
         return ToolResult(tool_result, False)
 
     except Exception as e:
-        # Emit error event if agent info is available
-        if agent_key and agent_role and agent:
-            crewai_event_bus.emit(
-                agent,
-                event=ToolUsageErrorEvent(
-                    agent_key=agent_key,
-                    agent_role=agent_role,
-                    tool_name=agent_action.tool,
-                    tool_args=agent_action.tool_input,
-                    tool_class=agent_action.tool,
-                    error=str(e),
-                ),
-            )
         raise e
diff --git a/tests/cassettes/test_tool_execution_error_event.yaml b/tests/cassettes/test_tool_execution_error_event.yaml
deleted file mode 100644
index 61583726a..000000000
--- a/tests/cassettes/test_tool_execution_error_event.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-interactions:
-- request:
-    body: '{"messages": [{"role": "user", "content": "Use the failing tool"}], "model":
-      "gpt-4o-mini", "stop": [], "tools": [{"type": "function", "function": {"name":
-      "failing_tool", "description": "This tool always fails.", "parameters": {"type":
-      "object", "properties": {"param": {"type": "string", "description": "A test
-      parameter"}}, "required": ["param"]}}}]}'
-    headers:
-      accept:
-      - application/json
-      accept-encoding:
-      - gzip, deflate
-      connection:
-      - keep-alive
-      content-length:
-      - '353'
-      content-type:
-      - application/json
-      host:
-      - api.openai.com
-      user-agent:
-      - OpenAI/Python 1.61.0
-      x-stainless-arch:
-      - arm64
-      x-stainless-async:
-      - 'false'
-      x-stainless-lang:
-      - python
-      x-stainless-os:
-      - MacOS
-      x-stainless-package-version:
-      - 1.61.0
-      x-stainless-raw-response:
-      - 'true'
-      x-stainless-retry-count:
-      - '0'
-      x-stainless-runtime:
-      - CPython
-      x-stainless-runtime-version:
-      - 3.12.8
-    method: POST
-    uri: https://api.openai.com/v1/chat/completions
-  response:
-    content: "{\n  \"id\": \"chatcmpl-B2P4zoJZuES7Aom8ugEq1modz5Vsl\",\n  \"object\":
-      \"chat.completion\",\n  \"created\": 1739912761,\n  \"model\": \"gpt-4o-mini-2024-07-18\",\n
-      \  \"choices\": [\n    {\n      \"index\": 0,\n      \"message\": {\n        \"role\":
-      \"assistant\",\n        \"content\": null,\n        \"tool_calls\": [\n          {\n
-      \            \"id\": \"call_F6fJxISpMKUBIGV6dd2vjRNG\",\n            \"type\":
-      \"function\",\n            \"function\": {\n              \"name\": \"failing_tool\",\n
-      \              \"arguments\": \"{\\\"param\\\":\\\"test\\\"}\"\n            }\n
-      \          }\n        ],\n        \"refusal\": null\n      },\n      \"logprobs\":
-      null,\n      \"finish_reason\": \"tool_calls\"\n    }\n  ],\n  \"usage\": {\n
-      \    \"prompt_tokens\": 51,\n    \"completion_tokens\": 15,\n    \"total_tokens\":
-      66,\n    \"prompt_tokens_details\": {\n      \"cached_tokens\": 0,\n      \"audio_tokens\":
-      0\n    },\n    \"completion_tokens_details\": {\n      \"reasoning_tokens\":
-      0,\n      \"audio_tokens\": 0,\n      \"accepted_prediction_tokens\": 0,\n      \"rejected_prediction_tokens\":
-      0\n    }\n  },\n  \"service_tier\": \"default\",\n  \"system_fingerprint\":
-      \"fp_00428b782a\"\n}\n"
-    headers:
-      CF-RAY:
-      - 9140fa827f38eb1e-SJC
-      Connection:
-      - keep-alive
-      Content-Encoding:
-      - gzip
-      Content-Type:
-      - application/json
-      Date:
-      - Tue, 18 Feb 2025 21:06:02 GMT
-      Server:
-      - cloudflare
-      Set-Cookie:
-      - __cf_bm=xbuu3IQpCMh.43ZrqL1TRMECOc6QldgHV0hzOX1GrWI-1739912762-1.0.1.1-t7iyq5xMioPrwfeaHLvPT9rwRPp7Q9A9uIm69icH9dPxRD4xMA3cWqb1aXj1_e2IyAEQQWFe1UWjlmJ22aHh3Q;
-        path=/; expires=Tue, 18-Feb-25 21:36:02 GMT; domain=.api.openai.com; HttpOnly;
-        Secure; SameSite=None
-      - _cfuvid=x9l.Rhja8_wXDN.j8qcEU1PvvEqAwZp4Fd3s_aj4qwM-1739912762161-0.0.1.1-604800000;
-        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
-      Transfer-Encoding:
-      - chunked
-      X-Content-Type-Options:
-      - nosniff
-      access-control-expose-headers:
-      - X-Request-ID
-      alt-svc:
-      - h3=":443"; ma=86400
-      cf-cache-status:
-      - DYNAMIC
-      openai-organization:
-      - crewai-iuxna1
-      openai-processing-ms:
-      - '861'
-      openai-version:
-      - '2020-10-01'
-      strict-transport-security:
-      - max-age=31536000; includeSubDomains; preload
-      x-ratelimit-limit-requests:
-      - '30000'
-      x-ratelimit-limit-tokens:
-      - '150000000'
-      x-ratelimit-remaining-requests:
-      - '29999'
-      x-ratelimit-remaining-tokens:
-      - '149999978'
-      x-ratelimit-reset-requests:
-      - 2ms
-      x-ratelimit-reset-tokens:
-      - 0s
-      x-request-id:
-      - req_8666ec3aa6677cb346ba00993556051d
-    http_version: HTTP/1.1
-    status_code: 200
-version: 1
diff --git a/tests/llm_test.py b/tests/llm_test.py
index c674b623b..65cc75bab 100644
--- a/tests/llm_test.py
+++ b/tests/llm_test.py
@@ -395,51 +395,3 @@ def test_deepseek_r1_with_open_router():
     result = llm.call("What is the capital of France?")
     assert isinstance(result, str)
     assert "Paris" in result
-
-
-@pytest.mark.vcr(filter_headers=["authorization"])
-def test_tool_execution_error_event():
-    llm = LLM(model="gpt-4o-mini")
-
-    def failing_tool(param: str) -> str:
-        """This tool always fails."""
-        raise Exception("Tool execution failed!")
-
-    tool_schema = {
-        "type": "function",
-        "function": {
-            "name": "failing_tool",
-            "description": "This tool always fails.",
-            "parameters": {
-                "type": "object",
-                "properties": {
-                    "param": {"type": "string", "description": "A test parameter"}
-                },
-                "required": ["param"],
-            },
-        },
-    }
-
-    received_events = []
-
-    @crewai_event_bus.on(ToolExecutionErrorEvent)
-    def event_handler(source, event):
-        received_events.append(event)
-
-    available_functions = {"failing_tool": failing_tool}
-
-    messages = [{"role": "user", "content": "Use the failing tool"}]
-
-    llm.call(
-        messages,
-        tools=[tool_schema],
-        available_functions=available_functions,
-    )
-
-    assert len(received_events) == 1
-    event = received_events[0]
-    assert isinstance(event, ToolExecutionErrorEvent)
-    assert event.tool_name == "failing_tool"
-    assert event.tool_args == {"param": "test"}
-    assert event.tool_class == failing_tool
-    assert "Tool execution failed!" in event.error
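
Note for reviewers: after this patch, ToolUsageStartedEvent is emitted in exactly one place (inside ToolUsage) and carries the originating agent, which Agent.kickoff() threads through via the new LiteAgent.original_agent field. Below is a minimal sketch, not part of the patch, of how a listener would observe the consolidated event; the agent, tool, and prompt are hypothetical, and actually running it requires a configured LLM (e.g. an OpenAI API key).

```python
# Minimal sketch: observing the consolidated ToolUsageStartedEvent.
from crewai import Agent
from crewai.tools import tool
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent


@tool("Echo Tool")
def echo_tool(text: str) -> str:
    """Echo the input text back."""
    return text


@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_started(source, event):
    # The event is now emitted once, from ToolUsage, and carries the
    # originating agent rather than the LiteAgent wrapper.
    print(event.agent_role, event.tool_name, event.tool_args)


# Hypothetical agent for illustration only.
agent = Agent(
    role="Echo Agent",
    goal="Echo whatever the user asks for",
    backstory="A throwaway agent used to exercise tool events.",
    tools=[echo_tool],
)

# Agent.kickoff() builds a LiteAgent with original_agent=self, so the
# ToolUsageStartedEvent fired during this call is attributed to `agent`.
result = agent.kickoff("Use the Echo Tool to echo the word hello")
```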