From 0c3ab25b53a604fd38b2a3f08601d9d6093ef324 Mon Sep 17 00:00:00 2001 From: lorenzejay Date: Tue, 27 Jan 2026 09:05:49 -0800 Subject: [PATCH] feat: implement before and after tool call hooks in CrewAgentExecutor and AgentExecutor - Added support for before and after tool call hooks in both CrewAgentExecutor and AgentExecutor classes. - Introduced ToolCallHookContext to manage context for hooks, allowing for enhanced control over tool execution. - Implemented logic to block tool execution based on before hooks and to modify results based on after hooks. - Added integration tests to validate the functionality of the new hooks, ensuring they work as expected in various scenarios. - Enhanced the overall flexibility and extensibility of tool interactions within the CrewAI framework. --- .../src/crewai/agents/crew_agent_executor.py | 64 +++- .../src/crewai/experimental/agent_executor.py | 61 ++- ...nt_native_tool_hooks_before_and_after.yaml | 224 +++++++++++ ...re_hook_blocks_tool_execution_in_crew.yaml | 351 ++++++++++++++++++ ...ew_native_tool_hooks_before_and_after.yaml | 230 ++++++++++++ ...gent_hooks_integration_with_real_tool.yaml | 278 ++++---------- lib/crewai/tests/hooks/test_tool_hooks.py | 160 ++++++++ 7 files changed, 1152 insertions(+), 216 deletions(-) create mode 100644 lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_agent_native_tool_hooks_before_and_after.yaml create mode 100644 lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_before_hook_blocks_tool_execution_in_crew.yaml create mode 100644 lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_crew_native_tool_hooks_before_and_after.yaml diff --git a/lib/crewai/src/crewai/agents/crew_agent_executor.py b/lib/crewai/src/crewai/agents/crew_agent_executor.py index a5fb12d58..150ae4126 100644 --- a/lib/crewai/src/crewai/agents/crew_agent_executor.py +++ b/lib/crewai/src/crewai/agents/crew_agent_executor.py @@ -28,6 +28,11 @@ from crewai.hooks.llm_hooks import ( get_after_llm_call_hooks, get_before_llm_call_hooks, ) +from crewai.hooks.tool_hooks import ( + ToolCallHookContext, + get_after_tool_call_hooks, + get_before_tool_call_hooks, +) from crewai.utilities.agent_utils import ( aget_llm_response, convert_tools_to_openai_schema, @@ -749,8 +754,41 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): track_delegation_if_needed(func_name, args_dict, self.task) - # Execute the tool (only if not cached and not at max usage) - if not from_cache and not max_usage_reached: + # Find the structured tool for hook context + structured_tool = None + for tool in self.tools or []: + if sanitize_tool_name(tool.name) == func_name: + structured_tool = tool + break + + # Execute before_tool_call hooks + hook_blocked = False + before_hook_context = ToolCallHookContext( + tool_name=func_name, + tool_input=args_dict, + tool=structured_tool, # type: ignore[arg-type] + agent=self.agent, + task=self.task, + crew=self.crew, + ) + before_hooks = get_before_tool_call_hooks() + try: + for hook in before_hooks: + hook_result = hook(before_hook_context) + if hook_result is False: + hook_blocked = True + break + except Exception as hook_error: + self._printer.print( + content=f"Error in before_tool_call hook: {hook_error}", + color="red", + ) + + # If hook blocked execution, set result and skip tool execution + if hook_blocked: + result = f"Tool execution blocked by hook. 
Tool: {func_name}" + # Execute the tool (only if not cached, not at max usage, and not blocked by hook) + elif not from_cache and not max_usage_reached: result = "Tool not found" if func_name in available_functions: try: @@ -798,6 +836,28 @@ class CrewAgentExecutor(CrewAgentExecutorMixin): # Return error message when max usage limit is reached result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore." + after_hook_context = ToolCallHookContext( + tool_name=func_name, + tool_input=args_dict, + tool=structured_tool, # type: ignore[arg-type] + agent=self.agent, + task=self.task, + crew=self.crew, + tool_result=result, + ) + after_hooks = get_after_tool_call_hooks() + try: + for after_hook in after_hooks: + hook_result = after_hook(after_hook_context) + if hook_result is not None: + result = hook_result + after_hook_context.tool_result = result + except Exception as hook_error: + self._printer.print( + content=f"Error in after_tool_call hook: {hook_error}", + color="red", + ) + # Emit tool usage finished event crewai_event_bus.emit( self, diff --git a/lib/crewai/src/crewai/experimental/agent_executor.py b/lib/crewai/src/crewai/experimental/agent_executor.py index 7ad7ad025..2df7b96bf 100644 --- a/lib/crewai/src/crewai/experimental/agent_executor.py +++ b/lib/crewai/src/crewai/experimental/agent_executor.py @@ -36,6 +36,11 @@ from crewai.hooks.llm_hooks import ( get_after_llm_call_hooks, get_before_llm_call_hooks, ) +from crewai.hooks.tool_hooks import ( + ToolCallHookContext, + get_after_tool_call_hooks, + get_before_tool_call_hooks, +) from crewai.utilities.agent_utils import ( convert_tools_to_openai_schema, enforce_rpm_limit, @@ -650,8 +655,37 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): track_delegation_if_needed(func_name, args_dict, self.task) - # Execute the tool (only if not cached and not at max usage) - if not from_cache and not max_usage_reached: + structured_tool = None + for tool in self.tools or []: + if sanitize_tool_name(tool.name) == func_name: + structured_tool = tool + break + + hook_blocked = False + before_hook_context = ToolCallHookContext( + tool_name=func_name, + tool_input=args_dict, + tool=structured_tool, # type: ignore[arg-type] + agent=self.agent, + task=self.task, + crew=self.crew, + ) + before_hooks = get_before_tool_call_hooks() + try: + for hook in before_hooks: + hook_result = hook(before_hook_context) + if hook_result is False: + hook_blocked = True + break + except Exception as hook_error: + self._printer.print( + content=f"Error in before_tool_call hook: {hook_error}", + color="red", + ) + + if hook_blocked: + result = f"Tool execution blocked by hook. Tool: {func_name}" + elif not from_cache and not max_usage_reached: result = "Tool not found" if func_name in self._available_functions: try: @@ -700,6 +734,29 @@ class AgentExecutor(Flow[AgentReActState], CrewAgentExecutorMixin): # Return error message when max usage limit is reached result = f"Tool '{func_name}' has reached its usage limit of {original_tool.max_usage_count} times and cannot be used anymore." 
+ # Execute after_tool_call hooks (even if blocked, to allow logging/monitoring) + after_hook_context = ToolCallHookContext( + tool_name=func_name, + tool_input=args_dict, + tool=structured_tool, # type: ignore[arg-type] + agent=self.agent, + task=self.task, + crew=self.crew, + tool_result=result, + ) + after_hooks = get_after_tool_call_hooks() + try: + for after_hook in after_hooks: + hook_result = after_hook(after_hook_context) + if hook_result is not None: + result = hook_result + after_hook_context.tool_result = result + except Exception as hook_error: + self._printer.print( + content=f"Error in after_tool_call hook: {hook_error}", + color="red", + ) + # Emit tool usage finished event crewai_event_bus.emit( self, diff --git a/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_agent_native_tool_hooks_before_and_after.yaml b/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_agent_native_tool_hooks_before_and_after.yaml new file mode 100644 index 000000000..23719e5c3 --- /dev/null +++ b/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_agent_native_tool_hooks_before_and_after.yaml @@ -0,0 +1,224 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Calculator. You are a + calculator assistant\nYour personal goal is: Perform calculations"},{"role":"user","content":"\nCurrent + Task: What is 7 times 6? Use the multiply_numbers tool.\n\nThis is VERY important + to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"multiply_numbers","description":"Multiply + two numbers together.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '589' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D2gblVDQeSH6tTrJiUtxgjoVoPuAR\",\n \"object\": + \"chat.completion\",\n \"created\": 1769532813,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_gO6PtjoOIDVeDWs7Wf680BHh\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"multiply_numbers\",\n + \ \"arguments\": \"{\\\"a\\\":7,\\\"b\\\":6}\"\n }\n + \ }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 100,\n \"completion_tokens\": + 18,\n \"total_tokens\": 118,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": 
+ \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 27 Jan 2026 16:53:34 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '593' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Calculator. You are a + calculator assistant\nYour personal goal is: Perform calculations"},{"role":"user","content":"\nCurrent + Task: What is 7 times 6? Use the multiply_numbers tool.\n\nThis is VERY important + to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_gO6PtjoOIDVeDWs7Wf680BHh","type":"function","function":{"name":"multiply_numbers","arguments":"{\"a\":7,\"b\":6}"}}]},{"role":"tool","tool_call_id":"call_gO6PtjoOIDVeDWs7Wf680BHh","name":"multiply_numbers","content":"42"},{"role":"user","content":"Analyze + the tool result. If requirements are met, provide the Final Answer. Otherwise, + call the next tool. 
Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"multiply_numbers","description":"Multiply + two numbers together.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1056' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D2gbm9NaGCXkI3QwW3eOTFSP4L4lh\",\n \"object\": + \"chat.completion\",\n \"created\": 1769532814,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"42\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 162,\n \"completion_tokens\": + 2,\n \"total_tokens\": 164,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 27 Jan 2026 16:53:34 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '259' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_before_hook_blocks_tool_execution_in_crew.yaml b/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_before_hook_blocks_tool_execution_in_crew.yaml new file mode 100644 index 000000000..8f3722e9f --- /dev/null +++ b/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_before_hook_blocks_tool_execution_in_crew.yaml @@ -0,0 +1,351 @@ +interactions: +- request: + body: 
'{"messages":[{"role":"system","content":"You are Test Agent. You are a + test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent + Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is + the expected criteria for your final answer: The result of the operation\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nThis + is VERY important to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform + a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '773' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D2giKEOxBDVqJVqVECwcFjbzdQKSA\",\n \"object\": + \"chat.completion\",\n \"created\": 1769533220,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_3OM1qS0QaWqhiJaHyJbNz1ME\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"dangerous_operation\",\n + \ \"arguments\": \"{\\\"action\\\":\\\"delete_all\\\"}\"\n }\n + \ }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 133,\n \"completion_tokens\": + 17,\n \"total_tokens\": 150,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 27 Jan 2026 17:00:20 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '484' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - 
X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Test Agent. You are a + test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent + Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is + the expected criteria for your final answer: The result of the operation\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nThis + is VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","name":"dangerous_operation","content":"Tool + execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze + the tool result. If requirements are met, provide the Final Answer. Otherwise, + call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform + a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1311' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D2giLnD91JxhK0yXninQ7oHYttNDY\",\n \"object\": + \"chat.completion\",\n \"created\": 1769533221,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_qF1c2e31GgjoSNJx0HBxI3zX\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"dangerous_operation\",\n + \ \"arguments\": \"{\\\"action\\\":\\\"delete_all\\\"}\"\n }\n + \ }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 204,\n \"completion_tokens\": + 17,\n \"total_tokens\": 221,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 27 Jan 2026 17:00:21 GMT + Server: + - cloudflare + Strict-Transport-Security: + - 
STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '447' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Test Agent. You are a + test agent\nYour personal goal is: Try to use the dangerous operation tool"},{"role":"user","content":"\nCurrent + Task: Use the dangerous_operation tool with action ''delete_all''.\n\nThis is + the expected criteria for your final answer: The result of the operation\nyou + MUST return the actual complete content as the final answer, not a summary.\n\nThis + is VERY important to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_3OM1qS0QaWqhiJaHyJbNz1ME","name":"dangerous_operation","content":"Tool + execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze + the tool result. If requirements are met, provide the Final Answer. Otherwise, + call the next tool. Deliver only the answer without meta-commentary."},{"role":"assistant","content":null,"tool_calls":[{"id":"call_qF1c2e31GgjoSNJx0HBxI3zX","type":"function","function":{"name":"dangerous_operation","arguments":"{\"action\":\"delete_all\"}"}}]},{"role":"tool","tool_call_id":"call_qF1c2e31GgjoSNJx0HBxI3zX","name":"dangerous_operation","content":"Tool + execution blocked by hook. Tool: dangerous_operation"},{"role":"user","content":"Analyze + the tool result. If requirements are met, provide the Final Answer. Otherwise, + call the next tool. 
Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"dangerous_operation","description":"Perform + a dangerous operation that should be blocked.","parameters":{"properties":{"action":{"title":"Action","type":"string"}},"required":["action"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1849' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D2giM1tAvEOCNwDw1qNmNUN5PIg2Y\",\n \"object\": + \"chat.completion\",\n \"created\": 1769533222,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"The dangerous_operation tool with action + 'delete_all' was blocked and did not execute. There is no result from the + operation to provide.\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 275,\n \"completion_tokens\": + 28,\n \"total_tokens\": 303,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 27 Jan 2026 17:00:22 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '636' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_crew_native_tool_hooks_before_and_after.yaml b/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_crew_native_tool_hooks_before_and_after.yaml new file mode 100644 index 000000000..a60224a60 --- /dev/null +++ 
b/lib/crewai/tests/cassettes/hooks/TestNativeToolCallingHooksIntegration.test_crew_native_tool_hooks_before_and_after.yaml @@ -0,0 +1,230 @@ +interactions: +- request: + body: '{"messages":[{"role":"system","content":"You are Math Assistant. You are + a math assistant that helps with division\nYour personal goal is: Perform division + calculations accurately"},{"role":"user","content":"\nCurrent Task: Calculate + 100 divided by 4 using the divide_numbers tool.\n\nThis is the expected criteria + for your final answer: The result of the division\nyou MUST return the actual + complete content as the final answer, not a summary.\n\nThis is VERY important + to you, your job depends on it!"}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"divide_numbers","description":"Divide + first number by second number.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '809' + content-type: + - application/json + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D2gbkWUn8InDLeD1Cf8w0LxiUQOIS\",\n \"object\": + \"chat.completion\",\n \"created\": 1769532812,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": null,\n \"tool_calls\": [\n {\n + \ \"id\": \"call_gwIV3i71RNqfpr7KguEciCuV\",\n \"type\": + \"function\",\n \"function\": {\n \"name\": \"divide_numbers\",\n + \ \"arguments\": \"{\\\"a\\\":100,\\\"b\\\":4}\"\n }\n + \ }\n ],\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"tool_calls\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 140,\n \"completion_tokens\": + 18,\n \"total_tokens\": 158,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 27 Jan 2026 16:53:32 GMT + Server: + - cloudflare + Set-Cookie: + - SET-COOKIE-XXX + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '435' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + 
x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +- request: + body: '{"messages":[{"role":"system","content":"You are Math Assistant. You are + a math assistant that helps with division\nYour personal goal is: Perform division + calculations accurately"},{"role":"user","content":"\nCurrent Task: Calculate + 100 divided by 4 using the divide_numbers tool.\n\nThis is the expected criteria + for your final answer: The result of the division\nyou MUST return the actual + complete content as the final answer, not a summary.\n\nThis is VERY important + to you, your job depends on it!"},{"role":"assistant","content":null,"tool_calls":[{"id":"call_gwIV3i71RNqfpr7KguEciCuV","type":"function","function":{"name":"divide_numbers","arguments":"{\"a\":100,\"b\":4}"}}]},{"role":"tool","tool_call_id":"call_gwIV3i71RNqfpr7KguEciCuV","name":"divide_numbers","content":"25.0"},{"role":"user","content":"Analyze + the tool result. If requirements are met, provide the Final Answer. Otherwise, + call the next tool. Deliver only the answer without meta-commentary."}],"model":"gpt-4.1-mini","tool_choice":"auto","tools":[{"type":"function","function":{"name":"divide_numbers","description":"Divide + first number by second number.","parameters":{"properties":{"a":{"title":"A","type":"integer"},"b":{"title":"B","type":"integer"}},"required":["a","b"],"type":"object"}}}]}' + headers: + User-Agent: + - X-USER-AGENT-XXX + accept: + - application/json + accept-encoding: + - ACCEPT-ENCODING-XXX + authorization: + - AUTHORIZATION-XXX + connection: + - keep-alive + content-length: + - '1276' + content-type: + - application/json + cookie: + - COOKIE-XXX + host: + - api.openai.com + x-stainless-arch: + - X-STAINLESS-ARCH-XXX + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - X-STAINLESS-OS-XXX + x-stainless-package-version: + - 1.83.0 + x-stainless-read-timeout: + - X-STAINLESS-READ-TIMEOUT-XXX + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.13.3 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: "{\n \"id\": \"chatcmpl-D2gbkHw19D5oEBOhpZP5FR5MvRFgb\",\n \"object\": + \"chat.completion\",\n \"created\": 1769532812,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"25.0\",\n \"refusal\": null,\n + \ \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": + \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 204,\n \"completion_tokens\": + 4,\n \"total_tokens\": 208,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" + headers: + CF-RAY: + - CF-RAY-XXX + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Tue, 27 Jan 2026 16:53:33 GMT + Server: + - cloudflare + Strict-Transport-Security: + - STS-XXX + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - X-CONTENT-TYPE-XXX + 
access-control-expose-headers: + - ACCESS-CONTROL-XXX + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - OPENAI-ORG-XXX + openai-processing-ms: + - '523' + openai-project: + - OPENAI-PROJECT-XXX + openai-version: + - '2020-10-01' + x-openai-proxy-wasm: + - v0.1 + x-ratelimit-limit-requests: + - X-RATELIMIT-LIMIT-REQUESTS-XXX + x-ratelimit-limit-tokens: + - X-RATELIMIT-LIMIT-TOKENS-XXX + x-ratelimit-remaining-requests: + - X-RATELIMIT-REMAINING-REQUESTS-XXX + x-ratelimit-remaining-tokens: + - X-RATELIMIT-REMAINING-TOKENS-XXX + x-ratelimit-reset-requests: + - X-RATELIMIT-RESET-REQUESTS-XXX + x-ratelimit-reset-tokens: + - X-RATELIMIT-RESET-TOKENS-XXX + x-request-id: + - X-REQUEST-ID-XXX + status: + code: 200 + message: OK +version: 1 diff --git a/lib/crewai/tests/cassettes/hooks/TestToolHooksIntegration.test_lite_agent_hooks_integration_with_real_tool.yaml b/lib/crewai/tests/cassettes/hooks/TestToolHooksIntegration.test_lite_agent_hooks_integration_with_real_tool.yaml index a379c8397..f69c7df52 100644 --- a/lib/crewai/tests/cassettes/hooks/TestToolHooksIntegration.test_lite_agent_hooks_integration_with_real_tool.yaml +++ b/lib/crewai/tests/cassettes/hooks/TestToolHooksIntegration.test_lite_agent_hooks_integration_with_real_tool.yaml @@ -1,7 +1,22 @@ interactions: - request: - body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final - answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. 
+ You are a helpful calculator assistant\nYour personal goal is: Help with math + calculations\n\nYou ONLY have access to the following tools, and should NEVER + make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: + {\n \"properties\": {\n \"a\": {\n \"title\": \"A\",\n \"type\": + \"integer\"\n },\n \"b\": {\n \"title\": \"B\",\n \"type\": + \"integer\"\n }\n },\n \"required\": [\n \"a\",\n \"b\"\n ],\n \"title\": + \"Calculate_Sum\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\nTool + Description: Add two numbers together.\n\nIMPORTANT: Use the following format + in your response:\n\n```\nThought: you should always think about what to do\nAction: + the action to take, only one name of [calculate_sum], just the name, exactly + as it''s written.\nAction Input: the input to the action, just a simple JSON + object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: + the result of the action\n```\n\nOnce all necessary information is gathered, + return the following format:\n\n```\nThought: I now know the final answer\nFinal + Answer: the final answer to the original input question\n```"},{"role":"user","content":"What + is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}' headers: User-Agent: - X-USER-AGENT-XXX @@ -14,7 +29,7 @@ interactions: connection: - keep-alive content-length: - - '1119' + - '1356' content-type: - application/json host: @@ -41,8 +56,18 @@ interactions: uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CiksV15hVLWURKZH4BxQEGjiCFWpz\",\n \"object\": \"chat.completion\",\n \"created\": 1764782667,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I should use the calculate_sum tool to add 5 and 3.\\nAction: calculate_sum\\nAction Input: {\\\"a\\\": 5, \\\"b\\\": 3}\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 234,\n \"completion_tokens\": 40,\n \"total_tokens\": 274,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\ - : \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n" + string: "{\n \"id\": \"chatcmpl-D2gSz7JfTi4NQ2QRTANg8Z2afJI8b\",\n \"object\": + \"chat.completion\",\n \"created\": 1769532269,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"```\\nThought: I need to use the calculate_sum + tool to find the sum of 5 and 3\\nAction: calculate_sum\\nAction Input: {\\\"a\\\":5,\\\"b\\\":3}\\n```\",\n + \ \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": + null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": + 295,\n \"completion_tokens\": 41,\n \"total_tokens\": 336,\n \"prompt_tokens_details\": + {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" headers: CF-RAY: - CF-RAY-XXX @@ -51,7 +76,7 @@ interactions: 
Content-Type: - application/json Date: - - Wed, 03 Dec 2025 17:24:28 GMT + - Tue, 27 Jan 2026 16:44:30 GMT Server: - cloudflare Set-Cookie: @@ -71,13 +96,11 @@ interactions: openai-organization: - OPENAI-ORG-XXX openai-processing-ms: - - '681' + - '827' openai-project: - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '871' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: @@ -98,8 +121,25 @@ interactions: code: 200 message: OK - request: - body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final - answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought: I should use the calculate_sum tool to add 5 and 3.\nAction: calculate_sum\nAction Input: {\"a\": 5, \"b\": 3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}' + body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. + You are a helpful calculator assistant\nYour personal goal is: Help with math + calculations\n\nYou ONLY have access to the following tools, and should NEVER + make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: + {\n \"properties\": {\n \"a\": {\n \"title\": \"A\",\n \"type\": + \"integer\"\n },\n \"b\": {\n \"title\": \"B\",\n \"type\": + \"integer\"\n }\n },\n \"required\": [\n \"a\",\n \"b\"\n ],\n \"title\": + \"Calculate_Sum\",\n \"type\": \"object\",\n \"additionalProperties\": false\n}\nTool + Description: Add two numbers together.\n\nIMPORTANT: Use the following format + in your response:\n\n```\nThought: you should always think about what to do\nAction: + the action to take, only one name of [calculate_sum], just the name, exactly + as it''s written.\nAction Input: the input to the action, just a simple JSON + object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: + the result of the action\n```\n\nOnce all necessary information is gathered, + return the following format:\n\n```\nThought: I now know the final answer\nFinal + Answer: the final answer to the original input question\n```"},{"role":"user","content":"What + is 5 + 3? 
Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought: + I need to use the calculate_sum tool to find the sum of 5 and 3\nAction: calculate_sum\nAction + Input: {\"a\":5,\"b\":3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}' headers: User-Agent: - X-USER-AGENT-XXX @@ -112,7 +152,7 @@ interactions: connection: - keep-alive content-length: - - '1298' + - '1544' content-type: - application/json cookie: @@ -141,7 +181,18 @@ interactions: uri: https://api.openai.com/v1/chat/completions response: body: - string: "{\n \"id\": \"chatcmpl-CiksWrVbyJFurKCm7XPRU1b1pT7qF\",\n \"object\": \"chat.completion\",\n \"created\": 1764782668,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 283,\n \"completion_tokens\": 18,\n \"total_tokens\": 301,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n" + string: "{\n \"id\": \"chatcmpl-D2gT0RU66XqjAUOXnGmokD1Q8Fman\",\n \"object\": + \"chat.completion\",\n \"created\": 1769532270,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n + \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": + \"assistant\",\n \"content\": \"```\\nThought: I now know the final + answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\": + []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n + \ }\n ],\n \"usage\": {\n \"prompt_tokens\": 345,\n \"completion_tokens\": + 18,\n \"total_tokens\": 363,\n \"prompt_tokens_details\": {\n \"cached_tokens\": + 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": + {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": + 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": + \"default\",\n \"system_fingerprint\": \"fp_376a7ccef1\"\n}\n" headers: CF-RAY: - CF-RAY-XXX @@ -150,7 +201,7 @@ interactions: Content-Type: - application/json Date: - - Wed, 03 Dec 2025 17:24:29 GMT + - Tue, 27 Jan 2026 16:44:31 GMT Server: - cloudflare Strict-Transport-Security: @@ -168,208 +219,11 @@ interactions: openai-organization: - OPENAI-ORG-XXX openai-processing-ms: - - '427' + - '606' openai-project: - OPENAI-PROJECT-XXX openai-version: - '2020-10-01' - x-envoy-upstream-service-time: - - '442' - x-openai-proxy-wasm: - - v0.1 - x-ratelimit-limit-requests: - - X-RATELIMIT-LIMIT-REQUESTS-XXX - x-ratelimit-limit-tokens: - - X-RATELIMIT-LIMIT-TOKENS-XXX - x-ratelimit-remaining-requests: - - X-RATELIMIT-REMAINING-REQUESTS-XXX - x-ratelimit-remaining-tokens: - - X-RATELIMIT-REMAINING-TOKENS-XXX - x-ratelimit-reset-requests: - - X-RATELIMIT-RESET-REQUESTS-XXX - x-ratelimit-reset-tokens: - - X-RATELIMIT-RESET-TOKENS-XXX - x-request-id: - - X-REQUEST-ID-XXX - status: - code: 200 - message: OK -- request: - body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. 
You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final - answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."}],"model":"gpt-4.1-mini"}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - application/json - accept-encoding: - - ACCEPT-ENCODING-XXX - authorization: - - AUTHORIZATION-XXX - connection: - - keep-alive - content-length: - - '1119' - content-type: - - application/json - host: - - api.openai.com - x-stainless-arch: - - X-STAINLESS-ARCH-XXX - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - X-STAINLESS-OS-XXX - x-stainless-package-version: - - 1.83.0 - x-stainless-read-timeout: - - X-STAINLESS-READ-TIMEOUT-XXX - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.13.3 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-CimX8hwYiUUZijApUDk1yBMzTpBj9\",\n \"object\": \"chat.completion\",\n \"created\": 1764789030,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I need to add 5 and 3 using the calculate_sum tool.\\nAction: calculate_sum\\nAction Input: {\\\"a\\\":5,\\\"b\\\":3}\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 234,\n \"completion_tokens\": 37,\n \"total_tokens\": 271,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\"\ - : \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n" - headers: - CF-RAY: - - CF-RAY-XXX - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Wed, 03 Dec 2025 19:10:33 GMT - Server: - - cloudflare - Set-Cookie: - - SET-COOKIE-XXX - Strict-Transport-Security: - - STS-XXX - Transfer-Encoding: - - chunked - X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - access-control-expose-headers: - - ACCESS-CONTROL-XXX - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - openai-organization: - - OPENAI-ORG-XXX - openai-processing-ms: - - '2329' - openai-project: - - OPENAI-PROJECT-XXX - openai-version: - - '2020-10-01' - x-envoy-upstream-service-time: - - '2349' - x-openai-proxy-wasm: - - v0.1 - x-ratelimit-limit-requests: - - X-RATELIMIT-LIMIT-REQUESTS-XXX - x-ratelimit-limit-tokens: - - X-RATELIMIT-LIMIT-TOKENS-XXX - 
x-ratelimit-remaining-requests: - - X-RATELIMIT-REMAINING-REQUESTS-XXX - x-ratelimit-remaining-tokens: - - X-RATELIMIT-REMAINING-TOKENS-XXX - x-ratelimit-reset-requests: - - X-RATELIMIT-RESET-REQUESTS-XXX - x-ratelimit-reset-tokens: - - X-RATELIMIT-RESET-TOKENS-XXX - x-request-id: - - X-REQUEST-ID-XXX - status: - code: 200 - message: OK -- request: - body: '{"messages":[{"role":"system","content":"You are Calculator Assistant. You are a helpful calculator assistant\nYour personal goal is: Help with math calculations\n\nYou ONLY have access to the following tools, and should NEVER make up tools that are not listed here:\n\nTool Name: calculate_sum\nTool Arguments: {''a'': {''description'': None, ''type'': ''int''}, ''b'': {''description'': None, ''type'': ''int''}}\nTool Description: Add two numbers together.\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought: you should always think about what to do\nAction: the action to take, only one name of [calculate_sum], just the name, exactly as it''s written.\nAction Input: the input to the action, just a simple JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce all necessary information is gathered, return the following format:\n\n```\nThought: I now know the final answer\nFinal Answer: the final - answer to the original input question\n```"},{"role":"user","content":"What is 5 + 3? Use the calculate_sum tool."},{"role":"assistant","content":"```\nThought: I need to add 5 and 3 using the calculate_sum tool.\nAction: calculate_sum\nAction Input: {\"a\":5,\"b\":3}\n```\nObservation: 8"}],"model":"gpt-4.1-mini"}' - headers: - User-Agent: - - X-USER-AGENT-XXX - accept: - - application/json - accept-encoding: - - ACCEPT-ENCODING-XXX - authorization: - - AUTHORIZATION-XXX - connection: - - keep-alive - content-length: - - '1295' - content-type: - - application/json - cookie: - - COOKIE-XXX - host: - - api.openai.com - x-stainless-arch: - - X-STAINLESS-ARCH-XXX - x-stainless-async: - - 'false' - x-stainless-lang: - - python - x-stainless-os: - - X-STAINLESS-OS-XXX - x-stainless-package-version: - - 1.83.0 - x-stainless-read-timeout: - - X-STAINLESS-READ-TIMEOUT-XXX - x-stainless-retry-count: - - '0' - x-stainless-runtime: - - CPython - x-stainless-runtime-version: - - 3.13.3 - method: POST - uri: https://api.openai.com/v1/chat/completions - response: - body: - string: "{\n \"id\": \"chatcmpl-CimXBrY5sdbr2pJnqGlazPTra4dor\",\n \"object\": \"chat.completion\",\n \"created\": 1764789033,\n \"model\": \"gpt-4.1-mini-2025-04-14\",\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": \"```\\nThought: I now know the final answer\\nFinal Answer: 8\\n```\",\n \"refusal\": null,\n \"annotations\": []\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\": 280,\n \"completion_tokens\": 18,\n \"total_tokens\": 298,\n \"prompt_tokens_details\": {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\": 0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\": \"default\",\n \"system_fingerprint\": \"fp_9766e549b2\"\n}\n" - headers: - CF-RAY: - - CF-RAY-XXX - Connection: - - keep-alive - Content-Type: - - application/json - Date: - - Wed, 03 Dec 2025 19:10:35 GMT - Server: - - cloudflare - Strict-Transport-Security: - - STS-XXX - Transfer-Encoding: - - chunked - 
X-Content-Type-Options: - - X-CONTENT-TYPE-XXX - access-control-expose-headers: - - ACCESS-CONTROL-XXX - alt-svc: - - h3=":443"; ma=86400 - cf-cache-status: - - DYNAMIC - openai-organization: - - OPENAI-ORG-XXX - openai-processing-ms: - - '1647' - openai-project: - - OPENAI-PROJECT-XXX - openai-version: - - '2020-10-01' - x-envoy-upstream-service-time: - - '1694' x-openai-proxy-wasm: - v0.1 x-ratelimit-limit-requests: diff --git a/lib/crewai/tests/hooks/test_tool_hooks.py b/lib/crewai/tests/hooks/test_tool_hooks.py index e8c6bd7d0..f918427c7 100644 --- a/lib/crewai/tests/hooks/test_tool_hooks.py +++ b/lib/crewai/tests/hooks/test_tool_hooks.py @@ -590,3 +590,163 @@ class TestToolHooksIntegration: # Clean up hooks unregister_before_tool_call_hook(before_tool_call_hook) unregister_after_tool_call_hook(after_tool_call_hook) + + +class TestNativeToolCallingHooksIntegration: + """Integration tests for hooks with native function calling (Agent and Crew).""" + + @pytest.mark.vcr() + def test_agent_native_tool_hooks_before_and_after(self): + """Test that Agent with native tool calling executes before/after hooks.""" + import os + from crewai import Agent + from crewai.tools import tool + + if not os.environ.get("OPENAI_API_KEY"): + pytest.skip("OPENAI_API_KEY not set") + + hook_calls = {"before": [], "after": []} + + @tool("multiply_numbers") + def multiply_numbers(a: int, b: int) -> int: + """Multiply two numbers together.""" + return a * b + + def before_hook(context: ToolCallHookContext) -> bool | None: + hook_calls["before"].append({ + "tool_name": context.tool_name, + "tool_input": dict(context.tool_input), + "has_agent": context.agent is not None, + }) + return None + + def after_hook(context: ToolCallHookContext) -> str | None: + hook_calls["after"].append({ + "tool_name": context.tool_name, + "tool_result": context.tool_result, + "has_agent": context.agent is not None, + }) + return None + + register_before_tool_call_hook(before_hook) + register_after_tool_call_hook(after_hook) + + try: + agent = Agent( + role="Calculator", + goal="Perform calculations", + backstory="You are a calculator assistant", + tools=[multiply_numbers], + verbose=True, + ) + + result = agent.kickoff( + messages="What is 7 times 6? Use the multiply_numbers tool." 
+ ) + + # Verify before hook was called + assert len(hook_calls["before"]) > 0, "Before hook was never called" + before_call = hook_calls["before"][0] + assert before_call["tool_name"] == "multiply_numbers" + assert "a" in before_call["tool_input"] + assert "b" in before_call["tool_input"] + assert before_call["has_agent"] is True + + # Verify after hook was called + assert len(hook_calls["after"]) > 0, "After hook was never called" + after_call = hook_calls["after"][0] + assert after_call["tool_name"] == "multiply_numbers" + assert "42" in str(after_call["tool_result"]) + assert after_call["has_agent"] is True + + finally: + unregister_before_tool_call_hook(before_hook) + unregister_after_tool_call_hook(after_hook) + + @pytest.mark.vcr() + def test_crew_native_tool_hooks_before_and_after(self): + """Test that Crew with Agent executes before/after hooks with full context.""" + import os + from crewai import Agent, Crew, Task + from crewai.tools import tool + + if not os.environ.get("OPENAI_API_KEY"): + pytest.skip("OPENAI_API_KEY not set") + + hook_calls = {"before": [], "after": []} + + @tool("divide_numbers") + def divide_numbers(a: int, b: int) -> float: + """Divide first number by second number.""" + return a / b + + def before_hook(context: ToolCallHookContext) -> bool | None: + hook_calls["before"].append({ + "tool_name": context.tool_name, + "tool_input": dict(context.tool_input), + "has_agent": context.agent is not None, + "has_task": context.task is not None, + "has_crew": context.crew is not None, + "agent_role": context.agent.role if context.agent else None, + }) + return None + + def after_hook(context: ToolCallHookContext) -> str | None: + hook_calls["after"].append({ + "tool_name": context.tool_name, + "tool_result": context.tool_result, + "has_agent": context.agent is not None, + "has_task": context.task is not None, + "has_crew": context.crew is not None, + }) + return None + + register_before_tool_call_hook(before_hook) + register_after_tool_call_hook(after_hook) + + try: + agent = Agent( + role="Math Assistant", + goal="Perform division calculations accurately", + backstory="You are a math assistant that helps with division", + tools=[divide_numbers], + verbose=True, + ) + + task = Task( + description="Calculate 100 divided by 4 using the divide_numbers tool.", + expected_output="The result of the division", + agent=agent, + ) + + crew = Crew( + agents=[agent], + tasks=[task], + verbose=True, + ) + + result = crew.kickoff() + + # Verify before hook was called with full context + assert len(hook_calls["before"]) > 0, "Before hook was never called" + before_call = hook_calls["before"][0] + assert before_call["tool_name"] == "divide_numbers" + assert "a" in before_call["tool_input"] + assert "b" in before_call["tool_input"] + assert before_call["has_agent"] is True + assert before_call["has_task"] is True + assert before_call["has_crew"] is True + assert before_call["agent_role"] == "Math Assistant" + + # Verify after hook was called with full context + assert len(hook_calls["after"]) > 0, "After hook was never called" + after_call = hook_calls["after"][0] + assert after_call["tool_name"] == "divide_numbers" + assert "25" in str(after_call["tool_result"]) + assert after_call["has_agent"] is True + assert after_call["has_task"] is True + assert after_call["has_crew"] is True + + finally: + unregister_before_tool_call_hook(before_hook) + unregister_after_tool_call_hook(after_hook)
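
Usage note (illustrative sketch, not part of the patch): the snippet below shows how the new before/after tool call hooks could be registered around a native tool-calling run, mirroring what the integration tests above do. The register/unregister helpers are the ones exercised in test_tool_hooks.py; their import path is assumed here to be crewai.hooks.tool_hooks alongside ToolCallHookContext (the executors only import ToolCallHookContext and the get_*_tool_call_hooks accessors, so adjust the import if the helpers live elsewhere). The BLOCKED_TOOLS set and the "[audited]" prefix are hypothetical.

from crewai.hooks.tool_hooks import (  # assumed import path for the register helpers
    ToolCallHookContext,
    register_after_tool_call_hook,
    register_before_tool_call_hook,
    unregister_after_tool_call_hook,
    unregister_before_tool_call_hook,
)

BLOCKED_TOOLS = {"dangerous_operation"}  # hypothetical blocklist


def block_dangerous_tools(context: ToolCallHookContext) -> bool | None:
    # Returning False blocks the call; the executor then substitutes
    # "Tool execution blocked by hook. Tool: <name>" as the tool result.
    if context.tool_name in BLOCKED_TOOLS:
        return False
    return None  # None lets execution proceed unchanged


def audit_tool_result(context: ToolCallHookContext) -> str | None:
    # Returning a non-None value replaces the result the agent sees;
    # returning None keeps the original tool output.
    return f"[audited] {context.tool_result}"


register_before_tool_call_hook(block_dangerous_tools)
register_after_tool_call_hook(audit_tool_result)
try:
    ...  # run the crew or agent here, e.g. crew.kickoff(), as in the tests above
finally:
    # Hooks are registered globally, so unregister them to avoid leaking into other runs.
    unregister_before_tool_call_hook(block_dangerous_tools)
    unregister_after_tool_call_hook(audit_tool_result)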