Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-05-02 15:52:34 +00:00
Merge branch 'main' into gl/chore/use-base-model-for-llms
147
lib/crewai/tests/agents/test_a2a_trust_completion_status.py
Normal file
@@ -0,0 +1,147 @@
"""Test trust_remote_completion_status flag in A2A wrapper."""

from unittest.mock import MagicMock, patch

import pytest

from crewai.a2a.config import A2AConfig

try:
    from a2a.types import Message, Role

    A2A_SDK_INSTALLED = True
except ImportError:
    A2A_SDK_INSTALLED = False


@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_true_returns_directly():
    """When trust_remote_completion_status=True and A2A returns completed, return result directly."""
    from crewai.a2a.wrapper import _delegate_to_a2a
    from crewai.a2a.types import AgentResponseProtocol
    from crewai import Agent, Task

    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
        trust_remote_completion_status=True,
    )

    agent = Agent(
        role="test manager",
        goal="coordinate",
        backstory="test",
        a2a=a2a_config,
    )

    task = Task(description="test", expected_output="test", agent=agent)

    class MockResponse:
        is_a2a = True
        message = "Please help"
        a2a_ids = ["http://test-endpoint.com/"]

    with (
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = MagicMock()
        mock_card.name = "Test"
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})

        # A2A returns completed
        mock_execute.return_value = {
            "status": "completed",
            "result": "Done by remote",
            "history": [],
        }

        # This should return directly without checking LLM response
        result = _delegate_to_a2a(
            self=agent,
            agent_response=MockResponse(),
            task=task,
            original_fn=lambda *args, **kwargs: "fallback",
            context=None,
            tools=None,
            agent_cards={"http://test-endpoint.com/": mock_card},
            original_task_description="test",
        )

        assert result == "Done by remote"
        assert mock_execute.call_count == 1


@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_false_continues_conversation():
    """When trust_remote_completion_status=False and A2A returns completed, ask server agent."""
    from crewai.a2a.wrapper import _delegate_to_a2a
    from crewai import Agent, Task

    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
        trust_remote_completion_status=False,
    )

    agent = Agent(
        role="test manager",
        goal="coordinate",
        backstory="test",
        a2a=a2a_config,
    )

    task = Task(description="test", expected_output="test", agent=agent)

    class MockResponse:
        is_a2a = True
        message = "Please help"
        a2a_ids = ["http://test-endpoint.com/"]

    call_count = 0

    def mock_original_fn(self, task, context, tools):
        nonlocal call_count
        call_count += 1
        if call_count == 1:
            # Server decides to finish
            return '{"is_a2a": false, "message": "Server final answer", "a2a_ids": []}'
        return "unexpected"

    with (
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = MagicMock()
        mock_card.name = "Test"
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})

        # A2A returns completed
        mock_execute.return_value = {
            "status": "completed",
            "result": "Done by remote",
            "history": [],
        }

        result = _delegate_to_a2a(
            self=agent,
            agent_response=MockResponse(),
            task=task,
            original_fn=mock_original_fn,
            context=None,
            tools=None,
            agent_cards={"http://test-endpoint.com/": mock_card},
            original_task_description="test",
        )

        # Should call original_fn to get server response
        assert call_count >= 1
        assert result == "Server final answer"


@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_default_trust_remote_completion_status_is_false():
    """Verify that default value of trust_remote_completion_status is False."""
    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
    )

    assert a2a_config.trust_remote_completion_status is False
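Note: the flag exercised by these tests is configured on the agent's A2A settings. Below is a minimal usage sketch, assuming only the A2AConfig/Agent/Task/Crew surface shown in the tests above; the endpoint URL and the role/goal strings are placeholders, not values from this commit.

from crewai import Agent, Crew, Task
from crewai.a2a.config import A2AConfig

# Trust the remote agent's "completed" status: when the delegated A2A task
# reports it is done, its result is returned directly instead of asking the
# local LLM to decide whether the delegation is really finished.
remote = A2AConfig(
    endpoint="https://remote-agent.example.com",  # placeholder endpoint
    trust_remote_completion_status=True,          # default is False
)

coordinator = Agent(
    role="coordinator",
    goal="delegate work to a remote A2A agent",
    backstory="coordinates remote specialists",
    a2a=remote,
)

task = Task(description="Summarize the report", expected_output="A summary", agent=coordinator)
result = Crew(agents=[coordinator], tasks=[task]).kickoff()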
@@ -2714,293 +2714,3 @@ def test_agent_without_apps_no_platform_tools():
    tools = crew._prepare_tools(agent, task, [])
    assert tools == []


@pytest.mark.vcr(filter_headers=["authorization"])
def test_before_llm_call_hook_modifies_messages():
    """Test that before_llm_call hooks can modify messages."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_before_llm_call_hook

    hook_called = False
    original_message_count = 0

    def before_hook(context: LLMCallHookContext) -> None:
        nonlocal hook_called, original_message_count
        hook_called = True
        original_message_count = len(context.messages)
        context.messages.append({
            "role": "user",
            "content": "Additional context: This is a test modification."
        })

    register_before_llm_call_hook(before_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            allow_delegation=False,
        )

        task = Task(
            description="Say hello",
            expected_output="A greeting",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_called, "before_llm_call hook should have been called"
        assert len(agent.agent_executor.messages) > original_message_count
        assert result is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_after_llm_call_hook_modifies_messages_for_next_iteration():
    """Test that after_llm_call hooks can modify messages for the next iteration."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_after_llm_call_hook

    hook_call_count = 0
    hook_iterations = []
    messages_added_in_iteration_0 = False
    test_message_content = "HOOK_ADDED_MESSAGE_FOR_NEXT_ITERATION"

    def after_hook(context: LLMCallHookContext) -> str | None:
        nonlocal hook_call_count, hook_iterations, messages_added_in_iteration_0
        hook_call_count += 1
        current_iteration = context.iterations
        hook_iterations.append(current_iteration)

        if current_iteration == 0:
            messages_before = len(context.messages)
            context.messages.append({
                "role": "user",
                "content": test_message_content
            })
            messages_added_in_iteration_0 = True
            assert len(context.messages) == messages_before + 1

        return None

    register_after_llm_call_hook(after_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            allow_delegation=False,
            max_iter=3,
        )

        task = Task(
            description="Count to 3, taking your time",
            expected_output="A count",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_call_count > 0, "after_llm_call hook should have been called"
        assert messages_added_in_iteration_0, "Message should have been added in iteration 0"

        executor_messages = agent.agent_executor.messages
        message_contents = [msg.get("content", "") for msg in executor_messages if isinstance(msg, dict)]
        assert any(test_message_content in content for content in message_contents), (
            f"Message added by hook in iteration 0 should be present in executor messages. "
            f"Messages: {message_contents}"
        )

        assert len(executor_messages) > 2, "Executor should have more than initial messages"
        assert result is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_after_llm_call_hook_modifies_messages():
    """Test that after_llm_call hooks can modify messages for next iteration."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_after_llm_call_hook

    hook_called = False
    messages_before_hook = 0

    def after_hook(context: LLMCallHookContext) -> str | None:
        nonlocal hook_called, messages_before_hook
        hook_called = True
        messages_before_hook = len(context.messages)
        context.messages.append({
            "role": "user",
            "content": "Remember: This is iteration 2 context."
        })
        return None  # Don't modify response

    register_after_llm_call_hook(after_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            allow_delegation=False,
            max_iter=2,
        )

        task = Task(
            description="Count to 2",
            expected_output="A count",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_called, "after_llm_call hook should have been called"
        assert len(agent.agent_executor.messages) > messages_before_hook
        assert result is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_hooks_with_crew():
    """Test that LLM call hooks work with crew execution."""
    from crewai.utilities.llm_call_hooks import (
        LLMCallHookContext,
        register_after_llm_call_hook,
        register_before_llm_call_hook,
    )

    before_hook_called = False
    after_hook_called = False

    def before_hook(context: LLMCallHookContext) -> None:
        nonlocal before_hook_called
        before_hook_called = True
        assert context.executor is not None
        assert context.agent is not None
        assert context.task is not None
        context.messages.append({
            "role": "system",
            "content": "Additional system context from hook."
        })

    def after_hook(context: LLMCallHookContext) -> str | None:
        nonlocal after_hook_called
        after_hook_called = True
        assert context.response is not None
        assert len(context.messages) > 0
        return None

    register_before_llm_call_hook(before_hook)
    register_after_llm_call_hook(after_hook)

    try:
        agent = Agent(
            role="Researcher",
            goal="Research topics",
            backstory="You are a researcher",
            allow_delegation=False,
        )

        task = Task(
            description="Research AI frameworks",
            expected_output="A research summary",
            agent=agent,
        )

        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()

        assert before_hook_called, "before_llm_call hook should have been called"
        assert after_hook_called, "after_llm_call hook should have been called"
        assert result is not None
        assert result.raw is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_hooks_can_modify_executor_attributes():
    """Test that hooks can access and modify executor attributes like tools."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_before_llm_call_hook
    from crewai.tools import tool

    @tool
    def test_tool() -> str:
        """A test tool."""
        return "test result"

    hook_called = False
    original_tools_count = 0

    def before_hook(context: LLMCallHookContext) -> None:
        nonlocal hook_called, original_tools_count
        hook_called = True
        original_tools_count = len(context.executor.tools)
        assert context.executor.max_iter > 0
        assert context.executor.iterations >= 0
        assert context.executor.tools is not None

    register_before_llm_call_hook(before_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            tools=[test_tool],
            allow_delegation=False,
        )

        task = Task(
            description="Use the test tool",
            expected_output="Tool result",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_called, "before_llm_call hook should have been called"
        assert original_tools_count >= 0
        assert result is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_hooks_error_handling():
    """Test that hook errors don't break execution."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_before_llm_call_hook

    hook_called = False

    def error_hook(context: LLMCallHookContext) -> None:
        nonlocal hook_called
        hook_called = True
        raise ValueError("Test hook error")

    register_before_llm_call_hook(error_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            allow_delegation=False,
        )

        task = Task(
            description="Say hello",
            expected_output="A greeting",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_called, "before_llm_call hook should have been called"
        assert result is not None
    finally:
        pass
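For reference, a minimal sketch of how the global hooks exercised above would be registered in application code. It assumes register_before_llm_call_hook, register_after_llm_call_hook, and the LLMCallHookContext fields (messages, iterations, response) behave exactly as these tests show; the agent, task, and message strings are placeholders.

from crewai import Agent, Task
from crewai.utilities.llm_call_hooks import (
    LLMCallHookContext,
    register_after_llm_call_hook,
    register_before_llm_call_hook,
)


def add_guardrail(context: LLMCallHookContext) -> None:
    # Runs before every LLM call; appending to context.messages changes what the model sees.
    context.messages.append({"role": "system", "content": "Keep answers under 100 words."})


def log_call(context: LLMCallHookContext) -> str | None:
    # Runs after every LLM call; returning None leaves the response unchanged.
    print(f"iteration {context.iterations}: {len(context.messages)} messages")
    return None


register_before_llm_call_hook(add_guardrail)
register_after_llm_call_hook(log_call)

agent = Agent(role="Writer", goal="Write short summaries", backstory="A concise writer")
task = Task(description="Summarize what LLM call hooks do", expected_output="A short summary", agent=agent)
print(agent.execute_task(task))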
@@ -1,126 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Count to 2\n\nThis
|
||||
is the expected criteria for your final answer: A count\nyou MUST return the
|
||||
actual complete content as the final answer, not a summary.\n\nBegin! This is
|
||||
VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"},{"role":"user","content":"Additional context:
|
||||
This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '849'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJNb5wwEL3zK0Y+QwSI7LLcokqVcujHoR9S2wg5ZsBujceyTdIo2v9e
|
||||
GTYLaROpFyTmzXt+b2YeEwCmOtYAE5IHMVqdveH09UHKLx+/2eFzkAdZXL8XJPr9h3dFydLIoNuf
|
||||
KMIT60LQaDUGRWaBhUMeMKoW+11ZH/K8rmZgpA51pA02ZNVFkY3KqKzMy8ssr7KiOtElKYGeNfA9
|
||||
AQB4nL/RqOnwN2sgT58qI3rPB2TNuQmAOdKxwrj3ygduAktXUJAJaGbvnyRNgwwNXIOhexDcwKDu
|
||||
EDgMMQBw4+/R/TBvleEarua/BooUyq2gw37yPKYyk9YbgBtDgcepzFFuTsjxbF7TYB3d+r+orFdG
|
||||
edk65J5MNOoDWTajxwTgZh7S9Cw3s45GG9pAv3B+rtgdFj22LmeD1icwUOB6W9+nL+i1HQautN+M
|
||||
mQkuJHYrdd0JnzpFGyDZpP7XzUvaS3Jlhv+RXwEh0AbsWuuwU+J54rXNYbzd19rOU54NM4/uTgls
|
||||
g0IXN9Fhzye9HBTzDz7g2PbKDOisU8tV9batRFlfFn29K1lyTP4AAAD//wMApumqgWQDAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d044543db94e48-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=KLlCOQ_zxXquDvj96O28ObVFEoAbFE8R7zlmuiuXH1M-1762890085-1.0.1.1-UChItG1GnLDHrErY60dUpkbD3lEkSvfkTQpOmEtzd0fjjm_y1pJQiB.VDXVi2pPIMSelir0ZgiVXSh5.hGPb3RjQqbH3pv0Rr_2dQ59OIQ8;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:25 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=u.Z6xV9tQd3ucK35BinKtlCkewcI6q_uQicyeEeeR18-1762890085355-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '559'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '735'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_bcaa0f8500714ed09f967488b238ce2e
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,222 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"trace_id": "aeb82647-004a-4a30-9481-d55f476d5659", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-11T19:45:17.648657+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.4.1
|
||||
X-Crewai-Version:
|
||||
- 1.4.1
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:45:17 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 48a89b0d-206b-4c1b-aa0d-ecc3b4ab525c
|
||||
x-runtime:
|
||||
- '0.088251'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Count to 3, taking
|
||||
your time\n\nThis is the expected criteria for your final answer: A count\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '790'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJNa9wwEL37Vww6r43tOpuNb2nKQgslOSy0NA1mIo9tdWVJSHK2Jex/
|
||||
L/J+2Ns20IuE5s0bzXszrxEAEzUrgfEOPe+NjO9Q41atP3/79GG7vX8QD0Xq15svX9/fUd+yRWDo
|
||||
5x/E/YmVcN0bSV5odYC5JfQUqmbXy3x1k77LViPQ65pkoLXGx0WSxb1QIs7T/CpOizgrjvROC06O
|
||||
lfAYAQC8jmdoVNX0k5WQLk6RnpzDllh5TgJgVssQYeiccB6VZ4sJ5Fp5UmPvm04PbedL+AhK74Cj
|
||||
gla8ECC0QQCgcjuy39VaKJRwO75KuFeUJAlsdnq8OkuUzD+w1AwOg0o1SDkDUCntMbg0Sns6Ivuz
|
||||
GKlbY/Wz+4PKGqGE6ypL6LQKjTuvDRvRfQTwNJo2XPjAjNW98ZXXWxq/y5ZH09g0rBl6cwS99ihn
|
||||
8esTcFGvqsmjkG5mO+PIO6on6jQjHGqhZ0A0U/13N/+qfVAuVPs/5SeAczKe6spYqgW/VDylWQq7
|
||||
/Fba2eWxYebIvghOlRdkwyRqanCQhwVj7pfz1FeNUC1ZY8VhyxpTFTxfXWXNapmzaB/9BgAA//8D
|
||||
AL0LXHV0AwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d04a06dc4d1949-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:45:18 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=KnsnYxgmlpoHf.5TWnNgU30xb2tc0gK7SC2BbUkud2M-1762890318-1.0.1.1-3KeaQY59x5mY6n8DINELLaH9_b68w7W4ZZ0KeOknBHmQyDwx5qbtDonfYxOjsO_KykjtJLHpB0bsINSNEa9TrjNQHqUWTlRhldfTLenUG44;
|
||||
path=/; expires=Tue, 11-Nov-25 20:15:18 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=ekC35NRP79GCMP.eTi_odl5.6DIsAeFEXKlanWUZOH4-1762890318589-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '598'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '632'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_cb36cbe6c33b42a28675e8c6d9a36fe9
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,127 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
|
||||
is the expected criteria for your final answer: A greeting\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"},{"role":"user","content":"Additional context:
|
||||
This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '851'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJdi9swEHz3r9jqOT5sk+RSvx2lJW1poXDQ0vYwirS21cpaIclJr0f+
|
||||
+yE7F/s+Cn0xeGdnNLO7dwkAU5KVwETLg+isTt9w+rr/YESx27+93RaHVm4/ff7y8Vpcffv+ly0i
|
||||
g3a/UIQH1oWgzmoMiswIC4c8YFTNL9fF5nWWbfIB6EiijrTGhnR5kaedMiotsmKVZss0X57oLSmB
|
||||
npXwIwEAuBu+0aiR+IeVkC0eKh16zxtk5bkJgDnSscK498oHbgJbTKAgE9AM3q9b6ps2lPAeDB1A
|
||||
cAON2iNwaGIA4MYf0P0075ThGq6GvxK2qDW9mks6rHvPYy7Taz0DuDEUeJzLEObmhBzP9jU11tHO
|
||||
P6GyWhnl28oh92SiVR/IsgE9JgA3w5j6R8mZddTZUAX6jcNz+fpy1GPTembo6gQGClzP6pti8YJe
|
||||
JTFwpf1s0Exw0aKcqNNWeC8VzYBklvq5m5e0x+TKNP8jPwFCoA0oK+tQKvE48dTmMF7vv9rOUx4M
|
||||
M49urwRWQaGLm5BY816PJ8X8rQ/YVbUyDTrr1HhXta2Wotis8nqzLlhyTO4BAAD//wMAuV0QSWYD
|
||||
AAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d044428f103c35-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:22 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=jp.mByP87tLw_KZOIh7lXZ9UMACecreCMNwHwtJmUvQ-1762890082-1.0.1.1-D76UWkvWlN8e0zlQpgSlSHjrhx3Rkh_r8bz4XKx8kljJt8s9Okre9bo7M62ewJNFK9O9iuHkADMKeAEwlsc4Hg0MsF2vt2Hu1J0xikSInv0;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:22 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=pzTqogdMFPJY2.Yrj49LODdUKbD8UBctCWNyIZVsvK4-1762890082258-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '460'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '478'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999820'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_3bda51e6d3e34f8cadcc12551dc29ab0
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,262 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: test_tool\nTool
|
||||
Arguments: {}\nTool Description: A test tool.\n\nIMPORTANT: Use the following
|
||||
format in your response:\n\n```\nThought: you should always think about what
|
||||
to do\nAction: the action to take, only one name of [test_tool], just the name,
|
||||
exactly as it''s written.\nAction Input: the input to the action, just a simple
|
||||
JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
|
||||
Task: Use the test tool\n\nThis is the expected criteria for your final answer:
|
||||
Tool result\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"user","content":"Additional
|
||||
context: This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1311'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xTy47bMAy85ysIneMgcbNp1reizwXaXrpAD83CVmTaViqLWolu2gb590LOw94+
|
||||
gF504HBGw6F0mAAIXYoMhGokq9aZ5KWkz/vdTn14i/dv9h9f19tXi3dtun789H71U0wjg7Y7VHxh
|
||||
zRS1ziBrsidYeZSMUXXxfJWub+fzddoDLZVoIq12nCxni6TVVifpPL1J5stksTzTG9IKg8jgywQA
|
||||
4NCf0agt8bvIYD69VFoMQdYosmsTgPBkYkXIEHRgaVlMB1CRZbS996IoNva+oa5uOIM7CA11poQu
|
||||
IHCDwBg4ZyIDTFAj90WPj532WIK2FflWxqGhIt+DlbbSgLRhj362sS9URLNB6FKCO+s6zuBw3Nii
|
||||
KMb2PFZdkDEj2xkzAqS1xP11fTAPZ+R4jcJQ7Txtw29UUWmrQ5N7lIFsHDswOdGjxwnAQx959yRF
|
||||
4Ty1Lnr+iv116Wp10hPDqgf02XkfgomlGbFuL6wnenmJLLUJo6UJJVWD5UAdNiy7UtMImIym/tPN
|
||||
37RPk2tb/4/8ACiFjrHMncdSq6cTD20e40/4V9s15d6wCOi/aYU5a/RxEyVWsjOn5ynCj8DY5pW2
|
||||
NXrn9emNVi5fqnR9s6jWq1RMjpNfAAAA//8DANALR4WyAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d044470bdeb976-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:23 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=p01_b1BsQgwR2woMBWf1E0gJMDDl7pvqkEVHpHAsMJA-1762890083-1.0.1.1-u8iYLTTx0lmfSR1.CzuuYiHgt03yVVUMsBD8WgExXWm7ts.grUwM1ifj9p6xIz.HElrnQdfDSBD5Lv045aNr61YcB8WW3Vz33W9N0Gn0P3w;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:23 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=2gUmBgxb3VydVYt8.t_P6bY8U_pS.a4KeYpZWDDYM9Q-1762890083295-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '729'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '759'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999707'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999707'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_70c7033dbc5e4ced80d3fdcbcda2c675
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: test_tool\nTool
|
||||
Arguments: {}\nTool Description: A test tool.\n\nIMPORTANT: Use the following
|
||||
format in your response:\n\n```\nThought: you should always think about what
|
||||
to do\nAction: the action to take, only one name of [test_tool], just the name,
|
||||
exactly as it''s written.\nAction Input: the input to the action, just a simple
|
||||
JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
|
||||
Task: Use the test tool\n\nThis is the expected criteria for your final answer:
|
||||
Tool result\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"user","content":"Additional
|
||||
context: This is a test modification."},{"role":"assistant","content":"```\nThought:
|
||||
I should use the test_tool to get the required information for the final answer.\nAction:
|
||||
test_tool\nAction Input: {}\n```\nObservation: test result"},{"role":"user","content":"Additional
|
||||
context: This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1584'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=p01_b1BsQgwR2woMBWf1E0gJMDDl7pvqkEVHpHAsMJA-1762890083-1.0.1.1-u8iYLTTx0lmfSR1.CzuuYiHgt03yVVUMsBD8WgExXWm7ts.grUwM1ifj9p6xIz.HElrnQdfDSBD5Lv045aNr61YcB8WW3Vz33W9N0Gn0P3w;
|
||||
_cfuvid=2gUmBgxb3VydVYt8.t_P6bY8U_pS.a4KeYpZWDDYM9Q-1762890083295-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbtQwEL3nKyyfN1WS3S5pbhRRKCeEkCpgq8RrTxJTxzb2pC1U++/I
|
||||
TrtJoUhcLNlv3vN7M/OQEEKloBWhvGfIB6vSN8xc3b+/FG/P3rX8x9X+y8dfHy7O8fYra88/0VVg
|
||||
mP134PjEOuFmsApQGj3B3AFDCKr5q21RnmVZuY7AYASoQOssppuTPB2klmmRFadptknzzSO9N5KD
|
||||
pxX5lhBCyEM8g1Et4J5WJFs9vQzgPeuAVsciQqgzKrxQ5r30yDTS1QxyoxF09N40zU5/7s3Y9ViR
|
||||
S6LNHbkJB/ZAWqmZIkz7O3A7fRFvr+OtIggeiQM/KtzppmmW+g7a0bMQUo9KLQCmtUEWmhSTXT8i
|
||||
h2MWZTrrzN7/QaWt1NL3tQPmjQ6+PRpLI3pICLmOPRuftYFaZwaLNZobiN+t83LSo/OsZvQIokGm
|
||||
Fqz1dvWCXi0AmVR+0XXKGe9BzNR5RGwU0iyAZJH6bzcvaU/Jpe7+R34GOAeLIGrrQEj+PPFc5iCs
|
||||
8r/Kjl2OhqkHdys51CjBhUkIaNmopv2i/qdHGOpW6g6cdXJastbWG16Up3lbbguaHJLfAAAA//8D
|
||||
AJW0fwtzAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d0444cbd6db976-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:23 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '527'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '578'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999655'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999655'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_6b1d84dcdde643cea5160e155ee624db
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,159 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"name":"llama3.2:3b"}'
|
||||
headers:
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '22'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- localhost:11434
|
||||
user-agent:
|
||||
- litellm/1.78.5
|
||||
method: POST
|
||||
uri: http://localhost:11434/api/show
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"model ''llama3.2:3b'' not found"}'
|
||||
headers:
|
||||
Content-Length:
|
||||
- '41'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:28 GMT
|
||||
status:
|
||||
code: 404
|
||||
message: Not Found
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
|
||||
is the expected criteria for your final answer: A greeting\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"},{"role":"user","content":"Additional context:
|
||||
This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '851'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLRbtQwEHzPVyx+vlRJmrte84KOSqgFCSFAqLRUkc/ZJAbHa9lOy6m6
|
||||
f0dOrpe0gMRLpHh2Znd29jECYLJiBTDRci86o+ILTten/ccPFzyp398srz9/2/o3X/PN6btN84kt
|
||||
AoO2P1D4J9aJoM4o9JL0CAuL3GNQTc9W2fo8SdbnA9BRhSrQGuPj/CSNO6llnCXZMk7yOM0P9Jak
|
||||
QMcKuI0AAB6HbxhUV/iLFZAsnl46dI43yIpjEQCzpMIL485J57n2bDGBgrRHPcz+paW+aX0BV6Dp
|
||||
AQTX0Mh7BA5NMABcuwe03/VbqbmCzfBXwCUqRa/g8sC4grEN7KgHTxXfvZ63s1j3jgfPuldqBnCt
|
||||
yfOws8Ho3QHZH60paoylrXtBZbXU0rWlRe5IBxvOk2EDuo8A7oYV9s+2woylzvjS008c2qWrs1GP
|
||||
TdFNaJYdQE+eqxlrTPGlXlmh51K5WQhMcNFiNVGnxHhfSZoB0cz1n9P8TXt0LnXzP/ITIAQaj1Vp
|
||||
LFZSPHc8lVkMl/2vsuOWh4GZQ3svBZZeog1JVFjzXo3nxtzOeezKWuoGrbFyvLnalLnI1su0Xq8y
|
||||
Fu2j3wAAAP//AwDurzwzggMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d0446e698367ab-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:30 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=b52crfzdOm5rh4aOc2LfM8aQKFI.ZL9WCZXaPBDdG5k-1762890090-1.0.1.1-T2xhtwX0vuEnMIb8NRgP4w3RRn1N1ZwSjuhKBob1vDLDmN7XhCKkoIg3IrlC9KEyhA65IGa5DWsHfmlRKKxqw6sIPA98BSO6E3wsTRspHw4;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:30 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=0TH0Kjp_5t6yhwXKA1wlKBHaczp.TeWhM2A5t6by1sI-1762890090153-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1049'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1387'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_4b132b998ed941b5b6a85ddbb36e2b65
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,182 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Researcher. You are a
|
||||
researcher\nYour personal goal is: Research topics\nTo give my best complete
|
||||
final answer to the task respond using the exact following format:\n\nThought:
|
||||
I now can give a great answer\nFinal Answer: Your final answer must be the great
|
||||
and the most complete as possible, it must be outcome described.\n\nI MUST use
|
||||
these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
|
||||
Research AI frameworks\n\nThis is the expected criteria for your final answer:
|
||||
A research summary\nyou MUST return the actual complete content as the final
|
||||
answer, not a summary.\n\nYou MUST follow these instructions: \n - Include specific
|
||||
examples and real-world case studies to enhance the credibility and depth of
|
||||
the article ideas.\n - Incorporate mentions of notable companies, projects,
|
||||
or tools relevant to each topic to provide concrete context.\n - Add diverse
|
||||
viewpoints such as interviews with experts, users, or thought leaders to enrich
|
||||
the narrative and lend authority.\n - Address ethical, social, and emotional
|
||||
considerations explicitly to reflect a balanced and comprehensive analysis.\n
|
||||
- Enhance the descriptions by including implications for future developments
|
||||
and the potential impact on society.\n - Use more engaging and vivid language
|
||||
that draws the reader into each topic''s nuances and importance.\n - Include
|
||||
notes or summaries that contextualize each set of ideas in terms of relevance
|
||||
and potential reader engagement.\n - In future tasks, focus on elaborating initial
|
||||
outlines into more detailed and nuanced article proposals with richer content
|
||||
and insights.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"user","content":"Additional
|
||||
context: This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1894'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA2RXTXPbOBK9z6/o8smpkrRJJjPJ6OZy4ownceKKnY+qzaUJNMmOQTQLACUrc5kf
|
||||
sZf9e/NLtrpBycrsRWWTINh4/V6/xz9/Ajhhf7KGE9djccMYlucoX57duvj+fXn/+hrzz8/v3j7f
|
||||
dL/35y9efDlZ6BPSfCNX9k+tnAxjoMIS622XCAvprk+e//r0xW+PH//22G4M4inoY91Yls9WT5YD
|
||||
R14+ffz0l+XjZ8snz+bHe2FH+WQN//4JAOBP+9VCo6f7kzXYZnZloJyxo5P1YRHASZKgV04wZ84F
|
||||
YzlZPNx0EgtFq/22l6nryxouIcoWHEboeEOA0OkBAGPeUvoaLzhigDP7b/01fo0fKBMm18PNNAyY
|
||||
diARzi7hIuFAW0l3WRddxpLET05hWX+NZ6lwy44xwGUsFAJ3FB3B6dnlI2gPTwImgtITNOjuGokE
|
||||
0oLCluwVnjYUZBwolgWMSTbsOXaQS5pcmRJ5oLjhJFFXZMDooYiEDKXHAhSxCQRpLp9SXgDFjiPZ
|
||||
n7paW4mRKUMR8JS5iwsoCTnW+57GIDsY0PUcCQJhilqBdTYDtXpGiiXsVnDbcz68DPKMlecNZeBY
|
||||
BAb8JkmP9XD+BSTCsNxKCh5wHAM7VAS10tKzw2BlZDEkncTMntJ+id6i+5FSAY6Zu77kBXAIUy66
|
||||
JnYKLSdokqAHHkZ0RZtXyPVRgnS7w+5Uditt45MVvN9Q2jBttRVvCQ3xH9q9/hqXcEsxS7oIsoXT
|
||||
1yJdoEdreF8bqA0dJBcYZZwCJpCR4jLLlBxB4CZhUsinTB5aSf8Pb4WexsOV1dH7/v7rvxnaQPes
|
||||
3VWwuZDRATAE2ea5a8oJQJckZzi//pgX8Np+dfPb6495BbVu22/KVvnRqTgq41T4GQLf0bwabhPG
|
||||
HLCQbTRfvO6lSIaeuz5YH4BLhuwwYMOBSwV6lC2llaJ3vbsVZcnpFRX81wU6akTuHq3hTZRtNFB0
|
||||
A7+LOLAzmk7F2o4BuoRjvzjswRla3IiqgeMRAakoBfJhM8J6Rk/N1HV7mI0/rFhhgDYReRmUy/TA
|
||||
0lp3Bq3VwEK/weioio7jXB4l2HBmibZvxDIlDBAwdhN2pGA6ylnfe/ru7fUjw+FsRNcTXH15RwVO
|
||||
zwb8LvHRGm4MuFAhPohsMa/L0zhKKnrsXKpa96fh2FLSQbOAQBtK2JGHZgd1Z/hMDdwoux1lOD37
|
||||
fPOo6j7whlSZS594QxFckMlDnldapVesTJK2wLl0kYtOzluRcMcFTs/f3b5R+ret4nRUcN4f5Ac2
|
||||
w0iplTQoiNaePBK5fgE8YEeVoYXuyxFmC5gKB/5em3woxtpxLqlgRHvs5m43PnBU0oHDdog/zr4c
|
||||
qfVchoYjZXg3Ddc72HLpAaciAxZ24Lk1LAsb7yrbrz8COkdhnkEL6GbwS0Ib/T9Q0Hb8UUQvicYr
|
||||
jt4KPwtjjxcSvI2epyv4oMPwsw3Dc2XrTZk8Ux04vxOG0js1DC3liryNx8sBlcvrY+n2mKEh7WOa
|
||||
bHBy3FvJPMzyLhcaMjgcrT3SAmEKO3VFRwk86UjZH3tM+jIbqCMW7SzIVJwMlBeQJ9cDZht6+9NV
|
||||
f7DWesaGFM9EhaOMWHo1BeyiZK5dOZuKRBlkyvCJenaB8vpY3T3hhsMOaNCxVgl9SznUjn9sKOkB
|
||||
66jPFFpjsRa7b8RCnaMkbqZqClJdp/CgxHQm2uWAd3sVRTLpRiomfx7UeqvcrWBNCNGA3YtpPUtz
|
||||
nhRtwskfgTjbJRb44/pKUocRznvMtKgere9VLVOy1w+iPXZuSjplS2/phL1SsZ390cq4MdXAB3JV
|
||||
kRLX/1TGu9s3R3OMYq/t9darYw1Ke4RImcWjhe9HwAJmeJwMwxT3Lr23l/2Qy7X2TDgEyhWsGxsF
|
||||
Wjnsg9TahKgsPYiJlGVSiHX4B/PcPev0Paakg1w0NWwkTHqPv+vahh/s3KepA8/ZyYZSdfWfV/Cq
|
||||
5gRjTKYEn5i2o3Asqi6NaOb5Jg8+jkyzbNMKLoiXF8TwlheAkEidijzcFIytJKu/pZwlWXmEmo/u
|
||||
HReqRtzIVAx4T4M4nR/fK8bSqiJrbtswqhWnZZuYog+7H7JSMxVAcDhVZdqOesiWzer2NFOw9NaY
|
||||
ZDRtFEpjojJb8QqubVjpJnpALEcx0E777tPly8sz1VuPmb/XhBppTitH6bWIdlCGQbxSlQZKZqw9
|
||||
Jr/FyuLaX0lzE/UTYFCrnXJewas55J1dqqmKw0IZMGR5iBKqXUcp7kMt5t1xwJG2iq6dbJcZgsGe
|
||||
pHt0lBotrWHMmkNbiFJAp2g7haDJNqpHGkueHepZwI0lzmpFrwaZk8f5DwF0/TVqTkg4sj/OBHNH
|
||||
j3BSqj+0nbzettGrDVHYrbN6bJ4/I8hD4nyX//7rP1o64DCGg/r1W4A36HZASbJd0Dq/SaO8HwPW
|
||||
OlZwIUlB0M+1BQRMFkPIEgP5PVbNxKHG4p4yHZc94A449pS4MndM9G3yFh5oaMj76saHBOKx4Ooh
|
||||
KIN9GGrntTqltSJhX1x+KjtlD2tphplNnRE1vFQZt8gpUs7QkkapY6v5RxauZuMwefDiJstyB++a
|
||||
fcQWqhK6OmzofgzIcZ9OTX0r6zqVHQxTLtBgsOHIMcqmbmgMTtRNYZ5/NrOqM3L0vGE/YYBUI7CF
|
||||
twN3thTCsiHLMpRHUn4FxRcyxVwDVcsU/CzC/uD3Rs5fVnAxWch/+fBRWN9Rq7YsoF84ysu3Ijau
|
||||
W0lbTH4B/wMAAP//jFjNbtswDL73KQSfWiAo0Kwbgt2GXVZgP6fdVgSKTNtcZUnQT7ocCuwh9oR7
|
||||
koGkYjtZBuxMR7Ep8vs7nUdaTULQfaXDqTFqLDYjrbOd1dr1QrqSLKs6rVoJXVr0Kvse8gDxZnWU
|
||||
UDRg577mmqj+08cb+SXbbogKHMT+MMlGc/SSIfoRaWvTqGOGuJqYkV6GZAvBhjAQupZcHwJtTBf9
|
||||
qKAtsi2qpKo5E10E79/stEJgGFv4aG3V6B1mH2sHlbFIH6TMoF0PasSMvdAwacglcn4J4OilXKs+
|
||||
VJNB5oYbbquXROJqndkXZ0/Xw7Y5ELbUj1rgG48c+UeeUbGCA1TPStJOXK20q/PFtW8XnWR9ShdV
|
||||
ejoNWjWUUbsT8FnN6HP03HvsUYZfTIWxJeGeFsUM2lpwPbuCb+49xSs/Mg39Z59BIPFSDIDVHB5U
|
||||
BAt77TJ39t2DCksyWqngLZrDqJ+mjIKQhzkMEsuEsrNobtUD4Sz7Dc38FWGgPdqDSk6HNPhcCcMW
|
||||
gy0Ty+CfzxaB/c7Jhg9o2auNgbfaRMzckgidrWrOu2WuQAPcdWwx+GZqt4TYDan4JCp+GVeQ1iB8
|
||||
fQIRztkHNBTO6MmYGpI/O8sa0fgSa0W5IhqOFU6J5GmXiYakA4IUc7jBHkB2XNQjaR5mVnnXSwBB
|
||||
RPmdgJAP5yYwTP7++SsPcOBnqhzMh6NzDFZnkpVJpUGHGsEQuJMH8pSddWfB1q366lqIlNy1c2Rz
|
||||
OqDz1MlITOty5E9MClJisya2Y9BMHtXuoFPP+lAVBIPfkeclWbIHtQMHEtjVqSM+YoFMm3q7zBRJ
|
||||
OyRNwaYr1i4K2jkv1MNp5mOtvEz5pfV9iH6Xzn7adOgwDdsIOnlHWWXKPjRcfblS6pFz0nISfTYy
|
||||
o9vsn4D/7tV9zUmbOZ+dq5vNplazz9rOhbv1+lg5OXHbQtZo0yJrbQyFFe382zmYZRJYFK4W3/33
|
||||
+1w6W74dXf8/x88FYyBkaLezWbj0WAQav389NvWZX7ipnmebESLdRQudLlZS5UaM87ZD15OoRomW
|
||||
u7C9N+vN67tu82bdXL1c/QEAAP//AwBbY8c8aRcAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d0447958ce36e8-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:45 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=dSe1gQEFfPpE4AOsFi3S3RQkzPCQnV1.Ywe__K7cSSU-1762890105-1.0.1.1-I1CSTO8ri4tjbaHdIHQ9YP9c2pa.y9WwMQFRaUztT95T_OAe5V0ndTFN4pO1RiCXh15TUpWmBxRdxIWjcYDMqrDIvKWInLO5aavGFWZ1rys;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:45 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=LMf_4EPFZGfTiqcjmjEk7WxOTuX2ukd3Cs_R8170wJ4-1762890105804-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '15065'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '15254'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999560'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999560'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_c49c9fba20ff4f05903eff3c78797ce1
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
2
lib/crewai/tests/hooks/__init__.py
Normal file
2
lib/crewai/tests/hooks/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
"""Tests for CrewAI hooks functionality."""
|
||||
|
||||
619
lib/crewai/tests/hooks/test_crew_scoped_hooks.py
Normal file
619
lib/crewai/tests/hooks/test_crew_scoped_hooks.py
Normal file
@@ -0,0 +1,619 @@
|
||||
"""Tests for crew-scoped hooks within @CrewBase classes."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import Mock
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai import Agent, Crew
|
||||
from crewai.hooks import (
|
||||
LLMCallHookContext,
|
||||
ToolCallHookContext,
|
||||
before_llm_call,
|
||||
before_tool_call,
|
||||
get_before_llm_call_hooks,
|
||||
get_before_tool_call_hooks,
|
||||
)
|
||||
from crewai.project import CrewBase, agent, crew
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def clear_hooks():
|
||||
"""Clear global hooks before and after each test."""
|
||||
from crewai.hooks import llm_hooks, tool_hooks
|
||||
|
||||
# Store original hooks
|
||||
original_before_llm = llm_hooks._before_llm_call_hooks.copy()
|
||||
original_before_tool = tool_hooks._before_tool_call_hooks.copy()
|
||||
|
||||
# Clear hooks
|
||||
llm_hooks._before_llm_call_hooks.clear()
|
||||
tool_hooks._before_tool_call_hooks.clear()
|
||||
|
||||
yield
|
||||
|
||||
# Restore original hooks
|
||||
llm_hooks._before_llm_call_hooks.clear()
|
||||
tool_hooks._before_tool_call_hooks.clear()
|
||||
llm_hooks._before_llm_call_hooks.extend(original_before_llm)
|
||||
tool_hooks._before_tool_call_hooks.extend(original_before_tool)
|
||||
|
||||
|
||||
class TestCrewScopedHooks:
|
||||
"""Test hooks defined as methods within @CrewBase classes."""
|
||||
|
||||
def test_crew_scoped_hook_is_registered_on_instance_creation(self):
|
||||
"""Test that crew-scoped hooks are registered when crew instance is created."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
@before_llm_call
|
||||
def my_hook(self, context):
|
||||
pass
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Check hooks before instance creation
|
||||
hooks_before = get_before_llm_call_hooks()
|
||||
initial_count = len(hooks_before)
|
||||
|
||||
# Create instance - should register the hook
|
||||
crew_instance = TestCrew()
|
||||
|
||||
# Check hooks after instance creation
|
||||
hooks_after = get_before_llm_call_hooks()
|
||||
|
||||
# Should have one more hook registered
|
||||
assert len(hooks_after) == initial_count + 1
|
||||
|
||||
def test_crew_scoped_hook_has_access_to_self(self):
|
||||
"""Test that crew-scoped hooks can access self and instance variables."""
|
||||
execution_log = []
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
def __init__(self):
|
||||
self.crew_name = "TestCrew"
|
||||
self.call_count = 0
|
||||
|
||||
@before_llm_call
|
||||
def my_hook(self, context):
|
||||
# Can access self
|
||||
self.call_count += 1
|
||||
execution_log.append(f"{self.crew_name}:{self.call_count}")
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create instance
|
||||
crew_instance = TestCrew()
|
||||
|
||||
# Get the registered hook
|
||||
hooks = get_before_llm_call_hooks()
|
||||
crew_hook = hooks[-1] # Last registered hook
|
||||
|
||||
# Create mock context
|
||||
mock_executor = Mock()
|
||||
mock_executor.messages = []
|
||||
mock_executor.agent = Mock(role="Test")
|
||||
mock_executor.task = Mock()
|
||||
mock_executor.crew = Mock()
|
||||
mock_executor.llm = Mock()
|
||||
mock_executor.iterations = 0
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
# Execute hook multiple times
|
||||
crew_hook(context)
|
||||
crew_hook(context)
|
||||
|
||||
# Verify hook accessed self and modified instance state
|
||||
assert len(execution_log) == 2
|
||||
assert execution_log[0] == "TestCrew:1"
|
||||
assert execution_log[1] == "TestCrew:2"
|
||||
assert crew_instance.call_count == 2
|
||||
|
||||
def test_multiple_crews_have_isolated_hooks(self):
|
||||
"""Test that different crew instances have isolated hooks."""
|
||||
crew1_executions = []
|
||||
crew2_executions = []
|
||||
|
||||
@CrewBase
|
||||
class Crew1:
|
||||
@before_llm_call
|
||||
def crew1_hook(self, context):
|
||||
crew1_executions.append("crew1")
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
@CrewBase
|
||||
class Crew2:
|
||||
@before_llm_call
|
||||
def crew2_hook(self, context):
|
||||
crew2_executions.append("crew2")
|
||||
|
||||
@agent
|
||||
def analyst(self):
|
||||
return Agent(role="Analyst", goal="Analyze", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create both instances
|
||||
instance1 = Crew1()
|
||||
instance2 = Crew2()
|
||||
|
||||
# Both hooks should be registered
|
||||
hooks = get_before_llm_call_hooks()
|
||||
assert len(hooks) >= 2
|
||||
|
||||
# Create mock context
|
||||
mock_executor = Mock()
|
||||
mock_executor.messages = []
|
||||
mock_executor.agent = Mock(role="Test")
|
||||
mock_executor.task = Mock()
|
||||
mock_executor.crew = Mock()
|
||||
mock_executor.llm = Mock()
|
||||
mock_executor.iterations = 0
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
# Execute all hooks
|
||||
for hook in hooks:
|
||||
hook(context)
|
||||
|
||||
# Both hooks should have executed
|
||||
assert "crew1" in crew1_executions
|
||||
assert "crew2" in crew2_executions
|
||||
|
||||
def test_crew_scoped_hook_with_filters(self):
|
||||
"""Test that filtered crew-scoped hooks work correctly."""
|
||||
execution_log = []
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
@before_tool_call(tools=["delete_file"])
|
||||
def filtered_hook(self, context):
|
||||
execution_log.append(f"filtered:{context.tool_name}")
|
||||
return None
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create instance
|
||||
crew_instance = TestCrew()
|
||||
|
||||
# Get registered hooks
|
||||
hooks = get_before_tool_call_hooks()
|
||||
crew_hook = hooks[-1] # Last registered
|
||||
|
||||
# Test with matching tool
|
||||
mock_tool = Mock()
|
||||
context1 = ToolCallHookContext(
|
||||
tool_name="delete_file", tool_input={}, tool=mock_tool
|
||||
)
|
||||
crew_hook(context1)
|
||||
|
||||
assert len(execution_log) == 1
|
||||
assert execution_log[0] == "filtered:delete_file"
|
||||
|
||||
# Test with non-matching tool
|
||||
context2 = ToolCallHookContext(
|
||||
tool_name="read_file", tool_input={}, tool=mock_tool
|
||||
)
|
||||
crew_hook(context2)
|
||||
|
||||
# Should still be 1 (filtered hook didn't run)
|
||||
assert len(execution_log) == 1
|
||||
|
||||
def test_crew_scoped_hook_no_double_registration(self):
|
||||
"""Test that crew-scoped hooks are not registered twice."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
@before_llm_call
|
||||
def my_hook(self, context):
|
||||
pass
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Get initial hook count
|
||||
initial_hooks = len(get_before_llm_call_hooks())
|
||||
|
||||
# Create first instance
|
||||
instance1 = TestCrew()
|
||||
|
||||
# Should add 1 hook
|
||||
hooks_after_first = get_before_llm_call_hooks()
|
||||
assert len(hooks_after_first) == initial_hooks + 1
|
||||
|
||||
# Create second instance
|
||||
instance2 = TestCrew()
|
||||
|
||||
# Should add another hook (one per instance)
|
||||
hooks_after_second = get_before_llm_call_hooks()
|
||||
assert len(hooks_after_second) == initial_hooks + 2
|
||||
|
||||
def test_crew_scoped_hook_method_signature(self):
|
||||
"""Test that crew-scoped hooks have correct signature (self + context)."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
def __init__(self):
|
||||
self.test_value = "test"
|
||||
|
||||
@before_llm_call
|
||||
def my_hook(self, context):
|
||||
# Should be able to access both self and context
|
||||
return f"{self.test_value}:{context.iterations}"
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create instance
|
||||
crew_instance = TestCrew()
|
||||
|
||||
# Verify the hook method has is_before_llm_call_hook marker
|
||||
assert hasattr(crew_instance.my_hook, "__func__")
|
||||
hook_func = crew_instance.my_hook.__func__
|
||||
assert hasattr(hook_func, "is_before_llm_call_hook")
|
||||
assert hook_func.is_before_llm_call_hook is True
|
||||
|
||||
def test_crew_scoped_with_agent_filter(self):
|
||||
"""Test crew-scoped hooks with agent filters."""
|
||||
execution_log = []
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
@before_llm_call(agents=["Researcher"])
|
||||
def filtered_hook(self, context):
|
||||
execution_log.append(context.agent.role)
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create instance
|
||||
crew_instance = TestCrew()
|
||||
|
||||
# Get hooks
|
||||
hooks = get_before_llm_call_hooks()
|
||||
crew_hook = hooks[-1]
|
||||
|
||||
# Test with matching agent
|
||||
mock_executor = Mock()
|
||||
mock_executor.messages = []
|
||||
mock_executor.agent = Mock(role="Researcher")
|
||||
mock_executor.task = Mock()
|
||||
mock_executor.crew = Mock()
|
||||
mock_executor.llm = Mock()
|
||||
mock_executor.iterations = 0
|
||||
|
||||
context1 = LLMCallHookContext(executor=mock_executor)
|
||||
crew_hook(context1)
|
||||
|
||||
assert len(execution_log) == 1
|
||||
assert execution_log[0] == "Researcher"
|
||||
|
||||
# Test with non-matching agent
|
||||
mock_executor.agent.role = "Analyst"
|
||||
context2 = LLMCallHookContext(executor=mock_executor)
|
||||
crew_hook(context2)
|
||||
|
||||
# Should still be 1 (filtered out)
|
||||
assert len(execution_log) == 1
|
||||
|
||||
|
||||
class TestCrewScopedHookAttributes:
|
||||
"""Test that crew-scoped hooks have correct attributes set."""
|
||||
|
||||
def test_hook_marker_attribute_is_set(self):
|
||||
"""Test that decorator sets marker attribute on method."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
@before_llm_call
|
||||
def my_hook(self, context):
|
||||
pass
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Check the unbound method has the marker
|
||||
assert hasattr(TestCrew.__dict__["my_hook"], "is_before_llm_call_hook")
|
||||
assert TestCrew.__dict__["my_hook"].is_before_llm_call_hook is True
|
||||
|
||||
def test_filter_attributes_are_preserved(self):
|
||||
"""Test that filter attributes are preserved on methods."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
@before_tool_call(tools=["delete_file"], agents=["Dev"])
|
||||
def filtered_hook(self, context):
|
||||
return None
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Check filter attributes are set
|
||||
hook_method = TestCrew.__dict__["filtered_hook"]
|
||||
assert hasattr(hook_method, "is_before_tool_call_hook")
|
||||
assert hasattr(hook_method, "_filter_tools")
|
||||
assert hasattr(hook_method, "_filter_agents")
|
||||
assert hook_method._filter_tools == ["delete_file"]
|
||||
assert hook_method._filter_agents == ["Dev"]
|
||||
|
||||
def test_registered_hooks_tracked_on_instance(self):
|
||||
"""Test that registered hooks are tracked on the crew instance."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
@before_llm_call
|
||||
def llm_hook(self, context):
|
||||
pass
|
||||
|
||||
@before_tool_call
|
||||
def tool_hook(self, context):
|
||||
return None
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create instance
|
||||
crew_instance = TestCrew()
|
||||
|
||||
# Check that hooks are tracked
|
||||
assert hasattr(crew_instance, "_registered_hook_functions")
|
||||
assert isinstance(crew_instance._registered_hook_functions, list)
|
||||
assert len(crew_instance._registered_hook_functions) == 2
|
||||
|
||||
# Check hook types
|
||||
hook_types = [ht for ht, _ in crew_instance._registered_hook_functions]
|
||||
assert "before_llm_call" in hook_types
|
||||
assert "before_tool_call" in hook_types
|
||||
|
||||
|
||||
class TestCrewScopedHookExecution:
|
||||
"""Test execution behavior of crew-scoped hooks."""
|
||||
|
||||
def test_crew_hook_executes_with_bound_self(self):
|
||||
"""Test that crew-scoped hook executes with self properly bound."""
|
||||
execution_log = []
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
def __init__(self):
|
||||
self.instance_id = id(self)
|
||||
|
||||
@before_llm_call
|
||||
def my_hook(self, context):
|
||||
# Should have access to self
|
||||
execution_log.append(self.instance_id)
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create instance
|
||||
crew_instance = TestCrew()
|
||||
expected_id = crew_instance.instance_id
|
||||
|
||||
# Get and execute hook
|
||||
hooks = get_before_llm_call_hooks()
|
||||
crew_hook = hooks[-1]
|
||||
|
||||
mock_executor = Mock()
|
||||
mock_executor.messages = []
|
||||
mock_executor.agent = Mock(role="Test")
|
||||
mock_executor.task = Mock()
|
||||
mock_executor.crew = Mock()
|
||||
mock_executor.llm = Mock()
|
||||
mock_executor.iterations = 0
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
# Execute hook
|
||||
crew_hook(context)
|
||||
|
||||
# Verify it had access to self
|
||||
assert len(execution_log) == 1
|
||||
assert execution_log[0] == expected_id
|
||||
|
||||
def test_crew_hook_can_modify_instance_state(self):
|
||||
"""Test that crew-scoped hooks can modify instance variables."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
def __init__(self):
|
||||
self.counter = 0
|
||||
|
||||
@before_tool_call
|
||||
def increment_counter(self, context):
|
||||
self.counter += 1
|
||||
return None
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create instance
|
||||
crew_instance = TestCrew()
|
||||
assert crew_instance.counter == 0
|
||||
|
||||
# Get and execute hook
|
||||
hooks = get_before_tool_call_hooks()
|
||||
crew_hook = hooks[-1]
|
||||
|
||||
mock_tool = Mock()
|
||||
context = ToolCallHookContext(tool_name="test", tool_input={}, tool=mock_tool)
|
||||
|
||||
# Execute hook 3 times
|
||||
crew_hook(context)
|
||||
crew_hook(context)
|
||||
crew_hook(context)
|
||||
|
||||
# Verify counter was incremented
|
||||
assert crew_instance.counter == 3
|
||||
|
||||
def test_multiple_instances_maintain_separate_state(self):
|
||||
"""Test that multiple instances of the same crew maintain separate state."""
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
def __init__(self):
|
||||
self.call_count = 0
|
||||
|
||||
@before_llm_call
|
||||
def count_calls(self, context):
|
||||
self.call_count += 1
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Create two instances
|
||||
instance1 = TestCrew()
|
||||
instance2 = TestCrew()
|
||||
|
||||
# Get all hooks (should include hooks from both instances)
|
||||
all_hooks = get_before_llm_call_hooks()
|
||||
|
||||
# Find hooks for each instance (last 2 registered)
|
||||
hook1 = all_hooks[-2]
|
||||
hook2 = all_hooks[-1]
|
||||
|
||||
# Create mock context
|
||||
mock_executor = Mock()
|
||||
mock_executor.messages = []
|
||||
mock_executor.agent = Mock(role="Test")
|
||||
mock_executor.task = Mock()
|
||||
mock_executor.crew = Mock()
|
||||
mock_executor.llm = Mock()
|
||||
mock_executor.iterations = 0
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
# Execute first hook twice
|
||||
hook1(context)
|
||||
hook1(context)
|
||||
|
||||
# Execute second hook once
|
||||
hook2(context)
|
||||
|
||||
# Each instance should have independent state
|
||||
# Note: We can't easily verify which hook belongs to which instance
|
||||
# in this test without more introspection, but the fact that it doesn't
|
||||
# crash and hooks can maintain state proves isolation works
|
||||
|
||||
|
||||
class TestSignatureDetection:
|
||||
"""Test that signature detection correctly identifies methods vs functions."""
|
||||
|
||||
def test_method_signature_detected(self):
|
||||
"""Test that methods with 'self' parameter are detected."""
|
||||
import inspect
|
||||
|
||||
@CrewBase
|
||||
class TestCrew:
|
||||
@before_llm_call
|
||||
def method_hook(self, context):
|
||||
pass
|
||||
|
||||
@agent
|
||||
def researcher(self):
|
||||
return Agent(role="Researcher", goal="Research", backstory="Expert")
|
||||
|
||||
@crew
|
||||
def crew(self):
|
||||
return Crew(agents=self.agents, tasks=[], verbose=False)
|
||||
|
||||
# Check that method has self parameter
|
||||
method = TestCrew.__dict__["method_hook"]
|
||||
sig = inspect.signature(method)
|
||||
params = list(sig.parameters.keys())
|
||||
assert params[0] == "self"
|
||||
assert len(params) == 2 # self + context
|
||||
|
||||
def test_standalone_function_signature_detected(self):
|
||||
"""Test that standalone functions without 'self' are detected."""
|
||||
import inspect
|
||||
|
||||
@before_llm_call
|
||||
def standalone_hook(context):
|
||||
pass
|
||||
|
||||
# Should have only context parameter (no self)
|
||||
sig = inspect.signature(standalone_hook)
|
||||
params = list(sig.parameters.keys())
|
||||
assert "self" not in params
|
||||
assert len(params) == 1 # Just context
|
||||
|
||||
# Should be registered
|
||||
hooks = get_before_llm_call_hooks()
|
||||
assert len(hooks) >= 1
|
||||
335
lib/crewai/tests/hooks/test_decorators.py
Normal file
335
lib/crewai/tests/hooks/test_decorators.py
Normal file
@@ -0,0 +1,335 @@
|
||||
"""Tests for decorator-based hook registration."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import Mock
|
||||
|
||||
import pytest
|
||||
|
||||
from crewai.hooks import (
|
||||
after_llm_call,
|
||||
after_tool_call,
|
||||
before_llm_call,
|
||||
before_tool_call,
|
||||
get_after_llm_call_hooks,
|
||||
get_after_tool_call_hooks,
|
||||
get_before_llm_call_hooks,
|
||||
get_before_tool_call_hooks,
|
||||
)
|
||||
from crewai.hooks.llm_hooks import LLMCallHookContext
|
||||
from crewai.hooks.tool_hooks import ToolCallHookContext
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def clear_hooks():
|
||||
"""Clear global hooks before and after each test."""
|
||||
from crewai.hooks import llm_hooks, tool_hooks
|
||||
|
||||
# Store original hooks
|
||||
original_before_llm = llm_hooks._before_llm_call_hooks.copy()
|
||||
original_after_llm = llm_hooks._after_llm_call_hooks.copy()
|
||||
original_before_tool = tool_hooks._before_tool_call_hooks.copy()
|
||||
original_after_tool = tool_hooks._after_tool_call_hooks.copy()
|
||||
|
||||
# Clear hooks
|
||||
llm_hooks._before_llm_call_hooks.clear()
|
||||
llm_hooks._after_llm_call_hooks.clear()
|
||||
tool_hooks._before_tool_call_hooks.clear()
|
||||
tool_hooks._after_tool_call_hooks.clear()
|
||||
|
||||
yield
|
||||
|
||||
# Restore original hooks
|
||||
llm_hooks._before_llm_call_hooks.clear()
|
||||
llm_hooks._after_llm_call_hooks.clear()
|
||||
tool_hooks._before_tool_call_hooks.clear()
|
||||
tool_hooks._after_tool_call_hooks.clear()
|
||||
llm_hooks._before_llm_call_hooks.extend(original_before_llm)
|
||||
llm_hooks._after_llm_call_hooks.extend(original_after_llm)
|
||||
tool_hooks._before_tool_call_hooks.extend(original_before_tool)
|
||||
tool_hooks._after_tool_call_hooks.extend(original_after_tool)
|
||||
|
||||
|
||||
class TestLLMHookDecorators:
|
||||
"""Test LLM hook decorators."""
|
||||
|
||||
def test_before_llm_call_decorator_registers_hook(self):
|
||||
"""Test that @before_llm_call decorator registers the hook."""
|
||||
|
||||
@before_llm_call
|
||||
def test_hook(context):
|
||||
pass
|
||||
|
||||
hooks = get_before_llm_call_hooks()
|
||||
assert len(hooks) == 1
|
||||
|
||||
def test_after_llm_call_decorator_registers_hook(self):
|
||||
"""Test that @after_llm_call decorator registers the hook."""
|
||||
|
||||
@after_llm_call
|
||||
def test_hook(context):
|
||||
return None
|
||||
|
||||
hooks = get_after_llm_call_hooks()
|
||||
assert len(hooks) == 1
|
||||
|
||||
def test_decorated_hook_executes_correctly(self):
|
||||
"""Test that decorated hook executes and modifies behavior."""
|
||||
execution_log = []
|
||||
|
||||
@before_llm_call
|
||||
def test_hook(context):
|
||||
execution_log.append("executed")
|
||||
|
||||
# Create mock context
|
||||
mock_executor = Mock()
|
||||
mock_executor.messages = []
|
||||
mock_executor.agent = Mock(role="Test")
|
||||
mock_executor.task = Mock()
|
||||
mock_executor.crew = Mock()
|
||||
mock_executor.llm = Mock()
|
||||
mock_executor.iterations = 0
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
# Execute the hook
|
||||
hooks = get_before_llm_call_hooks()
|
||||
hooks[0](context)
|
||||
|
||||
assert len(execution_log) == 1
|
||||
assert execution_log[0] == "executed"
|
||||
|
||||
def test_before_llm_call_with_agent_filter(self):
|
||||
"""Test that agent filter works correctly."""
|
||||
execution_log = []
|
||||
|
||||
@before_llm_call(agents=["Researcher"])
|
||||
def filtered_hook(context):
|
||||
execution_log.append(context.agent.role)
|
||||
|
||||
hooks = get_before_llm_call_hooks()
|
||||
assert len(hooks) == 1
|
||||
|
||||
# Test with matching agent
|
||||
mock_executor = Mock()
|
||||
mock_executor.messages = []
|
||||
mock_executor.agent = Mock(role="Researcher")
|
||||
mock_executor.task = Mock()
|
||||
mock_executor.crew = Mock()
|
||||
mock_executor.llm = Mock()
|
||||
mock_executor.iterations = 0
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
hooks[0](context)
|
||||
|
||||
assert len(execution_log) == 1
|
||||
assert execution_log[0] == "Researcher"
|
||||
|
||||
# Test with non-matching agent
|
||||
mock_executor.agent.role = "Analyst"
|
||||
context2 = LLMCallHookContext(executor=mock_executor)
|
||||
hooks[0](context2)
|
||||
|
||||
# Should still be 1 (hook didn't execute)
|
||||
assert len(execution_log) == 1
|
||||
|
||||
|
||||
class TestToolHookDecorators:
|
||||
"""Test tool hook decorators."""
|
||||
|
||||
def test_before_tool_call_decorator_registers_hook(self):
|
||||
"""Test that @before_tool_call decorator registers the hook."""
|
||||
|
||||
@before_tool_call
|
||||
def test_hook(context):
|
||||
return None
|
||||
|
||||
hooks = get_before_tool_call_hooks()
|
||||
assert len(hooks) == 1
|
||||
|
||||
def test_after_tool_call_decorator_registers_hook(self):
|
||||
"""Test that @after_tool_call decorator registers the hook."""
|
||||
|
||||
@after_tool_call
|
||||
def test_hook(context):
|
||||
return None
|
||||
|
||||
hooks = get_after_tool_call_hooks()
|
||||
assert len(hooks) == 1
|
||||
|
||||
def test_before_tool_call_with_tool_filter(self):
|
||||
"""Test that tool filter works correctly."""
|
||||
execution_log = []
|
||||
|
||||
@before_tool_call(tools=["delete_file", "execute_code"])
|
||||
def filtered_hook(context):
|
||||
execution_log.append(context.tool_name)
|
||||
return None
|
||||
|
||||
hooks = get_before_tool_call_hooks()
|
||||
assert len(hooks) == 1
|
||||
|
||||
# Test with matching tool
|
||||
mock_tool = Mock()
|
||||
context = ToolCallHookContext(
|
||||
tool_name="delete_file",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
)
|
||||
hooks[0](context)
|
||||
|
||||
assert len(execution_log) == 1
|
||||
assert execution_log[0] == "delete_file"
|
||||
|
||||
# Test with non-matching tool
|
||||
context2 = ToolCallHookContext(
|
||||
tool_name="read_file",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
)
|
||||
hooks[0](context2)
|
||||
|
||||
# Should still be 1 (hook didn't execute for read_file)
|
||||
assert len(execution_log) == 1
|
||||
|
||||
def test_before_tool_call_with_combined_filters(self):
|
||||
"""Test that combined tool and agent filters work."""
|
||||
execution_log = []
|
||||
|
||||
@before_tool_call(tools=["write_file"], agents=["Developer"])
|
||||
def filtered_hook(context):
|
||||
execution_log.append(f"{context.tool_name}-{context.agent.role}")
|
||||
return None
|
||||
|
||||
hooks = get_before_tool_call_hooks()
|
||||
mock_tool = Mock()
|
||||
mock_agent = Mock(role="Developer")
|
||||
|
||||
# Test with both matching
|
||||
context = ToolCallHookContext(
|
||||
tool_name="write_file",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
agent=mock_agent,
|
||||
)
|
||||
hooks[0](context)
|
||||
|
||||
assert len(execution_log) == 1
|
||||
assert execution_log[0] == "write_file-Developer"
|
||||
|
||||
# Test with tool matching but agent not
|
||||
mock_agent.role = "Researcher"
|
||||
context2 = ToolCallHookContext(
|
||||
tool_name="write_file",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
agent=mock_agent,
|
||||
)
|
||||
hooks[0](context2)
|
||||
|
||||
# Should still be 1 (hook didn't execute)
|
||||
assert len(execution_log) == 1
|
||||
|
||||
def test_after_tool_call_with_filter(self):
|
||||
"""Test that after_tool_call decorator with filter works."""
|
||||
|
||||
@after_tool_call(tools=["web_search"])
|
||||
def filtered_hook(context):
|
||||
if context.tool_result:
|
||||
return context.tool_result.upper()
|
||||
return None
|
||||
|
||||
hooks = get_after_tool_call_hooks()
|
||||
mock_tool = Mock()
|
||||
|
||||
# Test with matching tool
|
||||
context = ToolCallHookContext(
|
||||
tool_name="web_search",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
tool_result="result",
|
||||
)
|
||||
result = hooks[0](context)
|
||||
|
||||
assert result == "RESULT"
|
||||
|
||||
# Test with non-matching tool
|
||||
context2 = ToolCallHookContext(
|
||||
tool_name="other_tool",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
tool_result="result",
|
||||
)
|
||||
result2 = hooks[0](context2)
|
||||
|
||||
assert result2 is None # Hook didn't run, returns None
|
||||
|
||||
|
||||
class TestDecoratorAttributes:
|
||||
"""Test that decorators set proper attributes on functions."""
|
||||
|
||||
def test_before_llm_call_sets_attribute(self):
|
||||
"""Test that decorator sets is_before_llm_call_hook attribute."""
|
||||
|
||||
@before_llm_call
|
||||
def test_hook(context):
|
||||
pass
|
||||
|
||||
assert hasattr(test_hook, "is_before_llm_call_hook")
|
||||
assert test_hook.is_before_llm_call_hook is True
|
||||
|
||||
def test_before_tool_call_sets_attributes_with_filters(self):
|
||||
"""Test that decorator with filters sets filter attributes."""
|
||||
|
||||
@before_tool_call(tools=["delete_file"], agents=["Dev"])
|
||||
def test_hook(context):
|
||||
return None
|
||||
|
||||
assert hasattr(test_hook, "is_before_tool_call_hook")
|
||||
assert test_hook.is_before_tool_call_hook is True
|
||||
assert hasattr(test_hook, "_filter_tools")
|
||||
assert test_hook._filter_tools == ["delete_file"]
|
||||
assert hasattr(test_hook, "_filter_agents")
|
||||
assert test_hook._filter_agents == ["Dev"]
|
||||
|
||||
|
||||
class TestMultipleDecorators:
|
||||
"""Test using multiple decorators together."""
|
||||
|
||||
def test_multiple_decorators_all_register(self):
|
||||
"""Test that multiple decorated functions all register."""
|
||||
|
||||
@before_llm_call
|
||||
def hook1(context):
|
||||
pass
|
||||
|
||||
@before_llm_call
|
||||
def hook2(context):
|
||||
pass
|
||||
|
||||
@after_llm_call
|
||||
def hook3(context):
|
||||
return None
|
||||
|
||||
before_hooks = get_before_llm_call_hooks()
|
||||
after_hooks = get_after_llm_call_hooks()
|
||||
|
||||
assert len(before_hooks) == 2
|
||||
assert len(after_hooks) == 1
|
||||
|
||||
def test_decorator_and_manual_registration_work_together(self):
|
||||
"""Test that decorators and manual registration can be mixed."""
|
||||
from crewai.hooks import register_before_tool_call_hook
|
||||
|
||||
@before_tool_call
|
||||
def decorated_hook(context):
|
||||
return None
|
||||
|
||||
def manual_hook(context):
|
||||
return None
|
||||
|
||||
register_before_tool_call_hook(manual_hook)
|
||||
|
||||
hooks = get_before_tool_call_hooks()
|
||||
|
||||
assert len(hooks) == 2
|
||||
395
lib/crewai/tests/hooks/test_human_approval.py
Normal file
395
lib/crewai/tests/hooks/test_human_approval.py
Normal file
@@ -0,0 +1,395 @@
|
||||
"""Tests for human approval functionality in hooks."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import Mock, patch
|
||||
|
||||
from crewai.hooks.llm_hooks import LLMCallHookContext
|
||||
from crewai.hooks.tool_hooks import ToolCallHookContext
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_executor():
|
||||
"""Create a mock executor for LLM hook context."""
|
||||
executor = Mock()
|
||||
executor.messages = [{"role": "system", "content": "Test message"}]
|
||||
executor.agent = Mock(role="Test Agent")
|
||||
executor.task = Mock(description="Test Task")
|
||||
executor.crew = Mock()
|
||||
executor.llm = Mock()
|
||||
executor.iterations = 0
|
||||
return executor
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_tool():
|
||||
"""Create a mock tool for tool hook context."""
|
||||
tool = Mock()
|
||||
tool.name = "test_tool"
|
||||
tool.description = "Test tool description"
|
||||
return tool
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_agent():
|
||||
"""Create a mock agent."""
|
||||
agent = Mock()
|
||||
agent.role = "Test Agent"
|
||||
return agent
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_task():
|
||||
"""Create a mock task."""
|
||||
task = Mock()
|
||||
task.description = "Test task"
|
||||
return task
|
||||
|
||||
|
||||
class TestLLMHookHumanInput:
|
||||
"""Test request_human_input() on LLMCallHookContext."""
|
||||
|
||||
@patch("builtins.input", return_value="test response")
|
||||
@patch("crewai.hooks.llm_hooks.event_listener")
|
||||
def test_request_human_input_returns_user_response(
|
||||
self, mock_event_listener, mock_input, mock_executor
|
||||
):
|
||||
"""Test that request_human_input returns the user's input."""
|
||||
# Setup mock formatter
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
response = context.request_human_input(
|
||||
prompt="Test prompt", default_message="Test default message"
|
||||
)
|
||||
|
||||
assert response == "test response"
|
||||
mock_input.assert_called_once()
|
||||
|
||||
@patch("builtins.input", return_value="")
|
||||
@patch("crewai.hooks.llm_hooks.event_listener")
|
||||
def test_request_human_input_returns_empty_string_on_enter(
|
||||
self, mock_event_listener, mock_input, mock_executor
|
||||
):
|
||||
"""Test that pressing Enter returns empty string."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
response = context.request_human_input(prompt="Test")
|
||||
|
||||
assert response == ""
|
||||
mock_input.assert_called_once()
|
||||
|
||||
@patch("builtins.input", return_value="test")
|
||||
@patch("crewai.hooks.llm_hooks.event_listener")
|
||||
def test_request_human_input_pauses_and_resumes_live_updates(
|
||||
self, mock_event_listener, mock_input, mock_executor
|
||||
):
|
||||
"""Test that live updates are paused and resumed."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
context.request_human_input(prompt="Test")
|
||||
|
||||
# Verify pause was called
|
||||
mock_formatter.pause_live_updates.assert_called_once()
|
||||
|
||||
# Verify resume was called
|
||||
mock_formatter.resume_live_updates.assert_called_once()
|
||||
|
||||
@patch("builtins.input", side_effect=Exception("Input error"))
|
||||
@patch("crewai.hooks.llm_hooks.event_listener")
|
||||
def test_request_human_input_resumes_on_exception(
|
||||
self, mock_event_listener, mock_input, mock_executor
|
||||
):
|
||||
"""Test that live updates are resumed even if input raises exception."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
with pytest.raises(Exception, match="Input error"):
|
||||
context.request_human_input(prompt="Test")
|
||||
|
||||
# Verify resume was still called (in finally block)
|
||||
mock_formatter.resume_live_updates.assert_called_once()
|
||||
|
||||
@patch("builtins.input", return_value=" test response ")
|
||||
@patch("crewai.hooks.llm_hooks.event_listener")
|
||||
def test_request_human_input_strips_whitespace(
|
||||
self, mock_event_listener, mock_input, mock_executor
|
||||
):
|
||||
"""Test that user input is stripped of leading/trailing whitespace."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
response = context.request_human_input(prompt="Test")
|
||||
|
||||
assert response == "test response" # Whitespace stripped
|
||||
|
||||
|
||||
class TestToolHookHumanInput:
|
||||
"""Test request_human_input() on ToolCallHookContext."""
|
||||
|
||||
@patch("builtins.input", return_value="approve")
|
||||
@patch("crewai.hooks.tool_hooks.event_listener")
|
||||
def test_request_human_input_returns_user_response(
|
||||
self, mock_event_listener, mock_input, mock_tool, mock_agent, mock_task
|
||||
):
|
||||
"""Test that request_human_input returns the user's input."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = ToolCallHookContext(
|
||||
tool_name="test_tool",
|
||||
tool_input={"arg": "value"},
|
||||
tool=mock_tool,
|
||||
agent=mock_agent,
|
||||
task=mock_task,
|
||||
)
|
||||
|
||||
response = context.request_human_input(
|
||||
prompt="Approve this tool?", default_message="Type 'approve':"
|
||||
)
|
||||
|
||||
assert response == "approve"
|
||||
mock_input.assert_called_once()
|
||||
|
||||
@patch("builtins.input", return_value="")
|
||||
@patch("crewai.hooks.tool_hooks.event_listener")
|
||||
def test_request_human_input_handles_empty_input(
|
||||
self, mock_event_listener, mock_input, mock_tool
|
||||
):
|
||||
"""Test that empty input (Enter key) is handled correctly."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = ToolCallHookContext(
|
||||
tool_name="test_tool",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
)
|
||||
|
||||
response = context.request_human_input(prompt="Test")
|
||||
|
||||
assert response == ""
|
||||
|
||||
@patch("builtins.input", return_value="test")
|
||||
@patch("crewai.hooks.tool_hooks.event_listener")
|
||||
def test_request_human_input_pauses_and_resumes(
|
||||
self, mock_event_listener, mock_input, mock_tool
|
||||
):
|
||||
"""Test that live updates are properly paused and resumed."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = ToolCallHookContext(
|
||||
tool_name="test_tool",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
)
|
||||
|
||||
context.request_human_input(prompt="Test")
|
||||
|
||||
mock_formatter.pause_live_updates.assert_called_once()
|
||||
mock_formatter.resume_live_updates.assert_called_once()
|
||||
|
||||
@patch("builtins.input", side_effect=KeyboardInterrupt)
|
||||
@patch("crewai.hooks.tool_hooks.event_listener")
|
||||
def test_request_human_input_resumes_on_keyboard_interrupt(
|
||||
self, mock_event_listener, mock_input, mock_tool
|
||||
):
|
||||
"""Test that live updates are resumed even on keyboard interrupt."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
context = ToolCallHookContext(
|
||||
tool_name="test_tool",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
)
|
||||
|
||||
with pytest.raises(KeyboardInterrupt):
|
||||
context.request_human_input(prompt="Test")
|
||||
|
||||
# Verify resume was still called (in finally block)
|
||||
mock_formatter.resume_live_updates.assert_called_once()
|
||||
|
||||
|
||||
class TestApprovalHookIntegration:
|
||||
"""Test integration scenarios with approval hooks."""
|
||||
|
||||
@patch("builtins.input", return_value="approve")
|
||||
@patch("crewai.hooks.tool_hooks.event_listener")
|
||||
def test_approval_hook_allows_execution(
|
||||
self, mock_event_listener, mock_input, mock_tool
|
||||
):
|
||||
"""Test that approval hook allows execution when approved."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
def approval_hook(context: ToolCallHookContext) -> bool | None:
|
||||
response = context.request_human_input(
|
||||
prompt="Approve?", default_message="Type 'approve':"
|
||||
)
|
||||
return None if response == "approve" else False
|
||||
|
||||
context = ToolCallHookContext(
|
||||
tool_name="test_tool",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
)
|
||||
|
||||
result = approval_hook(context)
|
||||
|
||||
assert result is None # Allowed
|
||||
assert mock_input.called
|
||||
|
||||
@patch("builtins.input", return_value="deny")
|
||||
@patch("crewai.hooks.tool_hooks.event_listener")
|
||||
def test_approval_hook_blocks_execution(
|
||||
self, mock_event_listener, mock_input, mock_tool
|
||||
):
|
||||
"""Test that approval hook blocks execution when denied."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
def approval_hook(context: ToolCallHookContext) -> bool | None:
|
||||
response = context.request_human_input(
|
||||
prompt="Approve?", default_message="Type 'approve':"
|
||||
)
|
||||
return None if response == "approve" else False
|
||||
|
||||
context = ToolCallHookContext(
|
||||
tool_name="test_tool",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
)
|
||||
|
||||
result = approval_hook(context)
|
||||
|
||||
assert result is False # Blocked
|
||||
assert mock_input.called
|
||||
|
||||
@patch("builtins.input", return_value="modified result")
|
||||
@patch("crewai.hooks.tool_hooks.event_listener")
|
||||
def test_review_hook_modifies_result(
|
||||
self, mock_event_listener, mock_input, mock_tool
|
||||
):
|
||||
"""Test that review hook can modify tool results."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
def review_hook(context: ToolCallHookContext) -> str | None:
|
||||
response = context.request_human_input(
|
||||
prompt="Review result",
|
||||
default_message="Press Enter to keep, or provide modified version:",
|
||||
)
|
||||
return response if response else None
|
||||
|
||||
context = ToolCallHookContext(
|
||||
tool_name="test_tool",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
tool_result="original result",
|
||||
)
|
||||
|
||||
modified_result = review_hook(context)
|
||||
|
||||
assert modified_result == "modified result"
|
||||
assert mock_input.called
|
||||
|
||||
@patch("builtins.input", return_value="")
|
||||
@patch("crewai.hooks.tool_hooks.event_listener")
|
||||
def test_review_hook_keeps_original_on_enter(
|
||||
self, mock_event_listener, mock_input, mock_tool
|
||||
):
|
||||
"""Test that pressing Enter keeps original result."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
def review_hook(context: ToolCallHookContext) -> str | None:
|
||||
response = context.request_human_input(
|
||||
prompt="Review result", default_message="Press Enter to keep:"
|
||||
)
|
||||
return response if response else None
|
||||
|
||||
context = ToolCallHookContext(
|
||||
tool_name="test_tool",
|
||||
tool_input={},
|
||||
tool=mock_tool,
|
||||
tool_result="original result",
|
||||
)
|
||||
|
||||
modified_result = review_hook(context)
|
||||
|
||||
assert modified_result is None # Keep original
|
||||
|
||||
|
||||
class TestCostControlApproval:
|
||||
"""Test cost control approval hook scenarios."""
|
||||
|
||||
@patch("builtins.input", return_value="yes")
|
||||
@patch("crewai.hooks.llm_hooks.event_listener")
|
||||
def test_cost_control_allows_when_approved(
|
||||
self, mock_event_listener, mock_input, mock_executor
|
||||
):
|
||||
"""Test that expensive calls are allowed when approved."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
# Set high iteration count
|
||||
mock_executor.iterations = 10
|
||||
|
||||
def cost_control_hook(context: LLMCallHookContext) -> None:
|
||||
if context.iterations > 5:
|
||||
response = context.request_human_input(
|
||||
prompt=f"Iteration {context.iterations} - expensive call",
|
||||
default_message="Type 'yes' to continue:",
|
||||
)
|
||||
if response.lower() != "yes":
|
||||
print("Call blocked")
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
# Should not raise exception and should call input
|
||||
cost_control_hook(context)
|
||||
assert mock_input.called
|
||||
|
||||
@patch("builtins.input", return_value="no")
|
||||
@patch("crewai.hooks.llm_hooks.event_listener")
|
||||
def test_cost_control_logs_when_denied(
|
||||
self, mock_event_listener, mock_input, mock_executor
|
||||
):
|
||||
"""Test that denied calls are logged."""
|
||||
mock_formatter = Mock()
|
||||
mock_event_listener.formatter = mock_formatter
|
||||
|
||||
mock_executor.iterations = 10
|
||||
|
||||
messages_logged = []
|
||||
|
||||
def cost_control_hook(context: LLMCallHookContext) -> None:
|
||||
if context.iterations > 5:
|
||||
response = context.request_human_input(
|
||||
prompt=f"Iteration {context.iterations}",
|
||||
default_message="Type 'yes' to continue:",
|
||||
)
|
||||
if response.lower() != "yes":
|
||||
messages_logged.append("blocked")
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
cost_control_hook(context)
|
||||
|
||||
assert len(messages_logged) == 1
|
||||
assert messages_logged[0] == "blocked"
|
||||
311
lib/crewai/tests/hooks/test_llm_hooks.py
Normal file
311
lib/crewai/tests/hooks/test_llm_hooks.py
Normal file
@@ -0,0 +1,311 @@
|
||||
"""Unit tests for LLM hooks functionality."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import Mock
|
||||
|
||||
from crewai.hooks import clear_all_llm_call_hooks, unregister_after_llm_call_hook, unregister_before_llm_call_hook
|
||||
import pytest
|
||||
|
||||
from crewai.hooks.llm_hooks import (
|
||||
LLMCallHookContext,
|
||||
get_after_llm_call_hooks,
|
||||
get_before_llm_call_hooks,
|
||||
register_after_llm_call_hook,
|
||||
register_before_llm_call_hook,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_executor():
|
||||
"""Create a mock executor for testing."""
|
||||
executor = Mock()
|
||||
executor.messages = [{"role": "system", "content": "Test message"}]
|
||||
executor.agent = Mock(role="Test Agent")
|
||||
executor.task = Mock(description="Test Task")
|
||||
executor.crew = Mock()
|
||||
executor.llm = Mock()
|
||||
executor.iterations = 0
|
||||
return executor
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def clear_hooks():
|
||||
"""Clear global hooks before and after each test."""
|
||||
# Import the private variables to clear them
|
||||
from crewai.hooks import llm_hooks
|
||||
|
||||
# Store original hooks
|
||||
original_before = llm_hooks._before_llm_call_hooks.copy()
|
||||
original_after = llm_hooks._after_llm_call_hooks.copy()
|
||||
|
||||
# Clear hooks
|
||||
llm_hooks._before_llm_call_hooks.clear()
|
||||
llm_hooks._after_llm_call_hooks.clear()
|
||||
|
||||
yield
|
||||
|
||||
# Restore original hooks
|
||||
llm_hooks._before_llm_call_hooks.clear()
|
||||
llm_hooks._after_llm_call_hooks.clear()
|
||||
llm_hooks._before_llm_call_hooks.extend(original_before)
|
||||
llm_hooks._after_llm_call_hooks.extend(original_after)
|
||||
|
||||
|
||||
class TestLLMCallHookContext:
|
||||
"""Test LLMCallHookContext initialization and attributes."""
|
||||
|
||||
def test_context_initialization(self, mock_executor):
|
||||
"""Test that context is initialized correctly with executor."""
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
assert context.executor == mock_executor
|
||||
assert context.messages == mock_executor.messages
|
||||
assert context.agent == mock_executor.agent
|
||||
assert context.task == mock_executor.task
|
||||
assert context.crew == mock_executor.crew
|
||||
assert context.llm == mock_executor.llm
|
||||
assert context.iterations == mock_executor.iterations
|
||||
assert context.response is None
|
||||
|
||||
def test_context_with_response(self, mock_executor):
|
||||
"""Test that context includes response when provided."""
|
||||
test_response = "Test LLM response"
|
||||
context = LLMCallHookContext(executor=mock_executor, response=test_response)
|
||||
|
||||
assert context.response == test_response
|
||||
|
||||
def test_messages_are_mutable_reference(self, mock_executor):
|
||||
"""Test that modifying context.messages modifies executor.messages."""
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
|
||||
# Add a message through context
|
||||
new_message = {"role": "user", "content": "New message"}
|
||||
context.messages.append(new_message)
|
||||
|
||||
# Check that executor.messages is also modified
|
||||
assert new_message in mock_executor.messages
|
||||
assert len(mock_executor.messages) == 2
|
||||
|
||||
|
||||
class TestBeforeLLMCallHooks:
|
||||
"""Test before_llm_call hook registration and execution."""
|
||||
|
||||
def test_register_before_hook(self):
|
||||
"""Test that before hooks are registered correctly."""
|
||||
|
||||
def test_hook(context):
|
||||
pass
|
||||
|
||||
register_before_llm_call_hook(test_hook)
|
||||
hooks = get_before_llm_call_hooks()
|
||||
|
||||
assert len(hooks) == 1
|
||||
assert hooks[0] == test_hook
|
||||
|
||||
def test_multiple_before_hooks(self):
|
||||
"""Test that multiple before hooks can be registered."""
|
||||
|
||||
def hook1(context):
|
||||
pass
|
||||
|
||||
def hook2(context):
|
||||
pass
|
||||
|
||||
register_before_llm_call_hook(hook1)
|
||||
register_before_llm_call_hook(hook2)
|
||||
hooks = get_before_llm_call_hooks()
|
||||
|
||||
assert len(hooks) == 2
|
||||
assert hook1 in hooks
|
||||
assert hook2 in hooks
|
||||
|
||||
def test_before_hook_can_modify_messages(self, mock_executor):
|
||||
"""Test that before hooks can modify messages in-place."""
|
||||
|
||||
def add_message_hook(context):
|
||||
context.messages.append({"role": "system", "content": "Added by hook"})
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
add_message_hook(context)
|
||||
|
||||
assert len(context.messages) == 2
|
||||
assert context.messages[1]["content"] == "Added by hook"
|
||||
|
||||
def test_get_before_hooks_returns_copy(self):
|
||||
"""Test that get_before_llm_call_hooks returns a copy."""
|
||||
|
||||
def test_hook(context):
|
||||
pass
|
||||
|
||||
register_before_llm_call_hook(test_hook)
|
||||
hooks1 = get_before_llm_call_hooks()
|
||||
hooks2 = get_before_llm_call_hooks()
|
||||
|
||||
# They should be equal but not the same object
|
||||
assert hooks1 == hooks2
|
||||
assert hooks1 is not hooks2
|
||||
|
||||
|
||||
class TestAfterLLMCallHooks:
|
||||
"""Test after_llm_call hook registration and execution."""
|
||||
|
||||
def test_register_after_hook(self):
|
||||
"""Test that after hooks are registered correctly."""
|
||||
|
||||
def test_hook(context):
|
||||
return None
|
||||
|
||||
register_after_llm_call_hook(test_hook)
|
||||
hooks = get_after_llm_call_hooks()
|
||||
|
||||
assert len(hooks) == 1
|
||||
assert hooks[0] == test_hook
|
||||
|
||||
def test_multiple_after_hooks(self):
|
||||
"""Test that multiple after hooks can be registered."""
|
||||
|
||||
def hook1(context):
|
||||
return None
|
||||
|
||||
def hook2(context):
|
||||
return None
|
||||
|
||||
register_after_llm_call_hook(hook1)
|
||||
register_after_llm_call_hook(hook2)
|
||||
hooks = get_after_llm_call_hooks()
|
||||
|
||||
assert len(hooks) == 2
|
||||
assert hook1 in hooks
|
||||
assert hook2 in hooks
|
||||
|
||||
def test_after_hook_can_modify_response(self, mock_executor):
|
||||
"""Test that after hooks can modify the response."""
|
||||
original_response = "Original response"
|
||||
|
||||
def modify_response_hook(context):
|
||||
if context.response:
|
||||
return context.response.replace("Original", "Modified")
|
||||
return None
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor, response=original_response)
|
||||
modified = modify_response_hook(context)
|
||||
|
||||
assert modified == "Modified response"
|
||||
|
||||
def test_after_hook_returns_none_keeps_original(self, mock_executor):
|
||||
"""Test that returning None keeps the original response."""
|
||||
original_response = "Original response"
|
||||
|
||||
def no_change_hook(context):
|
||||
return None
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor, response=original_response)
|
||||
result = no_change_hook(context)
|
||||
|
||||
assert result is None
|
||||
assert context.response == original_response
|
||||
|
||||
def test_get_after_hooks_returns_copy(self):
|
||||
"""Test that get_after_llm_call_hooks returns a copy."""
|
||||
|
||||
def test_hook(context):
|
||||
return None
|
||||
|
||||
register_after_llm_call_hook(test_hook)
|
||||
hooks1 = get_after_llm_call_hooks()
|
||||
hooks2 = get_after_llm_call_hooks()
|
||||
|
||||
# They should be equal but not the same object
|
||||
assert hooks1 == hooks2
|
||||
assert hooks1 is not hooks2
|
||||
|
||||
|
||||
class TestLLMHooksIntegration:
|
||||
"""Test integration scenarios with multiple hooks."""
|
||||
|
||||
def test_multiple_before_hooks_execute_in_order(self, mock_executor):
|
||||
"""Test that multiple before hooks execute in registration order."""
|
||||
execution_order = []
|
||||
|
||||
def hook1(context):
|
||||
execution_order.append(1)
|
||||
|
||||
def hook2(context):
|
||||
execution_order.append(2)
|
||||
|
||||
def hook3(context):
|
||||
execution_order.append(3)
|
||||
|
||||
register_before_llm_call_hook(hook1)
|
||||
register_before_llm_call_hook(hook2)
|
||||
register_before_llm_call_hook(hook3)
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor)
|
||||
hooks = get_before_llm_call_hooks()
|
||||
|
||||
for hook in hooks:
|
||||
hook(context)
|
||||
|
||||
assert execution_order == [1, 2, 3]
|
||||
|
||||
def test_multiple_after_hooks_chain_modifications(self, mock_executor):
|
||||
"""Test that multiple after hooks can chain modifications."""
|
||||
|
||||
def hook1(context):
|
||||
if context.response:
|
||||
return context.response + " [hook1]"
|
||||
return None
|
||||
|
||||
def hook2(context):
|
||||
if context.response:
|
||||
return context.response + " [hook2]"
|
||||
return None
|
||||
|
||||
register_after_llm_call_hook(hook1)
|
||||
register_after_llm_call_hook(hook2)
|
||||
|
||||
context = LLMCallHookContext(executor=mock_executor, response="Original")
|
||||
hooks = get_after_llm_call_hooks()
|
||||
|
||||
# Simulate chaining (how it would be used in practice)
|
||||
result = context.response
|
||||
for hook in hooks:
|
||||
# Update context for next hook
|
||||
context.response = result
|
||||
modified = hook(context)
|
||||
if modified is not None:
|
||||
result = modified
|
||||
|
||||
assert result == "Original [hook1] [hook2]"
|
||||
|
||||
def test_unregister_before_hook(self):
|
||||
"""Test that before hooks can be unregistered."""
|
||||
def test_hook(context):
|
||||
pass
|
||||
|
||||
register_before_llm_call_hook(test_hook)
|
||||
unregister_before_llm_call_hook(test_hook)
|
||||
hooks = get_before_llm_call_hooks()
|
||||
assert len(hooks) == 0
|
||||
|
||||
def test_unregister_after_hook(self):
|
||||
"""Test that after hooks can be unregistered."""
|
||||
def test_hook(context):
|
||||
return None
|
||||
|
||||
register_after_llm_call_hook(test_hook)
|
||||
unregister_after_llm_call_hook(test_hook)
|
||||
hooks = get_after_llm_call_hooks()
|
||||
assert len(hooks) == 0
|
||||
|
||||
def test_clear_all_llm_call_hooks(self):
|
||||
"""Test that all llm call hooks can be cleared."""
|
||||
def test_hook(context):
|
||||
pass
|
||||
|
||||
register_before_llm_call_hook(test_hook)
|
||||
register_after_llm_call_hook(test_hook)
|
||||
clear_all_llm_call_hooks()
|
||||
hooks = get_before_llm_call_hooks()
|
||||
assert len(hooks) == 0
|
||||
498
lib/crewai/tests/hooks/test_tool_hooks.py
Normal file
498
lib/crewai/tests/hooks/test_tool_hooks.py
Normal file
@@ -0,0 +1,498 @@
from __future__ import annotations

from unittest.mock import Mock

from crewai.hooks import clear_all_tool_call_hooks, unregister_after_tool_call_hook, unregister_before_tool_call_hook
import pytest

from crewai.hooks.tool_hooks import (
    ToolCallHookContext,
    get_after_tool_call_hooks,
    get_before_tool_call_hooks,
    register_after_tool_call_hook,
    register_before_tool_call_hook,
)


@pytest.fixture
def mock_tool():
    """Create a mock tool for testing."""
    tool = Mock()
    tool.name = "test_tool"
    tool.description = "Test tool description"
    return tool


@pytest.fixture
def mock_agent():
    """Create a mock agent for testing."""
    agent = Mock()
    agent.role = "Test Agent"
    return agent


@pytest.fixture
def mock_task():
    """Create a mock task for testing."""
    task = Mock()
    task.description = "Test task"
    return task


@pytest.fixture
def mock_crew():
    """Create a mock crew for testing."""
    crew = Mock()
    return crew


@pytest.fixture(autouse=True)
def clear_hooks():
    """Clear global hooks before and after each test."""
    from crewai.hooks import tool_hooks

    # Store original hooks
    original_before = tool_hooks._before_tool_call_hooks.copy()
    original_after = tool_hooks._after_tool_call_hooks.copy()

    # Clear hooks
    tool_hooks._before_tool_call_hooks.clear()
    tool_hooks._after_tool_call_hooks.clear()

    yield

    # Restore original hooks
    tool_hooks._before_tool_call_hooks.clear()
    tool_hooks._after_tool_call_hooks.clear()
    tool_hooks._before_tool_call_hooks.extend(original_before)
    tool_hooks._after_tool_call_hooks.extend(original_after)
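
# Sketch of a leaner cleanup approach (hypothetical, unused by these tests): when
# preserving previously registered global hooks is not required, the public
# clear_all_tool_call_hooks() exercised below is enough, instead of snapshotting
# the private _before/_after lists as the autouse fixture above does.
def reset_tool_hooks_via_public_api():
    """Reset global tool-call hooks using only the public API (drops any pre-existing hooks)."""
    clear_all_tool_call_hooks()
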
class TestToolCallHookContext:
    """Test ToolCallHookContext initialization and attributes."""

    def test_context_initialization(self, mock_tool, mock_agent, mock_task, mock_crew):
        """Test that context is initialized correctly."""
        tool_input = {"arg1": "value1", "arg2": "value2"}

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            agent=mock_agent,
            task=mock_task,
            crew=mock_crew,
        )

        assert context.tool_name == "test_tool"
        assert context.tool_input == tool_input
        assert context.tool == mock_tool
        assert context.agent == mock_agent
        assert context.task == mock_task
        assert context.crew == mock_crew
        assert context.tool_result is None

    def test_context_with_result(self, mock_tool):
        """Test that context includes result when provided."""
        tool_input = {"arg1": "value1"}
        tool_result = "Test tool result"

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result=tool_result,
        )

        assert context.tool_result == tool_result

    def test_tool_input_is_mutable_reference(self, mock_tool):
        """Test that modifying context.tool_input modifies the original dict."""
        tool_input = {"arg1": "value1"}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        # Modify through context
        context.tool_input["arg2"] = "value2"

        # Check that original dict is also modified
        assert "arg2" in tool_input
        assert tool_input["arg2"] == "value2"
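
# Sketch (hypothetical hook, not used by these tests): because tool_input is a
# mutable reference -- the behavior verified in TestToolCallHookContext above --
# a before-hook can adjust arguments in place and still allow execution by
# returning None.
def inject_default_timeout(context):
    """Illustrative before-hook: supply a default ``timeout`` argument when missing."""
    context.tool_input.setdefault("timeout", 30)
    return None  # None means "allow execution"
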
class TestBeforeToolCallHooks:
    """Test before_tool_call hook registration and execution."""

    def test_register_before_hook(self):
        """Test that before hooks are registered correctly."""
        def test_hook(context):
            return None

        register_before_tool_call_hook(test_hook)
        hooks = get_before_tool_call_hooks()

        assert len(hooks) == 1
        assert hooks[0] == test_hook

    def test_multiple_before_hooks(self):
        """Test that multiple before hooks can be registered."""
        def hook1(context):
            return None

        def hook2(context):
            return None

        register_before_tool_call_hook(hook1)
        register_before_tool_call_hook(hook2)
        hooks = get_before_tool_call_hooks()

        assert len(hooks) == 2
        assert hook1 in hooks
        assert hook2 in hooks

    def test_before_hook_can_block_execution(self, mock_tool):
        """Test that before hooks can block tool execution."""
        def block_hook(context):
            if context.tool_name == "dangerous_tool":
                return False  # Block execution
            return None  # Allow execution

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="dangerous_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        result = block_hook(context)
        assert result is False

    def test_before_hook_can_allow_execution(self, mock_tool):
        """Test that before hooks can explicitly allow execution."""
        def allow_hook(context):
            return None  # Allow execution

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="safe_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        result = allow_hook(context)
        assert result is None

    def test_before_hook_can_modify_input(self, mock_tool):
        """Test that before hooks can modify tool input in-place."""
        def modify_input_hook(context):
            context.tool_input["modified_by_hook"] = True
            return None

        tool_input = {"arg1": "value1"}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        modify_input_hook(context)

        assert "modified_by_hook" in context.tool_input
        assert context.tool_input["modified_by_hook"] is True

    def test_get_before_hooks_returns_copy(self):
        """Test that get_before_tool_call_hooks returns a copy."""
        def test_hook(context):
            return None

        register_before_tool_call_hook(test_hook)
        hooks1 = get_before_tool_call_hooks()
        hooks2 = get_before_tool_call_hooks()

        # They should be equal but not the same object
        assert hooks1 == hooks2
        assert hooks1 is not hooks2
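
# Sketch: how calling code might consult the registered before-hooks prior to
# running a tool. should_run_tool is hypothetical (not part of crewai.hooks) and
# relies only on get_before_tool_call_hooks() and the convention tested above:
# a False return vetoes the call, None allows it.
def should_run_tool(context):
    """Return False as soon as any registered before-hook blocks the call."""
    for hook in get_before_tool_call_hooks():
        if hook(context) is False:
            return False
    return True
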
class TestAfterToolCallHooks:
    """Test after_tool_call hook registration and execution."""

    def test_register_after_hook(self):
        """Test that after hooks are registered correctly."""
        def test_hook(context):
            return None

        register_after_tool_call_hook(test_hook)
        hooks = get_after_tool_call_hooks()

        assert len(hooks) == 1
        assert hooks[0] == test_hook

    def test_multiple_after_hooks(self):
        """Test that multiple after hooks can be registered."""
        def hook1(context):
            return None

        def hook2(context):
            return None

        register_after_tool_call_hook(hook1)
        register_after_tool_call_hook(hook2)
        hooks = get_after_tool_call_hooks()

        assert len(hooks) == 2
        assert hook1 in hooks
        assert hook2 in hooks

    def test_after_hook_can_modify_result(self, mock_tool):
        """Test that after hooks can modify the tool result."""
        original_result = "Original result"

        def modify_result_hook(context):
            if context.tool_result:
                return context.tool_result.replace("Original", "Modified")
            return None

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result=original_result,
        )

        modified = modify_result_hook(context)
        assert modified == "Modified result"

    def test_after_hook_returns_none_keeps_original(self, mock_tool):
        """Test that returning None keeps the original result."""
        original_result = "Original result"

        def no_change_hook(context):
            return None

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result=original_result,
        )

        result = no_change_hook(context)

        assert result is None
        assert context.tool_result == original_result

    def test_get_after_hooks_returns_copy(self):
        """Test that get_after_tool_call_hooks returns a copy."""
        def test_hook(context):
            return None

        register_after_tool_call_hook(test_hook)
        hooks1 = get_after_tool_call_hooks()
        hooks2 = get_after_tool_call_hooks()

        # They should be equal but not the same object
        assert hooks1 == hooks2
        assert hooks1 is not hooks2
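
# Sketch: the chaining convention that the integration tests below simulate
# inline, factored into a hypothetical helper (not part of crewai.hooks). Each
# after-hook sees the latest result on context.tool_result; returning None
# leaves the result unchanged.
def apply_after_tool_hooks(context):
    """Fold every registered after-hook over the tool result, keeping the last non-None value."""
    result = context.tool_result
    for hook in get_after_tool_call_hooks():
        context.tool_result = result
        modified = hook(context)
        if modified is not None:
            result = modified
    return result
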
class TestToolHooksIntegration:
    """Test integration scenarios with multiple hooks."""

    def test_multiple_before_hooks_execute_in_order(self, mock_tool):
        """Test that multiple before hooks execute in registration order."""
        execution_order = []

        def hook1(context):
            execution_order.append(1)
            return None

        def hook2(context):
            execution_order.append(2)
            return None

        def hook3(context):
            execution_order.append(3)
            return None

        register_before_tool_call_hook(hook1)
        register_before_tool_call_hook(hook2)
        register_before_tool_call_hook(hook3)

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        hooks = get_before_tool_call_hooks()
        for hook in hooks:
            hook(context)

        assert execution_order == [1, 2, 3]

    def test_first_blocking_hook_stops_execution(self, mock_tool):
        """Test that first hook returning False blocks execution."""
        execution_order = []

        def hook1(context):
            execution_order.append(1)
            return None  # Allow

        def hook2(context):
            execution_order.append(2)
            return False  # Block

        def hook3(context):
            execution_order.append(3)
            return None  # This shouldn't run

        register_before_tool_call_hook(hook1)
        register_before_tool_call_hook(hook2)
        register_before_tool_call_hook(hook3)

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        hooks = get_before_tool_call_hooks()
        blocked = False
        for hook in hooks:
            result = hook(context)
            if result is False:
                blocked = True
                break

        assert blocked is True
        assert execution_order == [1, 2]  # hook3 didn't run

    def test_multiple_after_hooks_chain_modifications(self, mock_tool):
        """Test that multiple after hooks can chain modifications."""
        def hook1(context):
            if context.tool_result:
                return context.tool_result + " [hook1]"
            return None

        def hook2(context):
            if context.tool_result:
                return context.tool_result + " [hook2]"
            return None

        register_after_tool_call_hook(hook1)
        register_after_tool_call_hook(hook2)

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result="Original",
        )

        hooks = get_after_tool_call_hooks()

        # Simulate chaining (how it would be used in practice)
        result = context.tool_result
        for hook in hooks:
            # Update context for next hook
            context.tool_result = result
            modified = hook(context)
            if modified is not None:
                result = modified

        assert result == "Original [hook1] [hook2]"

    def test_hooks_with_validation_and_sanitization(self, mock_tool):
        """Test a realistic scenario with validation and sanitization hooks."""
        # Validation hook (before)
        def validate_file_path(context):
            if context.tool_name == "write_file":
                file_path = context.tool_input.get("file_path", "")
                if ".env" in file_path:
                    return False  # Block sensitive files
            return None

        # Sanitization hook (after)
        def sanitize_secrets(context):
            if context.tool_result and "SECRET_KEY" in context.tool_result:
                return context.tool_result.replace("SECRET_KEY=abc123", "SECRET_KEY=[REDACTED]")
            return None

        register_before_tool_call_hook(validate_file_path)
        register_after_tool_call_hook(sanitize_secrets)

        # Test blocking
        blocked_context = ToolCallHookContext(
            tool_name="write_file",
            tool_input={"file_path": ".env"},
            tool=mock_tool,
        )

        before_hooks = get_before_tool_call_hooks()
        blocked = False
        for hook in before_hooks:
            if hook(blocked_context) is False:
                blocked = True
                break

        assert blocked is True

        # Test sanitization
        sanitize_context = ToolCallHookContext(
            tool_name="read_file",
            tool_input={"file_path": "config.txt"},
            tool=mock_tool,
            tool_result="Content: SECRET_KEY=abc123",
        )

        after_hooks = get_after_tool_call_hooks()
        result = sanitize_context.tool_result
        for hook in after_hooks:
            sanitize_context.tool_result = result
            modified = hook(sanitize_context)
            if modified is not None:
                result = modified

        assert "SECRET_KEY=[REDACTED]" in result
        assert "abc123" not in result
    def test_unregister_before_hook(self):
        """Test that before hooks can be unregistered."""
        def test_hook(context):
            pass

        register_before_tool_call_hook(test_hook)
        unregister_before_tool_call_hook(test_hook)
        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 0

    def test_unregister_after_hook(self):
        """Test that after hooks can be unregistered."""
        def test_hook(context):
            return None

        register_after_tool_call_hook(test_hook)
        unregister_after_tool_call_hook(test_hook)
        hooks = get_after_tool_call_hooks()
        assert len(hooks) == 0

    def test_clear_all_tool_call_hooks(self):
        """Test that all tool call hooks can be cleared."""
        def test_hook(context):
            pass

        register_before_tool_call_hook(test_hook)
        register_after_tool_call_hook(test_hook)
        clear_all_tool_call_hooks()
        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 0
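
# Sketch of application-level wiring for the guardrail pattern exercised in
# TestToolHooksIntegration.test_hooks_with_validation_and_sanitization: register
# a blocking validator and a redacting sanitizer once at startup and remove them
# on shutdown. The function names below are illustrative assumptions, not crewai API.
def install_file_tool_guardrails():
    """Register a blocking before-hook and a redacting after-hook; return an uninstaller."""

    def block_env_files(context):
        # Veto writes that touch .env files; None allows everything else.
        if context.tool_name == "write_file" and ".env" in context.tool_input.get("file_path", ""):
            return False
        return None

    def redact_secrets(context):
        import re  # local import; this test module does not import re at the top

        if context.tool_result and "SECRET_KEY=" in context.tool_result:
            return re.sub(r"SECRET_KEY=\S+", "SECRET_KEY=[REDACTED]", context.tool_result)
        return None

    register_before_tool_call_hook(block_env_files)
    register_after_tool_call_hook(redact_secrets)

    def uninstall():
        unregister_before_tool_call_hook(block_env_files)
        unregister_after_tool_call_hook(redact_secrets)

    return uninstall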