mirror of https://github.com/crewAIInc/crewAI.git

Merge branch 'main' into gl/chore/use-base-model-for-llms
@@ -38,6 +38,7 @@ class A2AConfig(BaseModel):
         max_turns: Maximum conversation turns with A2A agent (default: 10).
         response_model: Optional Pydantic model for structured A2A agent responses.
         fail_fast: If True, raise error when agent unreachable; if False, skip and continue (default: True).
+        trust_remote_completion_status: If True, return A2A agent's result directly when status is "completed"; if False, always ask server agent to respond (default: False).
     """

     endpoint: Url = Field(description="A2A agent endpoint URL")

@@ -57,3 +58,7 @@ class A2AConfig(BaseModel):
         default=True,
         description="If True, raise an error immediately when the A2A agent is unreachable. If False, skip the A2A agent and continue execution.",
     )
+    trust_remote_completion_status: bool = Field(
+        default=False,
+        description='If True, return the A2A agent\'s result directly when status is "completed" without asking the server agent to respond. If False, always ask the server agent to respond, allowing it to potentially delegate again.',
+    )
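For illustration, a minimal configuration sketch for the new flag; the import path and endpoint below are assumptions, not shown in this diff:

# Hedged sketch: the A2AConfig import path is assumed for illustration.
from crewai.a2a.config import A2AConfig  # assumed module path

config = A2AConfig(
    endpoint="https://agents.example.com/a2a",  # placeholder endpoint URL
    max_turns=10,
    fail_fast=True,
    # Trust the remote agent's "completed" status and return its result
    # directly, instead of asking the server agent to respond once more.
    trust_remote_completion_status=True,
)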
@@ -52,7 +52,7 @@ def wrap_agent_with_a2a_instance(agent: Agent) -> None:
     Args:
         agent: The agent instance to wrap
     """
-    original_execute_task = agent.execute_task.__func__
+    original_execute_task = agent.execute_task.__func__  # type: ignore[attr-defined]

     @wraps(original_execute_task)
     def execute_task_with_a2a(

@@ -73,7 +73,7 @@ def wrap_agent_with_a2a_instance(agent: Agent) -> None:
             Task execution result
         """
         if not self.a2a:
-            return original_execute_task(self, task, context, tools)
+            return original_execute_task(self, task, context, tools)  # type: ignore[no-any-return]

         a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
@@ -498,6 +498,23 @@ def _delegate_to_a2a(
         conversation_history = a2a_result.get("history", [])

         if a2a_result["status"] in ["completed", "input_required"]:
+            if (
+                a2a_result["status"] == "completed"
+                and agent_config.trust_remote_completion_status
+            ):
+                result_text = a2a_result.get("result", "")
+                final_turn_number = turn_num + 1
+                crewai_event_bus.emit(
+                    None,
+                    A2AConversationCompletedEvent(
+                        status="completed",
+                        final_result=result_text,
+                        error=None,
+                        total_turns=final_turn_number,
+                    ),
+                )
+                return result_text  # type: ignore[no-any-return]
+
             final_result, next_request = _handle_agent_response_and_continue(
                 self=self,
                 a2a_result=a2a_result,
@@ -23,6 +23,10 @@ from crewai.events.types.logging_events import (
     AgentLogsExecutionEvent,
     AgentLogsStartedEvent,
 )
+from crewai.hooks.llm_hooks import (
+    get_after_llm_call_hooks,
+    get_before_llm_call_hooks,
+)
 from crewai.utilities.agent_utils import (
     enforce_rpm_limit,
     format_message_for_llm,

@@ -38,10 +42,6 @@ from crewai.utilities.agent_utils import (
 )
 from crewai.utilities.constants import TRAINING_DATA_FILE
 from crewai.utilities.i18n import I18N, get_i18n
-from crewai.utilities.llm_call_hooks import (
-    get_after_llm_call_hooks,
-    get_before_llm_call_hooks,
-)
 from crewai.utilities.printer import Printer
 from crewai.utilities.tool_utils import execute_tool_and_check_finality
 from crewai.utilities.training_handler import CrewTrainingHandler

@@ -263,6 +263,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             task=self.task,
             agent=self.agent,
             function_calling_llm=self.function_calling_llm,
+            crew=self.crew,
         )
         formatted_answer = self._handle_agent_action(
             formatted_answer, tool_result
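The two import hunks above are a module move: the hook helpers now live in crewai.hooks.llm_hooks rather than crewai.utilities.llm_call_hooks (that module is deleted at the end of this diff). Downstream code migrates one line:

# Before this commit:
# from crewai.utilities.llm_call_hooks import get_before_llm_call_hooks
# After this commit:
from crewai.hooks.llm_hooks import get_after_llm_call_hooks, get_before_llm_call_hooks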
lib/crewai/src/crewai/hooks/__init__.py (new file, 108 lines)
@@ -0,0 +1,108 @@
from __future__ import annotations

from crewai.hooks.decorators import (
    after_llm_call,
    after_tool_call,
    before_llm_call,
    before_tool_call,
)
from crewai.hooks.llm_hooks import (
    LLMCallHookContext,
    clear_after_llm_call_hooks,
    clear_all_llm_call_hooks,
    clear_before_llm_call_hooks,
    get_after_llm_call_hooks,
    get_before_llm_call_hooks,
    register_after_llm_call_hook,
    register_before_llm_call_hook,
    unregister_after_llm_call_hook,
    unregister_before_llm_call_hook,
)
from crewai.hooks.tool_hooks import (
    ToolCallHookContext,
    clear_after_tool_call_hooks,
    clear_all_tool_call_hooks,
    clear_before_tool_call_hooks,
    get_after_tool_call_hooks,
    get_before_tool_call_hooks,
    register_after_tool_call_hook,
    register_before_tool_call_hook,
    unregister_after_tool_call_hook,
    unregister_before_tool_call_hook,
)


def clear_all_global_hooks() -> dict[str, tuple[int, int]]:
    """Clear all global hooks across all hook types (LLM and Tool).

    This is a convenience function that clears all registered hooks in one call.
    Useful for testing, resetting state, or cleaning up between different
    execution contexts.

    Returns:
        Dictionary with counts of cleared hooks:
        {
            "llm_hooks": (before_count, after_count),
            "tool_hooks": (before_count, after_count),
            "total": (total_before_count, total_after_count)
        }

    Example:
        >>> # Register various hooks
        >>> register_before_llm_call_hook(llm_hook1)
        >>> register_after_llm_call_hook(llm_hook2)
        >>> register_before_tool_call_hook(tool_hook1)
        >>> register_after_tool_call_hook(tool_hook2)
        >>>
        >>> # Clear all hooks at once
        >>> result = clear_all_global_hooks()
        >>> print(result)
        {
            'llm_hooks': (1, 1),
            'tool_hooks': (1, 1),
            'total': (2, 2)
        }
    """
    llm_counts = clear_all_llm_call_hooks()
    tool_counts = clear_all_tool_call_hooks()

    return {
        "llm_hooks": llm_counts,
        "tool_hooks": tool_counts,
        "total": (llm_counts[0] + tool_counts[0], llm_counts[1] + tool_counts[1]),
    }


__all__ = [
    # Context classes
    "LLMCallHookContext",
    "ToolCallHookContext",
    # Decorators
    "after_llm_call",
    "after_tool_call",
    "before_llm_call",
    "before_tool_call",
    "clear_after_llm_call_hooks",
    "clear_after_tool_call_hooks",
    "clear_all_global_hooks",
    "clear_all_llm_call_hooks",
    "clear_all_tool_call_hooks",
    # Clear hooks
    "clear_before_llm_call_hooks",
    "clear_before_tool_call_hooks",
    "get_after_llm_call_hooks",
    "get_after_tool_call_hooks",
    # Get hooks
    "get_before_llm_call_hooks",
    "get_before_tool_call_hooks",
    "register_after_llm_call_hook",
    "register_after_tool_call_hook",
    # LLM Hook registration
    "register_before_llm_call_hook",
    # Tool Hook registration
    "register_before_tool_call_hook",
    "unregister_after_llm_call_hook",
    "unregister_after_tool_call_hook",
    "unregister_before_llm_call_hook",
    "unregister_before_tool_call_hook",
]
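As the docstring notes, clear_all_global_hooks is useful for resetting state between tests; a sketch of an autouse pytest fixture (the fixture name is illustrative):

import pytest

from crewai.hooks import clear_all_global_hooks


@pytest.fixture(autouse=True)
def reset_global_hooks():
    # Run the test, then drop any globally registered LLM/tool hooks
    # so hook state does not leak between tests.
    yield
    clear_all_global_hooks()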
lib/crewai/src/crewai/hooks/decorators.py (new file, 300 lines)
@@ -0,0 +1,300 @@
from __future__ import annotations

from collections.abc import Callable
from functools import wraps
import inspect
from typing import TYPE_CHECKING, Any, TypeVar, overload


if TYPE_CHECKING:
    from crewai.hooks.llm_hooks import LLMCallHookContext
    from crewai.hooks.tool_hooks import ToolCallHookContext

F = TypeVar("F", bound=Callable[..., Any])


def _create_hook_decorator(
    hook_type: str,
    register_function: Callable[..., Any],
    marker_attribute: str,
) -> Callable[..., Any]:
    """Create a hook decorator with filtering support.

    This factory function eliminates code duplication across the four hook decorators.

    Args:
        hook_type: Type of hook ("llm" or "tool")
        register_function: Function to call for registration (e.g., register_before_llm_call_hook)
        marker_attribute: Attribute name to mark functions (e.g., "is_before_llm_call_hook")

    Returns:
        A decorator function that supports filters and auto-registration
    """

    def decorator_factory(
        func: Callable[..., Any] | None = None,
        *,
        tools: list[str] | None = None,
        agents: list[str] | None = None,
    ) -> Callable[..., Any]:
        def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
            setattr(f, marker_attribute, True)

            sig = inspect.signature(f)
            params = list(sig.parameters.keys())
            is_method = len(params) >= 2 and params[0] == "self"

            if tools:
                f._filter_tools = tools  # type: ignore[attr-defined]
            if agents:
                f._filter_agents = agents  # type: ignore[attr-defined]

            if tools or agents:

                @wraps(f)
                def filtered_hook(context: Any) -> Any:
                    if tools and hasattr(context, "tool_name"):
                        if context.tool_name not in tools:
                            return None

                    if agents and hasattr(context, "agent"):
                        if context.agent and context.agent.role not in agents:
                            return None

                    return f(context)

                if not is_method:
                    register_function(filtered_hook)

                return f

            if not is_method:
                register_function(f)

            return f

        if func is None:
            return decorator
        return decorator(func)

    return decorator_factory


@overload
def before_llm_call(
    func: Callable[[LLMCallHookContext], None],
) -> Callable[[LLMCallHookContext], None]: ...


@overload
def before_llm_call(
    *,
    agents: list[str] | None = None,
) -> Callable[
    [Callable[[LLMCallHookContext], None]], Callable[[LLMCallHookContext], None]
]: ...


def before_llm_call(
    func: Callable[[LLMCallHookContext], None] | None = None,
    *,
    agents: list[str] | None = None,
) -> (
    Callable[[LLMCallHookContext], None]
    | Callable[
        [Callable[[LLMCallHookContext], None]], Callable[[LLMCallHookContext], None]
    ]
):
    """Decorator to register a function as a before_llm_call hook.

    Example:
        Simple usage::

            @before_llm_call
            def log_calls(context):
                print(f"LLM call by {context.agent.role}")

        With agent filter::

            @before_llm_call(agents=["Researcher", "Analyst"])
            def log_specific_agents(context):
                print(f"Filtered LLM call: {context.agent.role}")
    """
    from crewai.hooks.llm_hooks import register_before_llm_call_hook

    return _create_hook_decorator(  # type: ignore[return-value]
        hook_type="llm",
        register_function=register_before_llm_call_hook,
        marker_attribute="is_before_llm_call_hook",
    )(func=func, agents=agents)


@overload
def after_llm_call(
    func: Callable[[LLMCallHookContext], str | None],
) -> Callable[[LLMCallHookContext], str | None]: ...


@overload
def after_llm_call(
    *,
    agents: list[str] | None = None,
) -> Callable[
    [Callable[[LLMCallHookContext], str | None]],
    Callable[[LLMCallHookContext], str | None],
]: ...


def after_llm_call(
    func: Callable[[LLMCallHookContext], str | None] | None = None,
    *,
    agents: list[str] | None = None,
) -> (
    Callable[[LLMCallHookContext], str | None]
    | Callable[
        [Callable[[LLMCallHookContext], str | None]],
        Callable[[LLMCallHookContext], str | None],
    ]
):
    """Decorator to register a function as an after_llm_call hook.

    Example:
        Simple usage::

            @after_llm_call
            def sanitize(context):
                if "SECRET" in context.response:
                    return context.response.replace("SECRET", "[REDACTED]")
                return None

        With agent filter::

            @after_llm_call(agents=["Researcher"])
            def log_researcher_responses(context):
                print(f"Response length: {len(context.response)}")
                return None
    """
    from crewai.hooks.llm_hooks import register_after_llm_call_hook

    return _create_hook_decorator(  # type: ignore[return-value]
        hook_type="llm",
        register_function=register_after_llm_call_hook,
        marker_attribute="is_after_llm_call_hook",
    )(func=func, agents=agents)


@overload
def before_tool_call(
    func: Callable[[ToolCallHookContext], bool | None],
) -> Callable[[ToolCallHookContext], bool | None]: ...


@overload
def before_tool_call(
    *,
    tools: list[str] | None = None,
    agents: list[str] | None = None,
) -> Callable[
    [Callable[[ToolCallHookContext], bool | None]],
    Callable[[ToolCallHookContext], bool | None],
]: ...


def before_tool_call(
    func: Callable[[ToolCallHookContext], bool | None] | None = None,
    *,
    tools: list[str] | None = None,
    agents: list[str] | None = None,
) -> (
    Callable[[ToolCallHookContext], bool | None]
    | Callable[
        [Callable[[ToolCallHookContext], bool | None]],
        Callable[[ToolCallHookContext], bool | None],
    ]
):
    """Decorator to register a function as a before_tool_call hook.

    Example:
        Simple usage::

            @before_tool_call
            def log_all_tools(context):
                print(f"Tool: {context.tool_name}")
                return None

        With tool filter::

            @before_tool_call(tools=["delete_file", "execute_code"])
            def approve_dangerous(context):
                response = context.request_human_input(prompt="Approve?")
                return None if response == "yes" else False

        With combined filters::

            @before_tool_call(tools=["write_file"], agents=["Developer"])
            def approve_dev_writes(context):
                return None  # Only for Developer writing files
    """
    from crewai.hooks.tool_hooks import register_before_tool_call_hook

    return _create_hook_decorator(  # type: ignore[return-value]
        hook_type="tool",
        register_function=register_before_tool_call_hook,
        marker_attribute="is_before_tool_call_hook",
    )(func=func, tools=tools, agents=agents)


@overload
def after_tool_call(
    func: Callable[[ToolCallHookContext], str | None],
) -> Callable[[ToolCallHookContext], str | None]: ...


@overload
def after_tool_call(
    *,
    tools: list[str] | None = None,
    agents: list[str] | None = None,
) -> Callable[
    [Callable[[ToolCallHookContext], str | None]],
    Callable[[ToolCallHookContext], str | None],
]: ...


def after_tool_call(
    func: Callable[[ToolCallHookContext], str | None] | None = None,
    *,
    tools: list[str] | None = None,
    agents: list[str] | None = None,
) -> (
    Callable[[ToolCallHookContext], str | None]
    | Callable[
        [Callable[[ToolCallHookContext], str | None]],
        Callable[[ToolCallHookContext], str | None],
    ]
):
    """Decorator to register a function as an after_tool_call hook.

    Example:
        Simple usage::

            @after_tool_call
            def log_results(context):
                print(f"Result: {len(context.tool_result)} chars")
                return None

        With tool filter::

            @after_tool_call(tools=["web_search", "ExaSearchTool"])
            def sanitize_search_results(context):
                if "SECRET" in context.tool_result:
                    return context.tool_result.replace("SECRET", "[REDACTED]")
                return None
    """
    from crewai.hooks.tool_hooks import register_after_tool_call_hook

    return _create_hook_decorator(  # type: ignore[return-value]
        hook_type="tool",
        register_function=register_after_tool_call_hook,
        marker_attribute="is_after_tool_call_hook",
    )(func=func, tools=tools, agents=agents)
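Taken together: plain functions decorated at module level register themselves immediately, while methods (first parameter self) are only marked for crew-scoped registration later. A sketch combining the documented examples:

from crewai.hooks import after_llm_call, before_tool_call


@before_tool_call(tools=["delete_file", "execute_code"])
def approve_dangerous(context):
    # Returning False blocks the tool call; None allows it.
    response = context.request_human_input(prompt="Approve this tool call?")
    return None if response.lower() == "yes" else False


@after_llm_call
def redact_secrets(context):
    # Returning a string replaces the LLM response; None keeps the original.
    if context.response and "SECRET" in context.response:
        return context.response.replace("SECRET", "[REDACTED]")
    return None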
lib/crewai/src/crewai/hooks/llm_hooks.py (new file, 290 lines)
@@ -0,0 +1,290 @@
from __future__ import annotations

from typing import TYPE_CHECKING

from crewai.events.event_listener import event_listener
from crewai.hooks.types import AfterLLMCallHookType, BeforeLLMCallHookType
from crewai.utilities.printer import Printer


if TYPE_CHECKING:
    from crewai.agents.crew_agent_executor import CrewAgentExecutor


class LLMCallHookContext:
    """Context object passed to LLM call hooks with full executor access.

    Provides hooks with complete access to the executor state, allowing
    modification of messages, responses, and executor attributes.

    Attributes:
        executor: Full reference to the CrewAgentExecutor instance
        messages: Direct reference to executor.messages (mutable list).
            Can be modified in both before_llm_call and after_llm_call hooks.
            Modifications in after_llm_call hooks persist to the next iteration,
            allowing hooks to modify conversation history for subsequent LLM calls.
            IMPORTANT: Modify messages in-place (e.g., append, extend, remove items).
            Do NOT replace the list (e.g., context.messages = []), as this will break
            the executor. Use context.messages.append() or context.messages.extend()
            instead of assignment.
        agent: Reference to the agent executing the task
        task: Reference to the task being executed
        crew: Reference to the crew instance
        llm: Reference to the LLM instance
        iterations: Current iteration count
        response: LLM response string (only set for after_llm_call hooks).
            Can be modified by returning a new string from after_llm_call hook.
    """

    def __init__(
        self,
        executor: CrewAgentExecutor,
        response: str | None = None,
    ) -> None:
        """Initialize hook context with executor reference.

        Args:
            executor: The CrewAgentExecutor instance
            response: Optional response string (for after_llm_call hooks)
        """
        self.executor = executor
        self.messages = executor.messages
        self.agent = executor.agent
        self.task = executor.task
        self.crew = executor.crew
        self.llm = executor.llm
        self.iterations = executor.iterations
        self.response = response

    def request_human_input(
        self,
        prompt: str,
        default_message: str = "Press Enter to continue, or provide feedback:",
    ) -> str:
        """Request human input during LLM hook execution.

        This method pauses live console updates, displays a prompt to the user,
        waits for their input, and then resumes live updates. This is useful for
        approval gates, debugging, or getting human feedback during execution.

        Args:
            prompt: Custom message to display to the user
            default_message: Message shown after the prompt

        Returns:
            User's input as a string (empty string if just Enter pressed)

        Example:
            >>> def approval_hook(context: LLMCallHookContext) -> None:
            ...     if context.iterations > 5:
            ...         response = context.request_human_input(
            ...             prompt="Allow this LLM call?",
            ...             default_message="Type 'no' to skip, or press Enter:",
            ...         )
            ...         if response.lower() == "no":
            ...             print("LLM call skipped by user")
        """

        printer = Printer()
        event_listener.formatter.pause_live_updates()

        try:
            printer.print(content=f"\n{prompt}", color="bold_yellow")
            printer.print(content=default_message, color="cyan")
            response = input().strip()

            if response:
                printer.print(content="\nProcessing your input...", color="cyan")

            return response
        finally:
            event_listener.formatter.resume_live_updates()


_before_llm_call_hooks: list[BeforeLLMCallHookType] = []
_after_llm_call_hooks: list[AfterLLMCallHookType] = []


def register_before_llm_call_hook(
    hook: BeforeLLMCallHookType,
) -> None:
    """Register a global before_llm_call hook.

    Global hooks are added to all executors automatically.
    This is a convenience function for registering hooks that should
    apply to all LLM calls across all executors.

    Args:
        hook: Function that receives LLMCallHookContext and can:
            - Modify context.messages directly (in-place)
            - Return False to block LLM execution
            - Return True or None to allow execution
            IMPORTANT: Modify messages in-place (append, extend, remove items).
            Do NOT replace the list (context.messages = []), as this will break execution.

    Example:
        >>> def log_llm_calls(context: LLMCallHookContext) -> None:
        ...     print(f"LLM call by {context.agent.role}")
        ...     print(f"Messages: {len(context.messages)}")
        ...     return None  # Allow execution
        >>>
        >>> register_before_llm_call_hook(log_llm_calls)
        >>>
        >>> def block_excessive_iterations(context: LLMCallHookContext) -> bool | None:
        ...     if context.iterations > 10:
        ...         print("Blocked: Too many iterations")
        ...         return False  # Block execution
        ...     return None  # Allow execution
        >>>
        >>> register_before_llm_call_hook(block_excessive_iterations)
    """
    _before_llm_call_hooks.append(hook)


def register_after_llm_call_hook(
    hook: AfterLLMCallHookType,
) -> None:
    """Register a global after_llm_call hook.

    Global hooks are added to all executors automatically.
    This is a convenience function for registering hooks that should
    apply to all LLM calls across all executors.

    Args:
        hook: Function that receives LLMCallHookContext and can modify:
            - The response: Return modified response string or None to keep original
            - The messages: Modify context.messages directly (mutable reference)
            Both modifications are supported and can be used together.
            IMPORTANT: Modify messages in-place (append, extend, remove items).
            Do NOT replace the list (context.messages = []), as this will break execution.

    Example:
        >>> def sanitize_response(context: LLMCallHookContext) -> str | None:
        ...     if context.response and "SECRET" in context.response:
        ...         return context.response.replace("SECRET", "[REDACTED]")
        ...     return None
        >>>
        >>> register_after_llm_call_hook(sanitize_response)
    """
    _after_llm_call_hooks.append(hook)


def get_before_llm_call_hooks() -> list[BeforeLLMCallHookType]:
    """Get all registered global before_llm_call hooks.

    Returns:
        List of registered before hooks
    """
    return _before_llm_call_hooks.copy()


def get_after_llm_call_hooks() -> list[AfterLLMCallHookType]:
    """Get all registered global after_llm_call hooks.

    Returns:
        List of registered after hooks
    """
    return _after_llm_call_hooks.copy()


def unregister_before_llm_call_hook(
    hook: BeforeLLMCallHookType,
) -> bool:
    """Unregister a specific global before_llm_call hook.

    Args:
        hook: The hook function to remove

    Returns:
        True if the hook was found and removed, False otherwise

    Example:
        >>> def my_hook(context: LLMCallHookContext) -> None:
        ...     print("Before LLM call")
        >>>
        >>> register_before_llm_call_hook(my_hook)
        >>> unregister_before_llm_call_hook(my_hook)
        True
    """
    try:
        _before_llm_call_hooks.remove(hook)
        return True
    except ValueError:
        return False


def unregister_after_llm_call_hook(
    hook: AfterLLMCallHookType,
) -> bool:
    """Unregister a specific global after_llm_call hook.

    Args:
        hook: The hook function to remove

    Returns:
        True if the hook was found and removed, False otherwise

    Example:
        >>> def my_hook(context: LLMCallHookContext) -> str | None:
        ...     return None
        >>>
        >>> register_after_llm_call_hook(my_hook)
        >>> unregister_after_llm_call_hook(my_hook)
        True
    """
    try:
        _after_llm_call_hooks.remove(hook)
        return True
    except ValueError:
        return False


def clear_before_llm_call_hooks() -> int:
    """Clear all registered global before_llm_call hooks.

    Returns:
        Number of hooks that were cleared

    Example:
        >>> register_before_llm_call_hook(hook1)
        >>> register_before_llm_call_hook(hook2)
        >>> clear_before_llm_call_hooks()
        2
    """
    count = len(_before_llm_call_hooks)
    _before_llm_call_hooks.clear()
    return count


def clear_after_llm_call_hooks() -> int:
    """Clear all registered global after_llm_call hooks.

    Returns:
        Number of hooks that were cleared

    Example:
        >>> register_after_llm_call_hook(hook1)
        >>> register_after_llm_call_hook(hook2)
        >>> clear_after_llm_call_hooks()
        2
    """
    count = len(_after_llm_call_hooks)
    _after_llm_call_hooks.clear()
    return count


def clear_all_llm_call_hooks() -> tuple[int, int]:
    """Clear all registered global LLM call hooks (both before and after).

    Returns:
        Tuple of (before_hooks_cleared, after_hooks_cleared)

    Example:
        >>> register_before_llm_call_hook(before_hook)
        >>> register_after_llm_call_hook(after_hook)
        >>> clear_all_llm_call_hooks()
        (1, 1)
    """
    before_count = clear_before_llm_call_hooks()
    after_count = clear_after_llm_call_hooks()
    return (before_count, after_count)
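Registration appends to a module-level list, so the only ways to remove a hook are unregistering it by identity or using the clear functions; a lifecycle sketch:

from crewai.hooks.llm_hooks import (
    register_before_llm_call_hook,
    unregister_before_llm_call_hook,
)


def cap_iterations(context):
    # Returning False blocks the LLM call once too many iterations have run.
    return False if context.iterations > 10 else None


register_before_llm_call_hook(cap_iterations)
# ... run a crew ...
removed = unregister_before_llm_call_hook(cap_iterations)  # True if it was registered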
lib/crewai/src/crewai/hooks/tool_hooks.py (new file, 305 lines)
@@ -0,0 +1,305 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any

from crewai.events.event_listener import event_listener
from crewai.hooks.types import AfterToolCallHookType, BeforeToolCallHookType
from crewai.utilities.printer import Printer


if TYPE_CHECKING:
    from crewai.agent import Agent
    from crewai.agents.agent_builder.base_agent import BaseAgent
    from crewai.crew import Crew
    from crewai.task import Task
    from crewai.tools.structured_tool import CrewStructuredTool


class ToolCallHookContext:
    """Context object passed to tool call hooks.

    Provides hooks with access to the tool being called, its input,
    the agent/task/crew context, and the result (for after hooks).

    Attributes:
        tool_name: Name of the tool being called
        tool_input: Tool input parameters (mutable dict).
            Can be modified in-place by before_tool_call hooks.
            IMPORTANT: Modify in-place (e.g., context.tool_input['key'] = value).
            Do NOT replace the dict (e.g., context.tool_input = {}), as this
            will not affect the actual tool execution.
        tool: Reference to the CrewStructuredTool instance
        agent: Agent executing the tool (may be None)
        task: Current task being executed (may be None)
        crew: Crew instance (may be None)
        tool_result: Tool execution result (only set for after_tool_call hooks).
            Can be modified by returning a new string from after_tool_call hook.
    """

    def __init__(
        self,
        tool_name: str,
        tool_input: dict[str, Any],
        tool: CrewStructuredTool,
        agent: Agent | BaseAgent | None = None,
        task: Task | None = None,
        crew: Crew | None = None,
        tool_result: str | None = None,
    ) -> None:
        """Initialize tool call hook context.

        Args:
            tool_name: Name of the tool being called
            tool_input: Tool input parameters (mutable)
            tool: Tool instance reference
            agent: Optional agent executing the tool
            task: Optional current task
            crew: Optional crew instance
            tool_result: Optional tool result (for after hooks)
        """
        self.tool_name = tool_name
        self.tool_input = tool_input
        self.tool = tool
        self.agent = agent
        self.task = task
        self.crew = crew
        self.tool_result = tool_result

    def request_human_input(
        self,
        prompt: str,
        default_message: str = "Press Enter to continue, or provide feedback:",
    ) -> str:
        """Request human input during tool hook execution.

        This method pauses live console updates, displays a prompt to the user,
        waits for their input, and then resumes live updates. This is useful for
        approval gates, reviewing tool results, or getting human feedback during execution.

        Args:
            prompt: Custom message to display to the user
            default_message: Message shown after the prompt

        Returns:
            User's input as a string (empty string if just Enter pressed)

        Example:
            >>> def approval_hook(context: ToolCallHookContext) -> bool | None:
            ...     if context.tool_name == "delete_file":
            ...         response = context.request_human_input(
            ...             prompt="Allow file deletion?",
            ...             default_message="Type 'approve' to continue:",
            ...         )
            ...         if response.lower() != "approve":
            ...             return False  # Block execution
            ...     return None  # Allow execution
        """

        printer = Printer()
        event_listener.formatter.pause_live_updates()

        try:
            printer.print(content=f"\n{prompt}", color="bold_yellow")
            printer.print(content=default_message, color="cyan")
            response = input().strip()

            if response:
                printer.print(content="\nProcessing your input...", color="cyan")

            return response
        finally:
            event_listener.formatter.resume_live_updates()


# Global hook registries
_before_tool_call_hooks: list[BeforeToolCallHookType] = []
_after_tool_call_hooks: list[AfterToolCallHookType] = []


def register_before_tool_call_hook(
    hook: BeforeToolCallHookType,
) -> None:
    """Register a global before_tool_call hook.

    Global hooks are added to all tool executions automatically.
    This is a convenience function for registering hooks that should
    apply to all tool calls across all agents and crews.

    Args:
        hook: Function that receives ToolCallHookContext and can:
            - Modify tool_input in-place
            - Return False to block tool execution
            - Return True or None to allow execution
            IMPORTANT: Modify tool_input in-place (e.g., context.tool_input['key'] = value).
            Do NOT replace the dict (context.tool_input = {}), as this will not affect
            the actual tool execution.

    Example:
        >>> def log_tool_usage(context: ToolCallHookContext) -> None:
        ...     print(f"Executing tool: {context.tool_name}")
        ...     print(f"Input: {context.tool_input}")
        ...     return None  # Allow execution
        >>>
        >>> register_before_tool_call_hook(log_tool_usage)

        >>> def block_dangerous_tools(context: ToolCallHookContext) -> bool | None:
        ...     if context.tool_name == "delete_database":
        ...         print("Blocked dangerous tool execution!")
        ...         return False  # Block execution
        ...     return None  # Allow execution
        >>>
        >>> register_before_tool_call_hook(block_dangerous_tools)
    """
    _before_tool_call_hooks.append(hook)


def register_after_tool_call_hook(
    hook: AfterToolCallHookType,
) -> None:
    """Register a global after_tool_call hook.

    Global hooks are added to all tool executions automatically.
    This is a convenience function for registering hooks that should
    apply to all tool calls across all agents and crews.

    Args:
        hook: Function that receives ToolCallHookContext and can modify
            the tool result. Return modified result string or None to keep
            the original result. The tool_result is available in context.tool_result.

    Example:
        >>> def sanitize_output(context: ToolCallHookContext) -> str | None:
        ...     if context.tool_result and "SECRET_KEY" in context.tool_result:
        ...         return context.tool_result.replace("SECRET_KEY=...", "[REDACTED]")
        ...     return None  # Keep original result
        >>>
        >>> register_after_tool_call_hook(sanitize_output)

        >>> def log_tool_results(context: ToolCallHookContext) -> None:
        ...     print(f"Tool {context.tool_name} returned: {context.tool_result[:100]}")
        ...     return None  # Keep original result
        >>>
        >>> register_after_tool_call_hook(log_tool_results)
    """
    _after_tool_call_hooks.append(hook)


def get_before_tool_call_hooks() -> list[BeforeToolCallHookType]:
    """Get all registered global before_tool_call hooks.

    Returns:
        List of registered before hooks
    """
    return _before_tool_call_hooks.copy()


def get_after_tool_call_hooks() -> list[AfterToolCallHookType]:
    """Get all registered global after_tool_call hooks.

    Returns:
        List of registered after hooks
    """
    return _after_tool_call_hooks.copy()


def unregister_before_tool_call_hook(
    hook: BeforeToolCallHookType,
) -> bool:
    """Unregister a specific global before_tool_call hook.

    Args:
        hook: The hook function to remove

    Returns:
        True if the hook was found and removed, False otherwise

    Example:
        >>> def my_hook(context: ToolCallHookContext) -> None:
        ...     print("Before tool call")
        >>>
        >>> register_before_tool_call_hook(my_hook)
        >>> unregister_before_tool_call_hook(my_hook)
        True
    """
    try:
        _before_tool_call_hooks.remove(hook)
        return True
    except ValueError:
        return False


def unregister_after_tool_call_hook(
    hook: AfterToolCallHookType,
) -> bool:
    """Unregister a specific global after_tool_call hook.

    Args:
        hook: The hook function to remove

    Returns:
        True if the hook was found and removed, False otherwise

    Example:
        >>> def my_hook(context: ToolCallHookContext) -> str | None:
        ...     return None
        >>>
        >>> register_after_tool_call_hook(my_hook)
        >>> unregister_after_tool_call_hook(my_hook)
        True
    """
    try:
        _after_tool_call_hooks.remove(hook)
        return True
    except ValueError:
        return False


def clear_before_tool_call_hooks() -> int:
    """Clear all registered global before_tool_call hooks.

    Returns:
        Number of hooks that were cleared

    Example:
        >>> register_before_tool_call_hook(hook1)
        >>> register_before_tool_call_hook(hook2)
        >>> clear_before_tool_call_hooks()
        2
    """
    count = len(_before_tool_call_hooks)
    _before_tool_call_hooks.clear()
    return count


def clear_after_tool_call_hooks() -> int:
    """Clear all registered global after_tool_call hooks.

    Returns:
        Number of hooks that were cleared

    Example:
        >>> register_after_tool_call_hook(hook1)
        >>> register_after_tool_call_hook(hook2)
        >>> clear_after_tool_call_hooks()
        2
    """
    count = len(_after_tool_call_hooks)
    _after_tool_call_hooks.clear()
    return count


def clear_all_tool_call_hooks() -> tuple[int, int]:
    """Clear all registered global tool call hooks (both before and after).

    Returns:
        Tuple of (before_hooks_cleared, after_hooks_cleared)

    Example:
        >>> register_before_tool_call_hook(before_hook)
        >>> register_after_tool_call_hook(after_hook)
        >>> clear_all_tool_call_hooks()
        (1, 1)
    """
    before_count = clear_before_tool_call_hooks()
    after_count = clear_after_tool_call_hooks()
    return (before_count, after_count)
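Because tool_input is a mutable dict shared with the executor, a before hook can rewrite arguments in place (replacing the dict would have no effect); a sketch using the write_file tool name from the examples above:

from crewai.hooks.tool_hooks import register_before_tool_call_hook


def force_sandbox_paths(context):
    # Mutate tool_input in place; assigning a new dict would be ignored.
    if context.tool_name == "write_file" and "path" in context.tool_input:
        context.tool_input["path"] = "/tmp/sandbox/" + context.tool_input["path"].lstrip("/")
    return None  # allow execution


register_before_tool_call_hook(force_sandbox_paths)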
lib/crewai/src/crewai/hooks/types.py (new file, 137 lines)
@@ -0,0 +1,137 @@
from __future__ import annotations

from collections.abc import Callable
from typing import TYPE_CHECKING, Generic, Protocol, TypeVar, runtime_checkable


if TYPE_CHECKING:
    from crewai.hooks.llm_hooks import LLMCallHookContext
    from crewai.hooks.tool_hooks import ToolCallHookContext


ContextT = TypeVar("ContextT", contravariant=True)
ReturnT = TypeVar("ReturnT", covariant=True)


@runtime_checkable
class Hook(Protocol, Generic[ContextT, ReturnT]):
    """Generic protocol for hook functions.

    This protocol defines the common interface for all hook types in CrewAI.
    Hooks receive a context object and optionally return a modified result.

    Type Parameters:
        ContextT: The context type (LLMCallHookContext or ToolCallHookContext)
        ReturnT: The return type (None, str | None, or bool | None)

    Example:
        >>> # Before LLM call hook: receives LLMCallHookContext, returns None
        >>> hook: Hook[LLMCallHookContext, None] = lambda ctx: print(ctx.iterations)
        >>>
        >>> # After LLM call hook: receives LLMCallHookContext, returns str | None
        >>> hook: Hook[LLMCallHookContext, str | None] = lambda ctx: ctx.response
    """

    def __call__(self, context: ContextT) -> ReturnT:
        """Execute the hook with the given context.

        Args:
            context: Context object with relevant execution state

        Returns:
            Hook-specific return value (None, str | None, or bool | None)
        """
        ...


class BeforeLLMCallHook(Hook["LLMCallHookContext", bool | None], Protocol):
    """Protocol for before_llm_call hooks.

    These hooks are called before an LLM is invoked and can modify the messages
    that will be sent to the LLM or block the execution entirely.
    """

    def __call__(self, context: LLMCallHookContext) -> bool | None:
        """Execute the before LLM call hook.

        Args:
            context: Context object with executor, messages, agent, task, etc.
                Messages can be modified in-place.

        Returns:
            False to block LLM execution, True or None to allow execution
        """
        ...


class AfterLLMCallHook(Hook["LLMCallHookContext", str | None], Protocol):
    """Protocol for after_llm_call hooks.

    These hooks are called after an LLM returns a response and can modify
    the response or the message history.
    """

    def __call__(self, context: LLMCallHookContext) -> str | None:
        """Execute the after LLM call hook.

        Args:
            context: Context object with executor, messages, agent, task, response, etc.
                Messages can be modified in-place. Response is available in context.response.

        Returns:
            Modified response string, or None to keep the original response
        """
        ...


class BeforeToolCallHook(Hook["ToolCallHookContext", bool | None], Protocol):
    """Protocol for before_tool_call hooks.

    These hooks are called before a tool is executed and can modify the tool
    input or block the execution entirely.
    """

    def __call__(self, context: ToolCallHookContext) -> bool | None:
        """Execute the before tool call hook.

        Args:
            context: Context object with tool_name, tool_input, tool, agent, task, etc.
                Tool input can be modified in-place.

        Returns:
            False to block tool execution, True or None to allow execution
        """
        ...


class AfterToolCallHook(Hook["ToolCallHookContext", str | None], Protocol):
    """Protocol for after_tool_call hooks.

    These hooks are called after a tool executes and can modify the result.
    """

    def __call__(self, context: ToolCallHookContext) -> str | None:
        """Execute the after tool call hook.

        Args:
            context: Context object with tool_name, tool_input, tool_result, etc.
                Tool result is available in context.tool_result.

        Returns:
            Modified tool result string, or None to keep the original result
        """
        ...


# - All before hooks: bool | None (False = block execution, True/None = allow)
# - All after hooks: str | None (str = modified result, None = keep original)
BeforeLLMCallHookType = Hook["LLMCallHookContext", bool | None]
AfterLLMCallHookType = Hook["LLMCallHookContext", str | None]
BeforeToolCallHookType = Hook["ToolCallHookContext", bool | None]
AfterToolCallHookType = Hook["ToolCallHookContext", str | None]

# Alternative Callable-based type aliases for compatibility
BeforeLLMCallHookCallable = Callable[["LLMCallHookContext"], bool | None]
AfterLLMCallHookCallable = Callable[["LLMCallHookContext"], str | None]
BeforeToolCallHookCallable = Callable[["ToolCallHookContext"], bool | None]
AfterToolCallHookCallable = Callable[["ToolCallHookContext"], str | None]
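Since Hook is a structural Protocol, any callable with a matching signature satisfies these aliases without subclassing:

from crewai.hooks.types import AfterToolCallHookType, BeforeToolCallHookType


def block_db_drop(context) -> bool | None:
    # Before hooks: False blocks execution, None allows it.
    return False if context.tool_name == "delete_database" else None


def truncate_result(context) -> str | None:
    # After hooks: a string replaces the result, None keeps it.
    if context.tool_result and len(context.tool_result) > 4000:
        return context.tool_result[:4000]
    return None


before_hook: BeforeToolCallHookType = block_db_drop  # structural match, no inheritance
after_hook: AfterToolCallHookType = truncate_result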
lib/crewai/src/crewai/hooks/wrappers.py (new file, 157 lines)
@@ -0,0 +1,157 @@
from __future__ import annotations

from collections.abc import Callable
from typing import TYPE_CHECKING, Any, TypeVar


if TYPE_CHECKING:
    from crewai.hooks.llm_hooks import LLMCallHookContext
    from crewai.hooks.tool_hooks import ToolCallHookContext

P = TypeVar("P")
R = TypeVar("R")


def _copy_method_metadata(wrapper: Any, original: Callable[..., Any]) -> None:
    """Copy metadata from original function to wrapper.

    Args:
        wrapper: The wrapper object to copy metadata to
        original: The original function to copy from
    """
    wrapper.__name__ = original.__name__
    wrapper.__doc__ = original.__doc__
    wrapper.__module__ = original.__module__
    wrapper.__qualname__ = original.__qualname__
    wrapper.__annotations__ = original.__annotations__


class BeforeLLMCallHookMethod:
    """Wrapper for methods marked as before_llm_call hooks within @CrewBase classes.

    This wrapper marks a method so it can be detected and registered as a
    crew-scoped hook during crew initialization.
    """

    is_before_llm_call_hook: bool = True

    def __init__(
        self,
        meth: Callable[[Any, LLMCallHookContext], None],
        agents: list[str] | None = None,
    ) -> None:
        """Initialize the hook method wrapper.

        Args:
            meth: The method to wrap
            agents: Optional list of agent roles to filter
        """
        self._meth = meth
        self.agents = agents
        _copy_method_metadata(self, meth)

    def __call__(self, *args: Any, **kwargs: Any) -> None:
        """Call the wrapped method.

        Args:
            *args: Positional arguments
            **kwargs: Keyword arguments
        """
        return self._meth(*args, **kwargs)

    def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
        """Support instance methods by implementing descriptor protocol.

        Args:
            obj: The instance that the method is accessed through
            objtype: The type of the instance

        Returns:
            Self when accessed through class, bound method when accessed through instance
        """
        if obj is None:
            return self
        # Return bound method
        return lambda context: self._meth(obj, context)


class AfterLLMCallHookMethod:
    """Wrapper for methods marked as after_llm_call hooks within @CrewBase classes."""

    is_after_llm_call_hook: bool = True

    def __init__(
        self,
        meth: Callable[[Any, LLMCallHookContext], str | None],
        agents: list[str] | None = None,
    ) -> None:
        """Initialize the hook method wrapper."""
        self._meth = meth
        self.agents = agents
        _copy_method_metadata(self, meth)

    def __call__(self, *args: Any, **kwargs: Any) -> str | None:
        """Call the wrapped method."""
        return self._meth(*args, **kwargs)

    def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
        """Support instance methods."""
        if obj is None:
            return self
        return lambda context: self._meth(obj, context)


class BeforeToolCallHookMethod:
    """Wrapper for methods marked as before_tool_call hooks within @CrewBase classes."""

    is_before_tool_call_hook: bool = True

    def __init__(
        self,
        meth: Callable[[Any, ToolCallHookContext], bool | None],
        tools: list[str] | None = None,
        agents: list[str] | None = None,
    ) -> None:
        """Initialize the hook method wrapper."""
        self._meth = meth
        self.tools = tools
        self.agents = agents
        _copy_method_metadata(self, meth)

    def __call__(self, *args: Any, **kwargs: Any) -> bool | None:
        """Call the wrapped method."""
        return self._meth(*args, **kwargs)

    def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
        """Support instance methods."""
        if obj is None:
            return self
        return lambda context: self._meth(obj, context)


class AfterToolCallHookMethod:
    """Wrapper for methods marked as after_tool_call hooks within @CrewBase classes."""

    is_after_tool_call_hook: bool = True

    def __init__(
        self,
        meth: Callable[[Any, ToolCallHookContext], str | None],
        tools: list[str] | None = None,
        agents: list[str] | None = None,
    ) -> None:
        """Initialize the hook method wrapper."""
        self._meth = meth
        self.tools = tools
        self.agents = agents
        _copy_method_metadata(self, meth)

    def __call__(self, *args: Any, **kwargs: Any) -> str | None:
        """Call the wrapped method."""
        return self._meth(*args, **kwargs)

    def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
        """Support instance methods."""
        if obj is None:
            return self
        return lambda context: self._meth(obj, context)
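These wrappers exist so hook methods declared inside a @CrewBase class survive as descriptors and are bound per crew instance by _register_crew_hooks (see the hunks below). A hedged sketch of the authoring style; the CrewBase import path is assumed:

from crewai.hooks import before_llm_call, before_tool_call
from crewai.project import CrewBase  # assumed import path


@CrewBase
class ResearchCrew:
    @before_llm_call(agents=["Researcher"])
    def watch_researcher(self, context):
        # Bound to this instance via __get__ and registered at crew init time.
        print(f"Researcher LLM call, iteration {context.iterations}")

    @before_tool_call(tools=["web_search"])
    def log_searches(self, context):
        print(f"search input: {context.tool_input}")
        return None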
@@ -542,6 +542,7 @@ class LiteAgent(FlowTrackable, BaseModel):
                 agent_key=self.key,
                 agent_role=self.role,
+                agent=self.original_agent,
                 crew=None,
             )
         except Exception as e:
             raise e
@@ -293,6 +293,8 @@ class CrewBaseMeta(type):
             kickoff=_filter_methods(original_methods, "is_kickoff"),
         )

+        _register_crew_hooks(instance, cls)
+

 def close_mcp_server(
     self: CrewInstance, _instance: CrewInstance, outputs: CrewOutput
@@ -438,6 +440,144 @@ def _filter_methods(
     }


+def _register_crew_hooks(instance: CrewInstance, cls: type) -> None:
+    """Detect and register crew-scoped hook methods.
+
+    Args:
+        instance: Crew instance to register hooks for.
+        cls: Crew class type.
+    """
+    hook_methods = {
+        name: method
+        for name, method in cls.__dict__.items()
+        if any(
+            hasattr(method, attr)
+            for attr in [
+                "is_before_llm_call_hook",
+                "is_after_llm_call_hook",
+                "is_before_tool_call_hook",
+                "is_after_tool_call_hook",
+            ]
+        )
+    }
+
+    if not hook_methods:
+        return
+
+    from crewai.hooks import (
+        register_after_llm_call_hook,
+        register_after_tool_call_hook,
+        register_before_llm_call_hook,
+        register_before_tool_call_hook,
+    )
+
+    instance._registered_hook_functions = []
+
+    instance._hooks_being_registered = True
+
+    for hook_method in hook_methods.values():
+        bound_hook = hook_method.__get__(instance, cls)
+
+        has_tool_filter = hasattr(hook_method, "_filter_tools")
+        has_agent_filter = hasattr(hook_method, "_filter_agents")
+
+        if hasattr(hook_method, "is_before_llm_call_hook"):
+            if has_agent_filter:
+                agents_filter = hook_method._filter_agents
+
+                def make_filtered_before_llm(bound_fn, agents_list):
+                    def filtered(context):
+                        if context.agent and context.agent.role not in agents_list:
+                            return None
+                        return bound_fn(context)
+
+                    return filtered
+
+                final_hook = make_filtered_before_llm(bound_hook, agents_filter)
+            else:
+                final_hook = bound_hook
+
+            register_before_llm_call_hook(final_hook)
+            instance._registered_hook_functions.append(("before_llm_call", final_hook))
+
+        if hasattr(hook_method, "is_after_llm_call_hook"):
+            if has_agent_filter:
+                agents_filter = hook_method._filter_agents
+
+                def make_filtered_after_llm(bound_fn, agents_list):
+                    def filtered(context):
+                        if context.agent and context.agent.role not in agents_list:
+                            return None
+                        return bound_fn(context)
+
+                    return filtered
+
+                final_hook = make_filtered_after_llm(bound_hook, agents_filter)
+            else:
+                final_hook = bound_hook
+
+            register_after_llm_call_hook(final_hook)
+            instance._registered_hook_functions.append(("after_llm_call", final_hook))
+
+        if hasattr(hook_method, "is_before_tool_call_hook"):
+            if has_tool_filter or has_agent_filter:
+                tools_filter = getattr(hook_method, "_filter_tools", None)
+                agents_filter = getattr(hook_method, "_filter_agents", None)
+
+                def make_filtered_before_tool(bound_fn, tools_list, agents_list):
+                    def filtered(context):
+                        if tools_list and context.tool_name not in tools_list:
+                            return None
+                        if (
+                            agents_list
+                            and context.agent
+                            and context.agent.role not in agents_list
+                        ):
+                            return None
+                        return bound_fn(context)
+
+                    return filtered
+
+                final_hook = make_filtered_before_tool(
+                    bound_hook, tools_filter, agents_filter
+                )
+            else:
+                final_hook = bound_hook
+
+            register_before_tool_call_hook(final_hook)
+            instance._registered_hook_functions.append(("before_tool_call", final_hook))
+
+        if hasattr(hook_method, "is_after_tool_call_hook"):
+            if has_tool_filter or has_agent_filter:
+                tools_filter = getattr(hook_method, "_filter_tools", None)
+                agents_filter = getattr(hook_method, "_filter_agents", None)
+
+                def make_filtered_after_tool(bound_fn, tools_list, agents_list):
+                    def filtered(context):
+                        if tools_list and context.tool_name not in tools_list:
+                            return None
+                        if (
+                            agents_list
+                            and context.agent
+                            and context.agent.role not in agents_list
+                        ):
+                            return None
+                        return bound_fn(context)
+
+                    return filtered
+
+                final_hook = make_filtered_after_tool(
+                    bound_hook, tools_filter, agents_filter
+                )
+            else:
+                final_hook = bound_hook
+
+            register_after_tool_call_hook(final_hook)
+            instance._registered_hook_functions.append(("after_tool_call", final_hook))
+
+    instance._hooks_being_registered = False
+
+
 def map_all_agent_variables(self: CrewInstance) -> None:
     """Map agent configuration variables to callable instances.
@@ -260,7 +260,8 @@ def get_llm_response(
     """

     if executor_context is not None:
-        _setup_before_llm_call_hooks(executor_context, printer)
+        if not _setup_before_llm_call_hooks(executor_context, printer):
+            raise ValueError("LLM call blocked by before_llm_call hook")
         messages = executor_context.messages

     try:
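Behaviorally, a before hook that returns False now stops the call with ValueError("LLM call blocked by before_llm_call hook") instead of being silently ignored; a minimal blocking hook to demonstrate:

from crewai.hooks import register_before_llm_call_hook


def block_all_llm_calls(context):
    # With this change, returning False makes get_llm_response raise ValueError.
    return False


register_before_llm_call_hook(block_all_llm_calls)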
@@ -673,22 +674,31 @@ def load_agent_from_repository(from_repository: str) -> dict[str, Any]:

 def _setup_before_llm_call_hooks(
     executor_context: CrewAgentExecutor | None, printer: Printer
-) -> None:
+) -> bool:
     """Setup and invoke before_llm_call hooks for the executor context.

     Args:
         executor_context: The executor context to setup the hooks for.
         printer: Printer instance for error logging.
+
+    Returns:
+        True if LLM execution should proceed, False if blocked by a hook.
     """
     if executor_context and executor_context.before_llm_call_hooks:
-        from crewai.utilities.llm_call_hooks import LLMCallHookContext
+        from crewai.hooks.llm_hooks import LLMCallHookContext

         original_messages = executor_context.messages

         hook_context = LLMCallHookContext(executor_context)
         try:
             for hook in executor_context.before_llm_call_hooks:
-                hook(hook_context)
+                result = hook(hook_context)
+                if result is False:
+                    printer.print(
+                        content="LLM call blocked by before_llm_call hook",
+                        color="yellow",
+                    )
+                    return False
         except Exception as e:
             printer.print(
                 content=f"Error in before_llm_call hook: {e}",

@@ -709,6 +719,8 @@ def _setup_before_llm_call_hooks(
     else:
         executor_context.messages = []

+    return True
+

 def _setup_after_llm_call_hooks(
     executor_context: CrewAgentExecutor | None,

@@ -726,7 +738,7 @@ def _setup_after_llm_call_hooks(
         The potentially modified response string.
     """
     if executor_context and executor_context.after_llm_call_hooks:
-        from crewai.utilities.llm_call_hooks import LLMCallHookContext
+        from crewai.hooks.llm_hooks import LLMCallHookContext

         original_messages = executor_context.messages
@@ -1,115 +0,0 @@
from __future__ import annotations

from collections.abc import Callable
from typing import TYPE_CHECKING


if TYPE_CHECKING:
    from crewai.agents.crew_agent_executor import CrewAgentExecutor


class LLMCallHookContext:
    """Context object passed to LLM call hooks with full executor access.

    Provides hooks with complete access to the executor state, allowing
    modification of messages, responses, and executor attributes.

    Attributes:
        executor: Full reference to the CrewAgentExecutor instance
        messages: Direct reference to executor.messages (mutable list).
            Can be modified in both before_llm_call and after_llm_call hooks.
            Modifications in after_llm_call hooks persist to the next iteration,
            allowing hooks to modify conversation history for subsequent LLM calls.
            IMPORTANT: Modify messages in-place (e.g., append, extend, remove items).
            Do NOT replace the list (e.g., context.messages = []), as this will break
            the executor. Use context.messages.append() or context.messages.extend()
            instead of assignment.
        agent: Reference to the agent executing the task
        task: Reference to the task being executed
        crew: Reference to the crew instance
        llm: Reference to the LLM instance
        iterations: Current iteration count
        response: LLM response string (only set for after_llm_call hooks).
            Can be modified by returning a new string from after_llm_call hook.
    """

    def __init__(
        self,
        executor: CrewAgentExecutor,
        response: str | None = None,
    ) -> None:
        """Initialize hook context with executor reference.

        Args:
            executor: The CrewAgentExecutor instance
            response: Optional response string (for after_llm_call hooks)
        """
        self.executor = executor
        self.messages = executor.messages
        self.agent = executor.agent
        self.task = executor.task
        self.crew = executor.crew
        self.llm = executor.llm
        self.iterations = executor.iterations
        self.response = response


# Global hook registries (optional convenience feature)
_before_llm_call_hooks: list[Callable[[LLMCallHookContext], None]] = []
_after_llm_call_hooks: list[Callable[[LLMCallHookContext], str | None]] = []


def register_before_llm_call_hook(
    hook: Callable[[LLMCallHookContext], None],
) -> None:
    """Register a global before_llm_call hook.

    Global hooks are added to all executors automatically.
    This is a convenience function for registering hooks that should
    apply to all LLM calls across all executors.

    Args:
        hook: Function that receives LLMCallHookContext and can modify
            context.messages directly. Should return None.
            IMPORTANT: Modify messages in-place (append, extend, remove items).
            Do NOT replace the list (context.messages = []), as this will break execution.
    """
    _before_llm_call_hooks.append(hook)


def register_after_llm_call_hook(
    hook: Callable[[LLMCallHookContext], str | None],
) -> None:
    """Register a global after_llm_call hook.

    Global hooks are added to all executors automatically.
    This is a convenience function for registering hooks that should
    apply to all LLM calls across all executors.

    Args:
        hook: Function that receives LLMCallHookContext and can modify:
            - The response: Return modified response string or None to keep original
            - The messages: Modify context.messages directly (mutable reference)
            Both modifications are supported and can be used together.
            IMPORTANT: Modify messages in-place (append, extend, remove items).
            Do NOT replace the list (context.messages = []), as this will break execution.
    """
    _after_llm_call_hooks.append(hook)


def get_before_llm_call_hooks() -> list[Callable[[LLMCallHookContext], None]]:
    """Get all registered global before_llm_call hooks.

    Returns:
        List of registered before hooks
    """
    return _before_llm_call_hooks.copy()


def get_after_llm_call_hooks() -> list[Callable[[LLMCallHookContext], str | None]]:
    """Get all registered global after_llm_call hooks.

    Returns:
        List of registered after hooks
    """
    return _after_llm_call_hooks.copy()
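The module above is deleted in this change, with the same names now imported from crewai.hooks.llm_hooks (see the updated imports in the hunks above). A minimal registration sketch under that assumption; the veto behavior matches the new `result is False` check in _setup_before_llm_call_hooks, and the redaction condition itself is illustrative:

from crewai.hooks.llm_hooks import (
    LLMCallHookContext,
    register_after_llm_call_hook,
    register_before_llm_call_hook,
)


def redact_guard(context: LLMCallHookContext) -> bool | None:
    """Before hook: inspect messages in place; returning False blocks the call."""
    for message in context.messages:
        if "API_KEY" in str(message.get("content", "")):  # illustrative condition
            return False  # get_llm_response then raises ValueError
    return None  # proceed unchanged


def trim_response(context: LLMCallHookContext) -> str | None:
    """After hook: returning a string replaces the LLM response."""
    return context.response.strip() if context.response else None


register_before_llm_call_hook(redact_guard)
register_after_llm_call_hook(trim_response)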
@@ -4,16 +4,23 @@ from typing import TYPE_CHECKING

from crewai.agents.parser import AgentAction
from crewai.agents.tools_handler import ToolsHandler
from crewai.hooks.tool_hooks import (
    ToolCallHookContext,
    get_after_tool_call_hooks,
    get_before_tool_call_hooks,
)
from crewai.security.fingerprint import Fingerprint
from crewai.tools.structured_tool import CrewStructuredTool
from crewai.tools.tool_types import ToolResult
from crewai.tools.tool_usage import ToolUsage, ToolUsageError
from crewai.utilities.i18n import I18N
from crewai.utilities.logger import Logger


if TYPE_CHECKING:
    from crewai.agent import Agent
    from crewai.agents.agent_builder.base_agent import BaseAgent
    from crewai.crew import Crew
    from crewai.llm import LLM
    from crewai.llm.base_llm import BaseLLM
    from crewai.task import Task
@@ -30,9 +37,13 @@ def execute_tool_and_check_finality(
    agent: Agent | BaseAgent | None = None,
    function_calling_llm: BaseLLM | LLM | None = None,
    fingerprint_context: dict[str, str] | None = None,
    crew: Crew | None = None,
) -> ToolResult:
    """Execute a tool and check if the result should be treated as a final answer.

    This function integrates tool hooks for before and after tool execution,
    allowing programmatic interception and modification of tool calls.

    Args:
        agent_action: The action containing the tool to execute
        tools: List of available tools
@@ -44,10 +55,12 @@ def execute_tool_and_check_finality(
        agent: Optional agent instance for tool execution
        function_calling_llm: Optional LLM for function calling
        fingerprint_context: Optional context for fingerprinting
        crew: Optional crew instance for hook context

    Returns:
        ToolResult containing the execution result and whether it should be treated as a final answer
    """
    logger = Logger(verbose=crew.verbose if crew else False)
    tool_name_to_tool_map = {tool.name: tool for tool in tools}

    if agent_key and agent_role and agent:
@@ -83,10 +96,62 @@ def execute_tool_and_check_finality(
    ] or tool_calling.tool_name.casefold().replace("_", " ") in [
        name.casefold().strip() for name in tool_name_to_tool_map
    ]:
        tool_result = tool_usage.use(tool_calling, agent_action.text)
        tool = tool_name_to_tool_map.get(tool_calling.tool_name)
        if tool:
            return ToolResult(tool_result, tool.result_as_answer)
        if not tool:
            tool_result = i18n.errors("wrong_tool_name").format(
                tool=tool_calling.tool_name,
                tools=", ".join([t.name.casefold() for t in tools]),
            )
            return ToolResult(result=tool_result, result_as_answer=False)

        tool_input = tool_calling.arguments if tool_calling.arguments else {}
        hook_context = ToolCallHookContext(
            tool_name=tool_calling.tool_name,
            tool_input=tool_input,
            tool=tool,
            agent=agent,
            task=task,
            crew=crew,
        )

        before_hooks = get_before_tool_call_hooks()
        try:
            for hook in before_hooks:
                result = hook(hook_context)
                if result is False:
                    blocked_message = (
                        f"Tool execution blocked by hook. "
                        f"Tool: {tool_calling.tool_name}"
                    )
                    return ToolResult(blocked_message, False)
        except Exception as e:
            logger.log("error", f"Error in before_tool_call hook: {e}")

        tool_result = tool_usage.use(tool_calling, agent_action.text)

        after_hook_context = ToolCallHookContext(
            tool_name=tool_calling.tool_name,
            tool_input=tool_input,
            tool=tool,
            agent=agent,
            task=task,
            crew=crew,
            tool_result=tool_result,
        )

        # Execute after_tool_call hooks
        after_hooks = get_after_tool_call_hooks()
        modified_result = tool_result
        try:
            for hook in after_hooks:
                hook_result = hook(after_hook_context)
                if hook_result is not None:
                    modified_result = hook_result
                    after_hook_context.tool_result = modified_result
        except Exception as e:
            logger.log("error", f"Error in after_tool_call hook: {e}")

        return ToolResult(modified_result, tool.result_as_answer)

    # Handle invalid tool name
    tool_result = i18n.errors("wrong_tool_name").format(
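The hunk above gives before hooks veto power (the `result is False` branch) and lets after hooks rewrite the result string. A sketch of how these might be wired up; register_after_tool_call_hook appears in the registration code earlier in this diff, while register_before_tool_call_hook is assumed to be its counterpart in crewai.hooks.tool_hooks, and the tool name checked is hypothetical:

from crewai.hooks.tool_hooks import (
    ToolCallHookContext,
    register_after_tool_call_hook,   # seen earlier in this diff
    register_before_tool_call_hook,  # assumed counterpart
)


def block_destructive_tools(context: ToolCallHookContext) -> bool | None:
    """Before hook: returning False yields a 'Tool execution blocked by hook' result."""
    if context.tool_name == "drop_table":  # hypothetical tool name
        return False
    return None


def annotate_result(context: ToolCallHookContext) -> str | None:
    """After hook: a returned string replaces the tool result."""
    return f"{context.tool_result}\n[reviewed by after_tool_call hook]"


register_before_tool_call_hook(block_destructive_tools)
register_after_tool_call_hook(annotate_result)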
147
lib/crewai/tests/agents/test_a2a_trust_completion_status.py
Normal file
@@ -0,0 +1,147 @@
"""Test trust_remote_completion_status flag in A2A wrapper."""

from unittest.mock import MagicMock, patch

import pytest

from crewai.a2a.config import A2AConfig

try:
    from a2a.types import Message, Role

    A2A_SDK_INSTALLED = True
except ImportError:
    A2A_SDK_INSTALLED = False


@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_true_returns_directly():
    """When trust_remote_completion_status=True and A2A returns completed, return result directly."""
    from crewai.a2a.wrapper import _delegate_to_a2a
    from crewai.a2a.types import AgentResponseProtocol
    from crewai import Agent, Task

    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
        trust_remote_completion_status=True,
    )

    agent = Agent(
        role="test manager",
        goal="coordinate",
        backstory="test",
        a2a=a2a_config,
    )

    task = Task(description="test", expected_output="test", agent=agent)

    class MockResponse:
        is_a2a = True
        message = "Please help"
        a2a_ids = ["http://test-endpoint.com/"]

    with (
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = MagicMock()
        mock_card.name = "Test"
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})

        # A2A returns completed
        mock_execute.return_value = {
            "status": "completed",
            "result": "Done by remote",
            "history": [],
        }

        # This should return directly without checking LLM response
        result = _delegate_to_a2a(
            self=agent,
            agent_response=MockResponse(),
            task=task,
            original_fn=lambda *args, **kwargs: "fallback",
            context=None,
            tools=None,
            agent_cards={"http://test-endpoint.com/": mock_card},
            original_task_description="test",
        )

        assert result == "Done by remote"
        assert mock_execute.call_count == 1


@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_false_continues_conversation():
    """When trust_remote_completion_status=False and A2A returns completed, ask server agent."""
    from crewai.a2a.wrapper import _delegate_to_a2a
    from crewai import Agent, Task

    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
        trust_remote_completion_status=False,
    )

    agent = Agent(
        role="test manager",
        goal="coordinate",
        backstory="test",
        a2a=a2a_config,
    )

    task = Task(description="test", expected_output="test", agent=agent)

    class MockResponse:
        is_a2a = True
        message = "Please help"
        a2a_ids = ["http://test-endpoint.com/"]

    call_count = 0

    def mock_original_fn(self, task, context, tools):
        nonlocal call_count
        call_count += 1
        if call_count == 1:
            # Server decides to finish
            return '{"is_a2a": false, "message": "Server final answer", "a2a_ids": []}'
        return "unexpected"

    with (
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = MagicMock()
        mock_card.name = "Test"
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})

        # A2A returns completed
        mock_execute.return_value = {
            "status": "completed",
            "result": "Done by remote",
            "history": [],
        }

        result = _delegate_to_a2a(
            self=agent,
            agent_response=MockResponse(),
            task=task,
            original_fn=mock_original_fn,
            context=None,
            tools=None,
            agent_cards={"http://test-endpoint.com/": mock_card},
            original_task_description="test",
        )

        # Should call original_fn to get server response
        assert call_count >= 1
        assert result == "Server final answer"


@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_default_trust_remote_completion_status_is_false():
    """Verify that default value of trust_remote_completion_status is False."""
    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
    )

    assert a2a_config.trust_remote_completion_status is False
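These tests pin down the flag's semantics; configuring it looks roughly like this, based on the A2AConfig and Agent shapes used above (the endpoint is a placeholder):

from crewai import Agent
from crewai.a2a.config import A2AConfig

# Trust the remote agent's "completed" status: return its result directly
# instead of asking the server agent to respond (and possibly delegate) again.
config = A2AConfig(
    endpoint="http://remote-agent.example.com",  # placeholder endpoint
    trust_remote_completion_status=True,
)

manager = Agent(
    role="test manager",
    goal="coordinate",
    backstory="delegates to a remote A2A agent",
    a2a=config,
)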
@@ -2714,293 +2714,3 @@ def test_agent_without_apps_no_platform_tools():

    tools = crew._prepare_tools(agent, task, [])
    assert tools == []


@pytest.mark.vcr(filter_headers=["authorization"])
def test_before_llm_call_hook_modifies_messages():
    """Test that before_llm_call hooks can modify messages."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_before_llm_call_hook

    hook_called = False
    original_message_count = 0

    def before_hook(context: LLMCallHookContext) -> None:
        nonlocal hook_called, original_message_count
        hook_called = True
        original_message_count = len(context.messages)
        context.messages.append({
            "role": "user",
            "content": "Additional context: This is a test modification."
        })

    register_before_llm_call_hook(before_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            allow_delegation=False,
        )

        task = Task(
            description="Say hello",
            expected_output="A greeting",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_called, "before_llm_call hook should have been called"
        assert len(agent.agent_executor.messages) > original_message_count
        assert result is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_after_llm_call_hook_modifies_messages_for_next_iteration():
    """Test that after_llm_call hooks can modify messages for the next iteration."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_after_llm_call_hook

    hook_call_count = 0
    hook_iterations = []
    messages_added_in_iteration_0 = False
    test_message_content = "HOOK_ADDED_MESSAGE_FOR_NEXT_ITERATION"

    def after_hook(context: LLMCallHookContext) -> str | None:
        nonlocal hook_call_count, hook_iterations, messages_added_in_iteration_0
        hook_call_count += 1
        current_iteration = context.iterations
        hook_iterations.append(current_iteration)

        if current_iteration == 0:
            messages_before = len(context.messages)
            context.messages.append({
                "role": "user",
                "content": test_message_content
            })
            messages_added_in_iteration_0 = True
            assert len(context.messages) == messages_before + 1

        return None

    register_after_llm_call_hook(after_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            allow_delegation=False,
            max_iter=3,
        )

        task = Task(
            description="Count to 3, taking your time",
            expected_output="A count",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_call_count > 0, "after_llm_call hook should have been called"
        assert messages_added_in_iteration_0, "Message should have been added in iteration 0"

        executor_messages = agent.agent_executor.messages
        message_contents = [msg.get("content", "") for msg in executor_messages if isinstance(msg, dict)]
        assert any(test_message_content in content for content in message_contents), (
            f"Message added by hook in iteration 0 should be present in executor messages. "
            f"Messages: {message_contents}"
        )

        assert len(executor_messages) > 2, "Executor should have more than initial messages"
        assert result is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_after_llm_call_hook_modifies_messages():
    """Test that after_llm_call hooks can modify messages for next iteration."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_after_llm_call_hook

    hook_called = False
    messages_before_hook = 0

    def after_hook(context: LLMCallHookContext) -> str | None:
        nonlocal hook_called, messages_before_hook
        hook_called = True
        messages_before_hook = len(context.messages)
        context.messages.append({
            "role": "user",
            "content": "Remember: This is iteration 2 context."
        })
        return None  # Don't modify response

    register_after_llm_call_hook(after_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            allow_delegation=False,
            max_iter=2,
        )

        task = Task(
            description="Count to 2",
            expected_output="A count",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_called, "after_llm_call hook should have been called"
        assert len(agent.agent_executor.messages) > messages_before_hook
        assert result is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_hooks_with_crew():
    """Test that LLM call hooks work with crew execution."""
    from crewai.utilities.llm_call_hooks import (
        LLMCallHookContext,
        register_after_llm_call_hook,
        register_before_llm_call_hook,
    )

    before_hook_called = False
    after_hook_called = False

    def before_hook(context: LLMCallHookContext) -> None:
        nonlocal before_hook_called
        before_hook_called = True
        assert context.executor is not None
        assert context.agent is not None
        assert context.task is not None
        context.messages.append({
            "role": "system",
            "content": "Additional system context from hook."
        })

    def after_hook(context: LLMCallHookContext) -> str | None:
        nonlocal after_hook_called
        after_hook_called = True
        assert context.response is not None
        assert len(context.messages) > 0
        return None

    register_before_llm_call_hook(before_hook)
    register_after_llm_call_hook(after_hook)

    try:
        agent = Agent(
            role="Researcher",
            goal="Research topics",
            backstory="You are a researcher",
            allow_delegation=False,
        )

        task = Task(
            description="Research AI frameworks",
            expected_output="A research summary",
            agent=agent,
        )

        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()

        assert before_hook_called, "before_llm_call hook should have been called"
        assert after_hook_called, "after_llm_call hook should have been called"
        assert result is not None
        assert result.raw is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_hooks_can_modify_executor_attributes():
    """Test that hooks can access and modify executor attributes like tools."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_before_llm_call_hook
    from crewai.tools import tool

    @tool
    def test_tool() -> str:
        """A test tool."""
        return "test result"

    hook_called = False
    original_tools_count = 0

    def before_hook(context: LLMCallHookContext) -> None:
        nonlocal hook_called, original_tools_count
        hook_called = True
        original_tools_count = len(context.executor.tools)
        assert context.executor.max_iter > 0
        assert context.executor.iterations >= 0
        assert context.executor.tools is not None

    register_before_llm_call_hook(before_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            tools=[test_tool],
            allow_delegation=False,
        )

        task = Task(
            description="Use the test tool",
            expected_output="Tool result",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_called, "before_llm_call hook should have been called"
        assert original_tools_count >= 0
        assert result is not None
    finally:
        pass


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_call_hooks_error_handling():
    """Test that hook errors don't break execution."""
    from crewai.utilities.llm_call_hooks import LLMCallHookContext, register_before_llm_call_hook

    hook_called = False

    def error_hook(context: LLMCallHookContext) -> None:
        nonlocal hook_called
        hook_called = True
        raise ValueError("Test hook error")

    register_before_llm_call_hook(error_hook)

    try:
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            allow_delegation=False,
        )

        task = Task(
            description="Say hello",
            expected_output="A greeting",
            agent=agent,
        )

        result = agent.execute_task(task)

        assert hook_called, "before_llm_call hook should have been called"
        assert result is not None
    finally:
        pass

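The removed tests above only exercise message mutation; the hook context docstring also allows an after hook to replace the response outright by returning a new string. A minimal sketch of that path, using the new crewai.hooks.llm_hooks import location from this diff (the transformation itself is illustrative):

from crewai.hooks.llm_hooks import LLMCallHookContext, register_after_llm_call_hook


def strip_response_whitespace(context: LLMCallHookContext) -> str | None:
    """Returning a string from an after hook replaces the LLM response."""
    if context.response is not None and context.response != context.response.strip():
        return context.response.strip()
    return None  # keep the original response


register_after_llm_call_hook(strip_response_whitespace)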
@@ -1,126 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Count to 2\n\nThis
|
||||
is the expected criteria for your final answer: A count\nyou MUST return the
|
||||
actual complete content as the final answer, not a summary.\n\nBegin! This is
|
||||
VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"},{"role":"user","content":"Additional context:
|
||||
This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '849'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJNb5wwEL3zK0Y+QwSI7LLcokqVcujHoR9S2wg5ZsBujceyTdIo2v9e
|
||||
GTYLaROpFyTmzXt+b2YeEwCmOtYAE5IHMVqdveH09UHKLx+/2eFzkAdZXL8XJPr9h3dFydLIoNuf
|
||||
KMIT60LQaDUGRWaBhUMeMKoW+11ZH/K8rmZgpA51pA02ZNVFkY3KqKzMy8ssr7KiOtElKYGeNfA9
|
||||
AQB4nL/RqOnwN2sgT58qI3rPB2TNuQmAOdKxwrj3ygduAktXUJAJaGbvnyRNgwwNXIOhexDcwKDu
|
||||
EDgMMQBw4+/R/TBvleEarua/BooUyq2gw37yPKYyk9YbgBtDgcepzFFuTsjxbF7TYB3d+r+orFdG
|
||||
edk65J5MNOoDWTajxwTgZh7S9Cw3s45GG9pAv3B+rtgdFj22LmeD1icwUOB6W9+nL+i1HQautN+M
|
||||
mQkuJHYrdd0JnzpFGyDZpP7XzUvaS3Jlhv+RXwEh0AbsWuuwU+J54rXNYbzd19rOU54NM4/uTgls
|
||||
g0IXN9Fhzye9HBTzDz7g2PbKDOisU8tV9batRFlfFn29K1lyTP4AAAD//wMApumqgWQDAAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d044543db94e48-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:25 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=KLlCOQ_zxXquDvj96O28ObVFEoAbFE8R7zlmuiuXH1M-1762890085-1.0.1.1-UChItG1GnLDHrErY60dUpkbD3lEkSvfkTQpOmEtzd0fjjm_y1pJQiB.VDXVi2pPIMSelir0ZgiVXSh5.hGPb3RjQqbH3pv0Rr_2dQ59OIQ8;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:25 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=u.Z6xV9tQd3ucK35BinKtlCkewcI6q_uQicyeEeeR18-1762890085355-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '559'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '735'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_bcaa0f8500714ed09f967488b238ce2e
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,222 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"trace_id": "aeb82647-004a-4a30-9481-d55f476d5659", "execution_type":
|
||||
"crew", "user_identifier": null, "execution_context": {"crew_fingerprint": null,
|
||||
"crew_name": "Unknown Crew", "flow_name": null, "crewai_version": "1.4.1", "privacy_level":
|
||||
"standard"}, "execution_metadata": {"expected_duration_estimate": 300, "agent_count":
|
||||
0, "task_count": 0, "flow_method_count": 0, "execution_started_at": "2025-11-11T19:45:17.648657+00:00"}}'
|
||||
headers:
|
||||
Accept:
|
||||
- '*/*'
|
||||
Accept-Encoding:
|
||||
- gzip, deflate, zstd
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '434'
|
||||
Content-Type:
|
||||
- application/json
|
||||
User-Agent:
|
||||
- CrewAI-CLI/1.4.1
|
||||
X-Crewai-Version:
|
||||
- 1.4.1
|
||||
method: POST
|
||||
uri: https://app.crewai.com/crewai_plus/api/v1/tracing/batches
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"bad_credentials","message":"Bad credentials"}'
|
||||
headers:
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Length:
|
||||
- '55'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:45:17 GMT
|
||||
cache-control:
|
||||
- no-store
|
||||
content-security-policy:
|
||||
- 'default-src ''self'' *.app.crewai.com app.crewai.com; script-src ''self''
|
||||
''unsafe-inline'' *.app.crewai.com app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts
|
||||
https://www.gstatic.com https://run.pstmn.io https://apis.google.com https://apis.google.com/js/api.js
|
||||
https://accounts.google.com https://accounts.google.com/gsi/client https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.min.css.map
|
||||
https://*.google.com https://docs.google.com https://slides.google.com https://js.hs-scripts.com
|
||||
https://js.sentry-cdn.com https://browser.sentry-cdn.com https://www.googletagmanager.com
|
||||
https://js-na1.hs-scripts.com https://js.hubspot.com http://js-na1.hs-scripts.com
|
||||
https://bat.bing.com https://cdn.amplitude.com https://cdn.segment.com https://d1d3n03t5zntha.cloudfront.net/
|
||||
https://descriptusercontent.com https://edge.fullstory.com https://googleads.g.doubleclick.net
|
||||
https://js.hs-analytics.net https://js.hs-banner.com https://js.hsadspixel.net
|
||||
https://js.hscollectedforms.net https://js.usemessages.com https://snap.licdn.com
|
||||
https://static.cloudflareinsights.com https://static.reo.dev https://www.google-analytics.com
|
||||
https://share.descript.com/; style-src ''self'' ''unsafe-inline'' *.app.crewai.com
|
||||
app.crewai.com https://cdn.jsdelivr.net/npm/apexcharts; img-src ''self'' data:
|
||||
*.app.crewai.com app.crewai.com https://zeus.tools.crewai.com https://dashboard.tools.crewai.com
|
||||
https://cdn.jsdelivr.net https://forms.hsforms.com https://track.hubspot.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://www.google.com
|
||||
https://www.google.com.br; font-src ''self'' data: *.app.crewai.com app.crewai.com;
|
||||
connect-src ''self'' *.app.crewai.com app.crewai.com https://zeus.tools.crewai.com
|
||||
https://connect.useparagon.com/ https://zeus.useparagon.com/* https://*.useparagon.com/*
|
||||
https://run.pstmn.io https://connect.tools.crewai.com/ https://*.sentry.io
|
||||
https://www.google-analytics.com https://edge.fullstory.com https://rs.fullstory.com
|
||||
https://api.hubspot.com https://forms.hscollectedforms.net https://api.hubapi.com
|
||||
https://px.ads.linkedin.com https://px4.ads.linkedin.com https://google.com/pagead/form-data/16713662509
|
||||
https://google.com/ccm/form-data/16713662509 https://www.google.com/ccm/collect
|
||||
https://worker-actionkit.tools.crewai.com https://api.reo.dev; frame-src ''self''
|
||||
*.app.crewai.com app.crewai.com https://connect.useparagon.com/ https://zeus.tools.crewai.com
|
||||
https://zeus.useparagon.com/* https://connect.tools.crewai.com/ https://docs.google.com
|
||||
https://drive.google.com https://slides.google.com https://accounts.google.com
|
||||
https://*.google.com https://app.hubspot.com/ https://td.doubleclick.net https://www.googletagmanager.com/
|
||||
https://www.youtube.com https://share.descript.com'
|
||||
expires:
|
||||
- '0'
|
||||
permissions-policy:
|
||||
- camera=(), microphone=(self), geolocation=()
|
||||
pragma:
|
||||
- no-cache
|
||||
referrer-policy:
|
||||
- strict-origin-when-cross-origin
|
||||
strict-transport-security:
|
||||
- max-age=63072000; includeSubDomains
|
||||
vary:
|
||||
- Accept
|
||||
x-content-type-options:
|
||||
- nosniff
|
||||
x-frame-options:
|
||||
- SAMEORIGIN
|
||||
x-permitted-cross-domain-policies:
|
||||
- none
|
||||
x-request-id:
|
||||
- 48a89b0d-206b-4c1b-aa0d-ecc3b4ab525c
|
||||
x-runtime:
|
||||
- '0.088251'
|
||||
x-xss-protection:
|
||||
- 1; mode=block
|
||||
status:
|
||||
code: 401
|
||||
message: Unauthorized
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Count to 3, taking
|
||||
your time\n\nThis is the expected criteria for your final answer: A count\nyou
|
||||
MUST return the actual complete content as the final answer, not a summary.\n\nBegin!
|
||||
This is VERY important to you, use the tools available and give your best Final
|
||||
Answer, your job depends on it!\n\nThought:"}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '790'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJNa9wwEL37Vww6r43tOpuNb2nKQgslOSy0NA1mIo9tdWVJSHK2Jex/
|
||||
L/J+2Ns20IuE5s0bzXszrxEAEzUrgfEOPe+NjO9Q41atP3/79GG7vX8QD0Xq15svX9/fUd+yRWDo
|
||||
5x/E/YmVcN0bSV5odYC5JfQUqmbXy3x1k77LViPQ65pkoLXGx0WSxb1QIs7T/CpOizgrjvROC06O
|
||||
lfAYAQC8jmdoVNX0k5WQLk6RnpzDllh5TgJgVssQYeiccB6VZ4sJ5Fp5UmPvm04PbedL+AhK74Cj
|
||||
gla8ECC0QQCgcjuy39VaKJRwO75KuFeUJAlsdnq8OkuUzD+w1AwOg0o1SDkDUCntMbg0Sns6Ivuz
|
||||
GKlbY/Wz+4PKGqGE6ypL6LQKjTuvDRvRfQTwNJo2XPjAjNW98ZXXWxq/y5ZH09g0rBl6cwS99ihn
|
||||
8esTcFGvqsmjkG5mO+PIO6on6jQjHGqhZ0A0U/13N/+qfVAuVPs/5SeAczKe6spYqgW/VDylWQq7
|
||||
/Fba2eWxYebIvghOlRdkwyRqanCQhwVj7pfz1FeNUC1ZY8VhyxpTFTxfXWXNapmzaB/9BgAA//8D
|
||||
AL0LXHV0AwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d04a06dc4d1949-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:45:18 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=KnsnYxgmlpoHf.5TWnNgU30xb2tc0gK7SC2BbUkud2M-1762890318-1.0.1.1-3KeaQY59x5mY6n8DINELLaH9_b68w7W4ZZ0KeOknBHmQyDwx5qbtDonfYxOjsO_KykjtJLHpB0bsINSNEa9TrjNQHqUWTlRhldfTLenUG44;
|
||||
path=/; expires=Tue, 11-Nov-25 20:15:18 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=ekC35NRP79GCMP.eTi_odl5.6DIsAeFEXKlanWUZOH4-1762890318589-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '598'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '632'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999827'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_cb36cbe6c33b42a28675e8c6d9a36fe9
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,127 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
|
||||
is the expected criteria for your final answer: A greeting\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"},{"role":"user","content":"Additional context:
|
||||
This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '851'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFJdi9swEHz3r9jqOT5sk+RSvx2lJW1poXDQ0vYwirS21cpaIclJr0f+
|
||||
+yE7F/s+Cn0xeGdnNLO7dwkAU5KVwETLg+isTt9w+rr/YESx27+93RaHVm4/ff7y8Vpcffv+ly0i
|
||||
g3a/UIQH1oWgzmoMiswIC4c8YFTNL9fF5nWWbfIB6EiijrTGhnR5kaedMiotsmKVZss0X57oLSmB
|
||||
npXwIwEAuBu+0aiR+IeVkC0eKh16zxtk5bkJgDnSscK498oHbgJbTKAgE9AM3q9b6ps2lPAeDB1A
|
||||
cAON2iNwaGIA4MYf0P0075ThGq6GvxK2qDW9mks6rHvPYy7Taz0DuDEUeJzLEObmhBzP9jU11tHO
|
||||
P6GyWhnl28oh92SiVR/IsgE9JgA3w5j6R8mZddTZUAX6jcNz+fpy1GPTembo6gQGClzP6pti8YJe
|
||||
JTFwpf1s0Exw0aKcqNNWeC8VzYBklvq5m5e0x+TKNP8jPwFCoA0oK+tQKvE48dTmMF7vv9rOUx4M
|
||||
M49urwRWQaGLm5BY816PJ8X8rQ/YVbUyDTrr1HhXta2Wotis8nqzLlhyTO4BAAD//wMAuV0QSWYD
|
||||
AAA=
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d044428f103c35-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:22 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=jp.mByP87tLw_KZOIh7lXZ9UMACecreCMNwHwtJmUvQ-1762890082-1.0.1.1-D76UWkvWlN8e0zlQpgSlSHjrhx3Rkh_r8bz4XKx8kljJt8s9Okre9bo7M62ewJNFK9O9iuHkADMKeAEwlsc4Hg0MsF2vt2Hu1J0xikSInv0;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:22 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=pzTqogdMFPJY2.Yrj49LODdUKbD8UBctCWNyIZVsvK4-1762890082258-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '460'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '478'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999820'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_3bda51e6d3e34f8cadcc12551dc29ab0
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,262 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: test_tool\nTool
|
||||
Arguments: {}\nTool Description: A test tool.\n\nIMPORTANT: Use the following
|
||||
format in your response:\n\n```\nThought: you should always think about what
|
||||
to do\nAction: the action to take, only one name of [test_tool], just the name,
|
||||
exactly as it''s written.\nAction Input: the input to the action, just a simple
|
||||
JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
|
||||
Task: Use the test tool\n\nThis is the expected criteria for your final answer:
|
||||
Tool result\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"user","content":"Additional
|
||||
context: This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1311'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAA4xTy47bMAy85ysIneMgcbNp1reizwXaXrpAD83CVmTaViqLWolu2gb590LOw94+
|
||||
gF504HBGw6F0mAAIXYoMhGokq9aZ5KWkz/vdTn14i/dv9h9f19tXi3dtun789H71U0wjg7Y7VHxh
|
||||
zRS1ziBrsidYeZSMUXXxfJWub+fzddoDLZVoIq12nCxni6TVVifpPL1J5stksTzTG9IKg8jgywQA
|
||||
4NCf0agt8bvIYD69VFoMQdYosmsTgPBkYkXIEHRgaVlMB1CRZbS996IoNva+oa5uOIM7CA11poQu
|
||||
IHCDwBg4ZyIDTFAj90WPj532WIK2FflWxqGhIt+DlbbSgLRhj362sS9URLNB6FKCO+s6zuBw3Nii
|
||||
KMb2PFZdkDEj2xkzAqS1xP11fTAPZ+R4jcJQ7Txtw29UUWmrQ5N7lIFsHDswOdGjxwnAQx959yRF
|
||||
4Ty1Lnr+iv116Wp10hPDqgf02XkfgomlGbFuL6wnenmJLLUJo6UJJVWD5UAdNiy7UtMImIym/tPN
|
||||
37RPk2tb/4/8ACiFjrHMncdSq6cTD20e40/4V9s15d6wCOi/aYU5a/RxEyVWsjOn5ynCj8DY5pW2
|
||||
NXrn9emNVi5fqnR9s6jWq1RMjpNfAAAA//8DANALR4WyAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d044470bdeb976-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:23 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=p01_b1BsQgwR2woMBWf1E0gJMDDl7pvqkEVHpHAsMJA-1762890083-1.0.1.1-u8iYLTTx0lmfSR1.CzuuYiHgt03yVVUMsBD8WgExXWm7ts.grUwM1ifj9p6xIz.HElrnQdfDSBD5Lv045aNr61YcB8WW3Vz33W9N0Gn0P3w;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:23 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=2gUmBgxb3VydVYt8.t_P6bY8U_pS.a4KeYpZWDDYM9Q-1762890083295-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '729'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '759'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999707'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999707'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_70c7033dbc5e4ced80d3fdcbcda2c675
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nYou ONLY have access to the following tools, and
|
||||
should NEVER make up tools that are not listed here:\n\nTool Name: test_tool\nTool
|
||||
Arguments: {}\nTool Description: A test tool.\n\nIMPORTANT: Use the following
|
||||
format in your response:\n\n```\nThought: you should always think about what
|
||||
to do\nAction: the action to take, only one name of [test_tool], just the name,
|
||||
exactly as it''s written.\nAction Input: the input to the action, just a simple
|
||||
JSON object, enclosed in curly braces, using \" to wrap keys and values.\nObservation:
|
||||
the result of the action\n```\n\nOnce all necessary information is gathered,
|
||||
return the following format:\n\n```\nThought: I now know the final answer\nFinal
|
||||
Answer: the final answer to the original input question\n```"},{"role":"user","content":"\nCurrent
|
||||
Task: Use the test tool\n\nThis is the expected criteria for your final answer:
|
||||
Tool result\nyou MUST return the actual complete content as the final answer,
|
||||
not a summary.\n\nBegin! This is VERY important to you, use the tools available
|
||||
and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"user","content":"Additional
|
||||
context: This is a test modification."},{"role":"assistant","content":"```\nThought:
|
||||
I should use the test_tool to get the required information for the final answer.\nAction:
|
||||
test_tool\nAction Input: {}\n```\nObservation: test result"},{"role":"user","content":"Additional
|
||||
context: This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '1584'
|
||||
content-type:
|
||||
- application/json
|
||||
cookie:
|
||||
- __cf_bm=p01_b1BsQgwR2woMBWf1E0gJMDDl7pvqkEVHpHAsMJA-1762890083-1.0.1.1-u8iYLTTx0lmfSR1.CzuuYiHgt03yVVUMsBD8WgExXWm7ts.grUwM1ifj9p6xIz.HElrnQdfDSBD5Lv045aNr61YcB8WW3Vz33W9N0Gn0P3w;
|
||||
_cfuvid=2gUmBgxb3VydVYt8.t_P6bY8U_pS.a4KeYpZWDDYM9Q-1762890083295-0.0.1.1-604800000
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLBbtQwEL3nKyyfN1WS3S5pbhRRKCeEkCpgq8RrTxJTxzb2pC1U++/I
|
||||
TrtJoUhcLNlv3vN7M/OQEEKloBWhvGfIB6vSN8xc3b+/FG/P3rX8x9X+y8dfHy7O8fYra88/0VVg
|
||||
mP134PjEOuFmsApQGj3B3AFDCKr5q21RnmVZuY7AYASoQOssppuTPB2klmmRFadptknzzSO9N5KD
|
||||
pxX5lhBCyEM8g1Et4J5WJFs9vQzgPeuAVsciQqgzKrxQ5r30yDTS1QxyoxF09N40zU5/7s3Y9ViR
|
||||
S6LNHbkJB/ZAWqmZIkz7O3A7fRFvr+OtIggeiQM/KtzppmmW+g7a0bMQUo9KLQCmtUEWmhSTXT8i
|
||||
h2MWZTrrzN7/QaWt1NL3tQPmjQ6+PRpLI3pICLmOPRuftYFaZwaLNZobiN+t83LSo/OsZvQIokGm
|
||||
Fqz1dvWCXi0AmVR+0XXKGe9BzNR5RGwU0iyAZJH6bzcvaU/Jpe7+R34GOAeLIGrrQEj+PPFc5iCs
|
||||
8r/Kjl2OhqkHdys51CjBhUkIaNmopv2i/qdHGOpW6g6cdXJastbWG16Up3lbbguaHJLfAAAA//8D
|
||||
AJW0fwtzAwAA
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d0444cbd6db976-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:23 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '527'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '578'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999655'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999655'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_6b1d84dcdde643cea5160e155ee624db
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,159 +0,0 @@
|
||||
interactions:
|
||||
- request:
|
||||
body: '{"name":"llama3.2:3b"}'
|
||||
headers:
|
||||
accept:
|
||||
- '*/*'
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '22'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- localhost:11434
|
||||
user-agent:
|
||||
- litellm/1.78.5
|
||||
method: POST
|
||||
uri: http://localhost:11434/api/show
|
||||
response:
|
||||
body:
|
||||
string: '{"error":"model ''llama3.2:3b'' not found"}'
|
||||
headers:
|
||||
Content-Length:
|
||||
- '41'
|
||||
Content-Type:
|
||||
- application/json; charset=utf-8
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:28 GMT
|
||||
status:
|
||||
code: 404
|
||||
message: Not Found
|
||||
- request:
|
||||
body: '{"messages":[{"role":"system","content":"You are Test Agent. Test backstory\nYour
|
||||
personal goal is: Test goal\nTo give my best complete final answer to the task
|
||||
respond using the exact following format:\n\nThought: I now can give a great
|
||||
answer\nFinal Answer: Your final answer must be the great and the most complete
|
||||
as possible, it must be outcome described.\n\nI MUST use these formats, my job
|
||||
depends on it!"},{"role":"user","content":"\nCurrent Task: Say hello\n\nThis
|
||||
is the expected criteria for your final answer: A greeting\nyou MUST return
|
||||
the actual complete content as the final answer, not a summary.\n\nBegin! This
|
||||
is VERY important to you, use the tools available and give your best Final Answer,
|
||||
your job depends on it!\n\nThought:"},{"role":"user","content":"Additional context:
|
||||
This is a test modification."}],"model":"gpt-4.1-mini"}'
|
||||
headers:
|
||||
accept:
|
||||
- application/json
|
||||
accept-encoding:
|
||||
- gzip, deflate, zstd
|
||||
connection:
|
||||
- keep-alive
|
||||
content-length:
|
||||
- '851'
|
||||
content-type:
|
||||
- application/json
|
||||
host:
|
||||
- api.openai.com
|
||||
user-agent:
|
||||
- OpenAI/Python 1.109.1
|
||||
x-stainless-arch:
|
||||
- arm64
|
||||
x-stainless-async:
|
||||
- 'false'
|
||||
x-stainless-lang:
|
||||
- python
|
||||
x-stainless-os:
|
||||
- MacOS
|
||||
x-stainless-package-version:
|
||||
- 1.109.1
|
||||
x-stainless-read-timeout:
|
||||
- '600'
|
||||
x-stainless-retry-count:
|
||||
- '0'
|
||||
x-stainless-runtime:
|
||||
- CPython
|
||||
x-stainless-runtime-version:
|
||||
- 3.13.3
|
||||
method: POST
|
||||
uri: https://api.openai.com/v1/chat/completions
|
||||
response:
|
||||
body:
|
||||
string: !!binary |
|
||||
H4sIAAAAAAAAAwAAAP//jFLRbtQwEHzPVyx+vlRJmrte84KOSqgFCSFAqLRUkc/ZJAbHa9lOy6m6
|
||||
f0dOrpe0gMRLpHh2Znd29jECYLJiBTDRci86o+ILTten/ccPFzyp398srz9/2/o3X/PN6btN84kt
|
||||
AoO2P1D4J9aJoM4o9JL0CAuL3GNQTc9W2fo8SdbnA9BRhSrQGuPj/CSNO6llnCXZMk7yOM0P9Jak
|
||||
QMcKuI0AAB6HbxhUV/iLFZAsnl46dI43yIpjEQCzpMIL485J57n2bDGBgrRHPcz+paW+aX0BV6Dp
|
||||
AQTX0Mh7BA5NMABcuwe03/VbqbmCzfBXwCUqRa/g8sC4grEN7KgHTxXfvZ63s1j3jgfPuldqBnCt
|
||||
yfOws8Ho3QHZH60paoylrXtBZbXU0rWlRe5IBxvOk2EDuo8A7oYV9s+2woylzvjS008c2qWrs1GP
|
||||
TdFNaJYdQE+eqxlrTPGlXlmh51K5WQhMcNFiNVGnxHhfSZoB0cz1n9P8TXt0LnXzP/ITIAQaj1Vp
|
||||
LFZSPHc8lVkMl/2vsuOWh4GZQ3svBZZeog1JVFjzXo3nxtzOeezKWuoGrbFyvLnalLnI1su0Xq8y
|
||||
Fu2j3wAAAP//AwDurzwzggMAAA==
|
||||
headers:
|
||||
CF-RAY:
|
||||
- 99d0446e698367ab-SJC
|
||||
Connection:
|
||||
- keep-alive
|
||||
Content-Encoding:
|
||||
- gzip
|
||||
Content-Type:
|
||||
- application/json
|
||||
Date:
|
||||
- Tue, 11 Nov 2025 19:41:30 GMT
|
||||
Server:
|
||||
- cloudflare
|
||||
Set-Cookie:
|
||||
- __cf_bm=b52crfzdOm5rh4aOc2LfM8aQKFI.ZL9WCZXaPBDdG5k-1762890090-1.0.1.1-T2xhtwX0vuEnMIb8NRgP4w3RRn1N1ZwSjuhKBob1vDLDmN7XhCKkoIg3IrlC9KEyhA65IGa5DWsHfmlRKKxqw6sIPA98BSO6E3wsTRspHw4;
|
||||
path=/; expires=Tue, 11-Nov-25 20:11:30 GMT; domain=.api.openai.com; HttpOnly;
|
||||
Secure; SameSite=None
|
||||
- _cfuvid=0TH0Kjp_5t6yhwXKA1wlKBHaczp.TeWhM2A5t6by1sI-1762890090153-0.0.1.1-604800000;
|
||||
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
|
||||
Strict-Transport-Security:
|
||||
- max-age=31536000; includeSubDomains; preload
|
||||
Transfer-Encoding:
|
||||
- chunked
|
||||
X-Content-Type-Options:
|
||||
- nosniff
|
||||
access-control-expose-headers:
|
||||
- X-Request-ID
|
||||
alt-svc:
|
||||
- h3=":443"; ma=86400
|
||||
cf-cache-status:
|
||||
- DYNAMIC
|
||||
openai-organization:
|
||||
- crewai-iuxna1
|
||||
openai-processing-ms:
|
||||
- '1049'
|
||||
openai-project:
|
||||
- proj_xitITlrFeen7zjNSzML82h9x
|
||||
openai-version:
|
||||
- '2020-10-01'
|
||||
x-envoy-upstream-service-time:
|
||||
- '1387'
|
||||
x-openai-proxy-wasm:
|
||||
- v0.1
|
||||
x-ratelimit-limit-project-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-limit-requests:
|
||||
- '30000'
|
||||
x-ratelimit-limit-tokens:
|
||||
- '150000000'
|
||||
x-ratelimit-remaining-project-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-remaining-requests:
|
||||
- '29999'
|
||||
x-ratelimit-remaining-tokens:
|
||||
- '149999817'
|
||||
x-ratelimit-reset-project-tokens:
|
||||
- 0s
|
||||
x-ratelimit-reset-requests:
|
||||
- 2ms
|
||||
x-ratelimit-reset-tokens:
|
||||
- 0s
|
||||
x-request-id:
|
||||
- req_4b132b998ed941b5b6a85ddbb36e2b65
|
||||
status:
|
||||
code: 200
|
||||
message: OK
|
||||
version: 1
|
||||
@@ -1,182 +0,0 @@
interactions:
- request:
    body: '{"messages":[{"role":"system","content":"You are Researcher. You are a
      researcher\nYour personal goal is: Research topics\nTo give my best complete
      final answer to the task respond using the exact following format:\n\nThought:
      I now can give a great answer\nFinal Answer: Your final answer must be the great
      and the most complete as possible, it must be outcome described.\n\nI MUST use
      these formats, my job depends on it!"},{"role":"user","content":"\nCurrent Task:
      Research AI frameworks\n\nThis is the expected criteria for your final answer:
      A research summary\nyou MUST return the actual complete content as the final
      answer, not a summary.\n\nYou MUST follow these instructions: \n - Include specific
      examples and real-world case studies to enhance the credibility and depth of
      the article ideas.\n - Incorporate mentions of notable companies, projects,
      or tools relevant to each topic to provide concrete context.\n - Add diverse
      viewpoints such as interviews with experts, users, or thought leaders to enrich
      the narrative and lend authority.\n - Address ethical, social, and emotional
      considerations explicitly to reflect a balanced and comprehensive analysis.\n
      - Enhance the descriptions by including implications for future developments
      and the potential impact on society.\n - Use more engaging and vivid language
      that draws the reader into each topic''s nuances and importance.\n - Include
      notes or summaries that contextualize each set of ideas in terms of relevance
      and potential reader engagement.\n - In future tasks, focus on elaborating initial
      outlines into more detailed and nuanced article proposals with richer content
      and insights.\n\nBegin! This is VERY important to you, use the tools available
      and give your best Final Answer, your job depends on it!\n\nThought:"},{"role":"user","content":"Additional
      context: This is a test modification."}],"model":"gpt-4.1-mini"}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '1894'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.109.1
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.109.1
      x-stainless-read-timeout:
      - '600'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.13.3
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA2RXTXPbOBK9z6/o8smpkrRJJjPJ6OZy4ownceKKnY+qzaUJNMmOQTQLACUrc5kf
        sZf9e/NLtrpBycrsRWWTINh4/V6/xz9/Ajhhf7KGE9djccMYlucoX57duvj+fXn/+hrzz8/v3j7f
        dL/35y9efDlZ6BPSfCNX9k+tnAxjoMIS622XCAvprk+e//r0xW+PH//22G4M4inoY91Yls9WT5YD
        R14+ffz0l+XjZ8snz+bHe2FH+WQN//4JAOBP+9VCo6f7kzXYZnZloJyxo5P1YRHASZKgV04wZ84F
        Y8FYTl5e3LSmEv+5F6vryxouIcoWHEboeEOA0OkBAGPeUvoaLzhigDP7b/01fo0fKBMm18PNNAyY
        diARzi7hIuFAW0l3WRddxpLET05hWX+NZ6lwy44xwGUsFAJ3FB3B6dnlI2gPTwImgtITNOjuGokE
        0oLCluwVnjYUZBwolgWMSTbsOXaQS5pcmRJ5oLjhJFFXZMDooYiEDKXHAhSxCQRpLp9SXgDFjiPZ
        n7paW4mRKUMR8JS5iwsoCTnW+57GIDsY0PUcCQJhilqBdTYDtXpGiiXsVnDbcz68DPKMlecNZeBY
        BAb8JkmP9XD+BSTCsNxKCh5wHAM7VAS10tKzw2BlZDEkncTMntJ+id6i+5FSAY6Zu77kBXAIUy66
        JnYKLSdokqAHHkZ0RZtXyPVRgnS7w+5Uditt45MVvN9Q2jBttRVvCQ3xH9q9/hqXcEsxS7oIsoXT
        1yJdoEdreF8bqA0dJBcYZZwCJpCR4jLLlBxB4CZhUsinTB5aSf8Pb4WexsOV1dH7/v7rvxnaQPes
        3VWwuZDRATAE2ea5a8oJQJckZzi//pgX8Np+dfPb6495BbVu22/KVvnRqTgq41T4GQLf0bwabhPG
        HLCQbTRfvO6lSIaeuz5YH4BLhuwwYMOBSwV6lC2llaJ3vbsVZcnpFRX81wU6akTuHq3hTZRtNFB0
        A7+LOLAzmk7F2o4BuoRjvzjswRla3IiqgeMRAakoBfJhM8J6Rk/N1HV7mI0/rFhhgDYReRmUy/TA
        0lp3Bq3VwEK/weioio7jXB4l2HBmibZvxDIlDBAwdhN2pGA6ylnfe/ru7fUjw+FsRNcTXH15RwVO
        zwb8LvHRGm4MuFAhPohsMa/L0zhKKnrsXKpa96fh2FLSQbOAQBtK2JGHZgd1Z/hMDdwoux1lOD37
        fPOo6j7whlSZS594QxFckMlDnldapVesTJK2wLl0kYtOzluRcMcFTs/f3b5R+ret4nRUcN4f5Ac2
        w0iplTQoiNaePBK5fgE8YEeVoYXuyxFmC5gKB/5em3woxtpxLqlgRHvs5m43PnBU0oHDdog/zr4c
        qfVchoYjZXg3Ddc72HLpAaciAxZ24Lk1LAsb7yrbrz8COkdhnkEL6GbwS0Ib/T9Q0Hb8UUQvicYr
        jt4KPwtjjxcSvI2epyv4oMPwsw3Dc2XrTZk8Ux04vxOG0js1DC3liryNx8sBlcvrY+n2mKEh7WOa
        bHBy3FvJPMzyLhcaMjgcrT3SAmEKO3VFRwk86UjZH3tM+jIbqCMW7SzIVJwMlBeQJ9cDZht6+9NV
        f7DWesaGFM9EhaOMWHo1BeyiZK5dOZuKRBlkyvCJenaB8vpY3T3hhsMOaNCxVgl9SznUjn9sKOkB
        66jPFFpjsRa7b8RCnaMkbqZqClJdp/CgxHQm2uWAd3sVRTLpRiomfx7UeqvcrWBNCNGA3YtpPUtz
        nhRtwskfgTjbJRb44/pKUocRznvMtKgere9VLVOy1w+iPXZuSjplS2/phL1SsZ390cq4MdXAB3JV
        kRLX/1TGu9s3R3OMYq/t9darYw1Ke4RImcWjhe9HwAJmeJwMwxT3Lr23l/2Qy7X2TDgEyhWsGxsF
        Wjnsg9TahKgsPYiJlGVSiHX4B/PcPev0Paakg1w0NWwkTHqPv+vahh/s3KepA8/ZyYZSdfWfV/Cq
        5gRjTKYEn5i2o3Asqi6NaOb5Jg8+jkyzbNMKLoiXF8TwlheAkEidijzcFIytJKu/pZwlWXmEmo/u
        HReqRtzIVAx4T4M4nR/fK8bSqiJrbtswqhWnZZuYog+7H7JSMxVAcDhVZdqOesiWzer2NFOw9NaY
        ZDRtFEpjojJb8QqubVjpJnpALEcx0E777tPly8sz1VuPmb/XhBppTitH6bWIdlCGQbxSlQZKZqw9
        Jr/FyuLaX0lzE/UTYFCrnXJewas55J1dqqmKw0IZMGR5iBKqXUcp7kMt5t1xwJG2iq6dbJcZgsGe
        pHt0lBotrWHMmkNbiFJAp2g7haDJNqpHGkueHepZwI0lzmpFrwaZk8f5DwF0/TVqTkg4sj/OBHNH
        j3BSqj+0nbzettGrDVHYrbN6bJ4/I8hD4nyX//7rP1o64DCGg/r1W4A36HZASbJd0Dq/SaO8HwPW
        OlZwIUlB0M+1BQRMFkPIEgP5PVbNxKHG4p4yHZc94A449pS4MndM9G3yFh5oaMj76saHBOKx4Ooh
        KIN9GGrntTqltSJhX1x+KjtlD2tphplNnRE1vFQZt8gpUs7QkkapY6v5RxauZuMwefDiJstyB++a
        fcQWqhK6OmzofgzIcZ9OTX0r6zqVHQxTLtBgsOHIMcqmbmgMTtRNYZ5/NrOqM3L0vGE/YYBUI7CF
        twN3thTCsiHLMpRHUn4FxRcyxVwDVcsU/CzC/uD3Rs5fVnAxWch/+fBRWN9Rq7YsoF84ysu3Ijau
        W0lbTH4B/wMAAP//jFjNbtswDL73KQSfWiAo0Kwbgt2GXVZgP6fdVgSKTNtcZUnQT7ocCuwh9oR7
        koGkYjtZBuxMR7Ep8vs7nUdaTULQfaXDqTFqLDYjrbOd1dr1QrqSLKs6rVoJXVr0Kvse8gDxZnWU
        UDRg577mmqj+08cb+SXbbogKHMT+MMlGc/SSIfoRaWvTqGOGuJqYkV6GZAvBhjAQupZcHwJtTBf9
        qKAtsi2qpKo5E10E79/stEJgGFv4aG3V6B1mH2sHlbFIH6TMoF0PasSMvdAwacglcn4J4OilXKs+
        VJNB5oYbbquXROJqndkXZ0/Xw7Y5ELbUj1rgG48c+UeeUbGCA1TPStJOXK20q/PFtW8XnWR9ShdV
        ejoNWjWUUbsT8FnN6HP03HvsUYZfTIWxJeGeFsUM2lpwPbuCb+49xSs/Mg39Z59BIPFSDIDVHB5U
        BAt77TJ39t2DCksyWqngLZrDqJ+mjIKQhzkMEsuEsrNobtUD4Sz7Dc38FWGgPdqDSk6HNPhcCcMW
        gy0Ty+CfzxaB/c7Jhg9o2auNgbfaRMzckgidrWrOu2WuQAPcdWwx+GZqt4TYDan4JCp+GVeQ1iB8
        fQIRztkHNBTO6MmYGpI/O8sa0fgSa0W5IhqOFU6J5GmXiYakA4IUc7jBHkB2XNQjaR5mVnnXSwBB
        RPmdgJAP5yYwTP7++SsPcOBnqhzMh6NzDFZnkpVJpUGHGsEQuJMH8pSddWfB1q366lqIlNy1c2Rz
        OqDz1MlITOty5E9MClJisya2Y9BMHtXuoFPP+lAVBIPfkeclWbIHtQMHEtjVqSM+YoFMm3q7zBRJ
        OyRNwaYr1i4K2jkv1MNp5mOtvEz5pfV9iH6Xzn7adOgwDdsIOnlHWWXKPjRcfblS6pFz0nISfTYy
        o9vsn4D/7tV9zUmbOZ+dq5vNplazz9rOhbv1+lg5OXHbQtZo0yJrbQyFFe382zmYZRJYFK4W3/33
        +1w6W74dXf8/x88FYyBkaLezWbj0WAQav389NvWZX7ipnmebESLdRQudLlZS5UaM87ZD15OoRomW
        u7C9N+vN67tu82bdXL1c/QEAAP//AwBbY8c8aRcAAA==
    headers:
      CF-RAY:
      - 99d0447958ce36e8-SJC
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Tue, 11 Nov 2025 19:41:45 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=dSe1gQEFfPpE4AOsFi3S3RQkzPCQnV1.Ywe__K7cSSU-1762890105-1.0.1.1-I1CSTO8ri4tjbaHdIHQ9YP9c2pa.y9WwMQFRaUztT95T_OAe5V0ndTFN4pO1RiCXh15TUpWmBxRdxIWjcYDMqrDIvKWInLO5aavGFWZ1rys;
        path=/; expires=Tue, 11-Nov-25 20:11:45 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=LMf_4EPFZGfTiqcjmjEk7WxOTuX2ukd3Cs_R8170wJ4-1762890105804-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Strict-Transport-Security:
      - max-age=31536000; includeSubDomains; preload
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '15065'
      openai-project:
      - proj_xitITlrFeen7zjNSzML82h9x
      openai-version:
      - '2020-10-01'
      x-envoy-upstream-service-time:
      - '15254'
      x-openai-proxy-wasm:
      - v0.1
      x-ratelimit-limit-project-tokens:
      - '150000000'
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-project-tokens:
      - '149999560'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999560'
      x-ratelimit-reset-project-tokens:
      - 0s
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_c49c9fba20ff4f05903eff3c78797ce1
    status:
      code: 200
      message: OK
version: 1
2 lib/crewai/tests/hooks/__init__.py Normal file
@@ -0,0 +1,2 @@
"""Tests for CrewAI hooks functionality."""

619 lib/crewai/tests/hooks/test_crew_scoped_hooks.py Normal file
@@ -0,0 +1,619 @@
"""Tests for crew-scoped hooks within @CrewBase classes."""

from __future__ import annotations

from unittest.mock import Mock

import pytest

from crewai import Agent, Crew
from crewai.hooks import (
    LLMCallHookContext,
    ToolCallHookContext,
    before_llm_call,
    before_tool_call,
    get_before_llm_call_hooks,
    get_before_tool_call_hooks,
)
from crewai.project import CrewBase, agent, crew


@pytest.fixture(autouse=True)
def clear_hooks():
    """Clear global hooks before and after each test."""
    from crewai.hooks import llm_hooks, tool_hooks

    # Store original hooks
    original_before_llm = llm_hooks._before_llm_call_hooks.copy()
    original_before_tool = tool_hooks._before_tool_call_hooks.copy()

    # Clear hooks
    llm_hooks._before_llm_call_hooks.clear()
    tool_hooks._before_tool_call_hooks.clear()

    yield

    # Restore original hooks
    llm_hooks._before_llm_call_hooks.clear()
    tool_hooks._before_tool_call_hooks.clear()
    llm_hooks._before_llm_call_hooks.extend(original_before_llm)
    tool_hooks._before_tool_call_hooks.extend(original_before_tool)


class TestCrewScopedHooks:
    """Test hooks defined as methods within @CrewBase classes."""

    def test_crew_scoped_hook_is_registered_on_instance_creation(self):
        """Test that crew-scoped hooks are registered when crew instance is created."""

        @CrewBase
        class TestCrew:
            @before_llm_call
            def my_hook(self, context):
                pass

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Check hooks before instance creation
        hooks_before = get_before_llm_call_hooks()
        initial_count = len(hooks_before)

        # Create instance - should register the hook
        crew_instance = TestCrew()

        # Check hooks after instance creation
        hooks_after = get_before_llm_call_hooks()

        # Should have one more hook registered
        assert len(hooks_after) == initial_count + 1

    def test_crew_scoped_hook_has_access_to_self(self):
        """Test that crew-scoped hooks can access self and instance variables."""
        execution_log = []

        @CrewBase
        class TestCrew:
            def __init__(self):
                self.crew_name = "TestCrew"
                self.call_count = 0

            @before_llm_call
            def my_hook(self, context):
                # Can access self
                self.call_count += 1
                execution_log.append(f"{self.crew_name}:{self.call_count}")

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create instance
        crew_instance = TestCrew()

        # Get the registered hook
        hooks = get_before_llm_call_hooks()
        crew_hook = hooks[-1]  # Last registered hook

        # Create mock context
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Test")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0

        context = LLMCallHookContext(executor=mock_executor)

        # Execute hook multiple times
        crew_hook(context)
        crew_hook(context)

        # Verify hook accessed self and modified instance state
        assert len(execution_log) == 2
        assert execution_log[0] == "TestCrew:1"
        assert execution_log[1] == "TestCrew:2"
        assert crew_instance.call_count == 2

    def test_multiple_crews_have_isolated_hooks(self):
        """Test that different crew instances have isolated hooks."""
        crew1_executions = []
        crew2_executions = []

        @CrewBase
        class Crew1:
            @before_llm_call
            def crew1_hook(self, context):
                crew1_executions.append("crew1")

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        @CrewBase
        class Crew2:
            @before_llm_call
            def crew2_hook(self, context):
                crew2_executions.append("crew2")

            @agent
            def analyst(self):
                return Agent(role="Analyst", goal="Analyze", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create both instances
        instance1 = Crew1()
        instance2 = Crew2()

        # Both hooks should be registered
        hooks = get_before_llm_call_hooks()
        assert len(hooks) >= 2

        # Create mock context
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Test")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0

        context = LLMCallHookContext(executor=mock_executor)

        # Execute all hooks
        for hook in hooks:
            hook(context)

        # Both hooks should have executed
        assert "crew1" in crew1_executions
        assert "crew2" in crew2_executions

    def test_crew_scoped_hook_with_filters(self):
        """Test that filtered crew-scoped hooks work correctly."""
        execution_log = []

        @CrewBase
        class TestCrew:
            @before_tool_call(tools=["delete_file"])
            def filtered_hook(self, context):
                execution_log.append(f"filtered:{context.tool_name}")
                return None

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create instance
        crew_instance = TestCrew()

        # Get registered hooks
        hooks = get_before_tool_call_hooks()
        crew_hook = hooks[-1]  # Last registered

        # Test with matching tool
        mock_tool = Mock()
        context1 = ToolCallHookContext(
            tool_name="delete_file", tool_input={}, tool=mock_tool
        )
        crew_hook(context1)

        assert len(execution_log) == 1
        assert execution_log[0] == "filtered:delete_file"

        # Test with non-matching tool
        context2 = ToolCallHookContext(
            tool_name="read_file", tool_input={}, tool=mock_tool
        )
        crew_hook(context2)

        # Should still be 1 (filtered hook didn't run)
        assert len(execution_log) == 1

    def test_crew_scoped_hook_no_double_registration(self):
        """Test that crew-scoped hooks are not registered twice."""

        @CrewBase
        class TestCrew:
            @before_llm_call
            def my_hook(self, context):
                pass

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Get initial hook count
        initial_hooks = len(get_before_llm_call_hooks())

        # Create first instance
        instance1 = TestCrew()

        # Should add 1 hook
        hooks_after_first = get_before_llm_call_hooks()
        assert len(hooks_after_first) == initial_hooks + 1

        # Create second instance
        instance2 = TestCrew()

        # Should add another hook (one per instance)
        hooks_after_second = get_before_llm_call_hooks()
        assert len(hooks_after_second) == initial_hooks + 2

    def test_crew_scoped_hook_method_signature(self):
        """Test that crew-scoped hooks have correct signature (self + context)."""

        @CrewBase
        class TestCrew:
            def __init__(self):
                self.test_value = "test"

            @before_llm_call
            def my_hook(self, context):
                # Should be able to access both self and context
                return f"{self.test_value}:{context.iterations}"

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create instance
        crew_instance = TestCrew()

        # Verify the hook method has is_before_llm_call_hook marker
        assert hasattr(crew_instance.my_hook, "__func__")
        hook_func = crew_instance.my_hook.__func__
        assert hasattr(hook_func, "is_before_llm_call_hook")
        assert hook_func.is_before_llm_call_hook is True

    def test_crew_scoped_with_agent_filter(self):
        """Test crew-scoped hooks with agent filters."""
        execution_log = []

        @CrewBase
        class TestCrew:
            @before_llm_call(agents=["Researcher"])
            def filtered_hook(self, context):
                execution_log.append(context.agent.role)

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create instance
        crew_instance = TestCrew()

        # Get hooks
        hooks = get_before_llm_call_hooks()
        crew_hook = hooks[-1]

        # Test with matching agent
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Researcher")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0

        context1 = LLMCallHookContext(executor=mock_executor)
        crew_hook(context1)

        assert len(execution_log) == 1
        assert execution_log[0] == "Researcher"

        # Test with non-matching agent
        mock_executor.agent.role = "Analyst"
        context2 = LLMCallHookContext(executor=mock_executor)
        crew_hook(context2)

        # Should still be 1 (filtered out)
        assert len(execution_log) == 1


class TestCrewScopedHookAttributes:
    """Test that crew-scoped hooks have correct attributes set."""

    def test_hook_marker_attribute_is_set(self):
        """Test that decorator sets marker attribute on method."""

        @CrewBase
        class TestCrew:
            @before_llm_call
            def my_hook(self, context):
                pass

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Check the unbound method has the marker
        assert hasattr(TestCrew.__dict__["my_hook"], "is_before_llm_call_hook")
        assert TestCrew.__dict__["my_hook"].is_before_llm_call_hook is True

    def test_filter_attributes_are_preserved(self):
        """Test that filter attributes are preserved on methods."""

        @CrewBase
        class TestCrew:
            @before_tool_call(tools=["delete_file"], agents=["Dev"])
            def filtered_hook(self, context):
                return None

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Check filter attributes are set
        hook_method = TestCrew.__dict__["filtered_hook"]
        assert hasattr(hook_method, "is_before_tool_call_hook")
        assert hasattr(hook_method, "_filter_tools")
        assert hasattr(hook_method, "_filter_agents")
        assert hook_method._filter_tools == ["delete_file"]
        assert hook_method._filter_agents == ["Dev"]

    def test_registered_hooks_tracked_on_instance(self):
        """Test that registered hooks are tracked on the crew instance."""

        @CrewBase
        class TestCrew:
            @before_llm_call
            def llm_hook(self, context):
                pass

            @before_tool_call
            def tool_hook(self, context):
                return None

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create instance
        crew_instance = TestCrew()

        # Check that hooks are tracked
        assert hasattr(crew_instance, "_registered_hook_functions")
        assert isinstance(crew_instance._registered_hook_functions, list)
        assert len(crew_instance._registered_hook_functions) == 2

        # Check hook types
        hook_types = [ht for ht, _ in crew_instance._registered_hook_functions]
        assert "before_llm_call" in hook_types
        assert "before_tool_call" in hook_types


class TestCrewScopedHookExecution:
    """Test execution behavior of crew-scoped hooks."""

    def test_crew_hook_executes_with_bound_self(self):
        """Test that crew-scoped hook executes with self properly bound."""
        execution_log = []

        @CrewBase
        class TestCrew:
            def __init__(self):
                self.instance_id = id(self)

            @before_llm_call
            def my_hook(self, context):
                # Should have access to self
                execution_log.append(self.instance_id)

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create instance
        crew_instance = TestCrew()
        expected_id = crew_instance.instance_id

        # Get and execute hook
        hooks = get_before_llm_call_hooks()
        crew_hook = hooks[-1]

        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Test")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0

        context = LLMCallHookContext(executor=mock_executor)

        # Execute hook
        crew_hook(context)

        # Verify it had access to self
        assert len(execution_log) == 1
        assert execution_log[0] == expected_id

    def test_crew_hook_can_modify_instance_state(self):
        """Test that crew-scoped hooks can modify instance variables."""

        @CrewBase
        class TestCrew:
            def __init__(self):
                self.counter = 0

            @before_tool_call
            def increment_counter(self, context):
                self.counter += 1
                return None

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create instance
        crew_instance = TestCrew()
        assert crew_instance.counter == 0

        # Get and execute hook
        hooks = get_before_tool_call_hooks()
        crew_hook = hooks[-1]

        mock_tool = Mock()
        context = ToolCallHookContext(tool_name="test", tool_input={}, tool=mock_tool)

        # Execute hook 3 times
        crew_hook(context)
        crew_hook(context)
        crew_hook(context)

        # Verify counter was incremented
        assert crew_instance.counter == 3

    def test_multiple_instances_maintain_separate_state(self):
        """Test that multiple instances of the same crew maintain separate state."""

        @CrewBase
        class TestCrew:
            def __init__(self):
                self.call_count = 0

            @before_llm_call
            def count_calls(self, context):
                self.call_count += 1

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Create two instances
        instance1 = TestCrew()
        instance2 = TestCrew()

        # Get all hooks (should include hooks from both instances)
        all_hooks = get_before_llm_call_hooks()

        # Find hooks for each instance (last 2 registered)
        hook1 = all_hooks[-2]
        hook2 = all_hooks[-1]

        # Create mock context
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Test")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0

        context = LLMCallHookContext(executor=mock_executor)

        # Execute first hook twice
        hook1(context)
        hook1(context)

        # Execute second hook once
        hook2(context)

        # Each instance should have independent state
        # Note: We can't easily verify which hook belongs to which instance
        # in this test without more introspection, but the fact that it doesn't
        # crash and hooks can maintain state proves isolation works


class TestSignatureDetection:
    """Test that signature detection correctly identifies methods vs functions."""

    def test_method_signature_detected(self):
        """Test that methods with 'self' parameter are detected."""
        import inspect

        @CrewBase
        class TestCrew:
            @before_llm_call
            def method_hook(self, context):
                pass

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # Check that method has self parameter
        method = TestCrew.__dict__["method_hook"]
        sig = inspect.signature(method)
        params = list(sig.parameters.keys())
        assert params[0] == "self"
        assert len(params) == 2  # self + context

    def test_standalone_function_signature_detected(self):
        """Test that standalone functions without 'self' are detected."""
        import inspect

        @before_llm_call
        def standalone_hook(context):
            pass

        # Should have only context parameter (no self)
        sig = inspect.signature(standalone_hook)
        params = list(sig.parameters.keys())
        assert "self" not in params
        assert len(params) == 1  # Just context

        # Should be registered
        hooks = get_before_llm_call_hooks()
        assert len(hooks) >= 1
335 lib/crewai/tests/hooks/test_decorators.py Normal file
@@ -0,0 +1,335 @@
"""Tests for decorator-based hook registration."""

from __future__ import annotations

from unittest.mock import Mock

import pytest

from crewai.hooks import (
    after_llm_call,
    after_tool_call,
    before_llm_call,
    before_tool_call,
    get_after_llm_call_hooks,
    get_after_tool_call_hooks,
    get_before_llm_call_hooks,
    get_before_tool_call_hooks,
)
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext


@pytest.fixture(autouse=True)
def clear_hooks():
    """Clear global hooks before and after each test."""
    from crewai.hooks import llm_hooks, tool_hooks

    # Store original hooks
    original_before_llm = llm_hooks._before_llm_call_hooks.copy()
    original_after_llm = llm_hooks._after_llm_call_hooks.copy()
    original_before_tool = tool_hooks._before_tool_call_hooks.copy()
    original_after_tool = tool_hooks._after_tool_call_hooks.copy()

    # Clear hooks
    llm_hooks._before_llm_call_hooks.clear()
    llm_hooks._after_llm_call_hooks.clear()
    tool_hooks._before_tool_call_hooks.clear()
    tool_hooks._after_tool_call_hooks.clear()

    yield

    # Restore original hooks
    llm_hooks._before_llm_call_hooks.clear()
    llm_hooks._after_llm_call_hooks.clear()
    tool_hooks._before_tool_call_hooks.clear()
    tool_hooks._after_tool_call_hooks.clear()
    llm_hooks._before_llm_call_hooks.extend(original_before_llm)
    llm_hooks._after_llm_call_hooks.extend(original_after_llm)
    tool_hooks._before_tool_call_hooks.extend(original_before_tool)
    tool_hooks._after_tool_call_hooks.extend(original_after_tool)


class TestLLMHookDecorators:
    """Test LLM hook decorators."""

    def test_before_llm_call_decorator_registers_hook(self):
        """Test that @before_llm_call decorator registers the hook."""

        @before_llm_call
        def test_hook(context):
            pass

        hooks = get_before_llm_call_hooks()
        assert len(hooks) == 1

    def test_after_llm_call_decorator_registers_hook(self):
        """Test that @after_llm_call decorator registers the hook."""

        @after_llm_call
        def test_hook(context):
            return None

        hooks = get_after_llm_call_hooks()
        assert len(hooks) == 1

    def test_decorated_hook_executes_correctly(self):
        """Test that decorated hook executes and modifies behavior."""
        execution_log = []

        @before_llm_call
        def test_hook(context):
            execution_log.append("executed")

        # Create mock context
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Test")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0

        context = LLMCallHookContext(executor=mock_executor)

        # Execute the hook
        hooks = get_before_llm_call_hooks()
        hooks[0](context)

        assert len(execution_log) == 1
        assert execution_log[0] == "executed"

    def test_before_llm_call_with_agent_filter(self):
        """Test that agent filter works correctly."""
        execution_log = []

        @before_llm_call(agents=["Researcher"])
        def filtered_hook(context):
            execution_log.append(context.agent.role)

        hooks = get_before_llm_call_hooks()
        assert len(hooks) == 1

        # Test with matching agent
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Researcher")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0

        context = LLMCallHookContext(executor=mock_executor)
        hooks[0](context)

        assert len(execution_log) == 1
        assert execution_log[0] == "Researcher"

        # Test with non-matching agent
        mock_executor.agent.role = "Analyst"
        context2 = LLMCallHookContext(executor=mock_executor)
        hooks[0](context2)

        # Should still be 1 (hook didn't execute)
        assert len(execution_log) == 1


class TestToolHookDecorators:
    """Test tool hook decorators."""

    def test_before_tool_call_decorator_registers_hook(self):
        """Test that @before_tool_call decorator registers the hook."""

        @before_tool_call
        def test_hook(context):
            return None

        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 1

    def test_after_tool_call_decorator_registers_hook(self):
        """Test that @after_tool_call decorator registers the hook."""

        @after_tool_call
        def test_hook(context):
            return None

        hooks = get_after_tool_call_hooks()
        assert len(hooks) == 1

    def test_before_tool_call_with_tool_filter(self):
        """Test that tool filter works correctly."""
        execution_log = []

        @before_tool_call(tools=["delete_file", "execute_code"])
        def filtered_hook(context):
            execution_log.append(context.tool_name)
            return None

        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 1

        # Test with matching tool
        mock_tool = Mock()
        context = ToolCallHookContext(
            tool_name="delete_file",
            tool_input={},
            tool=mock_tool,
        )
        hooks[0](context)

        assert len(execution_log) == 1
        assert execution_log[0] == "delete_file"

        # Test with non-matching tool
        context2 = ToolCallHookContext(
            tool_name="read_file",
            tool_input={},
            tool=mock_tool,
        )
        hooks[0](context2)

        # Should still be 1 (hook didn't execute for read_file)
        assert len(execution_log) == 1

    def test_before_tool_call_with_combined_filters(self):
        """Test that combined tool and agent filters work."""
        execution_log = []

        @before_tool_call(tools=["write_file"], agents=["Developer"])
        def filtered_hook(context):
            execution_log.append(f"{context.tool_name}-{context.agent.role}")
            return None

        hooks = get_before_tool_call_hooks()
        mock_tool = Mock()
        mock_agent = Mock(role="Developer")

        # Test with both matching
        context = ToolCallHookContext(
            tool_name="write_file",
            tool_input={},
            tool=mock_tool,
            agent=mock_agent,
        )
        hooks[0](context)

        assert len(execution_log) == 1
        assert execution_log[0] == "write_file-Developer"

        # Test with tool matching but agent not
        mock_agent.role = "Researcher"
        context2 = ToolCallHookContext(
            tool_name="write_file",
            tool_input={},
            tool=mock_tool,
            agent=mock_agent,
        )
        hooks[0](context2)

        # Should still be 1 (hook didn't execute)
        assert len(execution_log) == 1

    def test_after_tool_call_with_filter(self):
        """Test that after_tool_call decorator with filter works."""

        @after_tool_call(tools=["web_search"])
        def filtered_hook(context):
            if context.tool_result:
                return context.tool_result.upper()
            return None

        hooks = get_after_tool_call_hooks()
        mock_tool = Mock()

        # Test with matching tool
        context = ToolCallHookContext(
            tool_name="web_search",
            tool_input={},
            tool=mock_tool,
            tool_result="result",
        )
        result = hooks[0](context)

        assert result == "RESULT"

        # Test with non-matching tool
        context2 = ToolCallHookContext(
            tool_name="other_tool",
            tool_input={},
            tool=mock_tool,
            tool_result="result",
        )
        result2 = hooks[0](context2)

        assert result2 is None  # Hook didn't run, returns None


class TestDecoratorAttributes:
    """Test that decorators set proper attributes on functions."""

    def test_before_llm_call_sets_attribute(self):
        """Test that decorator sets is_before_llm_call_hook attribute."""

        @before_llm_call
        def test_hook(context):
            pass

        assert hasattr(test_hook, "is_before_llm_call_hook")
        assert test_hook.is_before_llm_call_hook is True

    def test_before_tool_call_sets_attributes_with_filters(self):
        """Test that decorator with filters sets filter attributes."""

        @before_tool_call(tools=["delete_file"], agents=["Dev"])
        def test_hook(context):
            return None

        assert hasattr(test_hook, "is_before_tool_call_hook")
        assert test_hook.is_before_tool_call_hook is True
        assert hasattr(test_hook, "_filter_tools")
        assert test_hook._filter_tools == ["delete_file"]
        assert hasattr(test_hook, "_filter_agents")
        assert test_hook._filter_agents == ["Dev"]


class TestMultipleDecorators:
    """Test using multiple decorators together."""

    def test_multiple_decorators_all_register(self):
        """Test that multiple decorated functions all register."""

        @before_llm_call
        def hook1(context):
            pass

        @before_llm_call
        def hook2(context):
            pass

        @after_llm_call
        def hook3(context):
            return None

        before_hooks = get_before_llm_call_hooks()
        after_hooks = get_after_llm_call_hooks()

        assert len(before_hooks) == 2
        assert len(after_hooks) == 1

    def test_decorator_and_manual_registration_work_together(self):
        """Test that decorators and manual registration can be mixed."""
        from crewai.hooks import register_before_tool_call_hook

        @before_tool_call
        def decorated_hook(context):
            return None

        def manual_hook(context):
            return None

        register_before_tool_call_hook(manual_hook)

        hooks = get_before_tool_call_hooks()

        assert len(hooks) == 2
395 lib/crewai/tests/hooks/test_human_approval.py Normal file
@@ -0,0 +1,395 @@
"""Tests for human approval functionality in hooks."""

from __future__ import annotations

from unittest.mock import Mock, patch

from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
import pytest


@pytest.fixture
def mock_executor():
    """Create a mock executor for LLM hook context."""
    executor = Mock()
    executor.messages = [{"role": "system", "content": "Test message"}]
    executor.agent = Mock(role="Test Agent")
    executor.task = Mock(description="Test Task")
    executor.crew = Mock()
    executor.llm = Mock()
    executor.iterations = 0
    return executor


@pytest.fixture
def mock_tool():
    """Create a mock tool for tool hook context."""
    tool = Mock()
    tool.name = "test_tool"
    tool.description = "Test tool description"
    return tool


@pytest.fixture
def mock_agent():
    """Create a mock agent."""
    agent = Mock()
    agent.role = "Test Agent"
    return agent


@pytest.fixture
def mock_task():
    """Create a mock task."""
    task = Mock()
    task.description = "Test task"
    return task


class TestLLMHookHumanInput:
    """Test request_human_input() on LLMCallHookContext."""

    @patch("builtins.input", return_value="test response")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_returns_user_response(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that request_human_input returns the user's input."""
        # Setup mock formatter
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = LLMCallHookContext(executor=mock_executor)

        response = context.request_human_input(
            prompt="Test prompt", default_message="Test default message"
        )

        assert response == "test response"
        mock_input.assert_called_once()

    @patch("builtins.input", return_value="")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_returns_empty_string_on_enter(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that pressing Enter returns empty string."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = LLMCallHookContext(executor=mock_executor)

        response = context.request_human_input(prompt="Test")

        assert response == ""
        mock_input.assert_called_once()

    @patch("builtins.input", return_value="test")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_pauses_and_resumes_live_updates(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that live updates are paused and resumed."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = LLMCallHookContext(executor=mock_executor)

        context.request_human_input(prompt="Test")

        # Verify pause was called
        mock_formatter.pause_live_updates.assert_called_once()

        # Verify resume was called
        mock_formatter.resume_live_updates.assert_called_once()

    @patch("builtins.input", side_effect=Exception("Input error"))
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_resumes_on_exception(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that live updates are resumed even if input raises exception."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = LLMCallHookContext(executor=mock_executor)

        with pytest.raises(Exception, match="Input error"):
            context.request_human_input(prompt="Test")

        # Verify resume was still called (in finally block)
        mock_formatter.resume_live_updates.assert_called_once()

    @patch("builtins.input", return_value=" test response ")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_strips_whitespace(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that user input is stripped of leading/trailing whitespace."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = LLMCallHookContext(executor=mock_executor)

        response = context.request_human_input(prompt="Test")

        assert response == "test response"  # Whitespace stripped


class TestToolHookHumanInput:
    """Test request_human_input() on ToolCallHookContext."""

    @patch("builtins.input", return_value="approve")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_request_human_input_returns_user_response(
        self, mock_event_listener, mock_input, mock_tool, mock_agent, mock_task
    ):
        """Test that request_human_input returns the user's input."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={"arg": "value"},
            tool=mock_tool,
            agent=mock_agent,
            task=mock_task,
        )

        response = context.request_human_input(
            prompt="Approve this tool?", default_message="Type 'approve':"
        )

        assert response == "approve"
        mock_input.assert_called_once()

    @patch("builtins.input", return_value="")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_request_human_input_handles_empty_input(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that empty input (Enter key) is handled correctly."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )

        response = context.request_human_input(prompt="Test")

        assert response == ""

    @patch("builtins.input", return_value="test")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_request_human_input_pauses_and_resumes(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that live updates are properly paused and resumed."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )

        context.request_human_input(prompt="Test")

        mock_formatter.pause_live_updates.assert_called_once()
        mock_formatter.resume_live_updates.assert_called_once()

    @patch("builtins.input", side_effect=KeyboardInterrupt)
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_request_human_input_resumes_on_keyboard_interrupt(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that live updates are resumed even on keyboard interrupt."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )

        with pytest.raises(KeyboardInterrupt):
            context.request_human_input(prompt="Test")

        # Verify resume was still called (in finally block)
        mock_formatter.resume_live_updates.assert_called_once()


class TestApprovalHookIntegration:
    """Test integration scenarios with approval hooks."""

    @patch("builtins.input", return_value="approve")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_approval_hook_allows_execution(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that approval hook allows execution when approved."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        def approval_hook(context: ToolCallHookContext) -> bool | None:
            response = context.request_human_input(
                prompt="Approve?", default_message="Type 'approve':"
            )
            return None if response == "approve" else False

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )

        result = approval_hook(context)

        assert result is None  # Allowed
        assert mock_input.called

    @patch("builtins.input", return_value="deny")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_approval_hook_blocks_execution(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that approval hook blocks execution when denied."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        def approval_hook(context: ToolCallHookContext) -> bool | None:
            response = context.request_human_input(
                prompt="Approve?", default_message="Type 'approve':"
            )
            return None if response == "approve" else False

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )

        result = approval_hook(context)

        assert result is False  # Blocked
        assert mock_input.called

    @patch("builtins.input", return_value="modified result")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_review_hook_modifies_result(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that review hook can modify tool results."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        def review_hook(context: ToolCallHookContext) -> str | None:
            response = context.request_human_input(
                prompt="Review result",
                default_message="Press Enter to keep, or provide modified version:",
            )
            return response if response else None

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
            tool_result="original result",
        )

        modified_result = review_hook(context)

        assert modified_result == "modified result"
        assert mock_input.called

    @patch("builtins.input", return_value="")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_review_hook_keeps_original_on_enter(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that pressing Enter keeps original result."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        def review_hook(context: ToolCallHookContext) -> str | None:
            response = context.request_human_input(
                prompt="Review result", default_message="Press Enter to keep:"
            )
            return response if response else None

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
            tool_result="original result",
        )

        modified_result = review_hook(context)

        assert modified_result is None  # Keep original


class TestCostControlApproval:
    """Test cost control approval hook scenarios."""

    @patch("builtins.input", return_value="yes")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_cost_control_allows_when_approved(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that expensive calls are allowed when approved."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        # Set high iteration count
        mock_executor.iterations = 10

        def cost_control_hook(context: LLMCallHookContext) -> None:
            if context.iterations > 5:
                response = context.request_human_input(
                    prompt=f"Iteration {context.iterations} - expensive call",
                    default_message="Type 'yes' to continue:",
                )
                if response.lower() != "yes":
                    print("Call blocked")

        context = LLMCallHookContext(executor=mock_executor)

        # Should not raise exception and should call input
        cost_control_hook(context)
        assert mock_input.called

    @patch("builtins.input", return_value="no")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_cost_control_logs_when_denied(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that denied calls are logged."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter

        mock_executor.iterations = 10

        messages_logged = []

        def cost_control_hook(context: LLMCallHookContext) -> None:
            if context.iterations > 5:
                response = context.request_human_input(
                    prompt=f"Iteration {context.iterations}",
                    default_message="Type 'yes' to continue:",
                )
                if response.lower() != "yes":
                    messages_logged.append("blocked")

        context = LLMCallHookContext(executor=mock_executor)

        cost_control_hook(context)

        assert len(messages_logged) == 1
        assert messages_logged[0] == "blocked"
311 lib/crewai/tests/hooks/test_llm_hooks.py Normal file
@@ -0,0 +1,311 @@
"""Unit tests for LLM hooks functionality."""

from __future__ import annotations

from unittest.mock import Mock

import pytest

from crewai.hooks import (
    clear_all_llm_call_hooks,
    unregister_after_llm_call_hook,
    unregister_before_llm_call_hook,
)
from crewai.hooks.llm_hooks import (
    LLMCallHookContext,
    get_after_llm_call_hooks,
    get_before_llm_call_hooks,
    register_after_llm_call_hook,
    register_before_llm_call_hook,
)


@pytest.fixture
def mock_executor():
    """Create a mock executor for testing."""
    executor = Mock()
    executor.messages = [{"role": "system", "content": "Test message"}]
    executor.agent = Mock(role="Test Agent")
    executor.task = Mock(description="Test Task")
    executor.crew = Mock()
    executor.llm = Mock()
    executor.iterations = 0
    return executor


@pytest.fixture(autouse=True)
def clear_hooks():
    """Clear global hooks before and after each test."""
    # Import the private variables to clear them
    from crewai.hooks import llm_hooks

    # Store original hooks
    original_before = llm_hooks._before_llm_call_hooks.copy()
    original_after = llm_hooks._after_llm_call_hooks.copy()

    # Clear hooks
    llm_hooks._before_llm_call_hooks.clear()
    llm_hooks._after_llm_call_hooks.clear()

    yield

    # Restore original hooks
    llm_hooks._before_llm_call_hooks.clear()
    llm_hooks._after_llm_call_hooks.clear()
    llm_hooks._before_llm_call_hooks.extend(original_before)
    llm_hooks._after_llm_call_hooks.extend(original_after)


class TestLLMCallHookContext:
    """Test LLMCallHookContext initialization and attributes."""

    def test_context_initialization(self, mock_executor):
        """Test that context is initialized correctly with executor."""
        context = LLMCallHookContext(executor=mock_executor)

        assert context.executor == mock_executor
        assert context.messages == mock_executor.messages
        assert context.agent == mock_executor.agent
        assert context.task == mock_executor.task
        assert context.crew == mock_executor.crew
        assert context.llm == mock_executor.llm
        assert context.iterations == mock_executor.iterations
        assert context.response is None

    def test_context_with_response(self, mock_executor):
        """Test that context includes response when provided."""
        test_response = "Test LLM response"
        context = LLMCallHookContext(executor=mock_executor, response=test_response)

        assert context.response == test_response

    def test_messages_are_mutable_reference(self, mock_executor):
        """Test that modifying context.messages modifies executor.messages."""
        context = LLMCallHookContext(executor=mock_executor)

        # Add a message through context
        new_message = {"role": "user", "content": "New message"}
        context.messages.append(new_message)

        # Check that executor.messages is also modified
        assert new_message in mock_executor.messages
        assert len(mock_executor.messages) == 2


class TestBeforeLLMCallHooks:
    """Test before_llm_call hook registration and execution."""

    def test_register_before_hook(self):
        """Test that before hooks are registered correctly."""

        def test_hook(context):
            pass

        register_before_llm_call_hook(test_hook)
        hooks = get_before_llm_call_hooks()

        assert len(hooks) == 1
        assert hooks[0] == test_hook

    def test_multiple_before_hooks(self):
        """Test that multiple before hooks can be registered."""

        def hook1(context):
            pass

        def hook2(context):
            pass

        register_before_llm_call_hook(hook1)
        register_before_llm_call_hook(hook2)
        hooks = get_before_llm_call_hooks()

        assert len(hooks) == 2
        assert hook1 in hooks
        assert hook2 in hooks

    def test_before_hook_can_modify_messages(self, mock_executor):
        """Test that before hooks can modify messages in-place."""

        def add_message_hook(context):
            context.messages.append({"role": "system", "content": "Added by hook"})

        context = LLMCallHookContext(executor=mock_executor)
        add_message_hook(context)

        assert len(context.messages) == 2
        assert context.messages[1]["content"] == "Added by hook"

    def test_get_before_hooks_returns_copy(self):
        """Test that get_before_llm_call_hooks returns a copy."""

        def test_hook(context):
            pass

        register_before_llm_call_hook(test_hook)
        hooks1 = get_before_llm_call_hooks()
        hooks2 = get_before_llm_call_hooks()

        # They should be equal but not the same object
        assert hooks1 == hooks2
        assert hooks1 is not hooks2


class TestAfterLLMCallHooks:
    """Test after_llm_call hook registration and execution."""

    def test_register_after_hook(self):
        """Test that after hooks are registered correctly."""

        def test_hook(context):
            return None

        register_after_llm_call_hook(test_hook)
        hooks = get_after_llm_call_hooks()

        assert len(hooks) == 1
        assert hooks[0] == test_hook

    def test_multiple_after_hooks(self):
        """Test that multiple after hooks can be registered."""

        def hook1(context):
            return None

        def hook2(context):
            return None

        register_after_llm_call_hook(hook1)
        register_after_llm_call_hook(hook2)
        hooks = get_after_llm_call_hooks()

        assert len(hooks) == 2
        assert hook1 in hooks
        assert hook2 in hooks

    def test_after_hook_can_modify_response(self, mock_executor):
        """Test that after hooks can modify the response."""
        original_response = "Original response"

        def modify_response_hook(context):
            if context.response:
                return context.response.replace("Original", "Modified")
            return None

        context = LLMCallHookContext(executor=mock_executor, response=original_response)
        modified = modify_response_hook(context)

        assert modified == "Modified response"

    def test_after_hook_returns_none_keeps_original(self, mock_executor):
        """Test that returning None keeps the original response."""
        original_response = "Original response"

        def no_change_hook(context):
            return None

        context = LLMCallHookContext(executor=mock_executor, response=original_response)
        result = no_change_hook(context)

        assert result is None
        assert context.response == original_response

    def test_get_after_hooks_returns_copy(self):
        """Test that get_after_llm_call_hooks returns a copy."""

        def test_hook(context):
            return None

        register_after_llm_call_hook(test_hook)
        hooks1 = get_after_llm_call_hooks()
        hooks2 = get_after_llm_call_hooks()

        # They should be equal but not the same object
        assert hooks1 == hooks2
        assert hooks1 is not hooks2


class TestLLMHooksIntegration:
    """Test integration scenarios with multiple hooks."""

    def test_multiple_before_hooks_execute_in_order(self, mock_executor):
        """Test that multiple before hooks execute in registration order."""
        execution_order = []

        def hook1(context):
            execution_order.append(1)

        def hook2(context):
            execution_order.append(2)

        def hook3(context):
            execution_order.append(3)

        register_before_llm_call_hook(hook1)
        register_before_llm_call_hook(hook2)
        register_before_llm_call_hook(hook3)

        context = LLMCallHookContext(executor=mock_executor)
        hooks = get_before_llm_call_hooks()

        for hook in hooks:
            hook(context)

        assert execution_order == [1, 2, 3]

    def test_multiple_after_hooks_chain_modifications(self, mock_executor):
        """Test that multiple after hooks can chain modifications."""

        def hook1(context):
            if context.response:
                return context.response + " [hook1]"
            return None

        def hook2(context):
            if context.response:
                return context.response + " [hook2]"
            return None

        register_after_llm_call_hook(hook1)
        register_after_llm_call_hook(hook2)

        context = LLMCallHookContext(executor=mock_executor, response="Original")
        hooks = get_after_llm_call_hooks()

        # Simulate chaining (how it would be used in practice)
        result = context.response
        for hook in hooks:
            # Update context for next hook
            context.response = result
            modified = hook(context)
            if modified is not None:
                result = modified

        assert result == "Original [hook1] [hook2]"

    def test_unregister_before_hook(self):
        """Test that before hooks can be unregistered."""

        def test_hook(context):
            pass

        register_before_llm_call_hook(test_hook)
        unregister_before_llm_call_hook(test_hook)
        hooks = get_before_llm_call_hooks()
        assert len(hooks) == 0

    def test_unregister_after_hook(self):
        """Test that after hooks can be unregistered."""

        def test_hook(context):
            return None

        register_after_llm_call_hook(test_hook)
        unregister_after_llm_call_hook(test_hook)
        hooks = get_after_llm_call_hooks()
        assert len(hooks) == 0

    def test_clear_all_llm_call_hooks(self):
        """Test that all llm call hooks can be cleared."""

        def test_hook(context):
            pass

        register_before_llm_call_hook(test_hook)
        register_after_llm_call_hook(test_hook)
        clear_all_llm_call_hooks()
        hooks = get_before_llm_call_hooks()
        assert len(hooks) == 0
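Taken together, the tests above pin down the LLM hook contract: before-hooks mutate context.messages in place (the list is a live reference to executor.messages), after-hooks return a replacement response or None to keep the original, and hooks run in registration order. A minimal usage sketch under those assumptions; the hook names trim_history and redact_secrets are illustrative, not part of this change:

from crewai.hooks.llm_hooks import (
    LLMCallHookContext,
    register_after_llm_call_hook,
    register_before_llm_call_hook,
)


def trim_history(context: LLMCallHookContext) -> None:
    # Before-hook: edit messages in place; no return value is needed.
    if len(context.messages) > 20:
        # Keep the system prompt plus the most recent 19 messages.
        del context.messages[1:-19]


def redact_secrets(context: LLMCallHookContext) -> str | None:
    # After-hook: return a replacement string, or None to keep the original.
    # The redacted token here is a hypothetical example value.
    if context.response and "SECRET_KEY=abc123" in context.response:
        return context.response.replace("SECRET_KEY=abc123", "SECRET_KEY=[REDACTED]")
    return None


register_before_llm_call_hook(trim_history)
register_after_llm_call_hook(redact_secrets)

The chaining loop in test_multiple_after_hooks_chain_modifications shows how a caller is expected to apply several after-hooks: feed each hook the latest response and keep its return value only when it is not None.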
lib/crewai/tests/hooks/test_tool_hooks.py (Normal file, 498 lines added)
@@ -0,0 +1,498 @@
from __future__ import annotations

from unittest.mock import Mock

import pytest

from crewai.hooks import (
    clear_all_tool_call_hooks,
    unregister_after_tool_call_hook,
    unregister_before_tool_call_hook,
)
from crewai.hooks.tool_hooks import (
    ToolCallHookContext,
    get_after_tool_call_hooks,
    get_before_tool_call_hooks,
    register_after_tool_call_hook,
    register_before_tool_call_hook,
)


@pytest.fixture
def mock_tool():
    """Create a mock tool for testing."""
    tool = Mock()
    tool.name = "test_tool"
    tool.description = "Test tool description"
    return tool


@pytest.fixture
def mock_agent():
    """Create a mock agent for testing."""
    agent = Mock()
    agent.role = "Test Agent"
    return agent


@pytest.fixture
def mock_task():
    """Create a mock task for testing."""
    task = Mock()
    task.description = "Test task"
    return task


@pytest.fixture
def mock_crew():
    """Create a mock crew for testing."""
    crew = Mock()
    return crew


@pytest.fixture(autouse=True)
def clear_hooks():
    """Clear global hooks before and after each test."""
    from crewai.hooks import tool_hooks

    # Store original hooks
    original_before = tool_hooks._before_tool_call_hooks.copy()
    original_after = tool_hooks._after_tool_call_hooks.copy()

    # Clear hooks
    tool_hooks._before_tool_call_hooks.clear()
    tool_hooks._after_tool_call_hooks.clear()

    yield

    # Restore original hooks
    tool_hooks._before_tool_call_hooks.clear()
    tool_hooks._after_tool_call_hooks.clear()
    tool_hooks._before_tool_call_hooks.extend(original_before)
    tool_hooks._after_tool_call_hooks.extend(original_after)


class TestToolCallHookContext:
    """Test ToolCallHookContext initialization and attributes."""

    def test_context_initialization(self, mock_tool, mock_agent, mock_task, mock_crew):
        """Test that context is initialized correctly."""
        tool_input = {"arg1": "value1", "arg2": "value2"}

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            agent=mock_agent,
            task=mock_task,
            crew=mock_crew,
        )

        assert context.tool_name == "test_tool"
        assert context.tool_input == tool_input
        assert context.tool == mock_tool
        assert context.agent == mock_agent
        assert context.task == mock_task
        assert context.crew == mock_crew
        assert context.tool_result is None

    def test_context_with_result(self, mock_tool):
        """Test that context includes result when provided."""
        tool_input = {"arg1": "value1"}
        tool_result = "Test tool result"

        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result=tool_result,
        )

        assert context.tool_result == tool_result

    def test_tool_input_is_mutable_reference(self, mock_tool):
        """Test that modifying context.tool_input modifies the original dict."""
        tool_input = {"arg1": "value1"}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        # Modify through context
        context.tool_input["arg2"] = "value2"

        # Check that original dict is also modified
        assert "arg2" in tool_input
        assert tool_input["arg2"] == "value2"


class TestBeforeToolCallHooks:
    """Test before_tool_call hook registration and execution."""

    def test_register_before_hook(self):
        """Test that before hooks are registered correctly."""

        def test_hook(context):
            return None

        register_before_tool_call_hook(test_hook)
        hooks = get_before_tool_call_hooks()

        assert len(hooks) == 1
        assert hooks[0] == test_hook

    def test_multiple_before_hooks(self):
        """Test that multiple before hooks can be registered."""

        def hook1(context):
            return None

        def hook2(context):
            return None

        register_before_tool_call_hook(hook1)
        register_before_tool_call_hook(hook2)
        hooks = get_before_tool_call_hooks()

        assert len(hooks) == 2
        assert hook1 in hooks
        assert hook2 in hooks

    def test_before_hook_can_block_execution(self, mock_tool):
        """Test that before hooks can block tool execution."""

        def block_hook(context):
            if context.tool_name == "dangerous_tool":
                return False  # Block execution
            return None  # Allow execution

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="dangerous_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        result = block_hook(context)
        assert result is False

    def test_before_hook_can_allow_execution(self, mock_tool):
        """Test that before hooks can explicitly allow execution."""

        def allow_hook(context):
            return None  # Allow execution

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="safe_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        result = allow_hook(context)
        assert result is None

    def test_before_hook_can_modify_input(self, mock_tool):
        """Test that before hooks can modify tool input in-place."""

        def modify_input_hook(context):
            context.tool_input["modified_by_hook"] = True
            return None

        tool_input = {"arg1": "value1"}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        modify_input_hook(context)

        assert "modified_by_hook" in context.tool_input
        assert context.tool_input["modified_by_hook"] is True

    def test_get_before_hooks_returns_copy(self):
        """Test that get_before_tool_call_hooks returns a copy."""

        def test_hook(context):
            return None

        register_before_tool_call_hook(test_hook)
        hooks1 = get_before_tool_call_hooks()
        hooks2 = get_before_tool_call_hooks()

        # They should be equal but not the same object
        assert hooks1 == hooks2
        assert hooks1 is not hooks2


class TestAfterToolCallHooks:
    """Test after_tool_call hook registration and execution."""

    def test_register_after_hook(self):
        """Test that after hooks are registered correctly."""

        def test_hook(context):
            return None

        register_after_tool_call_hook(test_hook)
        hooks = get_after_tool_call_hooks()

        assert len(hooks) == 1
        assert hooks[0] == test_hook

    def test_multiple_after_hooks(self):
        """Test that multiple after hooks can be registered."""

        def hook1(context):
            return None

        def hook2(context):
            return None

        register_after_tool_call_hook(hook1)
        register_after_tool_call_hook(hook2)
        hooks = get_after_tool_call_hooks()

        assert len(hooks) == 2
        assert hook1 in hooks
        assert hook2 in hooks

    def test_after_hook_can_modify_result(self, mock_tool):
        """Test that after hooks can modify the tool result."""
        original_result = "Original result"

        def modify_result_hook(context):
            if context.tool_result:
                return context.tool_result.replace("Original", "Modified")
            return None

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result=original_result,
        )

        modified = modify_result_hook(context)
        assert modified == "Modified result"

    def test_after_hook_returns_none_keeps_original(self, mock_tool):
        """Test that returning None keeps the original result."""
        original_result = "Original result"

        def no_change_hook(context):
            return None

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result=original_result,
        )

        result = no_change_hook(context)

        assert result is None
        assert context.tool_result == original_result

    def test_get_after_hooks_returns_copy(self):
        """Test that get_after_tool_call_hooks returns a copy."""

        def test_hook(context):
            return None

        register_after_tool_call_hook(test_hook)
        hooks1 = get_after_tool_call_hooks()
        hooks2 = get_after_tool_call_hooks()

        # They should be equal but not the same object
        assert hooks1 == hooks2
        assert hooks1 is not hooks2


class TestToolHooksIntegration:
    """Test integration scenarios with multiple hooks."""

    def test_multiple_before_hooks_execute_in_order(self, mock_tool):
        """Test that multiple before hooks execute in registration order."""
        execution_order = []

        def hook1(context):
            execution_order.append(1)
            return None

        def hook2(context):
            execution_order.append(2)
            return None

        def hook3(context):
            execution_order.append(3)
            return None

        register_before_tool_call_hook(hook1)
        register_before_tool_call_hook(hook2)
        register_before_tool_call_hook(hook3)

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        hooks = get_before_tool_call_hooks()
        for hook in hooks:
            hook(context)

        assert execution_order == [1, 2, 3]

    def test_first_blocking_hook_stops_execution(self, mock_tool):
        """Test that first hook returning False blocks execution."""
        execution_order = []

        def hook1(context):
            execution_order.append(1)
            return None  # Allow

        def hook2(context):
            execution_order.append(2)
            return False  # Block

        def hook3(context):
            execution_order.append(3)
            return None  # This shouldn't run

        register_before_tool_call_hook(hook1)
        register_before_tool_call_hook(hook2)
        register_before_tool_call_hook(hook3)

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )

        hooks = get_before_tool_call_hooks()
        blocked = False
        for hook in hooks:
            result = hook(context)
            if result is False:
                blocked = True
                break

        assert blocked is True
        assert execution_order == [1, 2]  # hook3 didn't run

    def test_multiple_after_hooks_chain_modifications(self, mock_tool):
        """Test that multiple after hooks can chain modifications."""

        def hook1(context):
            if context.tool_result:
                return context.tool_result + " [hook1]"
            return None

        def hook2(context):
            if context.tool_result:
                return context.tool_result + " [hook2]"
            return None

        register_after_tool_call_hook(hook1)
        register_after_tool_call_hook(hook2)

        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result="Original",
        )

        hooks = get_after_tool_call_hooks()

        # Simulate chaining (how it would be used in practice)
        result = context.tool_result
        for hook in hooks:
            # Update context for next hook
            context.tool_result = result
            modified = hook(context)
            if modified is not None:
                result = modified

        assert result == "Original [hook1] [hook2]"

    def test_hooks_with_validation_and_sanitization(self, mock_tool):
        """Test a realistic scenario with validation and sanitization hooks."""

        # Validation hook (before)
        def validate_file_path(context):
            if context.tool_name == "write_file":
                file_path = context.tool_input.get("file_path", "")
                if ".env" in file_path:
                    return False  # Block sensitive files
            return None

        # Sanitization hook (after)
        def sanitize_secrets(context):
            if context.tool_result and "SECRET_KEY" in context.tool_result:
                return context.tool_result.replace("SECRET_KEY=abc123", "SECRET_KEY=[REDACTED]")
            return None

        register_before_tool_call_hook(validate_file_path)
        register_after_tool_call_hook(sanitize_secrets)

        # Test blocking
        blocked_context = ToolCallHookContext(
            tool_name="write_file",
            tool_input={"file_path": ".env"},
            tool=mock_tool,
        )

        before_hooks = get_before_tool_call_hooks()
        blocked = False
        for hook in before_hooks:
            if hook(blocked_context) is False:
                blocked = True
                break

        assert blocked is True

        # Test sanitization
        sanitize_context = ToolCallHookContext(
            tool_name="read_file",
            tool_input={"file_path": "config.txt"},
            tool=mock_tool,
            tool_result="Content: SECRET_KEY=abc123",
        )

        after_hooks = get_after_tool_call_hooks()
        result = sanitize_context.tool_result
        for hook in after_hooks:
            sanitize_context.tool_result = result
            modified = hook(sanitize_context)
            if modified is not None:
                result = modified

        assert "SECRET_KEY=[REDACTED]" in result
        assert "abc123" not in result

    def test_unregister_before_hook(self):
        """Test that before hooks can be unregistered."""

        def test_hook(context):
            pass

        register_before_tool_call_hook(test_hook)
        unregister_before_tool_call_hook(test_hook)
        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 0

    def test_unregister_after_hook(self):
        """Test that after hooks can be unregistered."""

        def test_hook(context):
            return None

        register_after_tool_call_hook(test_hook)
        unregister_after_tool_call_hook(test_hook)
        hooks = get_after_tool_call_hooks()
        assert len(hooks) == 0

    def test_clear_all_tool_call_hooks(self):
        """Test that all tool call hooks can be cleared."""

        def test_hook(context):
            pass

        register_before_tool_call_hook(test_hook)
        register_after_tool_call_hook(test_hook)
        clear_all_tool_call_hooks()
        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 0
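The tool hook contract mirrors the LLM one, with one addition: a before-hook can return False to veto the call outright, and the first veto stops the chain (test_first_blocking_hook_stops_execution). A sketch of the apply loops the integration tests simulate; the apply_* helper names are illustrative, not part of this change:

from crewai.hooks.tool_hooks import (
    ToolCallHookContext,
    get_after_tool_call_hooks,
    get_before_tool_call_hooks,
)


def apply_before_tool_hooks(context: ToolCallHookContext) -> bool:
    # Return False as soon as any hook vetoes the call; later hooks never run.
    for hook in get_before_tool_call_hooks():
        if hook(context) is False:
            return False
    return True


def apply_after_tool_hooks(context: ToolCallHookContext) -> str | None:
    # Thread the latest result through each hook; a None return means "no change".
    result = context.tool_result
    for hook in get_after_tool_call_hooks():
        context.tool_result = result
        modified = hook(context)
        if modified is not None:
            result = modified
    return result

Input mutation needs no such plumbing: tool_input is shared by reference (test_tool_input_is_mutable_reference), so in-place edits reach the tool directly.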