Merge branch 'main' into Branch_2260

Vidit-Ostwal
2025-03-27 08:11:17 +05:30
15 changed files with 1108 additions and 45 deletions

View File

@@ -20,7 +20,6 @@ from crewai.utilities import I18N, Printer
 from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
 from crewai.utilities.events import (
     ToolUsageErrorEvent,
-    ToolUsageStartedEvent,
     crewai_event_bus,
 )
 from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
@@ -153,8 +152,21 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 formatted_answer = self._process_llm_response(answer)

                 if isinstance(formatted_answer, AgentAction):
+                    # Extract agent fingerprint if available
+                    fingerprint_context = {}
+                    if (
+                        self.agent
+                        and hasattr(self.agent, "security_config")
+                        and hasattr(self.agent.security_config, "fingerprint")
+                    ):
+                        fingerprint_context = {
+                            "agent_fingerprint": str(
+                                self.agent.security_config.fingerprint
+                            )
+                        }
+
                     tool_result = self._execute_tool_and_check_finality(
-                        formatted_answer
+                        formatted_answer, fingerprint_context=fingerprint_context
                     )
                     formatted_answer = self._handle_agent_action(
                         formatted_answer, tool_result
@@ -360,19 +372,35 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n" content=f"\033[95m## Final Answer:\033[00m \033[92m\n{formatted_answer.output}\033[00m\n\n"
) )
def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult: def _execute_tool_and_check_finality(
self,
agent_action: AgentAction,
fingerprint_context: Optional[Dict[str, str]] = None,
) -> ToolResult:
try: try:
fingerprint_context = fingerprint_context or {}
if self.agent: if self.agent:
# Create tool usage event with fingerprint information
event_data = {
"agent_key": self.agent.key,
"agent_role": self.agent.role,
"tool_name": agent_action.tool,
"tool_args": agent_action.tool_input,
"tool_class": agent_action.tool,
"agent": self.agent, # Pass the agent object for fingerprint extraction
}
# Include fingerprint context
if fingerprint_context:
event_data.update(fingerprint_context)
# Emit the tool usage started event with agent information
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
event=ToolUsageStartedEvent( event=ToolUsageStartedEvent(**event_data),
agent_key=self.agent.key,
agent_role=self.agent.role,
tool_name=agent_action.tool,
tool_args=agent_action.tool_input,
tool_class=agent_action.tool,
),
) )
tool_usage = ToolUsage( tool_usage = ToolUsage(
tools_handler=self.tools_handler, tools_handler=self.tools_handler,
tools=self.tools, tools=self.tools,
@@ -383,6 +411,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 task=self.task,  # type: ignore[arg-type]
                 agent=self.agent,
                 action=agent_action,
+                fingerprint_context=fingerprint_context,  # Pass fingerprint context
             )
             tool_calling = tool_usage.parse_tool_calling(agent_action.text)
@@ -411,16 +440,23 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         except Exception as e:
             # TODO: drop
             if self.agent:
+                error_event_data = {
+                    "agent_key": self.agent.key,
+                    "agent_role": self.agent.role,
+                    "tool_name": agent_action.tool,
+                    "tool_args": agent_action.tool_input,
+                    "tool_class": agent_action.tool,
+                    "error": str(e),
+                    "agent": self.agent,  # Pass the agent object for fingerprint extraction
+                }
+
+                # Include fingerprint context
+                if fingerprint_context:
+                    error_event_data.update(fingerprint_context)
+
                 crewai_event_bus.emit(
                     self,
-                    event=ToolUsageErrorEvent(  # validation error
-                        agent_key=self.agent.key,
-                        agent_role=self.agent.role,
-                        tool_name=agent_action.tool,
-                        tool_args=agent_action.tool_input,
-                        tool_class=agent_action.tool,
-                        error=str(e),
-                    ),
+                    event=ToolUsageErrorEvent(**error_event_data),
                 )
             raise e
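
Editor's sketch: the pattern above (build a small fingerprint-context dict from the agent's security config and thread it into the tool-execution path) can be illustrated with a minimal standalone example. The classes below are simplified stand-ins for the real crewai types, not the project's implementation.

# Minimal standalone sketch of the fingerprint-context pattern above.
# Fingerprint, SecurityConfig and Agent here are illustrative stand-ins.
from dataclasses import dataclass, field
from typing import Dict, Optional
import uuid


@dataclass
class Fingerprint:
    uuid_str: str = field(default_factory=lambda: str(uuid.uuid4()))


@dataclass
class SecurityConfig:
    fingerprint: Fingerprint = field(default_factory=Fingerprint)


@dataclass
class Agent:
    security_config: Optional[SecurityConfig] = None


def build_fingerprint_context(agent: Optional[Agent]) -> Dict[str, str]:
    """Return {'agent_fingerprint': ...} when the agent carries one, else {}."""
    if (
        agent
        and hasattr(agent, "security_config")
        and hasattr(agent.security_config, "fingerprint")
    ):
        return {"agent_fingerprint": str(agent.security_config.fingerprint)}
    return {}


if __name__ == "__main__":
    print(build_fingerprint_context(Agent(security_config=SecurityConfig())))
    print(build_fingerprint_context(Agent()))  # no security_config -> {}
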

View File

@@ -388,7 +388,7 @@ class Task(BaseModel):
         tools = tools or self.tools or []
         self.processed_by_agents.add(agent.role)

-        crewai_event_bus.emit(self, TaskStartedEvent(context=context))
+        crewai_event_bus.emit(self, TaskStartedEvent(context=context, task=self))
         result = agent.execute_task(
             task=self,
             context=context,
@@ -464,11 +464,11 @@ class Task(BaseModel):
                     )
                 )
                 self._save_file(content)

-            crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output))
+            crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output, task=self))
             return task_output
         except Exception as e:
             self.end_time = datetime.datetime.now()
-            crewai_event_bus.emit(self, TaskFailedEvent(error=str(e)))
+            crewai_event_bus.emit(self, TaskFailedEvent(error=str(e), task=self))
             raise e  # Re-raise the exception after emitting the event

     def prompt(self) -> str:

View File

@@ -112,6 +112,23 @@ class Telemetry:
self._add_attribute(span, "crew_memory", crew.memory) self._add_attribute(span, "crew_memory", crew.memory)
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks)) self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
self._add_attribute(span, "crew_number_of_agents", len(crew.agents)) self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
# Add fingerprint data
if hasattr(crew, "fingerprint") and crew.fingerprint:
self._add_attribute(span, "crew_fingerprint", crew.fingerprint.uuid_str)
self._add_attribute(
span,
"crew_fingerprint_created_at",
crew.fingerprint.created_at.isoformat(),
)
# Add fingerprint metadata if it exists
if hasattr(crew.fingerprint, "metadata") and crew.fingerprint.metadata:
self._add_attribute(
span,
"crew_fingerprint_metadata",
json.dumps(crew.fingerprint.metadata),
)
if crew.share_crew: if crew.share_crew:
self._add_attribute( self._add_attribute(
span, span,
@@ -129,17 +146,43 @@ class Telemetry:
"max_rpm": agent.max_rpm, "max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file, "i18n": agent.i18n.prompt_file,
"function_calling_llm": ( "function_calling_llm": (
agent.function_calling_llm.model getattr(
if agent.function_calling_llm getattr(agent, "function_calling_llm", None),
"model",
"",
)
if getattr(agent, "function_calling_llm", None)
else "" else ""
), ),
"llm": agent.llm.model, "llm": agent.llm.model,
"delegation_enabled?": agent.allow_delegation, "delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution, "allow_code_execution?": getattr(
"max_retry_limit": agent.max_retry_limit, agent, "allow_code_execution", False
),
"max_retry_limit": getattr(agent, "max_retry_limit", 3),
"tools_names": [ "tools_names": [
tool.name.casefold() for tool in agent.tools or [] tool.name.casefold() for tool in agent.tools or []
], ],
# Add agent fingerprint data if sharing crew details
"fingerprint": (
getattr(
getattr(agent, "fingerprint", None),
"uuid_str",
None,
)
),
"fingerprint_created_at": (
created_at.isoformat()
if (
created_at := getattr(
getattr(agent, "fingerprint", None),
"created_at",
None,
)
)
is not None
else None
),
} }
for agent in crew.agents for agent in crew.agents
] ]
@@ -169,6 +212,17 @@ class Telemetry:
"tools_names": [ "tools_names": [
tool.name.casefold() for tool in task.tools or [] tool.name.casefold() for tool in task.tools or []
], ],
# Add task fingerprint data if sharing crew details
"fingerprint": (
task.fingerprint.uuid_str
if hasattr(task, "fingerprint") and task.fingerprint
else None
),
"fingerprint_created_at": (
task.fingerprint.created_at.isoformat()
if hasattr(task, "fingerprint") and task.fingerprint
else None
),
} }
for task in crew.tasks for task in crew.tasks
] ]
@@ -196,14 +250,20 @@ class Telemetry:
"max_iter": agent.max_iter, "max_iter": agent.max_iter,
"max_rpm": agent.max_rpm, "max_rpm": agent.max_rpm,
"function_calling_llm": ( "function_calling_llm": (
agent.function_calling_llm.model getattr(
if agent.function_calling_llm getattr(agent, "function_calling_llm", None),
"model",
"",
)
if getattr(agent, "function_calling_llm", None)
else "" else ""
), ),
"llm": agent.llm.model, "llm": agent.llm.model,
"delegation_enabled?": agent.allow_delegation, "delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution, "allow_code_execution?": getattr(
"max_retry_limit": agent.max_retry_limit, agent, "allow_code_execution", False
),
"max_retry_limit": getattr(agent, "max_retry_limit", 3),
"tools_names": [ "tools_names": [
tool.name.casefold() for tool in agent.tools or [] tool.name.casefold() for tool in agent.tools or []
], ],
@@ -252,6 +312,39 @@ class Telemetry:
self._add_attribute(created_span, "task_key", task.key) self._add_attribute(created_span, "task_key", task.key)
self._add_attribute(created_span, "task_id", str(task.id)) self._add_attribute(created_span, "task_id", str(task.id))
# Add fingerprint data
if hasattr(crew, "fingerprint") and crew.fingerprint:
self._add_attribute(
created_span, "crew_fingerprint", crew.fingerprint.uuid_str
)
if hasattr(task, "fingerprint") and task.fingerprint:
self._add_attribute(
created_span, "task_fingerprint", task.fingerprint.uuid_str
)
self._add_attribute(
created_span,
"task_fingerprint_created_at",
task.fingerprint.created_at.isoformat(),
)
# Add fingerprint metadata if it exists
if hasattr(task.fingerprint, "metadata") and task.fingerprint.metadata:
self._add_attribute(
created_span,
"task_fingerprint_metadata",
json.dumps(task.fingerprint.metadata),
)
# Add agent fingerprint if task has an assigned agent
if hasattr(task, "agent") and task.agent:
agent_fingerprint = getattr(
getattr(task.agent, "fingerprint", None), "uuid_str", None
)
if agent_fingerprint:
self._add_attribute(
created_span, "agent_fingerprint", agent_fingerprint
)
if crew.share_crew: if crew.share_crew:
self._add_attribute( self._add_attribute(
created_span, "formatted_description", task.description created_span, "formatted_description", task.description
@@ -270,6 +363,21 @@ class Telemetry:
self._add_attribute(span, "task_key", task.key) self._add_attribute(span, "task_key", task.key)
self._add_attribute(span, "task_id", str(task.id)) self._add_attribute(span, "task_id", str(task.id))
# Add fingerprint data to execution span
if hasattr(crew, "fingerprint") and crew.fingerprint:
self._add_attribute(span, "crew_fingerprint", crew.fingerprint.uuid_str)
if hasattr(task, "fingerprint") and task.fingerprint:
self._add_attribute(span, "task_fingerprint", task.fingerprint.uuid_str)
# Add agent fingerprint if task has an assigned agent
if hasattr(task, "agent") and task.agent:
agent_fingerprint = getattr(
getattr(task.agent, "fingerprint", None), "uuid_str", None
)
if agent_fingerprint:
self._add_attribute(span, "agent_fingerprint", agent_fingerprint)
if crew.share_crew: if crew.share_crew:
self._add_attribute(span, "formatted_description", task.description) self._add_attribute(span, "formatted_description", task.description)
self._add_attribute( self._add_attribute(
@@ -291,7 +399,12 @@ class Telemetry:
         Note:
             If share_crew is enabled, this will also record the task output
         """
+
         def operation():
+            # Ensure fingerprint data is present on completion span
+            if hasattr(task, "fingerprint") and task.fingerprint:
+                self._add_attribute(span, "task_fingerprint", task.fingerprint.uuid_str)
+
             if crew.share_crew:
                 self._add_attribute(
                     span,
@@ -312,6 +425,7 @@ class Telemetry:
             tool_name (str): Name of the tool being repeatedly used
             attempts (int): Number of attempts made with this tool
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Tool Repeated Usage")
@@ -329,14 +443,16 @@ class Telemetry:
         self._safe_telemetry_operation(operation)

-    def tool_usage(self, llm: Any, tool_name: str, attempts: int):
+    def tool_usage(self, llm: Any, tool_name: str, attempts: int, agent: Any = None):
         """Records the usage of a tool by an agent.

         Args:
             llm (Any): The language model being used
             tool_name (str): Name of the tool being used
             attempts (int): Number of attempts made with this tool
+            agent (Any, optional): The agent using the tool
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Tool Usage")
@@ -349,17 +465,31 @@ class Telemetry:
self._add_attribute(span, "attempts", attempts) self._add_attribute(span, "attempts", attempts)
if llm: if llm:
self._add_attribute(span, "llm", llm.model) self._add_attribute(span, "llm", llm.model)
# Add agent fingerprint data if available
if agent and hasattr(agent, "fingerprint") and agent.fingerprint:
self._add_attribute(
span, "agent_fingerprint", agent.fingerprint.uuid_str
)
if hasattr(agent, "role"):
self._add_attribute(span, "agent_role", agent.role)
span.set_status(Status(StatusCode.OK)) span.set_status(Status(StatusCode.OK))
span.end() span.end()
self._safe_telemetry_operation(operation) self._safe_telemetry_operation(operation)
def tool_usage_error(self, llm: Any): def tool_usage_error(
self, llm: Any, agent: Any = None, tool_name: Optional[str] = None
):
"""Records when a tool usage results in an error. """Records when a tool usage results in an error.
Args: Args:
llm (Any): The language model being used when the error occurred llm (Any): The language model being used when the error occurred
agent (Any, optional): The agent using the tool
tool_name (str, optional): Name of the tool that caused the error
""" """
def operation(): def operation():
tracer = trace.get_tracer("crewai.telemetry") tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Usage Error") span = tracer.start_span("Tool Usage Error")
@@ -370,6 +500,18 @@ class Telemetry:
             )
             if llm:
                 self._add_attribute(span, "llm", llm.model)
+            if tool_name:
+                self._add_attribute(span, "tool_name", tool_name)
+
+            # Add agent fingerprint data if available
+            if agent and hasattr(agent, "fingerprint") and agent.fingerprint:
+                self._add_attribute(
+                    span, "agent_fingerprint", agent.fingerprint.uuid_str
+                )
+                if hasattr(agent, "role"):
+                    self._add_attribute(span, "agent_role", agent.role)
+
             span.set_status(Status(StatusCode.OK))
             span.end()
@@ -386,6 +528,7 @@ class Telemetry:
             exec_time (int): Execution time in seconds
             model_name (str): Name of the model used
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Crew Individual Test Result")
@@ -420,6 +563,7 @@ class Telemetry:
             inputs (dict[str, Any] | None): Input parameters for the test
             model_name (str): Name of the model used in testing
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Crew Test Execution")
@@ -446,6 +590,7 @@ class Telemetry:
     def deploy_signup_error_span(self):
         """Records when an error occurs during the deployment signup process."""
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Deploy Signup Error")
@@ -460,6 +605,7 @@ class Telemetry:
         Args:
             uuid (Optional[str]): Unique identifier for the deployment
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Start Deployment")
@@ -472,6 +618,7 @@ class Telemetry:
     def create_crew_deployment_span(self):
         """Records the creation of a new crew deployment."""
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Create Crew Deployment")
@@ -487,6 +634,7 @@ class Telemetry:
             uuid (Optional[str]): Unique identifier for the crew
             log_type (str, optional): Type of logs being retrieved. Defaults to "deployment".
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Get Crew Logs")
@@ -504,6 +652,7 @@ class Telemetry:
         Args:
             uuid (Optional[str]): Unique identifier for the crew being removed
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Remove Crew")
@@ -634,6 +783,7 @@ class Telemetry:
         Args:
             flow_name (str): Name of the flow being created
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Flow Creation")
@@ -650,6 +800,7 @@ class Telemetry:
             flow_name (str): Name of the flow being plotted
             node_names (list[str]): List of node names in the flow
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Flow Plotting")
@@ -667,6 +818,7 @@ class Telemetry:
             flow_name (str): Name of the flow being executed
             node_names (list[str]): List of nodes being executed in the flow
         """
+
         def operation():
             tracer = trace.get_tracer("crewai.telemetry")
             span = tracer.start_span("Flow Execution")

View File

@@ -22,6 +22,7 @@ from crewai.utilities.events.tool_usage_events import (
     ToolSelectionErrorEvent,
     ToolUsageErrorEvent,
     ToolUsageFinishedEvent,
+    ToolUsageStartedEvent,
     ToolValidateInputErrorEvent,
 )
@@ -69,6 +70,7 @@ class ToolUsage:
         function_calling_llm: Any,
         agent: Any,
         action: Any,
+        fingerprint_context: Optional[Dict[str, str]] = None,
     ) -> None:
         self._i18n: I18N = agent.i18n
         self._printer: Printer = Printer()
@@ -85,6 +87,7 @@ class ToolUsage:
         self.task = task
         self.action = action
         self.function_calling_llm = function_calling_llm
+        self.fingerprint_context = fingerprint_context or {}

         # Set the maximum parsing attempts for bigger models
         if (
@@ -117,7 +120,10 @@ class ToolUsage:
self._printer.print(content=f"\n\n{error}\n", color="red") self._printer.print(content=f"\n\n{error}\n", color="red")
return error return error
if isinstance(tool, CrewStructuredTool) and tool.name == self._i18n.tools("add_image")["name"]: # type: ignore if (
isinstance(tool, CrewStructuredTool)
and tool.name == self._i18n.tools("add_image")["name"] # type: ignore
):
try: try:
result = self._use(tool_string=tool_string, tool=tool, calling=calling) result = self._use(tool_string=tool_string, tool=tool, calling=calling)
return result return result
@@ -181,18 +187,26 @@ class ToolUsage:
             if calling.arguments:
                 try:
-                    acceptable_args = tool.args_schema.model_json_schema()["properties"].keys()  # type: ignore
+                    acceptable_args = tool.args_schema.model_json_schema()[
+                        "properties"
+                    ].keys()  # type: ignore
                     arguments = {
                         k: v
                         for k, v in calling.arguments.items()
                         if k in acceptable_args
                     }
+                    # Add fingerprint metadata if available
+                    arguments = self._add_fingerprint_metadata(arguments)
                     result = tool.invoke(input=arguments)
                 except Exception:
                     arguments = calling.arguments
+                    # Add fingerprint metadata if available
+                    arguments = self._add_fingerprint_metadata(arguments)
                     result = tool.invoke(input=arguments)
             else:
-                result = tool.invoke(input={})
+                # Add fingerprint metadata even to empty arguments
+                arguments = self._add_fingerprint_metadata({})
+                result = tool.invoke(input=arguments)
         except Exception as e:
             self.on_tool_error(tool=tool, tool_calling=calling, e=e)
             self._run_attempts += 1
@@ -202,7 +216,7 @@ class ToolUsage:
                 error=e, tool=tool.name, tool_inputs=tool.description
             )
             error = ToolUsageErrorException(
-                f'\n{error_message}.\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
+                f"\n{error_message}.\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
             ).message
             self.task.increment_tools_errors()
             if self.agent.verbose:
@@ -244,6 +258,7 @@ class ToolUsage:
                 tool_calling=calling,
                 from_cache=from_cache,
                 started_at=started_at,
+                result=result,
             )

             if (
@@ -380,7 +395,7 @@ class ToolUsage:
                     raise
                 else:
                     return ToolUsageErrorException(
-                        f'{self._i18n.errors("tool_arguments_error")}'
+                        f"{self._i18n.errors('tool_arguments_error')}"
                     )

             if not isinstance(arguments, dict):
@@ -388,7 +403,7 @@ class ToolUsage:
                     raise
                 else:
                     return ToolUsageErrorException(
-                        f'{self._i18n.errors("tool_arguments_error")}'
+                        f"{self._i18n.errors('tool_arguments_error')}"
                     )

         return ToolCalling(
@@ -416,7 +431,7 @@ class ToolUsage:
             if self.agent.verbose:
                 self._printer.print(content=f"\n\n{e}\n", color="red")
             return ToolUsageErrorException(  # type: ignore # Incompatible return value type (got "ToolUsageErrorException", expected "ToolCalling | InstructorToolCalling")
-                f'{self._i18n.errors("tool_usage_error").format(error=e)}\nMoving on then. {self._i18n.slice("format").format(tool_names=self.tools_names)}'
+                f"{self._i18n.errors('tool_usage_error').format(error=e)}\nMoving on then. {self._i18n.slice('format').format(tool_names=self.tools_names)}"
             )

         return self._tool_calling(tool_string)
@@ -480,8 +495,13 @@ class ToolUsage:
"tool_name": self.action.tool, "tool_name": self.action.tool,
"tool_args": str(self.action.tool_input), "tool_args": str(self.action.tool_input),
"tool_class": self.__class__.__name__, "tool_class": self.__class__.__name__,
"agent": self.agent, # Adding agent for fingerprint extraction
} }
# Include fingerprint context if available
if self.fingerprint_context:
tool_selection_data.update(self.fingerprint_context)
crewai_event_bus.emit( crewai_event_bus.emit(
self, self,
ToolValidateInputErrorEvent(**tool_selection_data, error=final_error), ToolValidateInputErrorEvent(**tool_selection_data, error=final_error),
@@ -492,7 +512,12 @@ class ToolUsage:
         crewai_event_bus.emit(self, ToolUsageErrorEvent(**{**event_data, "error": e}))

     def on_tool_use_finished(
-        self, tool: Any, tool_calling: ToolCalling, from_cache: bool, started_at: float
+        self,
+        tool: Any,
+        tool_calling: ToolCalling,
+        from_cache: bool,
+        started_at: float,
+        result: Any,
     ) -> None:
         finished_at = time.time()
         event_data = self._prepare_event_data(tool, tool_calling)
@@ -501,12 +526,13 @@ class ToolUsage:
"started_at": datetime.datetime.fromtimestamp(started_at), "started_at": datetime.datetime.fromtimestamp(started_at),
"finished_at": datetime.datetime.fromtimestamp(finished_at), "finished_at": datetime.datetime.fromtimestamp(finished_at),
"from_cache": from_cache, "from_cache": from_cache,
"output": result,
} }
) )
crewai_event_bus.emit(self, ToolUsageFinishedEvent(**event_data)) crewai_event_bus.emit(self, ToolUsageFinishedEvent(**event_data))
def _prepare_event_data(self, tool: Any, tool_calling: ToolCalling) -> dict: def _prepare_event_data(self, tool: Any, tool_calling: ToolCalling) -> dict:
return { event_data = {
"agent_key": self.agent.key, "agent_key": self.agent.key,
"agent_role": (self.agent._original_role or self.agent.role), "agent_role": (self.agent._original_role or self.agent.role),
"run_attempts": self._run_attempts, "run_attempts": self._run_attempts,
@@ -514,4 +540,43 @@ class ToolUsage:
"tool_name": tool.name, "tool_name": tool.name,
"tool_args": tool_calling.arguments, "tool_args": tool_calling.arguments,
"tool_class": tool.__class__.__name__, "tool_class": tool.__class__.__name__,
"agent": self.agent, # Adding agent for fingerprint extraction
} }
# Include fingerprint context if available
if self.fingerprint_context:
event_data.update(self.fingerprint_context)
return event_data
def _add_fingerprint_metadata(self, arguments: dict) -> dict:
"""Add fingerprint metadata to tool arguments if available.
Args:
arguments: The original tool arguments
Returns:
Updated arguments dictionary with fingerprint metadata
"""
# Create a shallow copy to avoid modifying the original
arguments = arguments.copy()
# Add security metadata under a designated key
if not "security_context" in arguments:
arguments["security_context"] = {}
security_context = arguments["security_context"]
# Add agent fingerprint if available
if hasattr(self, "agent") and hasattr(self.agent, "security_config"):
security_context["agent_fingerprint"] = self.agent.security_config.fingerprint.to_dict()
# Add task fingerprint if available
if hasattr(self, "task") and hasattr(self.task, "security_config"):
security_context["task_fingerprint"] = self.task.security_config.fingerprint.to_dict()
# Add crew fingerprint if available
if hasattr(self, "crew") and hasattr(self.crew, "security_config"):
security_context["crew_fingerprint"] = self.crew.security_config.fingerprint.to_dict()
return arguments
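
Editor's sketch: a simplified, self-contained version of the security-context injection performed by _add_fingerprint_metadata above. Here the fingerprints are passed in as plain dicts instead of being read from security_config objects, so the function name and parameters are illustrative only.

# Nesting fingerprint metadata under a 'security_context' key in tool arguments.
from typing import Any, Dict, Optional


def add_fingerprint_metadata(
    arguments: Dict[str, Any],
    agent_fingerprint: Optional[Dict[str, Any]] = None,
    task_fingerprint: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Return a copy of the arguments with fingerprints under 'security_context'."""
    arguments = arguments.copy()  # shallow copy, mirroring the method above
    security_context = arguments.setdefault("security_context", {})
    if agent_fingerprint is not None:
        security_context["agent_fingerprint"] = agent_fingerprint
    if task_fingerprint is not None:
        security_context["task_fingerprint"] = task_fingerprint
    return arguments


print(add_fingerprint_metadata({"query": "status"}, agent_fingerprint={"uuid_str": "abc"}))
# -> {'query': 'status', 'security_context': {'agent_fingerprint': {'uuid_str': 'abc'}}}
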

View File

@@ -45,7 +45,7 @@ class TaskEvaluator:
     def evaluate(self, task, output) -> TaskEvaluation:
         crewai_event_bus.emit(
-            self, TaskEvaluationEvent(evaluation_type="task_evaluation")
+            self, TaskEvaluationEvent(evaluation_type="task_evaluation", task=task)
         )

         evaluation_query = (
             f"Assess the quality of the task completed based on the description, expected output, and actual results.\n\n"

View File

@@ -21,6 +21,15 @@ class AgentExecutionStartedEvent(CrewEvent):
model_config = {"arbitrary_types_allowed": True} model_config = {"arbitrary_types_allowed": True}
def __init__(self, **data):
super().__init__(**data)
# Set fingerprint data from the agent
if hasattr(self.agent, 'fingerprint') and self.agent.fingerprint:
self.source_fingerprint = self.agent.fingerprint.uuid_str
self.source_type = "agent"
if hasattr(self.agent.fingerprint, 'metadata') and self.agent.fingerprint.metadata:
self.fingerprint_metadata = self.agent.fingerprint.metadata
class AgentExecutionCompletedEvent(CrewEvent): class AgentExecutionCompletedEvent(CrewEvent):
"""Event emitted when an agent completes executing a task""" """Event emitted when an agent completes executing a task"""
@@ -30,6 +39,15 @@ class AgentExecutionCompletedEvent(CrewEvent):
     output: str
     type: str = "agent_execution_completed"

+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the agent
+        if hasattr(self.agent, 'fingerprint') and self.agent.fingerprint:
+            self.source_fingerprint = self.agent.fingerprint.uuid_str
+            self.source_type = "agent"
+            if hasattr(self.agent.fingerprint, 'metadata') and self.agent.fingerprint.metadata:
+                self.fingerprint_metadata = self.agent.fingerprint.metadata
+

 class AgentExecutionErrorEvent(CrewEvent):
     """Event emitted when an agent encounters an error during execution"""
@@ -38,3 +56,12 @@ class AgentExecutionErrorEvent(CrewEvent):
     task: Any
     error: str
     type: str = "agent_execution_error"

+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the agent
+        if hasattr(self.agent, 'fingerprint') and self.agent.fingerprint:
+            self.source_fingerprint = self.agent.fingerprint.uuid_str
+            self.source_type = "agent"
+            if hasattr(self.agent.fingerprint, 'metadata') and self.agent.fingerprint.metadata:
+                self.fingerprint_metadata = self.agent.fingerprint.metadata

View File

@@ -1,4 +1,5 @@
 from datetime import datetime
+from typing import Any, Dict, Optional

 from pydantic import BaseModel, Field
@@ -8,3 +9,6 @@ class CrewEvent(BaseModel):
     timestamp: datetime = Field(default_factory=datetime.now)
     type: str
+    source_fingerprint: Optional[str] = None  # UUID string of the source entity
+    source_type: Optional[str] = None  # "agent", "task", "crew"
+    fingerprint_metadata: Optional[Dict[str, Any]] = None  # Any relevant metadata
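
Editor's sketch: a standalone example (pydantic v2 style, matching the BaseModel/Field imports above) of how a concrete event subclass can fill the new source_fingerprint / source_type / fingerprint_metadata fields from the object it wraps. ExampleTaskEvent and the stub classes are illustrative, not part of the codebase.

from datetime import datetime
from typing import Any, Dict, Optional

from pydantic import BaseModel, Field


class CrewEvent(BaseModel):
    timestamp: datetime = Field(default_factory=datetime.now)
    type: str
    source_fingerprint: Optional[str] = None
    source_type: Optional[str] = None
    fingerprint_metadata: Optional[Dict[str, Any]] = None


class ExampleTaskEvent(CrewEvent):
    type: str = "example_task_event"
    task: Optional[Any] = None

    def __init__(self, **data):
        super().__init__(**data)
        # Copy fingerprint data from the wrapped task, if it carries any
        fingerprint = getattr(self.task, "fingerprint", None)
        if fingerprint:
            self.source_fingerprint = fingerprint.uuid_str
            self.source_type = "task"
            if getattr(fingerprint, "metadata", None):
                self.fingerprint_metadata = fingerprint.metadata


class _Fingerprint:
    uuid_str = "task-123"
    metadata = {"env": "test"}


class _Task:
    fingerprint = _Fingerprint()


event = ExampleTaskEvent(task=_Task())
print(event.source_fingerprint, event.source_type, event.fingerprint_metadata)
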

View File

@@ -11,6 +11,16 @@ class CrewKickoffStartedEvent(CrewEvent):
     crew_name: Optional[str]
     inputs: Optional[Dict[str, Any]]
     type: str = "crew_kickoff_started"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata


 class CrewKickoffCompletedEvent(CrewEvent):
@@ -19,6 +29,16 @@ class CrewKickoffCompletedEvent(CrewEvent):
     crew_name: Optional[str]
     output: Any
     type: str = "crew_kickoff_completed"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata


 class CrewKickoffFailedEvent(CrewEvent):
@@ -27,6 +47,16 @@ class CrewKickoffFailedEvent(CrewEvent):
     error: str
     crew_name: Optional[str]
     type: str = "crew_kickoff_failed"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata


 class CrewTrainStartedEvent(CrewEvent):
@@ -37,6 +67,16 @@ class CrewTrainStartedEvent(CrewEvent):
     filename: str
     inputs: Optional[Dict[str, Any]]
     type: str = "crew_train_started"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata


 class CrewTrainCompletedEvent(CrewEvent):
@@ -46,6 +86,16 @@ class CrewTrainCompletedEvent(CrewEvent):
     n_iterations: int
     filename: str
     type: str = "crew_train_completed"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata


 class CrewTrainFailedEvent(CrewEvent):
@@ -54,6 +104,16 @@ class CrewTrainFailedEvent(CrewEvent):
     error: str
     crew_name: Optional[str]
     type: str = "crew_train_failed"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata


 class CrewTestStartedEvent(CrewEvent):
@@ -64,6 +124,16 @@ class CrewTestStartedEvent(CrewEvent):
     eval_llm: Optional[Union[str, Any]]
     inputs: Optional[Dict[str, Any]]
     type: str = "crew_test_started"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata


 class CrewTestCompletedEvent(CrewEvent):
@@ -71,6 +141,16 @@ class CrewTestCompletedEvent(CrewEvent):
     crew_name: Optional[str]
     type: str = "crew_test_completed"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata


 class CrewTestFailedEvent(CrewEvent):
@@ -79,3 +159,13 @@ class CrewTestFailedEvent(CrewEvent):
     error: str
     crew_name: Optional[str]
     type: str = "crew_test_failed"
+    crew: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the crew
+        if self.crew and hasattr(self.crew, 'fingerprint') and self.crew.fingerprint:
+            self.source_fingerprint = self.crew.fingerprint.uuid_str
+            self.source_type = "crew"
+            if hasattr(self.crew.fingerprint, 'metadata') and self.crew.fingerprint.metadata:
+                self.fingerprint_metadata = self.crew.fingerprint.metadata

View File

@@ -12,10 +12,15 @@ class LLMCallType(Enum):
 class LLMCallStartedEvent(CrewEvent):
-    """Event emitted when a LLM call starts"""
+    """Event emitted when a LLM call starts
+
+    Attributes:
+        messages: Content can be either a string or a list of dictionaries that support
+            multimodal content (text, images, etc.)
+    """

     type: str = "llm_call_started"
-    messages: Union[str, List[Dict[str, str]]]
+    messages: Union[str, List[Dict[str, Any]]]
     tools: Optional[List[dict]] = None
     callbacks: Optional[List[Any]] = None
     available_functions: Optional[Dict[str, Any]] = None

View File

@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Any, Optional

 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.events.base_events import CrewEvent
@@ -9,6 +9,16 @@ class TaskStartedEvent(CrewEvent):
type: str = "task_started" type: str = "task_started"
context: Optional[str] context: Optional[str]
task: Optional[Any] = None
def __init__(self, **data):
super().__init__(**data)
# Set fingerprint data from the task
if hasattr(self.task, 'fingerprint') and self.task.fingerprint:
self.source_fingerprint = self.task.fingerprint.uuid_str
self.source_type = "task"
if hasattr(self.task.fingerprint, 'metadata') and self.task.fingerprint.metadata:
self.fingerprint_metadata = self.task.fingerprint.metadata
class TaskCompletedEvent(CrewEvent): class TaskCompletedEvent(CrewEvent):
@@ -16,6 +26,16 @@ class TaskCompletedEvent(CrewEvent):
     output: TaskOutput
     type: str = "task_completed"
+    task: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the task
+        if hasattr(self.task, 'fingerprint') and self.task.fingerprint:
+            self.source_fingerprint = self.task.fingerprint.uuid_str
+            self.source_type = "task"
+            if hasattr(self.task.fingerprint, 'metadata') and self.task.fingerprint.metadata:
+                self.fingerprint_metadata = self.task.fingerprint.metadata


 class TaskFailedEvent(CrewEvent):
@@ -23,6 +43,16 @@ class TaskFailedEvent(CrewEvent):
     error: str
     type: str = "task_failed"
+    task: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the task
+        if hasattr(self.task, 'fingerprint') and self.task.fingerprint:
+            self.source_fingerprint = self.task.fingerprint.uuid_str
+            self.source_type = "task"
+            if hasattr(self.task.fingerprint, 'metadata') and self.task.fingerprint.metadata:
+                self.fingerprint_metadata = self.task.fingerprint.metadata


 class TaskEvaluationEvent(CrewEvent):
@@ -30,3 +60,13 @@ class TaskEvaluationEvent(CrewEvent):
type: str = "task_evaluation" type: str = "task_evaluation"
evaluation_type: str evaluation_type: str
task: Optional[Any] = None
def __init__(self, **data):
super().__init__(**data)
# Set fingerprint data from the task
if hasattr(self.task, 'fingerprint') and self.task.fingerprint:
self.source_fingerprint = self.task.fingerprint.uuid_str
self.source_type = "task"
if hasattr(self.task.fingerprint, 'metadata') and self.task.fingerprint.metadata:
self.fingerprint_metadata = self.task.fingerprint.metadata

View File

@@ -1,5 +1,5 @@
 from datetime import datetime
-from typing import Any, Callable, Dict
+from typing import Any, Callable, Dict, Optional

 from .base_events import CrewEvent
@@ -14,9 +14,19 @@ class ToolUsageEvent(CrewEvent):
     tool_class: str
     run_attempts: int | None = None
     delegations: int | None = None
+    agent: Optional[Any] = None

     model_config = {"arbitrary_types_allowed": True}

+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the agent
+        if self.agent and hasattr(self.agent, 'fingerprint') and self.agent.fingerprint:
+            self.source_fingerprint = self.agent.fingerprint.uuid_str
+            self.source_type = "agent"
+            if hasattr(self.agent.fingerprint, 'metadata') and self.agent.fingerprint.metadata:
+                self.fingerprint_metadata = self.agent.fingerprint.metadata
+

 class ToolUsageStartedEvent(ToolUsageEvent):
     """Event emitted when a tool execution is started"""
@@ -30,6 +40,7 @@ class ToolUsageFinishedEvent(ToolUsageEvent):
     started_at: datetime
     finished_at: datetime
     from_cache: bool = False
+    output: Any
     type: str = "tool_usage_finished"
@@ -62,3 +73,13 @@ class ToolExecutionErrorEvent(CrewEvent):
     tool_name: str
     tool_args: Dict[str, Any]
     tool_class: Callable
+    agent: Optional[Any] = None
+
+    def __init__(self, **data):
+        super().__init__(**data)
+        # Set fingerprint data from the agent
+        if self.agent and hasattr(self.agent, 'fingerprint') and self.agent.fingerprint:
+            self.source_fingerprint = self.agent.fingerprint.uuid_str
+            self.source_type = "agent"
+            if hasattr(self.agent.fingerprint, 'metadata') and self.agent.fingerprint.metadata:
+                self.fingerprint_metadata = self.agent.fingerprint.metadata
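
Editor's usage note: downstream listeners can now read the fingerprint fields and the new output attribute directly off tool events. The sketch below assumes the event bus's on() decorator and (source, event) handler signature work as in the project's other event-listener examples; adjust if that API differs.

from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
    ToolUsageFinishedEvent,
    ToolUsageStartedEvent,
)


@crewai_event_bus.on(ToolUsageStartedEvent)
def log_tool_started(source, event: ToolUsageStartedEvent):
    # source_type / source_fingerprint are populated from the agent attached to the event
    print(f"[{event.source_type}:{event.source_fingerprint}] started {event.tool_name}")


@crewai_event_bus.on(ToolUsageFinishedEvent)
def log_tool_finished(source, event: ToolUsageFinishedEvent):
    # 'output' is newly carried on the finished event alongside the timing fields
    duration = (event.finished_at - event.started_at).total_seconds()
    print(f"{event.tool_name} finished in {duration:.2f}s -> {event.output!r}")
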

View File

@@ -0,0 +1,378 @@
interactions:
- request:
body: !!binary |
CpIKCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkS6QkKEgoQY3Jld2FpLnRl
bGVtZXRyeRLBBwoQ08SlQ6w2FsCauTgZCqberRIITfOsgNi1qJkqDENyZXcgQ3JlYXRlZDABOdjG
6D/PcDAYQahPEkDPcDAYShsKDmNyZXdhaV92ZXJzaW9uEgkKBzAuMTA4LjBKGgoOcHl0aG9uX3Zl
cnNpb24SCAoGMy4xMi45Si4KCGNyZXdfa2V5EiIKIDkwNzMxMTU4MzVlMWNhZjJhNmUxNTIyZDA1
YTBiNTFkSjEKB2NyZXdfaWQSJgokMzdjOGM4NzgtN2NmZC00YjEyLWE4YzctYzIyZDZlOTIxODBk
ShwKDGNyZXdfcHJvY2VzcxIMCgpzZXF1ZW50aWFsShEKC2NyZXdfbWVtb3J5EgIQAEoaChRjcmV3
X251bWJlcl9vZl90YXNrcxICGAFKGwoVY3Jld19udW1iZXJfb2ZfYWdlbnRzEgIYAUrgAgoLY3Jl
d19hZ2VudHMS0AIKzQJbeyJrZXkiOiAiNzYyM2ZjNGY3ZDk0Y2YzZmRiZmNjMjlmYjBiMDIyYmIi
LCAiaWQiOiAiYmVjMjljMTAtOTljYi00MzQwLWIwYTItMWU1NTVkNGRmZGM0IiwgInJvbGUiOiAi
VmlzdWFsIFF1YWxpdHkgSW5zcGVjdG9yIiwgInZlcmJvc2U/IjogdHJ1ZSwgIm1heF9pdGVyIjog
MjUsICJtYXhfcnBtIjogbnVsbCwgImZ1bmN0aW9uX2NhbGxpbmdfbGxtIjogIiIsICJsbG0iOiAi
b3BlbmFpL2dwdC00byIsICJkZWxlZ2F0aW9uX2VuYWJsZWQ/IjogZmFsc2UsICJhbGxvd19jb2Rl
X2V4ZWN1dGlvbj8iOiBmYWxzZSwgIm1heF9yZXRyeV9saW1pdCI6IDIsICJ0b29sc19uYW1lcyI6
IFtdfV1KjQIKCmNyZXdfdGFza3MS/gEK+wFbeyJrZXkiOiAiMDExM2E5ZTg0N2M2NjI2ZDY0ZDZk
Yzk4M2IwNDA5MTgiLCAiaWQiOiAiZWQzYmY1YWUtZTBjMS00MjIxLWFhYTgtMThlNjVkYTMyZjc1
IiwgImFzeW5jX2V4ZWN1dGlvbj8iOiBmYWxzZSwgImh1bWFuX2lucHV0PyI6IGZhbHNlLCAiYWdl
bnRfcm9sZSI6ICJWaXN1YWwgUXVhbGl0eSBJbnNwZWN0b3IiLCAiYWdlbnRfa2V5IjogIjc2MjNm
YzRmN2Q5NGNmM2ZkYmZjYzI5ZmIwYjAyMmJiIiwgInRvb2xzX25hbWVzIjogW119XXoCGAGFAQAB
AAASjgIKECo77ESam8oLrZMmgLLaoksSCLE6x14/Kb1vKgxUYXNrIENyZWF0ZWQwATlI/chAz3Aw
GEEAgMpAz3AwGEouCghjcmV3X2tleRIiCiA5MDczMTE1ODM1ZTFjYWYyYTZlMTUyMmQwNWEwYjUx
ZEoxCgdjcmV3X2lkEiYKJDM3YzhjODc4LTdjZmQtNGIxMi1hOGM3LWMyMmQ2ZTkyMTgwZEouCgh0
YXNrX2tleRIiCiAwMTEzYTllODQ3YzY2MjZkNjRkNmRjOTgzYjA0MDkxOEoxCgd0YXNrX2lkEiYK
JGVkM2JmNWFlLWUwYzEtNDIyMS1hYWE4LTE4ZTY1ZGEzMmY3NXoCGAGFAQABAAA=
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '1301'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.31.1
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Wed, 26 Mar 2025 19:24:52 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Visual Quality Inspector.
Senior quality control expert with expertise in visual inspection\nYour personal
goal is: Perform detailed quality analysis of product images\nYou ONLY have
access to the following tools, and should NEVER make up tools that are not listed
here:\n\nTool Name: Add image to content\nTool Arguments: {''image_url'': {''description'':
''The URL or path of the image to add'', ''type'': ''str''}, ''action'': {''description'':
''Optional context or question about the image'', ''type'': ''Union[str, NoneType]''}}\nTool
Description: See image to understand its content, you can optionally ask a question
about the image\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [Add image to content], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple JSON object, enclosed in curly
braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
all necessary information is gathered, return the following format:\n\n```\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n```"}, {"role": "user", "content": "\nCurrent Task: \n Analyze
the product image at https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244
with focus on:\n 1. Quality of materials\n 2. Manufacturing defects\n 3.
Compliance with standards\n Provide a detailed report highlighting any
issues found.\n \n\nThis is the expected criteria for your final answer:
A detailed report highlighting any issues found\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}], "model": "gpt-4o", "stop": ["\nObservation:"],
"temperature": 0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '2033'
content-type:
- application/json
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- x64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BFQepLwSYYzdKLylSFsgcJeg6GTqS\",\n \"object\":
\"chat.completion\",\n \"created\": 1743017091,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Thought: I need to examine the product
image to assess the quality of materials, look for any manufacturing defects,
and check compliance with standards.\\n\\nAction: Add image to content\\nAction
Input: {\\\"image_url\\\": \\\"https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244\\\",
\\\"action\\\": \\\"Analyze the quality of materials, manufacturing defects,
and compliance with standards.\\\"}\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 413,\n \"completion_tokens\":
101,\n \"total_tokens\": 514,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_7e8d90e604\"\n}\n"
headers:
CF-RAY:
- 926907d79dcff1e7-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 26 Mar 2025 19:24:53 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=WK433.4kW8cr9rwvOlk4EZ2SfRYK9lAPwXCBYEvLcmU-1743017093-1.0.1.1-kVZyUew5rUbMk.2koGJF_rmX.fTseqN241n2M40n8KvBGoKgy6KM6xBmvFbIVWxUs2Y5ZAz8mWy9CrGjaNKSfCzxmv4.pq78z_DGHr37PgI;
path=/; expires=Wed, 26-Mar-25 19:54:53 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=T77PMcuNYeyzK0tQyDOe7EScjVBVzW_7DpD3YQBqmUc-1743017093675-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1729'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '50000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '49999'
x-ratelimit-remaining-tokens:
- '149999534'
x-ratelimit-reset-requests:
- 1ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_2399c3355adf16734907c73611a7d330
http_version: HTTP/1.1
status_code: 200
- request:
body: !!binary |
CtgBCiQKIgoMc2VydmljZS5uYW1lEhIKEGNyZXdBSS10ZWxlbWV0cnkSrwEKEgoQY3Jld2FpLnRl
bGVtZXRyeRKYAQoQp2ACB2xRGve4HGtU2RdWCBIIlQcsbhK22ykqClRvb2wgVXNhZ2UwATlACEXG
z3AwGEHAjGPGz3AwGEobCg5jcmV3YWlfdmVyc2lvbhIJCgcwLjEwOC4wSiMKCXRvb2xfbmFtZRIW
ChRBZGQgaW1hZ2UgdG8gY29udGVudEoOCghhdHRlbXB0cxICGAF6AhgBhQEAAQAA
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate, zstd
Connection:
- keep-alive
Content-Length:
- '219'
Content-Type:
- application/x-protobuf
User-Agent:
- OTel-OTLP-Exporter-Python/1.31.1
method: POST
uri: https://telemetry.crewai.com:4319/v1/traces
response:
body:
string: "\n\0"
headers:
Content-Length:
- '2'
Content-Type:
- application/x-protobuf
Date:
- Wed, 26 Mar 2025 19:24:57 GMT
status:
code: 200
message: OK
- request:
body: '{"messages": [{"role": "system", "content": "You are Visual Quality Inspector.
Senior quality control expert with expertise in visual inspection\nYour personal
goal is: Perform detailed quality analysis of product images\nYou ONLY have
access to the following tools, and should NEVER make up tools that are not listed
here:\n\nTool Name: Add image to content\nTool Arguments: {''image_url'': {''description'':
''The URL or path of the image to add'', ''type'': ''str''}, ''action'': {''description'':
''Optional context or question about the image'', ''type'': ''Union[str, NoneType]''}}\nTool
Description: See image to understand its content, you can optionally ask a question
about the image\n\nIMPORTANT: Use the following format in your response:\n\n```\nThought:
you should always think about what to do\nAction: the action to take, only one
name of [Add image to content], just the name, exactly as it''s written.\nAction
Input: the input to the action, just a simple JSON object, enclosed in curly
braces, using \" to wrap keys and values.\nObservation: the result of the action\n```\n\nOnce
all necessary information is gathered, return the following format:\n\n```\nThought:
I now know the final answer\nFinal Answer: the final answer to the original
input question\n```"}, {"role": "user", "content": "\nCurrent Task: \n Analyze
the product image at https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244
with focus on:\n 1. Quality of materials\n 2. Manufacturing defects\n 3.
Compliance with standards\n Provide a detailed report highlighting any
issues found.\n \n\nThis is the expected criteria for your final answer:
A detailed report highlighting any issues found\nyou MUST return the actual
complete content as the final answer, not a summary.\n\nBegin! This is VERY
important to you, use the tools available and give your best Final Answer, your
job depends on it!\n\nThought:"}, {"role": "user", "content": [{"type": "text",
"text": "Analyze the quality of materials, manufacturing defects, and compliance
with standards."}, {"type": "image_url", "image_url": {"url": "https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244"}}]},
{"role": "assistant", "content": "Thought: I need to examine the product image
to assess the quality of materials, look for any manufacturing defects, and
check compliance with standards.\n\nAction: Add image to content\nAction Input:
{\"image_url\": \"https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244\",
\"action\": \"Analyze the quality of materials, manufacturing defects, and compliance
with standards.\"}"}], "model": "gpt-4o", "stop": ["\nObservation:"], "temperature":
0.7}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate, zstd
connection:
- keep-alive
content-length:
- '2797'
content-type:
- application/json
cookie:
- __cf_bm=WK433.4kW8cr9rwvOlk4EZ2SfRYK9lAPwXCBYEvLcmU-1743017093-1.0.1.1-kVZyUew5rUbMk.2koGJF_rmX.fTseqN241n2M40n8KvBGoKgy6KM6xBmvFbIVWxUs2Y5ZAz8mWy9CrGjaNKSfCzxmv4.pq78z_DGHr37PgI;
_cfuvid=T77PMcuNYeyzK0tQyDOe7EScjVBVzW_7DpD3YQBqmUc-1743017093675-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.68.2
x-stainless-arch:
- x64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.68.2
x-stainless-raw-response:
- 'true'
x-stainless-read-timeout:
- '600.0'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.9
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-BFQetNNvmPgPxhzaKiHYsPqm8aN0i\",\n \"object\":
\"chat.completion\",\n \"created\": 1743017095,\n \"model\": \"gpt-4o-2024-08-06\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Observation: The image displays a black
leather boot with a pointed toe and a low heel. \\n\\nQuality of Materials:\\n1.
The leather appears to be of good quality, displaying a consistent texture and
finish, which suggests durability.\\n2. The material has a slight sheen, indicating
a possible finishing treatment that enhances the appearance and may offer some
protection.\\n\\nManufacturing Defects:\\n1. There are no visible stitching
errors; the seams appear straight and clean.\\n2. No apparent glue marks or
uneven edges, which indicates good craftsmanship.\\n3. There is a slight distressed
effect, but it appears intentional as part of the design rather than a defect.\\n\\nCompliance
with Standards:\\n1. The shoe design seems to comply with typical fashion standards,
showing a balance of aesthetics and functionality.\\n2. The heel height and
shape appear to provide stability, aligning with safety standards for footwear.\\n\\nFinal
Answer: The analysis of the product image reveals that the black leather boot
is made of high-quality materials with no visible manufacturing defects. The
craftsmanship is precise, with clean seams and a well-executed design. The distressed
effect appears intentional and part of the aesthetic. The boot seems to comply
with fashion and safety standards, offering both style and functionality. No
significant issues were found.\",\n \"refusal\": null,\n \"annotations\":
[]\n },\n \"logprobs\": null,\n \"finish_reason\": \"stop\"\n
\ }\n ],\n \"usage\": {\n \"prompt_tokens\": 1300,\n \"completion_tokens\":
250,\n \"total_tokens\": 1550,\n \"prompt_tokens_details\": {\n \"cached_tokens\":
0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\": {\n
\ \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_3a5b33c01a\"\n}\n"
headers:
CF-RAY:
- 926907e45f33f1e7-GRU
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 26 Mar 2025 19:25:01 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '7242'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-input-images:
- '250000'
x-ratelimit-limit-requests:
- '50000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-input-images:
- '249999'
x-ratelimit-remaining-requests:
- '49999'
x-ratelimit-remaining-tokens:
- '149998641'
x-ratelimit-reset-input-images:
- 0s
x-ratelimit-reset-requests:
- 1ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_c5dd144c8ac1bb3bd96ffbba40707b2d
http_version: HTTP/1.1
status_code: 200
version: 1
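
The recorded request above shows the payload shape a multimodal agent ends up sending after the "Add image to content" action: the user turn is a list of content parts, one text part with the analysis instruction and one image_url part with the product photo. A minimal sketch of that structure, with illustrative variable names not taken from the codebase:

# Sketch only: mirrors the multimodal message recorded in the cassette above.
image_url = "https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244"
multimodal_message = {
    "role": "user",
    "content": [
        {
            "type": "text",
            "text": "Analyze the quality of materials, manufacturing defects, and compliance with standards.",
        },
        {"type": "image_url", "image_url": {"url": image_url}},
    ],
}
# The cassette replays the POST to https://api.openai.com/v1/chat/completions that carries
# this message, so the test below runs against the recording instead of a live OpenAI call.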

View File

@@ -3733,6 +3733,44 @@ def test_multimodal_agent_image_tool_handling():
assert result["content"][1]["type"] == "image_url" assert result["content"][1]["type"] == "image_url"
@pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_agent_describing_image_successfully():
    """
    Test that a multimodal agent can process images without validation errors.
    This test reproduces the scenario from issue #2475.
    """
    llm = LLM(model="openai/gpt-4o", temperature=0.7)  # model with vision capabilities

    expert_analyst = Agent(
        role="Visual Quality Inspector",
        goal="Perform detailed quality analysis of product images",
        backstory="Senior quality control expert with expertise in visual inspection",
        llm=llm,
        verbose=True,
        allow_delegation=False,
        multimodal=True,
    )

    inspection_task = Task(
        description="""
            Analyze the product image at https://www.us.maguireshoes.com/cdn/shop/files/FW24-Edito-Lucena-Distressed-01_1920x.jpg?v=1736371244 with focus on:
            1. Quality of materials
            2. Manufacturing defects
            3. Compliance with standards
            Provide a detailed report highlighting any issues found.
        """,
        expected_output="A detailed report highlighting any issues found",
        agent=expert_analyst,
    )

    crew = Crew(agents=[expert_analyst], tasks=[inspection_task])
    result = crew.kickoff()

    task_output = result.tasks_output[0]
    assert isinstance(task_output, TaskOutput)
    assert task_output.raw == result.raw


@pytest.mark.vcr(filter_headers=["authorization"])
def test_multimodal_agent_live_image_analysis():
    """

View File

@@ -0,0 +1,46 @@
import os

import pytest

from crewai import LLM, Agent, Crew, Task


@pytest.mark.skip(reason="Only run manually with valid API keys")
def test_multimodal_agent_with_image_url():
    """
    Test that a multimodal agent can process images without validation errors.
    This test reproduces the scenario from issue #2475.
    """
    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
    if not OPENAI_API_KEY:
        pytest.skip("OPENAI_API_KEY environment variable not set")

    llm = LLM(
        model="openai/gpt-4o",  # model with vision capabilities
        api_key=OPENAI_API_KEY,
        temperature=0.7
    )

    expert_analyst = Agent(
        role="Visual Quality Inspector",
        goal="Perform detailed quality analysis of product images",
        backstory="Senior quality control expert with expertise in visual inspection",
        llm=llm,
        verbose=True,
        allow_delegation=False,
        multimodal=True
    )

    inspection_task = Task(
        description="""
            Analyze the product image at https://www.us.maguireshoes.com/collections/spring-25/products/lucena-black-boot with focus on:
            1. Quality of materials
            2. Manufacturing defects
            3. Compliance with standards
            Provide a detailed report highlighting any issues found.
        """,
        expected_output="A detailed report highlighting any issues found",
        agent=expert_analyst
    )

    crew = Crew(agents=[expert_analyst], tasks=[inspection_task])

View File

@@ -1,5 +1,7 @@
import datetime
import json
import random
import time
from unittest.mock import MagicMock, patch

import pytest
@@ -11,6 +13,7 @@ from crewai.tools.tool_usage import ToolUsage
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import (
    ToolSelectionErrorEvent,
    ToolUsageFinishedEvent,
    ToolValidateInputErrorEvent,
)
@@ -624,3 +627,161 @@ def test_tool_validate_input_error_event():
    assert event.agent_role == "test_role"
    assert event.tool_name == "test_tool"
    assert "must be a valid dictionary" in event.error


def test_tool_usage_finished_event_with_result():
    """Test that ToolUsageFinishedEvent is emitted with correct result attributes."""
    # Create mock agent with proper string values
    mock_agent = MagicMock()
    mock_agent.key = "test_agent_key"
    mock_agent.role = "test_agent_role"
    mock_agent._original_role = "test_agent_role"
    mock_agent.i18n = MagicMock()
    mock_agent.verbose = False

    # Create mock task
    mock_task = MagicMock()
    mock_task.delegations = 0

    # Create mock tool
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool"

        def _run(self, input: dict) -> str:
            return "test result"

    test_tool = TestTool()

    # Create mock tool calling
    mock_tool_calling = MagicMock()
    mock_tool_calling.arguments = {"arg1": "value1"}

    # Create ToolUsage instance
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[test_tool],
        original_tools=[test_tool],
        tools_description="Test Tool Description",
        tools_names="Test Tool",
        task=mock_task,
        function_calling_llm=None,
        agent=mock_agent,
        action=MagicMock(),
    )

    # Track received events
    received_events = []

    @crewai_event_bus.on(ToolUsageFinishedEvent)
    def event_handler(source, event):
        received_events.append(event)

    # Call on_tool_use_finished with test data
    started_at = time.time()
    result = "test output result"
    tool_usage.on_tool_use_finished(
        tool=test_tool,
        tool_calling=mock_tool_calling,
        from_cache=False,
        started_at=started_at,
        result=result,
    )

    # Verify event was emitted
    assert len(received_events) == 1, "Expected one event to be emitted"
    event = received_events[0]
    assert isinstance(event, ToolUsageFinishedEvent)

    # Verify event attributes
    assert event.agent_key == "test_agent_key"
    assert event.agent_role == "test_agent_role"
    assert event.tool_name == "Test Tool"
    assert event.tool_args == {"arg1": "value1"}
    assert event.tool_class == "TestTool"
    assert event.run_attempts == 1  # Default value from ToolUsage
    assert event.delegations == 0
    assert event.from_cache is False
    assert event.output == "test output result"
    assert isinstance(event.started_at, datetime.datetime)
    assert isinstance(event.finished_at, datetime.datetime)
    assert event.type == "tool_usage_finished"


def test_tool_usage_finished_event_with_cached_result():
    """Test that ToolUsageFinishedEvent is emitted with correct result attributes when using cached result."""
    # Create mock agent with proper string values
    mock_agent = MagicMock()
    mock_agent.key = "test_agent_key"
    mock_agent.role = "test_agent_role"
    mock_agent._original_role = "test_agent_role"
    mock_agent.i18n = MagicMock()
    mock_agent.verbose = False

    # Create mock task
    mock_task = MagicMock()
    mock_task.delegations = 0

    # Create mock tool
    class TestTool(BaseTool):
        name: str = "Test Tool"
        description: str = "A test tool"

        def _run(self, input: dict) -> str:
            return "test result"

    test_tool = TestTool()

    # Create mock tool calling
    mock_tool_calling = MagicMock()
    mock_tool_calling.arguments = {"arg1": "value1"}

    # Create ToolUsage instance
    tool_usage = ToolUsage(
        tools_handler=MagicMock(),
        tools=[test_tool],
        original_tools=[test_tool],
        tools_description="Test Tool Description",
        tools_names="Test Tool",
        task=mock_task,
        function_calling_llm=None,
        agent=mock_agent,
        action=MagicMock(),
    )

    # Track received events
    received_events = []

    @crewai_event_bus.on(ToolUsageFinishedEvent)
    def event_handler(source, event):
        received_events.append(event)

    # Call on_tool_use_finished with test data and from_cache=True
    started_at = time.time()
    result = "cached test output result"
    tool_usage.on_tool_use_finished(
        tool=test_tool,
        tool_calling=mock_tool_calling,
        from_cache=True,
        started_at=started_at,
        result=result,
    )

    # Verify event was emitted
    assert len(received_events) == 1, "Expected one event to be emitted"
    event = received_events[0]
    assert isinstance(event, ToolUsageFinishedEvent)

    # Verify event attributes
    assert event.agent_key == "test_agent_key"
    assert event.agent_role == "test_agent_role"
    assert event.tool_name == "Test Tool"
    assert event.tool_args == {"arg1": "value1"}
    assert event.tool_class == "TestTool"
    assert event.run_attempts == 1  # Default value from ToolUsage
    assert event.delegations == 0
    assert event.from_cache is True
    assert event.output == "cached test output result"
    assert isinstance(event.started_at, datetime.datetime)
    assert isinstance(event.finished_at, datetime.datetime)
    assert event.type == "tool_usage_finished"
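
Both tests subscribe to the event bus with the same decorator that application code can use. A minimal standalone sketch, assuming only the imports already exercised above (the handler name and print statement are illustrative, not part of the change):

from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.tool_usage_events import ToolUsageFinishedEvent

@crewai_event_bus.on(ToolUsageFinishedEvent)
def log_tool_usage(source, event):
    # Fields asserted in the tests above: tool_name, from_cache, output, started_at, finished_at.
    print(f"{event.tool_name} finished (from_cache={event.from_cache}): {event.output}")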