From 6a49a248102170afbe0f3afa4f7fcb5693bff0ed Mon Sep 17 00:00:00 2001
From: Lucas Gomide
Date: Mon, 7 Jul 2025 13:39:35 -0300
Subject: [PATCH] feat: add exchanged messages in LLMCallCompletedEvent

---
 src/crewai/lite_agent.py                  |  1 +
 src/crewai/llm.py                         | 19 +++++++++++--------
 src/crewai/utilities/events/llm_events.py |  3 ++-
 3 files changed, 14 insertions(+), 9 deletions(-)

diff --git a/src/crewai/lite_agent.py b/src/crewai/lite_agent.py
index 58d60e426..53b6ab4c9 100644
--- a/src/crewai/lite_agent.py
+++ b/src/crewai/lite_agent.py
@@ -537,6 +537,7 @@ class LiteAgent(FlowTrackable, BaseModel):
         crewai_event_bus.emit(
             self,
             event=LLMCallCompletedEvent(
+                messages=self._messages,
                 response=answer,
                 call_type=LLMCallType.LLM_CALL,
                 from_agent=self,
diff --git a/src/crewai/llm.py b/src/crewai/llm.py
index 88edb5ec5..aa4a8a4ce 100644
--- a/src/crewai/llm.py
+++ b/src/crewai/llm.py
@@ -631,7 +631,7 @@ class LLM(BaseLLM):
                 # Log token usage if available in streaming mode
                 self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
                 # Emit completion event and return response
-                self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
+                self._handle_emit_call_events(response=full_response, call_type=LLMCallType.LLM_CALL, from_task=from_task, from_agent=from_agent, messages=params["messages"])
                 return full_response

             # --- 9) Handle tool calls if present
@@ -643,7 +643,7 @@
             self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)

             # --- 11) Emit completion event and return response
-            self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
+            self._handle_emit_call_events(response=full_response, call_type=LLMCallType.LLM_CALL, from_task=from_task, from_agent=from_agent, messages=params["messages"])
             return full_response

         except ContextWindowExceededError as e:
@@ -655,7 +655,7 @@
             logging.error(f"Error in streaming response: {str(e)}")
             if full_response.strip():
                 logging.warning(f"Returning partial response despite error: {str(e)}")
-                self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
+                self._handle_emit_call_events(response=full_response, call_type=LLMCallType.LLM_CALL, from_task=from_task, from_agent=from_agent, messages=params["messages"])
                 return full_response

             # Emit failed event and re-raise the exception
@@ -809,7 +809,7 @@

         # --- 5) If no tool calls or no available functions, return the text response directly
         if not tool_calls or not available_functions:
-            self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task, from_agent)
+            self._handle_emit_call_events(response=text_response, call_type=LLMCallType.LLM_CALL, from_task=from_task, from_agent=from_agent, messages=params["messages"])
             return text_response

         # --- 6) Handle tool calls if present
@@ -818,7 +818,7 @@
             return tool_result

         # --- 7) If tool call handling didn't return a result, emit completion event and return text response
-        self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task, from_agent)
+        self._handle_emit_call_events(response=text_response, call_type=LLMCallType.LLM_CALL, from_task=from_task, from_agent=from_agent, messages=params["messages"])
         return text_response

     def _handle_tool_call(
@@ -874,7 +874,7 @@
                 )

                 # --- 3.3) Emit success event
-                self._handle_emit_call_events(result, LLMCallType.TOOL_CALL)
+                self._handle_emit_call_events(response=result, call_type=LLMCallType.TOOL_CALL)
                 return result
             except Exception as e:
                 # --- 3.4) Handle execution errors
@@ -991,17 +991,20 @@ class LLM(BaseLLM):
             logging.error(f"LiteLLM call failed: {str(e)}")
             raise

-    def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None):
+    def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None, messages: Optional[Union[str, List[Dict[str, Any]]]] = None):
         """Handle the events for the LLM call.

         Args:
             response (str): The response from the LLM call.
             call_type (str): The type of call, either "tool_call" or "llm_call".
+            from_task: Optional task associated with the call.
+            from_agent: Optional agent that made the call.
+            messages: Optional messages sent to the LLM for this call.
         """
         assert hasattr(crewai_event_bus, "emit")
         crewai_event_bus.emit(
             self,
-            event=LLMCallCompletedEvent(response=response, call_type=call_type, from_task=from_task, from_agent=from_agent),
+            event=LLMCallCompletedEvent(messages=messages, response=response, call_type=call_type, from_task=from_task, from_agent=from_agent),
         )

     def _format_messages_for_provider(
diff --git a/src/crewai/utilities/events/llm_events.py b/src/crewai/utilities/events/llm_events.py
index 283036d54..bc1af5c9a 100644
--- a/src/crewai/utilities/events/llm_events.py
+++ b/src/crewai/utilities/events/llm_events.py
@@ -48,7 +48,7 @@ class LLMCallStartedEvent(LLMEventBase):
     """

     type: str = "llm_call_started"
-    messages: Union[str, List[Dict[str, Any]]]
+    messages: Optional[Union[str, List[Dict[str, Any]]]] = None
     tools: Optional[List[dict]] = None
     callbacks: Optional[List[Any]] = None
     available_functions: Optional[Dict[str, Any]] = None
@@ -58,10 +58,11 @@
 class LLMCallCompletedEvent(LLMEventBase):
     """Event emitted when a LLM call completes"""

     type: str = "llm_call_completed"
+    messages: Optional[Union[str, List[Dict[str, Any]]]] = None
     response: Any
     call_type: LLMCallType


 class LLMCallFailedEvent(LLMEventBase):
     """Event emitted when a LLM call fails"""
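
Note: below is a minimal sketch of how a downstream listener could consume the
new "messages" field, assuming the crewai_event_bus.on decorator exposed by
crewai.utilities.events; the handler name and print-based logging are
illustrative only, not part of this patch:

    from crewai.utilities.events import crewai_event_bus
    from crewai.utilities.events.llm_events import LLMCallCompletedEvent

    @crewai_event_bus.on(LLMCallCompletedEvent)
    def log_llm_completion(source, event: LLMCallCompletedEvent) -> None:
        # messages may be None (e.g. tool-call completions), a raw prompt
        # string, or a list of chat-format dicts with "role"/"content" keys.
        if isinstance(event.messages, list):
            for message in event.messages:
                print(f"{message.get('role')}: {message.get('content')}")
        elif event.messages is not None:
            print(event.messages)
        print(f"response ({event.call_type.value}): {event.response}")

Because messages defaults to None on the event, existing emitters that do not
pass it (such as the TOOL_CALL path above) keep working unchanged.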