fix: allow messages to be empty on LLMCallCompletedEvent

Lucas Gomide
2025-07-11 14:00:42 -03:00
parent 6f0ed6642b
commit 064997464e
4 changed files with 8 additions and 7 deletions
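
In short: `LLMCallCompletedEvent.messages` becomes optional, so the event can be emitted even when no message history is available at the call site. A minimal sketch of the new shape and its effect (field names are taken from the diff below; the `LLMEventBase` parent and the real `LLMCallType` enum live in crewai, so stand-ins are used here):

```python
from enum import Enum
from typing import Any

from pydantic import BaseModel


class LLMCallType(str, Enum):
    # Stand-in for crewai's enum; the member name is an assumption.
    LLM_CALL = "llm_call"


class LLMCallCompletedEvent(BaseModel):  # the real class extends LLMEventBase
    type: str = "llm_call_completed"
    messages: str | list[dict[str, Any]] | None = None  # optional after this fix
    response: Any
    call_type: LLMCallType


# Before this commit, omitting `messages` raised a pydantic ValidationError;
# now the event validates with messages defaulting to None.
event = LLMCallCompletedEvent(response="ok", call_type=LLMCallType.LLM_CALL)
assert event.messages is None
```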


@@ -1,5 +1,5 @@
 from datetime import datetime
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, Optional
 from collections.abc import Sequence
@@ -149,7 +149,7 @@ class EvaluationTraceCallback(BaseEventListener):
"end_time": None
}
def on_llm_call_end(self, messages: Union[str, List[Dict[str, Any]]], response: Any):
def on_llm_call_end(self, messages: str | list[dict[str, Any]] | None, response: Any):
if not self.current_agent_id or not self.current_task_id:
return

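Because `messages` can now be `None`, consumers like this callback have to tolerate the missing value rather than assume a string or a list. A hedged sketch of a None-safe handler (the agent/task guard mirrors the diff; the normalization step is an assumption about how a consumer might cope, not the actual implementation):

```python
from typing import Any


class EvaluationTraceCallback:
    current_agent_id: str | None = None
    current_task_id: str | None = None

    def on_llm_call_end(self, messages: str | list[dict[str, Any]] | None, response: Any) -> None:
        if not self.current_agent_id or not self.current_task_id:
            return
        # Assumed normalization: treat a missing history as empty and wrap
        # a bare string prompt so downstream code always sees a list.
        if messages is None:
            messages = []
        elif isinstance(messages, str):
            messages = [{"role": "user", "content": messages}]
        print(f"LLM call ended with {len(messages)} message(s)")  # trace placeholder
```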

@@ -508,7 +508,6 @@ class LLM(BaseLLM):
                 # Enable tool calls using streaming
                 if "tool_calls" in delta:
                     tool_calls = delta["tool_calls"]
-                    if tool_calls:
                     result = self._handle_streaming_tool_calls(
                         tool_calls=tool_calls,
@@ -517,6 +516,7 @@ class LLM(BaseLLM):
                         from_task=from_task,
                         from_agent=from_agent,
                     )
+                    if result is not None:
                         chunk_content = result
@@ -861,6 +861,7 @@ class LLM(BaseLLM):
                     tool_args=function_args,
                 ),
             )
+            result = fn(**function_args)
             crewai_event_bus.emit(
                 self,
@@ -991,7 +992,7 @@ class LLM(BaseLLM):
             logging.error(f"LiteLLM call failed: {str(e)}")
             raise

-    def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None, messages: Optional[Union[str, List[Dict[str, Any]]]] = None):
+    def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None, messages: str | list[dict[str, Any]] | None = None):
         """Handle the events for the LLM call.

         Args:

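Taken together, the two streaming hunks swap a pre-call guard for a post-call one: `_handle_streaming_tool_calls` now always runs, and the chunk content is only replaced when the handler actually returned something. A standalone sketch of that control flow (names mirror the diff; the handler body is a stand-in for the real tool execution):

```python
from typing import Any


def handle_streaming_tool_calls(tool_calls: list[dict[str, Any]]) -> str | None:
    # Stand-in: the real method accumulates tool-call fragments, runs the
    # tool, and returns its output, or None when there is nothing to emit.
    return "tool output" if tool_calls else None


def process_delta(delta: dict[str, Any], chunk_content: str) -> str:
    if "tool_calls" in delta:
        tool_calls = delta["tool_calls"]
        # No `if tool_calls:` pre-check anymore; the handler decides.
        result = handle_streaming_tool_calls(tool_calls)
        if result is not None:  # the guard moved to the result
            chunk_content = result
    return chunk_content


assert process_delta({"tool_calls": []}, "text") == "text"
assert process_delta({"tool_calls": [{"id": "1"}]}, "text") == "tool output"
```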

@@ -58,7 +58,7 @@ class LLMCallCompletedEvent(LLMEventBase):
"""Event emitted when a LLM call completes"""
type: str = "llm_call_completed"
messages: Union[str, List[Dict[str, Any]]]
messages: str | list[dict[str, Any]] | None = None
response: Any
call_type: LLMCallType


@@ -601,7 +601,7 @@ def test_handle_streaming_tool_calls(get_weather_tool_schema, mock_emit):
 def test_handle_streaming_tool_calls_with_error(get_weather_tool_schema, mock_emit):
     def get_weather_error(location):
         raise Exception("Error")

     llm = LLM(model="openai/gpt-4o", stream=True)
     response = llm.call(
         messages=[
@@ -619,7 +619,7 @@ def test_handle_streaming_tool_calls_with_error(get_weather_tool_schema, mock_emit):
         expected_stream_chunk=9,
         expected_completed_llm_call=1,
         expected_tool_usage_started=1,
         expected_tool_usage_error=1,
         expected_final_chunk_result=expected_final_chunk_result,
     )
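
For reference, the error-path test drives a tool that always raises and then checks the emitted event counts. A hedged sketch of the same setup outside the test harness (the `crewai.llm.LLM` import path and the `tools`/`available_functions` keywords are assumed from crewai's public API; executing it needs a valid OpenAI key):

```python
from crewai.llm import LLM

# A standard OpenAI-style tool schema, simplified from the test fixture.
get_weather_tool_schema = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a location.",
        "parameters": {
            "type": "object",
            "properties": {"location": {"type": "string"}},
            "required": ["location"],
        },
    },
}


def get_weather_error(location):
    raise Exception("Error")  # always fails, exercising the tool-usage-error event


llm = LLM(model="openai/gpt-4o", stream=True)
response = llm.call(
    messages=[{"role": "user", "content": "What is the weather in Boston?"}],
    tools=[get_weather_tool_schema],
    available_functions={"get_weather": get_weather_error},
)
```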