feat: propagate is_litellm flag through LLMCallCompletedEvent

Add an is_litellm field to LLMCallCompletedEvent so downstream consumers
can tell whether a completed call was routed through LiteLLM. The field
defaults to False, keeping existing emitters and listeners backward
compatible.
commit ea610af201
parent 6efa142e22
Author: Lucas Gomide
Date:   2026-04-10 10:32:21 -03:00

4 changed files with 24 additions and 0 deletions
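
For context on the consumer side, a listener might branch on the new flag
like this. This is a minimal sketch: the import paths and the decorator
follow crewai's documented event-bus listener pattern, but may differ by
version, and the handler name is illustrative.

from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.llm_events import LLMCallCompletedEvent


@crewai_event_bus.on(LLMCallCompletedEvent)
def on_llm_call_completed(source, event):
    # The new flag distinguishes LiteLLM-routed calls from native-client calls.
    backend = "litellm" if event.is_litellm else "native"
    print(f"LLM call completed via {backend} backend; usage={event.usage}")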

View File

@@ -58,6 +58,7 @@ class LLMCallCompletedEvent(LLMEventBase):
     response: Any
     call_type: LLMCallType
     usage: dict[str, Any] | None = None
+    is_litellm: bool = False


 class LLMCallFailedEvent(LLMEventBase):
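
Because the field is declared with a default, emitters that never set it
stay valid. A standalone sketch of that pattern, using a simplified model
rather than the real event class:

from typing import Any

from pydantic import BaseModel


class CompletedEventSketch(BaseModel):
    """Simplified stand-in for LLMCallCompletedEvent."""

    response: Any
    usage: dict[str, Any] | None = None
    is_litellm: bool = False  # new field; the default keeps old emitters valid


event = CompletedEventSketch(response="hello")
assert event.is_litellm is False  # omitted by the caller -> default applies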

View File

@@ -1957,6 +1957,7 @@ class LLM(BaseLLM):
                 model=self.model,
                 call_id=get_current_call_id(),
                 usage=usage,
+                is_litellm=self.is_litellm,
             ),
         )

View File

@@ -479,6 +479,7 @@ class BaseLLM(BaseModel, ABC):
                 model=self.model,
                 call_id=get_current_call_id(),
                 usage=usage,
+                is_litellm=self.is_litellm,
             ),
         )
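
The same one-line change lands in both LLM and BaseLLM: each emitter
copies its instance-level is_litellm onto the outgoing event. A
self-contained sketch of that pattern, with underscore-prefixed names
that are illustrative rather than the real classes:

from dataclasses import dataclass
from typing import Any


@dataclass
class _CompletedEvent:
    response: Any
    is_litellm: bool = False


class _LLMSketch:
    def __init__(self, is_litellm: bool = False) -> None:
        self.is_litellm = is_litellm

    def _emit_call_completed_event(self, response: Any) -> _CompletedEvent:
        # Same move as both diffs above: thread the instance flag into the event.
        return _CompletedEvent(response=response, is_litellm=self.is_litellm)


assert _LLMSketch(is_litellm=True)._emit_call_completed_event("hi").is_litellm is True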

View File

@@ -175,6 +175,27 @@ class TestEmitCallCompletedEventPassesUsage:
         assert isinstance(event, LLMCallCompletedEvent)
         assert event.usage is None

+    def test_is_litellm_is_passed_to_event(self, mock_emit, llm):
+        llm.is_litellm = True
+        llm._emit_call_completed_event(
+            response="hello",
+            call_type=LLMCallType.LLM_CALL,
+            messages="test prompt",
+        )
+        event = mock_emit.call_args[1]["event"]
+        assert event.is_litellm is True
+
+    def test_is_litellm_defaults_to_false(self, mock_emit, llm):
+        llm._emit_call_completed_event(
+            response="hello",
+            call_type=LLMCallType.LLM_CALL,
+            messages="test prompt",
+        )
+        event = mock_emit.call_args[1]["event"]
+        assert event.is_litellm is False
+
+
 class TestUsageMetricsNewFields:
     def test_add_usage_metrics_aggregates_reasoning_and_cache_creation(self):
         from crewai.types.usage_metrics import UsageMetrics