feat: Add LLM call events for improved observability (#2214)

* feat: Add LLM call events for improved observability

- Introduce new LLM call events: LLMCallStartedEvent, LLMCallCompletedEvent, and LLMCallFailedEvent (sketched after this list)
- Emit events for LLM calls and tool calls to provide better tracking and debugging
- Add event handling in the LLM class to track call lifecycle
- Update event bus to support new LLM-related events
- Add test cases to validate LLM event emissions
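
For orientation, here is a minimal sketch of the shapes these events could take, inferred from the assertions in the test diff below. Everything beyond the class names, the `type` strings, `response: Any`, and the `error` field is an assumption, not the actual crewai definition:

```python
from enum import Enum
from typing import Any, Optional

from pydantic import BaseModel


class LLMCallType(Enum):
    # Assumed members; the diff only confirms the enum's name.
    LLM_CALL = "llm_call"
    TOOL_CALL = "tool_call"


class LLMCallStartedEvent(BaseModel):
    type: str = "llm_call_started"
    messages: Any = None  # hypothetical field for the prompt/messages


class LLMCallCompletedEvent(BaseModel):
    type: str = "llm_call_completed"
    response: Any = None  # typed Any (see the refactor note below)
    call_type: Optional[LLMCallType] = None


class LLMCallFailedEvent(BaseModel):
    type: str = "llm_call_failed"
    error: str = ""  # the tests below assert on this field
```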

* feat: Add event handling for LLM call lifecycle events

- Implement event listeners for LLM call events in EventListener
- Add logging for LLM call start, completion, and failure events (see the sketch after this list)
- Import and register new LLM-specific event types
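
A minimal sketch of what such log-emitting handlers could look like, using the same `@crewai_event_bus.on(...)` decorator the tests below use; the bus import path and the log messages are assumptions, not the actual EventListener code:

```python
import logging

from crewai.utilities.events import crewai_event_bus  # import path assumed
from crewai.utilities.events.llm_events import (
    LLMCallCompletedEvent,
    LLMCallFailedEvent,
    LLMCallStartedEvent,
)

logger = logging.getLogger(__name__)


@crewai_event_bus.on(LLMCallStartedEvent)
def log_llm_call_started(source, event):
    # Fires before the underlying provider request is made.
    logger.info("LLM call started")


@crewai_event_bus.on(LLMCallCompletedEvent)
def log_llm_call_completed(source, event):
    # Fires once the call returns successfully.
    logger.info("LLM call completed")


@crewai_event_bus.on(LLMCallFailedEvent)
def log_llm_call_failed(source, event):
    # Fires when the call raises; the tests confirm `event.error`.
    logger.error("LLM call failed: %s", event.error)
```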

* chore: Reduce logging verbosity

* refactor: Update LLM event response type to support Any

* refactor: Simplify LLM call completed event emission

Remove unnecessary LLMCallType conversion when emitting LLMCallCompletedEvent
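
Roughly, as a hypothetical before/after (the real emission site in `crewai.llm` may differ; `source`, `response`, and `call_type` are placeholder names, reusing the event classes sketched above):

```python
# Before: call_type was redundantly re-wrapped even when it was
# already an LLMCallType:
#   LLMCallCompletedEvent(response=response, call_type=LLMCallType(call_type))
# After: the value is passed through unchanged:
crewai_event_bus.emit(
    source, LLMCallCompletedEvent(response=response, call_type=call_type)
)
```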

* refactor: Update LLM event docstrings for clarity

Improve docstrings for LLM call events to more accurately describe their purpose and lifecycle

* feat: Add LLMCallFailedEvent emission for tool execution errors

Enhance error handling by emitting a specific event when tool execution fails during LLM calls.
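
A rough sketch of that failure path, assuming the bus exposes an `emit(source, event)` method as used in the sketches above (the wrapper below is a placeholder, not crewai's actual tool-execution code):

```python
def execute_tool_with_events(source, tool_fn, *args, **kwargs):
    # Placeholder wrapper: surface tool failures as LLMCallFailedEvent,
    # then re-raise so callers still see the original exception.
    try:
        return tool_fn(*args, **kwargs)
    except Exception as e:
        crewai_event_bus.emit(source, LLMCallFailedEvent(error=str(e)))
        raise
```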

Author: Lorenze Jay
Date: 2025-02-24 12:17:44 -08:00
Committed by: Brandon Hancock
Parent: 6fb25a1af7
Commit: 70ab4ad003
8 changed files with 365 additions and 4 deletions


@@ -1,6 +1,5 @@
 import json
 from datetime import datetime
-from unittest.mock import MagicMock, patch
-
+from unittest.mock import patch
 import pytest
 from pydantic import Field
@@ -9,6 +8,7 @@ from crewai.agent import Agent
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.crew import Crew
 from crewai.flow.flow import Flow, listen, start
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.tool_usage import ToolUsage
@@ -31,6 +31,12 @@ from crewai.utilities.events.flow_events import (
     MethodExecutionFailedEvent,
     MethodExecutionStartedEvent,
 )
+from crewai.utilities.events.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+    LLMCallType,
+)
 from crewai.utilities.events.task_events import (
     TaskCompletedEvent,
     TaskFailedEvent,
@@ -495,3 +501,43 @@ def test_flow_emits_method_execution_failed_event():
     assert received_events[0].flow_name == "TestFlow"
     assert received_events[0].type == "method_execution_failed"
     assert received_events[0].error == error
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_llm_emits_call_started_event():
+    received_events = []
+
+    @crewai_event_bus.on(LLMCallStartedEvent)
+    def handle_llm_call_started(source, event):
+        received_events.append(event)
+
+    @crewai_event_bus.on(LLMCallCompletedEvent)
+    def handle_llm_call_completed(source, event):
+        received_events.append(event)
+
+    llm = LLM(model="gpt-4o-mini")
+    llm.call("Hello, how are you?")
+
+    assert len(received_events) == 2
+    assert received_events[0].type == "llm_call_started"
+    assert received_events[1].type == "llm_call_completed"
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_llm_emits_call_failed_event():
+    received_events = []
+
+    @crewai_event_bus.on(LLMCallFailedEvent)
+    def handle_llm_call_failed(source, event):
+        received_events.append(event)
+
+    error_message = "Simulated LLM call failure"
+    with patch.object(LLM, "_call_llm", side_effect=Exception(error_message)):
+        llm = LLM(model="gpt-4o-mini")
+        with pytest.raises(Exception) as exc_info:
+            llm.call("Hello, how are you?")
+        assert str(exc_info.value) == error_message
+
+    assert len(received_events) == 1
+    assert received_events[0].type == "llm_call_failed"
+    assert received_events[0].error == error_message