Compare commits


10 Commits

Author SHA1 Message Date
Lorenze Jay
502351391a Merge branch 'main' into add/llm-event-emitter 2025-02-24 11:53:02 -08:00
Lorenze Jay
81ad2ab655 feat: Add LLMCallFailedEvent emission for tool execution errors
Enhance error handling by emitting a specific event when tool execution fails during LLM calls
2025-02-24 11:52:50 -08:00
Brandon Hancock (bhancock_ai)
78797c64b0 fix reset memory issue (#2182)
Co-authored-by: Lorenze Jay <63378463+lorenzejay@users.noreply.github.com>
2025-02-24 14:51:58 -05:00
Lorenze Jay
cefc57e2a3 refactor: Update LLM event docstrings for clarity
Improve docstrings for LLM call events to more accurately describe their purpose and lifecycle
2025-02-24 11:48:20 -08:00
Lorenze Jay
46ac490009 refactor: Simplify LLM call completed event emission
Remove unnecessary LLMCallType conversion when emitting LLMCallCompletedEvent
2025-02-24 11:47:48 -08:00
Lorenze Jay
b37fc94936 refactor: Update LLM event response type to support Any 2025-02-24 11:44:38 -08:00
Lorenze Jay
8434a11b10 less log 2025-02-24 11:33:06 -08:00
Lorenze Jay
e1c4947ec6 feat: Add event handling for LLM call lifecycle events
- Implement event listeners for LLM call events in EventListener
- Add logging for LLM call start, completion, and failure events
- Import and register new LLM-specific event types
2025-02-24 11:28:33 -08:00
Lorenze Jay
b9d2884bc8 feat: Add LLM call events for improved observability
- Introduce new LLM call events: LLMCallStartedEvent, LLMCallCompletedEvent, and LLMCallFailedEvent
- Emit events for LLM calls and tool calls to provide better tracking and debugging
- Add event handling in the LLM class to track call lifecycle
- Update event bus to support new LLM-related events
- Add test cases to validate LLM event emissions
2025-02-24 11:26:18 -08:00
Brandon Hancock (bhancock_ai)
8a7584798b Better support async flows (#2193)
* Better support async

* Drop coroutine
2025-02-24 10:25:30 -05:00
12 changed files with 414 additions and 214 deletions

View File

@@ -1278,11 +1278,11 @@ class Crew(BaseModel):
     def _reset_all_memories(self) -> None:
         """Reset all available memory systems."""
         memory_systems = [
-            ("short term", self._short_term_memory),
-            ("entity", self._entity_memory),
-            ("long term", self._long_term_memory),
-            ("task output", self._task_output_handler),
-            ("knowledge", self.knowledge),
+            ("short term", getattr(self, "_short_term_memory", None)),
+            ("entity", getattr(self, "_entity_memory", None)),
+            ("long term", getattr(self, "_long_term_memory", None)),
+            ("task output", getattr(self, "_task_output_handler", None)),
+            ("knowledge", getattr(self, "knowledge", None)),
         ]
         for name, system in memory_systems:
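The switch to getattr with a None default means a Crew whose memory attributes were never initialized no longer raises AttributeError during reset; a missing system simply shows up as None and can be skipped. A minimal standalone sketch of the failure mode (CrewStub is hypothetical, not part of the changeset):

    class CrewStub:
        """Stand-in object with no memory attributes configured."""

    stub = CrewStub()

    # Direct attribute access raises if the attribute was never set:
    #   stub._short_term_memory  ->  AttributeError
    # The getattr form degrades gracefully instead:
    system = getattr(stub, "_short_term_memory", None)
    if system is None:
        print("short term memory not configured; skipping reset")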

View File

@@ -713,16 +713,35 @@ class Flow(Generic[T], metaclass=FlowMeta):
             raise TypeError(f"State must be dict or BaseModel, got {type(self._state)}")
 
     def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
-        """Start the flow execution.
+        """
+        Start the flow execution in a synchronous context.
+
+        This method wraps kickoff_async so that all state initialization and event
+        emission is handled in the asynchronous method.
+        """
+
+        async def run_flow():
+            return await self.kickoff_async(inputs)
+
+        return asyncio.run(run_flow())
+
+    @init_flow_main_trace
+    async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
+        """
+        Start the flow execution asynchronously.
+
+        This method performs state restoration (if an 'id' is provided and persistence is available)
+        and updates the flow state with any additional inputs. It then emits the FlowStartedEvent,
+        logs the flow startup, and executes all start methods. Once completed, it emits the
+        FlowFinishedEvent and returns the final output.
 
         Args:
-            inputs: Optional dictionary containing input values and potentially a state ID to restore
-        """
-        # Handle state restoration if ID is provided in inputs
-        if inputs and "id" in inputs and self._persistence is not None:
-            restore_uuid = inputs["id"]
-            stored_state = self._persistence.load_state(restore_uuid)
+            inputs: Optional dictionary containing input values and/or a state ID for restoration.
+
+        Returns:
+            The final output from the flow, which is the result of the last executed method.
+        """
+        if inputs:
+            # Override the id in the state if it exists in inputs
             if "id" in inputs:
                 if isinstance(self._state, dict):
@@ -730,24 +749,27 @@ class Flow(Generic[T], metaclass=FlowMeta):
                 elif isinstance(self._state, BaseModel):
                     setattr(self._state, "id", inputs["id"])
 
-            if stored_state:
-                self._log_flow_event(
-                    f"Loading flow state from memory for UUID: {restore_uuid}",
-                    color="yellow",
-                )
-                # Restore the state
-                self._restore_state(stored_state)
-            else:
-                self._log_flow_event(
-                    f"No flow state found for UUID: {restore_uuid}", color="red"
-                )
+            # If persistence is enabled, attempt to restore the stored state using the provided id.
+            if "id" in inputs and self._persistence is not None:
+                restore_uuid = inputs["id"]
+                stored_state = self._persistence.load_state(restore_uuid)
+                if stored_state:
+                    self._log_flow_event(
+                        f"Loading flow state from memory for UUID: {restore_uuid}",
+                        color="yellow",
+                    )
+                    self._restore_state(stored_state)
+                else:
+                    self._log_flow_event(
+                        f"No flow state found for UUID: {restore_uuid}", color="red"
+                    )
 
-            # Apply any additional inputs after restoration
+            # Update state with any additional inputs (ignoring the 'id' key)
             filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
             if filtered_inputs:
                 self._initialize_state(filtered_inputs)
 
-        # Start flow execution
+        # Emit FlowStartedEvent and log the start of the flow.
         crewai_event_bus.emit(
             self,
             FlowStartedEvent(
@@ -760,27 +782,18 @@ class Flow(Generic[T], metaclass=FlowMeta):
             f"Flow started with ID: {self.flow_id}", color="bold_magenta"
         )
 
-        if inputs is not None and "id" not in inputs:
-            self._initialize_state(inputs)
-
-        async def run_flow():
-            return await self.kickoff_async()
-
-        return asyncio.run(run_flow())
-
-    @init_flow_main_trace
-    async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
         if not self._start_methods:
             raise ValueError("No start method defined")
 
+        # Execute all start methods concurrently.
         tasks = [
             self._execute_start_method(start_method)
             for start_method in self._start_methods
         ]
         await asyncio.gather(*tasks)
 
         final_output = self._method_outputs[-1] if self._method_outputs else None
 
+        # Emit FlowFinishedEvent after all processing is complete.
         crewai_event_bus.emit(
             self,
             FlowFinishedEvent(
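After this refactor, kickoff is a thin synchronous wrapper and all state restoration and event emission live in kickoff_async. A rough usage sketch, assuming only the Flow/start decorator API that the test file further below already imports:

    from crewai.flow.flow import Flow, start

    class GreetingFlow(Flow):
        @start()
        def say_hello(self):
            return "hello"

    flow = GreetingFlow()
    result = flow.kickoff()  # wraps kickoff_async via asyncio.run
    print(result)            # output of the last executed method: "hello"

Because kickoff calls asyncio.run, it cannot be invoked from inside an already-running event loop; async callers would await kickoff_async directly instead.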

View File

@@ -21,8 +21,12 @@ from typing (
 from dotenv import load_dotenv
 from pydantic import BaseModel
 
-logger = logging.getLogger(__name__)
-
+from crewai.utilities.events.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+    LLMCallType,
+)
 from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent
 
 with warnings.catch_warnings():
@@ -135,9 +139,6 @@ def suppress_warnings():
 class LLM:
-    # Constants for model identification
-    MISTRAL_IDENTIFIERS = {'mistral', 'mixtral'}
-
     def __init__(
         self,
         model: str,
@@ -264,6 +265,15 @@ class LLM:
             >>> print(response)
             "The capital of France is Paris."
         """
+        crewai_event_bus.emit(
+            self,
+            event=LLMCallStartedEvent(
+                messages=messages,
+                tools=tools,
+                callbacks=callbacks,
+                available_functions=available_functions,
+            ),
+        )
         # Validate parameters before proceeding with the call.
         self._validate_call_params()
@@ -338,12 +348,13 @@ class LLM:
             # --- 4) If no tool calls, return the text response
             if not tool_calls or not available_functions:
+                self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL)
                 return text_response
 
             # --- 5) Handle the tool call
             tool_call = tool_calls[0]
             function_name = tool_call.function.name
-            print("function_name", function_name)
 
             if function_name in available_functions:
                 try:
                     function_args = json.loads(tool_call.function.arguments)
@@ -355,6 +366,7 @@ class LLM:
                 try:
                     # Call the actual tool function
                     result = fn(**function_args)
+                    self._handle_emit_call_events(result, LLMCallType.TOOL_CALL)
                     return result
                 except Exception as e:
@@ -370,6 +382,12 @@ class LLM:
                             error=str(e),
                         ),
                     )
+                    crewai_event_bus.emit(
+                        self,
+                        event=LLMCallFailedEvent(
+                            error=f"Tool execution error: {str(e)}"
+                        ),
+                    )
                     return text_response
             else:
@@ -379,12 +397,28 @@ class LLM:
                 return text_response
 
         except Exception as e:
+            crewai_event_bus.emit(
+                self,
+                event=LLMCallFailedEvent(error=str(e)),
+            )
             if not LLMContextLengthExceededException(
                 str(e)
             )._is_context_limit_error(str(e)):
                 logging.error(f"LiteLLM call failed: {str(e)}")
             raise
 
+    def _handle_emit_call_events(self, response: Any, call_type: LLMCallType):
+        """Handle the events for the LLM call.
+
+        Args:
+            response (Any): The response from the LLM call.
+            call_type (LLMCallType): The type of call, either "tool_call" or "llm_call".
+        """
+        crewai_event_bus.emit(
+            self,
+            event=LLMCallCompletedEvent(response=response, call_type=call_type),
+        )
+
     def _format_messages_for_provider(
         self, messages: List[Dict[str, str]]
     ) -> List[Dict[str, str]]:
def _format_messages_for_provider(
self, messages: List[Dict[str, str]]
) -> List[Dict[str, str]]:
@@ -397,11 +431,9 @@ class LLM:
         Returns:
             List of formatted messages according to provider requirements.
             For Anthropic models, ensures first message has 'user' role.
-            For Mistral models, converts 'assistant' roles to 'user' roles.
 
         Raises:
             TypeError: If messages is None or contains invalid message format.
-            Exception: If message formatting fails for any provider-specific reason.
         """
         if messages is None:
             raise TypeError("Messages cannot be None")
@@ -413,19 +445,6 @@ class LLM:
                     "Invalid message format. Each message must be a dict with 'role' and 'content' keys"
                 )
 
-        # Handle Mistral role requirements
-        if any(identifier in self.model.lower() for identifier in self.MISTRAL_IDENTIFIERS):
-            try:
-                from copy import deepcopy
-                messages_copy = deepcopy(messages)
-                for message in messages_copy:
-                    if message.get("role") == "assistant":
-                        message["role"] = "user"
-                return messages_copy
-            except Exception as e:
-                logger.error(f"Error formatting messages for Mistral: {str(e)}")
-                raise
-
         if not self.is_anthropic:
             return messages
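Taken together, llm.py now emits LLMCallStartedEvent before parameter validation, LLMCallCompletedEvent via _handle_emit_call_events for both plain completions and tool calls, and LLMCallFailedEvent on tool or LiteLLM errors. A sketch of subscribing to these events from application code; the bus import path is an assumption inferred from the `crewai_event_bus` name used throughout the diff:

    from crewai.llm import LLM
    from crewai.utilities.events.crewai_event_bus import crewai_event_bus
    from crewai.utilities.events.llm_events import (
        LLMCallCompletedEvent,
        LLMCallFailedEvent,
        LLMCallStartedEvent,
    )

    @crewai_event_bus.on(LLMCallStartedEvent)
    def on_started(source, event):
        print(f"started: {event.messages!r}")

    @crewai_event_bus.on(LLMCallCompletedEvent)
    def on_completed(source, event):
        print(f"completed ({event.call_type}): {event.response}")

    @crewai_event_bus.on(LLMCallFailedEvent)
    def on_failed(source, event):
        print(f"failed: {event.error}")

    llm = LLM(model="gpt-4o-mini")
    llm.call("Hello, how are you?")  # fires started + completed (or failed on error)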

View File

@@ -34,6 +34,7 @@ from .tool_usage_events import (
     ToolUsageEvent,
     ToolValidateInputErrorEvent,
 )
+from .llm_events import LLMCallCompletedEvent, LLMCallFailedEvent, LLMCallStartedEvent
 
 # events
 from .event_listener import EventListener

View File

@@ -4,6 +4,11 @@ from crewai.telemetry.telemetry import Telemetry
 from crewai.utilities import Logger
 from crewai.utilities.constants import EMITTER_COLOR
 from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.utilities.events.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+)
 
 from .agent_events import AgentExecutionCompletedEvent, AgentExecutionStartedEvent
 from .crew_events import (
@@ -253,5 +258,28 @@ class EventListener(BaseEventListener):
                 #
             )
 
+        # ----------- LLM EVENTS -----------
+
+        @crewai_event_bus.on(LLMCallStartedEvent)
+        def on_llm_call_started(source, event: LLMCallStartedEvent):
+            self.logger.log(
+                f"🤖 LLM Call Started",
+                event.timestamp,
+            )
+
+        @crewai_event_bus.on(LLMCallCompletedEvent)
+        def on_llm_call_completed(source, event: LLMCallCompletedEvent):
+            self.logger.log(
+                f"✅ LLM Call Completed",
+                event.timestamp,
+            )
+
+        @crewai_event_bus.on(LLMCallFailedEvent)
+        def on_llm_call_failed(source, event: LLMCallFailedEvent):
+            self.logger.log(
+                f"❌ LLM Call Failed: '{event.error}'",
+                event.timestamp,
+            )
+
 event_listener = EventListener()
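The built-in EventListener above only logs. Projects wanting different behavior can register their own handlers in the same style by subclassing BaseEventListener; a sketch, assuming setup_listeners receives the bus as shown in the pattern above, with send_alert as a hypothetical hook:

    from crewai.utilities.events.base_event_listener import BaseEventListener
    from crewai.utilities.events.llm_events import LLMCallFailedEvent

    class AlertingEventListener(BaseEventListener):
        def setup_listeners(self, crewai_event_bus):
            @crewai_event_bus.on(LLMCallFailedEvent)
            def on_llm_call_failed(source, event: LLMCallFailedEvent):
                # Forward failures to an alerting channel instead of just logging.
                send_alert(f"LLM call failed: {event.error}")  # send_alert: hypothetical

    alerting_listener = AlertingEventListener()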

View File

@@ -0,0 +1,36 @@
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+from crewai.utilities.events.base_events import CrewEvent
+
+
+class LLMCallType(Enum):
+    """Type of LLM call being made"""
+
+    TOOL_CALL = "tool_call"
+    LLM_CALL = "llm_call"
+
+
+class LLMCallStartedEvent(CrewEvent):
+    """Event emitted when a LLM call starts"""
+
+    type: str = "llm_call_started"
+    messages: Union[str, List[Dict[str, str]]]
+    tools: Optional[List[dict]] = None
+    callbacks: Optional[List[Any]] = None
+    available_functions: Optional[Dict[str, Any]] = None
+
+
+class LLMCallCompletedEvent(CrewEvent):
+    """Event emitted when a LLM call completes"""
+
+    type: str = "llm_call_completed"
+    response: Any
+    call_type: LLMCallType
+
+
+class LLMCallFailedEvent(CrewEvent):
+    """Event emitted when a LLM call fails"""
+
+    error: str
+    type: str = "llm_call_failed"
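Since these events are plain pydantic-style models built on CrewEvent, they can be constructed and inspected directly, which is handy in unit tests. A small sketch, assuming CrewEvent supplies defaults for any remaining fields such as a timestamp:

    from crewai.utilities.events.llm_events import LLMCallCompletedEvent, LLMCallType

    event = LLMCallCompletedEvent(
        response="The capital of France is Paris.",
        call_type=LLMCallType.LLM_CALL,
    )
    assert event.type == "llm_call_completed"
    assert event.call_type is LLMCallType.LLM_CALL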

View File

@@ -1,4 +1,4 @@
-from typing import Any, Optional
+from typing import Optional
 
 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.events.base_events import CrewEvent

View File

@@ -1,76 +0,0 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Use the dummy tool with param
''test''"}], "model": "mistral-large-latest", "stop": [], "tools": [{"type":
"function", "function": {"name": "dummy_tool", "description": "A simple test
tool.", "parameters": {"type": "object", "properties": {"param": {"type": "string",
"description": "A test parameter"}}, "required": ["param"]}}}]}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '372'
content-type:
- application/json
host:
- api.mistral.ai
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- x64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- Linux
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.7
method: POST
uri: https://api.mistral.ai/v1/chat/completions
response:
content: "{\n \"message\":\"Unauthorized\",\n \"request_id\":\"96ca5615d43f134988d0fc4b1ded1455\"\n}"
headers:
CF-RAY:
- 9158bb5adad376f1-SEA
Connection:
- keep-alive
Content-Length:
- '81'
Content-Type:
- application/json; charset=utf-8
Date:
- Fri, 21 Feb 2025 18:17:12 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=MGDKyTo6P8HCsRCn9L6BcLQuWlHhR_Oyx0OAG2lNook-1740161832-1.0.1.1-4TQjjEAQkY4UdlzBET20v1w7G87AU38G8amFRICHPql3I0aHI5pV3Bez0qKp6f3cBT351xkaHyInoOA6FeoJqQ;
path=/; expires=Fri, 21-Feb-25 18:47:12 GMT; domain=.mistral.ai; HttpOnly;
Secure; SameSite=None
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
www-authenticate:
- Key
x-kong-request-id:
- 96ca5615d43f134988d0fc4b1ded1455
x-kong-response-latency:
- '0'
http_version: HTTP/1.1
status_code: 401
version: 1

View File

@@ -13,84 +13,6 @@ from crewai.utilities.token_counter_callback import TokenCalcHandler
 # TODO: This test fails without print statement, which makes me think that something is happening asynchronously that we need to eventually fix and dive deeper into at a later date
-@pytest.mark.vcr(filter_headers=["authorization"])
-@pytest.mark.mistral
-class TestMistralLLM:
-    """Test suite for Mistral LLM functionality."""
-
-    @pytest.fixture
-    def mistral_llm(self):
-        """Fixture providing a Mistral LLM instance."""
-        return LLM(model="mistral/mistral-large-latest")
-
-    def test_mistral_role_handling(self, mistral_llm):
-        """
-        Verify that roles are handled correctly in various scenarios:
-        - Assistant roles are converted to user roles
-        - Original messages remain unchanged
-        - System messages are preserved
-        """
-        messages = [
-            {"role": "system", "content": "System message"},
-            {"role": "user", "content": "Test message"},
-            {"role": "assistant", "content": "Assistant response"}
-        ]
-        formatted_messages = mistral_llm._format_messages_for_provider(messages)
-
-        # Verify role conversions
-        assert any(msg["role"] == "user" for msg in formatted_messages if msg["content"] == "Assistant response")
-        assert not any(msg["role"] == "assistant" for msg in formatted_messages)
-        assert any(msg["role"] == "system" for msg in formatted_messages)
-        # Original messages should not be modified
-        assert any(msg["role"] == "assistant" for msg in messages)
-
-    def test_mistral_empty_messages(self, mistral_llm):
-        """Test handling of empty message list."""
-        messages = []
-        formatted_messages = mistral_llm._format_messages_for_provider(messages)
-        assert formatted_messages == []
-
-    def test_mistral_multiple_assistant_messages(self, mistral_llm):
-        """Test handling of multiple consecutive assistant messages."""
-        messages = [
-            {"role": "user", "content": "User 1"},
-            {"role": "assistant", "content": "Assistant 1"},
-            {"role": "assistant", "content": "Assistant 2"},
-            {"role": "user", "content": "User 2"}
-        ]
-        formatted_messages = mistral_llm._format_messages_for_provider(messages)
-
-        # All assistant messages should be converted to user
-        assert all(msg["role"] == "user" for msg in formatted_messages
-                   if msg["content"] in ["Assistant 1", "Assistant 2"])
-        # Original messages should not be modified
-        assert len([msg for msg in messages if msg["role"] == "assistant"]) == 2
-
-def test_mistral_role_handling():
-    """Test that Mistral LLM correctly handles role requirements."""
-    llm = LLM(model="mistral/mistral-large-latest")
-    messages = [
-        {"role": "system", "content": "System message"},
-        {"role": "user", "content": "User message"},
-        {"role": "assistant", "content": "Assistant message"}
-    ]
-
-    # Get the formatted messages
-    formatted_messages = llm._format_messages_for_provider(messages)
-
-    # Verify that assistant role was changed to user for Mistral
-    assert any(msg["role"] == "user" for msg in formatted_messages if msg["content"] == "Assistant message")
-    assert not any(msg["role"] == "assistant" for msg in formatted_messages)
-
-    # Original messages should not be modified
-    assert any(msg["role"] == "assistant" for msg in messages)
-
 @pytest.mark.vcr(filter_headers=["authorization"])
 def test_llm_callback_replacement():
     llm1 = LLM(model="gpt-4o-mini")

View File

@@ -0,0 +1,103 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Hello, how are you?"}], "model":
"gpt-4o-mini", "stop": []}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '102'
content-type:
- application/json
cookie:
- _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000;
__cf_bm=fU6K5KZoDmgcEuF8_yWAYKUO5fKHh6q5.wDPnna393g-1740424913-1.0.1.1-2iOaq3JVGWs439V0HxJee0IC9HdJm7dPkeJorD.AGw0YwkngRPM8rrTzn_7ht1BkbOauEezj.wPKcBz18gIYUg
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-B4YLA2SrC2rwdVQ3U87G5a0P5lsLw\",\n \"object\":
\"chat.completion\",\n \"created\": 1740425016,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! I'm just a computer program, so
I don't have feelings, but I'm here and ready to help you. How can I assist
you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
13,\n \"completion_tokens\": 30,\n \"total_tokens\": 43,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_709714d124\"\n}\n"
headers:
CF-RAY:
- 9171d4c0ed44236e-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 24 Feb 2025 19:23:38 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '1954'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999978'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_ea2703502b8827e4297cd2a7bae9d9c8
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -0,0 +1,108 @@
interactions:
- request:
body: '{"messages": [{"role": "user", "content": "Hello, how are you?"}], "model":
"gpt-4o-mini", "stop": []}'
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
connection:
- keep-alive
content-length:
- '102'
content-type:
- application/json
cookie:
- _cfuvid=GefCcEtb_Gem93E4a9Hvt3Xyof1YQZVJAXBb9I6pEUs-1739398417375-0.0.1.1-604800000
host:
- api.openai.com
user-agent:
- OpenAI/Python 1.61.0
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 1.61.0
x-stainless-raw-response:
- 'true'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.12.8
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
content: "{\n \"id\": \"chatcmpl-B4YJU8IWKGyBQtAyPDRd3SFI2flYR\",\n \"object\":
\"chat.completion\",\n \"created\": 1740424912,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
\ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
\"assistant\",\n \"content\": \"Hello! I'm just a computer program, so
I don't have feelings, but I'm here and ready to help you. How can I assist
you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
\ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
13,\n \"completion_tokens\": 30,\n \"total_tokens\": 43,\n \"prompt_tokens_details\":
{\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
{\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
\"default\",\n \"system_fingerprint\": \"fp_7fcd609668\"\n}\n"
headers:
CF-RAY:
- 9171d230d8ed7ae0-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 24 Feb 2025 19:21:53 GMT
Server:
- cloudflare
Set-Cookie:
- __cf_bm=fU6K5KZoDmgcEuF8_yWAYKUO5fKHh6q5.wDPnna393g-1740424913-1.0.1.1-2iOaq3JVGWs439V0HxJee0IC9HdJm7dPkeJorD.AGw0YwkngRPM8rrTzn_7ht1BkbOauEezj.wPKcBz18gIYUg;
path=/; expires=Mon, 24-Feb-25 19:51:53 GMT; domain=.api.openai.com; HttpOnly;
Secure; SameSite=None
- _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000;
path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
Transfer-Encoding:
- chunked
X-Content-Type-Options:
- nosniff
access-control-expose-headers:
- X-Request-ID
alt-svc:
- h3=":443"; ma=86400
cf-cache-status:
- DYNAMIC
openai-organization:
- crewai-iuxna1
openai-processing-ms:
- '993'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
x-ratelimit-limit-requests:
- '30000'
x-ratelimit-limit-tokens:
- '150000000'
x-ratelimit-remaining-requests:
- '29999'
x-ratelimit-remaining-tokens:
- '149999978'
x-ratelimit-reset-requests:
- 2ms
x-ratelimit-reset-tokens:
- 0s
x-request-id:
- req_d9c4d49185e97b1797061efc1e55d811
http_version: HTTP/1.1
status_code: 200
version: 1

View File

@@ -1,6 +1,5 @@
 import json
 from datetime import datetime
-from unittest.mock import MagicMock, patch
+from unittest.mock import patch
 
 import pytest
 from pydantic import Field
@@ -9,6 +8,7 @@ from crewai.agent import Agent
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.crew import Crew
 from crewai.flow.flow import Flow, listen, start
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.tool_usage import ToolUsage
@@ -31,6 +31,12 @@ from crewai.utilities.events.flow_events import (
     MethodExecutionFailedEvent,
     MethodExecutionStartedEvent,
 )
+from crewai.utilities.events.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+    LLMCallType,
+)
 from crewai.utilities.events.task_events import (
     TaskCompletedEvent,
     TaskFailedEvent,
@@ -495,3 +501,43 @@ def test_flow_emits_method_execution_failed_event():
     assert received_events[0].flow_name == "TestFlow"
     assert received_events[0].type == "method_execution_failed"
     assert received_events[0].error == error
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_llm_emits_call_started_event():
+    received_events = []
+
+    @crewai_event_bus.on(LLMCallStartedEvent)
+    def handle_llm_call_started(source, event):
+        received_events.append(event)
+
+    @crewai_event_bus.on(LLMCallCompletedEvent)
+    def handle_llm_call_completed(source, event):
+        received_events.append(event)
+
+    llm = LLM(model="gpt-4o-mini")
+    llm.call("Hello, how are you?")
+
+    assert len(received_events) == 2
+    assert received_events[0].type == "llm_call_started"
+    assert received_events[1].type == "llm_call_completed"
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_llm_emits_call_failed_event():
+    received_events = []
+
+    @crewai_event_bus.on(LLMCallFailedEvent)
+    def handle_llm_call_failed(source, event):
+        received_events.append(event)
+
+    error_message = "Simulated LLM call failure"
+    with patch.object(LLM, "_call_llm", side_effect=Exception(error_message)):
+        llm = LLM(model="gpt-4o-mini")
+        with pytest.raises(Exception) as exc_info:
+            llm.call("Hello, how are you?")
+
+    assert str(exc_info.value) == error_message
+    assert len(received_events) == 1
+    assert received_events[0].type == "llm_call_failed"
+    assert received_events[0].error == error_message