Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-09 08:08:32 +00:00
Merge branch 'main' into revert-90f1bee
@@ -1276,11 +1276,11 @@ class Crew(BaseModel):
     def _reset_all_memories(self) -> None:
         """Reset all available memory systems."""
         memory_systems = [
-            ("short term", self._short_term_memory),
-            ("entity", self._entity_memory),
-            ("long term", self._long_term_memory),
-            ("task output", self._task_output_handler),
-            ("knowledge", self.knowledge),
+            ("short term", getattr(self, "_short_term_memory", None)),
+            ("entity", getattr(self, "_entity_memory", None)),
+            ("long term", getattr(self, "_long_term_memory", None)),
+            ("task output", getattr(self, "_task_output_handler", None)),
+            ("knowledge", getattr(self, "knowledge", None)),
         ]

         for name, system in memory_systems:
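The Crew._reset_all_memories change above swaps direct attribute access for getattr with a None default, so resetting memories degrades gracefully instead of raising AttributeError when a memory system was never initialized. A minimal sketch of the pattern outside crewAI (class and attribute names hypothetical):

```python
class Store:
    """Illustration: _cache exists only when caching was enabled."""

    def __init__(self, enable_cache: bool = False):
        if enable_cache:
            self._cache = {}  # attribute is set only on this path

    def reset(self) -> None:
        # getattr with a default never raises AttributeError
        cache = getattr(self, "_cache", None)
        if cache is None:
            print("cache: not configured, skipping")
        else:
            cache.clear()


Store(enable_cache=False).reset()  # prints the skip message
Store(enable_cache=True).reset()   # clears the cache
```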
@@ -10,6 +10,12 @@ from typing import Any, Dict, List, Literal, Optional, Type, Union, cast
 from dotenv import load_dotenv
 from pydantic import BaseModel

+from crewai.utilities.events.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+    LLMCallType,
+)
 from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent

 with warnings.catch_warnings():
@@ -239,6 +245,15 @@ class LLM:
            >>> print(response)
            "The capital of France is Paris."
        """
+        crewai_event_bus.emit(
+            self,
+            event=LLMCallStartedEvent(
+                messages=messages,
+                tools=tools,
+                callbacks=callbacks,
+                available_functions=available_functions,
+            ),
+        )
         # Validate parameters before proceeding with the call.
         self._validate_call_params()

@@ -313,12 +328,13 @@ class LLM:

             # --- 4) If no tool calls, return the text response
             if not tool_calls or not available_functions:
+                self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL)
                 return text_response

             # --- 5) Handle the tool call
             tool_call = tool_calls[0]
             function_name = tool_call.function.name
-            print("function_name", function_name)
             if function_name in available_functions:
                 try:
                     function_args = json.loads(tool_call.function.arguments)
@@ -330,6 +346,7 @@ class LLM:
                     try:
                         # Call the actual tool function
                         result = fn(**function_args)
+                        self._handle_emit_call_events(result, LLMCallType.TOOL_CALL)
                         return result

                     except Exception as e:
@@ -345,6 +362,12 @@ class LLM:
                                 error=str(e),
                             ),
                         )
+                        crewai_event_bus.emit(
+                            self,
+                            event=LLMCallFailedEvent(
+                                error=f"Tool execution error: {str(e)}"
+                            ),
+                        )
                         return text_response

             else:
@@ -354,12 +377,28 @@ class LLM:
                 return text_response

         except Exception as e:
+            crewai_event_bus.emit(
+                self,
+                event=LLMCallFailedEvent(error=str(e)),
+            )
             if not LLMContextLengthExceededException(
                 str(e)
             )._is_context_limit_error(str(e)):
                 logging.error(f"LiteLLM call failed: {str(e)}")
             raise

+    def _handle_emit_call_events(self, response: Any, call_type: LLMCallType):
+        """Handle the events for the LLM call.
+
+        Args:
+            response (str): The response from the LLM call.
+            call_type (str): The type of call, either "tool_call" or "llm_call".
+        """
+        crewai_event_bus.emit(
+            self,
+            event=LLMCallCompletedEvent(response=response, call_type=call_type),
+        )
+
     def _format_messages_for_provider(
         self, messages: List[Dict[str, str]]
     ) -> List[Dict[str, str]]:
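With the emit calls above in place, any code can observe an LLM call's lifecycle by subscribing on the shared bus, exactly as the tests at the end of this diff do. A minimal sketch, assuming crewai_event_bus is importable as shown (the diff uses the name but never shows its import path):

```python
from crewai.llm import LLM
from crewai.utilities.events.crewai_event_bus import crewai_event_bus  # import path assumed
from crewai.utilities.events.llm_events import (
    LLMCallCompletedEvent,
    LLMCallFailedEvent,
    LLMCallStartedEvent,
)


@crewai_event_bus.on(LLMCallStartedEvent)
def on_started(source, event: LLMCallStartedEvent):
    print("LLM call started with messages:", event.messages)


@crewai_event_bus.on(LLMCallCompletedEvent)
def on_completed(source, event: LLMCallCompletedEvent):
    print("LLM call completed, call_type:", event.call_type)


@crewai_event_bus.on(LLMCallFailedEvent)
def on_failed(source, event: LLMCallFailedEvent):
    print("LLM call failed:", event.error)


llm = LLM(model="gpt-4o-mini")
llm.call("Hello, how are you?")  # emits started, then completed (or failed)
```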
@@ -34,6 +34,7 @@ from .tool_usage_events import (
     ToolUsageEvent,
     ToolValidateInputErrorEvent,
 )
+from .llm_events import LLMCallCompletedEvent, LLMCallFailedEvent, LLMCallStartedEvent

 # events
 from .event_listener import EventListener
@@ -4,6 +4,11 @@ from crewai.telemetry.telemetry import Telemetry
 from crewai.utilities import Logger
 from crewai.utilities.constants import EMITTER_COLOR
 from crewai.utilities.events.base_event_listener import BaseEventListener
+from crewai.utilities.events.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+)

 from .agent_events import AgentExecutionCompletedEvent, AgentExecutionStartedEvent
 from .crew_events import (
@@ -253,5 +258,28 @@ class EventListener(BaseEventListener):
             #
         )

+        # ----------- LLM EVENTS -----------
+
+        @crewai_event_bus.on(LLMCallStartedEvent)
+        def on_llm_call_started(source, event: LLMCallStartedEvent):
+            self.logger.log(
+                f"🤖 LLM Call Started",
+                event.timestamp,
+            )
+
+        @crewai_event_bus.on(LLMCallCompletedEvent)
+        def on_llm_call_completed(source, event: LLMCallCompletedEvent):
+            self.logger.log(
+                f"✅ LLM Call Completed",
+                event.timestamp,
+            )
+
+        @crewai_event_bus.on(LLMCallFailedEvent)
+        def on_llm_call_failed(source, event: LLMCallFailedEvent):
+            self.logger.log(
+                f"❌ LLM Call Failed: '{event.error}'",
+                event.timestamp,
+            )
+

 event_listener = EventListener()
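The three handlers above extend the built-in EventListener; third-party code can hook the same events. A sketch of a standalone listener, assuming BaseEventListener wires subclasses to the bus through a setup_listeners hook and that instantiation registers it (the module-level event_listener = EventListener() above suggests this, but neither detail is confirmed by this diff):

```python
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.llm_events import LLMCallFailedEvent


class FailureAlertListener(BaseEventListener):
    """Hypothetical listener that alerts on LLM failures."""

    def setup_listeners(self, crewai_event_bus):  # hook name assumed
        @crewai_event_bus.on(LLMCallFailedEvent)
        def on_llm_call_failed(source, event: LLMCallFailedEvent):
            # Swap the print for a real alerting call in production code.
            print(f"ALERT at {event.timestamp}: {event.error}")


failure_listener = FailureAlertListener()
```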
src/crewai/utilities/events/llm_events.py (new file, 36 lines)
@@ -0,0 +1,36 @@
+from enum import Enum
+from typing import Any, Dict, List, Optional, Union
+
+from crewai.utilities.events.base_events import CrewEvent
+
+
+class LLMCallType(Enum):
+    """Type of LLM call being made"""
+
+    TOOL_CALL = "tool_call"
+    LLM_CALL = "llm_call"
+
+
+class LLMCallStartedEvent(CrewEvent):
+    """Event emitted when a LLM call starts"""
+
+    type: str = "llm_call_started"
+    messages: Union[str, List[Dict[str, str]]]
+    tools: Optional[List[dict]] = None
+    callbacks: Optional[List[Any]] = None
+    available_functions: Optional[Dict[str, Any]] = None
+
+
+class LLMCallCompletedEvent(CrewEvent):
+    """Event emitted when a LLM call completes"""
+
+    type: str = "llm_call_completed"
+    response: Any
+    call_type: LLMCallType
+
+
+class LLMCallFailedEvent(CrewEvent):
+    """Event emitted when a LLM call fails"""
+
+    error: str
+    type: str = "llm_call_failed"
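CrewEvent is evidently a pydantic-style model (typed field declarations with defaults, and event.timestamp is read by the listener above), so these events validate their payloads at construction time. A quick sketch, assuming CrewEvent behaves like a pydantic BaseModel:

```python
from crewai.utilities.events.llm_events import LLMCallCompletedEvent, LLMCallType

event = LLMCallCompletedEvent(
    response="The capital of France is Paris.",
    call_type=LLMCallType.LLM_CALL,
)
print(event.type)       # llm_call_completed
print(event.call_type)  # LLMCallType.LLM_CALL
```

An invalid call_type (say, the string "bad") would then fail with a ValidationError at construction instead of propagating silently.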
@@ -1,4 +1,4 @@
-from typing import Any, Optional
+from typing import Optional

 from crewai.tasks.task_output import TaskOutput
 from crewai.utilities.events.base_events import CrewEvent
tests/utilities/cassettes/test_llm_emits_call_failed_event.yaml (new file, 103 lines)
@@ -0,0 +1,103 @@
+interactions:
+- request:
+    body: '{"messages": [{"role": "user", "content": "Hello, how are you?"}], "model":
+      "gpt-4o-mini", "stop": []}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '102'
+      content-type:
+      - application/json
+      cookie:
+      - _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000;
+        __cf_bm=fU6K5KZoDmgcEuF8_yWAYKUO5fKHh6q5.wDPnna393g-1740424913-1.0.1.1-2iOaq3JVGWs439V0HxJee0IC9HdJm7dPkeJorD.AGw0YwkngRPM8rrTzn_7ht1BkbOauEezj.wPKcBz18gIYUg
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.61.0
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.61.0
+      x-stainless-raw-response:
+      - 'true'
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.8
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    content: "{\n \"id\": \"chatcmpl-B4YLA2SrC2rwdVQ3U87G5a0P5lsLw\",\n \"object\":
+      \"chat.completion\",\n \"created\": 1740425016,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
+      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
+      \"assistant\",\n \"content\": \"Hello! I'm just a computer program, so
+      I don't have feelings, but I'm here and ready to help you. How can I assist
+      you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
+      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
+      13,\n \"completion_tokens\": 30,\n \"total_tokens\": 43,\n \"prompt_tokens_details\":
+      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
+      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
+      \"default\",\n \"system_fingerprint\": \"fp_709714d124\"\n}\n"
+    headers:
+      CF-RAY:
+      - 9171d4c0ed44236e-SJC
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Mon, 24 Feb 2025 19:23:38 GMT
+      Server:
+      - cloudflare
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      cf-cache-status:
+      - DYNAMIC
+      openai-organization:
+      - crewai-iuxna1
+      openai-processing-ms:
+      - '1954'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-ratelimit-limit-requests:
+      - '30000'
+      x-ratelimit-limit-tokens:
+      - '150000000'
+      x-ratelimit-remaining-requests:
+      - '29999'
+      x-ratelimit-remaining-tokens:
+      - '149999978'
+      x-ratelimit-reset-requests:
+      - 2ms
+      x-ratelimit-reset-tokens:
+      - 0s
+      x-request-id:
+      - req_ea2703502b8827e4297cd2a7bae9d9c8
+    http_version: HTTP/1.1
+    status_code: 200
+version: 1
tests/utilities/cassettes/test_llm_emits_call_started_event.yaml (new file, 108 lines)
@@ -0,0 +1,108 @@
+interactions:
+- request:
+    body: '{"messages": [{"role": "user", "content": "Hello, how are you?"}], "model":
+      "gpt-4o-mini", "stop": []}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '102'
+      content-type:
+      - application/json
+      cookie:
+      - _cfuvid=GefCcEtb_Gem93E4a9Hvt3Xyof1YQZVJAXBb9I6pEUs-1739398417375-0.0.1.1-604800000
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.61.0
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.61.0
+      x-stainless-raw-response:
+      - 'true'
+      x-stainless-retry-count:
+      - '0'
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.12.8
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    content: "{\n \"id\": \"chatcmpl-B4YJU8IWKGyBQtAyPDRd3SFI2flYR\",\n \"object\":
+      \"chat.completion\",\n \"created\": 1740424912,\n \"model\": \"gpt-4o-mini-2024-07-18\",\n
+      \ \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\":
+      \"assistant\",\n \"content\": \"Hello! I'm just a computer program, so
+      I don't have feelings, but I'm here and ready to help you. How can I assist
+      you today?\",\n \"refusal\": null\n },\n \"logprobs\": null,\n
+      \ \"finish_reason\": \"stop\"\n }\n ],\n \"usage\": {\n \"prompt_tokens\":
+      13,\n \"completion_tokens\": 30,\n \"total_tokens\": 43,\n \"prompt_tokens_details\":
+      {\n \"cached_tokens\": 0,\n \"audio_tokens\": 0\n },\n \"completion_tokens_details\":
+      {\n \"reasoning_tokens\": 0,\n \"audio_tokens\": 0,\n \"accepted_prediction_tokens\":
+      0,\n \"rejected_prediction_tokens\": 0\n }\n },\n \"service_tier\":
+      \"default\",\n \"system_fingerprint\": \"fp_7fcd609668\"\n}\n"
+    headers:
+      CF-RAY:
+      - 9171d230d8ed7ae0-SJC
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Mon, 24 Feb 2025 19:21:53 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=fU6K5KZoDmgcEuF8_yWAYKUO5fKHh6q5.wDPnna393g-1740424913-1.0.1.1-2iOaq3JVGWs439V0HxJee0IC9HdJm7dPkeJorD.AGw0YwkngRPM8rrTzn_7ht1BkbOauEezj.wPKcBz18gIYUg;
+        path=/; expires=Mon, 24-Feb-25 19:51:53 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=IY8ppO70AMHr2skDSUsGh71zqHHdCQCZ3OvkPi26NBc-1740424913267-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      X-Content-Type-Options:
+      - nosniff
+      access-control-expose-headers:
+      - X-Request-ID
+      alt-svc:
+      - h3=":443"; ma=86400
+      cf-cache-status:
+      - DYNAMIC
+      openai-organization:
+      - crewai-iuxna1
+      openai-processing-ms:
+      - '993'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      x-ratelimit-limit-requests:
+      - '30000'
+      x-ratelimit-limit-tokens:
+      - '150000000'
+      x-ratelimit-remaining-requests:
+      - '29999'
+      x-ratelimit-remaining-tokens:
+      - '149999978'
+      x-ratelimit-reset-requests:
+      - 2ms
+      x-ratelimit-reset-tokens:
+      - 0s
+      x-request-id:
+      - req_d9c4d49185e97b1797061efc1e55d811
+    http_version: HTTP/1.1
+    status_code: 200
+version: 1
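Both cassettes above are VCR-style HTTP recordings: under @pytest.mark.vcr the POST to api.openai.com is matched against the cassette and the stored response is replayed, so the tests below run offline and deterministically. A minimal sketch of a test consuming such a cassette, mirroring the tests in this diff (the plugin that wires up @pytest.mark.vcr, e.g. pytest-recording, is assumed):

```python
import pytest

from crewai.llm import LLM


@pytest.mark.vcr(filter_headers=["authorization"])  # keeps the API key out of the cassette
def test_llm_call_replays_from_cassette():
    llm = LLM(model="gpt-4o-mini")
    response = llm.call("Hello, how are you?")
    # Asserts against the canned completion recorded in the cassette
    assert "ready to help you" in response
```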
@@ -1,6 +1,5 @@
-import json
 from datetime import datetime
-from unittest.mock import MagicMock, patch
+from unittest.mock import patch

 import pytest
 from pydantic import Field
@@ -9,6 +8,7 @@ from crewai.agent import Agent
 from crewai.agents.crew_agent_executor import CrewAgentExecutor
 from crewai.crew import Crew
 from crewai.flow.flow import Flow, listen, start
+from crewai.llm import LLM
 from crewai.task import Task
 from crewai.tools.base_tool import BaseTool
 from crewai.tools.tool_usage import ToolUsage
@@ -31,6 +31,12 @@ from crewai.utilities.events.flow_events import (
     MethodExecutionFailedEvent,
     MethodExecutionStartedEvent,
 )
+from crewai.utilities.events.llm_events import (
+    LLMCallCompletedEvent,
+    LLMCallFailedEvent,
+    LLMCallStartedEvent,
+    LLMCallType,
+)
 from crewai.utilities.events.task_events import (
     TaskCompletedEvent,
     TaskFailedEvent,
@@ -495,3 +501,43 @@ def test_flow_emits_method_execution_failed_event():
     assert received_events[0].flow_name == "TestFlow"
     assert received_events[0].type == "method_execution_failed"
     assert received_events[0].error == error
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_llm_emits_call_started_event():
+    received_events = []
+
+    @crewai_event_bus.on(LLMCallStartedEvent)
+    def handle_llm_call_started(source, event):
+        received_events.append(event)
+
+    @crewai_event_bus.on(LLMCallCompletedEvent)
+    def handle_llm_call_completed(source, event):
+        received_events.append(event)
+
+    llm = LLM(model="gpt-4o-mini")
+    llm.call("Hello, how are you?")
+
+    assert len(received_events) == 2
+    assert received_events[0].type == "llm_call_started"
+    assert received_events[1].type == "llm_call_completed"
+
+
+@pytest.mark.vcr(filter_headers=["authorization"])
+def test_llm_emits_call_failed_event():
+    received_events = []
+
+    @crewai_event_bus.on(LLMCallFailedEvent)
+    def handle_llm_call_failed(source, event):
+        received_events.append(event)
+
+    error_message = "Simulated LLM call failure"
+    with patch.object(LLM, "_call_llm", side_effect=Exception(error_message)):
+        llm = LLM(model="gpt-4o-mini")
+        with pytest.raises(Exception) as exc_info:
+            llm.call("Hello, how are you?")
+
+    assert str(exc_info.value) == error_message
+    assert len(received_events) == 1
+    assert received_events[0].type == "llm_call_failed"
+    assert received_events[0].error == error_message