Mirror of https://github.com/crewAIInc/crewAI.git (synced 2026-01-08 15:48:29 +00:00)
feat: support to filter LLM Events from Lite Agent
@@ -775,6 +775,7 @@ class Agent(BaseAgent):
            LiteAgentOutput: The result of the agent execution.
        """
        lite_agent = LiteAgent(
            id=self.id,
            role=self.role,
            goal=self.goal,
            backstory=self.backstory,
@@ -15,12 +15,14 @@ from typing import (
    get_origin,
)

try:
    from typing import Self
except ImportError:
    from typing_extensions import Self

from pydantic import (
    UUID4,
    BaseModel,
    Field,
    InstanceOf,
@@ -129,6 +131,7 @@ class LiteAgent(FlowTrackable, BaseModel):
    model_config = {"arbitrary_types_allowed": True}

    # Core Agent Properties
    id: UUID4 = Field(default_factory=uuid.uuid4, frozen=True)
    role: str = Field(description="Role of the agent")
    goal: str = Field(description="Goal of the agent")
    backstory: str = Field(description="Backstory of the agent")
@@ -517,6 +520,7 @@ class LiteAgent(FlowTrackable, BaseModel):
                messages=self._messages,
                tools=None,
                callbacks=self._callbacks,
+                from_agent=self,
            ),
        )
@@ -526,6 +530,7 @@ class LiteAgent(FlowTrackable, BaseModel):
                messages=self._messages,
                callbacks=self._callbacks,
                printer=self._printer,
+                from_agent=self,
            )

            # Emit LLM call completed event
@@ -534,13 +539,14 @@ class LiteAgent(FlowTrackable, BaseModel):
                event=LLMCallCompletedEvent(
                    response=answer,
                    call_type=LLMCallType.LLM_CALL,
+                    from_agent=self,
                ),
            )
        except Exception as e:
            # Emit LLM call failed event
            crewai_event_bus.emit(
                self,
-                event=LLMCallFailedEvent(error=str(e)),
+                event=LLMCallFailedEvent(error=str(e), from_agent=self),
            )
            raise e
@@ -420,6 +420,7 @@ class LLM(BaseLLM):
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
+        from_agent: Optional[Any] = None,
    ) -> str:
        """Handle a streaming response from the LLM.
@@ -427,6 +428,8 @@ class LLM(BaseLLM):
            params: Parameters for the completion call
            callbacks: Optional list of callback functions
            available_functions: Dict of available functions
            from_task: Optional task object
+            from_agent: Optional agent object

        Returns:
            str: The complete response text
@@ -512,6 +515,7 @@ class LLM(BaseLLM):
                        accumulated_tool_args=accumulated_tool_args,
                        available_functions=available_functions,
                        from_task=from_task,
+                        from_agent=from_agent,
                    )
                    if result is not None:
                        chunk_content = result
@@ -529,7 +533,7 @@ class LLM(BaseLLM):
                    assert hasattr(crewai_event_bus, "emit")
                    crewai_event_bus.emit(
                        self,
-                        event=LLMStreamChunkEvent(chunk=chunk_content, from_task=from_task),
+                        event=LLMStreamChunkEvent(chunk=chunk_content, from_task=from_task, from_agent=from_agent),
                    )
            # --- 4) Fallback to non-streaming if no content received
            if not full_response.strip() and chunk_count == 0:
@@ -542,7 +546,7 @@ class LLM(BaseLLM):
                    "stream_options", None
                )  # Remove stream_options for non-streaming call
                return self._handle_non_streaming_response(
-                    non_streaming_params, callbacks, available_functions, from_task
+                    non_streaming_params, callbacks, available_functions, from_task, from_agent
                )

            # --- 5) Handle empty response with chunks
@@ -627,7 +631,7 @@ class LLM(BaseLLM):
                # Log token usage if available in streaming mode
                self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
                # Emit completion event and return response
-                self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task)
+                self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
                return full_response

            # --- 9) Handle tool calls if present
@@ -639,7 +643,7 @@ class LLM(BaseLLM):
            self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)

            # --- 11) Emit completion event and return response
-            self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task)
+            self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
            return full_response

        except ContextWindowExceededError as e:
@@ -651,14 +655,14 @@ class LLM(BaseLLM):
            logging.error(f"Error in streaming response: {str(e)}")
            if full_response.strip():
                logging.warning(f"Returning partial response despite error: {str(e)}")
-                self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task)
+                self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL, from_task, from_agent)
                return full_response

            # Emit failed event and re-raise the exception
            assert hasattr(crewai_event_bus, "emit")
            crewai_event_bus.emit(
                self,
-                event=LLMCallFailedEvent(error=str(e), from_task=from_task),
+                event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
            )
            raise Exception(f"Failed to get streaming response: {str(e)}")
@@ -668,6 +672,7 @@ class LLM(BaseLLM):
        accumulated_tool_args: DefaultDict[int, AccumulatedToolArgs],
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
+        from_agent: Optional[Any] = None,
    ) -> None | str:
        for tool_call in tool_calls:
            current_tool_accumulator = accumulated_tool_args[tool_call.index]
@@ -686,6 +691,7 @@ class LLM(BaseLLM):
                        tool_call=tool_call.to_dict(),
                        chunk=tool_call.function.arguments,
                        from_task=from_task,
+                        from_agent=from_agent,
                    ),
                )
@@ -753,6 +759,7 @@ class LLM(BaseLLM):
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
+        from_agent: Optional[Any] = None,
    ) -> str:
        """Handle a non-streaming response from the LLM.
@@ -761,6 +768,7 @@ class LLM(BaseLLM):
            callbacks: Optional list of callback functions
            available_functions: Dict of available functions
            from_task: Optional Task that invoked the LLM
+            from_agent: Optional Agent that invoked the LLM

        Returns:
            str: The response text
@@ -801,7 +809,7 @@ class LLM(BaseLLM):

        # --- 5) If no tool calls or no available functions, return the text response directly
        if not tool_calls or not available_functions:
-            self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task)
+            self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task, from_agent)
            return text_response

        # --- 6) Handle tool calls if present
@@ -810,7 +818,7 @@ class LLM(BaseLLM):
            return tool_result

        # --- 7) If tool call handling didn't return a result, emit completion event and return text response
-        self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task)
+        self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL, from_task, from_agent)
        return text_response

    def _handle_tool_call(
@@ -896,6 +904,7 @@ class LLM(BaseLLM):
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
+        from_agent: Optional[Any] = None,
    ) -> Union[str, Any]:
        """High-level LLM call method.
@@ -911,6 +920,7 @@ class LLM(BaseLLM):
            available_functions: Optional dict mapping function names to callables
                that can be invoked by the LLM.
            from_task: Optional Task that invoked the LLM
+            from_agent: Optional Agent that invoked the LLM

        Returns:
            Union[str, Any]: Either a text response from the LLM (str) or
@@ -931,6 +941,7 @@ class LLM(BaseLLM):
                callbacks=callbacks,
                available_functions=available_functions,
                from_task=from_task,
+                from_agent=from_agent,
            ),
        )
@@ -959,11 +970,11 @@ class LLM(BaseLLM):
            # --- 7) Make the completion call and handle response
            if self.stream:
                return self._handle_streaming_response(
-                    params, callbacks, available_functions, from_task
+                    params, callbacks, available_functions, from_task, from_agent
                )
            else:
                return self._handle_non_streaming_response(
-                    params, callbacks, available_functions, from_task
+                    params, callbacks, available_functions, from_task, from_agent
                )

        except LLMContextLengthExceededException:
@@ -975,12 +986,12 @@ class LLM(BaseLLM):
            assert hasattr(crewai_event_bus, "emit")
            crewai_event_bus.emit(
                self,
-                event=LLMCallFailedEvent(error=str(e), from_task=from_task),
+                event=LLMCallFailedEvent(error=str(e), from_task=from_task, from_agent=from_agent),
            )
            logging.error(f"LiteLLM call failed: {str(e)}")
            raise

-    def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None):
+    def _handle_emit_call_events(self, response: Any, call_type: LLMCallType, from_task: Optional[Any] = None, from_agent: Optional[Any] = None):
        """Handle the events for the LLM call.

        Args:
@@ -990,7 +1001,7 @@ class LLM(BaseLLM):
        assert hasattr(crewai_event_bus, "emit")
        crewai_event_bus.emit(
            self,
-            event=LLMCallCompletedEvent(response=response, call_type=call_type, from_task=from_task),
+            event=LLMCallCompletedEvent(response=response, call_type=call_type, from_task=from_task, from_agent=from_agent),
        )

    def _format_messages_for_provider(
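Taken together, these llm.py changes thread from_agent from the public call() entry point down to every event the client emits. As a usage sketch (not part of the diff; the model, role, and message values are illustrative), a caller with no Task in scope can now do:

from crewai import Agent, LLM

llm = LLM(model="gpt-4o-mini")
agent = Agent(role="Speaker", goal="Just say hi", backstory="Says hi", llm=llm)

# from_agent rides along into LLMCallStartedEvent, LLMCallCompletedEvent,
# LLMCallFailedEvent, and LLMStreamChunkEvent emitted during this call.
response = llm.call("say hi!", from_agent=agent)
print(response)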
@@ -48,6 +48,7 @@ class BaseLLM(ABC):
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
+        from_agent: Optional[Any] = None,
    ) -> Union[str, Any]:
        """Call the LLM with the given messages.
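For third-party providers, the widened BaseLLM.call signature means custom implementations should accept the new keyword even if they ignore it. A minimal sketch, assuming BaseLLM is importable from crewai.llms.base_llm and call is the only method that must be overridden (the class name and echo behavior are invented; the CustomLLM test fixture further down follows the same pattern):

from typing import Any, Dict, List, Optional, Union

from crewai.llms.base_llm import BaseLLM  # import path assumed


class EchoLLM(BaseLLM):
    """Toy LLM that must accept from_task/from_agent even though it ignores them."""

    def call(
        self,
        messages: Union[str, List[Dict[str, str]]],
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
        from_task: Optional[Any] = None,
        from_agent: Optional[Any] = None,
    ) -> Union[str, Any]:
        # Echo the last user message back as the "completion".
        if isinstance(messages, str):
            return messages
        return messages[-1]["content"]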
src/crewai/llms/third_party/ai_suite.py (vendored, 3 lines changed)
@@ -16,7 +16,8 @@ class AISuiteLLM(BaseLLM):
        tools: Optional[List[dict]] = None,
        callbacks: Optional[List[Any]] = None,
        available_functions: Optional[Dict[str, Any]] = None,
-        from_task: Optional[Task] = None,
+        from_task: Optional[Any] = None,
+        from_agent: Optional[Any] = None,
    ) -> Union[str, Any]:
        completion_params = self._prepare_completion_params(messages, tools)
        response = self.client.chat.completions.create(**completion_params)
@@ -146,6 +146,7 @@ def get_llm_response(
    callbacks: List[Any],
    printer: Printer,
    from_task: Optional[Any] = None,
+    from_agent: Optional[Any] = None,
) -> str:
    """Call the LLM and return the response, handling any invalid responses."""
    try:
@@ -153,6 +154,7 @@ def get_llm_response(
            messages,
            callbacks=callbacks,
            from_task=from_task,
+            from_agent=from_agent,
        )
    except Exception as e:
        printer.print(
@@ -18,9 +18,14 @@ class LLMEventBase(BaseEvent):
        self._set_task_params(data)

    def _set_agent_params(self, data: Dict[str, Any]):
-        if "from_task" in data and (agent := getattr(data["from_task"], "agent", None)):
-            self.agent_id = agent.id
-            self.agent_role = agent.role
+        task = data.get("from_task", None)
+        agent = task.agent if task else data.get("from_agent", None)
+
+        if not agent:
+            return
+
+        self.agent_id = agent.id
+        self.agent_role = agent.role

    def _set_task_params(self, data: Dict[str, Any]):
        if "from_task" in data and (task := data["from_task"]):
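The net effect: the agent attached to from_task still takes precedence, but an explicit from_agent is now sufficient on its own, which is what the LiteAgent path relies on. A minimal sketch of that path (the event class and keyword arguments mirror the diff above; the import path and the standalone agent are assumptions):

from crewai import Agent
from crewai.utilities.events.llm_events import LLMCallStartedEvent  # import path assumed

agent = Agent(role="Speaker", goal="Just say hi", backstory="Says hi")

# No task in play: from_agent alone now populates agent_id / agent_role.
event = LLMCallStartedEvent(
    messages=[{"role": "user", "content": "say hi!"}],
    tools=None,
    callbacks=None,
    from_agent=agent,
)
assert event.agent_id == agent.id
assert event.agent_role == agent.role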
@@ -31,6 +31,7 @@ class CustomLLM(BaseLLM):
        callbacks=None,
        available_functions=None,
        from_task=None,
+        from_agent=None,
    ):
        """
        Mock LLM call that returns a predefined response.
@@ -0,0 +1,171 @@
interactions:
- request:
    body: '{"messages": [{"role": "system", "content": "You are Speaker. You are a
      helpful assistant that just says hi\nYour personal goal is: Just say hi\n\nTo
      give my best complete final answer to the task respond using the exact following
      format:\n\nThought: I now can give a great answer\nFinal Answer: Your final
      answer must be the great and the most complete as possible, it must be outcome
      described.\n\nI MUST use these formats, my job depends on it!"}, {"role": "user",
      "content": "say hi!"}], "model": "gpt-4o-mini", "stop": ["\nObservation:"],
      "stream": true, "stream_options": {"include_usage": true}}'
    headers:
      accept:
      - application/json
      accept-encoding:
      - gzip, deflate, zstd
      connection:
      - keep-alive
      content-length:
      - '602'
      content-type:
      - application/json
      host:
      - api.openai.com
      user-agent:
      - OpenAI/Python 1.78.0
      x-stainless-arch:
      - arm64
      x-stainless-async:
      - 'false'
      x-stainless-lang:
      - python
      x-stainless-os:
      - MacOS
      x-stainless-package-version:
      - 1.78.0
      x-stainless-raw-response:
      - 'true'
      x-stainless-read-timeout:
      - '600.0'
      x-stainless-retry-count:
      - '0'
      x-stainless-runtime:
      - CPython
      x-stainless-runtime-version:
      - 3.11.12
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: 'data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"role":"assistant","content":"","refusal":null},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"Thought"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        I"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        now"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        can"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        give"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        a"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        great"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        answer"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":" \n"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"Final"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        Answer"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":":"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"
        Hi"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{"content":"!"},"logprobs":null,"finish_reason":null}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}],"usage":null}


        data: {"id":"chatcmpl-BoGFzpBc0nuAKcVrYlEEztNwzrUG6","object":"chat.completion.chunk","created":1751318591,"model":"gpt-4o-mini-2024-07-18","service_tier":"default","system_fingerprint":"fp_34a54ae93c","choices":[],"usage":{"prompt_tokens":99,"completion_tokens":15,"total_tokens":114,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"completion_tokens_details":{"reasoning_tokens":0,"audio_tokens":0,"accepted_prediction_tokens":0,"rejected_prediction_tokens":0}}}


        data: [DONE]


        '
    headers:
      CF-RAY:
      - 9580b92adce5e838-GRU
      Connection:
      - keep-alive
      Content-Type:
      - text/event-stream; charset=utf-8
      Date:
      - Mon, 30 Jun 2025 21:23:12 GMT
      Server:
      - cloudflare
      Set-Cookie:
      - __cf_bm=nhFmL5HNobQWdbf2Sd9Z8X9ad5zXKG7Ln7MlzuiuwP8-1751318592-1.0.1.1-5qDyF6nVC5d8PDerEmHSOgyWEYdzMdgyFRXqgiJB3FSyWWnvzL4PyVp6LGx9z0P5iTX8PNbxfUOEOYX.7bFaK6p.CyxLaXK7WpnQ3zeasG8;
        path=/; expires=Mon, 30-Jun-25 21:53:12 GMT; domain=.api.openai.com; HttpOnly;
        Secure; SameSite=None
      - _cfuvid=APKo781sOKEk.HlN5nFBT1Mkid8Lj04kw6JPleI78bU-1751318592001-0.0.1.1-604800000;
        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
      Transfer-Encoding:
      - chunked
      X-Content-Type-Options:
      - nosniff
      access-control-expose-headers:
      - X-Request-ID
      alt-svc:
      - h3=":443"; ma=86400
      cf-cache-status:
      - DYNAMIC
      openai-organization:
      - crewai-iuxna1
      openai-processing-ms:
      - '321'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=31536000; includeSubDomains; preload
      x-envoy-upstream-service-time:
      - '326'
      x-ratelimit-limit-requests:
      - '30000'
      x-ratelimit-limit-tokens:
      - '150000000'
      x-ratelimit-remaining-requests:
      - '29999'
      x-ratelimit-remaining-tokens:
      - '149999896'
      x-ratelimit-reset-requests:
      - 2ms
      x-ratelimit-reset-tokens:
      - 0s
      x-request-id:
      - req_0b0f668953604810c182b1e83e9709fe
    status:
      code: 200
      message: OK
version: 1
@@ -903,3 +903,56 @@ def test_llm_emits_event_with_task_and_agent_info(base_agent, base_task):
    assert set(all_agent_id) == {base_agent.id}
    assert set(all_task_id) == {base_task.id}
    assert set(all_task_name) == {base_task.name}


@pytest.mark.vcr(filter_headers=["authorization"])
def test_llm_emits_event_with_lite_agent():
    completed_event = []
    failed_event = []
    started_event = []
    stream_event = []

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(LLMCallFailedEvent)
        def handle_llm_failed(source, event):
            failed_event.append(event)

        @crewai_event_bus.on(LLMCallStartedEvent)
        def handle_llm_started(source, event):
            started_event.append(event)

        @crewai_event_bus.on(LLMCallCompletedEvent)
        def handle_llm_completed(source, event):
            completed_event.append(event)

        @crewai_event_bus.on(LLMStreamChunkEvent)
        def handle_llm_stream_chunk(source, event):
            stream_event.append(event)

        agent = Agent(
            role="Speaker",
            llm=LLM(model="gpt-4o-mini", stream=True),
            goal="Just say hi",
            backstory="You are a helpful assistant that just says hi",
        )
        agent.kickoff(messages=[{"role": "user", "content": "say hi!"}])

        assert len(completed_event) == 2
        assert len(failed_event) == 0
        assert len(started_event) == 2
        assert len(stream_event) == 15

        all_events = completed_event + failed_event + started_event + stream_event
        all_agent_roles = [event.agent_role for event in all_events]
        all_agent_id = [event.agent_id for event in all_events]
        all_task_id = [event.task_id for event in all_events if event.task_id]
        all_task_name = [event.task_name for event in all_events if event.task_name]

        # ensure all events have the agent + task props set
        assert len(all_agent_roles) == 19
        assert len(all_agent_id) == 19
        assert len(all_task_id) == 0
        assert len(all_task_name) == 0

        assert set(all_agent_roles) == {agent.role}
        assert set(all_agent_id) == {agent.id}
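Beyond the raw counts asserted above, the propagated agent info is what makes per-agent filtering possible in event handlers. A minimal sketch of filtering stream chunks down to a single lite agent (handler body and import paths are assumptions, kept in line with the test above):

from crewai import Agent, LLM
from crewai.utilities.events import crewai_event_bus
from crewai.utilities.events.llm_events import LLMStreamChunkEvent

speaker = Agent(
    role="Speaker",
    goal="Just say hi",
    backstory="You are a helpful assistant that just says hi",
    llm=LLM(model="gpt-4o-mini", stream=True),
)

with crewai_event_bus.scoped_handlers():

    @crewai_event_bus.on(LLMStreamChunkEvent)
    def only_speaker_chunks(source, event):
        # Drop chunks emitted on behalf of any other agent.
        if event.agent_id != speaker.id:
            return
        print(event.chunk, end="", flush=True)

    speaker.kickoff(messages=[{"role": "user", "content": "say hi!"}])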