mirror of
https://github.com/crewAIInc/crewAI.git
synced 2026-01-28 01:28:14 +00:00
Fix #3715: Remove unwanted LLM stream chunk printing to stdout
- Removed the print() statement in event_listener.py that was printing all LLM streaming chunks to stdout
- The print() on line 386 was causing every text chunk from LLM responses to be written to stdout
- Added a test to verify stream chunks are emitted as events but not printed to stdout
- Streaming chunks should only be handled by event handlers, not printed directly

Fixes #3715

Co-Authored-By: João <joao@crewai.com>
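Since chunks are still emitted on the event bus as LLMStreamChunkEvent, callers that want console streaming can opt back in with their own handler. A minimal sketch, assuming the import paths shown (the bus API, the on() decorator, and the event.chunk attribute are the ones exercised by the test added in this commit):

    import sys

    # Import paths are an assumption; adjust to wherever crewai_event_bus
    # and LLMStreamChunkEvent live in your crewAI version.
    from crewai.utilities.events import crewai_event_bus
    from crewai.utilities.events import LLMStreamChunkEvent


    @crewai_event_bus.on(LLMStreamChunkEvent)
    def print_stream_chunk(source, event):
        # Re-enable console streaming deliberately, instead of the library
        # printing every chunk unconditionally.
        sys.stdout.write(event.chunk)
        sys.stdout.flush()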
@@ -383,7 +383,6 @@ class EventListener(BaseEventListener):
 
             # Read from the in-memory stream
             content = self.text_stream.read()
-            print(content, end="", flush=True)
             self.next_chunk = self.text_stream.tell()
 
         # ----------- LLM GUARDRAIL EVENTS -----------
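Design note: the handler still reads the in-memory stream and advances its offset via self.text_stream.read() and self.text_stream.tell(), so the stream bookkeeping is untouched; the only behavioral change is that chunks are no longer echoed to stdout unconditionally.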
@@ -991,3 +991,39 @@ def test_llm_emits_event_with_lite_agent():
 
     assert set(all_agent_roles) == {agent.role}
     assert set(all_agent_id) == {agent.id}
+
+
+def test_llm_stream_chunks_do_not_print_to_stdout(capsys):
+    """Test that LLM streaming chunks are not printed to stdout.
+
+    This test verifies the fix for issue #3715 where LLM stream chunks
+    were being printed directly to stdout via print() statement.
+    """
+    received_chunks = []
+
+    with crewai_event_bus.scoped_handlers():
+        @crewai_event_bus.on(LLMStreamChunkEvent)
+        def handle_stream_chunk(source, event):
+            received_chunks.append(event.chunk)
+
+        # Manually emit stream chunk events to simulate streaming
+        llm = LLM(model="gpt-4o", stream=True)
+
+        test_chunks = ["Hello", " ", "world", "!"]
+        for chunk in test_chunks:
+            crewai_event_bus.emit(llm, event=LLMStreamChunkEvent(chunk=chunk))
+
+        # Capture stdout
+        captured = capsys.readouterr()
+
+        # Verify that we received chunks
+        assert len(received_chunks) == len(test_chunks), "Should receive all streaming chunks"
+        assert received_chunks == test_chunks, "Chunks should match what was emitted"
+
+        # Verify that chunks were NOT printed to stdout
+        # The bug was that all chunks were being printed via print()
+        for chunk in test_chunks:
+            assert chunk not in captured.out, (
+                f"Chunk '{chunk}' should not be printed to stdout. "
+                "This indicates the bug in event_listener.py is not fixed."
+            )
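Two details make the test self-contained: pytest's built-in capsys fixture captures everything written to stdout, so the final loop can assert that no chunk text leaked there, and crewai_event_bus.scoped_handlers() (judging by its use here as a context manager) confines the temporary handler to this test. It can be run in isolation with pytest -k test_llm_stream_chunks_do_not_print_to_stdout.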