Implement comprehensive streaming support for CrewAI

- Add streaming events: CrewStreamChunkEvent, TaskStreamChunkEvent, AgentStreamChunkEvent
- Extend Crew.kickoff() with stream parameter and callback support
- Propagate streaming through task and agent execution chains
- Integrate with existing LLM streaming infrastructure
- Add comprehensive tests and examples
- Maintain backward compatibility

Fixes #2950

Co-Authored-By: João <joao@crewai.com>
This commit is contained in:
Devin AI
2025-06-04 07:00:54 +00:00
parent 2bd6b72aae
commit b3b2b1e25f
14 changed files with 1225 additions and 6 deletions

View File

@@ -0,0 +1,36 @@
from crewai import Agent, Task, Crew
from crewai.llm import LLM
def stream_callback(chunk, agent_role, task_description, step_type):
    """Receive one streaming chunk and echo it to stdout immediately.

    The message is written without a trailing newline and with an explicit
    flush so chunks appear as they arrive rather than line-buffered.
    """
    message = f"[{agent_role}] {step_type}: {chunk}"
    print(message, end="", flush=True)
# Shared LLM configured for token streaming.
streaming_llm = LLM(model="gpt-4o-mini", stream=True)

# Single writer agent; verbosity off so only streamed chunks are printed.
writer = Agent(
    role="Content Writer",
    goal="Write engaging content",
    backstory="You are an experienced content writer who creates compelling narratives.",
    llm=streaming_llm,
    verbose=False,
)

# One creative-writing task assigned to the writer.
story_task = Task(
    description="Write a short story about a robot learning to paint",
    expected_output="A creative short story of 2-3 paragraphs",
    agent=writer,
)

crew = Crew(agents=[writer], tasks=[story_task], verbose=False)

print("Starting crew execution with streaming...")
# stream=True routes chunks through stream_callback as they are produced.
result = crew.kickoff(stream=True, stream_callback=stream_callback)
print(f"\n\nFinal result:\n{result}")

View File

@@ -0,0 +1,51 @@
from crewai import Agent, Task, Crew
from crewai.llm import LLM
def stream_callback(chunk, agent_role, task_description, step_type):
    """Print one streamed chunk, tagged with its agent role and step type.

    Output is unbuffered (flush=True) and newline-free so interleaved
    chunks from multiple agents render as a continuous stream.
    """
    line = f"[{agent_role}] {step_type}: {chunk}"
    print(line, end="", flush=True)
# One streaming-enabled LLM shared by both agents.
shared_llm = LLM(model="gpt-4o-mini", stream=True)

# First agent: gathers and analyzes information.
researcher = Agent(
    role="Research Analyst",
    goal="Research and analyze topics thoroughly",
    backstory="You are an experienced research analyst who excels at gathering and analyzing information.",
    llm=shared_llm,
    verbose=False,
)

# Second agent: turns the research into prose.
writer = Agent(
    role="Content Writer",
    goal="Write engaging content based on research",
    backstory="You are a skilled content writer who creates compelling narratives from research data.",
    llm=shared_llm,
    verbose=False,
)

research_task = Task(
    description="Research the latest trends in artificial intelligence and machine learning",
    expected_output="A comprehensive research summary of AI/ML trends",
    agent=researcher,
)

# The writing task consumes the research task's output via context.
writing_task = Task(
    description="Write an engaging blog post about AI trends based on the research",
    expected_output="A well-written blog post about AI trends",
    agent=writer,
    context=[research_task],
)

crew = Crew(
    agents=[researcher, writer],
    tasks=[research_task, writing_task],
    verbose=False,
)

print("Starting multi-agent crew execution with streaming...")
# Chunks from both agents flow through the same stream_callback.
result = crew.kickoff(stream=True, stream_callback=stream_callback)
print(f"\n\nFinal result:\n{result}")