Mirror of https://github.com/crewAIInc/crewAI.git, synced 2026-01-10 16:48:30 +00:00.
Implement comprehensive streaming support for CrewAI
- Add streaming events: CrewStreamChunkEvent, TaskStreamChunkEvent, AgentStreamChunkEvent
- Extend Crew.kickoff() with stream parameter and callback support
- Propagate streaming through task and agent execution chains
- Integrate with existing LLM streaming infrastructure
- Add comprehensive tests and examples
- Maintain backward compatibility

Fixes #2950

Co-Authored-By: João <joao@crewai.com>
This commit is contained in:
36
examples/streaming_example.py
Normal file
36
examples/streaming_example.py
Normal file
@@ -0,0 +1,36 @@
|
||||
"""Minimal CrewAI streaming example.

A single agent writes a short story while ``stream_callback`` prints each
streamed chunk to stdout as it arrives.
"""

from crewai import Agent, Task, Crew
from crewai.llm import LLM


def stream_callback(chunk, agent_role, task_description, step_type):
    """Print one streamed chunk as it arrives.

    Args:
        chunk: The streamed text fragment produced by the LLM.
        agent_role: Role string of the agent that produced the chunk.
        task_description: Description of the task being executed.
        step_type: The execution step that produced the chunk.
    """
    # end="" keeps chunks on one line; flush=True makes streaming visible live.
    print(f"[{agent_role}] {step_type}: {chunk}", end="", flush=True)


def main():
    """Build a one-agent crew and run it with streaming enabled."""
    # stream=True on the LLM enables chunk-level streaming from the model.
    llm = LLM(model="gpt-4o-mini", stream=True)

    agent = Agent(
        role="Content Writer",
        goal="Write engaging content",
        backstory="You are an experienced content writer who creates compelling narratives.",
        llm=llm,
        verbose=False,
    )

    task = Task(
        description="Write a short story about a robot learning to paint",
        expected_output="A creative short story of 2-3 paragraphs",
        agent=agent,
    )

    crew = Crew(
        agents=[agent],
        tasks=[task],
        verbose=False,
    )

    print("Starting crew execution with streaming...")
    # stream_callback is invoked for every chunk produced during kickoff.
    result = crew.kickoff(
        stream=True,
        stream_callback=stream_callback,
    )

    print(f"\n\nFinal result:\n{result}")


# Guard the entry point so importing this module does not trigger live
# LLM calls; only direct execution runs the crew.
if __name__ == "__main__":
    main()
51
examples/streaming_multi_agent_example.py
Normal file
51
examples/streaming_multi_agent_example.py
Normal file
@@ -0,0 +1,51 @@
"""Multi-agent CrewAI streaming example.

A researcher agent and a writer agent run sequentially; ``stream_callback``
prints every streamed chunk from both agents, tagged with the agent's role.
"""

from crewai import Agent, Task, Crew
from crewai.llm import LLM


def stream_callback(chunk, agent_role, task_description, step_type):
    """Print one streamed chunk from any agent as it arrives.

    Args:
        chunk: The streamed text fragment produced by the LLM.
        agent_role: Role string of the agent that produced the chunk.
        task_description: Description of the task being executed.
        step_type: The execution step that produced the chunk.
    """
    # end="" keeps chunks on one line; flush=True makes streaming visible live.
    print(f"[{agent_role}] {step_type}: {chunk}", end="", flush=True)


def main():
    """Build a two-agent crew (researcher -> writer) and run it with streaming."""
    # stream=True on the LLM enables chunk-level streaming; both agents share it.
    llm = LLM(model="gpt-4o-mini", stream=True)

    researcher = Agent(
        role="Research Analyst",
        goal="Research and analyze topics thoroughly",
        backstory="You are an experienced research analyst who excels at gathering and analyzing information.",
        llm=llm,
        verbose=False,
    )

    writer = Agent(
        role="Content Writer",
        goal="Write engaging content based on research",
        backstory="You are a skilled content writer who creates compelling narratives from research data.",
        llm=llm,
        verbose=False,
    )

    research_task = Task(
        description="Research the latest trends in artificial intelligence and machine learning",
        expected_output="A comprehensive research summary of AI/ML trends",
        agent=researcher,
    )

    writing_task = Task(
        description="Write an engaging blog post about AI trends based on the research",
        expected_output="A well-written blog post about AI trends",
        agent=writer,
        # The writer receives the researcher's output as context.
        context=[research_task],
    )

    crew = Crew(
        agents=[researcher, writer],
        tasks=[research_task, writing_task],
        verbose=False,
    )

    print("Starting multi-agent crew execution with streaming...")
    # stream_callback is invoked for every chunk produced during kickoff.
    result = crew.kickoff(
        stream=True,
        stream_callback=stream_callback,
    )

    print(f"\n\nFinal result:\n{result}")


# Guard the entry point so importing this module does not trigger live
# LLM calls; only direct execution runs the crew.
if __name__ == "__main__":
    main()
Reference in New Issue
Block a user