diff --git a/docs/concepts/crews.mdx b/docs/concepts/crews.mdx
index 97d1a7d6c..8ebefff2f 100644
--- a/docs/concepts/crews.mdx
+++ b/docs/concepts/crews.mdx
@@ -117,6 +117,12 @@ class YourCrewName:
)
```
+How to run the above code:
+
+```python
+YourCrewName().crew().kickoff(inputs={"any": "input here"})
+```
+
Tasks will be executed in the order they are defined.
@@ -184,6 +190,11 @@ class YourCrewName:
verbose=True
)
```
+How to run the above code:
+
+```python
+YourCrewName().crew().kickoff(inputs={})
+```
In this example:
diff --git a/docs/concepts/llms.mdx b/docs/concepts/llms.mdx
index 249a2c7e5..cef763146 100644
--- a/docs/concepts/llms.mdx
+++ b/docs/concepts/llms.mdx
@@ -677,18 +677,24 @@ CrewAI supports streaming responses from LLMs, allowing your application to rece
CrewAI emits events for each chunk received during streaming:
```python
- from crewai import LLM
- from crewai.utilities.events import EventHandler, LLMStreamChunkEvent
+ from crewai.utilities.events import (
+ LLMStreamChunkEvent
+ )
+ from crewai.utilities.events.base_event_listener import BaseEventListener
- class MyEventHandler(EventHandler):
- def on_llm_stream_chunk(self, event: LLMStreamChunkEvent):
- # Process each chunk as it arrives
- print(f"Received chunk: {event.chunk}")
+ class MyCustomListener(BaseEventListener):
+ def setup_listeners(self, crewai_event_bus):
+ @crewai_event_bus.on(LLMStreamChunkEvent)
+ def on_llm_stream_chunk(self, event: LLMStreamChunkEvent):
+ # Process each chunk as it arrives
+ print(f"Received chunk: {event.chunk}")
- # Register the event handler
- from crewai.utilities.events import crewai_event_bus
- crewai_event_bus.register_handler(MyEventHandler())
+ my_listener = MyCustomListener()
```
+
+
+ See the [Event Listeners documentation](https://docs.crewai.com/concepts/event-listener#event-listeners) for more details.
+