diff --git a/docs/concepts/flows.mdx b/docs/concepts/flows.mdx
index 9ead26d70..324118310 100644
--- a/docs/concepts/flows.mdx
+++ b/docs/concepts/flows.mdx
@@ -18,63 +18,60 @@ Flows allow you to create structured, event-driven workflows. They provide a sea
4. **Flexible Control Flow**: Implement conditional logic, loops, and branching within your workflows.
-5. **Input Flexibility**: Flows can accept inputs to initialize or update their state, with different handling for structured and unstructured state management.
-
## Getting Started
Let's create a simple Flow where you will use OpenAI to generate a random city in one task and then use that city to generate a fun fact in another task.
-### Passing Inputs to Flows
+```python Code
-Flows can accept inputs to initialize or update their state before execution. The way inputs are handled depends on whether the flow uses structured or unstructured state management.
-
-#### Structured State Management
-
-In structured state management, the flow's state is defined using a Pydantic `BaseModel`. Inputs must match the model's schema, and any updates will overwrite the default values.
-
-```python
from crewai.flow.flow import Flow, listen, start
-from pydantic import BaseModel
+from dotenv import load_dotenv
+from litellm import completion
-class ExampleState(BaseModel):
- counter: int = 0
- message: str = ""
-class StructuredExampleFlow(Flow[ExampleState]):
+class ExampleFlow(Flow):
+ model = "gpt-4o-mini"
+
@start()
- def first_method(self):
- # Implementation
+ def generate_city(self):
+ print("Starting flow")
-flow = StructuredExampleFlow()
-flow.kickoff(inputs={"counter": 10})
-```
+ response = completion(
+ model=self.model,
+ messages=[
+ {
+ "role": "user",
+ "content": "Return the name of a random city in the world.",
+ },
+ ],
+ )
-In this example, the `counter` is initialized to `10`, while `message` retains its default value.
+ random_city = response["choices"][0]["message"]["content"]
+ print(f"Random City: {random_city}")
-#### Unstructured State Management
+ return random_city
-In unstructured state management, the flow's state is a dictionary. You can pass any dictionary to update the state.
+ @listen(generate_city)
+ def generate_fun_fact(self, random_city):
+ response = completion(
+ model=self.model,
+ messages=[
+ {
+ "role": "user",
+ "content": f"Tell me a fun fact about {random_city}",
+ },
+ ],
+ )
-```python
-from crewai.flow.flow import Flow, listen, start
+ fun_fact = response["choices"][0]["message"]["content"]
+ return fun_fact
-class UnstructuredExampleFlow(Flow):
- @start()
- def first_method(self):
- # Implementation
-flow = UnstructuredExampleFlow()
-flow.kickoff(inputs={"counter": 5, "message": "Initial message"})
-```
-Here, both `counter` and `message` are updated based on the provided inputs.
+flow = ExampleFlow()
+result = flow.kickoff()
-**Note:** Ensure that inputs for structured state management adhere to the defined schema to avoid validation errors.
-
-### Example Flow
-
-```python
-# Existing example code
+print(f"Generated fun fact: {result}")
```
In the above example, we have created a simple Flow that generates a random city using OpenAI and then generates a fun fact about that city. The Flow consists of two tasks: `generate_city` and `generate_fun_fact`. The `generate_city` task is the starting point of the Flow, and the `generate_fun_fact` task listens for the output of the `generate_city` task.
@@ -97,14 +94,14 @@ The `@listen()` decorator can be used in several ways:
1. **Listening to a Method by Name**: You can pass the name of the method you want to listen to as a string. When that method completes, the listener method will be triggered.
- ```python
+ ```python Code
@listen("generate_city")
def generate_fun_fact(self, random_city):
# Implementation
```
2. **Listening to a Method Directly**: You can pass the method itself. When that method completes, the listener method will be triggered.
- ```python
+ ```python Code
@listen(generate_city)
def generate_fun_fact(self, random_city):
# Implementation
@@ -121,7 +118,7 @@ When you run a Flow, the final output is determined by the last method that comp
Here's how you can access the final output:
-```python
+```python Code
from crewai.flow.flow import Flow, listen, start
class OutputExampleFlow(Flow):
@@ -133,17 +130,18 @@ class OutputExampleFlow(Flow):
def second_method(self, first_output):
return f"Second method received: {first_output}"
+
flow = OutputExampleFlow()
final_output = flow.kickoff()
print("---- Final Output ----")
print(final_output)
-```
+```
-```text
+```text Output
---- Final Output ----
Second method received: Output from first_method
-```
+```
@@ -158,7 +156,7 @@ Here's an example of how to update and access the state:
-```python
+```python Code
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel
@@ -186,7 +184,7 @@ print("Final State:")
print(flow.state)
```
-```text
+```text Output
Final Output: Hello from first_method - updated by second_method
Final State:
counter=2 message='Hello from first_method - updated by second_method'
@@ -210,10 +208,10 @@ allowing developers to choose the approach that best fits their application's ne
In unstructured state management, all state is stored in the `state` attribute of the `Flow` class.
This approach offers flexibility, enabling developers to add or modify state attributes on the fly without defining a strict schema.
-```python
+```python Code
from crewai.flow.flow import Flow, listen, start
-class UnstructuredExampleFlow(Flow):
+class UnstructuredExampleFlow(Flow):
@start()
def first_method(self):
@@ -232,7 +230,8 @@ class UnstructuredExampleFlow(Flow):
print(f"State after third_method: {self.state}")
-flow = UnstructuredExampleFlow()
+
+flow = UnstructuredExampleFlow()
flow.kickoff()
```
@@ -246,14 +245,16 @@ flow.kickoff()
Structured state management leverages predefined schemas to ensure consistency and type safety across the workflow.
By using models like Pydantic's `BaseModel`, developers can define the exact shape of the state, enabling better validation and auto-completion in development environments.
-```python
+```python Code
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel
+
class ExampleState(BaseModel):
counter: int = 0
message: str = ""
+
class StructuredExampleFlow(Flow[ExampleState]):
@start()
@@ -272,6 +273,7 @@ class StructuredExampleFlow(Flow[ExampleState]):
print(f"State after third_method: {self.state}")
+
flow = StructuredExampleFlow()
flow.kickoff()
```
@@ -305,7 +307,7 @@ The `or_` function in Flows allows you to listen to multiple methods and trigger
-```python
+```python Code
from crewai.flow.flow import Flow, listen, or_, start
class OrExampleFlow(Flow):
@@ -322,11 +324,13 @@ class OrExampleFlow(Flow):
def logger(self, result):
print(f"Logger: {result}")
+
+
flow = OrExampleFlow()
flow.kickoff()
```
-```text
+```text Output
Logger: Hello from the start method
Logger: Hello from the second method
```
@@ -342,7 +346,7 @@ The `and_` function in Flows allows you to listen to multiple methods and trigge
-```python
+```python Code
from crewai.flow.flow import Flow, and_, listen, start
class AndExampleFlow(Flow):
@@ -364,7 +368,7 @@ flow = AndExampleFlow()
flow.kickoff()
```
-```text
+```text Output
---- Logger ----
{'greeting': 'Hello from the start method', 'joke': 'What do computers eat? Microchips.'}
```
@@ -381,7 +385,7 @@ You can specify different routes based on the output of the method, allowing you
-```python
+```python Code
import random
from crewai.flow.flow import Flow, listen, router, start
from pydantic import BaseModel
@@ -412,11 +416,12 @@ class RouterFlow(Flow[ExampleState]):
def fourth_method(self):
print("Fourth method running")
+
flow = RouterFlow()
flow.kickoff()
```
-```text
+```text Output
Starting the structured flow
Third method running
Fourth method running
@@ -479,7 +484,7 @@ The `main.py` file is where you create your flow and connect the crews together.
Here's an example of how you can connect the `poem_crew` in the `main.py` file:
-```python
+```python Code
#!/usr/bin/env python
from random import randint
@@ -555,42 +560,6 @@ uv run kickoff
The flow will execute, and you should see the output in the console.
-
-### Adding Additional Crews Using the CLI
-
-Once you have created your initial flow, you can easily add additional crews to your project using the CLI. This allows you to expand your flow's capabilities by integrating new crews without starting from scratch.
-
-To add a new crew to your existing flow, use the following command:
-
-```bash
-crewai flow add-crew
-```
-
-This command will create a new directory for your crew within the `crews` folder of your flow project. It will include the necessary configuration files and a crew definition file, similar to the initial setup.
-
-#### Folder Structure
-
-After adding a new crew, your folder structure will look like this:
-
-| Directory/File | Description |
-| :--------------------- | :----------------------------------------------------------------- |
-| `name_of_flow/` | Root directory for the flow. |
-| ├── `crews/` | Contains directories for specific crews. |
-| │ ├── `poem_crew/` | Directory for the "poem_crew" with its configurations and scripts. |
-| │ │ ├── `config/` | Configuration files directory for the "poem_crew". |
-| │ │ │ ├── `agents.yaml` | YAML file defining the agents for "poem_crew". |
-| │ │ │ └── `tasks.yaml` | YAML file defining the tasks for "poem_crew". |
-| │ │ └── `poem_crew.py` | Script for "poem_crew" functionality. |
-| └── `name_of_crew/` | Directory for the new crew. |
-| ├── `config/` | Configuration files directory for the new crew. |
-| │ ├── `agents.yaml` | YAML file defining the agents for the new crew. |
-| │ └── `tasks.yaml` | YAML file defining the tasks for the new crew. |
-| └── `name_of_crew.py` | Script for the new crew functionality. |
-
-You can then customize the `agents.yaml` and `tasks.yaml` files to define the agents and tasks for your new crew. The `name_of_crew.py` file will contain the crew's logic, which you can modify to suit your needs.
-
-By using the CLI to add additional crews, you can efficiently build complex AI workflows that leverage multiple crews working together.
-
## Plot Flows
Visualizing your AI workflows can provide valuable insights into the structure and execution paths of your flows. CrewAI offers a powerful visualization tool that allows you to generate interactive plots of your flows, making it easier to understand and optimize your AI workflows.
@@ -607,7 +576,7 @@ CrewAI provides two convenient methods to generate plots of your flows:
If you are working directly with a flow instance, you can generate a plot by calling the `plot()` method on your flow object. This method will create an HTML file containing the interactive plot of your flow.
-```python
+```python Code
# Assuming you have a flow instance
flow.plot("my_flow_plot")
```
@@ -630,114 +599,13 @@ The generated plot will display nodes representing the tasks in your flow, with
By visualizing your flows, you can gain a clearer understanding of the workflow's structure, making it easier to debug, optimize, and communicate your AI processes to others.
+### Conclusion
-## Advanced
-
-In this section, we explore more complex use cases of CrewAI Flows, starting with a self-evaluation loop. This pattern is crucial for developing AI systems that can iteratively improve their outputs through feedback.
-
-### 1) Self-Evaluation Loop
-
-The self-evaluation loop is a powerful pattern that allows AI workflows to automatically assess and refine their outputs. This example demonstrates how to set up a flow that generates content, evaluates it, and iterates based on feedback until the desired quality is achieved.
-
-#### Overview
-
-The self-evaluation loop involves two main Crews:
-
-1. **ShakespeareanXPostCrew**: Generates a Shakespearean-style post on a given topic.
-2. **XPostReviewCrew**: Evaluates the generated post, providing feedback on its validity and quality.
-
-The process iterates until the post meets the criteria or a maximum retry limit is reached. This approach ensures high-quality outputs through iterative refinement.
-
-#### Importance
-
-This pattern is essential for building robust AI systems that can adapt and improve over time. By automating the evaluation and feedback loop, developers can ensure that their AI workflows produce reliable and high-quality results.
-
-#### Main Code Highlights
-
-Below is the `main.py` file for the self-evaluation loop flow:
-
-```python
-from typing import Optional
-from crewai.flow.flow import Flow, listen, router, start
-from pydantic import BaseModel
-from self_evaluation_loop_flow.crews.shakespeare_crew.shakespeare_crew import (
- ShakespeareanXPostCrew,
-)
-from self_evaluation_loop_flow.crews.x_post_review_crew.x_post_review_crew import (
- XPostReviewCrew,
-)
-
-class ShakespeareXPostFlowState(BaseModel):
- x_post: str = ""
- feedback: Optional[str] = None
- valid: bool = False
- retry_count: int = 0
-
-class ShakespeareXPostFlow(Flow[ShakespeareXPostFlowState]):
-
- @start("retry")
- def generate_shakespeare_x_post(self):
- print("Generating Shakespearean X post")
- topic = "Flying cars"
- result = (
- ShakespeareanXPostCrew()
- .crew()
- .kickoff(inputs={"topic": topic, "feedback": self.state.feedback})
- )
- print("X post generated", result.raw)
- self.state.x_post = result.raw
-
- @router(generate_shakespeare_x_post)
- def evaluate_x_post(self):
- if self.state.retry_count > 3:
- return "max_retry_exceeded"
- result = XPostReviewCrew().crew().kickoff(inputs={"x_post": self.state.x_post})
- self.state.valid = result["valid"]
- self.state.feedback = result["feedback"]
- print("valid", self.state.valid)
- print("feedback", self.state.feedback)
- self.state.retry_count += 1
- if self.state.valid:
- return "complete"
- return "retry"
-
- @listen("complete")
- def save_result(self):
- print("X post is valid")
- print("X post:", self.state.x_post)
- with open("x_post.txt", "w") as file:
- file.write(self.state.x_post)
-
- @listen("max_retry_exceeded")
- def max_retry_exceeded_exit(self):
- print("Max retry count exceeded")
- print("X post:", self.state.x_post)
- print("Feedback:", self.state.feedback)
-
-def kickoff():
- shakespeare_flow = ShakespeareXPostFlow()
- shakespeare_flow.kickoff()
-
-def plot():
- shakespeare_flow = ShakespeareXPostFlow()
- shakespeare_flow.plot()
-
-if __name__ == "__main__":
- kickoff()
-```
-
-#### Code Highlights
-
-- **Retry Mechanism**: The flow uses a retry mechanism to regenerate the post if it doesn't meet the criteria, up to a maximum of three retries.
-- **Feedback Loop**: Feedback from the `XPostReviewCrew` is used to refine the post iteratively.
-- **State Management**: The flow maintains state using a Pydantic model, ensuring type safety and clarity.
-
-For a complete example and further details, please refer to the [Self Evaluation Loop Flow repository](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow).
-
+Plotting your flows is a powerful feature of CrewAI that enhances your ability to design and manage complex AI workflows. Whether you choose to use the `plot()` method or the command line, generating plots will provide you with a visual representation of your workflows, aiding in both development and presentation.
## Next Steps
-If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are five specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:
+If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are four specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:
1. **Email Auto Responder Flow**: This example demonstrates an infinite loop where a background job continually runs to automate email responses. It's a great use case for tasks that need to be performed repeatedly without manual intervention. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/email_auto_responder_flow)
@@ -747,8 +615,6 @@ If you're interested in exploring additional examples of flows, we have a variet
4. **Meeting Assistant Flow**: This flow demonstrates how to broadcast one event to trigger multiple follow-up actions. For instance, after a meeting is completed, the flow can update a Trello board, send a Slack message, and save the results. It's a great example of handling multiple outcomes from a single event, making it ideal for comprehensive task management and notification systems. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/meeting_assistant_flow)
-5. **Self Evaluation Loop Flow**: This flow demonstrates a self-evaluation loop where AI workflows automatically assess and refine their outputs through feedback. It involves generating content, evaluating it, and iterating until the desired quality is achieved. This pattern is crucial for developing robust AI systems that can adapt and improve over time. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow)
-
By exploring these examples, you can gain insights into how to leverage CrewAI Flows for various use cases, from automating repetitive tasks to managing complex, multi-step processes with dynamic decision-making and human feedback.
Also, check out our YouTube video on how to use flows in CrewAI below!
@@ -762,4 +628,4 @@ Also, check out our YouTube video on how to use flows in CrewAI below!
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerpolicy="strict-origin-when-cross-origin"
allowfullscreen
->
+>
\ No newline at end of file
diff --git a/docs/concepts/langchain-tools.mdx b/docs/concepts/langchain-tools.mdx
index 538581aee..68a7998a9 100644
--- a/docs/concepts/langchain-tools.mdx
+++ b/docs/concepts/langchain-tools.mdx
@@ -7,32 +7,45 @@ icon: link
## Using LangChain Tools
- CrewAI seamlessly integrates with LangChain’s comprehensive [list of tools](https://python.langchain.com/docs/integrations/tools/), all of which can be used with CrewAI.
+ CrewAI seamlessly integrates with LangChain's comprehensive [list of tools](https://python.langchain.com/docs/integrations/tools/), all of which can be used with CrewAI.
```python Code
import os
-from crewai import Agent
-from langchain.agents import Tool
-from langchain.utilities import GoogleSerperAPIWrapper
+from dotenv import load_dotenv
+from crewai import Agent, Task, Crew
+from crewai.tools import BaseTool
+from pydantic import Field
+from langchain_community.utilities import GoogleSerperAPIWrapper
-# Setup API keys
-os.environ["SERPER_API_KEY"] = "Your Key"
+# Set up your SERPER_API_KEY key in an .env file, eg:
+# SERPER_API_KEY=
+load_dotenv()
search = GoogleSerperAPIWrapper()
-# Create and assign the search tool to an agent
-serper_tool = Tool(
- name="Intermediate Answer",
- func=search.run,
- description="Useful for search-based queries",
-)
+class SearchTool(BaseTool):
+ name: str = "Search"
+ description: str = "Useful for search-based queries. Use this to find current information about markets, companies, and trends."
+ search: GoogleSerperAPIWrapper = Field(default_factory=GoogleSerperAPIWrapper)
-agent = Agent(
- role='Research Analyst',
- goal='Provide up-to-date market analysis',
- backstory='An expert analyst with a keen eye for market trends.',
- tools=[serper_tool]
+ def _run(self, query: str) -> str:
+ """Execute the search query and return results"""
+ try:
+ return self.search.run(query)
+ except Exception as e:
+ return f"Error performing search: {str(e)}"
+
+# Create Agents
+researcher = Agent(
+ role='Research Analyst',
+ goal='Gather current market data and trends',
+ backstory="""You are an expert research analyst with years of experience in
+ gathering market intelligence. You're known for your ability to find
+ relevant and up-to-date market information and present it in a clear,
+ actionable format.""",
+ tools=[SearchTool()],
+ verbose=True
)
# rest of the code ...
@@ -40,6 +53,6 @@ agent = Agent(
## Conclusion
-Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively.
-When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem. Consider utilizing error handling, caching mechanisms,
-and the flexibility of tool arguments to optimize your agents' performance and capabilities.
\ No newline at end of file
+Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively.
+When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem. Consider utilizing error handling, caching mechanisms,
+and the flexibility of tool arguments to optimize your agents' performance and capabilities.
diff --git a/docs/how-to/llm-connections.mdx b/docs/how-to/llm-connections.mdx
index 542a9c110..a2fc540cc 100644
--- a/docs/how-to/llm-connections.mdx
+++ b/docs/how-to/llm-connections.mdx
@@ -125,10 +125,10 @@ You can connect to OpenAI-compatible LLMs using either environment variables or
- ```python Code
- llm = LLM(
- model="custom-model-name",
- api_key="your-api-key",
+ ```python Code
+ llm = LLM(
+ model="custom-model-name",
+ api_key="your-api-key",
base_url="https://api.your-provider.com/v1"
)
agent = Agent(llm=llm, ...)
@@ -179,4 +179,4 @@ This is particularly useful when working with OpenAI-compatible APIs or when you
## Conclusion
-By leveraging LiteLLM, CrewAI offers seamless integration with a vast array of LLMs. This flexibility allows you to choose the most suitable model for your specific needs, whether you prioritize performance, cost-efficiency, or local deployment. Remember to consult the [LiteLLM documentation](https://docs.litellm.ai/docs/) for the most up-to-date information on supported models and configuration options.
\ No newline at end of file
+By leveraging LiteLLM, CrewAI offers seamless integration with a vast array of LLMs. This flexibility allows you to choose the most suitable model for your specific needs, whether you prioritize performance, cost-efficiency, or local deployment. Remember to consult the [LiteLLM documentation](https://docs.litellm.ai/docs/) for the most up-to-date information on supported models and configuration options.
diff --git a/docs/how-to/openlit-observability.mdx b/docs/how-to/openlit-observability.mdx
new file mode 100644
index 000000000..e95989e8e
--- /dev/null
+++ b/docs/how-to/openlit-observability.mdx
@@ -0,0 +1,181 @@
+---
+title: Agent Monitoring with OpenLIT
+description: Quickly start monitoring your Agents in just a single line of code with OpenTelemetry.
+icon: magnifying-glass-chart
+---
+
+# OpenLIT Overview
+
+[OpenLIT](https://github.com/openlit/openlit?src=crewai-docs) is an open-source tool that makes it simple to monitor the performance of AI agents, LLMs, VectorDBs, and GPUs with just **one** line of code.
+
+It provides OpenTelemetry-native tracing and metrics to track important parameters like cost, latency, interactions and task sequences.
+This setup enables you to track hyperparameters and monitor for performance issues, helping you find ways to enhance and fine-tune your agents over time.
+
+
+
+
+
+
+
+### Features
+
+- **Analytics Dashboard**: Monitor your Agents health and performance with detailed dashboards that track metrics, costs, and user interactions.
+- **OpenTelemetry-native Observability SDK**: Vendor-neutral SDKs to send traces and metrics to your existing observability tools like Grafana, DataDog and more.
+- **Cost Tracking for Custom and Fine-Tuned Models**: Tailor cost estimations for specific models using custom pricing files for precise budgeting.
+- **Exceptions Monitoring Dashboard**: Quickly spot and resolve issues by tracking common exceptions and errors with a monitoring dashboard.
+- **Compliance and Security**: Detect potential threats such as profanity and PII leaks.
+- **Prompt Injection Detection**: Identify potential code injection and secret leaks.
+- **API Keys and Secrets Management**: Securely handle your LLM API keys and secrets centrally, avoiding insecure practices.
+- **Prompt Management**: Manage and version Agent prompts using PromptHub for consistent and easy access across Agents.
+- **Model Playground**: Test and compare different models for your CrewAI agents before deployment.
+
+## Setup Instructions
+
+
+
+
+
+ ```shell
+ git clone git@github.com:openlit/openlit.git
+ ```
+
+
+ From the root directory of the [OpenLIT Repo](https://github.com/openlit/openlit), Run the below command:
+ ```shell
+ docker compose up -d
+ ```
+
+
+
+
+ ```shell
+ pip install openlit
+ ```
+
+
+ Add the following two lines to your application code:
+
+
+ ```python
+ import openlit
+ openlit.init(otlp_endpoint="http://127.0.0.1:4318")
+ ```
+
+ Example Usage for monitoring a CrewAI Agent:
+
+ ```python
+ from crewai import Agent, Task, Crew, Process
+ import openlit
+
+ openlit.init(disable_metrics=True)
+ # Define your agents
+ researcher = Agent(
+ role="Researcher",
+ goal="Conduct thorough research and analysis on AI and AI agents",
+ backstory="You're an expert researcher, specialized in technology, software engineering, AI, and startups. You work as a freelancer and are currently researching for a new client.",
+ allow_delegation=False,
+ llm='command-r'
+ )
+
+
+ # Define your task
+ task = Task(
+ description="Generate a list of 5 interesting ideas for an article, then write one captivating paragraph for each idea that showcases the potential of a full article on this topic. Return the list of ideas with their paragraphs and your notes.",
+ expected_output="5 bullet points, each with a paragraph and accompanying notes.",
+ )
+
+ # Define the manager agent
+ manager = Agent(
+ role="Project Manager",
+ goal="Efficiently manage the crew and ensure high-quality task completion",
+ backstory="You're an experienced project manager, skilled in overseeing complex projects and guiding teams to success. Your role is to coordinate the efforts of the crew members, ensuring that each task is completed on time and to the highest standard.",
+ allow_delegation=True,
+ llm='command-r'
+ )
+
+ # Instantiate your crew with a custom manager
+ crew = Crew(
+ agents=[researcher],
+ tasks=[task],
+ manager_agent=manager,
+ process=Process.hierarchical,
+ )
+
+ # Start the crew's work
+ result = crew.kickoff()
+
+ print(result)
+ ```
+
+
+
+ Add the following two lines to your application code:
+ ```python
+ import openlit
+
+ openlit.init()
+ ```
+
+ Run the following command to configure the OTEL export endpoint:
+ ```shell
+  export OTEL_EXPORTER_OTLP_ENDPOINT="http://127.0.0.1:4318"
+ ```
+
+ Example Usage for monitoring a CrewAI Async Agent:
+
+ ```python
+ import asyncio
+ from crewai import Crew, Agent, Task
+ import openlit
+
+ openlit.init(otlp_endpoint="http://127.0.0.1:4318")
+
+ # Create an agent with code execution enabled
+ coding_agent = Agent(
+ role="Python Data Analyst",
+ goal="Analyze data and provide insights using Python",
+ backstory="You are an experienced data analyst with strong Python skills.",
+ allow_code_execution=True,
+ llm="command-r"
+ )
+
+ # Create a task that requires code execution
+ data_analysis_task = Task(
+ description="Analyze the given dataset and calculate the average age of participants. Ages: {ages}",
+ agent=coding_agent,
+ expected_output="5 bullet points, each with a paragraph and accompanying notes.",
+ )
+
+ # Create a crew and add the task
+ analysis_crew = Crew(
+ agents=[coding_agent],
+ tasks=[data_analysis_task]
+ )
+
+ # Async function to kickoff the crew asynchronously
+ async def async_crew_execution():
+ result = await analysis_crew.kickoff_async(inputs={"ages": [25, 30, 35, 40, 45]})
+ print("Crew Result:", result)
+
+ # Run the async function
+ asyncio.run(async_crew_execution())
+ ```
+
+
+ Refer to OpenLIT [Python SDK repository](https://github.com/openlit/openlit/tree/main/sdk/python) for more advanced configurations and use cases.
+
+
+ With the Agent Observability data now being collected and sent to OpenLIT, the next step is to visualize and analyze this data to get insights into your Agent's performance, behavior, and identify areas of improvement.
+
+ Just head over to OpenLIT at `127.0.0.1:3000` on your browser to start exploring. You can login using the default credentials
+ - **Email**: `user@openlit.io`
+ - **Password**: `openlituser`
+
+
+
+
+
+
+
+
+
diff --git a/docs/images/openlit1.png b/docs/images/openlit1.png
new file mode 100644
index 000000000..4fab0340d
Binary files /dev/null and b/docs/images/openlit1.png differ
diff --git a/docs/images/openlit2.png b/docs/images/openlit2.png
new file mode 100644
index 000000000..d36352847
Binary files /dev/null and b/docs/images/openlit2.png differ
diff --git a/docs/images/openlit3.png b/docs/images/openlit3.png
new file mode 100644
index 000000000..4a6636a11
Binary files /dev/null and b/docs/images/openlit3.png differ
diff --git a/docs/mint.json b/docs/mint.json
index d5aa8cb8f..fad9689b8 100644
--- a/docs/mint.json
+++ b/docs/mint.json
@@ -99,7 +99,8 @@
"how-to/replay-tasks-from-latest-crew-kickoff",
"how-to/conditional-tasks",
"how-to/agentops-observability",
- "how-to/langtrace-observability"
+ "how-to/langtrace-observability",
+ "how-to/openlit-observability"
]
},
{
diff --git a/docs/quickstart.mdx b/docs/quickstart.mdx
index a78a56182..9ec3170e1 100644
--- a/docs/quickstart.mdx
+++ b/docs/quickstart.mdx
@@ -349,7 +349,7 @@ Replace `` with the ID of the task you want to replay.
If you need to reset the memory of your crew before running it again, you can do so by calling the reset memory feature:
```shell
-crewai reset-memory
+crewai reset-memories --all
```
This will clear the crew's memory, allowing for a fresh start.
diff --git a/mkdocs.yml b/mkdocs.yml
index 1b6c1f959..df8a1ee70 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -152,6 +152,7 @@ nav:
- Conditional Tasks: 'how-to/Conditional-Tasks.md'
- Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
- Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md'
+    - Agent Monitoring with OpenLIT: 'how-to/Openlit-Observability.md'
- Tools Docs:
- Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
- Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'
diff --git a/pyproject.toml b/pyproject.toml
index 1d7d8cc43..86891294c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "crewai"
-version = "0.83.0"
+version = "0.85.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<=3.13"
diff --git a/src/crewai/__init__.py b/src/crewai/__init__.py
index 34d7f17c9..440ba090c 100644
--- a/src/crewai/__init__.py
+++ b/src/crewai/__init__.py
@@ -16,7 +16,7 @@ warnings.filterwarnings(
category=UserWarning,
module="pydantic.main",
)
-__version__ = "0.83.0"
+__version__ = "0.85.0"
__all__ = [
"Agent",
"Crew",
diff --git a/src/crewai/cli/templates/crew/pyproject.toml b/src/crewai/cli/templates/crew/pyproject.toml
index 1e456c725..e45732685 100644
--- a/src/crewai/cli/templates/crew/pyproject.toml
+++ b/src/crewai/cli/templates/crew/pyproject.toml
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13"
dependencies = [
- "crewai[tools]>=0.83.0,<1.0.0"
+ "crewai[tools]>=0.85.0,<1.0.0"
]
[project.scripts]
diff --git a/src/crewai/cli/templates/flow/pyproject.toml b/src/crewai/cli/templates/flow/pyproject.toml
index 575aaf086..d981987c8 100644
--- a/src/crewai/cli/templates/flow/pyproject.toml
+++ b/src/crewai/cli/templates/flow/pyproject.toml
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13"
dependencies = [
- "crewai[tools]>=0.83.0,<1.0.0",
+ "crewai[tools]>=0.85.0,<1.0.0",
]
[project.scripts]
diff --git a/src/crewai/cli/templates/pipeline/pyproject.toml b/src/crewai/cli/templates/pipeline/pyproject.toml
index d12dccf11..1e7f4efd5 100644
--- a/src/crewai/cli/templates/pipeline/pyproject.toml
+++ b/src/crewai/cli/templates/pipeline/pyproject.toml
@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
[tool.poetry.dependencies]
python = ">=3.10,<=3.13"
-crewai = { extras = ["tools"], version = ">=0.83.0,<1.0.0" }
+crewai = { extras = ["tools"], version = ">=0.85.0,<1.0.0" }
asyncio = "*"
[tool.poetry.scripts]
diff --git a/src/crewai/cli/templates/pipeline_router/pyproject.toml b/src/crewai/cli/templates/pipeline_router/pyproject.toml
index 06487bcfa..49208d120 100644
--- a/src/crewai/cli/templates/pipeline_router/pyproject.toml
+++ b/src/crewai/cli/templates/pipeline_router/pyproject.toml
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"]
requires-python = ">=3.10,<=3.13"
dependencies = [
- "crewai[tools]>=0.83.0,<1.0.0"
+ "crewai[tools]>=0.85.0,<1.0.0"
]
[project.scripts]
diff --git a/src/crewai/cli/templates/tool/pyproject.toml b/src/crewai/cli/templates/tool/pyproject.toml
index 7c1afddfa..33dabd38e 100644
--- a/src/crewai/cli/templates/tool/pyproject.toml
+++ b/src/crewai/cli/templates/tool/pyproject.toml
@@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md"
requires-python = ">=3.10,<=3.13"
dependencies = [
- "crewai[tools]>=0.83.0"
+ "crewai[tools]>=0.85.0"
]
diff --git a/src/crewai/memory/entity/entity_memory.py b/src/crewai/memory/entity/entity_memory.py
index 88d33c09a..67c72e927 100644
--- a/src/crewai/memory/entity/entity_memory.py
+++ b/src/crewai/memory/entity/entity_memory.py
@@ -10,7 +10,7 @@ class EntityMemory(Memory):
Inherits from the Memory class.
"""
- def __init__(self, crew=None, embedder_config=None, storage=None):
+ def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
@@ -33,6 +33,7 @@ class EntityMemory(Memory):
allow_reset=True,
embedder_config=embedder_config,
crew=crew,
+ path=path,
)
)
super().__init__(storage)
diff --git a/src/crewai/memory/long_term/long_term_memory.py b/src/crewai/memory/long_term/long_term_memory.py
index b9c36bdc9..656709ac9 100644
--- a/src/crewai/memory/long_term/long_term_memory.py
+++ b/src/crewai/memory/long_term/long_term_memory.py
@@ -14,8 +14,9 @@ class LongTermMemory(Memory):
LongTermMemoryItem instances.
"""
- def __init__(self, storage=None):
- storage = storage if storage else LTMSQLiteStorage()
+ def __init__(self, storage=None, path=None):
+ if not storage:
+ storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
super().__init__(storage)
def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
diff --git a/src/crewai/memory/short_term/short_term_memory.py b/src/crewai/memory/short_term/short_term_memory.py
index 67a568d63..4ade7eb93 100644
--- a/src/crewai/memory/short_term/short_term_memory.py
+++ b/src/crewai/memory/short_term/short_term_memory.py
@@ -13,7 +13,7 @@ class ShortTermMemory(Memory):
MemoryItem instances.
"""
- def __init__(self, crew=None, embedder_config=None, storage=None):
+ def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
@@ -32,7 +32,7 @@ class ShortTermMemory(Memory):
storage
if storage
else RAGStorage(
- type="short_term", embedder_config=embedder_config, crew=crew
+ type="short_term", embedder_config=embedder_config, crew=crew, path=path
)
)
super().__init__(storage)
diff --git a/src/crewai/memory/storage/rag_storage.py b/src/crewai/memory/storage/rag_storage.py
index 4023cf558..ded340a19 100644
--- a/src/crewai/memory/storage/rag_storage.py
+++ b/src/crewai/memory/storage/rag_storage.py
@@ -37,7 +37,7 @@ class RAGStorage(BaseRAGStorage):
app: ClientAPI | None = None
- def __init__(self, type, allow_reset=True, embedder_config=None, crew=None):
+ def __init__(self, type, allow_reset=True, embedder_config=None, crew=None, path=None):
super().__init__(type, allow_reset, embedder_config, crew)
agents = crew.agents if crew else []
agents = [self._sanitize_role(agent.role) for agent in agents]
@@ -47,6 +47,7 @@ class RAGStorage(BaseRAGStorage):
self.type = type
self.allow_reset = allow_reset
+ self.path = path
self._initialize_app()
def _set_embedder_config(self):
@@ -59,7 +60,7 @@ class RAGStorage(BaseRAGStorage):
self._set_embedder_config()
chroma_client = chromadb.PersistentClient(
- path=f"{db_storage_path()}/{self.type}/{self.agents}",
+ path=self.path if self.path else f"{db_storage_path()}/{self.type}/{self.agents}",
settings=Settings(allow_reset=self.allow_reset),
)
diff --git a/uv.lock b/uv.lock
index 050602e61..900e730da 100644
--- a/uv.lock
+++ b/uv.lock
@@ -608,7 +608,7 @@ wheels = [
[[package]]
name = "crewai"
-version = "0.83.0"
+version = "0.85.0"
source = { editable = "." }
dependencies = [
{ name = "appdirs" },