mirror of
https://github.com/crewAIInc/crewAI.git
synced 2025-12-16 12:28:30 +00:00
Compare commits
66 Commits
| SHA1 |
|---|
| 8cf1cd5a62 |
| 93c0467bba |
| 8f5f67de41 |
| f8ca49d8df |
| c119230fd6 |
| 14a36d3f5e |
| fde1ee45f9 |
| 6774bc2c53 |
| 94c62263ed |
| 495c3859af |
| 3e003f5e32 |
| 1c8b509d7d |
| 58af5c08f9 |
| 55e968c9e0 |
| 0b9092702b |
| 8376698534 |
| 3dc02310b6 |
| e70bc94ab6 |
| 9285ebf8a2 |
| 4ca785eb15 |
| c57cbd8591 |
| 7fb1289205 |
| f02681ae01 |
| c725105b1f |
| 36aa4bcb46 |
| b98f8f9fe1 |
| bcfcf88e78 |
| fd0de3a47e |
| c7b9ae02fd |
| 4afb022572 |
| 8610faef22 |
| 6d677541c7 |
| 49220ec163 |
| 40a676b7ac |
| 50bf146d1e |
| 40d378abfb |
| 1b09b085a7 |
| 9f2acfe91f |
| e856359e23 |
| faa231e278 |
| 3d44795476 |
| f50e709985 |
| d70c542547 |
| 57201fb856 |
| 9b142e580b |
| 3878daffd6 |
| 34954e6f74 |
| e66a135d5d |
| 66698503b8 |
| ec2967c362 |
| 4ae07468f3 |
| 6193eb13fa |
| 55cd15bfc6 |
| 5f46ff8836 |
| cdfbd5f62b |
| b43f3987ec |
| 240527d06c |
| 276cb7b7e8 |
| 048aa6cbcc |
| fa9949b9d0 |
| 500072d855 |
| 04bcfa6e2d |
| 26afee9bed |
| f29f4abdd7 |
| 4589d6fe9d |
| 201e652fa2 |
19 .github/security.md vendored Normal file
@@ -0,0 +1,19 @@
CrewAI takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organization.

If you believe you have found a security vulnerability in any CrewAI product or service, please report it to us as described below.

## Reporting a Vulnerability

Please do not report security vulnerabilities through public GitHub issues.

To report a vulnerability, please email us at security@crewai.com.

Please include the requested information listed below so that we can triage your report more quickly:

- Type of issue (e.g. SQL injection, cross-site scripting, etc.)
- Full paths of source file(s) related to the manifestation of the issue
- The location of the affected source code (tag/branch/commit or direct URL)
- Any special configuration required to reproduce the issue
- Step-by-step instructions to reproduce the issue (please include screenshots if needed)
- Proof-of-concept or exploit code (if possible)
- Impact of the issue, including how an attacker might exploit the issue

Once we have received your report, we will respond to you at the email address you provide. If the issue is confirmed, we will release a patch as soon as possible depending on the complexity of the issue.

At this time, we are not offering a bug bounty program. Any rewards will be at our discretion.
2 .github/workflows/security-checker.yml vendored
@@ -19,5 +19,5 @@ jobs:
        run: pip install bandit

      - name: Run Bandit
        run: bandit -c pyproject.toml -r src/ -lll
        run: bandit -c pyproject.toml -r src/ -ll
4 .github/workflows/tests.yml vendored
@@ -26,7 +26,7 @@ jobs:
        run: uv python install 3.11.9

      - name: Install the project
        run: uv sync --dev
        run: uv sync --dev --all-extras

      - name: Run tests
        run: uv run pytest tests
        run: uv run pytest tests -vv
4 .gitignore vendored
@@ -17,3 +17,7 @@ rc-tests/*
temp/*
.vscode/*
crew_tasks_output.json
.codesight
.mypy_cache
.ruff_cache
.venv
@@ -100,7 +100,7 @@ You can now start developing your crew by editing the files in the `src/my_proje

#### Example of a simple crew with a sequential process:

Instatiate your crew:
Instantiate your crew:

```shell
crewai create crew latest-ai-development

@@ -399,7 +399,7 @@ Data collected includes:
- Roles of agents in a crew
- Understand high level use cases so we can build better tools, integrations and examples about it
- Tools names available
- Understand out of the publically available tools, which ones are being used the most so we can improve them
- Understand out of the publicly available tools, which ones are being used the most so we can improve them

Users can opt-in to Further Telemetry, sharing the complete telemetry data by setting the `share_crew` attribute to `True` on their Crews. Enabling `share_crew` results in the collection of detailed crew and task execution data, including `goal`, `backstory`, `context`, and `output` of tasks. This enables a deeper insight into usage patterns while respecting the user's choice to share.
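For illustration, a minimal sketch of the opt-in described in the paragraph above (`share_crew` is the attribute the README names; the agents and tasks are assumed to be defined elsewhere):

```python
from crewai import Crew

# Hedged sketch: opting in to Further Telemetry on a crew
crew = Crew(
    agents=[...],      # your agents
    tasks=[...],       # your tasks
    share_crew=True,   # opt-in: shares detailed crew and task execution data
)
```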
@@ -22,7 +22,8 @@ A crew in crewAI represents a collaborative group of agents working together to
| **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
| **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
| **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). Defaults to `False`. |
| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
| **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
| **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
| **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
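A minimal sketch pulling several of these attributes together (values are illustrative; agents and tasks are assumed to be defined elsewhere):

```python
from crewai import Crew, Process

# Illustrative sketch only: attribute names come from the table above
crew = Crew(
    agents=[...],                      # your agents
    tasks=[...],                       # your tasks
    process=Process.sequential,
    memory=True,                       # short-term, long-term, and entity memory
    cache=True,                        # cache tool results (default)
    embedder={"provider": "openai"},   # default embedder configuration
    full_output=False,                 # return only the final output
)
```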
@@ -18,60 +18,63 @@ Flows allow you to create structured, event-driven workflows. They provide a sea

4. **Flexible Control Flow**: Implement conditional logic, loops, and branching within your workflows.

5. **Input Flexibility**: Flows can accept inputs to initialize or update their state, with different handling for structured and unstructured state management.

## Getting Started

Let's create a simple Flow where you will use OpenAI to generate a random city in one task and then use that city to generate a fun fact in another task.

```python Code
### Passing Inputs to Flows

Flows can accept inputs to initialize or update their state before execution. The way inputs are handled depends on whether the flow uses structured or unstructured state management.

#### Structured State Management

In structured state management, the flow's state is defined using a Pydantic `BaseModel`. Inputs must match the model's schema, and any updates will overwrite the default values.

```python
from crewai.flow.flow import Flow, listen, start
from dotenv import load_dotenv
from litellm import completion
from pydantic import BaseModel

class ExampleState(BaseModel):
counter: int = 0
message: str = ""

class ExampleFlow(Flow):
model = "gpt-4o-mini"

class StructuredExampleFlow(Flow[ExampleState]):
@start()
def generate_city(self):
print("Starting flow")
def first_method(self):
# Implementation

response = completion(
model=self.model,
messages=[
{
"role": "user",
"content": "Return the name of a random city in the world.",
},
],
)
flow = StructuredExampleFlow()
flow.kickoff(inputs={"counter": 10})
```

random_city = response["choices"][0]["message"]["content"]
print(f"Random City: {random_city}")
In this example, the `counter` is initialized to `10`, while `message` retains its default value.

return random_city
#### Unstructured State Management

@listen(generate_city)
def generate_fun_fact(self, random_city):
response = completion(
model=self.model,
messages=[
{
"role": "user",
"content": f"Tell me a fun fact about {random_city}",
},
],
)
In unstructured state management, the flow's state is a dictionary. You can pass any dictionary to update the state.

fun_fact = response["choices"][0]["message"]["content"]
return fun_fact
```python
from crewai.flow.flow import Flow, listen, start

class UnstructuredExampleFlow(Flow):
@start()
def first_method(self):
# Implementation

flow = UnstructuredExampleFlow()
flow.kickoff(inputs={"counter": 5, "message": "Initial message"})
```

flow = ExampleFlow()
result = flow.kickoff()
Here, both `counter` and `message` are updated based on the provided inputs.

print(f"Generated fun fact: {result}")
**Note:** Ensure that inputs for structured state management adhere to the defined schema to avoid validation errors.

### Example Flow

```python
# Existing example code
```

In the above example, we have created a simple Flow that generates a random city using OpenAI and then generates a fun fact about that city. The Flow consists of two tasks: `generate_city` and `generate_fun_fact`. The `generate_city` task is the starting point of the Flow, and the `generate_fun_fact` task listens for the output of the `generate_city` task.
@@ -94,14 +97,14 @@ The `@listen()` decorator can be used in several ways:

1. **Listening to a Method by Name**: You can pass the name of the method you want to listen to as a string. When that method completes, the listener method will be triggered.

```python Code
```python
@listen("generate_city")
def generate_fun_fact(self, random_city):
    # Implementation
```

2. **Listening to a Method Directly**: You can pass the method itself. When that method completes, the listener method will be triggered.

```python Code
```python
@listen(generate_city)
def generate_fun_fact(self, random_city):
    # Implementation
@@ -118,7 +121,7 @@ When you run a Flow, the final output is determined by the last method that comp
Here's how you can access the final output:

<CodeGroup>
```python Code
```python
from crewai.flow.flow import Flow, listen, start

class OutputExampleFlow(Flow):
@@ -130,18 +133,17 @@ class OutputExampleFlow(Flow):
    def second_method(self, first_output):
        return f"Second method received: {first_output}"


flow = OutputExampleFlow()
final_output = flow.kickoff()

print("---- Final Output ----")
print(final_output)
````
```

``` text Output
```text
---- Final Output ----
Second method received: Output from first_method
````
```

</CodeGroup>
@@ -156,7 +158,7 @@ Here's an example of how to update and access the state:

<CodeGroup>

```python Code
```python
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel

@@ -184,7 +186,7 @@ print("Final State:")
print(flow.state)
```

```text Output
```text
Final Output: Hello from first_method - updated by second_method
Final State:
counter=2 message='Hello from first_method - updated by second_method'
@@ -208,10 +210,10 @@ allowing developers to choose the approach that best fits their application's ne
In unstructured state management, all state is stored in the `state` attribute of the `Flow` class.
This approach offers flexibility, enabling developers to add or modify state attributes on the fly without defining a strict schema.

```python Code
```python
from crewai.flow.flow import Flow, listen, start

class UntructuredExampleFlow(Flow):
class UnstructuredExampleFlow(Flow):

    @start()
    def first_method(self):
@@ -230,8 +232,7 @@ class UntructuredExampleFlow(Flow):

        print(f"State after third_method: {self.state}")


flow = UntructuredExampleFlow()
flow = UnstructuredExampleFlow()
flow.kickoff()
```
@@ -245,16 +246,14 @@ flow.kickoff()

Structured state management leverages predefined schemas to ensure consistency and type safety across the workflow.
By using models like Pydantic's `BaseModel`, developers can define the exact shape of the state, enabling better validation and auto-completion in development environments.

```python Code
```python
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel


class ExampleState(BaseModel):
    counter: int = 0
    message: str = ""


class StructuredExampleFlow(Flow[ExampleState]):

    @start()
@@ -273,7 +272,6 @@ class StructuredExampleFlow(Flow[ExampleState]):

        print(f"State after third_method: {self.state}")


flow = StructuredExampleFlow()
flow.kickoff()
```
@@ -307,7 +305,7 @@ The `or_` function in Flows allows you to listen to multiple methods and trigger

<CodeGroup>

```python Code
```python
from crewai.flow.flow import Flow, listen, or_, start

class OrExampleFlow(Flow):
@@ -324,13 +322,11 @@ class OrExampleFlow(Flow):
    def logger(self, result):
        print(f"Logger: {result}")


flow = OrExampleFlow()
flow.kickoff()
```

```text Output
```text
Logger: Hello from the start method
Logger: Hello from the second method
```
@@ -346,7 +342,7 @@ The `and_` function in Flows allows you to listen to multiple methods and trigge

<CodeGroup>

```python Code
```python
from crewai.flow.flow import Flow, and_, listen, start

class AndExampleFlow(Flow):
@@ -368,7 +364,7 @@ flow = AndExampleFlow()
flow.kickoff()
```

```text Output
```text
---- Logger ----
{'greeting': 'Hello from the start method', 'joke': 'What do computers eat? Microchips.'}
```
@@ -385,7 +381,7 @@ You can specify different routes based on the output of the method, allowing you

<CodeGroup>

```python Code
```python
import random
from crewai.flow.flow import Flow, listen, router, start
from pydantic import BaseModel

@@ -416,12 +412,11 @@ class RouterFlow(Flow[ExampleState]):
    def fourth_method(self):
        print("Fourth method running")


flow = RouterFlow()
flow.kickoff()
```

```text Output
```text
Starting the structured flow
Third method running
Fourth method running
@@ -484,7 +479,7 @@ The `main.py` file is where you create your flow and connect the crews together.

Here's an example of how you can connect the `poem_crew` in the `main.py` file:

```python Code
```python
#!/usr/bin/env python
from random import randint

@@ -560,6 +555,42 @@ uv run kickoff

The flow will execute, and you should see the output in the console.
### Adding Additional Crews Using the CLI

Once you have created your initial flow, you can easily add additional crews to your project using the CLI. This allows you to expand your flow's capabilities by integrating new crews without starting from scratch.

To add a new crew to your existing flow, use the following command:

```bash
crewai flow add-crew <crew_name>
```

This command will create a new directory for your crew within the `crews` folder of your flow project. It will include the necessary configuration files and a crew definition file, similar to the initial setup.

#### Folder Structure

After adding a new crew, your folder structure will look like this:

| Directory/File | Description |
| :--------------------- | :----------------------------------------------------------------- |
| `name_of_flow/` | Root directory for the flow. |
| ├── `crews/` | Contains directories for specific crews. |
| │ ├── `poem_crew/` | Directory for the "poem_crew" with its configurations and scripts. |
| │ │ ├── `config/` | Configuration files directory for the "poem_crew". |
| │ │ │ ├── `agents.yaml` | YAML file defining the agents for "poem_crew". |
| │ │ │ └── `tasks.yaml` | YAML file defining the tasks for "poem_crew". |
| │ │ └── `poem_crew.py` | Script for "poem_crew" functionality. |
| └── `name_of_crew/` | Directory for the new crew. |
| ├── `config/` | Configuration files directory for the new crew. |
| │ ├── `agents.yaml` | YAML file defining the agents for the new crew. |
| │ └── `tasks.yaml` | YAML file defining the tasks for the new crew. |
| └── `name_of_crew.py` | Script for the new crew functionality. |

You can then customize the `agents.yaml` and `tasks.yaml` files to define the agents and tasks for your new crew. The `name_of_crew.py` file will contain the crew's logic, which you can modify to suit your needs.

By using the CLI to add additional crews, you can efficiently build complex AI workflows that leverage multiple crews working together.
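As an illustration, here is a hedged sketch of wiring a newly added crew into your flow's `main.py`. The module path and `NameOfCrew` class are placeholders for whatever `crewai flow add-crew` generated in your project; the `.crew().kickoff()` call and `result.raw` access mirror the crew usage shown elsewhere on this page.

```python
from crewai.flow.flow import Flow, listen, start

# Placeholder import: adjust to the package and class the CLI generated for you
from name_of_flow.crews.name_of_crew.name_of_crew import NameOfCrew


class ExpandedFlow(Flow):
    @start()
    def gather_topic(self):
        # Produce whatever input the new crew expects
        return "flying cars"

    @listen(gather_topic)
    def run_new_crew(self, topic):
        # Kick off the newly added crew and return its raw output
        result = NameOfCrew().crew().kickoff(inputs={"topic": topic})
        return result.raw


if __name__ == "__main__":
    ExpandedFlow().kickoff()
```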
## Plot Flows

Visualizing your AI workflows can provide valuable insights into the structure and execution paths of your flows. CrewAI offers a powerful visualization tool that allows you to generate interactive plots of your flows, making it easier to understand and optimize your AI workflows.

@@ -576,7 +607,7 @@ CrewAI provides two convenient methods to generate plots of your flows:

If you are working directly with a flow instance, you can generate a plot by calling the `plot()` method on your flow object. This method will create an HTML file containing the interactive plot of your flow.

```python Code
```python
# Assuming you have a flow instance
flow.plot("my_flow_plot")
```

@@ -599,13 +630,114 @@ The generated plot will display nodes representing the tasks in your flow, with

By visualizing your flows, you can gain a clearer understanding of the workflow's structure, making it easier to debug, optimize, and communicate your AI processes to others.

### Conclusion

Plotting your flows is a powerful feature of CrewAI that enhances your ability to design and manage complex AI workflows. Whether you choose to use the `plot()` method or the command line, generating plots will provide you with a visual representation of your workflows, aiding in both development and presentation.
## Advanced

In this section, we explore more complex use cases of CrewAI Flows, starting with a self-evaluation loop. This pattern is crucial for developing AI systems that can iteratively improve their outputs through feedback.

### 1) Self-Evaluation Loop

The self-evaluation loop is a powerful pattern that allows AI workflows to automatically assess and refine their outputs. This example demonstrates how to set up a flow that generates content, evaluates it, and iterates based on feedback until the desired quality is achieved.

#### Overview

The self-evaluation loop involves two main Crews:

1. **ShakespeareanXPostCrew**: Generates a Shakespearean-style post on a given topic.
2. **XPostReviewCrew**: Evaluates the generated post, providing feedback on its validity and quality.

The process iterates until the post meets the criteria or a maximum retry limit is reached. This approach ensures high-quality outputs through iterative refinement.

#### Importance

This pattern is essential for building robust AI systems that can adapt and improve over time. By automating the evaluation and feedback loop, developers can ensure that their AI workflows produce reliable and high-quality results.

#### Main Code Highlights

Below is the `main.py` file for the self-evaluation loop flow:
```python
from typing import Optional

from crewai.flow.flow import Flow, listen, router, start
from pydantic import BaseModel

from self_evaluation_loop_flow.crews.shakespeare_crew.shakespeare_crew import (
    ShakespeareanXPostCrew,
)
from self_evaluation_loop_flow.crews.x_post_review_crew.x_post_review_crew import (
    XPostReviewCrew,
)


class ShakespeareXPostFlowState(BaseModel):
    x_post: str = ""
    feedback: Optional[str] = None
    valid: bool = False
    retry_count: int = 0


class ShakespeareXPostFlow(Flow[ShakespeareXPostFlowState]):

    @start("retry")
    def generate_shakespeare_x_post(self):
        print("Generating Shakespearean X post")
        topic = "Flying cars"
        result = (
            ShakespeareanXPostCrew()
            .crew()
            .kickoff(inputs={"topic": topic, "feedback": self.state.feedback})
        )
        print("X post generated", result.raw)
        self.state.x_post = result.raw

    @router(generate_shakespeare_x_post)
    def evaluate_x_post(self):
        if self.state.retry_count > 3:
            return "max_retry_exceeded"
        result = XPostReviewCrew().crew().kickoff(inputs={"x_post": self.state.x_post})
        self.state.valid = result["valid"]
        self.state.feedback = result["feedback"]
        print("valid", self.state.valid)
        print("feedback", self.state.feedback)
        self.state.retry_count += 1
        if self.state.valid:
            return "complete"
        return "retry"

    @listen("complete")
    def save_result(self):
        print("X post is valid")
        print("X post:", self.state.x_post)
        with open("x_post.txt", "w") as file:
            file.write(self.state.x_post)

    @listen("max_retry_exceeded")
    def max_retry_exceeded_exit(self):
        print("Max retry count exceeded")
        print("X post:", self.state.x_post)
        print("Feedback:", self.state.feedback)


def kickoff():
    shakespeare_flow = ShakespeareXPostFlow()
    shakespeare_flow.kickoff()


def plot():
    shakespeare_flow = ShakespeareXPostFlow()
    shakespeare_flow.plot()


if __name__ == "__main__":
    kickoff()
```

#### Code Highlights

- **Retry Mechanism**: The flow uses a retry mechanism to regenerate the post if it doesn't meet the criteria, up to a maximum of three retries.
- **Feedback Loop**: Feedback from the `XPostReviewCrew` is used to refine the post iteratively.
- **State Management**: The flow maintains state using a Pydantic model, ensuring type safety and clarity.

For a complete example and further details, please refer to the [Self Evaluation Loop Flow repository](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow).
## Next Steps

If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are four specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:
If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are five specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:

1. **Email Auto Responder Flow**: This example demonstrates an infinite loop where a background job continually runs to automate email responses. It's a great use case for tasks that need to be performed repeatedly without manual intervention. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/email_auto_responder_flow)

@@ -615,6 +747,8 @@ If you're interested in exploring additional examples of flows, we have a variet

4. **Meeting Assistant Flow**: This flow demonstrates how to broadcast one event to trigger multiple follow-up actions. For instance, after a meeting is completed, the flow can update a Trello board, send a Slack message, and save the results. It's a great example of handling multiple outcomes from a single event, making it ideal for comprehensive task management and notification systems. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/meeting_assistant_flow)

5. **Self Evaluation Loop Flow**: This flow demonstrates a self-evaluation loop where AI workflows automatically assess and refine their outputs through feedback. It involves generating content, evaluating it, and iterating until the desired quality is achieved. This pattern is crucial for developing robust AI systems that can adapt and improve over time. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow)

By exploring these examples, you can gain insights into how to leverage CrewAI Flows for various use cases, from automating repetitive tasks to managing complex, multi-step processes with dynamic decision-making and human feedback.

Also, check out our YouTube video on how to use flows in CrewAI below!
79 docs/concepts/knowledge.mdx Normal file
@@ -0,0 +1,79 @@
---
title: Knowledge
description: Understand what knowledge is in CrewAI and how to effectively use it.
icon: book
---
# Using Knowledge in CrewAI

## Introduction

Knowledge in CrewAI serves as a foundational component for enriching AI agents with contextual and relevant information. It enables agents to access and utilize structured data sources during their execution processes, making them more intelligent and responsive.

The Knowledge class in CrewAI provides a powerful way to manage and query knowledge sources for your AI agents. This guide will show you how to implement knowledge management in your CrewAI projects.

## What is Knowledge?

The `Knowledge` class in CrewAI manages various sources that store information, which can be queried and retrieved by AI agents. This modular approach allows you to integrate diverse data formats such as text, PDFs, spreadsheets, and more into your AI workflows.

Additionally, we provide specific knowledge sources for strings, text files, PDFs, and spreadsheets. You can expand on any source type by extending the `KnowledgeSource` class.
## Basic Implementation

Here's a simple example of how to use the Knowledge class:

```python
from crewai import Agent, Task, Crew, Process, LLM
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Create a knowledge source
content = "Users name is John. He is 30 years old and lives in San Francisco."
string_source = StringKnowledgeSource(
    content=content, metadata={"preference": "personal"}
)

# Create an agent with the knowledge store
agent = Agent(
    role="About User",
    goal="You know everything about the user.",
    backstory="""You are a master at understanding people and their preferences.""",
    verbose=True
)

task = Task(
    description="Answer the following questions about the user: {question}",
    expected_output="An answer to the question.",
    agent=agent,
)

crew = Crew(
    agents=[agent],
    tasks=[task],
    verbose=True,
    process=Process.sequential,
    knowledge={"sources": [string_source], "metadata": {"preference": "personal"}},  # Enable knowledge by adding the sources here. You can also add more sources to the sources list.
)

result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
```
## Embedder Configuration

You can also configure the embedder for the knowledge store. This is useful if you want to use a different embedder for the knowledge store than the one used for the agents.

```python
...
string_source = StringKnowledgeSource(
    content="Users name is John. He is 30 years old and lives in San Francisco.",
    metadata={"preference": "personal"}
)
crew = Crew(
    ...
    knowledge={
        "sources": [string_source],
        "metadata": {"preference": "personal"},
        "embedder_config": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
    },
)
```
@@ -25,52 +25,150 @@ By default, CrewAI uses the `gpt-4o-mini` model. It uses environment variables i
- `OPENAI_API_BASE`
- `OPENAI_API_KEY`

### 2. String Identifier
### 2. Updating YAML files

```python Code
agent = Agent(llm="gpt-4o", ...)
You can update the `agents.yml` file to refer to the LLM you want to use:

```yaml Code
researcher:
  role: Research Specialist
  goal: Conduct comprehensive research and analysis to gather relevant information,
    synthesize findings, and produce well-documented insights.
  backstory: A dedicated research professional with years of experience in academic
    investigation, literature review, and data analysis, known for thorough and
    methodical approaches to complex research questions.
  verbose: true
  llm: openai/gpt-4o
  # llm: azure/gpt-4o-mini
  # llm: gemini/gemini-pro
  # llm: anthropic/claude-3-5-sonnet-20240620
  # llm: bedrock/anthropic.claude-3-sonnet-20240229-v1:0
  # llm: mistral/mistral-large-latest
  # llm: ollama/llama3:70b
  # llm: groq/llama-3.2-90b-vision-preview
  # llm: watsonx/meta-llama/llama-3-1-70b-instruct
  # llm: nvidia_nim/meta/llama3-70b-instruct
  # llm: sambanova/Meta-Llama-3.1-8B-Instruct
  # ...
```
### 3. LLM Instance

Keep in mind that you will need to set certain ENV vars, depending on the model you are using, to provide the credentials, or set a custom LLM object as described below.
Here are some of the required ENV vars for some of the LLM integrations:

List of [more providers](https://docs.litellm.ai/docs/providers).
<AccordionGroup>
<Accordion title="OpenAI">
```python Code
OPENAI_API_KEY=<your-api-key>
OPENAI_API_BASE=<optional-custom-base-url>
OPENAI_MODEL_NAME=<openai-model-name>
OPENAI_ORGANIZATION=<your-org-id> # OPTIONAL
OPENAI_API_BASE=<openaiai-api-base> # OPTIONAL
```
</Accordion>

```python Code
from crewai import LLM
<Accordion title="Anthropic">
```python Code
ANTHROPIC_API_KEY=<your-api-key>
```
</Accordion>

llm = LLM(model="gpt-4", temperature=0.7)
agent = Agent(llm=llm, ...)
```
<Accordion title="Google">
```python Code
GEMINI_API_KEY=<your-api-key>
```
</Accordion>

### 4. Custom LLM Objects
<Accordion title="Azure">
```python Code
AZURE_API_KEY=<your-api-key> # "my-azure-api-key"
AZURE_API_BASE=<your-resource-url> # "https://example-endpoint.openai.azure.com"
AZURE_API_VERSION=<api-version> # "2023-05-15"
AZURE_AD_TOKEN=<your-azure-ad-token> # Optional
AZURE_API_TYPE=<your-azure-api-type> # Optional
```
</Accordion>

<Accordion title="AWS Bedrock">
```python Code
AWS_ACCESS_KEY_ID=<your-access-key>
AWS_SECRET_ACCESS_KEY=<your-secret-key>
AWS_DEFAULT_REGION=<your-region>
```
</Accordion>

<Accordion title="Mistral">
```python Code
MISTRAL_API_KEY=<your-api-key>
```
</Accordion>

<Accordion title="Groq">
```python Code
GROQ_API_KEY=<your-api-key>
```
</Accordion>

<Accordion title="IBM watsonx.ai">
```python Code
WATSONX_URL=<your-url> # (required) Base URL of your WatsonX instance
WATSONX_APIKEY=<your-apikey> # (required) IBM cloud API key
WATSONX_TOKEN=<your-token> # (required) IAM auth token (alternative to APIKEY)
WATSONX_PROJECT_ID=<your-project-id> # (optional) Project ID of your WatsonX instance
WATSONX_DEPLOYMENT_SPACE_ID=<your-space-id> # (optional) ID of deployment space for deployed models
```
</Accordion>
</AccordionGroup>

### 3. Custom LLM Objects

Pass a custom LLM implementation or object from another library.

See below for examples.

<Tabs>
<Tab title="String Identifier">
```python Code
agent = Agent(llm="gpt-4o", ...)
```
</Tab>

<Tab title="LLM Instance">
```python Code
from crewai import LLM

llm = LLM(model="gpt-4", temperature=0.7)
agent = Agent(llm=llm, ...)
```
</Tab>
</Tabs>

## Connecting to OpenAI-Compatible LLMs

You can connect to OpenAI-compatible LLMs using either environment variables or by setting specific attributes on the LLM class:

1. Using environment variables:
<Tabs>
<Tab title="Using Environment Variables">
```python Code
import os

```python Code
import os
os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["OPENAI_API_BASE"] = "https://api.your-provider.com/v1"
```
</Tab>
<Tab title="Using LLM Class Attributes">
```python Code
from crewai import LLM

os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["OPENAI_API_BASE"] = "https://api.your-provider.com/v1"
```

2. Using LLM class attributes:

```python Code
from crewai import LLM

llm = LLM(
    model="custom-model-name",
    api_key="your-api-key",
    base_url="https://api.your-provider.com/v1"
)
agent = Agent(llm=llm, ...)
```
llm = LLM(
    model="custom-model-name",
    api_key="your-api-key",
    base_url="https://api.your-provider.com/v1"
)
agent = Agent(llm=llm, ...)
```
</Tab>
</Tabs>
## LLM Configuration Options

@@ -97,55 +195,180 @@ When configuring an LLM for your agent, you have access to a wide range of param
| **api_key** | `str` | Your API key for authentication. |

## OpenAI Example Configuration
These are examples of how to configure LLMs for your agent.

```python Code
from crewai import LLM
<AccordionGroup>
<Accordion title="OpenAI">

llm = LLM(
    model="gpt-4",
    temperature=0.8,
    max_tokens=150,
    top_p=0.9,
    frequency_penalty=0.1,
    presence_penalty=0.1,
    stop=["END"],
    seed=42,
    base_url="https://api.openai.com/v1",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
```python Code
from crewai import LLM

## Cerebras Example Configuration
llm = LLM(
    model="gpt-4",
    temperature=0.8,
    max_tokens=150,
    top_p=0.9,
    frequency_penalty=0.1,
    presence_penalty=0.1,
    stop=["END"],
    seed=42,
    base_url="https://api.openai.com/v1",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

```python Code
from crewai import LLM
<Accordion title="Cerebras">

llm = LLM(
    model="cerebras/llama-3.1-70b",
    base_url="https://api.cerebras.ai/v1",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
```python Code
from crewai import LLM

## Using Ollama (Local LLMs)
llm = LLM(
    model="cerebras/llama-3.1-70b",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

CrewAI supports using Ollama for running open-source models locally:
<Accordion title="Ollama (Local LLMs)">

1. Install Ollama: [ollama.ai](https://ollama.ai/)
2. Run a model: `ollama run llama2`
3. Configure agent:
CrewAI supports using Ollama for running open-source models locally:

```python Code
from crewai import LLM
1. Install Ollama: [ollama.ai](https://ollama.ai/)
2. Run a model: `ollama run llama2`
3. Configure agent:

agent = Agent(
    llm=LLM(model="ollama/llama3.1", base_url="http://localhost:11434"),
    ...
)
```
```python Code
from crewai import LLM

agent = Agent(
    llm=LLM(
        model="ollama/llama3.1",
        base_url="http://localhost:11434"
    ),
    ...
)
```
</Accordion>
<Accordion title="Groq">

```python Code
from crewai import LLM

llm = LLM(
    model="groq/llama3-8b-8192",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Anthropic">

```python Code
from crewai import LLM

llm = LLM(
    model="anthropic/claude-3-5-sonnet-20241022",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Fireworks AI">
```python Code
from crewai import LLM

llm = LLM(
    model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Gemini">

```python Code
from crewai import LLM

llm = LLM(
    model="gemini/gemini-1.5-pro-002",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Perplexity AI (pplx-api)">

```python Code
from crewai import LLM

llm = LLM(
    model="llama-3.1-sonar-large-128k-online",
    base_url="https://api.perplexity.ai/",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="IBM watsonx.ai">
You can use IBM Watson by setting the following ENV vars:

```python Code
WATSONX_URL=<your-url>
WATSONX_APIKEY=<your-apikey>
WATSONX_PROJECT_ID=<your-project-id>
```

You can then define your agents' LLMs by updating the `agents.yml` file:

```yaml Code
researcher:
  role: Research Specialist
  goal: Conduct comprehensive research and analysis to gather relevant information,
    synthesize findings, and produce well-documented insights.
  backstory: A dedicated research professional with years of experience in academic
    investigation, literature review, and data analysis, known for thorough and
    methodical approaches to complex research questions.
  verbose: true
  llm: watsonx/meta-llama/llama-3-1-70b-instruct
```

You can also set up agents more dynamically as a base-level LLM instance, as below:

```python Code
from crewai import LLM

llm = LLM(
    model="watsonx/ibm/granite-13b-chat-v2",
    base_url="https://api.watsonx.ai/v1",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="Hugging Face">

```python Code
from crewai import LLM

llm = LLM(
    model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
    api_key="your-api-key-here",
    base_url="your_api_endpoint"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
</AccordionGroup>

## Changing the Base API URL

@@ -177,4 +400,4 @@ This is particularly useful when working with OpenAI-compatible APIs or when you
- **API Errors**: Check your API key, network connection, and rate limits.
- **Unexpected Outputs**: Refine your prompts and adjust temperature or top_p.
- **Performance Issues**: Consider using a more powerful model or optimizing your queries.
- **Timeout Errors**: Increase the `timeout` parameter or optimize your input.
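As a small illustration of that last point, a hedged sketch of raising the timeout on an `LLM` instance (the `timeout` parameter is the one named above; the value is arbitrary):

```python
from crewai import LLM

# Give slow providers more time before a request is considered failed
llm = LLM(
    model="gpt-4",
    timeout=120,  # seconds; tune to your provider's latency
)
```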
@@ -18,6 +18,7 @@ reason, and learn from past interactions.
| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory**| Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **User Memory** | Stores user-specific information and preferences, enhancing personalization and user experience. |

## How Memory Systems Empower Agents

@@ -92,6 +93,47 @@ my_crew = Crew(
)
```
## Integrating Mem0 for Enhanced User Memory

[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.

To include user-specific memory, you can get your API key [here](https://app.mem0.ai/dashboard/api-keys) and refer to the [docs](https://docs.mem0.ai/platform/quickstart#4-1-create-memories) for adding user preferences.
```python Code
import os
from crewai import Crew, Process
from mem0 import MemoryClient

# Set environment variables for Mem0
os.environ["MEM0_API_KEY"] = "m0-xx"

# Step 1: Record preferences based on past conversation or user input
client = MemoryClient()
messages = [
    {"role": "user", "content": "Hi there! I'm planning a vacation and could use some advice."},
    {"role": "assistant", "content": "Hello! I'd be happy to help with your vacation planning. What kind of destination do you prefer?"},
    {"role": "user", "content": "I am more of a beach person than a mountain person."},
    {"role": "assistant", "content": "That's interesting. Do you like hotels or Airbnb?"},
    {"role": "user", "content": "I like Airbnb more."},
]
client.add(messages, user_id="john")

# Step 2: Create a Crew with User Memory

crew = Crew(
    agents=[...],
    tasks=[...],
    verbose=True,
    process=Process.sequential,
    memory=True,
    memory_config={
        "provider": "mem0",
        "config": {"user_id": "john"},
    },
)
```
## Additional Embedding Providers

@@ -254,6 +296,31 @@ my_crew = Crew(
)
```

### Using Watson embeddings

```python Code
from crewai import Crew, Agent, Task, Process

# Note: Ensure you have installed and imported `ibm_watsonx_ai` for Watson embeddings to work.

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "watson",
        "config": {
            "model": "<model_name>",
            "api_url": "<api_url>",
            "api_key": "<YOUR_API_KEY>",
            "project_id": "<YOUR_PROJECT_ID>",
        }
    }
)
```

### Resetting Memory

```shell
@@ -5,13 +5,14 @@ icon: screwdriver-wrench
---

## Introduction

CrewAI tools empower agents with capabilities ranging from web searching and data analysis to collaboration and delegating tasks among coworkers.
This documentation outlines how to create, integrate, and leverage these tools within the CrewAI framework, including a new focus on collaboration tools.

## What is a Tool?

A tool in CrewAI is a skill or function that agents can utilize to perform various actions.
This includes tools from the [CrewAI Toolkit](https://github.com/joaomdmoura/crewai-tools) and [LangChain Tools](https://python.langchain.com/docs/integrations/tools),
enabling everything from simple searches to complex interactions and effective teamwork among agents.

## Key Characteristics of Tools
@@ -103,57 +104,53 @@ crew.kickoff()

Here is a list of the available tools and their descriptions:

| Tool                              | Description                                                                                      |
| :-------------------------------- | :----------------------------------------------------------------------------------------------- |
| **BrowserbaseLoadTool**           | A tool for interacting with and extracting data from web browsers.                                |
| **CodeDocsSearchTool**            | A RAG tool optimized for searching through code documentation and related technical documents.    |
| **CodeInterpreterTool**           | A tool for interpreting python code.                                                              |
| **ComposioTool**                  | Enables use of Composio tools.                                                                    |
| **CSVSearchTool**                 | A RAG tool designed for searching within CSV files, tailored to handle structured data.           |
| **DALL-E Tool**                   | A tool for generating images using the DALL-E API.                                                |
| **DirectorySearchTool**           | A RAG tool for searching within directories, useful for navigating through file systems.          |
| **DOCXSearchTool**                | A RAG tool aimed at searching within DOCX documents, ideal for processing Word files.             |
| **DirectoryReadTool**             | Facilitates reading and processing of directory structures and their contents.                    |
| **EXASearchTool**                 | A tool designed for performing exhaustive searches across various data sources.                   |
| **FileReadTool**                  | Enables reading and extracting data from files, supporting various file formats.                  |
| **FirecrawlSearchTool**           | A tool to search webpages using Firecrawl and return the results.                                 |
| **FirecrawlCrawlWebsiteTool**     | A tool for crawling webpages using Firecrawl.                                                     |
| **FirecrawlScrapeWebsiteTool**    | A tool for scraping webpages URL using Firecrawl and returning its contents.                      |
| **GithubSearchTool**              | A RAG tool for searching within GitHub repositories, useful for code and documentation search.    |
| **SerperDevTool**                 | A specialized tool for development purposes, with specific functionalities under development.     |
| **TXTSearchTool**                 | A RAG tool focused on searching within text (.txt) files, suitable for unstructured data.         |
| **JSONSearchTool**                | A RAG tool designed for searching within JSON files, catering to structured data handling.        |
| **LlamaIndexTool**                | Enables the use of LlamaIndex tools.                                                              |
| **MDXSearchTool**                 | A RAG tool tailored for searching within Markdown (MDX) files, useful for documentation.          |
| **PDFSearchTool**                 | A RAG tool aimed at searching within PDF documents, ideal for processing scanned documents.       |
| **PGSearchTool**                  | A RAG tool optimized for searching within PostgreSQL databases, suitable for database queries.    |
| **Vision Tool**                   | A tool for generating images using the DALL-E API.                                                |
| **RagTool**                       | A general-purpose RAG tool capable of handling various data sources and types.                    |
| **ScrapeElementFromWebsiteTool**  | Enables scraping specific elements from websites, useful for targeted data extraction.            |
| **ScrapeWebsiteTool**             | Facilitates scraping entire websites, ideal for comprehensive data collection.                    |
| **WebsiteSearchTool**             | A RAG tool for searching website content, optimized for web data extraction.                      |
| **XMLSearchTool**                 | A RAG tool designed for searching within XML files, suitable for structured data formats.         |
| **YoutubeChannelSearchTool**      | A RAG tool for searching within YouTube channels, useful for video content analysis.              |
| **YoutubeVideoSearchTool**        | A RAG tool aimed at searching within YouTube videos, ideal for video data extraction.             |

## Creating your own Tools

<Tip>
  Developers can craft `custom tools` tailored for their agent’s needs or utilize pre-built options.
</Tip>

To create your own CrewAI tools you will need to install our extra tools package:

```bash
pip install 'crewai[tools]'
```

Once you do that there are two main ways for one to create a CrewAI tool:
There are two main ways for one to create a CrewAI tool:
### Subclassing `BaseTool`

```python Code
from crewai_tools import BaseTool
from crewai.tools import BaseTool


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
@@ -167,7 +164,7 @@ class MyCustomTool(BaseTool):
### Utilizing the `tool` Decorator

```python Code
from crewai_tools import tool
from crewai.tools import tool
@tool("Name of my tool")
def my_tool(question: str) -> str:
    """Clear description for what this tool is useful for, your agent will need this information to use it."""
@@ -178,11 +175,13 @@ def my_tool(question: str) -> str:
### Custom Caching Mechanism

<Tip>
  Tools can optionally implement a `cache_function` to fine-tune caching behavior. This function determines when to cache results based on specific conditions, offering granular control over caching logic.
</Tip>
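For orientation, here is a rough, self-contained sketch of the pattern the tip describes: a decorated tool, a cache-decision function, and the assignment that ties them together. The `cache_function` attribute name comes from the tip above; the tool and the caching condition are purely illustrative. The diff excerpt that follows shows the opening lines of the documentation's own example.

```python
from crewai.tools import tool


@tool("Multiplication tool")
def multiply(first_number: int, second_number: int) -> int:
    """Multiplies two integers together."""
    return first_number * second_number


def cache_if_even(args, result):
    # Cache only even results, as an arbitrary example condition
    return result % 2 == 0


# Attach the caching policy to the tool (attribute name per the tip above)
multiply.cache_function = cache_if_even
```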
```python Code
from crewai_tools import tool
from crewai.tools import tool

@tool
def multiplication_tool(first_number: int, second_number: int) -> str:
@@ -208,6 +207,6 @@ writer1 = Agent(

## Conclusion

Tools are pivotal in extending the capabilities of CrewAI agents, enabling them to undertake a broad spectrum of tasks and collaborate effectively.
When building solutions with CrewAI, leverage both custom and existing tools to empower your agents and enhance the AI ecosystem. Consider utilizing error handling,
caching mechanisms, and the flexibility of tool arguments to optimize your agents' performance and capabilities.
docs/how-to/before-and-after-kickoff-hooks.mdx (new file, 59 lines)
---
title: Before and After Kickoff Hooks
description: Learn how to use before and after kickoff hooks in CrewAI
---

CrewAI provides hooks that allow you to execute code before and after a crew's kickoff. These hooks are useful for preprocessing inputs or post-processing results.

## Before Kickoff Hook

The before kickoff hook is executed before the crew starts its tasks. It receives the input dictionary and can modify it before passing it to the crew. You can use this hook to set up your environment, load necessary data, or preprocess your inputs. This is useful in scenarios where the input data might need enrichment or validation before being processed by the crew.

Here's an example of defining a before kickoff function in your `crew.py`:

```python
from crewai.project import CrewBase, before_kickoff

@CrewBase
class MyCrew:
    @before_kickoff
    def prepare_data(self, inputs):
        # Preprocess or modify inputs
        inputs['processed'] = True
        return inputs

    # ...
```

In this example, the `prepare_data` function modifies the inputs by adding a new key-value pair indicating that the inputs have been processed.
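For orientation — and as a sketch only, since `MyCrew` above omits its `@agent`, `@task` and `@crew` definitions — the hook fires automatically when the crew is kicked off:

```python
# Hypothetical call: the inputs below are passed to prepare_data() first,
# so the crew effectively receives {"topic": "AI LLMs", "processed": True}.
result = MyCrew().crew().kickoff(inputs={"topic": "AI LLMs"})
```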
## After Kickoff Hook

The after kickoff hook is executed after the crew has completed its tasks. It receives the result object, which contains the outputs of the crew's execution. This hook is ideal for post-processing results, such as logging, data transformation, or further analysis.

Here's how you can define an after kickoff function in your `crew.py`:

```python
from crewai.project import CrewBase, after_kickoff

@CrewBase
class MyCrew:
    @after_kickoff
    def log_results(self, result):
        # Log or modify the results
        print("Crew execution completed with result:", result)
        return result

    # ...
```

In the `log_results` function, the results of the crew execution are simply printed out. You can extend this to perform more complex operations such as sending notifications or integrating with other services.
## Utilizing Both Hooks

Both hooks can be used together to provide a comprehensive setup and teardown process for your crew's execution. They are particularly useful in maintaining clean code architecture by separating concerns and enhancing the modularity of your CrewAI implementations.
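As a brief sketch (again assuming the `@agent`, `@task` and `@crew` definitions omitted here), both hooks can live on the same crew class:

```python
from crewai.project import CrewBase, before_kickoff, after_kickoff

@CrewBase
class MyCrew:
    @before_kickoff
    def prepare_data(self, inputs):
        # Enrich or validate inputs before the crew runs
        inputs['processed'] = True
        return inputs

    @after_kickoff
    def log_results(self, result):
        # Post-process the crew output
        print("Crew execution completed with result:", result)
        return result

    # ... @agent, @task and @crew definitions go here
```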
## Conclusion

Before and after kickoff hooks in CrewAI offer powerful ways to interact with the lifecycle of a crew's execution. By understanding and utilizing these hooks, you can greatly enhance the robustness and flexibility of your AI agents.
@@ -6,28 +6,27 @@ icon: hammer

## Creating and Utilizing Tools in CrewAI

This guide provides detailed instructions on creating custom tools for the CrewAI framework and how to efficiently manage and utilize these tools,
incorporating the latest functionalities such as tool delegation, error handling, and dynamic tool calling. It also highlights the importance of collaboration tools,
enabling agents to perform a wide range of actions.

### Prerequisites

Before creating your own tools, ensure you have the crewAI extra tools package installed:

```bash
pip install 'crewai[tools]'
```
### Subclassing `BaseTool`

To create a personalized tool, inherit from `BaseTool` and define the necessary attributes, including the `args_schema` for input validation, and the `_run` method.

```python Code
from typing import Type

from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")

class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "What this tool does. It's vital for effective utilization."
    args_schema: Type[BaseModel] = MyToolInput

    def _run(self, argument: str) -> str:
        # Your tool's logic here
        return "Result from custom tool"
```
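As a quick, hypothetical smoke test (not part of the original guide), the class above can be exercised directly; keyword arguments are forwarded to `_run`:

```python Code
tool = MyCustomTool()
print(tool.run(argument="example input"))  # -> "Result from custom tool"
```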
Alternatively, you can use the tool decorator `@tool`. This approach allows you to define the tool's attributes and functionality directly within a function,
offering a concise and efficient way to create specialized tools tailored to your needs.

```python Code
from crewai.tools import tool

@tool("Tool Name")
def my_simple_tool(question: str) -> str:
    """Tool description for clarity."""
    # Tool logic here
    return "Tool output"
```
@@ -66,5 +65,5 @@

```python Code
def my_cache_strategy(arguments: dict, result: str) -> bool:
    # Define your custom caching logic here
    ...

cached_tool.cache_function = my_cache_strategy
```
By adhering to these guidelines and incorporating new functionalities and collaboration tools into your tool creation and management processes,
you can leverage the full capabilities of the CrewAI framework, enhancing both the development experience and the efficiency of your AI agents.
@@ -8,7 +8,7 @@ icon: rocket

Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject.

Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
If you haven't installed them yet, you can do so by following the [installation guide](/installation).

Follow the steps below to get crewing! 🚣‍♂️
```
</CodeGroup>
</Step>
<Step title="Modify your `agents.yaml` file">
<Tip>
You can also modify the agents as needed to fit your use case or copy and paste as is to your project.
Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{topic}` will be replaced by the value of the variable in the `main.py` file.
</Tip>
```yaml agents.yaml
# ...
    You're a seasoned researcher with a knack for uncovering the latest
    developments in {topic}. Known for your ability to find the most relevant
    information and present it in a clear and concise manner.

reporting_analyst:
  role: >
    {topic} Reporting Analyst
  # ...
    it easy for others to understand and act on the information you provide.
```
</Step>
<Step title="Modify your `tasks.yaml` file">
```yaml tasks.yaml
# src/latest_ai_development/config/tasks.yaml
research_task:
  # ...

# ... (the reporting task is defined here)
  agent: reporting_analyst
  output_file: report.md
```
</Step>
<Step title="Modify your `crew.py` file">
```python crew.py
# src/latest_ai_development/crew.py
from crewai import Agent, Crew, Process, Task
# ...
      tasks=self.tasks, # Automatically created by the @task decorator
      process=Process.sequential,
      verbose=True,
    )
```
</Step>
<Step title="[Optional] Add before and after crew functions">
```python crew.py
# src/latest_ai_development/crew.py
from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
from crewai_tools import SerperDevTool

@CrewBase
class LatestAiDevelopmentCrew():
  """LatestAiDevelopment crew"""

  @before_kickoff
  def before_kickoff_function(self, inputs):
    print(f"Before kickoff function with inputs: {inputs}")
    return inputs # You can return the inputs or modify them as needed

  @after_kickoff
  def after_kickoff_function(self, result):
    print(f"After kickoff function with result: {result}")
    return result # You can return the result or modify it as needed

  # ... remaining code
```
</Step>
<Step title="Feel free to pass custom inputs to your crew">
For example, you can pass the `topic` input to your crew to customize the research and reporting.

```python main.py
#!/usr/bin/env python
# ...
```
### Note on Consistency in Naming

The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
For example, you can reference the agent for specific tasks from the `tasks.yaml` file.
This naming consistency allows CrewAI to automatically link your configurations with your code; otherwise, your task won't recognize the reference properly.

#### Example References

<Tip>
Note how we use the same name for the agent in the `agents.yaml` (`email_summarizer`) file as the method name in the `crew.py` (`email_summarizer`) file.
</Tip>
```yaml agents.yaml
email_summarizer:
  # ...
```

Use the annotations to properly reference the agent and task in the `crew.py` file:

* `@task`
* `@crew`
* `@tool`
* `@before_kickoff`
* `@after_kickoff`
* `@callback`
* `@output_json`
* `@output_pydantic`
<Tip>
In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
You can learn more about the core concepts [here](/concepts).
</Tip>
This will clear the crew's memory, allowing for a fresh start.

## Deploying Your Project

The easiest way to deploy your crew is through [CrewAI Enterprise](http://app.crewai.com/), where you can deploy your crew in a few clicks.
@@ -34,6 +34,7 @@

```python Code
from crewai_tools import GithubSearchTool

# Initialize the tool for semantic searches within a specific GitHub repository
tool = GithubSearchTool(
    github_repo='https://github.com/example/repo',
    gh_token='your_github_personal_access_token',
    content_types=['code', 'issue'] # Options: code, repo, pr, issue
)

# Initialize the tool for semantic searches within a specific GitHub repository, so the agent can search any repository if it learns about it during its execution
tool = GithubSearchTool(
    gh_token='your_github_personal_access_token',
    content_types=['code', 'issue'] # Options: code, repo, pr, issue
)
```
## Arguments

- `github_repo`: The URL of the GitHub repository where the search will be conducted. This is a mandatory field and specifies the target repository for your search.
- `gh_token`: Your GitHub Personal Access Token (PAT) required for authentication. You can create one in your GitHub account settings under Developer Settings > Personal Access Tokens.
- `content_types`: Specifies the types of content to include in your search. You must provide a list of content types from the following options: `code` for searching within the code,
  `repo` for searching within the repository's general information, `pr` for searching within pull requests, and `issue` for searching within issues.
  This field is mandatory and allows tailoring the search to specific content types within the GitHub repository.
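As a hedged illustration of where the tool typically fits (the agent fields below are placeholders, not part of this page), it is handed to an agent via `tools`:

```python Code
from crewai import Agent
from crewai_tools import GithubSearchTool

github_search = GithubSearchTool(
    github_repo='https://github.com/example/repo',
    gh_token='your_github_personal_access_token',
    content_types=['code', 'issue'],
)

repo_researcher = Agent(
    role="Repository Researcher",
    goal="Answer questions using the repository's code and issues",
    backstory="You specialize in digging through codebases for relevant context.",
    tools=[github_search],
)
```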
@@ -77,5 +80,4 @@

        ),
    ),
)
```
poetry.lock (generated, 6 lines changed)
@@ -1597,12 +1597,12 @@ files = [
|
||||
google-auth = ">=2.14.1,<3.0.dev0"
|
||||
googleapis-common-protos = ">=1.56.2,<2.0.dev0"
|
||||
grpcio = [
|
||||
{version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
|
||||
{version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""},
|
||||
{version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
|
||||
]
|
||||
grpcio-status = [
|
||||
{version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
|
||||
{version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""},
|
||||
{version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
|
||||
]
|
||||
proto-plus = ">=1.22.3,<2.0.0dev"
|
||||
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0"
|
||||
@@ -4286,8 +4286,8 @@ files = [
|
||||
|
||||
[package.dependencies]
|
||||
numpy = [
|
||||
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
|
||||
{version = ">=1.22.4", markers = "python_version < \"3.11\""},
|
||||
{version = ">=1.23.2", markers = "python_version == \"3.11\""},
|
||||
{version = ">=1.26.0", markers = "python_version >= \"3.12\""},
|
||||
]
|
||||
python-dateutil = ">=2.8.2"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[project]
|
||||
name = "crewai"
|
||||
version = "0.76.2"
|
||||
version = "0.83.0"
|
||||
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<=3.13"
|
||||
@@ -16,7 +16,7 @@ dependencies = [
|
||||
"opentelemetry-exporter-otlp-proto-http>=1.22.0",
|
||||
"instructor>=1.3.3",
|
||||
"regex>=2024.9.11",
|
||||
"crewai-tools>=0.13.2",
|
||||
"crewai-tools>=0.14.0",
|
||||
"click>=8.1.7",
|
||||
"python-dotenv>=1.0.0",
|
||||
"appdirs>=1.4.4",
|
||||
@@ -27,7 +27,10 @@ dependencies = [
|
||||
"pyvis>=0.3.2",
|
||||
"uv>=0.4.25",
|
||||
"tomli-w>=1.1.0",
|
||||
"chromadb>=0.4.24",
|
||||
"tomli>=2.0.2",
|
||||
"chromadb>=0.5.18",
|
||||
"pdfplumber>=0.11.4",
|
||||
"openpyxl>=3.1.5",
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
@@ -36,8 +39,19 @@ Documentation = "https://docs.crewai.com"
|
||||
Repository = "https://github.com/crewAIInc/crewAI"
|
||||
|
||||
[project.optional-dependencies]
|
||||
tools = ["crewai-tools>=0.13.2"]
|
||||
tools = ["crewai-tools>=0.14.0"]
|
||||
agentops = ["agentops>=0.3.0"]
|
||||
fastembed = ["fastembed>=0.4.1"]
|
||||
pdfplumber = [
|
||||
"pdfplumber>=0.11.4",
|
||||
]
|
||||
pandas = [
|
||||
"pandas>=2.2.3",
|
||||
]
|
||||
openpyxl = [
|
||||
"openpyxl>=3.1.5",
|
||||
]
|
||||
mem0 = ["mem0ai>=0.1.29"]
|
||||
|
||||
[tool.uv]
|
||||
dev-dependencies = [
|
||||
@@ -51,7 +65,7 @@ dev-dependencies = [
|
||||
"mkdocs-material-extensions>=1.3.1",
|
||||
"pillow>=10.2.0",
|
||||
"cairosvg>=2.7.1",
|
||||
"crewai-tools>=0.13.2",
|
||||
"crewai-tools>=0.14.0",
|
||||
"pytest>=8.0.0",
|
||||
"pytest-vcr>=1.0.2",
|
||||
"python-dotenv>=1.0.0",
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
import warnings
|
||||
|
||||
from crewai.agent import Agent
|
||||
from crewai.crew import Crew
|
||||
from crewai.flow.flow import Flow
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.llm import LLM
|
||||
from crewai.pipeline import Pipeline
|
||||
from crewai.process import Process
|
||||
@@ -14,5 +16,15 @@ warnings.filterwarnings(
|
||||
category=UserWarning,
|
||||
module="pydantic.main",
|
||||
)
|
||||
__version__ = "0.76.2"
|
||||
__all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router", "LLM", "Flow"]
|
||||
__version__ = "0.83.0"
|
||||
__all__ = [
|
||||
"Agent",
|
||||
"Crew",
|
||||
"Process",
|
||||
"Task",
|
||||
"Pipeline",
|
||||
"Router",
|
||||
"LLM",
|
||||
"Flow",
|
||||
"Knowledge",
|
||||
]
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
from inspect import signature
|
||||
from typing import Any, List, Literal, Optional, Union
|
||||
|
||||
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
|
||||
@@ -9,9 +8,11 @@ from pydantic import Field, InstanceOf, PrivateAttr, model_validator
|
||||
from crewai.agents import CacheHandler
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.cli.constants import ENV_VARS
|
||||
from crewai.llm import LLM
|
||||
from crewai.memory.contextual.contextual_memory import ContextualMemory
|
||||
from crewai.tools.agent_tools import AgentTools
|
||||
from crewai.tools import BaseTool
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.utilities import Converter, Prompts
|
||||
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
|
||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||
@@ -51,6 +52,7 @@ class Agent(BaseAgent):
|
||||
role: The role of the agent.
|
||||
goal: The objective of the agent.
|
||||
backstory: The backstory of the agent.
|
||||
knowledge: The knowledge base of the agent.
|
||||
config: Dict representation of agent configuration.
|
||||
llm: The language model that will run the agent.
|
||||
function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
|
||||
@@ -122,6 +124,11 @@ class Agent(BaseAgent):
|
||||
@model_validator(mode="after")
|
||||
def post_init_setup(self):
|
||||
self.agent_ops_agent_name = self.role
|
||||
unnacepted_attributes = [
|
||||
"AWS_ACCESS_KEY_ID",
|
||||
"AWS_SECRET_ACCESS_KEY",
|
||||
"AWS_REGION_NAME",
|
||||
]
|
||||
|
||||
# Handle different cases for self.llm
|
||||
if isinstance(self.llm, str):
|
||||
@@ -131,8 +138,12 @@ class Agent(BaseAgent):
|
||||
# If it's already an LLM instance, keep it as is
|
||||
pass
|
||||
elif self.llm is None:
|
||||
# If it's None, use environment variables or default
|
||||
model_name = os.environ.get("OPENAI_MODEL_NAME", "gpt-4o-mini")
|
||||
# Determine the model name from environment variables or use default
|
||||
model_name = (
|
||||
os.environ.get("OPENAI_MODEL_NAME")
|
||||
or os.environ.get("MODEL")
|
||||
or "gpt-4o-mini"
|
||||
)
|
||||
llm_params = {"model": model_name}
|
||||
|
||||
api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get(
|
||||
@@ -141,9 +152,44 @@ class Agent(BaseAgent):
|
||||
if api_base:
|
||||
llm_params["base_url"] = api_base
|
||||
|
||||
api_key = os.environ.get("OPENAI_API_KEY")
|
||||
if api_key:
|
||||
llm_params["api_key"] = api_key
|
||||
set_provider = model_name.split("/")[0] if "/" in model_name else "openai"
|
||||
|
||||
# Iterate over all environment variables to find matching API keys or use defaults
|
||||
for provider, env_vars in ENV_VARS.items():
|
||||
if provider == set_provider:
|
||||
for env_var in env_vars:
|
||||
if env_var["key_name"] in unnacepted_attributes:
|
||||
continue
|
||||
# Check if the environment variable is set
|
||||
if "key_name" in env_var:
|
||||
env_value = os.environ.get(env_var["key_name"])
|
||||
if env_value:
|
||||
# Map key names containing "API_KEY" to "api_key"
|
||||
key_name = (
|
||||
"api_key"
|
||||
if "API_KEY" in env_var["key_name"]
|
||||
else env_var["key_name"]
|
||||
)
|
||||
# Map key names containing "API_BASE" to "api_base"
|
||||
key_name = (
|
||||
"api_base"
|
||||
if "API_BASE" in env_var["key_name"]
|
||||
else key_name
|
||||
)
|
||||
# Map key names containing "API_VERSION" to "api_version"
|
||||
key_name = (
|
||||
"api_version"
|
||||
if "API_VERSION" in env_var["key_name"]
|
||||
else key_name
|
||||
)
|
||||
llm_params[key_name] = env_value
|
||||
# Check for default values if the environment variable is not set
|
||||
elif env_var.get("default", False):
|
||||
for key, value in env_var.items():
|
||||
if key not in ["prompt", "key_name", "default"]:
|
||||
# Only add default if the key is already set in os.environ
|
||||
if key in os.environ:
|
||||
llm_params[key] = value
|
||||
|
||||
self.llm = LLM(**llm_params)
|
||||
else:
|
||||
@@ -193,7 +239,7 @@ class Agent(BaseAgent):
|
||||
self,
|
||||
task: Any,
|
||||
context: Optional[str] = None,
|
||||
tools: Optional[List[Any]] = None,
|
||||
tools: Optional[List[BaseTool]] = None,
|
||||
) -> str:
|
||||
"""Execute a task with the agent.
|
||||
|
||||
@@ -217,14 +263,28 @@ class Agent(BaseAgent):
|
||||
|
||||
if self.crew and self.crew.memory:
|
||||
contextual_memory = ContextualMemory(
|
||||
self.crew.memory_config,
|
||||
self.crew._short_term_memory,
|
||||
self.crew._long_term_memory,
|
||||
self.crew._entity_memory,
|
||||
self.crew._user_memory,
|
||||
)
|
||||
memory = contextual_memory.build_context_for_task(task, context)
|
||||
if memory.strip() != "":
|
||||
task_prompt += self.i18n.slice("memory").format(memory=memory)
|
||||
|
||||
# Integrate the knowledge base
|
||||
if self.crew and self.crew.knowledge:
|
||||
knowledge_snippets = self.crew.knowledge.query([task.prompt()])
|
||||
valid_snippets = [
|
||||
result["context"]
|
||||
for result in knowledge_snippets
|
||||
if result and result.get("context")
|
||||
]
|
||||
if valid_snippets:
|
||||
formatted_knowledge = "\n".join(valid_snippets)
|
||||
task_prompt += f"\n\nAdditional Information:\n{formatted_knowledge}"
|
||||
|
||||
tools = tools or self.tools or []
|
||||
self.create_agent_executor(tools=tools, task=task)
|
||||
|
||||
@@ -260,7 +320,9 @@ class Agent(BaseAgent):
|
||||
|
||||
return result
|
||||
|
||||
def create_agent_executor(self, tools=None, task=None) -> None:
|
||||
def create_agent_executor(
|
||||
self, tools: Optional[List[BaseTool]] = None, task=None
|
||||
) -> None:
|
||||
"""Create an agent executor for the agent.
|
||||
|
||||
Returns:
|
||||
@@ -333,7 +395,7 @@ class Agent(BaseAgent):
|
||||
tools_list = []
|
||||
try:
|
||||
# tentatively try to import from crewai_tools import BaseTool as CrewAITool
|
||||
from crewai_tools import BaseTool as CrewAITool
|
||||
from crewai.tools import BaseTool as CrewAITool
|
||||
|
||||
for tool in tools:
|
||||
if isinstance(tool, CrewAITool):
|
||||
@@ -392,30 +454,20 @@ class Agent(BaseAgent):
|
||||
|
||||
return description
|
||||
|
||||
def _render_text_description_and_args(self, tools: List[Any]) -> str:
|
||||
def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
|
||||
"""Render the tool name, description, and args in plain text.
|
||||
|
||||
Output will be in the format of:
|
||||
Output will be in the format of:
|
||||
|
||||
.. code-block:: markdown
|
||||
.. code-block:: markdown
|
||||
|
||||
search: This tool is used for search, args: {"query": {"type": "string"}}
|
||||
calculator: This tool is used for math, \
|
||||
args: {"expression": {"type": "string"}}
|
||||
args: {"expression": {"type": "string"}}
|
||||
"""
|
||||
tool_strings = []
|
||||
for tool in tools:
|
||||
args_schema = str(tool.model_fields)
|
||||
if hasattr(tool, "func") and tool.func:
|
||||
sig = signature(tool.func)
|
||||
description = (
|
||||
f"Tool Name: {tool.name}{sig}\nTool Description: {tool.description}"
|
||||
)
|
||||
else:
|
||||
description = (
|
||||
f"Tool Name: {tool.name}\nTool Description: {tool.description}"
|
||||
)
|
||||
tool_strings.append(f"{description}\nTool Arguments: {args_schema}")
|
||||
tool_strings.append(tool.description)
|
||||
|
||||
return "\n".join(tool_strings)
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ from pydantic_core import PydanticCustomError
|
||||
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
from crewai.tools import BaseTool
|
||||
from crewai.utilities import I18N, Logger, RPMController
|
||||
from crewai.utilities.config import process_config
|
||||
|
||||
@@ -49,11 +50,11 @@ class BaseAgent(ABC, BaseModel):
|
||||
|
||||
|
||||
Methods:
|
||||
execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> str:
|
||||
execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[BaseTool]] = None) -> str:
|
||||
Abstract method to execute a task.
|
||||
create_agent_executor(tools=None) -> None:
|
||||
Abstract method to create an agent executor.
|
||||
_parse_tools(tools: List[Any]) -> List[Any]:
|
||||
_parse_tools(tools: List[BaseTool]) -> List[Any]:
|
||||
Abstract method to parse tools.
|
||||
get_delegation_tools(agents: List["BaseAgent"]):
|
||||
Abstract method to set the agents task tools for handling delegation and question asking to other agents in crew.
|
||||
@@ -105,7 +106,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
default=False,
|
||||
description="Enable agent to delegate and ask questions among each other.",
|
||||
)
|
||||
tools: Optional[List[Any]] = Field(
|
||||
tools: Optional[List[BaseTool]] = Field(
|
||||
default_factory=list, description="Tools at agents' disposal"
|
||||
)
|
||||
max_iter: Optional[int] = Field(
|
||||
@@ -188,7 +189,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
self,
|
||||
task: Any,
|
||||
context: Optional[str] = None,
|
||||
tools: Optional[List[Any]] = None,
|
||||
tools: Optional[List[BaseTool]] = None,
|
||||
) -> str:
|
||||
pass
|
||||
|
||||
@@ -197,11 +198,11 @@ class BaseAgent(ABC, BaseModel):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def _parse_tools(self, tools: List[Any]) -> List[Any]:
|
||||
def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[Any]:
|
||||
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
|
||||
"""Set the task tools that init BaseAgenTools class."""
|
||||
pass
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ from crewai.types.usage_metrics import UsageMetrics
|
||||
class TokenProcess:
|
||||
total_tokens: int = 0
|
||||
prompt_tokens: int = 0
|
||||
cached_prompt_tokens: int = 0
|
||||
completion_tokens: int = 0
|
||||
successful_requests: int = 0
|
||||
|
||||
@@ -15,6 +16,9 @@ class TokenProcess:
|
||||
self.completion_tokens = self.completion_tokens + tokens
|
||||
self.total_tokens = self.total_tokens + tokens
|
||||
|
||||
def sum_cached_prompt_tokens(self, tokens: int):
|
||||
self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
|
||||
|
||||
def sum_successful_requests(self, requests: int):
|
||||
self.successful_requests = self.successful_requests + requests
|
||||
|
||||
@@ -22,6 +26,7 @@ class TokenProcess:
|
||||
return UsageMetrics(
|
||||
total_tokens=self.total_tokens,
|
||||
prompt_tokens=self.prompt_tokens,
|
||||
cached_prompt_tokens=self.cached_prompt_tokens,
|
||||
completion_tokens=self.completion_tokens,
|
||||
successful_requests=self.successful_requests,
|
||||
)
|
||||
|
||||
@@ -117,6 +117,15 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
callbacks=self.callbacks,
|
||||
)
|
||||
|
||||
if answer is None or answer == "":
|
||||
self._printer.print(
|
||||
content="Received None or empty response from LLM call.",
|
||||
color="red",
|
||||
)
|
||||
raise ValueError(
|
||||
"Invalid response from LLM call - None or empty."
|
||||
)
|
||||
|
||||
if not self.use_stop_words:
|
||||
try:
|
||||
self._format_answer(answer)
|
||||
@@ -136,25 +145,26 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
formatted_answer.result = action_result
|
||||
self._show_logs(formatted_answer)
|
||||
|
||||
if self.step_callback:
|
||||
self.step_callback(formatted_answer)
|
||||
if self.step_callback:
|
||||
self.step_callback(formatted_answer)
|
||||
|
||||
if self._should_force_answer():
|
||||
if self.have_forced_answer:
|
||||
return AgentFinish(
|
||||
output=self._i18n.errors(
|
||||
"force_final_answer_error"
|
||||
).format(formatted_answer.text),
|
||||
text=formatted_answer.text,
|
||||
)
|
||||
else:
|
||||
formatted_answer.text += (
|
||||
f'\n{self._i18n.errors("force_final_answer")}'
|
||||
)
|
||||
self.have_forced_answer = True
|
||||
self.messages.append(
|
||||
self._format_msg(formatted_answer.text, role="assistant")
|
||||
)
|
||||
if self._should_force_answer():
|
||||
if self.have_forced_answer:
|
||||
return AgentFinish(
|
||||
thought="",
|
||||
output=self._i18n.errors(
|
||||
"force_final_answer_error"
|
||||
).format(formatted_answer.text),
|
||||
text=formatted_answer.text,
|
||||
)
|
||||
else:
|
||||
formatted_answer.text += (
|
||||
f'\n{self._i18n.errors("force_final_answer")}'
|
||||
)
|
||||
self.have_forced_answer = True
|
||||
self.messages.append(
|
||||
self._format_msg(formatted_answer.text, role="assistant")
|
||||
)
|
||||
|
||||
except OutputParserException as e:
|
||||
self.messages.append({"role": "user", "content": e.error})
|
||||
@@ -323,9 +333,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
||||
train_iteration = self.crew._train_iteration
|
||||
if agent_id in training_data and isinstance(train_iteration, int):
|
||||
training_data[agent_id][train_iteration][
|
||||
"improved_output"
|
||||
] = result.output
|
||||
training_data[agent_id][train_iteration]["improved_output"] = (
|
||||
result.output
|
||||
)
|
||||
training_handler.save(training_data)
|
||||
else:
|
||||
self._logger.log(
|
||||
@@ -376,4 +386,5 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
return CrewAgentParser(agent=self.agent).parse(answer)
|
||||
|
||||
def _format_msg(self, prompt: str, role: str = "user") -> Dict[str, str]:
|
||||
prompt = prompt.rstrip()
|
||||
return {"role": role, "content": prompt}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
from typing import Any, Optional, Union
|
||||
|
||||
from ..tools.cache_tools import CacheTools
|
||||
from ..tools.cache_tools.cache_tools import CacheTools
|
||||
from ..tools.tool_calling import InstructorToolCalling, ToolCalling
|
||||
from .cache.cache_handler import CacheHandler
|
||||
|
||||
|
||||
src/crewai/cli/add_crew_to_flow.py (new file, 70 lines)
@@ -0,0 +1,70 @@
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.utils import copy_template
|
||||
|
||||
|
||||
def add_crew_to_flow(crew_name: str) -> None:
|
||||
"""Add a new crew to the current flow."""
|
||||
# Check if pyproject.toml exists in the current directory
|
||||
if not Path("pyproject.toml").exists():
|
||||
print("This command must be run from the root of a flow project.")
|
||||
raise click.ClickException(
|
||||
"This command must be run from the root of a flow project."
|
||||
)
|
||||
|
||||
# Determine the flow folder based on the current directory
|
||||
flow_folder = Path.cwd()
|
||||
crews_folder = flow_folder / "src" / flow_folder.name / "crews"
|
||||
|
||||
if not crews_folder.exists():
|
||||
print("Crews folder does not exist in the current flow.")
|
||||
raise click.ClickException("Crews folder does not exist in the current flow.")
|
||||
|
||||
# Create the crew within the flow's crews directory
|
||||
create_embedded_crew(crew_name, parent_folder=crews_folder)
|
||||
|
||||
click.echo(
|
||||
f"Crew {crew_name} added to the current flow successfully!",
|
||||
)
|
||||
|
||||
|
||||
def create_embedded_crew(crew_name: str, parent_folder: Path) -> None:
|
||||
"""Create a new crew within an existing flow project."""
|
||||
folder_name = crew_name.replace(" ", "_").replace("-", "_").lower()
|
||||
class_name = crew_name.replace("_", " ").replace("-", " ").title().replace(" ", "")
|
||||
|
||||
crew_folder = parent_folder / folder_name
|
||||
|
||||
if crew_folder.exists():
|
||||
if not click.confirm(
|
||||
f"Crew {folder_name} already exists. Do you want to override it?"
|
||||
):
|
||||
click.secho("Operation cancelled.", fg="yellow")
|
||||
return
|
||||
click.secho(f"Overriding crew {folder_name}...", fg="green", bold=True)
|
||||
else:
|
||||
click.secho(f"Creating crew {folder_name}...", fg="green", bold=True)
|
||||
crew_folder.mkdir(parents=True)
|
||||
|
||||
# Create config and crew.py files
|
||||
config_folder = crew_folder / "config"
|
||||
config_folder.mkdir(exist_ok=True)
|
||||
|
||||
templates_dir = Path(__file__).parent / "templates" / "crew"
|
||||
config_template_files = ["agents.yaml", "tasks.yaml"]
|
||||
crew_template_file = f"{folder_name}.py" # Updated file name
|
||||
|
||||
for file_name in config_template_files:
|
||||
src_file = templates_dir / "config" / file_name
|
||||
dst_file = config_folder / file_name
|
||||
copy_template(src_file, dst_file, crew_name, class_name, folder_name)
|
||||
|
||||
src_file = templates_dir / "crew.py"
|
||||
dst_file = crew_folder / crew_template_file
|
||||
copy_template(src_file, dst_file, crew_name, class_name, folder_name)
|
||||
|
||||
click.secho(
|
||||
f"Crew {crew_name} added to the flow successfully!", fg="green", bold=True
|
||||
)
|
||||
@@ -34,7 +34,9 @@ class AuthenticationCommand:
|
||||
"scope": "openid",
|
||||
"audience": AUTH0_AUDIENCE,
|
||||
}
|
||||
response = requests.post(url=self.DEVICE_CODE_URL, data=device_code_payload)
|
||||
response = requests.post(
|
||||
url=self.DEVICE_CODE_URL, data=device_code_payload, timeout=20
|
||||
)
|
||||
response.raise_for_status()
|
||||
return response.json()
|
||||
|
||||
@@ -54,7 +56,7 @@ class AuthenticationCommand:
|
||||
|
||||
attempts = 0
|
||||
while True and attempts < 5:
|
||||
response = requests.post(self.TOKEN_URL, data=token_payload)
|
||||
response = requests.post(self.TOKEN_URL, data=token_payload, timeout=30)
|
||||
token_data = response.json()
|
||||
|
||||
if response.status_code == 200:
|
||||
|
||||
@@ -3,6 +3,7 @@ from typing import Optional
|
||||
import click
|
||||
import pkg_resources
|
||||
|
||||
from crewai.cli.add_crew_to_flow import add_crew_to_flow
|
||||
from crewai.cli.create_crew import create_crew
|
||||
from crewai.cli.create_flow import create_flow
|
||||
from crewai.cli.create_pipeline import create_pipeline
|
||||
@@ -135,6 +136,7 @@ def log_tasks_outputs() -> None:
|
||||
@click.option("-l", "--long", is_flag=True, help="Reset LONG TERM memory")
|
||||
@click.option("-s", "--short", is_flag=True, help="Reset SHORT TERM memory")
|
||||
@click.option("-e", "--entities", is_flag=True, help="Reset ENTITIES memory")
|
||||
@click.option("-kn", "--knowledge", is_flag=True, help="Reset KNOWLEDGE storage")
|
||||
@click.option(
|
||||
"-k",
|
||||
"--kickoff-outputs",
|
||||
@@ -142,17 +144,24 @@ def log_tasks_outputs() -> None:
|
||||
help="Reset LATEST KICKOFF TASK OUTPUTS",
|
||||
)
|
||||
@click.option("-a", "--all", is_flag=True, help="Reset ALL memories")
|
||||
def reset_memories(long, short, entities, kickoff_outputs, all):
|
||||
def reset_memories(
|
||||
long: bool,
|
||||
short: bool,
|
||||
entities: bool,
|
||||
knowledge: bool,
|
||||
kickoff_outputs: bool,
|
||||
all: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Reset the crew memories (long, short, entity, latest_crew_kickoff_ouputs). This will delete all the data saved.
|
||||
"""
|
||||
try:
|
||||
if not all and not (long or short or entities or kickoff_outputs):
|
||||
if not all and not (long or short or entities or knowledge or kickoff_outputs):
|
||||
click.echo(
|
||||
"Please specify at least one memory type to reset using the appropriate flags."
|
||||
)
|
||||
return
|
||||
reset_memories_command(long, short, entities, kickoff_outputs, all)
|
||||
reset_memories_command(long, short, entities, knowledge, kickoff_outputs, all)
|
||||
except Exception as e:
|
||||
click.echo(f"An error occurred while resetting memories: {e}", err=True)
|
||||
|
||||
@@ -178,10 +187,16 @@ def test(n_iterations: int, model: str):
|
||||
evaluate_crew(n_iterations, model)
|
||||
|
||||
|
||||
@crewai.command()
|
||||
def install():
|
||||
@crewai.command(
|
||||
context_settings=dict(
|
||||
ignore_unknown_options=True,
|
||||
allow_extra_args=True,
|
||||
)
|
||||
)
|
||||
@click.pass_context
|
||||
def install(context):
|
||||
"""Install the Crew."""
|
||||
install_crew()
|
||||
install_crew(context.args)
|
||||
|
||||
|
||||
@crewai.command()
|
||||
@@ -320,5 +335,13 @@ def flow_plot():
|
||||
plot_flow()
|
||||
|
||||
|
||||
@flow.command(name="add-crew")
|
||||
@click.argument("crew_name")
|
||||
def flow_add_crew(crew_name):
|
||||
"""Add a crew to an existing flow."""
|
||||
click.echo(f"Adding crew {crew_name} to the flow")
|
||||
add_crew_to_flow(crew_name)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
crewai()
|
||||
|
||||
src/crewai/cli/config.py (new file, 38 lines)
@@ -0,0 +1,38 @@
|
||||
import json
|
||||
from pathlib import Path
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional
|
||||
|
||||
DEFAULT_CONFIG_PATH = Path.home() / ".config" / "crewai" / "settings.json"
|
||||
|
||||
class Settings(BaseModel):
|
||||
tool_repository_username: Optional[str] = Field(None, description="Username for interacting with the Tool Repository")
|
||||
tool_repository_password: Optional[str] = Field(None, description="Password for interacting with the Tool Repository")
|
||||
config_path: Path = Field(default=DEFAULT_CONFIG_PATH, exclude=True)
|
||||
|
||||
def __init__(self, config_path: Path = DEFAULT_CONFIG_PATH, **data):
|
||||
"""Load Settings from config path"""
|
||||
config_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
file_data = {}
|
||||
if config_path.is_file():
|
||||
try:
|
||||
with config_path.open("r") as f:
|
||||
file_data = json.load(f)
|
||||
except json.JSONDecodeError:
|
||||
file_data = {}
|
||||
|
||||
merged_data = {**file_data, **data}
|
||||
super().__init__(config_path=config_path, **merged_data)
|
||||
|
||||
def dump(self) -> None:
|
||||
"""Save current settings to settings.json"""
|
||||
if self.config_path.is_file():
|
||||
with self.config_path.open("r") as f:
|
||||
existing_data = json.load(f)
|
||||
else:
|
||||
existing_data = {}
|
||||
|
||||
updated_data = {**existing_data, **self.model_dump(exclude_unset=True)}
|
||||
with self.config_path.open("w") as f:
|
||||
json.dump(updated_data, f, indent=4)
|
||||
@@ -1,19 +1,161 @@
|
||||
ENV_VARS = {
|
||||
'openai': ['OPENAI_API_KEY'],
|
||||
'anthropic': ['ANTHROPIC_API_KEY'],
|
||||
'gemini': ['GEMINI_API_KEY'],
|
||||
'groq': ['GROQ_API_KEY'],
|
||||
'ollama': ['FAKE_KEY'],
|
||||
"openai": [
|
||||
{
|
||||
"prompt": "Enter your OPENAI API key (press Enter to skip)",
|
||||
"key_name": "OPENAI_API_KEY",
|
||||
}
|
||||
],
|
||||
"anthropic": [
|
||||
{
|
||||
"prompt": "Enter your ANTHROPIC API key (press Enter to skip)",
|
||||
"key_name": "ANTHROPIC_API_KEY",
|
||||
}
|
||||
],
|
||||
"gemini": [
|
||||
{
|
||||
"prompt": "Enter your GEMINI API key (press Enter to skip)",
|
||||
"key_name": "GEMINI_API_KEY",
|
||||
}
|
||||
],
|
||||
"groq": [
|
||||
{
|
||||
"prompt": "Enter your GROQ API key (press Enter to skip)",
|
||||
"key_name": "GROQ_API_KEY",
|
||||
}
|
||||
],
|
||||
"watson": [
|
||||
{
|
||||
"prompt": "Enter your WATSONX URL (press Enter to skip)",
|
||||
"key_name": "WATSONX_URL",
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your WATSONX API Key (press Enter to skip)",
|
||||
"key_name": "WATSONX_APIKEY",
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your WATSONX Project Id (press Enter to skip)",
|
||||
"key_name": "WATSONX_PROJECT_ID",
|
||||
},
|
||||
],
|
||||
"ollama": [
|
||||
{
|
||||
"default": True,
|
||||
"API_BASE": "http://localhost:11434",
|
||||
}
|
||||
],
|
||||
"bedrock": [
|
||||
{
|
||||
"prompt": "Enter your AWS Access Key ID (press Enter to skip)",
|
||||
"key_name": "AWS_ACCESS_KEY_ID",
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your AWS Secret Access Key (press Enter to skip)",
|
||||
"key_name": "AWS_SECRET_ACCESS_KEY",
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your AWS Region Name (press Enter to skip)",
|
||||
"key_name": "AWS_REGION_NAME",
|
||||
},
|
||||
],
|
||||
"azure": [
|
||||
{
|
||||
"prompt": "Enter your Azure deployment name (must start with 'azure/')",
|
||||
"key_name": "model",
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your AZURE API key (press Enter to skip)",
|
||||
"key_name": "AZURE_API_KEY",
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your AZURE API base URL (press Enter to skip)",
|
||||
"key_name": "AZURE_API_BASE",
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your AZURE API version (press Enter to skip)",
|
||||
"key_name": "AZURE_API_VERSION",
|
||||
},
|
||||
],
|
||||
"cerebras": [
|
||||
{
|
||||
"prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
|
||||
"key_name": "model",
|
||||
},
|
||||
{
|
||||
"prompt": "Enter your Cerebras API version (press Enter to skip)",
|
||||
"key_name": "CEREBRAS_API_KEY",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
PROVIDERS = ['openai', 'anthropic', 'gemini', 'groq', 'ollama']
|
||||
|
||||
PROVIDERS = [
|
||||
"openai",
|
||||
"anthropic",
|
||||
"gemini",
|
||||
"groq",
|
||||
"ollama",
|
||||
"watson",
|
||||
"bedrock",
|
||||
"azure",
|
||||
"cerebras",
|
||||
]
|
||||
|
||||
MODELS = {
|
||||
'openai': ['gpt-4', 'gpt-4o', 'gpt-4o-mini', 'o1-mini', 'o1-preview'],
|
||||
'anthropic': ['claude-3-5-sonnet-20240620', 'claude-3-sonnet-20240229', 'claude-3-opus-20240229', 'claude-3-haiku-20240307'],
|
||||
'gemini': ['gemini-1.5-flash', 'gemini-1.5-pro', 'gemini-gemma-2-9b-it', 'gemini-gemma-2-27b-it'],
|
||||
'groq': ['llama-3.1-8b-instant', 'llama-3.1-70b-versatile', 'llama-3.1-405b-reasoning', 'gemma2-9b-it', 'gemma-7b-it'],
|
||||
'ollama': ['llama3.1', 'mixtral'],
|
||||
"openai": ["gpt-4", "gpt-4o", "gpt-4o-mini", "o1-mini", "o1-preview"],
|
||||
"anthropic": [
|
||||
"claude-3-5-sonnet-20240620",
|
||||
"claude-3-sonnet-20240229",
|
||||
"claude-3-opus-20240229",
|
||||
"claude-3-haiku-20240307",
|
||||
],
|
||||
"gemini": [
|
||||
"gemini/gemini-1.5-flash",
|
||||
"gemini/gemini-1.5-pro",
|
||||
"gemini/gemini-gemma-2-9b-it",
|
||||
"gemini/gemini-gemma-2-27b-it",
|
||||
],
|
||||
"groq": [
|
||||
"groq/llama-3.1-8b-instant",
|
||||
"groq/llama-3.1-70b-versatile",
|
||||
"groq/llama-3.1-405b-reasoning",
|
||||
"groq/gemma2-9b-it",
|
||||
"groq/gemma-7b-it",
|
||||
],
|
||||
"ollama": ["ollama/llama3.1", "ollama/mixtral"],
|
||||
"watson": [
|
||||
"watsonx/meta-llama/llama-3-1-70b-instruct",
|
||||
"watsonx/meta-llama/llama-3-1-8b-instruct",
|
||||
"watsonx/meta-llama/llama-3-2-11b-vision-instruct",
|
||||
"watsonx/meta-llama/llama-3-2-1b-instruct",
|
||||
"watsonx/meta-llama/llama-3-2-90b-vision-instruct",
|
||||
"watsonx/meta-llama/llama-3-405b-instruct",
|
||||
"watsonx/mistral/mistral-large",
|
||||
"watsonx/ibm/granite-3-8b-instruct",
|
||||
],
|
||||
"bedrock": [
|
||||
"bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
|
||||
"bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
"bedrock/anthropic.claude-3-haiku-20240307-v1:0",
|
||||
"bedrock/anthropic.claude-3-opus-20240229-v1:0",
|
||||
"bedrock/anthropic.claude-v2:1",
|
||||
"bedrock/anthropic.claude-v2",
|
||||
"bedrock/anthropic.claude-instant-v1",
|
||||
"bedrock/meta.llama3-1-405b-instruct-v1:0",
|
||||
"bedrock/meta.llama3-1-70b-instruct-v1:0",
|
||||
"bedrock/meta.llama3-1-8b-instruct-v1:0",
|
||||
"bedrock/meta.llama3-70b-instruct-v1:0",
|
||||
"bedrock/meta.llama3-8b-instruct-v1:0",
|
||||
"bedrock/amazon.titan-text-lite-v1",
|
||||
"bedrock/amazon.titan-text-express-v1",
|
||||
"bedrock/cohere.command-text-v14",
|
||||
"bedrock/ai21.j2-mid-v1",
|
||||
"bedrock/ai21.j2-ultra-v1",
|
||||
"bedrock/ai21.jamba-instruct-v1:0",
|
||||
"bedrock/meta.llama2-13b-chat-v1",
|
||||
"bedrock/meta.llama2-70b-chat-v1",
|
||||
"bedrock/mistral.mistral-7b-instruct-v0:2",
|
||||
"bedrock/mistral.mixtral-8x7b-instruct-v0:1",
|
||||
],
|
||||
}
|
||||
|
||||
JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
|
||||
JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import shutil
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
from crewai.cli.constants import ENV_VARS
|
||||
from crewai.cli.constants import ENV_VARS, MODELS
|
||||
from crewai.cli.provider import (
|
||||
PROVIDERS,
|
||||
get_provider_data,
|
||||
select_model,
|
||||
select_provider,
|
||||
@@ -29,20 +29,20 @@ def create_folder_structure(name, parent_folder=None):
|
||||
click.secho("Operation cancelled.", fg="yellow")
|
||||
sys.exit(0)
|
||||
click.secho(f"Overriding folder {folder_name}...", fg="green", bold=True)
|
||||
else:
|
||||
click.secho(
|
||||
f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
|
||||
fg="green",
|
||||
bold=True,
|
||||
)
|
||||
shutil.rmtree(folder_path) # Delete the existing folder and its contents
|
||||
|
||||
if not folder_path.exists():
|
||||
folder_path.mkdir(parents=True)
|
||||
(folder_path / "tests").mkdir(exist_ok=True)
|
||||
if not parent_folder:
|
||||
(folder_path / "src" / folder_name).mkdir(parents=True)
|
||||
(folder_path / "src" / folder_name / "tools").mkdir(parents=True)
|
||||
(folder_path / "src" / folder_name / "config").mkdir(parents=True)
|
||||
click.secho(
|
||||
f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
|
||||
fg="green",
|
||||
bold=True,
|
||||
)
|
||||
|
||||
folder_path.mkdir(parents=True)
|
||||
(folder_path / "tests").mkdir(exist_ok=True)
|
||||
if not parent_folder:
|
||||
(folder_path / "src" / folder_name).mkdir(parents=True)
|
||||
(folder_path / "src" / folder_name / "tools").mkdir(parents=True)
|
||||
(folder_path / "src" / folder_name / "config").mkdir(parents=True)
|
||||
|
||||
return folder_path, folder_name, class_name
|
||||
|
||||
@@ -92,7 +92,10 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
|
||||
|
||||
existing_provider = None
|
||||
for provider, env_keys in ENV_VARS.items():
|
||||
if any(key in env_vars for key in env_keys):
|
||||
if any(
|
||||
"key_name" in details and details["key_name"] in env_vars
|
||||
for details in env_keys
|
||||
):
|
||||
existing_provider = provider
|
||||
break
|
||||
|
||||
@@ -118,47 +121,48 @@ def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
|
||||
"No provider selected. Please try again or press 'q' to exit.", fg="red"
|
||||
)
|
||||
|
||||
while True:
|
||||
selected_model = select_model(selected_provider, provider_models)
|
||||
if selected_model is None: # User typed 'q'
|
||||
click.secho("Exiting...", fg="yellow")
|
||||
sys.exit(0)
|
||||
if selected_model: # Valid selection
|
||||
break
|
||||
click.secho(
|
||||
"No model selected. Please try again or press 'q' to exit.", fg="red"
|
||||
)
|
||||
# Check if the selected provider has predefined models
|
||||
if selected_provider in MODELS and MODELS[selected_provider]:
|
||||
while True:
|
||||
selected_model = select_model(selected_provider, provider_models)
|
||||
if selected_model is None: # User typed 'q'
|
||||
click.secho("Exiting...", fg="yellow")
|
||||
sys.exit(0)
|
||||
if selected_model: # Valid selection
|
||||
break
|
||||
click.secho(
|
||||
"No model selected. Please try again or press 'q' to exit.",
|
||||
fg="red",
|
||||
)
|
||||
env_vars["MODEL"] = selected_model
|
||||
|
||||
if selected_provider in PROVIDERS:
|
||||
api_key_var = ENV_VARS[selected_provider][0]
|
||||
else:
|
||||
api_key_var = click.prompt(
|
||||
f"Enter the environment variable name for your {selected_provider.capitalize()} API key",
|
||||
type=str,
|
||||
default="",
|
||||
)
|
||||
# Check if the selected provider requires API keys
|
||||
if selected_provider in ENV_VARS:
|
||||
provider_env_vars = ENV_VARS[selected_provider]
|
||||
for details in provider_env_vars:
|
||||
if details.get("default", False):
|
||||
# Automatically add default key-value pairs
|
||||
for key, value in details.items():
|
||||
if key not in ["prompt", "key_name", "default"]:
|
||||
env_vars[key] = value
|
||||
elif "key_name" in details:
|
||||
# Prompt for non-default key-value pairs
|
||||
prompt = details["prompt"]
|
||||
key_name = details["key_name"]
|
||||
api_key_value = click.prompt(prompt, default="", show_default=False)
|
||||
|
||||
api_key_value = ""
|
||||
click.echo(
|
||||
f"Enter your {selected_provider.capitalize()} API key (press Enter to skip): ",
|
||||
nl=False,
|
||||
)
|
||||
try:
|
||||
api_key_value = input()
|
||||
except (KeyboardInterrupt, EOFError):
|
||||
api_key_value = ""
|
||||
if api_key_value.strip():
|
||||
env_vars[key_name] = api_key_value
|
||||
|
||||
if api_key_value.strip():
|
||||
env_vars = {api_key_var: api_key_value}
|
||||
if env_vars:
|
||||
write_env_file(folder_path, env_vars)
|
||||
click.secho("API key saved to .env file", fg="green")
|
||||
click.secho("API keys and model saved to .env file", fg="green")
|
||||
else:
|
||||
click.secho(
|
||||
"No API key provided. Skipping .env file creation.", fg="yellow"
|
||||
"No API keys provided. Skipping .env file creation.", fg="yellow"
|
||||
)
|
||||
|
||||
env_vars["MODEL"] = selected_model
|
||||
click.secho(f"Selected model: {selected_model}", fg="green")
|
||||
click.secho(f"Selected model: {env_vars.get('MODEL', 'N/A')}", fg="green")
|
||||
|
||||
package_dir = Path(__file__).parent
|
||||
templates_dir = package_dir / "templates" / "crew"
|
||||
|
||||
@@ -3,12 +3,13 @@ import subprocess
|
||||
import click
|
||||
|
||||
|
||||
def install_crew() -> None:
|
||||
def install_crew(proxy_options: list[str]) -> None:
|
||||
"""
|
||||
Install the crew by running the UV command to lock and install.
|
||||
"""
|
||||
try:
|
||||
subprocess.run(["uv", "sync"], check=True, capture_output=False, text=True)
|
||||
command = ["uv", "sync"] + proxy_options
|
||||
subprocess.run(command, check=True, capture_output=False, text=True)
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
click.echo(f"An error occurred while running the crew: {e}", err=True)
|
||||
|
||||
@@ -7,7 +7,7 @@ def plot_flow() -> None:
|
||||
"""
|
||||
Plot the flow by running a command in the UV environment.
|
||||
"""
|
||||
command = ["uv", "run", "plot_flow"]
|
||||
command = ["uv", "run", "plot"]
|
||||
|
||||
try:
|
||||
result = subprocess.run(command, capture_output=False, text=True, check=True)
|
||||
|
||||
@@ -164,7 +164,7 @@ def fetch_provider_data(cache_file):
|
||||
- dict or None: The fetched provider data or None if the operation fails.
|
||||
"""
|
||||
try:
|
||||
response = requests.get(JSON_URL, stream=True, timeout=10)
|
||||
response = requests.get(JSON_URL, stream=True, timeout=60)
|
||||
response.raise_for_status()
|
||||
data = download_data(response)
|
||||
with open(cache_file, "w") as f:
|
||||
|
||||
@@ -5,9 +5,17 @@ from crewai.memory.entity.entity_memory import EntityMemory
|
||||
from crewai.memory.long_term.long_term_memory import LongTermMemory
|
||||
from crewai.memory.short_term.short_term_memory import ShortTermMemory
|
||||
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
|
||||
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||
|
||||
|
||||
def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
|
||||
def reset_memories_command(
|
||||
long,
|
||||
short,
|
||||
entity,
|
||||
knowledge,
|
||||
kickoff_outputs,
|
||||
all,
|
||||
) -> None:
|
||||
"""
|
||||
Reset the crew memories.
|
||||
|
||||
@@ -17,6 +25,7 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
|
||||
entity (bool): Whether to reset the entity memory.
|
||||
kickoff_outputs (bool): Whether to reset the latest kickoff task outputs.
|
||||
all (bool): Whether to reset all memories.
|
||||
knowledge (bool): Whether to reset the knowledge.
|
||||
"""
|
||||
|
||||
try:
|
||||
@@ -25,6 +34,7 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
|
||||
EntityMemory().reset()
|
||||
LongTermMemory().reset()
|
||||
TaskOutputStorageHandler().reset()
|
||||
KnowledgeStorage().reset()
|
||||
click.echo("All memories have been reset.")
|
||||
else:
|
||||
if long:
|
||||
@@ -40,6 +50,9 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
|
||||
if kickoff_outputs:
|
||||
TaskOutputStorageHandler().reset()
|
||||
click.echo("Latest Kickoff outputs stored has been reset.")
|
||||
if knowledge:
|
||||
KnowledgeStorage().reset()
|
||||
click.echo("Knowledge has been reset.")
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
click.echo(f"An error occurred while resetting the memories: {e}", err=True)
|
||||
|
||||
@@ -1,10 +1,9 @@
|
||||
import subprocess
|
||||
|
||||
import click
|
||||
import tomllib
|
||||
from packaging import version
|
||||
|
||||
from crewai.cli.utils import get_crewai_version
|
||||
from crewai.cli.utils import get_crewai_version, read_toml
|
||||
|
||||
|
||||
def run_crew() -> None:
|
||||
@@ -15,10 +14,9 @@ def run_crew() -> None:
|
||||
crewai_version = get_crewai_version()
|
||||
min_required_version = "0.71.0"
|
||||
|
||||
with open("pyproject.toml", "rb") as f:
|
||||
data = tomllib.load(f)
|
||||
pyproject_data = read_toml()
|
||||
|
||||
if data.get("tool", {}).get("poetry") and (
|
||||
if pyproject_data.get("tool", {}).get("poetry") and (
|
||||
version.parse(crewai_version) < version.parse(min_required_version)
|
||||
):
|
||||
click.secho(
|
||||
@@ -26,7 +24,6 @@ def run_crew() -> None:
|
||||
f"Please run `crewai update` to update your pyproject.toml to use uv.",
|
||||
fg="red",
|
||||
)
|
||||
print()
|
||||
|
||||
try:
|
||||
subprocess.run(command, capture_output=False, text=True, check=True)
|
||||
@@ -35,10 +32,7 @@ def run_crew() -> None:
|
||||
click.echo(f"An error occurred while running the crew: {e}", err=True)
|
||||
click.echo(e.output, err=True, nl=True)
|
||||
|
||||
with open("pyproject.toml", "rb") as f:
|
||||
data = tomllib.load(f)
|
||||
|
||||
if data.get("tool", {}).get("poetry"):
|
||||
if pyproject_data.get("tool", {}).get("poetry"):
|
||||
click.secho(
|
||||
"It's possible that you are using an old version of crewAI that uses poetry, please run `crewai update` to update your pyproject.toml to use uv.",
|
||||
fg="yellow",
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
from crewai.project import CrewBase, agent, crew, task, before_kickoff, after_kickoff
|
||||
|
||||
# Uncomment the following line to use an example of a custom tool
|
||||
# from {{folder_name}}.tools.custom_tool import MyCustomTool
|
||||
@@ -8,9 +8,24 @@ from crewai.project import CrewBase, agent, crew, task
|
||||
# from crewai_tools import SerperDevTool
|
||||
|
||||
@CrewBase
|
||||
class {{crew_name}}Crew():
|
||||
class {{crew_name}}():
|
||||
"""{{crew_name}} crew"""
|
||||
|
||||
agents_config = 'config/agents.yaml'
|
||||
tasks_config = 'config/tasks.yaml'
|
||||
|
||||
@before_kickoff # Optional hook to be executed before the crew starts
|
||||
def pull_data_example(self, inputs):
|
||||
# Example of pulling data from an external API, dynamically changing the inputs
|
||||
inputs['extra_data'] = "This is extra data"
|
||||
return inputs
|
||||
|
||||
@after_kickoff # Optional hook to be executed after the crew has finished
|
||||
def log_results(self, output):
|
||||
# Example of logging results, dynamically changing the output
|
||||
print(f"Results: {output}")
|
||||
return output
|
||||
|
||||
@agent
|
||||
def researcher(self) -> Agent:
|
||||
return Agent(
|
||||
@@ -48,4 +63,4 @@ class {{crew_name}}Crew():
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
# process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
#!/usr/bin/env python
|
||||
import sys
|
||||
from {{folder_name}}.crew import {{crew_name}}Crew
|
||||
import warnings
|
||||
|
||||
from {{folder_name}}.crew import {{crew_name}}
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
|
||||
|
||||
# This main file is intended to be a way for you to run your
|
||||
# crew locally, so refrain from adding unnecessary logic into this file.
|
||||
@@ -14,7 +18,7 @@ def run():
|
||||
inputs = {
|
||||
'topic': 'AI LLMs'
|
||||
}
|
||||
{{crew_name}}Crew().crew().kickoff(inputs=inputs)
|
||||
{{crew_name}}().crew().kickoff(inputs=inputs)
|
||||
|
||||
|
||||
def train():
|
||||
@@ -25,7 +29,7 @@ def train():
|
||||
"topic": "AI LLMs"
|
||||
}
|
||||
try:
|
||||
{{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
|
||||
{{crew_name}}().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(f"An error occurred while training the crew: {e}")
|
||||
@@ -35,7 +39,7 @@ def replay():
|
||||
Replay the crew execution from a specific task.
|
||||
"""
|
||||
try:
|
||||
{{crew_name}}Crew().crew().replay(task_id=sys.argv[1])
|
||||
{{crew_name}}().crew().replay(task_id=sys.argv[1])
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(f"An error occurred while replaying the crew: {e}")
|
||||
@@ -48,7 +52,7 @@ def test():
|
||||
"topic": "AI LLMs"
|
||||
}
|
||||
try:
|
||||
{{crew_name}}Crew().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
|
||||
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
|
||||
|
||||
except Exception as e:
|
||||
raise Exception(f"An error occurred while replaying the crew: {e}")
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<=3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.76.2,<1.0.0"
|
||||
"crewai[tools]>=0.83.0,<1.0.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -1,11 +1,18 @@
|
||||
from crewai_tools import BaseTool
|
||||
from crewai.tools import BaseTool
|
||||
from typing import Type
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class MyCustomToolInput(BaseModel):
|
||||
"""Input schema for MyCustomTool."""
|
||||
argument: str = Field(..., description="Description of the argument.")
|
||||
|
||||
class MyCustomTool(BaseTool):
|
||||
name: str = "Name of my tool"
|
||||
description: str = (
|
||||
"Clear description for what this tool is useful for, you agent will need this information to use it."
|
||||
)
|
||||
args_schema: Type[BaseModel] = MyCustomToolInput
|
||||
|
||||
def _run(self, argument: str) -> str:
|
||||
# Implementation goes here
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<=3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.76.2,<1.0.0",
|
||||
"crewai[tools]>=0.83.0,<1.0.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -1,4 +1,13 @@
|
||||
from crewai_tools import BaseTool
|
||||
from typing import Type
|
||||
|
||||
from crewai.tools import BaseTool
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class MyCustomToolInput(BaseModel):
|
||||
"""Input schema for MyCustomTool."""
|
||||
|
||||
argument: str = Field(..., description="Description of the argument.")
|
||||
|
||||
|
||||
class MyCustomTool(BaseTool):
|
||||
@@ -6,6 +15,7 @@ class MyCustomTool(BaseTool):
|
||||
description: str = (
|
||||
"Clear description for what this tool is useful for, you agent will need this information to use it."
|
||||
)
|
||||
args_schema: Type[BaseModel] = MyCustomToolInput
|
||||
|
||||
def _run(self, argument: str) -> str:
|
||||
# Implementation goes here
|
||||
|
||||
@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.10,<=3.13"
|
||||
crewai = { extras = ["tools"], version = ">=0.76.2,<1.0.0" }
|
||||
crewai = { extras = ["tools"], version = ">=0.83.0,<1.0.0" }
|
||||
asyncio = "*"
|
||||
|
||||
[tool.poetry.scripts]
|
||||
|
||||
@@ -1,11 +1,18 @@
|
||||
from crewai_tools import BaseTool
|
||||
from typing import Type
|
||||
from crewai.tools import BaseTool
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class MyCustomToolInput(BaseModel):
|
||||
"""Input schema for MyCustomTool."""
|
||||
argument: str = Field(..., description="Description of the argument.")
|
||||
|
||||
class MyCustomTool(BaseTool):
|
||||
name: str = "Name of my tool"
|
||||
description: str = (
|
||||
"Clear description for what this tool is useful for, you agent will need this information to use it."
|
||||
)
|
||||
args_schema: Type[BaseModel] = MyCustomToolInput
|
||||
|
||||
def _run(self, argument: str) -> str:
|
||||
# Implementation goes here
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = ["Your Name <you@example.com>"]
|
||||
requires-python = ">=3.10,<=3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.76.2,<1.0.0"
|
||||
"crewai[tools]>=0.83.0,<1.0.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -1,11 +1,18 @@
|
||||
from crewai_tools import BaseTool
|
||||
from typing import Type
|
||||
from crewai.tools import BaseTool
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class MyCustomToolInput(BaseModel):
|
||||
"""Input schema for MyCustomTool."""
|
||||
argument: str = Field(..., description="Description of the argument.")
|
||||
|
||||
class MyCustomTool(BaseTool):
|
||||
name: str = "Name of my tool"
|
||||
description: str = (
|
||||
"Clear description for what this tool is useful for, you agent will need this information to use it."
|
||||
)
|
||||
args_schema: Type[BaseModel] = MyCustomToolInput
|
||||
|
||||
def _run(self, argument: str) -> str:
|
||||
# Implementation goes here
|
||||
|
||||
@@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<=3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.76.2"
|
||||
"crewai[tools]>=0.83.0"
|
||||
]
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
from crewai_tools import BaseTool
|
||||
from crewai.tools import BaseTool
|
||||
|
||||
|
||||
class {{class_name}}(BaseTool):
|
||||
name: str = "Name of my tool"
|
||||
|
||||
@@ -1,17 +1,15 @@
|
||||
import base64
|
||||
import os
|
||||
import platform
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from netrc import netrc
|
||||
import stat
|
||||
|
||||
import click
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli import git
|
||||
from crewai.cli.command import BaseCommand, PlusAPIMixin
|
||||
from crewai.cli.config import Settings
|
||||
from crewai.cli.utils import (
|
||||
get_project_description,
|
||||
get_project_name,
|
||||
@@ -153,26 +151,16 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
raise SystemExit
|
||||
|
||||
login_response_json = login_response.json()
|
||||
self._set_netrc_credentials(login_response_json["credential"])
|
||||
|
||||
settings = Settings()
|
||||
settings.tool_repository_username = login_response_json["credential"]["username"]
|
||||
settings.tool_repository_password = login_response_json["credential"]["password"]
|
||||
settings.dump()
|
||||
|
||||
console.print(
|
||||
"Successfully authenticated to the tool repository.", style="bold green"
|
||||
)
|
||||
|
||||
def _set_netrc_credentials(self, credentials, netrc_path=None):
|
||||
if not netrc_path:
|
||||
netrc_filename = "_netrc" if platform.system() == "Windows" else ".netrc"
|
||||
netrc_path = Path.home() / netrc_filename
|
||||
netrc_path.touch(mode=stat.S_IRUSR | stat.S_IWUSR, exist_ok=True)
|
||||
|
||||
netrc_instance = netrc(file=netrc_path)
|
||||
netrc_instance.hosts["app.crewai.com"] = (credentials["username"], "", credentials["password"])
|
||||
|
||||
with open(netrc_path, 'w') as file:
|
||||
file.write(str(netrc_instance))
|
||||
|
||||
console.print(f"Added credentials to {netrc_path}", style="bold green")
|
||||
|
||||
def _add_package(self, tool_details):
|
||||
tool_handle = tool_details["handle"]
|
||||
repository_handle = tool_details["repository"]["handle"]
|
||||
@@ -187,7 +175,11 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
tool_handle,
|
||||
]
|
||||
add_package_result = subprocess.run(
|
||||
add_package_command, capture_output=False, text=True, check=True
|
||||
add_package_command,
|
||||
capture_output=False,
|
||||
env=self._build_env_with_credentials(repository_handle),
|
||||
text=True,
|
||||
check=True
|
||||
)
|
||||
|
||||
if add_package_result.stderr:
|
||||
@@ -206,3 +198,13 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
|
||||
"[bold yellow]Tip:[/bold yellow] Navigate to a different directory and try again."
|
||||
)
|
||||
raise SystemExit
|
||||
|
||||
def _build_env_with_credentials(self, repository_handle: str):
|
||||
repository_handle = repository_handle.upper().replace("-", "_")
|
||||
settings = Settings()
|
||||
|
||||
env = os.environ.copy()
|
||||
env[f"UV_INDEX_{repository_handle}_USERNAME"] = str(settings.tool_repository_username or "")
|
||||
env[f"UV_INDEX_{repository_handle}_PASSWORD"] = str(settings.tool_repository_password or "")
|
||||
|
||||
return env
|
||||
|
||||
@@ -2,7 +2,8 @@ import os
|
||||
import shutil
|
||||
|
||||
import tomli_w
|
||||
import tomllib
|
||||
|
||||
from crewai.cli.utils import read_toml
|
||||
|
||||
|
||||
def update_crew() -> None:
|
||||
@@ -18,10 +19,9 @@ def migrate_pyproject(input_file, output_file):
|
||||
And it will be used to migrate the pyproject.toml to the new format when uv is used.
|
||||
When the time comes that uv supports the new format, this function will be deprecated.
|
||||
"""
|
||||
|
||||
poetry_data = {}
|
||||
# Read the input pyproject.toml
|
||||
with open(input_file, "rb") as f:
|
||||
pyproject = tomllib.load(f)
|
||||
pyproject_data = read_toml()
|
||||
|
||||
# Initialize the new project structure
|
||||
new_pyproject = {
|
||||
@@ -30,30 +30,30 @@ def migrate_pyproject(input_file, output_file):
|
||||
}
|
||||
|
||||
# Migrate project metadata
|
||||
if "tool" in pyproject and "poetry" in pyproject["tool"]:
|
||||
poetry = pyproject["tool"]["poetry"]
|
||||
new_pyproject["project"]["name"] = poetry.get("name")
|
||||
new_pyproject["project"]["version"] = poetry.get("version")
|
||||
new_pyproject["project"]["description"] = poetry.get("description")
|
||||
if "tool" in pyproject_data and "poetry" in pyproject_data["tool"]:
|
||||
poetry_data = pyproject_data["tool"]["poetry"]
|
||||
new_pyproject["project"]["name"] = poetry_data.get("name")
|
||||
new_pyproject["project"]["version"] = poetry_data.get("version")
|
||||
new_pyproject["project"]["description"] = poetry_data.get("description")
|
||||
new_pyproject["project"]["authors"] = [
|
||||
{
|
||||
"name": author.split("<")[0].strip(),
|
||||
"email": author.split("<")[1].strip(">").strip(),
|
||||
}
|
||||
for author in poetry.get("authors", [])
|
||||
for author in poetry_data.get("authors", [])
|
||||
]
|
||||
new_pyproject["project"]["requires-python"] = poetry.get("python")
|
||||
new_pyproject["project"]["requires-python"] = poetry_data.get("python")
|
||||
else:
|
||||
# If it's already in the new format, just copy the project section
|
||||
new_pyproject["project"] = pyproject.get("project", {})
|
||||
new_pyproject["project"] = pyproject_data.get("project", {})
|
||||
|
||||
# Migrate or copy dependencies
|
||||
if "dependencies" in new_pyproject["project"]:
|
||||
# If dependencies are already in the new format, keep them as is
|
||||
pass
|
||||
elif "dependencies" in poetry:
|
||||
elif poetry_data and "dependencies" in poetry_data:
|
||||
new_pyproject["project"]["dependencies"] = []
|
||||
for dep, version in poetry["dependencies"].items():
|
||||
for dep, version in poetry_data["dependencies"].items():
|
||||
if isinstance(version, dict): # Handle extras
|
||||
extras = ",".join(version.get("extras", []))
|
||||
new_dep = f"{dep}[{extras}]"
|
||||
@@ -67,10 +67,10 @@ def migrate_pyproject(input_file, output_file):
|
||||
new_pyproject["project"]["dependencies"].append(new_dep)
|
||||
|
||||
# Migrate or copy scripts
|
||||
if "scripts" in poetry:
|
||||
new_pyproject["project"]["scripts"] = poetry["scripts"]
|
||||
elif "scripts" in pyproject.get("project", {}):
|
||||
new_pyproject["project"]["scripts"] = pyproject["project"]["scripts"]
|
||||
if poetry_data and "scripts" in poetry_data:
|
||||
new_pyproject["project"]["scripts"] = poetry_data["scripts"]
|
||||
elif pyproject_data.get("project", {}) and "scripts" in pyproject_data["project"]:
|
||||
new_pyproject["project"]["scripts"] = pyproject_data["project"]["scripts"]
|
||||
else:
|
||||
new_pyproject["project"]["scripts"] = {}
|
||||
|
||||
@@ -87,8 +87,8 @@ def migrate_pyproject(input_file, output_file):
|
||||
new_pyproject["project"]["scripts"]["run_crew"] = f"{module_name}.main:run"
|
||||
|
||||
# Migrate optional dependencies
|
||||
if "extras" in poetry:
|
||||
new_pyproject["project"]["optional-dependencies"] = poetry["extras"]
|
||||
if poetry_data and "extras" in poetry_data:
|
||||
new_pyproject["project"]["optional-dependencies"] = poetry_data["extras"]
|
||||
|
||||
# Backup the old pyproject.toml
|
||||
backup_file = "pyproject-old.toml"
|
||||
|
||||
@@ -6,6 +6,7 @@ from functools import reduce
|
||||
from typing import Any, Dict, List
|
||||
|
||||
import click
|
||||
import tomli
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.authentication.utils import TokenManager
|
||||
@@ -54,6 +55,13 @@ def simple_toml_parser(content):
|
||||
return result
|
||||
|
||||
|
||||
def read_toml(file_path: str = "pyproject.toml"):
|
||||
"""Read the content of a TOML file and return it as a dictionary."""
|
||||
with open(file_path, "rb") as f:
|
||||
toml_dict = tomli.load(f)
|
||||
return toml_dict
|
||||
|
||||
|
||||
def parse_toml(content):
|
||||
if sys.version_info >= (3, 11):
|
||||
return tomllib.loads(content)
|
||||
|
||||
@@ -5,7 +5,7 @@ import uuid
|
||||
import warnings
|
||||
from concurrent.futures import Future
|
||||
from hashlib import md5
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
|
||||
|
||||
from pydantic import (
|
||||
UUID4,
|
||||
@@ -27,17 +27,17 @@ from crewai.llm import LLM
|
||||
from crewai.memory.entity.entity_memory import EntityMemory
|
||||
from crewai.memory.long_term.long_term_memory import LongTermMemory
|
||||
from crewai.memory.short_term.short_term_memory import ShortTermMemory
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.memory.user.user_memory import UserMemory
|
||||
from crewai.process import Process
|
||||
from crewai.task import Task
|
||||
from crewai.tasks.conditional_task import ConditionalTask
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.telemetry import Telemetry
|
||||
from crewai.tools.agent_tools import AgentTools
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
||||
from crewai.utilities.constants import (
|
||||
TRAINING_DATA_FILE,
|
||||
)
|
||||
from crewai.utilities.constants import TRAINING_DATA_FILE
|
||||
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
|
||||
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
||||
from crewai.utilities.formatter import (
|
||||
@@ -71,6 +71,7 @@ class Crew(BaseModel):
|
||||
manager_llm: The language model that will run manager agent.
|
||||
manager_agent: Custom agent that will be used as manager.
|
||||
memory: Whether the crew should use memory to store memories of it's execution.
|
||||
memory_config: Configuration for the memory to be used for the crew.
|
||||
cache: Whether the crew should use a cache to store the results of the tools execution.
|
||||
function_calling_llm: The language model that will run the tool calling for all the agents.
|
||||
process: The process flow that the crew will follow (e.g., sequential, hierarchical).
|
||||
@@ -94,6 +95,7 @@ class Crew(BaseModel):
|
||||
_short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
|
||||
_long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
|
||||
_entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
|
||||
_user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()
|
||||
_train: Optional[bool] = PrivateAttr(default=False)
|
||||
_train_iteration: Optional[int] = PrivateAttr()
|
||||
_inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
|
||||
@@ -114,6 +116,10 @@ class Crew(BaseModel):
|
||||
default=False,
|
||||
description="Whether the crew should use memory to store memories of it's execution",
|
||||
)
|
||||
memory_config: Optional[Dict[str, Any]] = Field(
|
||||
default=None,
|
||||
description="Configuration for the memory to be used for the crew.",
|
||||
)
|
||||
short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(
|
||||
default=None,
|
||||
description="An Instance of the ShortTermMemory to be used by the Crew",
|
||||
@@ -126,7 +132,11 @@ class Crew(BaseModel):
|
||||
default=None,
|
||||
description="An Instance of the EntityMemory to be used by the Crew",
|
||||
)
|
||||
embedder: Optional[Any] = Field(
|
||||
user_memory: Optional[InstanceOf[UserMemory]] = Field(
|
||||
default=None,
|
||||
description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
|
||||
)
|
||||
embedder: Optional[dict] = Field(
|
||||
default=None,
|
||||
description="Configuration for the embedder to be used for the crew.",
|
||||
)
|
||||
@@ -154,6 +164,16 @@ class Crew(BaseModel):
|
||||
default=None,
|
||||
description="Callback to be executed after each task for all agents execution.",
|
||||
)
|
||||
before_kickoff_callbacks: List[
|
||||
Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
|
||||
] = Field(
|
||||
default_factory=list,
|
||||
description="List of callbacks to be executed before crew kickoff. It may be used to adjust inputs before the crew is executed.",
|
||||
)
|
||||
after_kickoff_callbacks: List[Callable[[CrewOutput], CrewOutput]] = Field(
|
||||
default_factory=list,
|
||||
description="List of callbacks to be executed after crew kickoff. It may be used to adjust the output of the crew.",
|
||||
)
|
||||
max_rpm: Optional[int] = Field(
|
||||
default=None,
|
||||
description="Maximum number of requests per minute for the crew execution to be respected.",
|
||||
@@ -182,6 +202,10 @@ class Crew(BaseModel):
|
||||
default=[],
|
||||
description="List of execution logs for tasks",
|
||||
)
|
||||
knowledge: Optional[Dict[str, Any]] = Field(
|
||||
default=None, description="Knowledge for the crew. Add knowledge sources to the knowledge object."
|
||||
)
|
||||
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
@@ -238,13 +262,31 @@ class Crew(BaseModel):
|
||||
self._short_term_memory = (
|
||||
self.short_term_memory
|
||||
if self.short_term_memory
|
||||
else ShortTermMemory(crew=self, embedder_config=self.embedder)
|
||||
else ShortTermMemory(
|
||||
crew=self,
|
||||
embedder_config=self.embedder,
|
||||
)
|
||||
)
|
||||
self._entity_memory = (
|
||||
self.entity_memory
|
||||
if self.entity_memory
|
||||
else EntityMemory(crew=self, embedder_config=self.embedder)
|
||||
)
|
||||
if hasattr(self, "memory_config") and self.memory_config is not None:
|
||||
self._user_memory = (
|
||||
self.user_memory if self.user_memory else UserMemory(crew=self)
|
||||
)
|
||||
else:
|
||||
self._user_memory = None
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def create_crew_knowledge(self) -> "Crew":
|
||||
if self.knowledge:
|
||||
try:
|
||||
self.knowledge = Knowledge(**self.knowledge) if isinstance(self.knowledge, dict) else self.knowledge
|
||||
except (TypeError, ValueError) as e:
|
||||
raise ValueError(f"Invalid knowledge configuration: {str(e)}")
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
@@ -445,18 +487,22 @@ class Crew(BaseModel):
|
||||
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
|
||||
|
||||
for agent in train_crew.agents:
|
||||
result = TaskEvaluator(agent).evaluate_training_data(
|
||||
training_data=training_data, agent_id=str(agent.id)
|
||||
)
|
||||
if training_data.get(str(agent.id)):
|
||||
result = TaskEvaluator(agent).evaluate_training_data(
|
||||
training_data=training_data, agent_id=str(agent.id)
|
||||
)
|
||||
|
||||
CrewTrainingHandler(filename).save_trained_data(
|
||||
agent_id=str(agent.role), trained_data=result.model_dump()
|
||||
)
|
||||
CrewTrainingHandler(filename).save_trained_data(
|
||||
agent_id=str(agent.role), trained_data=result.model_dump()
|
||||
)
|
||||
|
||||
def kickoff(
|
||||
self,
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
) -> CrewOutput:
|
||||
for before_callback in self.before_kickoff_callbacks:
|
||||
inputs = before_callback(inputs)
|
||||
|
||||
"""Starts the crew to work on its assigned tasks."""
|
||||
self._execution_span = self._telemetry.crew_execution_span(self, inputs)
|
||||
self._task_output_handler.reset()
|
||||
@@ -499,6 +545,9 @@ class Crew(BaseModel):
|
||||
f"The process '{self.process}' is not implemented yet."
|
||||
)
|
||||
|
||||
for after_callback in self.after_kickoff_callbacks:
|
||||
result = after_callback(result)
|
||||
|
||||
metrics += [agent._token_process.get_summary() for agent in self.agents]
|
||||
|
||||
self.usage_metrics = UsageMetrics()
|
||||
|
||||
@@ -1,10 +1,20 @@
|
||||
# flow.py
|
||||
|
||||
import asyncio
|
||||
import inspect
|
||||
from typing import Any, Callable, Dict, Generic, List, Set, Type, TypeVar, Union
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
Generic,
|
||||
List,
|
||||
Optional,
|
||||
Set,
|
||||
Type,
|
||||
TypeVar,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, ValidationError
|
||||
|
||||
from crewai.flow.flow_visualizer import plot_flow
|
||||
from crewai.flow.utils import get_possible_return_constants
|
||||
@@ -120,6 +130,7 @@ class FlowMeta(type):
|
||||
methods = attr_value.__trigger_methods__
|
||||
condition_type = getattr(attr_value, "__condition_type__", "OR")
|
||||
listeners[attr_name] = (condition_type, methods)
|
||||
|
||||
elif hasattr(attr_value, "__is_router__"):
|
||||
routers[attr_value.__router_for__] = attr_name
|
||||
possible_returns = get_possible_return_constants(attr_value)
|
||||
@@ -159,7 +170,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
def __init__(self) -> None:
|
||||
self._methods: Dict[str, Callable] = {}
|
||||
self._state: T = self._create_initial_state()
|
||||
self._completed_methods: Set[str] = set()
|
||||
self._method_execution_counts: Dict[str, int] = {}
|
||||
self._pending_and_listeners: Dict[str, Set[str]] = {}
|
||||
self._method_outputs: List[Any] = [] # List to store all method outputs
|
||||
|
||||
@@ -190,10 +201,74 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
"""Returns the list of all outputs from executed methods."""
|
||||
return self._method_outputs
|
||||
|
||||
def kickoff(self) -> Any:
|
||||
def _initialize_state(self, inputs: Dict[str, Any]) -> None:
|
||||
"""
|
||||
Initializes or updates the state with the provided inputs.
|
||||
|
||||
Args:
|
||||
inputs: Dictionary of inputs to initialize or update the state.
|
||||
|
||||
Raises:
|
||||
ValueError: If inputs do not match the structured state model.
|
||||
TypeError: If state is neither a BaseModel instance nor a dictionary.
|
||||
"""
|
||||
if isinstance(self._state, BaseModel):
|
||||
# Structured state management
|
||||
try:
|
||||
# Define a function to create the dynamic class
|
||||
def create_model_with_extra_forbid(
|
||||
base_model: Type[BaseModel],
|
||||
) -> Type[BaseModel]:
|
||||
class ModelWithExtraForbid(base_model): # type: ignore
|
||||
model_config = base_model.model_config.copy()
|
||||
model_config["extra"] = "forbid"
|
||||
|
||||
return ModelWithExtraForbid
|
||||
|
||||
# Create the dynamic class
|
||||
ModelWithExtraForbid = create_model_with_extra_forbid(
|
||||
self._state.__class__
|
||||
)
|
||||
|
||||
# Create a new instance using the combined state and inputs
|
||||
self._state = cast(
|
||||
T, ModelWithExtraForbid(**{**self._state.model_dump(), **inputs})
|
||||
)
|
||||
|
||||
except ValidationError as e:
|
||||
raise ValueError(f"Invalid inputs for structured state: {e}") from e
|
||||
elif isinstance(self._state, dict):
|
||||
# Unstructured state management
|
||||
self._state.update(inputs)
|
||||
else:
|
||||
raise TypeError("State must be a BaseModel instance or a dictionary.")
|
||||
|
||||
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
"""
|
||||
Starts the execution of the flow synchronously.
|
||||
|
||||
Args:
|
||||
inputs: Optional dictionary of inputs to initialize or update the state.
|
||||
|
||||
Returns:
|
||||
The final output from the flow execution.
|
||||
"""
|
||||
if inputs is not None:
|
||||
self._initialize_state(inputs)
|
||||
return asyncio.run(self.kickoff_async())
|
||||
|
||||
async def kickoff_async(self) -> Any:
|
||||
async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
"""
|
||||
Starts the execution of the flow asynchronously.
|
||||
|
||||
Args:
|
||||
inputs: Optional dictionary of inputs to initialize or update the state.
|
||||
|
||||
Returns:
|
||||
The final output from the flow execution.
|
||||
"""
|
||||
if inputs is not None:
|
||||
self._initialize_state(inputs)
|
||||
if not self._start_methods:
|
||||
raise ValueError("No start method defined")
|
||||
|
||||
@@ -216,17 +291,27 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
else:
|
||||
return None # Or raise an exception if no methods were executed
|
||||
|
||||
async def _execute_start_method(self, start_method: str) -> None:
|
||||
result = await self._execute_method(self._methods[start_method])
|
||||
await self._execute_listeners(start_method, result)
|
||||
async def _execute_start_method(self, start_method_name: str) -> None:
|
||||
result = await self._execute_method(
|
||||
start_method_name, self._methods[start_method_name]
|
||||
)
|
||||
await self._execute_listeners(start_method_name, result)
|
||||
|
||||
async def _execute_method(self, method: Callable, *args: Any, **kwargs: Any) -> Any:
|
||||
async def _execute_method(
|
||||
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
|
||||
) -> Any:
|
||||
result = (
|
||||
await method(*args, **kwargs)
|
||||
if asyncio.iscoroutinefunction(method)
|
||||
else method(*args, **kwargs)
|
||||
)
|
||||
self._method_outputs.append(result) # Store the output
|
||||
|
||||
# Track method execution counts
|
||||
self._method_execution_counts[method_name] = (
|
||||
self._method_execution_counts.get(method_name, 0) + 1
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
|
||||
@@ -234,32 +319,39 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
if trigger_method in self._routers:
|
||||
router_method = self._methods[self._routers[trigger_method]]
|
||||
path = await self._execute_method(router_method)
|
||||
# Use the path as the new trigger method
|
||||
path = await self._execute_method(
|
||||
self._routers[trigger_method], router_method
|
||||
)
|
||||
trigger_method = path
|
||||
|
||||
for listener, (condition_type, methods) in self._listeners.items():
|
||||
for listener_name, (condition_type, methods) in self._listeners.items():
|
||||
if condition_type == "OR":
|
||||
if trigger_method in methods:
|
||||
# Schedule the listener without preventing re-execution
|
||||
listener_tasks.append(
|
||||
self._execute_single_listener(listener, result)
|
||||
self._execute_single_listener(listener_name, result)
|
||||
)
|
||||
elif condition_type == "AND":
|
||||
if listener not in self._pending_and_listeners:
|
||||
self._pending_and_listeners[listener] = set()
|
||||
self._pending_and_listeners[listener].add(trigger_method)
|
||||
if set(methods) == self._pending_and_listeners[listener]:
|
||||
# Initialize pending methods for this listener if not already done
|
||||
if listener_name not in self._pending_and_listeners:
|
||||
self._pending_and_listeners[listener_name] = set(methods)
|
||||
# Remove the trigger method from pending methods
|
||||
self._pending_and_listeners[listener_name].discard(trigger_method)
|
||||
if not self._pending_and_listeners[listener_name]:
|
||||
# All required methods have been executed
|
||||
listener_tasks.append(
|
||||
self._execute_single_listener(listener, result)
|
||||
self._execute_single_listener(listener_name, result)
|
||||
)
|
||||
del self._pending_and_listeners[listener]
|
||||
# Reset pending methods for this listener
|
||||
self._pending_and_listeners.pop(listener_name, None)
|
||||
|
||||
# Run all listener tasks concurrently and wait for them to complete
|
||||
await asyncio.gather(*listener_tasks)
|
||||
if listener_tasks:
|
||||
await asyncio.gather(*listener_tasks)
|
||||
|
||||
async def _execute_single_listener(self, listener: str, result: Any) -> None:
|
||||
async def _execute_single_listener(self, listener_name: str, result: Any) -> None:
|
||||
try:
|
||||
method = self._methods[listener]
|
||||
method = self._methods[listener_name]
|
||||
sig = inspect.signature(method)
|
||||
params = list(sig.parameters.values())
|
||||
|
||||
@@ -268,15 +360,19 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
if method_params:
|
||||
# If listener expects parameters, pass the result
|
||||
listener_result = await self._execute_method(method, result)
|
||||
listener_result = await self._execute_method(
|
||||
listener_name, method, result
|
||||
)
|
||||
else:
|
||||
# If listener does not expect parameters, call without arguments
|
||||
listener_result = await self._execute_method(method)
|
||||
listener_result = await self._execute_method(listener_name, method)
|
||||
|
||||
# Execute listeners of this listener
|
||||
await self._execute_listeners(listener, listener_result)
|
||||
await self._execute_listeners(listener_name, listener_result)
|
||||
except Exception as e:
|
||||
print(f"[Flow._execute_single_listener] Error in method {listener}: {e}")
|
||||
print(
|
||||
f"[Flow._execute_single_listener] Error in method {listener_name}: {e}"
|
||||
)
|
||||
import traceback
|
||||
|
||||
traceback.print_exc()
|
||||
|
||||
55
src/crewai/knowledge/embedder/base_embedder.py
Normal file
55
src/crewai/knowledge/embedder/base_embedder.py
Normal file
@@ -0,0 +1,55 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import List
|
||||
|
||||
import numpy as np
|
||||
|
||||
|
||||
class BaseEmbedder(ABC):
|
||||
"""
|
||||
Abstract base class for text embedding models
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def embed_chunks(self, chunks: List[str]) -> np.ndarray:
|
||||
"""
|
||||
Generate embeddings for a list of text chunks
|
||||
|
||||
Args:
|
||||
chunks: List of text chunks to embed
|
||||
|
||||
Returns:
|
||||
Array of embeddings
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def embed_texts(self, texts: List[str]) -> np.ndarray:
|
||||
"""
|
||||
Generate embeddings for a list of texts
|
||||
|
||||
Args:
|
||||
texts: List of texts to embed
|
||||
|
||||
Returns:
|
||||
Array of embeddings
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def embed_text(self, text: str) -> np.ndarray:
|
||||
"""
|
||||
Generate embedding for a single text
|
||||
|
||||
Args:
|
||||
text: Text to embed
|
||||
|
||||
Returns:
|
||||
Embedding array
|
||||
"""
|
||||
pass
|
||||
|
||||
@property
|
||||
@abstractmethod
|
||||
def dimension(self) -> int:
|
||||
"""Get the dimension of the embeddings"""
|
||||
pass
|
||||
93
src/crewai/knowledge/embedder/fastembed.py
Normal file
93
src/crewai/knowledge/embedder/fastembed.py
Normal file
@@ -0,0 +1,93 @@
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Union
|
||||
|
||||
import numpy as np
|
||||
|
||||
from .base_embedder import BaseEmbedder
|
||||
|
||||
try:
|
||||
from fastembed_gpu import TextEmbedding # type: ignore
|
||||
|
||||
FASTEMBED_AVAILABLE = True
|
||||
except ImportError:
|
||||
try:
|
||||
from fastembed import TextEmbedding
|
||||
|
||||
FASTEMBED_AVAILABLE = True
|
||||
except ImportError:
|
||||
FASTEMBED_AVAILABLE = False
|
||||
|
||||
|
||||
class FastEmbed(BaseEmbedder):
|
||||
"""
|
||||
A wrapper class for text embedding models using FastEmbed
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
model_name: str = "BAAI/bge-small-en-v1.5",
|
||||
cache_dir: Optional[Union[str, Path]] = None,
|
||||
):
|
||||
"""
|
||||
Initialize the embedding model
|
||||
|
||||
Args:
|
||||
model_name: Name of the model to use
|
||||
cache_dir: Directory to cache the model
|
||||
gpu: Whether to use GPU acceleration
|
||||
"""
|
||||
if not FASTEMBED_AVAILABLE:
|
||||
raise ImportError(
|
||||
"FastEmbed is not installed. Please install it with: "
|
||||
"uv pip install fastembed or uv pip install fastembed-gpu for GPU support"
|
||||
)
|
||||
|
||||
self.model = TextEmbedding(
|
||||
model_name=model_name,
|
||||
cache_dir=str(cache_dir) if cache_dir else None,
|
||||
)
|
||||
|
||||
def embed_chunks(self, chunks: List[str]) -> List[np.ndarray]:
|
||||
"""
|
||||
Generate embeddings for a list of text chunks
|
||||
|
||||
Args:
|
||||
chunks: List of text chunks to embed
|
||||
|
||||
Returns:
|
||||
List of embeddings
|
||||
"""
|
||||
embeddings = list(self.model.embed(chunks))
|
||||
return embeddings
|
||||
|
||||
def embed_texts(self, texts: List[str]) -> List[np.ndarray]:
|
||||
"""
|
||||
Generate embeddings for a list of texts
|
||||
|
||||
Args:
|
||||
texts: List of texts to embed
|
||||
|
||||
Returns:
|
||||
List of embeddings
|
||||
"""
|
||||
embeddings = list(self.model.embed(texts))
|
||||
return embeddings
|
||||
|
||||
def embed_text(self, text: str) -> np.ndarray:
|
||||
"""
|
||||
Generate embedding for a single text
|
||||
|
||||
Args:
|
||||
text: Text to embed
|
||||
|
||||
Returns:
|
||||
Embedding array
|
||||
"""
|
||||
return self.embed_texts([text])[0]
|
||||
|
||||
@property
|
||||
def dimension(self) -> int:
|
||||
"""Get the dimension of the embeddings"""
|
||||
# Generate a test embedding to get dimensions
|
||||
test_embed = self.embed_text("test")
|
||||
return len(test_embed)
|
||||
54
src/crewai/knowledge/knowledge.py
Normal file
54
src/crewai/knowledge/knowledge.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import os
|
||||
|
||||
from typing import List, Optional, Dict, Any
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||
from crewai.utilities.logger import Logger
|
||||
from crewai.utilities.constants import DEFAULT_SCORE_THRESHOLD
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false" # removes logging from fastembed
|
||||
|
||||
|
||||
class Knowledge(BaseModel):
|
||||
"""
|
||||
Knowledge is a collection of sources and setup for the vector store to save and query relevant context.
|
||||
Args:
|
||||
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
||||
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
|
||||
embedder_config: Optional[Dict[str, Any]] = None
|
||||
"""
|
||||
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
|
||||
embedder_config: Optional[Dict[str, Any]] = None
|
||||
|
||||
def __init__(self, embedder_config: Optional[Dict[str, Any]] = None, **data):
|
||||
super().__init__(**data)
|
||||
self.storage = KnowledgeStorage(embedder_config=embedder_config or None)
|
||||
|
||||
try:
|
||||
for source in self.sources:
|
||||
source.add()
|
||||
except Exception as e:
|
||||
Logger(verbose=True).log(
|
||||
"warning",
|
||||
f"Failed to init knowledge: {e}",
|
||||
color="yellow",
|
||||
)
|
||||
|
||||
def query(
|
||||
self, query: List[str], limit: int = 3, preference: Optional[str] = None
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Query across all knowledge sources to find the most relevant information.
|
||||
Returns the top_k most relevant chunks.
|
||||
"""
|
||||
|
||||
results = self.storage.search(
|
||||
query,
|
||||
limit,
|
||||
filter={"preference": preference} if preference else None,
|
||||
score_threshold=DEFAULT_SCORE_THRESHOLD,
|
||||
)
|
||||
return results
|
||||
0
src/crewai/knowledge/source/__init__.py
Normal file
0
src/crewai/knowledge/source/__init__.py
Normal file
36
src/crewai/knowledge/source/base_file_knowledge_source.py
Normal file
36
src/crewai/knowledge/source/base_file_knowledge_source.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from pathlib import Path
|
||||
from typing import Union, List
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from typing import Dict, Any
|
||||
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||
|
||||
|
||||
class BaseFileKnowledgeSource(BaseKnowledgeSource):
|
||||
"""Base class for knowledge sources that load content from files."""
|
||||
|
||||
file_path: Union[Path, List[Path]] = Field(...)
|
||||
content: Dict[Path, str] = Field(init=False, default_factory=dict)
|
||||
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
|
||||
|
||||
def model_post_init(self, _):
|
||||
"""Post-initialization method to load content."""
|
||||
self.content = self.load_content()
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess file content. Should be overridden by subclasses."""
|
||||
paths = [self.file_path] if isinstance(self.file_path, Path) else self.file_path
|
||||
|
||||
for path in paths:
|
||||
if not path.exists():
|
||||
raise FileNotFoundError(f"File not found: {path}")
|
||||
if not path.is_file():
|
||||
raise ValueError(f"Path is not a file: {path}")
|
||||
return {}
|
||||
|
||||
def save_documents(self, metadata: Dict[str, Any]):
|
||||
"""Save the documents to the storage."""
|
||||
chunk_metadatas = [metadata.copy() for _ in self.chunks]
|
||||
self.storage.save(self.chunks, chunk_metadatas)
|
||||
48
src/crewai/knowledge/source/base_knowledge_source.py
Normal file
48
src/crewai/knowledge/source/base_knowledge_source.py
Normal file
@@ -0,0 +1,48 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import List, Dict, Any
|
||||
|
||||
import numpy as np
|
||||
from pydantic import BaseModel, ConfigDict, Field
|
||||
|
||||
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||
|
||||
|
||||
class BaseKnowledgeSource(BaseModel, ABC):
|
||||
"""Abstract base class for knowledge sources."""
|
||||
|
||||
chunk_size: int = 4000
|
||||
chunk_overlap: int = 200
|
||||
chunks: List[str] = Field(default_factory=list)
|
||||
chunk_embeddings: List[np.ndarray] = Field(default_factory=list)
|
||||
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
|
||||
metadata: Dict[str, Any] = Field(default_factory=dict)
|
||||
|
||||
@abstractmethod
|
||||
def load_content(self) -> Dict[Any, str]:
|
||||
"""Load and preprocess content from the source."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def add(self) -> None:
|
||||
"""Process content, chunk it, compute embeddings, and save them."""
|
||||
pass
|
||||
|
||||
def get_embeddings(self) -> List[np.ndarray]:
|
||||
"""Return the list of embeddings for the chunks."""
|
||||
return self.chunk_embeddings
|
||||
|
||||
def _chunk_text(self, text: str) -> List[str]:
|
||||
"""Utility method to split text into chunks."""
|
||||
return [
|
||||
text[i : i + self.chunk_size]
|
||||
for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
|
||||
]
|
||||
|
||||
def save_documents(self, metadata: Dict[str, Any]):
|
||||
"""
|
||||
Save the documents to the storage.
|
||||
This method should be called after the chunks and embeddings are generated.
|
||||
"""
|
||||
self.storage.save(self.chunks, metadata)
|
||||
44
src/crewai/knowledge/source/csv_knowledge_source.py
Normal file
44
src/crewai/knowledge/source/csv_knowledge_source.py
Normal file
@@ -0,0 +1,44 @@
|
||||
import csv
|
||||
from typing import Dict, List
|
||||
from pathlib import Path
|
||||
|
||||
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||
|
||||
|
||||
class CSVKnowledgeSource(BaseFileKnowledgeSource):
|
||||
"""A knowledge source that stores and queries CSV file content using embeddings."""
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess CSV file content."""
|
||||
super().load_content() # Validate the file path
|
||||
|
||||
file_path = (
|
||||
self.file_path[0] if isinstance(self.file_path, list) else self.file_path
|
||||
)
|
||||
file_path = Path(file_path) if isinstance(file_path, str) else file_path
|
||||
|
||||
with open(file_path, "r", encoding="utf-8") as csvfile:
|
||||
reader = csv.reader(csvfile)
|
||||
content = ""
|
||||
for row in reader:
|
||||
content += " ".join(row) + "\n"
|
||||
return {file_path: content}
|
||||
|
||||
def add(self) -> None:
|
||||
"""
|
||||
Add CSV file content to the knowledge source, chunk it, compute embeddings,
|
||||
and save the embeddings.
|
||||
"""
|
||||
content_str = (
|
||||
str(self.content) if isinstance(self.content, dict) else self.content
|
||||
)
|
||||
new_chunks = self._chunk_text(content_str)
|
||||
self.chunks.extend(new_chunks)
|
||||
self.save_documents(metadata=self.metadata)
|
||||
|
||||
def _chunk_text(self, text: str) -> List[str]:
|
||||
"""Utility method to split text into chunks."""
|
||||
return [
|
||||
text[i : i + self.chunk_size]
|
||||
for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
|
||||
]
|
||||
56
src/crewai/knowledge/source/excel_knowledge_source.py
Normal file
56
src/crewai/knowledge/source/excel_knowledge_source.py
Normal file
@@ -0,0 +1,56 @@
|
||||
from typing import Dict, List
|
||||
from pathlib import Path
|
||||
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||
|
||||
|
||||
class ExcelKnowledgeSource(BaseFileKnowledgeSource):
|
||||
"""A knowledge source that stores and queries Excel file content using embeddings."""
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess Excel file content."""
|
||||
super().load_content() # Validate the file path
|
||||
pd = self._import_dependencies()
|
||||
|
||||
if isinstance(self.file_path, list):
|
||||
file_path = self.file_path[0]
|
||||
else:
|
||||
file_path = self.file_path
|
||||
|
||||
df = pd.read_excel(file_path)
|
||||
content = df.to_csv(index=False)
|
||||
return {file_path: content}
|
||||
|
||||
def _import_dependencies(self):
|
||||
"""Dynamically import dependencies."""
|
||||
try:
|
||||
import openpyxl # noqa
|
||||
import pandas as pd
|
||||
|
||||
return pd
|
||||
except ImportError as e:
|
||||
missing_package = str(e).split()[-1]
|
||||
raise ImportError(
|
||||
f"{missing_package} is not installed. Please install it with: pip install {missing_package}"
|
||||
)
|
||||
|
||||
def add(self) -> None:
|
||||
"""
|
||||
Add Excel file content to the knowledge source, chunk it, compute embeddings,
|
||||
and save the embeddings.
|
||||
"""
|
||||
# Convert dictionary values to a single string if content is a dictionary
|
||||
if isinstance(self.content, dict):
|
||||
content_str = "\n".join(str(value) for value in self.content.values())
|
||||
else:
|
||||
content_str = str(self.content)
|
||||
|
||||
new_chunks = self._chunk_text(content_str)
|
||||
self.chunks.extend(new_chunks)
|
||||
self.save_documents(metadata=self.metadata)
|
||||
|
||||
def _chunk_text(self, text: str) -> List[str]:
|
||||
"""Utility method to split text into chunks."""
|
||||
return [
|
||||
text[i : i + self.chunk_size]
|
||||
for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
|
||||
]
|
||||
54
src/crewai/knowledge/source/json_knowledge_source.py
Normal file
54
src/crewai/knowledge/source/json_knowledge_source.py
Normal file
@@ -0,0 +1,54 @@
|
||||
import json
|
||||
from typing import Any, Dict, List
|
||||
from pathlib import Path
|
||||
|
||||
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||
|
||||
|
||||
class JSONKnowledgeSource(BaseFileKnowledgeSource):
|
||||
"""A knowledge source that stores and queries JSON file content using embeddings."""
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess JSON file content."""
|
||||
super().load_content() # Validate the file path
|
||||
paths = [self.file_path] if isinstance(self.file_path, Path) else self.file_path
|
||||
|
||||
content: Dict[Path, str] = {}
|
||||
for path in paths:
|
||||
with open(path, "r", encoding="utf-8") as json_file:
|
||||
data = json.load(json_file)
|
||||
content[path] = self._json_to_text(data)
|
||||
return content
|
||||
|
||||
def _json_to_text(self, data: Any, level: int = 0) -> str:
|
||||
"""Recursively convert JSON data to a text representation."""
|
||||
text = ""
|
||||
indent = " " * level
|
||||
if isinstance(data, dict):
|
||||
for key, value in data.items():
|
||||
text += f"{indent}{key}: {self._json_to_text(value, level + 1)}\n"
|
||||
elif isinstance(data, list):
|
||||
for item in data:
|
||||
text += f"{indent}- {self._json_to_text(item, level + 1)}\n"
|
||||
else:
|
||||
text += f"{str(data)}"
|
||||
return text
|
||||
|
||||
def add(self) -> None:
|
||||
"""
|
||||
Add JSON file content to the knowledge source, chunk it, compute embeddings,
|
||||
and save the embeddings.
|
||||
"""
|
||||
content_str = (
|
||||
str(self.content) if isinstance(self.content, dict) else self.content
|
||||
)
|
||||
new_chunks = self._chunk_text(content_str)
|
||||
self.chunks.extend(new_chunks)
|
||||
self.save_documents(metadata=self.metadata)
|
||||
|
||||
def _chunk_text(self, text: str) -> List[str]:
|
||||
"""Utility method to split text into chunks."""
|
||||
return [
|
||||
text[i : i + self.chunk_size]
|
||||
for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
|
||||
]
|
||||
54
src/crewai/knowledge/source/pdf_knowledge_source.py
Normal file
54
src/crewai/knowledge/source/pdf_knowledge_source.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from typing import List, Dict
|
||||
from pathlib import Path
|
||||
|
||||
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||
|
||||
|
||||
class PDFKnowledgeSource(BaseFileKnowledgeSource):
|
||||
"""A knowledge source that stores and queries PDF file content using embeddings."""
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess PDF file content."""
|
||||
super().load_content() # Validate the file paths
|
||||
pdfplumber = self._import_pdfplumber()
|
||||
|
||||
paths = [self.file_path] if isinstance(self.file_path, Path) else self.file_path
|
||||
content = {}
|
||||
|
||||
for path in paths:
|
||||
text = ""
|
||||
with pdfplumber.open(path) as pdf:
|
||||
for page in pdf.pages:
|
||||
page_text = page.extract_text()
|
||||
if page_text:
|
||||
text += page_text + "\n"
|
||||
content[path] = text
|
||||
return content
|
||||
|
||||
def _import_pdfplumber(self):
|
||||
"""Dynamically import pdfplumber."""
|
||||
try:
|
||||
import pdfplumber
|
||||
|
||||
return pdfplumber
|
||||
except ImportError:
|
||||
raise ImportError(
|
||||
"pdfplumber is not installed. Please install it with: pip install pdfplumber"
|
||||
)
|
||||
|
||||
def add(self) -> None:
|
||||
"""
|
||||
Add PDF file content to the knowledge source, chunk it, compute embeddings,
|
||||
and save the embeddings.
|
||||
"""
|
||||
for _, text in self.content.items():
|
||||
new_chunks = self._chunk_text(text)
|
||||
self.chunks.extend(new_chunks)
|
||||
self.save_documents(metadata=self.metadata)
|
||||
|
||||
def _chunk_text(self, text: str) -> List[str]:
|
||||
"""Utility method to split text into chunks."""
|
||||
return [
|
||||
text[i : i + self.chunk_size]
|
||||
for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
|
||||
]
|
||||
33
src/crewai/knowledge/source/string_knowledge_source.py
Normal file
33
src/crewai/knowledge/source/string_knowledge_source.py
Normal file
@@ -0,0 +1,33 @@
|
||||
from typing import List
|
||||
|
||||
from pydantic import Field
|
||||
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
|
||||
|
||||
class StringKnowledgeSource(BaseKnowledgeSource):
|
||||
"""A knowledge source that stores and queries plain text content using embeddings."""
|
||||
|
||||
content: str = Field(...)
|
||||
|
||||
def model_post_init(self, _):
|
||||
"""Post-initialization method to validate content."""
|
||||
self.load_content()
|
||||
|
||||
def load_content(self):
|
||||
"""Validate string content."""
|
||||
if not isinstance(self.content, str):
|
||||
raise ValueError("StringKnowledgeSource only accepts string content")
|
||||
|
||||
def add(self) -> None:
|
||||
"""Add string content to the knowledge source, chunk it, compute embeddings, and save them."""
|
||||
new_chunks = self._chunk_text(self.content)
|
||||
self.chunks.extend(new_chunks)
|
||||
self.save_documents(metadata=self.metadata)
|
||||
|
||||
def _chunk_text(self, text: str) -> List[str]:
|
||||
"""Utility method to split text into chunks."""
|
||||
return [
|
||||
text[i : i + self.chunk_size]
|
||||
for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
|
||||
]
|
||||
35
src/crewai/knowledge/source/text_file_knowledge_source.py
Normal file
35
src/crewai/knowledge/source/text_file_knowledge_source.py
Normal file
@@ -0,0 +1,35 @@
|
||||
from typing import Dict, List
|
||||
from pathlib import Path
|
||||
|
||||
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||
|
||||
|
||||
class TextFileKnowledgeSource(BaseFileKnowledgeSource):
|
||||
"""A knowledge source that stores and queries text file content using embeddings."""
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess text file content."""
|
||||
super().load_content()
|
||||
paths = [self.file_path] if isinstance(self.file_path, Path) else self.file_path
|
||||
content = {}
|
||||
for path in paths:
|
||||
with path.open("r", encoding="utf-8") as f:
|
||||
content[path] = f.read() # type: ignore
|
||||
return content
|
||||
|
||||
def add(self) -> None:
|
||||
"""
|
||||
Add text file content to the knowledge source, chunk it, compute embeddings,
|
||||
and save the embeddings.
|
||||
"""
|
||||
for _, text in self.content.items():
|
||||
new_chunks = self._chunk_text(text)
|
||||
self.chunks.extend(new_chunks)
|
||||
self.save_documents(metadata=self.metadata)
|
||||
|
||||
def _chunk_text(self, text: str) -> List[str]:
|
||||
"""Utility method to split text into chunks."""
|
||||
return [
|
||||
text[i : i + self.chunk_size]
|
||||
for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
|
||||
]
|
||||
0
src/crewai/knowledge/storage/__init__.py
Normal file
0
src/crewai/knowledge/storage/__init__.py
Normal file
29
src/crewai/knowledge/storage/base_knowledge_storage.py
Normal file
29
src/crewai/knowledge/storage/base_knowledge_storage.py
Normal file
@@ -0,0 +1,29 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
|
||||
class BaseKnowledgeStorage(ABC):
|
||||
"""Abstract base class for knowledge storage implementations."""
|
||||
|
||||
@abstractmethod
|
||||
def search(
|
||||
self,
|
||||
query: List[str],
|
||||
limit: int = 3,
|
||||
filter: Optional[dict] = None,
|
||||
score_threshold: float = 0.35,
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""Search for documents in the knowledge base."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def save(
|
||||
self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]
|
||||
) -> None:
|
||||
"""Save documents to the knowledge base."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def reset(self) -> None:
|
||||
"""Reset the knowledge base."""
|
||||
pass
|
||||
132
src/crewai/knowledge/storage/knowledge_storage.py
Normal file
132
src/crewai/knowledge/storage/knowledge_storage.py
Normal file
@@ -0,0 +1,132 @@
import contextlib
import io
import logging
import chromadb
import os
from crewai.utilities.paths import db_storage_path
from typing import Optional, List
from typing import Dict, Any
from crewai.utilities import EmbeddingConfigurator
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
import hashlib


@contextlib.contextmanager
def suppress_logging(
    logger_name="chromadb.segment.impl.vector.local_persistent_hnsw",
    level=logging.ERROR,
):
    logger = logging.getLogger(logger_name)
    original_level = logger.getEffectiveLevel()
    logger.setLevel(level)
    with (
        contextlib.redirect_stdout(io.StringIO()),
        contextlib.redirect_stderr(io.StringIO()),
        contextlib.suppress(UserWarning),
    ):
        yield
    logger.setLevel(original_level)


class KnowledgeStorage(BaseKnowledgeStorage):
    """
    Extends Storage to handle embeddings for memory entries, improving
    search efficiency.
    """

    collection: Optional[chromadb.Collection] = None

    def __init__(self, embedder_config: Optional[Dict[str, Any]] = None):
        self._initialize_app(embedder_config or {})

    def search(
        self,
        query: List[str],
        limit: int = 3,
        filter: Optional[dict] = None,
        score_threshold: float = 0.35,
    ) -> List[Dict[str, Any]]:
        with suppress_logging():
            if self.collection:
                fetched = self.collection.query(
                    query_texts=query,
                    n_results=limit,
                    where=filter,
                )
                results = []
                for i in range(len(fetched["ids"][0])):  # type: ignore
                    result = {
                        "id": fetched["ids"][0][i],  # type: ignore
                        "metadata": fetched["metadatas"][0][i],  # type: ignore
                        "context": fetched["documents"][0][i],  # type: ignore
                        "score": fetched["distances"][0][i],  # type: ignore
                    }
                    if result["score"] >= score_threshold:  # type: ignore
                        results.append(result)
                return results
            else:
                raise Exception("Collection not initialized")

    def _initialize_app(self, embedder_config: Optional[Dict[str, Any]] = None):
        import chromadb
        from chromadb.config import Settings

        self._set_embedder_config(embedder_config)

        chroma_client = chromadb.PersistentClient(
            path=f"{db_storage_path()}/knowledge",
            settings=Settings(allow_reset=True),
        )

        self.app = chroma_client

        try:
            self.collection = self.app.get_or_create_collection(name="knowledge")
        except Exception:
            raise Exception("Failed to create or get collection")

    def reset(self):
        if self.app:
            self.app.reset()

    def save(
        self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]
    ):
        if self.collection:
            metadatas = [metadata] if isinstance(metadata, dict) else metadata

            ids = [
                hashlib.sha256(doc.encode("utf-8")).hexdigest() for doc in documents
            ]

            self.collection.upsert(
                documents=documents,
                metadatas=metadatas,
                ids=ids,
            )
        else:
            raise Exception("Collection not initialized")

    def _create_default_embedding_function(self):
        from chromadb.utils.embedding_functions.openai_embedding_function import (
            OpenAIEmbeddingFunction,
        )

        return OpenAIEmbeddingFunction(
            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
        )

    def _set_embedder_config(
        self, embedder_config: Optional[Dict[str, Any]] = None
    ) -> None:
        """Set the embedding configuration for the knowledge storage.

        Args:
            embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
                If None or empty, defaults to the default embedding function.
        """
        self.embedder_config = (
            EmbeddingConfigurator().configure_embedder(embedder_config)
            if embedder_config
            else self._create_default_embedding_function()
        )
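For orientation, a minimal usage sketch of the new KnowledgeStorage class. This is illustrative only (not part of the diff) and assumes OPENAI_API_KEY is exported so the default embedding function can be built.

from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage

# Build storage with the default OpenAI embedder (no embedder_config passed).
storage = KnowledgeStorage()

# Documents are keyed by their SHA-256 hash, so re-saving the same text is idempotent.
storage.save(
    documents=["CrewAI supports pluggable knowledge sources."],
    metadata={"source": "notes.txt"},
)

# search() returns dicts with "id", "metadata", "context" and "score" keys.
for hit in storage.search(["knowledge sources"], limit=3):
    print(hit["score"], hit["context"])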
@@ -1,7 +1,10 @@
import logging
import sys
import threading
import warnings
from contextlib import contextmanager
from typing import Any, Dict, List, Optional, Union
import logging
import warnings

import litellm
from litellm import get_supported_openai_params

@@ -9,20 +12,26 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)

import sys
import io

class FilteredStream:
    def __init__(self, original_stream):
        self._original_stream = original_stream
        self._lock = threading.Lock()

class FilteredStream(io.StringIO):
    def write(self, s):
        if (
            "Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
            in s
            or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
            in s
        ):
            return
        super().write(s)
    def write(self, s) -> int:
        with self._lock:
            if (
                "Give Feedback / Get Help: https://github.com/BerriAI/litellm/issues/new"
                in s
                or "LiteLLM.Info: If you need to debug this error, use `litellm.set_verbose=True`"
                in s
            ):
                return 0
            return self._original_stream.write(s)

    def flush(self):
        with self._lock:
            return self._original_stream.flush()


LLM_CONTEXT_WINDOW_SIZES = {
@@ -60,8 +69,8 @@ def suppress_warnings():
    # Redirect stdout and stderr
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    sys.stdout = FilteredStream()
    sys.stderr = FilteredStream()
    sys.stdout = FilteredStream(old_stdout)
    sys.stderr = FilteredStream(old_stderr)

    try:
        yield
@@ -118,12 +127,12 @@ class LLM:

        litellm.drop_params = True
        litellm.set_verbose = False
        litellm.callbacks = callbacks
        self.set_callbacks(callbacks)

    def call(self, messages: List[Dict[str, str]], callbacks: List[Any] = []) -> str:
        with suppress_warnings():
            if callbacks and len(callbacks) > 0:
                litellm.callbacks = callbacks
                self.set_callbacks(callbacks)

            try:
                params = {
@@ -181,3 +190,15 @@ class LLM:
    def get_context_window_size(self) -> int:
        # Only using 75% of the context window size to avoid cutting the message in the middle
        return int(LLM_CONTEXT_WINDOW_SIZES.get(self.model, 8192) * 0.75)

    def set_callbacks(self, callbacks: List[Any]):
        callback_types = [type(callback) for callback in callbacks]
        for callback in litellm.success_callback[:]:
            if type(callback) in callback_types:
                litellm.success_callback.remove(callback)

        for callback in litellm._async_success_callback[:]:
            if type(callback) in callback_types:
                litellm._async_success_callback.remove(callback)

        litellm.callbacks = callbacks
@@ -1,5 +1,6 @@
from .entity.entity_memory import EntityMemory
from .long_term.long_term_memory import LongTermMemory
from .short_term.short_term_memory import ShortTermMemory
from .user.user_memory import UserMemory

__all__ = ["EntityMemory", "LongTermMemory", "ShortTermMemory"]
__all__ = ["UserMemory", "EntityMemory", "LongTermMemory", "ShortTermMemory"]
@@ -1,13 +1,25 @@
from typing import Optional
from typing import Optional, Dict, Any

from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory
from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory, UserMemory


class ContextualMemory:
    def __init__(self, stm: ShortTermMemory, ltm: LongTermMemory, em: EntityMemory):
    def __init__(
        self,
        memory_config: Optional[Dict[str, Any]],
        stm: ShortTermMemory,
        ltm: LongTermMemory,
        em: EntityMemory,
        um: UserMemory,
    ):
        if memory_config is not None:
            self.memory_provider = memory_config.get("provider")
        else:
            self.memory_provider = None
        self.stm = stm
        self.ltm = ltm
        self.em = em
        self.um = um

    def build_context_for_task(self, task, context) -> str:
        """
@@ -23,6 +35,8 @@ class ContextualMemory:
        context.append(self._fetch_ltm_context(task.description))
        context.append(self._fetch_stm_context(query))
        context.append(self._fetch_entity_context(query))
        if self.memory_provider == "mem0":
            context.append(self._fetch_user_context(query))
        return "\n".join(filter(None, context))

    def _fetch_stm_context(self, query) -> str:
@@ -32,7 +46,10 @@ class ContextualMemory:
        """
        stm_results = self.stm.search(query)
        formatted_results = "\n".join(
            [f"- {result['context']}" for result in stm_results]
            [
                f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
                for result in stm_results
            ]
        )
        return f"Recent Insights:\n{formatted_results}" if stm_results else ""

@@ -62,6 +79,26 @@ class ContextualMemory:
        """
        em_results = self.em.search(query)
        formatted_results = "\n".join(
            [f"- {result['context']}" for result in em_results]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
            [
                f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
                for result in em_results
            ]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
        )
        return f"Entities:\n{formatted_results}" if em_results else ""

    def _fetch_user_context(self, query: str) -> str:
        """
        Fetches and formats relevant user information from User Memory.
        Args:
            query (str): The search query to find relevant user memories.
        Returns:
            str: Formatted user memories as bullet points, or an empty string if none found.
        """
        user_memories = self.um.search(query)
        if not user_memories:
            return ""

        formatted_memories = "\n".join(
            f"- {result['memory']}" for result in user_memories
        )
        return f"User memories/preferences:\n{formatted_memories}"
@@ -11,21 +11,43 @@ class EntityMemory(Memory):
    """

    def __init__(self, crew=None, embedder_config=None, storage=None):
        storage = (
            storage
            if storage
            else RAGStorage(
                type="entities",
                allow_reset=True,
                embedder_config=embedder_config,
                crew=crew,
        if hasattr(crew, "memory_config") and crew.memory_config is not None:
            self.memory_provider = crew.memory_config.get("provider")
        else:
            self.memory_provider = None

        if self.memory_provider == "mem0":
            try:
                from crewai.memory.storage.mem0_storage import Mem0Storage
            except ImportError:
                raise ImportError(
                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
                )
            storage = Mem0Storage(type="entities", crew=crew)
        else:
            storage = (
                storage
                if storage
                else RAGStorage(
                    type="entities",
                    allow_reset=True,
                    embedder_config=embedder_config,
                    crew=crew,
                )
            )
        )
        super().__init__(storage)

    def save(self, item: EntityMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
        """Saves an entity item into the SQLite storage."""
        data = f"{item.name}({item.type}): {item.description}"
        if self.memory_provider == "mem0":
            data = f"""
            Remember details about the following entity:
            Name: {item.name}
            Type: {item.type}
            Entity Description: {item.description}
            """
        else:
            data = f"{item.name}({item.type}): {item.description}"
        super().save(data, item.metadata)

    def reset(self) -> None:
@@ -23,5 +23,12 @@ class Memory:

        self.storage.save(value, metadata)

    def search(self, query: str) -> List[Dict[str, Any]]:
        return self.storage.search(query)
    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ) -> List[Any]:
        return self.storage.search(
            query=query, limit=limit, score_threshold=score_threshold
        )
@@ -14,13 +14,27 @@ class ShortTermMemory(Memory):
    """

    def __init__(self, crew=None, embedder_config=None, storage=None):
        storage = (
            storage
            if storage
            else RAGStorage(
                type="short_term", embedder_config=embedder_config, crew=crew
        if hasattr(crew, "memory_config") and crew.memory_config is not None:
            self.memory_provider = crew.memory_config.get("provider")
        else:
            self.memory_provider = None

        if self.memory_provider == "mem0":
            try:
                from crewai.memory.storage.mem0_storage import Mem0Storage
            except ImportError:
                raise ImportError(
                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
                )
            storage = Mem0Storage(type="short_term", crew=crew)
        else:
            storage = (
                storage
                if storage
                else RAGStorage(
                    type="short_term", embedder_config=embedder_config, crew=crew
                )
            )
        )
        super().__init__(storage)

    def save(
@@ -30,11 +44,20 @@ class ShortTermMemory(Memory):
        agent: Optional[str] = None,
    ) -> None:
        item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
        if self.memory_provider == "mem0":
            item.data = f"Remember the following insights from Agent run: {item.data}"

        super().save(value=item.data, metadata=item.metadata, agent=item.agent)

    def search(self, query: str, score_threshold: float = 0.35):
        return self.storage.search(query=query, score_threshold=score_threshold)  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters
    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ):
        return self.storage.search(
            query=query, limit=limit, score_threshold=score_threshold
        )  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters

    def reset(self) -> None:
        try:
@@ -7,8 +7,10 @@ class Storage:
    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
        pass

    def search(self, key: str) -> List[Dict[str, Any]]:  # type: ignore
        pass
    def search(
        self, query: str, limit: int, score_threshold: float
    ) -> Dict[str, Any] | List[Any]:
        return {}

    def reset(self) -> None:
        pass
@@ -70,7 +70,7 @@ class KickoffTaskOutputsSQLiteStorage:
                    task.expected_output,
                    json.dumps(output, cls=CrewJSONEncoder),
                    task_index,
                    json.dumps(inputs),
                    json.dumps(inputs, cls=CrewJSONEncoder),
                    was_replayed,
                ),
            )
@@ -103,7 +103,7 @@ class KickoffTaskOutputsSQLiteStorage:
                else value
            )

        query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"
        query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"  # nosec
        values.append(task_index)

        cursor.execute(query, tuple(values))

@@ -83,7 +83,7 @@ class LTMSQLiteStorage:
                WHERE task_description = ?
                ORDER BY datetime DESC, score ASC
                LIMIT {latest_n}
            """,
            """,  # nosec
            (task_description,),
        )
        rows = cursor.fetchall()
104
src/crewai/memory/storage/mem0_storage.py
Normal file
@@ -0,0 +1,104 @@
import os
from typing import Any, Dict, List

from mem0 import MemoryClient
from crewai.memory.storage.interface import Storage


class Mem0Storage(Storage):
    """
    Extends Storage to handle embedding and searching across entities using Mem0.
    """

    def __init__(self, type, crew=None):
        super().__init__()

        if type not in ["user", "short_term", "long_term", "entities"]:
            raise ValueError("Invalid type for Mem0Storage. Must be 'user' or 'agent'.")

        self.memory_type = type
        self.crew = crew
        self.memory_config = crew.memory_config

        # User ID is required for user memory type "user" since it's used as a unique identifier for the user.
        user_id = self._get_user_id()
        if type == "user" and not user_id:
            raise ValueError("User ID is required for user memory type")

        # API key in memory config overrides the environment variable
        mem0_api_key = self.memory_config.get("config", {}).get("api_key") or os.getenv(
            "MEM0_API_KEY"
        )
        self.memory = MemoryClient(api_key=mem0_api_key)

    def _sanitize_role(self, role: str) -> str:
        """
        Sanitizes agent roles to ensure valid directory names.
        """
        return role.replace("\n", "").replace(" ", "_").replace("/", "_")

    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
        user_id = self._get_user_id()
        agent_name = self._get_agent_name()
        if self.memory_type == "user":
            self.memory.add(value, user_id=user_id, metadata={**metadata})
        elif self.memory_type == "short_term":
            agent_name = self._get_agent_name()
            self.memory.add(
                value, agent_id=agent_name, metadata={"type": "short_term", **metadata}
            )
        elif self.memory_type == "long_term":
            agent_name = self._get_agent_name()
            self.memory.add(
                value,
                agent_id=agent_name,
                infer=False,
                metadata={"type": "long_term", **metadata},
            )
        elif self.memory_type == "entities":
            entity_name = None
            self.memory.add(
                value, user_id=entity_name, metadata={"type": "entity", **metadata}
            )

    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ) -> List[Any]:
        params = {"query": query, "limit": limit}
        if self.memory_type == "user":
            user_id = self._get_user_id()
            params["user_id"] = user_id
        elif self.memory_type == "short_term":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "short_term"}
        elif self.memory_type == "long_term":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "long_term"}
        elif self.memory_type == "entities":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "entity"}

        # Discard the filters for now since we create the filters
        # automatically when the crew is created.
        results = self.memory.search(**params)
        return [r for r in results if r["score"] >= score_threshold]

    def _get_user_id(self):
        if self.memory_type == "user":
            if hasattr(self, "memory_config") and self.memory_config is not None:
                return self.memory_config.get("config", {}).get("user_id")
            else:
                return None
        return None

    def _get_agent_name(self):
        agents = self.crew.agents if self.crew else []
        agents = [self._sanitize_role(agent.role) for agent in agents]
        agents = "_".join(agents)
        return agents
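As a rough sketch of how this storage is expected to be wired up: the memory classes above switch to Mem0Storage when the crew's memory_config names the "mem0" provider. The crew, agent, and task below are hypothetical and not taken from the diff; it assumes crewai and mem0ai are installed and MEM0_API_KEY is set (or an api_key is supplied in the config).

from crewai import Agent, Crew, Task

researcher = Agent(
    role="Researcher",
    goal="Answer questions using remembered user preferences",
    backstory="Keeps notes about the user between runs",
)
summary_task = Task(
    description="Summarize what we know about the user",
    expected_output="A short summary",
    agent=researcher,
)

# memory_config drives the provider branches added above: short-term, long-term,
# entity and user memory all route to Mem0Storage when provider == "mem0".
# "user_id" is required because UserMemory builds Mem0Storage(type="user").
crew = Crew(
    agents=[researcher],
    tasks=[summary_task],
    memory=True,
    memory_config={"provider": "mem0", "config": {"user_id": "john"}},
)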
@@ -4,13 +4,12 @@ import logging
import os
import shutil
import uuid

from typing import Any, Dict, List, Optional
from chromadb.api import ClientAPI
from crewai.memory.storage.base_rag_storage import BaseRAGStorage
from crewai.utilities.paths import db_storage_path
from chromadb.api import ClientAPI
from chromadb.api.types import validate_embedding_function
from chromadb import Documents, EmbeddingFunction, Embeddings
from typing import cast
from crewai.utilities import EmbeddingConfigurator


@contextlib.contextmanager
@@ -21,9 +20,11 @@ def suppress_logging(
    logger = logging.getLogger(logger_name)
    original_level = logger.getEffectiveLevel()
    logger.setLevel(level)
    with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(
        io.StringIO()
    ), contextlib.suppress(UserWarning):
    with (
        contextlib.redirect_stdout(io.StringIO()),
        contextlib.redirect_stderr(io.StringIO()),
        contextlib.suppress(UserWarning),
    ):
        yield
    logger.setLevel(original_level)

@@ -49,77 +50,8 @@ class RAGStorage(BaseRAGStorage):
        self._initialize_app()

    def _set_embedder_config(self):
        import chromadb.utils.embedding_functions as embedding_functions

        if self.embedder_config is None:
            self.embedder_config = self._create_default_embedding_function()

        if isinstance(self.embedder_config, dict):
            provider = self.embedder_config.get("provider")
            config = self.embedder_config.get("config", {})
            model_name = config.get("model")
            if provider == "openai":
                self.embedder_config = embedding_functions.OpenAIEmbeddingFunction(
                    api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
                    model_name=model_name,
                )
            elif provider == "azure":
                self.embedder_config = embedding_functions.OpenAIEmbeddingFunction(
                    api_key=config.get("api_key"),
                    api_base=config.get("api_base"),
                    api_type=config.get("api_type", "azure"),
                    api_version=config.get("api_version"),
                    model_name=model_name,
                )
            elif provider == "ollama":
                from openai import OpenAI

                class OllamaEmbeddingFunction(EmbeddingFunction):
                    def __call__(self, input: Documents) -> Embeddings:
                        client = OpenAI(
                            base_url="http://localhost:11434/v1",
                            api_key=config.get("api_key", "ollama"),
                        )
                        try:
                            response = client.embeddings.create(
                                input=input, model=model_name
                            )
                            embeddings = [item.embedding for item in response.data]
                            return cast(Embeddings, embeddings)
                        except Exception as e:
                            raise e

                self.embedder_config = OllamaEmbeddingFunction()
            elif provider == "vertexai":
                self.embedder_config = (
                    embedding_functions.GoogleVertexEmbeddingFunction(
                        model_name=model_name,
                        api_key=config.get("api_key"),
                    )
                )
            elif provider == "google":
                self.embedder_config = (
                    embedding_functions.GoogleGenerativeAiEmbeddingFunction(
                        model_name=model_name,
                        api_key=config.get("api_key"),
                    )
                )
            elif provider == "cohere":
                self.embedder_config = embedding_functions.CohereEmbeddingFunction(
                    model_name=model_name,
                    api_key=config.get("api_key"),
                )
            elif provider == "huggingface":
                self.embedder_config = embedding_functions.HuggingFaceEmbeddingServer(
                    url=config.get("api_url"),
                )
            else:
                raise Exception(
                    f"Unsupported embedding provider: {provider}, supported providers: [openai, azure, ollama, vertexai, google, cohere, huggingface]"
                )
        else:
            validate_embedding_function(self.embedder_config)  # type: ignore # used for validating embedder_config if defined a embedding function/class
            self.embedder_config = self.embedder_config
        configurator = EmbeddingConfigurator()
        self.embedder_config = configurator.configure_embedder(self.embedder_config)

    def _initialize_app(self):
        import chromadb
@@ -211,8 +143,10 @@ class RAGStorage(BaseRAGStorage):
        )

    def _create_default_embedding_function(self):
        import chromadb.utils.embedding_functions as embedding_functions
        from chromadb.utils.embedding_functions.openai_embedding_function import (
            OpenAIEmbeddingFunction,
        )

        return embedding_functions.OpenAIEmbeddingFunction(
        return OpenAIEmbeddingFunction(
            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
        )
0
src/crewai/memory/user/__init__.py
Normal file
45
src/crewai/memory/user/user_memory.py
Normal file
@@ -0,0 +1,45 @@
from typing import Any, Dict, Optional

from crewai.memory.memory import Memory


class UserMemory(Memory):
    """
    UserMemory class for handling user memory storage and retrieval.
    Inherits from the Memory class and utilizes an instance of a class that
    adheres to the Storage for data storage, specifically working with
    MemoryItem instances.
    """

    def __init__(self, crew=None):
        try:
            from crewai.memory.storage.mem0_storage import Mem0Storage
        except ImportError:
            raise ImportError(
                "Mem0 is not installed. Please install it with `pip install mem0ai`."
            )
        storage = Mem0Storage(type="user", crew=crew)
        super().__init__(storage)

    def save(
        self,
        value,
        metadata: Optional[Dict[str, Any]] = None,
        agent: Optional[str] = None,
    ) -> None:
        # TODO: Change this function since we want to take care of the case where we save memories for the usr
        data = f"Remember the details about the user: {value}"
        super().save(data, metadata)

    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ):
        results = super().search(
            query=query,
            limit=limit,
            score_threshold=score_threshold,
        )
        return results
8
src/crewai/memory/user/user_memory_item.py
Normal file
@@ -0,0 +1,8 @@
from typing import Any, Dict, Optional


class UserMemoryItem:
    def __init__(self, data: Any, user: str, metadata: Optional[Dict[str, Any]] = None):
        self.data = data
        self.user = user
        self.metadata = metadata if metadata is not None else {}
@@ -1,5 +1,7 @@
from .annotations import (
    after_kickoff,
    agent,
    before_kickoff,
    cache_handler,
    callback,
    crew,
@@ -26,4 +28,6 @@ __all__ = [
    "llm",
    "cache_handler",
    "pipeline",
    "before_kickoff",
    "after_kickoff",
]

@@ -5,6 +5,16 @@ from crewai import Crew
from crewai.project.utils import memoize


def before_kickoff(func):
    func.is_before_kickoff = True
    return func


def after_kickoff(func):
    func.is_after_kickoff = True
    return func


def task(func):
    func.is_task = True

@@ -99,6 +109,19 @@ def crew(func) -> Callable[..., Crew]:
            self.agents = instantiated_agents
            self.tasks = instantiated_tasks

            return func(self, *args, **kwargs)
            crew = func(self, *args, **kwargs)

        return wrapper
            def callback_wrapper(callback, instance):
                def wrapper(*args, **kwargs):
                    return callback(instance, *args, **kwargs)

                return wrapper

            for _, callback in self._before_kickoff.items():
                crew.before_kickoff_callbacks.append(callback_wrapper(callback, self))
            for _, callback in self._after_kickoff.items():
                crew.after_kickoff_callbacks.append(callback_wrapper(callback, self))

            return crew

    return memoize(wrapper)
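For context, a hedged sketch of how the new kickoff hooks are meant to be used inside a @CrewBase class. The class, method names, and hook bodies below are illustrative and not part of the diff; the usual @agent/@crew/@task methods and YAML config files are omitted. The decorators simply tag methods so the crew() wrapper above can register them as before/after kickoff callbacks.

from crewai.project import CrewBase, before_kickoff, after_kickoff


@CrewBase
class ResearchCrew:
    """Example crew using the new kickoff hooks (agents/tasks omitted)."""

    @before_kickoff
    def prepare_inputs(self, inputs):
        # Runs before Crew.kickoff(); may enrich or validate the inputs dict.
        inputs["run_label"] = "nightly"
        return inputs

    @after_kickoff
    def log_results(self, result):
        # Runs after kickoff with the crew output; the return value is passed on.
        print("Crew finished:", result)
        return result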
@@ -34,18 +34,39 @@ def CrewBase(cls: T) -> T:
            self.map_all_agent_variables()
            self.map_all_task_variables()

            # Preserve task and agent information
            self._original_tasks = {
            # Preserve all decorated functions
            self._original_functions = {
                name: method
                for name, method in cls.__dict__.items()
                if hasattr(method, "is_task") and method.is_task
            }
            self._original_agents = {
                name: method
                for name, method in cls.__dict__.items()
                if hasattr(method, "is_agent") and method.is_agent
                if any(
                    hasattr(method, attr)
                    for attr in [
                        "is_task",
                        "is_agent",
                        "is_before_kickoff",
                        "is_after_kickoff",
                        "is_kickoff",
                    ]
                )
            }

            # Store specific function types
            self._original_tasks = self._filter_functions(
                self._original_functions, "is_task"
            )
            self._original_agents = self._filter_functions(
                self._original_functions, "is_agent"
            )
            self._before_kickoff = self._filter_functions(
                self._original_functions, "is_before_kickoff"
            )
            self._after_kickoff = self._filter_functions(
                self._original_functions, "is_after_kickoff"
            )
            self._kickoff = self._filter_functions(
                self._original_functions, "is_kickoff"
            )

        @staticmethod
        def load_yaml(config_path: Path):
            try:
@@ -23,6 +23,7 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry
from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.i18n import I18N
@@ -91,7 +92,7 @@ class Task(BaseModel):
    output: Optional[TaskOutput] = Field(
        description="Task output, it's final result after being executed", default=None
    )
    tools: Optional[List[Any]] = Field(
    tools: Optional[List[BaseTool]] = Field(
        default_factory=list,
        description="Tools the agent is limited to use for this task.",
    )
@@ -185,7 +186,7 @@ class Task(BaseModel):
        self,
        agent: Optional[BaseAgent] = None,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> TaskOutput:
        """Execute the task synchronously."""
        return self._execute_core(agent, context, tools)
@@ -202,12 +203,14 @@ class Task(BaseModel):
        self,
        agent: BaseAgent | None = None,
        context: Optional[str] = None,
        tools: Optional[List[Any]] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> Future[TaskOutput]:
        """Execute the task asynchronously."""
        future: Future[TaskOutput] = Future()
        threading.Thread(
            target=self._execute_task_async, args=(agent, context, tools, future)
            daemon=True,
            target=self._execute_task_async,
            args=(agent, context, tools, future),
        ).start()
        return future

@@ -276,7 +279,9 @@ class Task(BaseModel):
        content = (
            json_output
            if json_output
            else pydantic_output.model_dump_json() if pydantic_output else result
            else pydantic_output.model_dump_json()
            if pydantic_output
            else result
        )
        self._save_file(content)
@@ -21,7 +21,7 @@ with suppress_warnings():


from opentelemetry import trace  # noqa: E402
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter  # noqa: E402
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter  # noqa: E402
from opentelemetry.sdk.resources import SERVICE_NAME, Resource  # noqa: E402
from opentelemetry.sdk.trace import TracerProvider  # noqa: E402
from opentelemetry.sdk.trace.export import BatchSpanProcessor  # noqa: E402
@@ -48,6 +48,10 @@ class Telemetry:
    def __init__(self):
        self.ready = False
        self.trace_set = False

        if os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true":
            return

        try:
            telemetry_endpoint = "https://telemetry.crewai.com:4319"
            self.resource = Resource(
@@ -0,0 +1 @@
from .base_tool import BaseTool, tool

@@ -1,25 +0,0 @@
from crewai.agents.agent_builder.utilities.base_agent_tool import BaseAgentTools


class AgentTools(BaseAgentTools):
    """Default tools around agent delegation"""

    def tools(self):
        from langchain.tools import StructuredTool

        coworkers = ", ".join([f"{agent.role}" for agent in self.agents])
        tools = [
            StructuredTool.from_function(
                func=self.delegate_work,
                name="Delegate work to coworker",
                description=self.i18n.tools("delegate_work").format(
                    coworkers=coworkers
                ),
            ),
            StructuredTool.from_function(
                func=self.ask_question,
                name="Ask question to coworker",
                description=self.i18n.tools("ask_question").format(coworkers=coworkers),
            ),
        ]
        return tools
32
src/crewai/tools/agent_tools/agent_tools.py
Normal file
@@ -0,0 +1,32 @@
from crewai.tools.base_tool import BaseTool
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.utilities import I18N

from .delegate_work_tool import DelegateWorkTool
from .ask_question_tool import AskQuestionTool


class AgentTools:
    """Manager class for agent-related tools"""

    def __init__(self, agents: list[BaseAgent], i18n: I18N = I18N()):
        self.agents = agents
        self.i18n = i18n

    def tools(self) -> list[BaseTool]:
        """Get all available agent tools"""
        coworkers = ", ".join([f"{agent.role}" for agent in self.agents])

        delegate_tool = DelegateWorkTool(
            agents=self.agents,
            i18n=self.i18n,
            description=self.i18n.tools("delegate_work").format(coworkers=coworkers),
        )

        ask_tool = AskQuestionTool(
            agents=self.agents,
            i18n=self.i18n,
            description=self.i18n.tools("ask_question").format(coworkers=coworkers),
        )

        return [delegate_tool, ask_tool]
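A small illustrative sketch (not from the diff) of how the reworked manager class yields typed BaseTool instances instead of LangChain StructuredTool wrappers; the agents are hypothetical and assume the standard crewai Agent constructor.

from crewai import Agent
from crewai.tools.agent_tools.agent_tools import AgentTools

writer = Agent(role="Writer", goal="Write copy", backstory="Senior copywriter")
editor = Agent(role="Editor", goal="Review copy", backstory="Detail-oriented editor")

# tools() now returns DelegateWorkTool and AskQuestionTool, each carrying a
# pydantic args_schema instead of being built from langchain StructuredTool.
for t in AgentTools(agents=[writer, editor]).tools():
    print(t.name, t.args_schema.__name__)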
26
src/crewai/tools/agent_tools/ask_question_tool.py
Normal file
@@ -0,0 +1,26 @@
from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
from typing import Optional
from pydantic import BaseModel, Field


class AskQuestionToolSchema(BaseModel):
    question: str = Field(..., description="The question to ask")
    context: str = Field(..., description="The context for the question")
    coworker: str = Field(..., description="The role/name of the coworker to ask")


class AskQuestionTool(BaseAgentTool):
    """Tool for asking questions to coworkers"""

    name: str = "Ask question to coworker"
    args_schema: type[BaseModel] = AskQuestionToolSchema

    def _run(
        self,
        question: str,
        context: str,
        coworker: Optional[str] = None,
        **kwargs,
    ) -> str:
        coworker = self._get_coworker(coworker, **kwargs)
        return self._execute(coworker, question, context)
@@ -1,22 +1,19 @@
from abc import ABC, abstractmethod
from typing import List, Optional, Union

from pydantic import BaseModel, Field
from typing import Optional, Union
from pydantic import Field

from crewai.tools.base_tool import BaseTool
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.task import Task
from crewai.utilities import I18N


class BaseAgentTools(BaseModel, ABC):
    """Default tools around agent delegation"""
class BaseAgentTool(BaseTool):
    """Base class for agent-related tools"""

    agents: List[BaseAgent] = Field(description="List of agents in this crew.")
    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")

    @abstractmethod
    def tools(self):
        pass
    agents: list[BaseAgent] = Field(description="List of available agents")
    i18n: I18N = Field(
        default_factory=I18N, description="Internationalization settings"
    )

    def _get_coworker(self, coworker: Optional[str], **kwargs) -> Optional[str]:
        coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker")
@@ -24,27 +21,11 @@ class BaseAgentTools(BaseModel, ABC):
        is_list = coworker.startswith("[") and coworker.endswith("]")
        if is_list:
            coworker = coworker[1:-1].split(",")[0]

        return coworker

    def delegate_work(
        self, task: str, context: str, coworker: Optional[str] = None, **kwargs
    ):
        """Useful to delegate a specific task to a coworker passing all necessary context and names."""
        coworker = self._get_coworker(coworker, **kwargs)
        return self._execute(coworker, task, context)

    def ask_question(
        self, question: str, context: str, coworker: Optional[str] = None, **kwargs
    ):
        """Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
        coworker = self._get_coworker(coworker, **kwargs)
        return self._execute(coworker, question, context)

    def _execute(
        self, agent_name: Union[str, None], task: str, context: Union[str, None]
    ):
        """Execute the command."""
    ) -> str:
        try:
            if agent_name is None:
                agent_name = ""
@@ -57,7 +38,6 @@ class BaseAgentTools(BaseModel, ABC):
            # when it should look like this:
            # {"task": "....", "coworker": "...."}
            agent_name = agent_name.casefold().replace('"', "").replace("\n", "")

            agent = [  # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None")
                available_agent
                for available_agent in self.agents
29
src/crewai/tools/agent_tools/delegate_work_tool.py
Normal file
@@ -0,0 +1,29 @@
from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
from typing import Optional

from pydantic import BaseModel, Field


class DelegateWorkToolSchema(BaseModel):
    task: str = Field(..., description="The task to delegate")
    context: str = Field(..., description="The context for the task")
    coworker: str = Field(
        ..., description="The role/name of the coworker to delegate to"
    )


class DelegateWorkTool(BaseAgentTool):
    """Tool for delegating work to coworkers"""

    name: str = "Delegate work to coworker"
    args_schema: type[BaseModel] = DelegateWorkToolSchema

    def _run(
        self,
        task: str,
        context: str,
        coworker: Optional[str] = None,
        **kwargs,
    ) -> str:
        coworker = self._get_coworker(coworker, **kwargs)
        return self._execute(coworker, task, context)
186
src/crewai/tools/base_tool.py
Normal file
@@ -0,0 +1,186 @@
from abc import ABC, abstractmethod
from typing import Any, Callable, Type, get_args, get_origin

from langchain_core.tools import StructuredTool
from pydantic import BaseModel, ConfigDict, Field, validator
from pydantic import BaseModel as PydanticBaseModel


class BaseTool(BaseModel, ABC):
    class _ArgsSchemaPlaceholder(PydanticBaseModel):
        pass

    model_config = ConfigDict()

    name: str
    """The unique name of the tool that clearly communicates its purpose."""
    description: str
    """Used to tell the model how/when/why to use the tool."""
    args_schema: Type[PydanticBaseModel] = Field(default_factory=_ArgsSchemaPlaceholder)
    """The schema for the arguments that the tool accepts."""
    description_updated: bool = False
    """Flag to check if the description has been updated."""
    cache_function: Callable = lambda _args=None, _result=None: True
    """Function that will be used to determine if the tool should be cached, should return a boolean. If None, the tool will be cached."""
    result_as_answer: bool = False
    """Flag to check if the tool should be the final agent answer."""

    @validator("args_schema", always=True, pre=True)
    def _default_args_schema(
        cls, v: Type[PydanticBaseModel]
    ) -> Type[PydanticBaseModel]:
        if not isinstance(v, cls._ArgsSchemaPlaceholder):
            return v

        return type(
            f"{cls.__name__}Schema",
            (PydanticBaseModel,),
            {
                "__annotations__": {
                    k: v for k, v in cls._run.__annotations__.items() if k != "return"
                },
            },
        )

    def model_post_init(self, __context: Any) -> None:
        self._generate_description()

        super().model_post_init(__context)

    def run(
        self,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        print(f"Using Tool: {self.name}")
        return self._run(*args, **kwargs)

    @abstractmethod
    def _run(
        self,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """Here goes the actual implementation of the tool."""

    def to_langchain(self) -> StructuredTool:
        self._set_args_schema()
        return StructuredTool(
            name=self.name,
            description=self.description,
            args_schema=self.args_schema,
            func=self._run,
        )

    @classmethod
    def from_langchain(cls, tool: StructuredTool) -> "BaseTool":
        if cls == Tool:
            if tool.func is None:
                raise ValueError("StructuredTool must have a callable 'func'")
            return Tool(
                name=tool.name,
                description=tool.description,
                args_schema=tool.args_schema,
                func=tool.func,
            )
        raise NotImplementedError(f"from_langchain not implemented for {cls.__name__}")

    def _set_args_schema(self):
        if self.args_schema is None:
            class_name = f"{self.__class__.__name__}Schema"
            self.args_schema = type(
                class_name,
                (PydanticBaseModel,),
                {
                    "__annotations__": {
                        k: v
                        for k, v in self._run.__annotations__.items()
                        if k != "return"
                    },
                },
            )

    def _generate_description(self):
        args_schema = {
            name: {
                "description": field.description,
                "type": BaseTool._get_arg_annotations(field.annotation),
            }
            for name, field in self.args_schema.model_fields.items()
        }

        self.description = f"Tool Name: {self.name}\nTool Arguments: {args_schema}\nTool Description: {self.description}"

    @staticmethod
    def _get_arg_annotations(annotation: type[Any] | None) -> str:
        if annotation is None:
            return "None"

        origin = get_origin(annotation)
        args = get_args(annotation)

        if origin is None:
            return (
                annotation.__name__
                if hasattr(annotation, "__name__")
                else str(annotation)
            )

        if args:
            args_str = ", ".join(BaseTool._get_arg_annotations(arg) for arg in args)
            return f"{origin.__name__}[{args_str}]"

        return origin.__name__


class Tool(BaseTool):
    func: Callable
    """The function that will be executed when the tool is called."""

    def _run(self, *args: Any, **kwargs: Any) -> Any:
        return self.func(*args, **kwargs)


def to_langchain(
    tools: list[BaseTool | StructuredTool],
) -> list[StructuredTool]:
    return [t.to_langchain() if isinstance(t, BaseTool) else t for t in tools]


def tool(*args):
    """
    Decorator to create a tool from a function.
    """

    def _make_with_name(tool_name: str) -> Callable:
        def _make_tool(f: Callable) -> BaseTool:
            if f.__doc__ is None:
                raise ValueError("Function must have a docstring")
            if f.__annotations__ is None:
                raise ValueError("Function must have type annotations")

            class_name = "".join(tool_name.split()).title()
            args_schema = type(
                class_name,
                (PydanticBaseModel,),
                {
                    "__annotations__": {
                        k: v for k, v in f.__annotations__.items() if k != "return"
                    },
                },
            )

            return Tool(
                name=tool_name,
                description=f.__doc__,
                func=f,
                args_schema=args_schema,
            )

        return _make_tool

    if len(args) == 1 and callable(args[0]):
        return _make_with_name(args[0].__name__)(args[0])
    if len(args) == 1 and isinstance(args[0], str):
        return _make_with_name(args[0])
    raise ValueError("Invalid arguments")
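To make the new tool API concrete, a minimal sketch of the two ways this module lets you define a tool. The names "Multiplier" and "Count letters" are illustrative and not part of the diff; the import path follows the new src/crewai/tools/__init__.py shown above.

from crewai.tools import BaseTool, tool


@tool("Multiplier")
def multiply(first: int, second: int) -> int:
    """Multiply two integers and return the product."""
    return first * second


class CountLetters(BaseTool):
    name: str = "Count letters"
    description: str = "Counts the letters in a word."

    def _run(self, word: str) -> int:
        return len(word)


# Both carry a generated description and a pydantic args_schema, and can be
# converted for LangChain-based agents via to_langchain().
print(multiply.description)
print(CountLetters().run(word="crewai"))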
0
src/crewai/tools/cache_tools/__init__.py
Normal file
@@ -10,6 +10,7 @@ import crewai.utilities.events as events
from crewai.agents.tools_handler import ToolsHandler
from crewai.task import Task
from crewai.telemetry import Telemetry
from crewai.tools import BaseTool
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.tools.tool_usage_events import ToolUsageError, ToolUsageFinished
from crewai.utilities import I18N, Converter, ConverterError, Printer
@@ -49,7 +50,7 @@ class ToolUsage:
    def __init__(
        self,
        tools_handler: ToolsHandler,
        tools: List[Any],
        tools: List[BaseTool],
        original_tools: List[Any],
        tools_description: str,
        tools_names: str,
@@ -298,22 +299,7 @@ class ToolUsage:
        """Render the tool name and description in plain text."""
        descriptions = []
        for tool in self.tools:
            args = {
                name: {
                    "description": field.description,
                    "type": field.annotation.__name__,
                }
                for name, field in tool.args_schema.model_fields.items()
            }
            descriptions.append(
                "\n".join(
                    [
                        f"Tool Name: {tool.name.lower()}",
                        f"Tool Description: {tool.description}",
                        f"Tool Arguments: {args}",
                    ]
                )
            )
            descriptions.append(tool.description)
        return "\n--\n".join(descriptions)

    def _function_calling(self, tool_string: str):
@@ -8,6 +8,7 @@ class UsageMetrics(BaseModel):
    Attributes:
        total_tokens: Total number of tokens used.
        prompt_tokens: Number of tokens used in prompts.
        cached_prompt_tokens: Number of cached prompt tokens used.
        completion_tokens: Number of tokens used in completions.
        successful_requests: Number of successful requests made.
    """
@@ -16,6 +17,9 @@ class UsageMetrics(BaseModel):
    prompt_tokens: int = Field(
        default=0, description="Number of tokens used in prompts."
    )
    cached_prompt_tokens: int = Field(
        default=0, description="Number of cached prompt tokens used."
    )
    completion_tokens: int = Field(
        default=0, description="Number of tokens used in completions."
    )
@@ -32,5 +36,6 @@ class UsageMetrics(BaseModel):
        """
        self.total_tokens += usage_metrics.total_tokens
        self.prompt_tokens += usage_metrics.prompt_tokens
        self.cached_prompt_tokens += usage_metrics.cached_prompt_tokens
        self.completion_tokens += usage_metrics.completion_tokens
        self.successful_requests += usage_metrics.successful_requests
@@ -10,6 +10,7 @@ from .rpm_controller import RPMController
from .exceptions.context_window_exceeding_exception import (
    LLMContextLengthExceededException,
)
from .embedding_configurator import EmbeddingConfigurator

__all__ = [
    "Converter",
@@ -23,4 +24,5 @@ __all__ = [
    "RPMController",
    "YamlParser",
    "LLMContextLengthExceededException",
    "EmbeddingConfigurator",
]

@@ -1,2 +1,3 @@
TRAINING_DATA_FILE = "training_data.pkl"
TRAINED_AGENTS_DATA_FILE = "trained_agents_data.pkl"
DEFAULT_SCORE_THRESHOLD = 0.35
Some files were not shown because too many files have changed in this diff.