mirror of https://github.com/crewAIInc/crewAI.git
synced 2025-12-17 21:08:29 +00:00

Compare commits: feat/cli-m...knowledge (135 commits)
Commit SHA1s, in page order:

c0ad4576e2, 6359b64d22, 9329119f76, 38c0d61b11, 8564f5551f, 8a5404275f,
52189a46bc, 44ab749fda, 3c4504bd4f, fde1ee45f9, 6774bc2c53, 94c62263ed,
495c3859af, 3e003f5e32, 1c8b509d7d, 58af5c08f9, 23276cbd76, fe18da5e11,
76da972ce9, 4663997b4c, b185b9e289, 787f2eaa7c, e7d816fb2a, 8373c9b521,
ec2fe6ff91, 55e968c9e0, 58bf2d57f7, 705ee16c1c, 0c5b6f2a93, 914067df37,
de742c827d, efa8a378a1, e882725b8a, cbfdbe3b68, c8bf242633, 70910dd7b4,
b104404418, d579c5ae12, 4831dcb85b, cbfcde73ec, b2c06d5b7a, 352d05370e,
0b9092702b, 8376698534, b90793874c, cdf5233523, cb03ee60b8, 10f445e18a,
3dc02310b6, 98a708ca15, e70bc94ab6, 9285ebf8a2, 4ca785eb15, c57cbd8591,
7fb1289205, f02681ae01, c725105b1f, 36aa4bcb46, b98f8f9fe1, bcfcf88e78,
fd0de3a47e, c7b9ae02fd, 4afb022572, 8610faef22, 6d677541c7, 49220ec163,
40a676b7ac, 50bf146d1e, 40d378abfb, 1b09b085a7, 7b59c5b049, 86ede8344c,
59165cbad8, 4af263ca1e, 9f2acfe91f, 617ee989cd, 6131dbac4f, 1a35114c08,
e856359e23, a8a2f80616, faa231e278, 3d44795476, f50e709985, dc314c1151,
75322b2de1, d70c542547, 57201fb856, 9b142e580b, 3878daffd6, 34954e6f74,
e66a135d5d, 66698503b8, ec2967c362, 4ae07468f3, 6193eb13fa, 55cd15bfc6,
5f46ff8836, cdfbd5f62b, b43f3987ec, 240527d06c, 276cb7b7e8, 048aa6cbcc,
fa9949b9d0, 500072d855, 04bcfa6e2d, 26afee9bed, f29f4abdd7, 4589d6fe9d,
201e652fa2, 8bc07e6071, 6baaad045a, 74c1703310, a921828e51, e1fd83e6a7,
7d68e287cc, f39a975e20, b8a3c29745, 9cd4ff05c9, 4687779702, 8731915330,
093259389e, 6bcb3d1080, 71a217b210, b98256e434, 40f81aecf5, d1737a96fb,
84f48c465d, 60efcad481, 53a9f107ca, 6fa2b89831, d72ebb9bb8, 81ae07abdb,
6d20ba70a1, 67f55bae2c, 9b59de1720
.github/security.md (vendored, new file, 19 lines)

````diff
@@ -0,0 +1,19 @@
+CrewAI takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organization.
+
+If you believe you have found a security vulnerability in any CrewAI product or service, please report it to us as described below.
+
+## Reporting a Vulnerability
+
+Please do not report security vulnerabilities through public GitHub issues.
+
+To report a vulnerability, please email us at security@crewai.com.
+
+Please include the requested information listed below so that we can triage your report more quickly
+
+- Type of issue (e.g. SQL injection, cross-site scripting, etc.)
+- Full paths of source file(s) related to the manifestation of the issue
+- The location of the affected source code (tag/branch/commit or direct URL)
+- Any special configuration required to reproduce the issue
+- Step-by-step instructions to reproduce the issue (please include screenshots if needed)
+- Proof-of-concept or exploit code (if possible)
+- Impact of the issue, including how an attacker might exploit the issue
+
+Once we have received your report, we will respond to you at the email address you provide. If the issue is confirmed, we will release a patch as soon as possible depending on the complexity of the issue.
+
+At this time, we are not offering a bug bounty program. Any rewards will be at our discretion.
````
.github/workflows/security-checker.yml (vendored, 2 changed lines)

````diff
@@ -19,5 +19,5 @@ jobs:
         run: pip install bandit

       - name: Run Bandit
-        run: bandit -c pyproject.toml -r src/ -lll
+        run: bandit -c pyproject.toml -r src/ -ll
````
.github/workflows/tests.yml (vendored, 4 changed lines)

````diff
@@ -26,7 +26,7 @@ jobs:
         run: uv python install 3.11.9

       - name: Install the project
-        run: uv sync --dev
+        run: uv sync --dev --all-extras

       - name: Run tests
-        run: uv run pytest tests
+        run: uv run pytest tests -vv
````
.gitignore (vendored, 4 added lines)

````diff
@@ -17,3 +17,7 @@ rc-tests/*
 temp/*
 .vscode/*
 crew_tasks_output.json
+.codesight
+.mypy_cache
+.ruff_cache
+.venv
````
````diff
@@ -351,7 +351,7 @@ pre-commit install
 ### Running Tests

 ```bash
-uvx pytest
+uv run pytest .
 ```

 ### Running static type checks
````
````diff
@@ -31,16 +31,17 @@ Think of an agent as a member of a team, with specific skills and a particular j
 | **Max RPM** *(optional)* | `max_rpm` | Max RPM is the maximum number of requests per minute the agent can perform to avoid rate limits. It's optional and can be left unspecified, with a default value of `None`. |
 | **Max Execution Time** *(optional)* | `max_execution_time` | Max Execution Time is the maximum execution time for an agent to execute a task. It's optional and can be left unspecified, with a default value of `None`, meaning no max execution time. |
 | **Verbose** *(optional)* | `verbose` | Setting this to `True` configures the internal logger to provide detailed execution logs, aiding in debugging and monitoring. Default is `False`. |
-| **Allow Delegation** *(optional)* | `allow_delegation` | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. Default is `False`.
+| **Allow Delegation** *(optional)* | `allow_delegation` | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. Default is `False`. |
 | **Step Callback** *(optional)* | `step_callback` | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback`. |
 | **Cache** *(optional)* | `cache` | Indicates if the agent should use a cache for tool usage. Default is `True`. |
 | **System Template** *(optional)* | `system_template` | Specifies the system format for the agent. Default is `None`. |
 | **Prompt Template** *(optional)* | `prompt_template` | Specifies the prompt format for the agent. Default is `None`. |
 | **Response Template** *(optional)* | `response_template` | Specifies the response format for the agent. Default is `None`. |
 | **Allow Code Execution** *(optional)* | `allow_code_execution` | Enable code execution for the agent. Default is `False`. |
-| **Max Retry Limit** *(optional)* | `max_retry_limit` | Maximum number of retries for an agent to execute a task when an error occurs. Default is `2`.
+| **Max Retry Limit** *(optional)* | `max_retry_limit` | Maximum number of retries for an agent to execute a task when an error occurs. Default is `2`. |
 | **Use System Prompt** *(optional)* | `use_system_prompt` | Adds the ability to not use system prompt (to support o1 models). Default is `True`. |
 | **Respect Context Window** *(optional)* | `respect_context_window` | Summary strategy to avoid overflowing the context window. Default is `True`. |
+| **Code Execution Mode** *(optional)* | `code_execution_mode` | Determines the mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution on the host machine). Default is `safe`. |

 ## Creating an agent

@@ -83,6 +84,7 @@ agent = Agent(
     max_retry_limit=2, # Optional
     use_system_prompt=True, # Optional
     respect_context_window=True, # Optional
+    code_execution_mode='safe', # Optional, defaults to 'safe'
 )
 ```

@@ -156,4 +158,4 @@ crew = my_crew.kickoff(inputs={"input": "Mark Twain"})
 ## Conclusion

 Agents are the building blocks of the CrewAI framework. By understanding how to define and interact with agents,
-you can create sophisticated AI systems that leverage the power of collaborative intelligence.
+you can create sophisticated AI systems that leverage the power of collaborative intelligence. The `code_execution_mode` attribute provides flexibility in how agents execute code, allowing for both secure and direct execution options.
````
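The agents doc changes above center on the new `code_execution_mode` attribute. As a rough, non-authoritative sketch of how the documented parameters combine, the snippet below instantiates an agent with code execution enabled; the role, goal, and backstory strings are invented for illustration:

```python
from crewai import Agent

# Illustrative only: parameters taken from the attribute table above.
# 'safe' runs agent-generated code inside Docker; 'unsafe' executes it
# directly on the host machine. The default is 'safe'.
coder = Agent(
    role="Python Developer",             # invented example value
    goal="Write and run small scripts",  # invented example value
    backstory="An engineer who validates ideas by executing code.",
    allow_code_execution=True,    # default is False
    code_execution_mode="safe",   # the new attribute documented above
    max_retry_limit=2,
    verbose=True,
)
```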
````diff
@@ -6,7 +6,7 @@ icon: terminal
 # CrewAI CLI Documentation

-The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you to create, train, run, and manage crews and pipelines.
+The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you to create, train, run, and manage crews & flows.

 ## Installation

@@ -146,3 +146,34 @@ crewai run
 Make sure to run these commands from the directory where your CrewAI project is set up.
 Some commands may require additional configuration or setup within your project structure.
 </Note>
+
+### 9. API Keys
+
+When running ```crewai create crew``` command, the CLI will first show you the top 5 most common LLM providers and ask you to select one.
+
+Once you've selected an LLM provider, you will be prompted for API keys.
+
+#### Initial API key providers
+
+The CLI will initially prompt for API keys for the following services:
+
+* OpenAI
+* Groq
+* Anthropic
+* Google Gemini
+
+When you select a provider, the CLI will prompt you to enter your API key.
+
+#### Other Options
+
+If you select option 6, you will be able to select from a list of LiteLLM supported providers.
+
+When you select a provider, the CLI will prompt you to enter the Key name and the API key.
+
+See the following link for each provider's key name:
+
+* [LiteLLM Providers](https://docs.litellm.ai/docs/providers)
````
````diff
@@ -22,7 +22,8 @@ A crew in crewAI represents a collaborative group of agents working together to
 | **Max RPM** _(optional)_ | `max_rpm` | Maximum requests per minute the crew adheres to during execution. Defaults to `None`. |
 | **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
 | **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
-| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). Defaults to `False`. |
+| **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
+| **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
 | **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
 | **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
 | **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
````
````diff
@@ -18,68 +18,71 @@ Flows allow you to create structured, event-driven workflows. They provide a sea
 4. **Flexible Control Flow**: Implement conditional logic, loops, and branching within your workflows.

+5. **Input Flexibility**: Flows can accept inputs to initialize or update their state, with different handling for structured and unstructured state management.
+
 ## Getting Started

 Let's create a simple Flow where you will use OpenAI to generate a random city in one task and then use that city to generate a fun fact in another task.

-```python Code
-import asyncio
+### Passing Inputs to Flows
+
+Flows can accept inputs to initialize or update their state before execution. The way inputs are handled depends on whether the flow uses structured or unstructured state management.
+
+#### Structured State Management
+
+In structured state management, the flow's state is defined using a Pydantic `BaseModel`. Inputs must match the model's schema, and any updates will overwrite the default values.
+
+```python
 from crewai.flow.flow import Flow, listen, start
-from litellm import completion
+from pydantic import BaseModel

+class ExampleState(BaseModel):
+    counter: int = 0
+    message: str = ""

-class ExampleFlow(Flow):
-    model = "gpt-4o-mini"
+class StructuredExampleFlow(Flow[ExampleState]):

     @start()
-    def generate_city(self):
-        print("Starting flow")
+    def first_method(self):
+        # Implementation

-        response = completion(
-            model=self.model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": "Return the name of a random city in the world.",
-                },
-            ],
-        )
+flow = StructuredExampleFlow()
+flow.kickoff(inputs={"counter": 10})
+```

-        random_city = response["choices"][0]["message"]["content"]
-        print(f"Random City: {random_city}")
-
-        return random_city
+In this example, the `counter` is initialized to `10`, while `message` retains its default value.

-    @listen(generate_city)
-    def generate_fun_fact(self, random_city):
-        response = completion(
-            model=self.model,
-            messages=[
-                {
-                    "role": "user",
-                    "content": f"Tell me a fun fact about {random_city}",
-                },
-            ],
-        )
+#### Unstructured State Management
+
+In unstructured state management, the flow's state is a dictionary. You can pass any dictionary to update the state.
+
+```python
+from crewai.flow.flow import Flow, listen, start
+
+class UnstructuredExampleFlow(Flow):
+    @start()
+    def first_method(self):
+        # Implementation

-        fun_fact = response["choices"][0]["message"]["content"]
-        return fun_fact
-
-
-async def main():
-    flow = ExampleFlow()
-    result = await flow.kickoff()
-
-    print(f"Generated fun fact: {result}")
-
-asyncio.run(main())
+flow = UnstructuredExampleFlow()
+flow.kickoff(inputs={"counter": 5, "message": "Initial message"})
+```
+
+Here, both `counter` and `message` are updated based on the provided inputs.
+
+**Note:** Ensure that inputs for structured state management adhere to the defined schema to avoid validation errors.
+
+### Example Flow
+
+```python
+# Existing example code
 ```

 In the above example, we have created a simple Flow that generates a random city using OpenAI and then generates a fun fact about that city. The Flow consists of two tasks: `generate_city` and `generate_fun_fact`. The `generate_city` task is the starting point of the Flow, and the `generate_fun_fact` task listens for the output of the `generate_city` task.

 When you run the Flow, it will generate a random city and then generate a fun fact about that city. The output will be printed to the console.

+**Note:** Ensure you have set up your `.env` file to store your `OPENAI_API_KEY`. This key is necessary for authenticating requests to the OpenAI API.
+
 ### @start()

 The `@start()` decorator is used to mark a method as the starting point of a Flow. When a Flow is started, all the methods decorated with `@start()` are executed in parallel. You can have multiple start methods in a Flow, and they will all be executed when the Flow is started.
````
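The `# Implementation` bodies in the new documentation are placeholders. Below is a minimal runnable sketch of a structured-state flow that fills them in, using only the APIs visible in this diff (`Flow[Model]`, `@start`, `@listen`, `kickoff(inputs=...)`, `self.state`); the class and method names are invented:

```python
from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel

class CounterState(BaseModel):
    counter: int = 0
    message: str = ""

class CounterFlow(Flow[CounterState]):
    @start()
    def increment(self):
        # State was seeded by kickoff(inputs=...); bump it once more.
        self.state.counter += 1

    @listen(increment)
    def report(self):
        self.state.message = f"Counter is now {self.state.counter}"
        return self.state.message

flow = CounterFlow()
# Structured inputs must match the CounterState schema.
result = flow.kickoff(inputs={"counter": 10})
print(result)       # expected: "Counter is now 11"
print(flow.state)   # counter=11 message='Counter is now 11'
```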
````diff
@@ -94,14 +97,14 @@ The `@listen()` decorator can be used in several ways:
 1. **Listening to a Method by Name**: You can pass the name of the method you want to listen to as a string. When that method completes, the listener method will be triggered.

-```python Code
+```python
 @listen("generate_city")
 def generate_fun_fact(self, random_city):
     # Implementation
 ```

 2. **Listening to a Method Directly**: You can pass the method itself. When that method completes, the listener method will be triggered.
-```python Code
+```python
 @listen(generate_city)
 def generate_fun_fact(self, random_city):
     # Implementation
````
````diff
@@ -118,8 +121,7 @@ When you run a Flow, the final output is determined by the last method that comp
 Here's how you can access the final output:

 <CodeGroup>
-```python Code
-import asyncio
+```python
 from crewai.flow.flow import Flow, listen, start

 class OutputExampleFlow(Flow):

@@ -131,16 +133,14 @@ class OutputExampleFlow(Flow):
     def second_method(self, first_output):
         return f"Second method received: {first_output}"

-async def main():
-    flow = OutputExampleFlow()
-    final_output = await flow.kickoff()
+flow = OutputExampleFlow()
+final_output = flow.kickoff()

-    print("---- Final Output ----")
-    print(final_output)
-
-asyncio.run(main())
+print("---- Final Output ----")
+print(final_output)
 ```

-``` text Output
+```text
 ---- Final Output ----
 Second method received: Output from first_method
 ```
````
````diff
@@ -150,7 +150,6 @@ Second method received: Output from first_method
 In this example, the `second_method` is the last method to complete, so its output will be the final output of the Flow.
 The `kickoff()` method will return the final output, which is then printed to the console.

-
 #### Accessing and Updating State

 In addition to retrieving the final output, you can also access and update the state within your Flow. The state can be used to store and share data between different methods in the Flow. After the Flow has run, you can access the state to retrieve any information that was added or updated during the execution.

@@ -159,8 +158,7 @@ Here's an example of how to update and access the state:
 <CodeGroup>

-```python Code
-import asyncio
+```python
 from crewai.flow.flow import Flow, listen, start
 from pydantic import BaseModel

@@ -181,21 +179,19 @@ class StateExampleFlow(Flow[ExampleState]):
         self.state.counter += 1
         return self.state.message

-async def main():
-    flow = StateExampleFlow()
-    final_output = await flow.kickoff()
-    print(f"Final Output: {final_output}")
-    print("Final State:")
-    print(flow.state)
-
-asyncio.run(main())
+flow = StateExampleFlow()
+final_output = flow.kickoff()
+print(f"Final Output: {final_output}")
+print("Final State:")
+print(flow.state)
 ```

-``` text Output
+```text
 Final Output: Hello from first_method - updated by second_method
 Final State:
 counter=2 message='Hello from first_method - updated by second_method'
 ```

 </CodeGroup>

 In this example, the state is updated by both `first_method` and `second_method`.
````
````diff
@@ -214,12 +210,10 @@ allowing developers to choose the approach that best fits their application's ne
 In unstructured state management, all state is stored in the `state` attribute of the `Flow` class.
 This approach offers flexibility, enabling developers to add or modify state attributes on the fly without defining a strict schema.

-```python Code
-import asyncio
-
+```python
 from crewai.flow.flow import Flow, listen, start

-class UntructuredExampleFlow(Flow):
+class UnstructuredExampleFlow(Flow):

     @start()
     def first_method(self):

@@ -238,13 +232,8 @@ class UntructuredExampleFlow(Flow):
     print(f"State after third_method: {self.state}")

-async def main():
-    flow = UntructuredExampleFlow()
-    await flow.kickoff()
-
-
-asyncio.run(main())
+flow = UnstructuredExampleFlow()
+flow.kickoff()
 ```

 **Key Points:**
````
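The unstructured example above appears only in fragments; here is a small illustrative sketch of the dict-style state the surrounding text describes, with keys added on the fly (the class name, key names, and method bodies are invented, and dict-style access is an assumption based on the state dictionaries printed elsewhere on this page):

```python
from crewai.flow.flow import Flow, listen, start

class ScratchpadFlow(Flow):
    @start()
    def first_method(self):
        # Unstructured state: no schema, keys are created as needed.
        self.state["counter"] = 1
        self.state["message"] = "Hello from first_method"

    @listen(first_method)
    def second_method(self):
        self.state["counter"] += 1
        self.state["message"] += " - updated by second_method"
        print(f"State after second_method: {self.state}")

flow = ScratchpadFlow()
flow.kickoff()
```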
````diff
@@ -257,18 +246,14 @@ asyncio.run(main())
 Structured state management leverages predefined schemas to ensure consistency and type safety across the workflow.
 By using models like Pydantic's `BaseModel`, developers can define the exact shape of the state, enabling better validation and auto-completion in development environments.

-```python Code
-import asyncio
-
+```python
 from crewai.flow.flow import Flow, listen, start
 from pydantic import BaseModel

 class ExampleState(BaseModel):
     counter: int = 0
     message: str = ""

 class StructuredExampleFlow(Flow[ExampleState]):

     @start()

@@ -287,13 +272,8 @@ class StructuredExampleFlow(Flow[ExampleState]):
     print(f"State after third_method: {self.state}")

-async def main():
-    flow = StructuredExampleFlow()
-    await flow.kickoff()
-
-asyncio.run(main())
+flow = StructuredExampleFlow()
+flow.kickoff()
 ```

 **Key Points:**
````
````diff
@@ -325,8 +305,7 @@ The `or_` function in Flows allows you to listen to multiple methods and trigger
 <CodeGroup>

-```python Code
-import asyncio
+```python
 from crewai.flow.flow import Flow, listen, or_, start

 class OrExampleFlow(Flow):

@@ -343,16 +322,11 @@ class OrExampleFlow(Flow):
     def logger(self, result):
         print(f"Logger: {result}")

-async def main():
-    flow = OrExampleFlow()
-    await flow.kickoff()
-
-
-asyncio.run(main())
+flow = OrExampleFlow()
+flow.kickoff()
 ```

-``` text Output
+```text
 Logger: Hello from the start method
 Logger: Hello from the second method
 ```

@@ -368,8 +342,7 @@ The `and_` function in Flows allows you to listen to multiple methods and trigge
 <CodeGroup>

-```python Code
-import asyncio
+```python
 from crewai.flow.flow import Flow, and_, listen, start

 class AndExampleFlow(Flow):

@@ -387,16 +360,11 @@ class AndExampleFlow(Flow):
     print("---- Logger ----")
     print(self.state)

-async def main():
-    flow = AndExampleFlow()
-    await flow.kickoff()
-
-asyncio.run(main())
+flow = AndExampleFlow()
+flow.kickoff()
 ```

-``` text Output
+```text
 ---- Logger ----
 {'greeting': 'Hello from the start method', 'joke': 'What do computers eat? Microchips.'}
 ```
````
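Only parts of `OrExampleFlow` appear in the hunks above; the sketch below reconstructs a hypothetical end-to-end version consistent with the imports and the two `Logger:` output lines shown (the method bodies are assumptions):

```python
from crewai.flow.flow import Flow, listen, or_, start

class OrSketchFlow(Flow):
    @start()
    def start_method(self):
        return "Hello from the start method"

    @listen(start_method)
    def second_method(self):
        return "Hello from the second method"

    @listen(or_(start_method, second_method))
    def logger(self, result):
        # or_ triggers the listener each time any source method completes.
        print(f"Logger: {result}")

flow = OrSketchFlow()
flow.kickoff()
```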
````diff
@@ -413,8 +381,7 @@ You can specify different routes based on the output of the method, allowing you
 <CodeGroup>

-```python Code
-import asyncio
+```python
 import random
 from crewai.flow.flow import Flow, listen, router, start
 from pydantic import BaseModel

@@ -445,16 +412,11 @@ class RouterFlow(Flow[ExampleState]):
     def fourth_method(self):
         print("Fourth method running")

-async def main():
-    flow = RouterFlow()
-    await flow.kickoff()
-
-asyncio.run(main())
+flow = RouterFlow()
+flow.kickoff()
 ```

-``` text Output
+```text
 Starting the structured flow
 Third method running
 Fourth method running
````
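The router hunk likewise shows only `fourth_method`; as a non-authoritative sketch of the `@router` pattern, modeled on the `@router`/`@listen` usage in the self-evaluation example later on this page (class, method, and route-label names invented):

```python
import random

from crewai.flow.flow import Flow, listen, router, start

class CoinFlipFlow(Flow):
    @start()
    def flip(self):
        # Randomly decide which branch the router should take.
        self.state["heads"] = random.choice([True, False])

    @router(flip)
    def route(self):
        # The string a router returns selects which listeners fire.
        return "heads" if self.state["heads"] else "tails"

    @listen("heads")
    def on_heads(self):
        print("Heads branch running")

    @listen("tails")
    def on_tails(self):
        print("Tails branch running")

flow = CoinFlipFlow()
flow.kickoff()
```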
````diff
@@ -486,7 +448,7 @@ This command will generate a new CrewAI project with the necessary folder struct
 After running the `crewai create flow name_of_flow` command, you will see a folder structure similar to the following:

 | Directory/File | Description |
-|:---------------------------------|:------------------------------------------------------------------|
+| :--------------------- | :----------------------------------------------------------------- |
 | `name_of_flow/` | Root directory for the flow. |
 | ├── `crews/` | Contains directories for specific crews. |
 | │ └── `poem_crew/` | Directory for the "poem_crew" with its configurations and scripts. |

@@ -501,7 +463,6 @@ After running the `crewai create flow name_of_flow` command, you will see a fold
 | ├── `pyproject.toml` | Configuration file for project dependencies and settings. |
 | └── `.gitignore` | Specifies files and directories to ignore in version control. |

-
 ### Building Your Crews

 In the `crews` folder, you can define multiple crews. Each crew will have its own folder containing configuration files and the crew definition file. For example, the `poem_crew` folder contains:

@@ -518,9 +479,8 @@ The `main.py` file is where you create your flow and connect the crews together
 Here's an example of how you can connect the `poem_crew` in the `main.py` file:

-```python Code
+```python
 #!/usr/bin/env python
-import asyncio
 from random import randint

 from pydantic import BaseModel

@@ -536,14 +496,12 @@ class PoemFlow(Flow[PoemState]):
     @start()
     def generate_sentence_count(self):
         print("Generating sentence count")
-        # Generate a number between 1 and 5
         self.state.sentence_count = randint(1, 5)

     @listen(generate_sentence_count)
     def generate_poem(self):
         print("Generating poem")
-        poem_crew = PoemCrew().crew()
-        result = poem_crew.kickoff(inputs={"sentence_count": self.state.sentence_count})
+        result = PoemCrew().crew().kickoff(inputs={"sentence_count": self.state.sentence_count})

         print("Poem generated", result.raw)
         self.state.poem = result.raw
````
````diff
@@ -554,18 +512,17 @@ class PoemFlow(Flow[PoemState]):
         with open("poem.txt", "w") as f:
             f.write(self.state.poem)

-async def run():
-    """
-    Run the flow.
-    """
+def kickoff():
     poem_flow = PoemFlow()
-    await poem_flow.kickoff()
+    poem_flow.kickoff()

-def main():
-    asyncio.run(run())
+def plot():
+    poem_flow = PoemFlow()
+    poem_flow.plot()

 if __name__ == "__main__":
-    main()
+    kickoff()
 ```

 In this example, the `PoemFlow` class defines a flow that generates a sentence count, uses the `PoemCrew` to generate a poem, and then saves the poem to a file. The flow is kicked off by calling the `kickoff()` method.
````
````diff
@@ -587,17 +544,53 @@ source .venv/bin/activate
 After activating the virtual environment, you can run the flow by executing one of the following commands:

 ```bash
-crewai flow run
+crewai flow kickoff
 ```

 or

 ```bash
-uv run run_flow
+uv run kickoff
 ```

 The flow will execute, and you should see the output in the console.

+### Adding Additional Crews Using the CLI
+
+Once you have created your initial flow, you can easily add additional crews to your project using the CLI. This allows you to expand your flow's capabilities by integrating new crews without starting from scratch.
+
+To add a new crew to your existing flow, use the following command:
+
+```bash
+crewai flow add-crew <crew_name>
+```
+
+This command will create a new directory for your crew within the `crews` folder of your flow project. It will include the necessary configuration files and a crew definition file, similar to the initial setup.
+
+#### Folder Structure
+
+After adding a new crew, your folder structure will look like this:
+
+| Directory/File | Description |
+| :--------------------- | :----------------------------------------------------------------- |
+| `name_of_flow/` | Root directory for the flow. |
+| ├── `crews/` | Contains directories for specific crews. |
+| │ ├── `poem_crew/` | Directory for the "poem_crew" with its configurations and scripts. |
+| │ │ ├── `config/` | Configuration files directory for the "poem_crew". |
+| │ │ │ ├── `agents.yaml` | YAML file defining the agents for "poem_crew". |
+| │ │ │ └── `tasks.yaml` | YAML file defining the tasks for "poem_crew". |
+| │ │ └── `poem_crew.py` | Script for "poem_crew" functionality. |
+| └── `name_of_crew/` | Directory for the new crew. |
+| ├── `config/` | Configuration files directory for the new crew. |
+| │ ├── `agents.yaml` | YAML file defining the agents for the new crew. |
+| │ └── `tasks.yaml` | YAML file defining the tasks for the new crew. |
+| └── `name_of_crew.py` | Script for the new crew functionality. |
+
+You can then customize the `agents.yaml` and `tasks.yaml` files to define the agents and tasks for your new crew. The `name_of_crew.py` file will contain the crew's logic, which you can modify to suit your needs.
+
+By using the CLI to add additional crews, you can efficiently build complex AI workflows that leverage multiple crews working together.
+
 ## Plot Flows

 Visualizing your AI workflows can provide valuable insights into the structure and execution paths of your flows. CrewAI offers a powerful visualization tool that allows you to generate interactive plots of your flows, making it easier to understand and optimize your AI workflows.
````
````diff
@@ -614,7 +607,7 @@ CrewAI provides two convenient methods to generate plots of your flows:
 If you are working directly with a flow instance, you can generate a plot by calling the `plot()` method on your flow object. This method will create an HTML file containing the interactive plot of your flow.

-```python Code
+```python
 # Assuming you have a flow instance
 flow.plot("my_flow_plot")
 ```
````
````diff
@@ -637,13 +630,114 @@ The generated plot will display nodes representing the tasks in your flow, with
 By visualizing your flows, you can gain a clearer understanding of the workflow's structure, making it easier to debug, optimize, and communicate your AI processes to others.

-### Conclusion
-
-Plotting your flows is a powerful feature of CrewAI that enhances your ability to design and manage complex AI workflows. Whether you choose to use the `plot()` method or the command line, generating plots will provide you with a visual representation of your workflows, aiding in both development and presentation.
+## Advanced
+
+In this section, we explore more complex use cases of CrewAI Flows, starting with a self-evaluation loop. This pattern is crucial for developing AI systems that can iteratively improve their outputs through feedback.
+
+### 1) Self-Evaluation Loop
+
+The self-evaluation loop is a powerful pattern that allows AI workflows to automatically assess and refine their outputs. This example demonstrates how to set up a flow that generates content, evaluates it, and iterates based on feedback until the desired quality is achieved.
+
+#### Overview
+
+The self-evaluation loop involves two main Crews:
+
+1. **ShakespeareanXPostCrew**: Generates a Shakespearean-style post on a given topic.
+2. **XPostReviewCrew**: Evaluates the generated post, providing feedback on its validity and quality.
+
+The process iterates until the post meets the criteria or a maximum retry limit is reached. This approach ensures high-quality outputs through iterative refinement.
+
+#### Importance
+
+This pattern is essential for building robust AI systems that can adapt and improve over time. By automating the evaluation and feedback loop, developers can ensure that their AI workflows produce reliable and high-quality results.
+
+#### Main Code Highlights
+
+Below is the `main.py` file for the self-evaluation loop flow:
+
+```python
+from typing import Optional
+
+from crewai.flow.flow import Flow, listen, router, start
+from pydantic import BaseModel
+
+from self_evaluation_loop_flow.crews.shakespeare_crew.shakespeare_crew import (
+    ShakespeareanXPostCrew,
+)
+from self_evaluation_loop_flow.crews.x_post_review_crew.x_post_review_crew import (
+    XPostReviewCrew,
+)
+
+class ShakespeareXPostFlowState(BaseModel):
+    x_post: str = ""
+    feedback: Optional[str] = None
+    valid: bool = False
+    retry_count: int = 0
+
+class ShakespeareXPostFlow(Flow[ShakespeareXPostFlowState]):
+
+    @start("retry")
+    def generate_shakespeare_x_post(self):
+        print("Generating Shakespearean X post")
+        topic = "Flying cars"
+        result = (
+            ShakespeareanXPostCrew()
+            .crew()
+            .kickoff(inputs={"topic": topic, "feedback": self.state.feedback})
+        )
+        print("X post generated", result.raw)
+        self.state.x_post = result.raw
+
+    @router(generate_shakespeare_x_post)
+    def evaluate_x_post(self):
+        if self.state.retry_count > 3:
+            return "max_retry_exceeded"
+        result = XPostReviewCrew().crew().kickoff(inputs={"x_post": self.state.x_post})
+        self.state.valid = result["valid"]
+        self.state.feedback = result["feedback"]
+        print("valid", self.state.valid)
+        print("feedback", self.state.feedback)
+        self.state.retry_count += 1
+        if self.state.valid:
+            return "complete"
+        return "retry"
+
+    @listen("complete")
+    def save_result(self):
+        print("X post is valid")
+        print("X post:", self.state.x_post)
+        with open("x_post.txt", "w") as file:
+            file.write(self.state.x_post)
+
+    @listen("max_retry_exceeded")
+    def max_retry_exceeded_exit(self):
+        print("Max retry count exceeded")
+        print("X post:", self.state.x_post)
+        print("Feedback:", self.state.feedback)
+
+def kickoff():
+    shakespeare_flow = ShakespeareXPostFlow()
+    shakespeare_flow.kickoff()
+
+def plot():
+    shakespeare_flow = ShakespeareXPostFlow()
+    shakespeare_flow.plot()
+
+if __name__ == "__main__":
+    kickoff()
+```
+
+#### Code Highlights
+
+- **Retry Mechanism**: The flow uses a retry mechanism to regenerate the post if it doesn't meet the criteria, up to a maximum of three retries.
+- **Feedback Loop**: Feedback from the `XPostReviewCrew` is used to refine the post iteratively.
+- **State Management**: The flow maintains state using a Pydantic model, ensuring type safety and clarity.
+
+For a complete example and further details, please refer to the [Self Evaluation Loop Flow repository](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow).

 ## Next Steps

-If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are four specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:
+If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are five specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:

 1. **Email Auto Responder Flow**: This example demonstrates an infinite loop where a background job continually runs to automate email responses. It's a great use case for tasks that need to be performed repeatedly without manual intervention. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/email_auto_responder_flow)

@@ -653,6 +747,8 @@ If you're interested in exploring additional examples of flows, we have a variet
 4. **Meeting Assistant Flow**: This flow demonstrates how to broadcast one event to trigger multiple follow-up actions. For instance, after a meeting is completed, the flow can update a Trello board, send a Slack message, and save the results. It's a great example of handling multiple outcomes from a single event, making it ideal for comprehensive task management and notification systems. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/meeting_assistant_flow)

+5. **Self Evaluation Loop Flow**: This flow demonstrates a self-evaluation loop where AI workflows automatically assess and refine their outputs through feedback. It involves generating content, evaluating it, and iterating until the desired quality is achieved. This pattern is crucial for developing robust AI systems that can adapt and improve over time. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow)
+
 By exploring these examples, you can gain insights into how to leverage CrewAI Flows for various use cases, from automating repetitive tasks to managing complex, multi-step processes with dynamic decision-making and human feedback.

 Also, check out our YouTube video on how to use flows in CrewAI below!
````
docs/concepts/knowledge.mdx (new file, 75 lines)

````diff
@@ -0,0 +1,75 @@
+---
+title: Knowledge
+description: What is knowledge in CrewAI and how to use it.
+icon: book
+---
+
+# Using Knowledge in CrewAI
+
+## Introduction
+
+The Knowledge class in CrewAI provides a powerful way to manage and query knowledge sources for your AI agents. This guide will show you how to implement knowledge management in your CrewAI projects.
+Additionally, we have specific tools for generate knowledge sources for strings, text files, PDF's, and Spreadsheets. You can expand on any source type by extending the `KnowledgeSource` class.
+
+## Basic Implementation
+
+Here's a simple example of how to use the Knowledge class:
+
+```python
+from crewai import Agent, Task, Crew, Process, LLM
+from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
+
+# Create a knowledge source
+content = "Users name is John. He is 30 years old and lives in San Francisco."
+string_source = StringKnowledgeSource(
+    content=content, metadata={"preference": "personal"}
+)
+
+llm = LLM(model="gpt-4o-mini", temperature=0)
+# Create an agent with the knowledge store
+agent = Agent(
+    role="About User",
+    goal="You know everything about the user.",
+    backstory="""You are a master at understanding people and their preferences.""",
+    verbose=True,
+    allow_delegation=False,
+    llm=llm,
+)
+task = Task(
+    description="Answer the following questions about the user: {question}",
+    expected_output="An answer to the question.",
+    agent=agent,
+)
+
+crew = Crew(
+    agents=[agent],
+    tasks=[task],
+    verbose=True,
+    process=Process.sequential,
+    knowledge={"sources": [string_source], "metadata": {"preference": "personal"}}, # Enable knowledge by adding the sources here. You can also add more sources to the sources list.
+)
+
+result = crew.kickoff(inputs={"question": "What city does John live in and how old is he?"})
+```
+
+## Embedder Configuration
+
+You can also configure the embedder for the knowledge store. This is useful if you want to use a different embedder for the knowledge store than the one used for the agents.
+
+```python
+...
+string_source = StringKnowledgeSource(
+    content="Users name is John. He is 30 years old and lives in San Francisco.",
+    metadata={"preference": "personal"}
+)
+crew = Crew(
+    ...
+    knowledge={
+        "sources": [string_source],
+        "metadata": {"preference": "personal"},
+        "embedder_config": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
+    },
+)
+```
````
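The inline comment in the basic example notes that more sources can be added to the `sources` list. A brief sketch of that, with invented content strings and metadata values:

```python
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

# Multiple sources can share one knowledge store; metadata tags each source.
facts = StringKnowledgeSource(
    content="John's favorite color is blue.",      # invented example
    metadata={"preference": "personal"},
)
work = StringKnowledgeSource(
    content="John works as a software engineer.",  # invented example
    metadata={"preference": "professional"},
)

knowledge_config = {
    "sources": [facts, work],
    "metadata": {"preference": "personal"},
}
# Passed as Crew(..., knowledge=knowledge_config), exactly as in the example above.
```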
@@ -25,43 +25,141 @@ By default, CrewAI uses the `gpt-4o-mini` model. It uses environment variables i
- `OPENAI_API_BASE`
- `OPENAI_API_KEY`

### 2. Updating YAML files

You can update the `agents.yml` file to refer to the LLM you want to use:

```yaml Code
researcher:
  role: Research Specialist
  goal: Conduct comprehensive research and analysis to gather relevant information,
    synthesize findings, and produce well-documented insights.
  backstory: A dedicated research professional with years of experience in academic
    investigation, literature review, and data analysis, known for thorough and
    methodical approaches to complex research questions.
  verbose: true
  llm: openai/gpt-4o
  # llm: azure/gpt-4o-mini
  # llm: gemini/gemini-pro
  # llm: anthropic/claude-3-5-sonnet-20240620
  # llm: bedrock/anthropic.claude-3-sonnet-20240229-v1:0
  # llm: mistral/mistral-large-latest
  # llm: ollama/llama3:70b
  # llm: groq/llama-3.2-90b-vision-preview
  # llm: watsonx/meta-llama/llama-3-1-70b-instruct
  # llm: nvidia_nim/meta/llama3-70b-instruct
  # llm: sambanova/Meta-Llama-3.1-8B-Instruct
  # ...
```

Keep in mind that you will need to set certain ENV vars (depending on the model you are using) to provide the credentials, or set a custom LLM object as described below. Here are some of the required ENV vars for some of the LLM integrations:

<AccordionGroup>
<Accordion title="OpenAI">
```python Code
OPENAI_API_KEY=<your-api-key>
OPENAI_API_BASE=<optional-custom-base-url> # OPTIONAL
OPENAI_MODEL_NAME=<openai-model-name>
OPENAI_ORGANIZATION=<your-org-id> # OPTIONAL
```
</Accordion>

<Accordion title="Anthropic">
```python Code
ANTHROPIC_API_KEY=<your-api-key>
```
</Accordion>

<Accordion title="Google">
```python Code
GEMINI_API_KEY=<your-api-key>
```
</Accordion>

<Accordion title="Azure">
```python Code
AZURE_API_KEY=<your-api-key> # "my-azure-api-key"
AZURE_API_BASE=<your-resource-url> # "https://example-endpoint.openai.azure.com"
AZURE_API_VERSION=<api-version> # "2023-05-15"
AZURE_AD_TOKEN=<your-azure-ad-token> # Optional
AZURE_API_TYPE=<your-azure-api-type> # Optional
```
</Accordion>

<Accordion title="AWS Bedrock">
```python Code
AWS_ACCESS_KEY_ID=<your-access-key>
AWS_SECRET_ACCESS_KEY=<your-secret-key>
AWS_DEFAULT_REGION=<your-region>
```
</Accordion>

<Accordion title="Mistral">
```python Code
MISTRAL_API_KEY=<your-api-key>
```
</Accordion>

<Accordion title="Groq">
```python Code
GROQ_API_KEY=<your-api-key>
```
</Accordion>

<Accordion title="IBM watsonx.ai">
```python Code
WATSONX_URL=<your-url>                      # (required) Base URL of your WatsonX instance
WATSONX_APIKEY=<your-apikey>                # (required) IBM cloud API key
WATSONX_TOKEN=<your-token>                  # (required) IAM auth token (alternative to APIKEY)
WATSONX_PROJECT_ID=<your-project-id>        # (optional) Project ID of your WatsonX instance
WATSONX_DEPLOYMENT_SPACE_ID=<your-space-id> # (optional) ID of deployment space for deployed models
```
</Accordion>
</AccordionGroup>
### 3. Custom LLM Objects

Pass a custom LLM implementation or object from another library. See below for examples.

<Tabs>
<Tab title="String Identifier">
```python Code
agent = Agent(llm="gpt-4o", ...)
```
</Tab>

<Tab title="LLM Instance">

List of [more providers](https://docs.litellm.ai/docs/providers).

```python Code
from crewai import LLM

llm = LLM(model="gpt-4", temperature=0.7)
agent = Agent(llm=llm, ...)
```
</Tab>
</Tabs>

## Connecting to OpenAI-Compatible LLMs

You can connect to OpenAI-compatible LLMs using either environment variables or by setting specific attributes on the LLM class:

<Tabs>
<Tab title="Using Environment Variables">
```python Code
import os

os.environ["OPENAI_API_KEY"] = "your-api-key"
os.environ["OPENAI_API_BASE"] = "https://api.your-provider.com/v1"
```
</Tab>

<Tab title="Using LLM Class Attributes">
```python Code
from crewai import LLM

llm = LLM(
    model="custom-model-name",
    api_key="your-api-key",
@@ -69,6 +167,8 @@ llm = LLM(
)
agent = Agent(llm=llm, ...)
```
</Tab>
</Tabs>

## LLM Configuration Options
@@ -95,9 +195,14 @@ When configuring an LLM for your agent, you have access to a wide range of param
| **api_key** | `str` | Your API key for authentication. |

These are examples of how to configure LLMs for your agent.

<AccordionGroup>
<Accordion title="OpenAI">

```python Code
from crewai import LLM

llm = LLM(
    model="gpt-4",
    temperature=0.8,
@@ -112,26 +217,166 @@ llm = LLM(
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Cerebras">

```python Code
from crewai import LLM

llm = LLM(
    model="cerebras/llama-3.1-70b",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Ollama (Local LLMs)">

CrewAI supports using Ollama for running open-source models locally:

1. Install Ollama: [ollama.ai](https://ollama.ai/)
2. Run a model: `ollama run llama2`
3. Configure agent:

```python Code
from crewai import LLM

agent = Agent(
    llm=LLM(
        model="ollama/llama3.1",
        base_url="http://localhost:11434"
    ),
    ...
)
```
</Accordion>

<Accordion title="Groq">

```python Code
from crewai import LLM

llm = LLM(
    model="groq/llama3-8b-8192",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Anthropic">

```python Code
from crewai import LLM

llm = LLM(
    model="anthropic/claude-3-5-sonnet-20241022",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Fireworks AI">
```python Code
from crewai import LLM

llm = LLM(
    model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Gemini">

```python Code
from crewai import LLM

llm = LLM(
    model="gemini/gemini-1.5-pro-002",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Perplexity AI (pplx-api)">

```python Code
from crewai import LLM

llm = LLM(
    model="perplexity/mistral-7b-instruct",
    base_url="https://api.perplexity.ai/v1",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="IBM watsonx.ai">

You can use IBM Watson by setting the following ENV vars:

```python Code
WATSONX_URL=<your-url>
WATSONX_APIKEY=<your-apikey>
WATSONX_PROJECT_ID=<your-project-id>
```

You can then define your agents' LLMs by updating the `agents.yml`:

```yaml Code
researcher:
  role: Research Specialist
  goal: Conduct comprehensive research and analysis to gather relevant information,
    synthesize findings, and produce well-documented insights.
  backstory: A dedicated research professional with years of experience in academic
    investigation, literature review, and data analysis, known for thorough and
    methodical approaches to complex research questions.
  verbose: true
  llm: watsonx/meta-llama/llama-3-1-70b-instruct
```

You can also set up agents more dynamically with a base-level LLM instance, like below:

```python Code
from crewai import LLM

llm = LLM(
    model="watsonx/ibm/granite-13b-chat-v2",
    base_url="https://api.watsonx.ai/v1",
    api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>

<Accordion title="Hugging Face">

```python Code
from crewai import LLM

llm = LLM(
    model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
    api_key="your-api-key-here",
    base_url="your_api_endpoint"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
</AccordionGroup>

## Changing the Base API URL

You can change the base API URL for any LLM provider by setting the `base_url` parameter:

```python Code
from crewai import LLM

llm = LLM(
    model="custom-model-name",
    base_url="https://api.your-provider.com/v1",
@@ -18,6 +18,7 @@ reason, and learn from past interactions.
| **Long-Term Memory** | Preserves valuable insights and learnings from past executions, allowing agents to build and refine their knowledge over time. |
| **Entity Memory** | Captures and organizes information about entities (people, places, concepts) encountered during tasks, facilitating deeper understanding and relationship mapping. Uses `RAG` for storing entity information. |
| **Contextual Memory** | Maintains the context of interactions by combining `ShortTermMemory`, `LongTermMemory`, and `EntityMemory`, aiding in the coherence and relevance of agent responses over a sequence of tasks or a conversation. |
| **User Memory** | Stores user-specific information and preferences, enhancing personalization and user experience. |

## How Memory Systems Empower Agents
@@ -34,7 +35,7 @@ By default, the memory system is disabled, and you can ensure it is active by se
The memory will use OpenAI embeddings by default, but you can change it by setting `embedder` to a different model.
It's also possible to initialize the memory with your own instance.

The 'embedder' only applies to **Short-Term Memory**, which uses Chroma for RAG.
The **Long-Term Memory** uses SQLite3 to store task results. Currently, there is no way to override these storage implementations.
The data storage files are saved into a platform-specific location found using the appdirs package,
and the name of the project can be overridden using the **CREWAI_STORAGE_DIR** environment variable, as in the sketch below.
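
A minimal sketch of overriding the storage directory name (the value shown is an arbitrary example; set it before the crew is created):

```python Code
import os

# Override the project directory name used for the platform-specific
# storage location resolved via appdirs.
os.environ["CREWAI_STORAGE_DIR"] = "my_crew_storage"
```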
@@ -92,6 +93,47 @@ my_crew = Crew(
)
```

## Integrating Mem0 for Enhanced User Memory

[Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.

To include user-specific memory you can get your API key [here](https://app.mem0.ai/dashboard/api-keys) and refer to the [docs](https://docs.mem0.ai/platform/quickstart#4-1-create-memories) for adding user preferences.

```python Code
import os
from crewai import Crew, Process
from mem0 import MemoryClient

# Set environment variables for Mem0
os.environ["MEM0_API_KEY"] = "m0-xx"

# Step 1: Record preferences based on past conversation or user input
client = MemoryClient()
messages = [
    {"role": "user", "content": "Hi there! I'm planning a vacation and could use some advice."},
    {"role": "assistant", "content": "Hello! I'd be happy to help with your vacation planning. What kind of destination do you prefer?"},
    {"role": "user", "content": "I am more of a beach person than a mountain person."},
    {"role": "assistant", "content": "That's interesting. Do you like hotels or Airbnb?"},
    {"role": "user", "content": "I like Airbnb more."},
]
client.add(messages, user_id="john")

# Step 2: Create a Crew with User Memory

crew = Crew(
    agents=[...],
    tasks=[...],
    verbose=True,
    process=Process.sequential,
    memory=True,
    memory_config={
        "provider": "mem0",
        "config": {"user_id": "john"},
    },
)
```

## Additional Embedding Providers
@@ -113,6 +155,42 @@ my_crew = Crew(
    }
)
```
Alternatively, you can directly pass the OpenAIEmbeddingFunction to the embedder parameter.

Example:
```python Code
import os

from crewai import Crew, Agent, Task, Process
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder=OpenAIEmbeddingFunction(api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"),
)
```

### Using Ollama embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "ollama",
        "config": {
            "model": "mxbai-embed-large"
        }
    }
)
```

### Using Google AI embeddings
@@ -128,9 +206,8 @@ my_crew = Crew(
    embedder={
        "provider": "google",
        "config": {
            "api_key": "<YOUR_API_KEY>",
            "model_name": "<model_name>"
        }
    }
)
@@ -139,6 +216,7 @@ my_crew = Crew(
### Using Azure OpenAI embeddings

```python Code
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
@@ -147,36 +225,20 @@ my_crew = Crew(
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder=OpenAIEmbeddingFunction(
        api_key="YOUR_API_KEY",
        api_base="YOUR_API_BASE_PATH",
        api_type="azure",
        api_version="YOUR_API_VERSION",
        model_name="text-embedding-3-small"
    )
)
```

### Using Vertex AI embeddings

```python Code
from chromadb.utils.embedding_functions import GoogleVertexEmbeddingFunction
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
@@ -185,12 +247,12 @@ my_crew = Crew(
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder=GoogleVertexEmbeddingFunction(
        project_id="YOUR_PROJECT_ID",
        region="YOUR_REGION",
        api_key="YOUR_API_KEY",
        model_name="textembedding-gecko"
    )
)
```
@@ -208,8 +270,52 @@ my_crew = Crew(
    embedder={
        "provider": "cohere",
        "config": {
            "api_key": "YOUR_API_KEY",
            "model_name": "<model_name>"
        }
    }
)
```

### Using HuggingFace embeddings

```python Code
from crewai import Crew, Agent, Task, Process

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "huggingface",
        "config": {
            "api_url": "<api_url>",
        }
    }
)
```

### Using Watson embeddings

```python Code
from crewai import Crew, Agent, Task, Process

# Note: Ensure you have installed and imported `ibm_watsonx_ai` for Watson embeddings to work.

my_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    memory=True,
    verbose=True,
    embedder={
        "provider": "watson",
        "config": {
            "model": "<model_name>",
            "api_url": "<api_url>",
            "api_key": "<YOUR_API_KEY>",
            "project_id": "<YOUR_PROJECT_ID>",
        }
    }
)
@@ -5,6 +5,7 @@ icon: screwdriver-wrench
---

## Introduction

CrewAI tools empower agents with capabilities ranging from web searching and data analysis to collaboration and delegating tasks among coworkers.
This documentation outlines how to create, integrate, and leverage these tools within the CrewAI framework, including a new focus on collaboration tools.
@@ -104,7 +105,7 @@ crew.kickoff()
Here is a list of the available tools and their descriptions:

| Tool                    | Description                                                                                     |
| :---------------------- | :---------------------------------------------------------------------------------------------- |
| **BrowserbaseLoadTool** | A tool for interacting with and extracting data from web browsers.                              |
| **CodeDocsSearchTool**  | A RAG tool optimized for searching through code documentation and related technical documents.  |
| **CodeInterpreterTool** | A tool for interpreting Python code.                                                             |
@@ -139,21 +140,17 @@ Here is a list of the available tools and their descriptions:
## Creating your own Tools

<Tip>
Developers can craft `custom tools` tailored for their agent’s needs or utilize pre-built options.
</Tip>

There are two main ways to create a CrewAI tool:

### Subclassing `BaseTool`

```python Code
from crewai.tools import BaseTool


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
@@ -167,7 +164,7 @@ class MyCustomTool(BaseTool):
### Utilizing the `tool` Decorator

```python Code
from crewai.tools import tool

@tool("Name of my tool")
def my_tool(question: str) -> str:
    """Clear description for what this tool is useful for, your agent will need this information to use it."""
@@ -178,11 +175,13 @@ def my_tool(question: str) -> str:
### Custom Caching Mechanism

<Tip>
Tools can optionally implement a `cache_function` to fine-tune caching behavior. This function determines when to cache results based on specific conditions, offering granular control over caching logic.
</Tip>

```python Code
from crewai.tools import tool

@tool
def multiplication_tool(first_number: int, second_number: int) -> str:
@@ -10,24 +10,23 @@ This guide provides detailed instructions on creating custom tools for the CrewA
incorporating the latest functionalities such as tool delegation, error handling, and dynamic tool calling. It also highlights the importance of collaboration tools,
enabling agents to perform a wide range of actions.

### Subclassing `BaseTool`

To create a personalized tool, inherit from `BaseTool` and define the necessary attributes, including the `args_schema` for input validation, and the `_run` method.

```python Code
from typing import Type

from crewai.tools import BaseTool
from pydantic import BaseModel, Field


class MyToolInput(BaseModel):
    """Input schema for MyCustomTool."""
    argument: str = Field(..., description="Description of the argument.")


class MyCustomTool(BaseTool):
    name: str = "Name of my tool"
    description: str = "What this tool does. It's vital for effective utilization."
    args_schema: Type[BaseModel] = MyToolInput

    def _run(self, argument: str) -> str:
        # Your tool's logic here
@@ -40,7 +39,7 @@ Alternatively, you can use the tool decorator `@tool`. This approach allows you
offering a concise and efficient way to create specialized tools tailored to your needs.

```python Code
from crewai.tools import tool

@tool("Tool Name")
def my_simple_tool(question: str) -> str:
@@ -330,4 +330,4 @@ This will clear the crew's memory, allowing for a fresh start.

## Deploying Your Project

The easiest way to deploy your crew is through [CrewAI Enterprise](http://app.crewai.com/), where you can deploy your crew in a few clicks.
@@ -34,6 +34,7 @@ from crewai_tools import GithubSearchTool
# Initialize the tool for semantic searches within a specific GitHub repository
tool = GithubSearchTool(
    github_repo='https://github.com/example/repo',
    gh_token='your_github_personal_access_token',
    content_types=['code', 'issue'] # Options: code, repo, pr, issue
)
@@ -41,6 +42,7 @@ tool = GithubSearchTool(

# Initialize the tool without a specific repository, so the agent can search any repository it learns about during its execution
tool = GithubSearchTool(
    gh_token='your_github_personal_access_token',
    content_types=['code', 'issue'] # Options: code, repo, pr, issue
)
```
@@ -48,6 +50,7 @@ tool = GithubSearchTool(
## Arguments

- `github_repo` : The URL of the GitHub repository where the search will be conducted. This is a mandatory field and specifies the target repository for your search.
- `gh_token` : Your GitHub Personal Access Token (PAT) required for authentication. You can create one in your GitHub account settings under Developer Settings > Personal Access Tokens.
- `content_types` : Specifies the types of content to include in your search. You must provide a list of content types from the following options: `code` for searching within the code,
  `repo` for searching within the repository's general information, `pr` for searching within pull requests, and `issue` for searching within issues.
  This field is mandatory and allows tailoring the search to specific content types within the GitHub repository.
@@ -78,4 +81,3 @@ tool = GithubSearchTool(
),
)
)
@@ -11,10 +11,10 @@ icon: eye
This tool is used to extract text from images. When passed to the agent it will extract the text from the image and then use it to generate a response, report or any other output.
The URL or the PATH of the image should be passed to the Agent.

## Installation

Install the crewai_tools package

```shell
pip install 'crewai[tools]'
```
@@ -45,6 +45,5 @@ def researcher(self) -> Agent:
The VisionTool requires the following arguments:

| Argument           | Type     | Description                                                                       |
| :----------------- | :------- | :-------------------------------------------------------------------------------- |
| **image_path_url** | `string` | **Mandatory**. The path to the image file from which text needs to be extracted. |
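
A rough usage sketch (the agent fields are illustrative placeholders; `VisionTool` itself comes from `crewai_tools`):

```python Code
from crewai import Agent
from crewai_tools import VisionTool

# The tool extracts text from an image whose path or URL the agent
# supplies at run time.
vision_tool = VisionTool()

agent = Agent(
    role="Image Analyst",  # illustrative values
    goal="Extract and summarize text from images.",
    backstory="An assistant that reads documents from screenshots.",
    tools=[vision_tool],
)
```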

poetry.lock (generated)
@@ -1597,12 +1597,12 @@ files = [
google-auth = ">=2.14.1,<3.0.dev0"
googleapis-common-protos = ">=1.56.2,<2.0.dev0"
grpcio = [
    {version = ">=1.33.2,<2.0dev", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""},
    {version = ">=1.49.1,<2.0dev", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
]
grpcio-status = [
    {version = ">=1.33.2,<2.0.dev0", optional = true, markers = "python_version < \"3.11\" and extra == \"grpc\""},
    {version = ">=1.49.1,<2.0.dev0", optional = true, markers = "python_version >= \"3.11\" and extra == \"grpc\""},
]
proto-plus = ">=1.22.3,<2.0.0dev"
protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4.21.4 || >4.21.4,<4.21.5 || >4.21.5,<6.0.0.dev0"
@@ -4286,8 +4286,8 @@ files = [

[package.dependencies]
numpy = [
    {version = ">=1.22.4", markers = "python_version < \"3.11\""},
    {version = ">=1.23.2", markers = "python_version == \"3.11\""},
    {version = ">=1.26.0", markers = "python_version >= \"3.12\""},
]
python-dateutil = ">=2.8.2"
@@ -1,6 +1,6 @@
[project]
name = "crewai"
version = "0.80.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<=3.13"
@@ -16,19 +16,19 @@ dependencies = [
    "opentelemetry-exporter-otlp-proto-http>=1.22.0",
    "instructor>=1.3.3",
    "regex>=2024.9.11",
    "crewai-tools>=0.14.0",
    "click>=8.1.7",
    "python-dotenv>=1.0.0",
    "appdirs>=1.4.4",
    "jsonref>=1.1.0",
    "json-repair>=0.25.2",
    "auth0-python>=4.7.1",
    "litellm>=1.44.22",
    "pyvis>=0.3.2",
    "uv>=0.4.25",
    "tomli-w>=1.1.0",
    "tomli>=2.0.2",
    "chromadb>=0.5.18",
]

[project.urls]
@@ -37,8 +37,19 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = ["crewai-tools>=0.14.0"]
agentops = ["agentops>=0.3.0"]
fastembed = ["fastembed>=0.4.1"]
pdfplumber = [
    "pdfplumber>=0.11.4",
]
pandas = [
    "pandas>=2.2.3",
]
openpyxl = [
    "openpyxl>=3.1.5",
]
mem0 = ["mem0ai>=0.1.29"]

[tool.uv]
dev-dependencies = [
@@ -52,7 +63,7 @@ dev-dependencies = [
    "mkdocs-material-extensions>=1.3.1",
    "pillow>=10.2.0",
    "cairosvg>=2.7.1",
    "crewai-tools>=0.14.0",
    "pytest>=8.0.0",
    "pytest-vcr>=1.0.2",
    "python-dotenv>=1.0.0",
@@ -1,7 +1,9 @@
import warnings

from crewai.agent import Agent
from crewai.crew import Crew
from crewai.flow.flow import Flow
from crewai.knowledge.knowledge import Knowledge
from crewai.llm import LLM
from crewai.pipeline import Pipeline
from crewai.process import Process
@@ -14,5 +16,15 @@ warnings.filterwarnings(
    category=UserWarning,
    module="pydantic.main",
)
__version__ = "0.80.0"
__all__ = [
    "Agent",
    "Crew",
    "Process",
    "Task",
    "Pipeline",
    "Router",
    "LLM",
    "Flow",
    "Knowledge",
]
@@ -1,15 +1,18 @@
import os
import shutil
import subprocess
from typing import Any, List, Literal, Optional, Union

from pydantic import Field, InstanceOf, PrivateAttr, model_validator

from crewai.agents import CacheHandler
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.cli.constants import ENV_VARS
from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.tools import BaseTool
from crewai.tools.agent_tools.agent_tools import AgentTools
from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.token_counter_callback import TokenCalcHandler
@@ -49,6 +52,7 @@ class Agent(BaseAgent):
        role: The role of the agent.
        goal: The objective of the agent.
        backstory: The backstory of the agent.
        knowledge: The knowledge base of the agent.
        config: Dict representation of agent configuration.
        llm: The language model that will run the agent.
        function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
@@ -112,10 +116,19 @@ class Agent(BaseAgent):
        default=2,
        description="Maximum number of retries for an agent to execute a task when an error occurs.",
    )
    code_execution_mode: Literal["safe", "unsafe"] = Field(
        default="safe",
        description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
    )
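    # Example (illustrative, not part of this module): an agent opting into
    # direct host execution would set
    #   Agent(..., allow_code_execution=True, code_execution_mode="unsafe")
    # while the default "safe" mode runs generated code inside Docker.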

    @model_validator(mode="after")
    def post_init_setup(self):
        self.agent_ops_agent_name = self.role

        unnacepted_attributes = [
            "AWS_ACCESS_KEY_ID",
            "AWS_SECRET_ACCESS_KEY",
            "AWS_REGION_NAME",
        ]

        # Handle different cases for self.llm
        if isinstance(self.llm, str):
@@ -125,8 +138,12 @@ class Agent(BaseAgent):
            # If it's already an LLM instance, keep it as is
            pass
        elif self.llm is None:
            # Determine the model name from environment variables or use default
            model_name = (
                os.environ.get("OPENAI_MODEL_NAME")
                or os.environ.get("MODEL")
                or "gpt-4o-mini"
            )
            llm_params = {"model": model_name}

            api_base = os.environ.get("OPENAI_API_BASE") or os.environ.get(
@@ -135,9 +152,44 @@ class Agent(BaseAgent):
            if api_base:
                llm_params["base_url"] = api_base

            set_provider = model_name.split("/")[0] if "/" in model_name else "openai"

            # Iterate over all environment variables to find matching API keys or use defaults
            for provider, env_vars in ENV_VARS.items():
                if provider == set_provider:
                    for env_var in env_vars:
                        if env_var["key_name"] in unnacepted_attributes:
                            continue
                        # Check if the environment variable is set
                        if "key_name" in env_var:
                            env_value = os.environ.get(env_var["key_name"])
                            if env_value:
                                # Map key names containing "API_KEY" to "api_key"
                                key_name = (
                                    "api_key"
                                    if "API_KEY" in env_var["key_name"]
                                    else env_var["key_name"]
                                )
                                # Map key names containing "API_BASE" to "api_base"
                                key_name = (
                                    "api_base"
                                    if "API_BASE" in env_var["key_name"]
                                    else key_name
                                )
                                # Map key names containing "API_VERSION" to "api_version"
                                key_name = (
                                    "api_version"
                                    if "API_VERSION" in env_var["key_name"]
                                    else key_name
                                )
                                llm_params[key_name] = env_value
                        # Check for default values if the environment variable is not set
                        elif env_var.get("default", False):
                            for key, value in env_var.items():
                                if key not in ["prompt", "key_name", "default"]:
                                    # Only add default if the key is already set in os.environ
                                    if key in os.environ:
                                        llm_params[key] = value

            self.llm = LLM(**llm_params)
        else:
@@ -173,6 +225,9 @@ class Agent(BaseAgent):
        if not self.agent_executor:
            self._setup_agent_executor()

        if self.allow_code_execution:
            self._validate_docker_installation()

        return self

    def _setup_agent_executor(self):
@@ -184,7 +239,7 @@ class Agent(BaseAgent):
        self,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> str:
        """Execute a task with the agent.
@@ -208,14 +263,28 @@ class Agent(BaseAgent):

        if self.crew and self.crew.memory:
            contextual_memory = ContextualMemory(
                self.crew.memory_config,
                self.crew._short_term_memory,
                self.crew._long_term_memory,
                self.crew._entity_memory,
                self.crew._user_memory,
            )
            memory = contextual_memory.build_context_for_task(task, context)
            if memory.strip() != "":
                task_prompt += self.i18n.slice("memory").format(memory=memory)

        # Integrate the knowledge base
        if self.crew and self.crew.knowledge:
            knowledge_snippets = self.crew.knowledge.query([task.prompt()])
            valid_snippets = [
                result["context"]
                for result in knowledge_snippets
                if result and result.get("context")
            ]
            if valid_snippets:
                formatted_knowledge = "\n".join(valid_snippets)
                task_prompt += f"\n\nAdditional Information:\n{formatted_knowledge}"

        tools = tools or self.tools or []
        self.create_agent_executor(tools=tools, task=task)
@@ -251,7 +320,9 @@ class Agent(BaseAgent):

        return result

    def create_agent_executor(
        self, tools: Optional[List[BaseTool]] = None, task=None
    ) -> None:
        """Create an agent executor for the agent.

        Returns:
@@ -308,7 +379,9 @@ class Agent(BaseAgent):
        try:
            from crewai_tools import CodeInterpreterTool

            # Set the unsafe_mode based on the code_execution_mode attribute
            unsafe_mode = self.code_execution_mode == "unsafe"
            return [CodeInterpreterTool(unsafe_mode=unsafe_mode)]
        except ModuleNotFoundError:
            self._logger.log(
                "info", "Coding tools not available. Install crewai_tools. "
@@ -322,7 +395,7 @@ class Agent(BaseAgent):
        tools_list = []
        try:
            # tentatively try to import from crewai_tools import BaseTool as CrewAITool
            from crewai.tools import BaseTool as CrewAITool

            for tool in tools:
                if isinstance(tool, CrewAITool):
@@ -381,7 +454,7 @@ class Agent(BaseAgent):

        return description

    def _render_text_description_and_args(self, tools: List[BaseTool]) -> str:
        """Render the tool name, description, and args in plain text.

        Output will be in the format of:
@@ -394,20 +467,29 @@ class Agent(BaseAgent):
        """
        tool_strings = []
        for tool in tools:
            tool_strings.append(tool.description)

        return "\n".join(tool_strings)

    def _validate_docker_installation(self) -> None:
        """Check if Docker is installed and running."""
        if not shutil.which("docker"):
            raise RuntimeError(
                f"Docker is not installed. Please install Docker to use code execution with agent: {self.role}"
            )

        try:
            subprocess.run(
                ["docker", "info"],
                check=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
        except subprocess.CalledProcessError:
            raise RuntimeError(
                f"Docker is not running. Please start Docker to use code execution with agent: {self.role}"
            )

    @staticmethod
    def __tools_names(tools) -> str:
        return ", ".join([t.name for t in tools])
@@ -18,6 +18,7 @@ from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config
@@ -49,11 +50,11 @@ class BaseAgent(ABC, BaseModel):

    Methods:
        execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[BaseTool]] = None) -> str:
            Abstract method to execute a task.
        create_agent_executor(tools=None) -> None:
            Abstract method to create an agent executor.
        _parse_tools(tools: List[BaseTool]) -> List[Any]:
            Abstract method to parse tools.
        get_delegation_tools(agents: List["BaseAgent"]):
            Abstract method to set the agents task tools for handling delegation and question asking to other agents in crew.
@@ -105,7 +106,7 @@ class BaseAgent(ABC, BaseModel):
        default=False,
        description="Enable agent to delegate and ask questions among each other.",
    )
    tools: Optional[List[BaseTool]] = Field(
        default_factory=list, description="Tools at agents' disposal"
    )
    max_iter: Optional[int] = Field(
@@ -188,7 +189,7 @@ class BaseAgent(ABC, BaseModel):
        self,
        task: Any,
        context: Optional[str] = None,
        tools: Optional[List[BaseTool]] = None,
    ) -> str:
        pass

@@ -197,11 +198,11 @@ class BaseAgent(ABC, BaseModel):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def _parse_tools(self, tools: List[Any]) -> List[Any]:
|
def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[Any]:
|
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]:
|
||||||
"""Set the task tools that init BaseAgenTools class."""
|
"""Set the task tools that init BaseAgenTools class."""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|||||||
@@ -17,7 +17,7 @@ if TYPE_CHECKING:

 class CrewAgentExecutorMixin:
     crew: Optional["Crew"]
-    crew_agent: Optional["BaseAgent"]
+    agent: Optional["BaseAgent"]
     task: Optional["Task"]
     iterations: int
     have_forced_answer: bool

@@ -33,9 +33,9 @@ class CrewAgentExecutorMixin:
         """Create and save a short-term memory item if conditions are met."""
         if (
             self.crew
-            and self.crew_agent
+            and self.agent
             and self.task
-            and "Action: Delegate work to coworker" not in output.log
+            and "Action: Delegate work to coworker" not in output.text
         ):
             try:
                 if (

@@ -43,11 +43,11 @@ class CrewAgentExecutorMixin:
                     and self.crew._short_term_memory
                 ):
                     self.crew._short_term_memory.save(
-                        value=output.log,
+                        value=output.text,
                         metadata={
                             "observation": self.task.description,
                         },
-                        agent=self.crew_agent.role,
+                        agent=self.agent.role,
                     )
             except Exception as e:
                 print(f"Failed to add to short term memory: {e}")

@@ -61,18 +61,18 @@ class CrewAgentExecutorMixin:
             and self.crew._long_term_memory
             and self.crew._entity_memory
             and self.task
-            and self.crew_agent
+            and self.agent
         ):
             try:
-                ltm_agent = TaskEvaluator(self.crew_agent)
-                evaluation = ltm_agent.evaluate(self.task, output.log)
+                ltm_agent = TaskEvaluator(self.agent)
+                evaluation = ltm_agent.evaluate(self.task, output.text)

                 if isinstance(evaluation, ConverterError):
                     return

                 long_term_memory = LongTermMemoryItem(
                     task=self.task.description,
-                    agent=self.crew_agent.role,
+                    agent=self.agent.role,
                     quality=evaluation.quality,
                     datetime=str(time.time()),
                     expected_output=self.task.expected_output,
@@ -4,6 +4,7 @@ from crewai.types.usage_metrics import UsageMetrics

 class TokenProcess:
     total_tokens: int = 0
     prompt_tokens: int = 0
+    cached_prompt_tokens: int = 0
     completion_tokens: int = 0
     successful_requests: int = 0

@@ -15,6 +16,9 @@ class TokenProcess:
         self.completion_tokens = self.completion_tokens + tokens
         self.total_tokens = self.total_tokens + tokens

+    def sum_cached_prompt_tokens(self, tokens: int):
+        self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
+
     def sum_successful_requests(self, requests: int):
         self.successful_requests = self.successful_requests + requests

@@ -22,6 +26,7 @@ class TokenProcess:
         return UsageMetrics(
             total_tokens=self.total_tokens,
             prompt_tokens=self.prompt_tokens,
+            cached_prompt_tokens=self.cached_prompt_tokens,
             completion_tokens=self.completion_tokens,
             successful_requests=self.successful_requests,
         )
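Note: the new counter slots into the existing accumulate-then-summarize pattern. A usage sketch; `sum_prompt_tokens` and `get_summary` are assumed from the surrounding class since they are not visible in this hunk, and the token counts are illustrative:

    from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess

    process = TokenProcess()
    process.sum_prompt_tokens(120)        # assumed sibling of sum_cached_prompt_tokens
    process.sum_cached_prompt_tokens(80)  # portion of the prompt served from the provider cache
    process.sum_successful_requests(1)

    metrics = process.get_summary()       # assumed name of the UsageMetrics accessor shown above
    print(metrics.cached_prompt_tokens)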
@@ -2,6 +2,7 @@ import json
 import re
 from typing import Any, Dict, List, Union

+from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
 from crewai.agents.parser import (
     FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,

@@ -29,7 +30,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         llm: Any,
         task: Any,
         crew: Any,
-        agent: Any,
+        agent: BaseAgent,
         prompt: dict[str, str],
         max_iter: int,
         tools: List[Any],

@@ -103,7 +104,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):

         if self.crew and self.crew._train:
             self._handle_crew_training_output(formatted_answer)
+        self._create_short_term_memory(formatted_answer)
+        self._create_long_term_memory(formatted_answer)
         return {"output": formatted_answer.output}

     def _invoke_loop(self, formatted_answer=None):

@@ -115,6 +117,15 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
                 callbacks=self.callbacks,
             )

+            if answer is None or answer == "":
+                self._printer.print(
+                    content="Received None or empty response from LLM call.",
+                    color="red",
+                )
+                raise ValueError(
+                    "Invalid response from LLM call - None or empty."
+                )
+
             if not self.use_stop_words:
                 try:
                     self._format_answer(answer)

@@ -140,6 +151,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             if self._should_force_answer():
                 if self.have_forced_answer:
                     return AgentFinish(
+                        thought="",
                         output=self._i18n.errors(
                             "force_final_answer_error"
                         ).format(formatted_answer.text),

@@ -176,6 +188,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         return formatted_answer

     def _show_start_logs(self):
+        if self.agent is None:
+            raise ValueError("Agent cannot be None")
         if self.agent.verbose or (
             hasattr(self, "crew") and getattr(self.crew, "verbose", False)
         ):

@@ -188,6 +202,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         )

     def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
+        if self.agent is None:
+            raise ValueError("Agent cannot be None")
         if self.agent.verbose or (
             hasattr(self, "crew") and getattr(self.crew, "verbose", False)
         ):

@@ -306,7 +322,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         self, result: AgentFinish, human_feedback: str | None = None
     ) -> None:
         """Function to handle the process of the training data."""
-        agent_id = str(self.agent.id)
+        agent_id = str(self.agent.id)  # type: ignore

         # Load training data
         training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)

@@ -339,7 +355,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
             "initial_output": result.output,
             "human_feedback": human_feedback,
             "agent": agent_id,
-            "agent_role": self.agent.role,
+            "agent_role": self.agent.role,  # type: ignore
         }
         if self.crew is not None and hasattr(self.crew, "_train_iteration"):
             train_iteration = self.crew._train_iteration

@@ -370,4 +386,5 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
         return CrewAgentParser(agent=self.agent).parse(answer)

     def _format_msg(self, prompt: str, role: str = "user") -> Dict[str, str]:
+        prompt = prompt.rstrip()
         return {"role": role, "content": prompt}
@@ -1,6 +1,6 @@
 from typing import Any, Optional, Union

-from ..tools.cache_tools import CacheTools
+from ..tools.cache_tools.cache_tools import CacheTools
 from ..tools.tool_calling import InstructorToolCalling, ToolCalling
 from .cache.cache_handler import CacheHandler
new file: src/crewai/cli/add_crew_to_flow.py (70 lines)
@@ -0,0 +1,70 @@
+from pathlib import Path
+
+import click
+
+from crewai.cli.utils import copy_template
+
+
+def add_crew_to_flow(crew_name: str) -> None:
+    """Add a new crew to the current flow."""
+    # Check if pyproject.toml exists in the current directory
+    if not Path("pyproject.toml").exists():
+        print("This command must be run from the root of a flow project.")
+        raise click.ClickException(
+            "This command must be run from the root of a flow project."
+        )
+
+    # Determine the flow folder based on the current directory
+    flow_folder = Path.cwd()
+    crews_folder = flow_folder / "src" / flow_folder.name / "crews"
+
+    if not crews_folder.exists():
+        print("Crews folder does not exist in the current flow.")
+        raise click.ClickException("Crews folder does not exist in the current flow.")
+
+    # Create the crew within the flow's crews directory
+    create_embedded_crew(crew_name, parent_folder=crews_folder)
+
+    click.echo(
+        f"Crew {crew_name} added to the current flow successfully!",
+    )
+
+
+def create_embedded_crew(crew_name: str, parent_folder: Path) -> None:
+    """Create a new crew within an existing flow project."""
+    folder_name = crew_name.replace(" ", "_").replace("-", "_").lower()
+    class_name = crew_name.replace("_", " ").replace("-", " ").title().replace(" ", "")
+
+    crew_folder = parent_folder / folder_name
+
+    if crew_folder.exists():
+        if not click.confirm(
+            f"Crew {folder_name} already exists. Do you want to override it?"
+        ):
+            click.secho("Operation cancelled.", fg="yellow")
+            return
+        click.secho(f"Overriding crew {folder_name}...", fg="green", bold=True)
+    else:
+        click.secho(f"Creating crew {folder_name}...", fg="green", bold=True)
+        crew_folder.mkdir(parents=True)
+
+    # Create config and crew.py files
+    config_folder = crew_folder / "config"
+    config_folder.mkdir(exist_ok=True)
+
+    templates_dir = Path(__file__).parent / "templates" / "crew"
+    config_template_files = ["agents.yaml", "tasks.yaml"]
+    crew_template_file = f"{folder_name}.py"  # Updated file name
+
+    for file_name in config_template_files:
+        src_file = templates_dir / "config" / file_name
+        dst_file = config_folder / file_name
+        copy_template(src_file, dst_file, crew_name, class_name, folder_name)
+
+    src_file = templates_dir / "crew.py"
+    dst_file = crew_folder / crew_template_file
+    copy_template(src_file, dst_file, crew_name, class_name, folder_name)
+
+    click.secho(
+        f"Crew {crew_name} added to the flow successfully!", fg="green", bold=True
+    )
@@ -34,7 +34,9 @@ class AuthenticationCommand:
             "scope": "openid",
             "audience": AUTH0_AUDIENCE,
         }
-        response = requests.post(url=self.DEVICE_CODE_URL, data=device_code_payload)
+        response = requests.post(
+            url=self.DEVICE_CODE_URL, data=device_code_payload, timeout=20
+        )
         response.raise_for_status()
         return response.json()

@@ -54,7 +56,7 @@ class AuthenticationCommand:

         attempts = 0
         while True and attempts < 5:
-            response = requests.post(self.TOKEN_URL, data=token_payload)
+            response = requests.post(self.TOKEN_URL, data=token_payload, timeout=30)
             token_data = response.json()

             if response.status_code == 200:
@@ -3,6 +3,7 @@ from typing import Optional
 import click
 import pkg_resources

+from crewai.cli.add_crew_to_flow import add_crew_to_flow
 from crewai.cli.create_crew import create_crew
 from crewai.cli.create_flow import create_flow
 from crewai.cli.create_pipeline import create_pipeline

@@ -14,11 +15,11 @@ from .authentication.main import AuthenticationCommand
 from .deploy.main import DeployCommand
 from .evaluate_crew import evaluate_crew
 from .install_crew import install_crew
+from .kickoff_flow import kickoff_flow
 from .plot_flow import plot_flow
 from .replay_from_task import replay_task_command
 from .reset_memories_command import reset_memories_command
 from .run_crew import run_crew
-from .run_flow import run_flow
 from .tools.main import ToolCommand
 from .train_crew import train_crew
 from .update_crew import update_crew

@@ -32,10 +33,12 @@ def crewai():
 @crewai.command()
 @click.argument("type", type=click.Choice(["crew", "pipeline", "flow"]))
 @click.argument("name")
-def create(type, name):
+@click.option("--provider", type=str, help="The provider to use for the crew")
+@click.option("--skip_provider", is_flag=True, help="Skip provider validation")
+def create(type, name, provider, skip_provider=False):
     """Create a new crew, pipeline, or flow."""
     if type == "crew":
-        create_crew(name)
+        create_crew(name, provider, skip_provider)
     elif type == "pipeline":
         create_pipeline(name)
     elif type == "flow":

@@ -133,6 +136,7 @@ def log_tasks_outputs() -> None:
 @click.option("-l", "--long", is_flag=True, help="Reset LONG TERM memory")
 @click.option("-s", "--short", is_flag=True, help="Reset SHORT TERM memory")
 @click.option("-e", "--entities", is_flag=True, help="Reset ENTITIES memory")
+@click.option("-kn", "--knowledge", is_flag=True, help="Reset KNOWLEDGE storage")
 @click.option(
     "-k",
     "--kickoff-outputs",

@@ -140,17 +144,24 @@ def log_tasks_outputs() -> None:
     help="Reset LATEST KICKOFF TASK OUTPUTS",
 )
 @click.option("-a", "--all", is_flag=True, help="Reset ALL memories")
-def reset_memories(long, short, entities, kickoff_outputs, all):
+def reset_memories(
+    long: bool,
+    short: bool,
+    entities: bool,
+    knowledge: bool,
+    kickoff_outputs: bool,
+    all: bool,
+) -> None:
     """
     Reset the crew memories (long, short, entity, latest_crew_kickoff_ouputs). This will delete all the data saved.
     """
     try:
-        if not all and not (long or short or entities or kickoff_outputs):
+        if not all and not (long or short or entities or knowledge or kickoff_outputs):
             click.echo(
                 "Please specify at least one memory type to reset using the appropriate flags."
             )
             return
-        reset_memories_command(long, short, entities, kickoff_outputs, all)
+        reset_memories_command(long, short, entities, knowledge, kickoff_outputs, all)
     except Exception as e:
         click.echo(f"An error occurred while resetting memories: {e}", err=True)

@@ -176,10 +187,16 @@ def test(n_iterations: int, model: str):
     evaluate_crew(n_iterations, model)


-@crewai.command()
-def install():
+@crewai.command(
+    context_settings=dict(
+        ignore_unknown_options=True,
+        allow_extra_args=True,
+    )
+)
+@click.pass_context
+def install(context):
     """Install the Crew."""
-    install_crew()
+    install_crew(context.args)


 @crewai.command()

@@ -304,11 +321,11 @@ def flow():
     pass


-@flow.command(name="run")
+@flow.command(name="kickoff")
 def flow_run():
-    """Run the Flow."""
+    """Kickoff the Flow."""
     click.echo("Running the Flow")
-    run_flow()
+    kickoff_flow()


 @flow.command(name="plot")

@@ -318,5 +335,13 @@ def flow_plot():
     plot_flow()


+@flow.command(name="add-crew")
+@click.argument("crew_name")
+def flow_add_crew(crew_name):
+    """Add a crew to an existing flow."""
+    click.echo(f"Adding crew {crew_name} to the flow")
+    add_crew_to_flow(crew_name)
+
+
 if __name__ == "__main__":
     crewai()
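Note: one way to exercise the reshaped CLI without a shell is click's test runner. A sketch, assuming the group is importable as below and that click derives the `reset-memories` command name from the function name:

    from click.testing import CliRunner

    from crewai.cli.cli import crewai  # assumed module path for the group defined above

    runner = CliRunner()
    runner.invoke(crewai, ["create", "crew", "my_crew", "--skip_provider"])  # new provider flags
    runner.invoke(crewai, ["reset-memories", "--knowledge"])                 # new knowledge reset
    runner.invoke(crewai, ["flow", "kickoff"])                               # renamed from `flow run`
    runner.invoke(crewai, ["flow", "add-crew", "support_crew"])              # new subcommand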
new file: src/crewai/cli/config.py (38 lines)
@@ -0,0 +1,38 @@
+import json
+from pathlib import Path
+from pydantic import BaseModel, Field
+from typing import Optional
+
+DEFAULT_CONFIG_PATH = Path.home() / ".config" / "crewai" / "settings.json"
+
+class Settings(BaseModel):
+    tool_repository_username: Optional[str] = Field(None, description="Username for interacting with the Tool Repository")
+    tool_repository_password: Optional[str] = Field(None, description="Password for interacting with the Tool Repository")
+    config_path: Path = Field(default=DEFAULT_CONFIG_PATH, exclude=True)
+
+    def __init__(self, config_path: Path = DEFAULT_CONFIG_PATH, **data):
+        """Load Settings from config path"""
+        config_path.parent.mkdir(parents=True, exist_ok=True)
+
+        file_data = {}
+        if config_path.is_file():
+            try:
+                with config_path.open("r") as f:
+                    file_data = json.load(f)
+            except json.JSONDecodeError:
+                file_data = {}
+
+        merged_data = {**file_data, **data}
+        super().__init__(config_path=config_path, **merged_data)
+
+    def dump(self) -> None:
+        """Save current settings to settings.json"""
+        if self.config_path.is_file():
+            with self.config_path.open("r") as f:
+                existing_data = json.load(f)
+        else:
+            existing_data = {}
+
+        updated_data = {**existing_data, **self.model_dump(exclude_unset=True)}
+        with self.config_path.open("w") as f:
+            json.dump(updated_data, f, indent=4)
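Note: `Settings` merges three layers — field defaults, whatever is already in settings.json, and constructor overrides — and `dump` writes only explicitly-set fields back. A brief usage sketch with placeholder values:

    from pathlib import Path

    from crewai.cli.config import Settings

    # Reads ~/.config/crewai/settings.json if present, then applies the override.
    settings = Settings(tool_repository_username="example-user")  # placeholder credential
    settings.dump()  # merges with the data already on disk and rewrites the file

    # A custom path can be injected, which is handy in tests.
    test_settings = Settings(config_path=Path("/tmp/crewai-settings.json"))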
new file: src/crewai/cli/constants.py (161 lines)
@@ -0,0 +1,161 @@
+ENV_VARS = {
+    "openai": [
+        {
+            "prompt": "Enter your OPENAI API key (press Enter to skip)",
+            "key_name": "OPENAI_API_KEY",
+        }
+    ],
+    "anthropic": [
+        {
+            "prompt": "Enter your ANTHROPIC API key (press Enter to skip)",
+            "key_name": "ANTHROPIC_API_KEY",
+        }
+    ],
+    "gemini": [
+        {
+            "prompt": "Enter your GEMINI API key (press Enter to skip)",
+            "key_name": "GEMINI_API_KEY",
+        }
+    ],
+    "groq": [
+        {
+            "prompt": "Enter your GROQ API key (press Enter to skip)",
+            "key_name": "GROQ_API_KEY",
+        }
+    ],
+    "watson": [
+        {
+            "prompt": "Enter your WATSONX URL (press Enter to skip)",
+            "key_name": "WATSONX_URL",
+        },
+        {
+            "prompt": "Enter your WATSONX API Key (press Enter to skip)",
+            "key_name": "WATSONX_APIKEY",
+        },
+        {
+            "prompt": "Enter your WATSONX Project Id (press Enter to skip)",
+            "key_name": "WATSONX_PROJECT_ID",
+        },
+    ],
+    "ollama": [
+        {
+            "default": True,
+            "API_BASE": "http://localhost:11434",
+        }
+    ],
+    "bedrock": [
+        {
+            "prompt": "Enter your AWS Access Key ID (press Enter to skip)",
+            "key_name": "AWS_ACCESS_KEY_ID",
+        },
+        {
+            "prompt": "Enter your AWS Secret Access Key (press Enter to skip)",
+            "key_name": "AWS_SECRET_ACCESS_KEY",
+        },
+        {
+            "prompt": "Enter your AWS Region Name (press Enter to skip)",
+            "key_name": "AWS_REGION_NAME",
+        },
+    ],
+    "azure": [
+        {
+            "prompt": "Enter your Azure deployment name (must start with 'azure/')",
+            "key_name": "model",
+        },
+        {
+            "prompt": "Enter your AZURE API key (press Enter to skip)",
+            "key_name": "AZURE_API_KEY",
+        },
+        {
+            "prompt": "Enter your AZURE API base URL (press Enter to skip)",
+            "key_name": "AZURE_API_BASE",
+        },
+        {
+            "prompt": "Enter your AZURE API version (press Enter to skip)",
+            "key_name": "AZURE_API_VERSION",
+        },
+    ],
+    "cerebras": [
+        {
+            "prompt": "Enter your Cerebras model name (must start with 'cerebras/')",
+            "key_name": "model",
+        },
+        {
+            "prompt": "Enter your Cerebras API version (press Enter to skip)",
+            "key_name": "CEREBRAS_API_KEY",
+        },
+    ],
+}
+
+
+PROVIDERS = [
+    "openai",
+    "anthropic",
+    "gemini",
+    "groq",
+    "ollama",
+    "watson",
+    "bedrock",
+    "azure",
+    "cerebras",
+]
+
+MODELS = {
+    "openai": ["gpt-4", "gpt-4o", "gpt-4o-mini", "o1-mini", "o1-preview"],
+    "anthropic": [
+        "claude-3-5-sonnet-20240620",
+        "claude-3-sonnet-20240229",
+        "claude-3-opus-20240229",
+        "claude-3-haiku-20240307",
+    ],
+    "gemini": [
+        "gemini/gemini-1.5-flash",
+        "gemini/gemini-1.5-pro",
+        "gemini/gemini-gemma-2-9b-it",
+        "gemini/gemini-gemma-2-27b-it",
+    ],
+    "groq": [
+        "groq/llama-3.1-8b-instant",
+        "groq/llama-3.1-70b-versatile",
+        "groq/llama-3.1-405b-reasoning",
+        "groq/gemma2-9b-it",
+        "groq/gemma-7b-it",
+    ],
+    "ollama": ["ollama/llama3.1", "ollama/mixtral"],
+    "watson": [
+        "watsonx/meta-llama/llama-3-1-70b-instruct",
+        "watsonx/meta-llama/llama-3-1-8b-instruct",
+        "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
+        "watsonx/meta-llama/llama-3-2-1b-instruct",
+        "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
+        "watsonx/meta-llama/llama-3-405b-instruct",
+        "watsonx/mistral/mistral-large",
+        "watsonx/ibm/granite-3-8b-instruct",
+    ],
+    "bedrock": [
+        "bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
+        "bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
+        "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
+        "bedrock/anthropic.claude-3-opus-20240229-v1:0",
+        "bedrock/anthropic.claude-v2:1",
+        "bedrock/anthropic.claude-v2",
+        "bedrock/anthropic.claude-instant-v1",
+        "bedrock/meta.llama3-1-405b-instruct-v1:0",
+        "bedrock/meta.llama3-1-70b-instruct-v1:0",
+        "bedrock/meta.llama3-1-8b-instruct-v1:0",
+        "bedrock/meta.llama3-70b-instruct-v1:0",
+        "bedrock/meta.llama3-8b-instruct-v1:0",
+        "bedrock/amazon.titan-text-lite-v1",
+        "bedrock/amazon.titan-text-express-v1",
+        "bedrock/cohere.command-text-v14",
+        "bedrock/ai21.j2-mid-v1",
+        "bedrock/ai21.j2-ultra-v1",
+        "bedrock/ai21.jamba-instruct-v1:0",
+        "bedrock/meta.llama2-13b-chat-v1",
+        "bedrock/meta.llama2-70b-chat-v1",
+        "bedrock/mistral.mistral-7b-instruct-v0:2",
+        "bedrock/mistral.mixtral-8x7b-instruct-v0:1",
+    ],
+}
+
+JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
@@ -1,12 +1,19 @@
+import shutil
+import sys
 from pathlib import Path

 import click

-from crewai.cli.utils import copy_template
+from crewai.cli.constants import ENV_VARS, MODELS
+from crewai.cli.provider import (
+    get_provider_data,
+    select_model,
+    select_provider,
+)
+from crewai.cli.utils import copy_template, load_env_vars, write_env_file


-def create_crew(name, parent_folder=None):
-    """Create a new crew."""
+def create_folder_structure(name, parent_folder=None):
     folder_name = name.replace(" ", "_").replace("-", "_").lower()
     class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")

@@ -15,32 +22,151 @@ def create_crew(name, parent_folder=None):
     else:
         folder_path = Path(folder_name)

+    if folder_path.exists():
+        if not click.confirm(
+            f"Folder {folder_name} already exists. Do you want to override it?"
+        ):
+            click.secho("Operation cancelled.", fg="yellow")
+            sys.exit(0)
+        click.secho(f"Overriding folder {folder_name}...", fg="green", bold=True)
+        shutil.rmtree(folder_path)  # Delete the existing folder and its contents
+
     click.secho(
         f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
         fg="green",
         bold=True,
     )

-    if not folder_path.exists():
     folder_path.mkdir(parents=True)
     (folder_path / "tests").mkdir(exist_ok=True)
     if not parent_folder:
         (folder_path / "src" / folder_name).mkdir(parents=True)
         (folder_path / "src" / folder_name / "tools").mkdir(parents=True)
         (folder_path / "src" / folder_name / "config").mkdir(parents=True)
-        with open(folder_path / ".env", "w") as file:
-            file.write("OPENAI_API_KEY=YOUR_API_KEY")
-    else:
-        click.secho(
-            f"\tFolder {folder_name} already exists. Please choose a different name.",
-            fg="red",
-        )
-        return
+    return folder_path, folder_name, class_name
+
+
+def copy_template_files(folder_path, name, class_name, parent_folder):
+    package_dir = Path(__file__).parent
+    templates_dir = package_dir / "templates" / "crew"
+
+    root_template_files = (
+        [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
+    )
+    tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
+    config_template_files = ["config/agents.yaml", "config/tasks.yaml"]
+    src_template_files = (
+        ["__init__.py", "main.py", "crew.py"] if not parent_folder else ["crew.py"]
+    )
+
+    for file_name in root_template_files:
+        src_file = templates_dir / file_name
+        dst_file = folder_path / file_name
+        copy_template(src_file, dst_file, name, class_name, folder_path.name)
+
+    src_folder = (
+        folder_path / "src" / folder_path.name if not parent_folder else folder_path
+    )
+
+    for file_name in src_template_files:
+        src_file = templates_dir / file_name
+        dst_file = src_folder / file_name
+        copy_template(src_file, dst_file, name, class_name, folder_path.name)
+
+    if not parent_folder:
+        for file_name in tools_template_files + config_template_files:
+            src_file = templates_dir / file_name
+            dst_file = src_folder / file_name
+            copy_template(src_file, dst_file, name, class_name, folder_path.name)
+
+
+def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
+    folder_path, folder_name, class_name = create_folder_structure(name, parent_folder)
+    env_vars = load_env_vars(folder_path)
+    if not skip_provider:
+        if not provider:
+            provider_models = get_provider_data()
+            if not provider_models:
+                return
+
+        existing_provider = None
+        for provider, env_keys in ENV_VARS.items():
+            if any(
+                "key_name" in details and details["key_name"] in env_vars
+                for details in env_keys
+            ):
+                existing_provider = provider
+                break
+
+        if existing_provider:
+            if not click.confirm(
+                f"Found existing environment variable configuration for {existing_provider.capitalize()}. Do you want to override it?"
+            ):
+                click.secho("Keeping existing provider configuration.", fg="yellow")
+                return
+
+        provider_models = get_provider_data()
+        if not provider_models:
+            return
+
+        while True:
+            selected_provider = select_provider(provider_models)
+            if selected_provider is None:  # User typed 'q'
+                click.secho("Exiting...", fg="yellow")
+                sys.exit(0)
+            if selected_provider:  # Valid selection
+                break
+            click.secho(
+                "No provider selected. Please try again or press 'q' to exit.", fg="red"
+            )
+
+        # Check if the selected provider has predefined models
+        if selected_provider in MODELS and MODELS[selected_provider]:
+            while True:
+                selected_model = select_model(selected_provider, provider_models)
+                if selected_model is None:  # User typed 'q'
+                    click.secho("Exiting...", fg="yellow")
+                    sys.exit(0)
+                if selected_model:  # Valid selection
+                    break
+                click.secho(
+                    "No model selected. Please try again or press 'q' to exit.",
+                    fg="red",
+                )
+            env_vars["MODEL"] = selected_model
+
+        # Check if the selected provider requires API keys
+        if selected_provider in ENV_VARS:
+            provider_env_vars = ENV_VARS[selected_provider]
+            for details in provider_env_vars:
+                if details.get("default", False):
+                    # Automatically add default key-value pairs
+                    for key, value in details.items():
+                        if key not in ["prompt", "key_name", "default"]:
+                            env_vars[key] = value
+                elif "key_name" in details:
+                    # Prompt for non-default key-value pairs
+                    prompt = details["prompt"]
+                    key_name = details["key_name"]
+                    api_key_value = click.prompt(prompt, default="", show_default=False)
+
+                    if api_key_value.strip():
+                        env_vars[key_name] = api_key_value
+
+        if env_vars:
+            write_env_file(folder_path, env_vars)
+            click.secho("API keys and model saved to .env file", fg="green")
+        else:
+            click.secho(
+                "No API keys provided. Skipping .env file creation.", fg="yellow"
+            )
+
+        click.secho(f"Selected model: {env_vars.get('MODEL', 'N/A')}", fg="green")
+
     package_dir = Path(__file__).parent
     templates_dir = package_dir / "templates" / "crew"

-    # List of template files to copy
     root_template_files = (
         [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
     )
@@ -3,12 +3,13 @@ import subprocess
 import click


-def install_crew() -> None:
+def install_crew(proxy_options: list[str]) -> None:
     """
     Install the crew by running the UV command to lock and install.
     """
     try:
-        subprocess.run(["uv", "sync"], check=True, capture_output=False, text=True)
+        command = ["uv", "sync"] + proxy_options
+        subprocess.run(command, check=True, capture_output=False, text=True)
+
     except subprocess.CalledProcessError as e:
         click.echo(f"An error occurred while running the crew: {e}", err=True)
@@ -3,11 +3,11 @@ import subprocess
 import click


-def run_flow() -> None:
+def kickoff_flow() -> None:
     """
-    Run the flow by running a command in the UV environment.
+    Kickoff the flow by running a command in the UV environment.
     """
-    command = ["uv", "run", "run_flow"]
+    command = ["uv", "run", "kickoff"]

     try:
         result = subprocess.run(command, capture_output=False, text=True, check=True)

@@ -7,7 +7,7 @@ def plot_flow() -> None:
     """
     Plot the flow by running a command in the UV environment.
     """
-    command = ["uv", "run", "plot_flow"]
+    command = ["uv", "run", "plot"]

     try:
         result = subprocess.run(command, capture_output=False, text=True, check=True)
new file: src/crewai/cli/provider.py (227 lines)
@@ -0,0 +1,227 @@
+import json
+import time
+from collections import defaultdict
+from pathlib import Path
+
+import click
+import requests
+
+from crewai.cli.constants import JSON_URL, MODELS, PROVIDERS
+
+
+def select_choice(prompt_message, choices):
+    """
+    Presents a list of choices to the user and prompts them to select one.
+
+    Args:
+    - prompt_message (str): The message to display to the user before presenting the choices.
+    - choices (list): A list of options to present to the user.
+
+    Returns:
+    - str: The selected choice from the list, or None if the user chooses to quit.
+    """
+
+    provider_models = get_provider_data()
+    if not provider_models:
+        return
+    click.secho(prompt_message, fg="cyan")
+    for idx, choice in enumerate(choices, start=1):
+        click.secho(f"{idx}. {choice}", fg="cyan")
+    click.secho("q. Quit", fg="cyan")
+
+    while True:
+        choice = click.prompt(
+            "Enter the number of your choice or 'q' to quit", type=str
+        )
+
+        if choice.lower() == "q":
+            return None
+
+        try:
+            selected_index = int(choice) - 1
+            if 0 <= selected_index < len(choices):
+                return choices[selected_index]
+        except ValueError:
+            pass
+
+        click.secho(
+            "Invalid selection. Please select a number between 1 and 6 or 'q' to quit.",
+            fg="red",
+        )
+
+
+def select_provider(provider_models):
+    """
+    Presents a list of providers to the user and prompts them to select one.
+
+    Args:
+    - provider_models (dict): A dictionary of provider models.
+
+    Returns:
+    - str: The selected provider
+    - None: If user explicitly quits
+    """
+    predefined_providers = [p.lower() for p in PROVIDERS]
+    all_providers = sorted(set(predefined_providers + list(provider_models.keys())))
+
+    provider = select_choice(
+        "Select a provider to set up:", predefined_providers + ["other"]
+    )
+    if provider is None:  # User typed 'q'
+        return None
+
+    if provider == "other":
+        provider = select_choice("Select a provider from the full list:", all_providers)
+        if provider is None:  # User typed 'q'
+            return None
+
+    return provider.lower() if provider else False
+
+
+def select_model(provider, provider_models):
+    """
+    Presents a list of models for a given provider to the user and prompts them to select one.
+
+    Args:
+    - provider (str): The provider for which to select a model.
+    - provider_models (dict): A dictionary of provider models.
+
+    Returns:
+    - str: The selected model, or None if the operation is aborted or an invalid selection is made.
+    """
+    predefined_providers = [p.lower() for p in PROVIDERS]
+
+    if provider in predefined_providers:
+        available_models = MODELS.get(provider, [])
+    else:
+        available_models = provider_models.get(provider, [])
+
+    if not available_models:
+        click.secho(f"No models available for provider '{provider}'.", fg="red")
+        return None
+
+    selected_model = select_choice(
+        f"Select a model to use for {provider.capitalize()}:", available_models
+    )
+    return selected_model
+
+
+def load_provider_data(cache_file, cache_expiry):
+    """
+    Loads provider data from a cache file if it exists and is not expired. If the cache is expired or corrupted, it fetches the data from the web.
+
+    Args:
+    - cache_file (Path): The path to the cache file.
+    - cache_expiry (int): The cache expiry time in seconds.
+
+    Returns:
+    - dict or None: The loaded provider data or None if the operation fails.
+    """
+    current_time = time.time()
+    if (
+        cache_file.exists()
+        and (current_time - cache_file.stat().st_mtime) < cache_expiry
+    ):
+        data = read_cache_file(cache_file)
+        if data:
+            return data
+        click.secho(
+            "Cache is corrupted. Fetching provider data from the web...", fg="yellow"
+        )
+    else:
+        click.secho(
+            "Cache expired or not found. Fetching provider data from the web...",
+            fg="cyan",
+        )
+    return fetch_provider_data(cache_file)
+
+
+def read_cache_file(cache_file):
+    """
+    Reads and returns the JSON content from a cache file. Returns None if the file contains invalid JSON.
+
+    Args:
+    - cache_file (Path): The path to the cache file.
+
+    Returns:
+    - dict or None: The JSON content of the cache file or None if the JSON is invalid.
+    """
+    try:
+        with open(cache_file, "r") as f:
+            return json.load(f)
+    except json.JSONDecodeError:
+        return None
+
+
+def fetch_provider_data(cache_file):
+    """
+    Fetches provider data from a specified URL and caches it to a file.
+
+    Args:
+    - cache_file (Path): The path to the cache file.
+
+    Returns:
+    - dict or None: The fetched provider data or None if the operation fails.
+    """
+    try:
+        response = requests.get(JSON_URL, stream=True, timeout=60)
+        response.raise_for_status()
+        data = download_data(response)
+        with open(cache_file, "w") as f:
+            json.dump(data, f)
+        return data
+    except requests.RequestException as e:
+        click.secho(f"Error fetching provider data: {e}", fg="red")
+    except json.JSONDecodeError:
+        click.secho("Error parsing provider data. Invalid JSON format.", fg="red")
+    return None
+
+
+def download_data(response):
+    """
+    Downloads data from a given HTTP response and returns the JSON content.
+
+    Args:
+    - response (requests.Response): The HTTP response object.
+
+    Returns:
+    - dict: The JSON content of the response.
+    """
+    total_size = int(response.headers.get("content-length", 0))
+    block_size = 8192
+    data_chunks = []
+    with click.progressbar(
+        length=total_size, label="Downloading", show_pos=True
+    ) as progress_bar:
+        for chunk in response.iter_content(block_size):
+            if chunk:
+                data_chunks.append(chunk)
+                progress_bar.update(len(chunk))
+    data_content = b"".join(data_chunks)
+    return json.loads(data_content.decode("utf-8"))
+
+
+def get_provider_data():
+    """
+    Retrieves provider data from a cache file, filters out models based on provider criteria, and returns a dictionary of providers mapped to their models.
+
+    Returns:
+    - dict or None: A dictionary of providers mapped to their models or None if the operation fails.
+    """
+    cache_dir = Path.home() / ".crewai"
+    cache_dir.mkdir(exist_ok=True)
+    cache_file = cache_dir / "provider_cache.json"
+    cache_expiry = 24 * 3600
+
+    data = load_provider_data(cache_file, cache_expiry)
+    if not data:
+        return None
+
+    provider_models = defaultdict(list)
+    for model_name, properties in data.items():
+        provider = properties.get("litellm_provider", "").strip().lower()
+        if "http" in provider or provider == "other":
+            continue
+        if provider:
+            provider_models[provider].append(model_name)
+    return provider_models
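Note: the helpers above form a small pipeline — cached litellm metadata in, a provider-to-models mapping out. A non-interactive sketch of the data path (the interactive select_* helpers are skipped here):

    from crewai.cli.provider import get_provider_data

    provider_models = get_provider_data()  # populates and reuses ~/.crewai/provider_cache.json
    if provider_models:
        # Keys are litellm providers, values are lists of model names.
        for provider in sorted(provider_models)[:5]:
            print(provider, len(provider_models[provider]), "models")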
@@ -5,9 +5,17 @@ from crewai.memory.entity.entity_memory import EntityMemory
 from crewai.memory.long_term.long_term_memory import LongTermMemory
 from crewai.memory.short_term.short_term_memory import ShortTermMemory
 from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
+from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage


-def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
+def reset_memories_command(
+    long,
+    short,
+    entity,
+    knowledge,
+    kickoff_outputs,
+    all,
+) -> None:
     """
     Reset the crew memories.

@@ -17,6 +25,7 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
     entity (bool): Whether to reset the entity memory.
     kickoff_outputs (bool): Whether to reset the latest kickoff task outputs.
     all (bool): Whether to reset all memories.
+    knowledge (bool): Whether to reset the knowledge.
     """

     try:

@@ -25,6 +34,7 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
             EntityMemory().reset()
             LongTermMemory().reset()
             TaskOutputStorageHandler().reset()
+            KnowledgeStorage().reset()
             click.echo("All memories have been reset.")
         else:
             if long:

@@ -40,6 +50,9 @@ def reset_memories_command(long, short, entity, kickoff_outputs, all) -> None:
             if kickoff_outputs:
                 TaskOutputStorageHandler().reset()
                 click.echo("Latest Kickoff outputs stored has been reset.")
+            if knowledge:
+                KnowledgeStorage().reset()
+                click.echo("Knowledge has been reset.")

     except subprocess.CalledProcessError as e:
         click.echo(f"An error occurred while resetting the memories: {e}", err=True)
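Note: the function can also be driven directly, which is how the CLI now calls it. A sketch matching the new positional signature:

    from crewai.cli.reset_memories_command import reset_memories_command

    # long, short, entity, knowledge, kickoff_outputs, all
    reset_memories_command(False, False, False, True, False, False)  # resets only the knowledge store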
@@ -1,10 +1,9 @@
 import subprocess

 import click
-import tomllib
 from packaging import version

-from crewai.cli.utils import get_crewai_version
+from crewai.cli.utils import get_crewai_version, read_toml


 def run_crew() -> None:

@@ -15,10 +14,9 @@ def run_crew() -> None:
     crewai_version = get_crewai_version()
     min_required_version = "0.71.0"

-    with open("pyproject.toml", "rb") as f:
-        data = tomllib.load(f)
+    pyproject_data = read_toml()

-    if data.get("tool", {}).get("poetry") and (
+    if pyproject_data.get("tool", {}).get("poetry") and (
         version.parse(crewai_version) < version.parse(min_required_version)
     ):
         click.secho(

@@ -26,7 +24,6 @@ def run_crew() -> None:
             f"Please run `crewai update` to update your pyproject.toml to use uv.",
             fg="red",
         )
-        print()

     try:
         subprocess.run(command, capture_output=False, text=True, check=True)

@@ -35,10 +32,7 @@ def run_crew() -> None:
         click.echo(f"An error occurred while running the crew: {e}", err=True)
         click.echo(e.output, err=True, nl=True)

-        with open("pyproject.toml", "rb") as f:
-            data = tomllib.load(f)
-
-        if data.get("tool", {}).get("poetry"):
+        if pyproject_data.get("tool", {}).get("poetry"):
             click.secho(
                 "It's possible that you are using an old version of crewAI that uses poetry, please run `crewai update` to update your pyproject.toml to use uv.",
                 fg="yellow",
@@ -8,9 +8,12 @@ from crewai.project import CrewBase, agent, crew, task
 # from crewai_tools import SerperDevTool

 @CrewBase
-class {{crew_name}}Crew():
+class {{crew_name}}():
     """{{crew_name}} crew"""

+    agents_config = 'config/agents.yaml'
+    tasks_config = 'config/tasks.yaml'
+
     @agent
     def researcher(self) -> Agent:
         return Agent(
@@ -1,9 +1,13 @@
 #!/usr/bin/env python
 import sys
-from {{folder_name}}.crew import {{crew_name}}Crew
+import warnings
+
+from {{folder_name}}.crew import {{crew_name}}
+
+warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")

 # This main file is intended to be a way for you to run your
-# crew locally, so refrain from adding necessary logic into this file.
+# crew locally, so refrain from adding unnecessary logic into this file.
 # Replace with inputs you want to test with, it will automatically
 # interpolate any tasks and agents information

@@ -14,7 +18,7 @@ def run():
     inputs = {
         'topic': 'AI LLMs'
     }
-    {{crew_name}}Crew().crew().kickoff(inputs=inputs)
+    {{crew_name}}().crew().kickoff(inputs=inputs)


 def train():

@@ -25,7 +29,7 @@ def train():
         "topic": "AI LLMs"
     }
     try:
-        {{crew_name}}Crew().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)
+        {{crew_name}}().crew().train(n_iterations=int(sys.argv[1]), filename=sys.argv[2], inputs=inputs)

     except Exception as e:
         raise Exception(f"An error occurred while training the crew: {e}")

@@ -35,7 +39,7 @@ def replay():
     Replay the crew execution from a specific task.
     """
     try:
-        {{crew_name}}Crew().crew().replay(task_id=sys.argv[1])
+        {{crew_name}}().crew().replay(task_id=sys.argv[1])

     except Exception as e:
         raise Exception(f"An error occurred while replaying the crew: {e}")

@@ -48,7 +52,7 @@ def test():
         "topic": "AI LLMs"
     }
     try:
-        {{crew_name}}Crew().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
+        {{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)

     except Exception as e:
         raise Exception(f"An error occurred while replaying the crew: {e}")
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<=3.13"
 dependencies = [
-    "crewai[tools]>=0.67.1,<1.0.0"
+    "crewai[tools]>=0.80.0,<1.0.0"
 ]
 
 [project.scripts]
@@ -1,11 +1,18 @@
-from crewai_tools import BaseTool
+from crewai.tools import BaseTool
+from typing import Type
+from pydantic import BaseModel, Field
+
+
+class MyCustomToolInput(BaseModel):
+    """Input schema for MyCustomTool."""
+    argument: str = Field(..., description="Description of the argument.")
 
 
 class MyCustomTool(BaseTool):
     name: str = "Name of my tool"
     description: str = (
         "Clear description for what this tool is useful for, you agent will need this information to use it."
     )
+    args_schema: Type[BaseModel] = MyCustomToolInput
 
     def _run(self, argument: str) -> str:
         # Implementation goes here
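For reference, a minimal sketch of the regenerated tool template in use — the `EchoTool` name and its `_run` body are illustrative stand-ins, not part of the template:

    from typing import Type

    from crewai.tools import BaseTool
    from pydantic import BaseModel, Field


    class EchoToolInput(BaseModel):
        """Input schema for EchoTool."""

        argument: str = Field(..., description="Text to echo back.")


    class EchoTool(BaseTool):
        name: str = "Echo tool"
        description: str = "Echoes its argument back; the agent reads this to decide when to call the tool."
        args_schema: Type[BaseModel] = EchoToolInput

        def _run(self, argument: str) -> str:
            # Toy implementation standing in for real tool logic
            return argument


    print(EchoTool()._run(argument="echo me"))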
@@ -1,65 +1,53 @@
 #!/usr/bin/env python
-import asyncio
 from random import randint
 
 from pydantic import BaseModel
 
 from crewai.flow.flow import Flow, listen, start
 
 from .crews.poem_crew.poem_crew import PoemCrew
 
 
 class PoemState(BaseModel):
     sentence_count: int = 1
     poem: str = ""
 
 
 class PoemFlow(Flow[PoemState]):
 
     @start()
     def generate_sentence_count(self):
         print("Generating sentence count")
-        # Generate a number between 1 and 5
         self.state.sentence_count = randint(1, 5)
 
     @listen(generate_sentence_count)
     def generate_poem(self):
         print("Generating poem")
-        print(f"State before poem: {self.state}")
-        result = PoemCrew().crew().kickoff(inputs={"sentence_count": self.state.sentence_count})
+        result = (
+            PoemCrew()
+            .crew()
+            .kickoff(inputs={"sentence_count": self.state.sentence_count})
+        )
 
         print("Poem generated", result.raw)
         self.state.poem = result.raw
 
-        print(f"State after generate_poem: {self.state}")
 
     @listen(generate_poem)
     def save_poem(self):
         print("Saving poem")
-        print(f"State before save_poem: {self.state}")
         with open("poem.txt", "w") as f:
             f.write(self.state.poem)
-        print(f"State after save_poem: {self.state}")
 
 
-async def run_flow():
-    """
-    Run the flow.
-    """
+def kickoff():
     poem_flow = PoemFlow()
-    await poem_flow.kickoff()
+    poem_flow.kickoff()
 
 
-async def plot_flow():
-    """
-    Plot the flow.
-    """
+def plot():
     poem_flow = PoemFlow()
     poem_flow.plot()
 
 
-def main():
-    asyncio.run(run_flow())
-
-
-def plot():
-    asyncio.run(plot_flow())
-
-
 if __name__ == "__main__":
-    main()
+    kickoff()
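The template can drop `asyncio` because `Flow.kickoff()` is now a synchronous wrapper (see the flow.py hunk later in this diff). A minimal sketch, assuming only `crewai` is installed:

    from crewai.flow.flow import Flow, start


    class TinyFlow(Flow):
        @start()
        def begin(self):
            return "done"


    # No asyncio.run() needed; kickoff() drives the event loop internally.
    print(TinyFlow().kickoff())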
@@ -5,14 +5,12 @@ description = "{{name}} using crewAI"
 authors = [{ name = "Your Name", email = "you@example.com" }]
 requires-python = ">=3.10,<=3.13"
 dependencies = [
-    "crewai[tools]>=0.67.1,<1.0.0",
-    "asyncio"
+    "crewai[tools]>=0.80.0,<1.0.0",
 ]
 
 [project.scripts]
-{{folder_name}} = "{{folder_name}}.main:main"
-run_flow = "{{folder_name}}.main:main"
-plot_flow = "{{folder_name}}.main:plot"
+kickoff = "{{folder_name}}.main:kickoff"
+plot = "{{folder_name}}.main:plot"
 
 [build-system]
 requires = ["hatchling"]
@@ -1,4 +1,13 @@
-from crewai_tools import BaseTool
+from typing import Type
+
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
+
+
+class MyCustomToolInput(BaseModel):
+    """Input schema for MyCustomTool."""
+
+    argument: str = Field(..., description="Description of the argument.")
 
 
 class MyCustomTool(BaseTool):
@@ -6,6 +15,7 @@ class MyCustomTool(BaseTool):
     description: str = (
         "Clear description for what this tool is useful for, you agent will need this information to use it."
     )
+    args_schema: Type[BaseModel] = MyCustomToolInput
 
     def _run(self, argument: str) -> str:
         # Implementation goes here
@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
 
 [tool.poetry.dependencies]
 python = ">=3.10,<=3.13"
-crewai = { extras = ["tools"], version = ">=0.70.1,<1.0.0" }
+crewai = { extras = ["tools"], version = ">=0.80.0,<1.0.0" }
 asyncio = "*"
 
 [tool.poetry.scripts]
@@ -1,11 +1,18 @@
-from crewai_tools import BaseTool
+from typing import Type
+
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
+
+
+class MyCustomToolInput(BaseModel):
+    """Input schema for MyCustomTool."""
+
+    argument: str = Field(..., description="Description of the argument.")
 
 
 class MyCustomTool(BaseTool):
     name: str = "Name of my tool"
     description: str = (
         "Clear description for what this tool is useful for, you agent will need this information to use it."
     )
+    args_schema: Type[BaseModel] = MyCustomToolInput
 
     def _run(self, argument: str) -> str:
         # Implementation goes here
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
 authors = ["Your Name <you@example.com>"]
 requires-python = ">=3.10,<=3.13"
 dependencies = [
-    "crewai[tools]>=0.67.1,<1.0.0"
+    "crewai[tools]>=0.80.0,<1.0.0"
 ]
 
 [project.scripts]
@@ -1,11 +1,18 @@
-from crewai_tools import BaseTool
+from typing import Type
+
+from crewai.tools import BaseTool
+from pydantic import BaseModel, Field
+
+
+class MyCustomToolInput(BaseModel):
+    """Input schema for MyCustomTool."""
+
+    argument: str = Field(..., description="Description of the argument.")
 
 
 class MyCustomTool(BaseTool):
     name: str = "Name of my tool"
     description: str = (
         "Clear description for what this tool is useful for, you agent will need this information to use it."
     )
+    args_schema: Type[BaseModel] = MyCustomToolInput
 
     def _run(self, argument: str) -> str:
         # Implementation goes here
@@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}"
 readme = "README.md"
 requires-python = ">=3.10,<=3.13"
 dependencies = [
-    "crewai[tools]>=0.70.1"
+    "crewai[tools]>=0.80.0"
 ]
 
@@ -1,4 +1,5 @@
-from crewai_tools import BaseTool
+from crewai.tools import BaseTool
+
 
 class {{class_name}}(BaseTool):
     name: str = "Name of my tool"
@@ -1,17 +1,15 @@
 import base64
 import os
-import platform
 import subprocess
 import tempfile
 from pathlib import Path
-from netrc import netrc
-import stat
 
 import click
 from rich.console import Console
 
 from crewai.cli import git
 from crewai.cli.command import BaseCommand, PlusAPIMixin
+from crewai.cli.config import Settings
 from crewai.cli.utils import (
     get_project_description,
     get_project_name,
@@ -28,8 +26,6 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
     A class to handle tool repository related operations for CrewAI projects.
     """
 
-    BASE_URL = "https://app.crewai.com/pypi/"
-
     def __init__(self):
         BaseCommand.__init__(self)
         PlusAPIMixin.__init__(self, telemetry=self._telemetry)
@@ -155,39 +151,35 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
             raise SystemExit
 
         login_response_json = login_response.json()
-        self._set_netrc_credentials(login_response_json["credential"])
+
+        settings = Settings()
+        settings.tool_repository_username = login_response_json["credential"]["username"]
+        settings.tool_repository_password = login_response_json["credential"]["password"]
+        settings.dump()
 
         console.print(
             "Successfully authenticated to the tool repository.", style="bold green"
         )
 
-    def _set_netrc_credentials(self, credentials, netrc_path=None):
-        if not netrc_path:
-            netrc_filename = "_netrc" if platform.system() == "Windows" else ".netrc"
-            netrc_path = Path.home() / netrc_filename
-            netrc_path.touch(mode=stat.S_IRUSR | stat.S_IWUSR, exist_ok=True)
-
-        netrc_instance = netrc(file=netrc_path)
-        netrc_instance.hosts["app.crewai.com"] = (credentials["username"], "", credentials["password"])
-
-        with open(netrc_path, 'w') as file:
-            file.write(str(netrc_instance))
-
-        console.print(f"Added credentials to {netrc_path}", style="bold green")
-
     def _add_package(self, tool_details):
         tool_handle = tool_details["handle"]
         repository_handle = tool_details["repository"]["handle"]
+        repository_url = tool_details["repository"]["url"]
+        index = f"{repository_handle}={repository_url}"
 
         add_package_command = [
             "uv",
             "add",
-            "--extra-index-url",
-            self.BASE_URL + repository_handle,
+            "--index",
+            index,
             tool_handle,
         ]
         add_package_result = subprocess.run(
-            add_package_command, capture_output=False, text=True, check=True
+            add_package_command,
+            capture_output=False,
+            env=self._build_env_with_credentials(repository_handle),
+            text=True,
+            check=True
         )
 
         if add_package_result.stderr:
@@ -206,3 +198,13 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
                 "[bold yellow]Tip:[/bold yellow] Navigate to a different directory and try again."
             )
             raise SystemExit
+
+    def _build_env_with_credentials(self, repository_handle: str):
+        repository_handle = repository_handle.upper().replace("-", "_")
+        settings = Settings()
+
+        env = os.environ.copy()
+        env[f"UV_INDEX_{repository_handle}_USERNAME"] = str(settings.tool_repository_username or "")
+        env[f"UV_INDEX_{repository_handle}_PASSWORD"] = str(settings.tool_repository_password or "")
+
+        return env
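The replacement for the `.netrc` flow leans on uv's per-index credential environment variables, as the `UV_INDEX_{handle}_USERNAME`/`_PASSWORD` keys above show. A sketch of the equivalent manual invocation — handle, URL, and credentials are placeholders, not values from this diff:

    import os
    import subprocess

    handle = "acme-tools"                    # placeholder repository handle
    url = "https://example.com/simple"       # placeholder index URL
    env_key = handle.upper().replace("-", "_")

    env = os.environ.copy()
    env[f"UV_INDEX_{env_key}_USERNAME"] = "user"      # placeholder credentials
    env[f"UV_INDEX_{env_key}_PASSWORD"] = "secret"

    subprocess.run(
        ["uv", "add", "--index", f"{handle}={url}", "some-tool"],
        env=env,
        check=True,
    )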
@@ -1,7 +1,9 @@
+import os
 import shutil
 
 import tomli_w
-import tomllib
+
+from crewai.cli.utils import read_toml
 
 
 def update_crew() -> None:
@@ -17,10 +19,9 @@ def migrate_pyproject(input_file, output_file):
     And it will be used to migrate the pyproject.toml to the new format when uv is used.
     When the time comes that uv supports the new format, this function will be deprecated.
     """
+    poetry_data = {}
     # Read the input pyproject.toml
-    with open(input_file, "rb") as f:
-        pyproject = tomllib.load(f)
+    pyproject_data = read_toml()
 
     # Initialize the new project structure
     new_pyproject = {
@@ -29,30 +30,30 @@ def migrate_pyproject(input_file, output_file):
     }
 
     # Migrate project metadata
-    if "tool" in pyproject and "poetry" in pyproject["tool"]:
-        poetry = pyproject["tool"]["poetry"]
-        new_pyproject["project"]["name"] = poetry.get("name")
-        new_pyproject["project"]["version"] = poetry.get("version")
-        new_pyproject["project"]["description"] = poetry.get("description")
+    if "tool" in pyproject_data and "poetry" in pyproject_data["tool"]:
+        poetry_data = pyproject_data["tool"]["poetry"]
+        new_pyproject["project"]["name"] = poetry_data.get("name")
+        new_pyproject["project"]["version"] = poetry_data.get("version")
+        new_pyproject["project"]["description"] = poetry_data.get("description")
         new_pyproject["project"]["authors"] = [
             {
                 "name": author.split("<")[0].strip(),
                 "email": author.split("<")[1].strip(">").strip(),
             }
-            for author in poetry.get("authors", [])
+            for author in poetry_data.get("authors", [])
         ]
-        new_pyproject["project"]["requires-python"] = poetry.get("python")
+        new_pyproject["project"]["requires-python"] = poetry_data.get("python")
     else:
         # If it's already in the new format, just copy the project section
-        new_pyproject["project"] = pyproject.get("project", {})
+        new_pyproject["project"] = pyproject_data.get("project", {})
 
     # Migrate or copy dependencies
     if "dependencies" in new_pyproject["project"]:
         # If dependencies are already in the new format, keep them as is
         pass
-    elif "dependencies" in poetry:
+    elif poetry_data and "dependencies" in poetry_data:
         new_pyproject["project"]["dependencies"] = []
-        for dep, version in poetry["dependencies"].items():
+        for dep, version in poetry_data["dependencies"].items():
             if isinstance(version, dict):  # Handle extras
                 extras = ",".join(version.get("extras", []))
                 new_dep = f"{dep}[{extras}]"
@@ -66,10 +67,10 @@ def migrate_pyproject(input_file, output_file):
             new_pyproject["project"]["dependencies"].append(new_dep)
 
     # Migrate or copy scripts
-    if "scripts" in poetry:
-        new_pyproject["project"]["scripts"] = poetry["scripts"]
-    elif "scripts" in pyproject.get("project", {}):
-        new_pyproject["project"]["scripts"] = pyproject["project"]["scripts"]
+    if poetry_data and "scripts" in poetry_data:
+        new_pyproject["project"]["scripts"] = poetry_data["scripts"]
+    elif pyproject_data.get("project", {}) and "scripts" in pyproject_data["project"]:
+        new_pyproject["project"]["scripts"] = pyproject_data["project"]["scripts"]
     else:
         new_pyproject["project"]["scripts"] = {}
 
@@ -86,14 +87,23 @@ def migrate_pyproject(input_file, output_file):
         new_pyproject["project"]["scripts"]["run_crew"] = f"{module_name}.main:run"
 
     # Migrate optional dependencies
-    if "extras" in poetry:
-        new_pyproject["project"]["optional-dependencies"] = poetry["extras"]
+    if poetry_data and "extras" in poetry_data:
+        new_pyproject["project"]["optional-dependencies"] = poetry_data["extras"]
 
     # Backup the old pyproject.toml
     backup_file = "pyproject-old.toml"
     shutil.copy2(input_file, backup_file)
     print(f"Original pyproject.toml backed up as {backup_file}")
 
+    # Rename the poetry.lock file
+    lock_file = "poetry.lock"
+    lock_backup = "poetry-old.lock"
+    if os.path.exists(lock_file):
+        os.rename(lock_file, lock_backup)
+        print(f"Original poetry.lock renamed to {lock_backup}")
+    else:
+        print("No poetry.lock file found to rename.")
+
     # Write the new pyproject.toml
     with open(output_file, "wb") as f:
         tomli_w.dump(new_pyproject, f)
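The author migration above assumes Poetry's single-string "Name <email>" convention; a quick sketch of what that split produces:

    author = "Your Name <you@example.com>"
    name = author.split("<")[0].strip()              # "Your Name"
    email = author.split("<")[1].strip(">").strip()  # "you@example.com"
    print({"name": name, "email": email})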
@@ -6,9 +6,11 @@ from functools import reduce
 from typing import Any, Dict, List
 
 import click
+import tomli
 from rich.console import Console
 
 from crewai.cli.authentication.utils import TokenManager
+from crewai.cli.constants import ENV_VARS
 
 if sys.version_info >= (3, 11):
     import tomllib
@@ -53,6 +55,13 @@ def simple_toml_parser(content):
     return result
 
 
+def read_toml(file_path: str = "pyproject.toml"):
+    """Read the content of a TOML file and return it as a dictionary."""
+    with open(file_path, "rb") as f:
+        toml_dict = tomli.load(f)
+    return toml_dict
+
+
 def parse_toml(content):
     if sys.version_info >= (3, 11):
         return tomllib.loads(content)
@@ -200,3 +209,76 @@ def tree_find_and_replace(directory, find, replace):
             new_dirpath = os.path.join(path, new_dirname)
             old_dirpath = os.path.join(path, dirname)
             os.rename(old_dirpath, new_dirpath)
+
+
+def load_env_vars(folder_path):
+    """
+    Loads environment variables from a .env file in the specified folder path.
+
+    Args:
+    - folder_path (Path): The path to the folder containing the .env file.
+
+    Returns:
+    - dict: A dictionary of environment variables.
+    """
+    env_file_path = folder_path / ".env"
+    env_vars = {}
+    if env_file_path.exists():
+        with open(env_file_path, "r") as file:
+            for line in file:
+                key, _, value = line.strip().partition("=")
+                if key and value:
+                    env_vars[key] = value
+    return env_vars
+
+
+def update_env_vars(env_vars, provider, model):
+    """
+    Updates environment variables with the API key for the selected provider and model.
+
+    Args:
+    - env_vars (dict): Environment variables dictionary.
+    - provider (str): Selected provider.
+    - model (str): Selected model.
+
+    Returns:
+    - None
+    """
+    api_key_var = ENV_VARS.get(
+        provider,
+        [
+            click.prompt(
+                f"Enter the environment variable name for your {provider.capitalize()} API key",
+                type=str,
+            )
+        ],
+    )[0]
+
+    if api_key_var not in env_vars:
+        try:
+            env_vars[api_key_var] = click.prompt(
+                f"Enter your {provider.capitalize()} API key", type=str, hide_input=True
+            )
+        except click.exceptions.Abort:
+            click.secho("Operation aborted by the user.", fg="red")
+            return None
+    else:
+        click.secho(f"API key already exists for {provider.capitalize()}.", fg="yellow")
+
+    env_vars["MODEL"] = model
+    click.secho(f"Selected model: {model}", fg="green")
+    return env_vars
+
+
+def write_env_file(folder_path, env_vars):
+    """
+    Writes environment variables to a .env file in the specified folder.
+
+    Args:
+    - folder_path (Path): The path to the folder where the .env file will be written.
+    - env_vars (dict): A dictionary of environment variables to write.
+    """
+    env_file_path = folder_path / ".env"
+    with open(env_file_path, "w") as file:
+        for key, value in env_vars.items():
+            file.write(f"{key}={value}\n")
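A sketch of how the three new helpers compose (`update_env_vars` prompts interactively when the key is missing; the provider and model values are placeholders, and the `crewai.cli.utils` import path is inferred from this hunk):

    from pathlib import Path

    from crewai.cli.utils import load_env_vars, update_env_vars, write_env_file

    folder = Path(".")
    env_vars = load_env_vars(folder)            # {} when no .env file exists
    env_vars = update_env_vars(env_vars, "openai", "gpt-4o")
    if env_vars is not None:                    # None signals the user aborted the prompt
        write_env_file(folder, env_vars)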
@@ -5,7 +5,7 @@ import uuid
 import warnings
 from concurrent.futures import Future
 from hashlib import md5
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
 
 from pydantic import (
     UUID4,
@@ -27,17 +27,17 @@ from crewai.llm import LLM
 from crewai.memory.entity.entity_memory import EntityMemory
 from crewai.memory.long_term.long_term_memory import LongTermMemory
 from crewai.memory.short_term.short_term_memory import ShortTermMemory
+from crewai.knowledge.knowledge import Knowledge
+from crewai.memory.user.user_memory import UserMemory
 from crewai.process import Process
 from crewai.task import Task
 from crewai.tasks.conditional_task import ConditionalTask
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry import Telemetry
-from crewai.tools.agent_tools import AgentTools
+from crewai.tools.agent_tools.agent_tools import AgentTools
 from crewai.types.usage_metrics import UsageMetrics
 from crewai.utilities import I18N, FileHandler, Logger, RPMController
-from crewai.utilities.constants import (
-    TRAINING_DATA_FILE,
-)
+from crewai.utilities.constants import TRAINING_DATA_FILE
 from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
 from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
 from crewai.utilities.formatter import (
@@ -71,6 +71,7 @@ class Crew(BaseModel):
         manager_llm: The language model that will run manager agent.
         manager_agent: Custom agent that will be used as manager.
         memory: Whether the crew should use memory to store memories of it's execution.
+        memory_config: Configuration for the memory to be used for the crew.
         cache: Whether the crew should use a cache to store the results of the tools execution.
         function_calling_llm: The language model that will run the tool calling for all the agents.
         process: The process flow that the crew will follow (e.g., sequential, hierarchical).
@@ -94,6 +95,7 @@ class Crew(BaseModel):
     _short_term_memory: Optional[InstanceOf[ShortTermMemory]] = PrivateAttr()
     _long_term_memory: Optional[InstanceOf[LongTermMemory]] = PrivateAttr()
     _entity_memory: Optional[InstanceOf[EntityMemory]] = PrivateAttr()
+    _user_memory: Optional[InstanceOf[UserMemory]] = PrivateAttr()
     _train: Optional[bool] = PrivateAttr(default=False)
     _train_iteration: Optional[int] = PrivateAttr()
     _inputs: Optional[Dict[str, Any]] = PrivateAttr(default=None)
@@ -114,6 +116,10 @@ class Crew(BaseModel):
         default=False,
         description="Whether the crew should use memory to store memories of it's execution",
     )
+    memory_config: Optional[Dict[str, Any]] = Field(
+        default=None,
+        description="Configuration for the memory to be used for the crew.",
+    )
     short_term_memory: Optional[InstanceOf[ShortTermMemory]] = Field(
         default=None,
         description="An Instance of the ShortTermMemory to be used by the Crew",
@@ -126,8 +132,12 @@ class Crew(BaseModel):
         default=None,
         description="An Instance of the EntityMemory to be used by the Crew",
     )
+    user_memory: Optional[InstanceOf[UserMemory]] = Field(
+        default=None,
+        description="An instance of the UserMemory to be used by the Crew to store/fetch memories of a specific user.",
+    )
     embedder: Optional[dict] = Field(
-        default={"provider": "openai"},
+        default=None,
         description="Configuration for the embedder to be used for the crew.",
     )
     usage_metrics: Optional[UsageMetrics] = Field(
@@ -154,6 +164,16 @@ class Crew(BaseModel):
         default=None,
         description="Callback to be executed after each task for all agents execution.",
     )
+    before_kickoff_callbacks: List[
+        Callable[[Optional[Dict[str, Any]]], Optional[Dict[str, Any]]]
+    ] = Field(
+        default_factory=list,
+        description="List of callbacks to be executed before crew kickoff. It may be used to adjust inputs before the crew is executed.",
+    )
+    after_kickoff_callbacks: List[Callable[[CrewOutput], CrewOutput]] = Field(
+        default_factory=list,
+        description="List of callbacks to be executed after crew kickoff. It may be used to adjust the output of the crew.",
+    )
     max_rpm: Optional[int] = Field(
         default=None,
         description="Maximum number of requests per minute for the crew execution to be respected.",
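A sketch of the new hooks in use — `my_agents` and `my_tasks` are assumed to be defined elsewhere:

    from crewai import Crew


    def add_default_topic(inputs):
        # Runs before kickoff; may rewrite the inputs dict
        inputs = inputs or {}
        inputs.setdefault("topic", "AI LLMs")
        return inputs


    def strip_whitespace(output):
        # Runs after kickoff; may rewrite the CrewOutput
        output.raw = output.raw.strip()
        return output


    crew = Crew(
        agents=my_agents,
        tasks=my_tasks,
        before_kickoff_callbacks=[add_default_topic],
        after_kickoff_callbacks=[strip_whitespace],
    )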
@@ -182,6 +202,10 @@ class Crew(BaseModel):
         default=[],
         description="List of execution logs for tasks",
     )
+    knowledge: Optional[Dict[str, Any]] = Field(
+        default=None, description="Knowledge for the crew. Add knowledge sources to the knowledge object."
+    )
+
 
     @field_validator("id", mode="before")
     @classmethod
@@ -238,13 +262,31 @@ class Crew(BaseModel):
             self._short_term_memory = (
                 self.short_term_memory
                 if self.short_term_memory
-                else ShortTermMemory(crew=self, embedder_config=self.embedder)
+                else ShortTermMemory(
+                    crew=self,
+                    embedder_config=self.embedder,
+                )
             )
             self._entity_memory = (
                 self.entity_memory
                 if self.entity_memory
                 else EntityMemory(crew=self, embedder_config=self.embedder)
             )
+            if hasattr(self, "memory_config") and self.memory_config is not None:
+                self._user_memory = (
+                    self.user_memory if self.user_memory else UserMemory(crew=self)
+                )
+            else:
+                self._user_memory = None
+        return self
+
+    @model_validator(mode="after")
+    def create_crew_knowledge(self) -> "Crew":
+        if self.knowledge:
+            try:
+                self.knowledge = Knowledge(**self.knowledge) if isinstance(self.knowledge, dict) else self.knowledge
+            except (TypeError, ValueError) as e:
+                raise ValueError(f"Invalid knowledge configuration: {str(e)}")
         return self
 
     @model_validator(mode="after")
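Per the validator above, a plain dict passed as `Crew(knowledge=...)` is expanded into `Knowledge(**config)`, and a malformed config surfaces as `ValueError`. A minimal sketch with an empty source list:

    from crewai.knowledge.knowledge import Knowledge

    config = {"sources": [], "embedder_config": None}  # shape accepted by Crew(knowledge=...)
    knowledge = Knowledge(**config)                    # unknown keys would fail validation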
@@ -435,15 +477,17 @@ class Crew(BaseModel):
         self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
     ) -> None:
         """Trains the crew for a given number of iterations."""
-        self._setup_for_training(filename)
+        train_crew = self.copy()
+        train_crew._setup_for_training(filename)
 
         for n_iteration in range(n_iterations):
-            self._train_iteration = n_iteration
-            self.kickoff(inputs=inputs)
+            train_crew._train_iteration = n_iteration
+            train_crew.kickoff(inputs=inputs)
 
         training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
 
-        for agent in self.agents:
+        for agent in train_crew.agents:
+            if training_data.get(str(agent.id)):
                 result = TaskEvaluator(agent).evaluate_training_data(
                     training_data=training_data, agent_id=str(agent.id)
                 )
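Since training now runs on `self.copy()`, the caller's crew instance no longer carries training state afterwards; the call site itself is unchanged (the filename below is a placeholder):

    crew.train(n_iterations=2, filename="training_data.pkl", inputs={"topic": "AI LLMs"})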
@@ -456,6 +500,9 @@ class Crew(BaseModel):
         self,
         inputs: Optional[Dict[str, Any]] = None,
     ) -> CrewOutput:
+        for before_callback in self.before_kickoff_callbacks:
+            inputs = before_callback(inputs)
+
         """Starts the crew to work on its assigned tasks."""
         self._execution_span = self._telemetry.crew_execution_span(self, inputs)
         self._task_output_handler.reset()
@@ -498,6 +545,9 @@ class Crew(BaseModel):
                 f"The process '{self.process}' is not implemented yet."
             )
 
+        for after_callback in self.after_kickoff_callbacks:
+            result = after_callback(result)
+
         metrics += [agent._token_process.get_summary() for agent in self.agents]
 
         self.usage_metrics = UsageMetrics()
@@ -774,7 +824,9 @@ class Crew(BaseModel):
 
     def _log_task_start(self, task: Task, role: str = "None"):
         if self.output_log_file:
-            self._file_handler.log(task_name=task.name, task=task.description, agent=role, status="started")
+            self._file_handler.log(
+                task_name=task.name, task=task.description, agent=role, status="started"
+            )
 
     def _update_manager_tools(self, task: Task):
         if self.manager_agent:
@@ -796,7 +848,13 @@ class Crew(BaseModel):
     def _process_task_result(self, task: Task, output: TaskOutput) -> None:
         role = task.agent.role if task.agent is not None else "None"
         if self.output_log_file:
-            self._file_handler.log(task_name=task.name, task=task.description, agent=role, status="completed", output=output.raw)
+            self._file_handler.log(
+                task_name=task.name,
+                task=task.description,
+                agent=role,
+                status="completed",
+                output=output.raw,
+            )
 
     def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
         if len(task_outputs) != 1:
@@ -979,17 +1037,19 @@ class Crew(BaseModel):
         inputs: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
-        self._test_execution_span = self._telemetry.test_execution_span(
-            self,
+        test_crew = self.copy()
+
+        self._test_execution_span = test_crew._telemetry.test_execution_span(
+            test_crew,
             n_iterations,
             inputs,
             openai_model_name,  # type: ignore[arg-type]
         )  # type: ignore[arg-type]
-        evaluator = CrewEvaluator(self, openai_model_name)  # type: ignore[arg-type]
+        evaluator = CrewEvaluator(test_crew, openai_model_name)  # type: ignore[arg-type]
 
         for i in range(1, n_iterations + 1):
             evaluator.set_iteration(i)
-            self.kickoff(inputs=inputs)
+            test_crew.kickoff(inputs=inputs)
 
         evaluator.print_crew_evaluation_result()
@@ -1,10 +1,20 @@
-# flow.py
-
 import asyncio
 import inspect
-from typing import Any, Callable, Dict, Generic, List, Set, Type, TypeVar, Union
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    Generic,
+    List,
+    Optional,
+    Set,
+    Type,
+    TypeVar,
+    Union,
+    cast,
+)
 
-from pydantic import BaseModel
+from pydantic import BaseModel, ValidationError
 
 from crewai.flow.flow_visualizer import plot_flow
 from crewai.flow.utils import get_possible_return_constants
@@ -120,6 +130,7 @@ class FlowMeta(type):
                     methods = attr_value.__trigger_methods__
                     condition_type = getattr(attr_value, "__condition_type__", "OR")
                     listeners[attr_name] = (condition_type, methods)
+
                 elif hasattr(attr_value, "__is_router__"):
                     routers[attr_value.__router_for__] = attr_name
                     possible_returns = get_possible_return_constants(attr_value)
@@ -159,7 +170,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
     def __init__(self) -> None:
         self._methods: Dict[str, Callable] = {}
         self._state: T = self._create_initial_state()
-        self._completed_methods: Set[str] = set()
+        self._method_execution_counts: Dict[str, int] = {}
         self._pending_and_listeners: Dict[str, Set[str]] = {}
         self._method_outputs: List[Any] = []  # List to store all method outputs
@@ -190,7 +201,74 @@ class Flow(Generic[T], metaclass=FlowMeta):
         """Returns the list of all outputs from executed methods."""
         return self._method_outputs
 
-    async def kickoff(self) -> Any:
+    def _initialize_state(self, inputs: Dict[str, Any]) -> None:
+        """
+        Initializes or updates the state with the provided inputs.
+
+        Args:
+            inputs: Dictionary of inputs to initialize or update the state.
+
+        Raises:
+            ValueError: If inputs do not match the structured state model.
+            TypeError: If state is neither a BaseModel instance nor a dictionary.
+        """
+        if isinstance(self._state, BaseModel):
+            # Structured state management
+            try:
+                # Define a function to create the dynamic class
+                def create_model_with_extra_forbid(
+                    base_model: Type[BaseModel],
+                ) -> Type[BaseModel]:
+                    class ModelWithExtraForbid(base_model):  # type: ignore
+                        model_config = base_model.model_config.copy()
+                        model_config["extra"] = "forbid"
+
+                    return ModelWithExtraForbid
+
+                # Create the dynamic class
+                ModelWithExtraForbid = create_model_with_extra_forbid(
+                    self._state.__class__
+                )
+
+                # Create a new instance using the combined state and inputs
+                self._state = cast(
+                    T, ModelWithExtraForbid(**{**self._state.model_dump(), **inputs})
+                )
+
+            except ValidationError as e:
+                raise ValueError(f"Invalid inputs for structured state: {e}") from e
+        elif isinstance(self._state, dict):
+            # Unstructured state management
+            self._state.update(inputs)
+        else:
+            raise TypeError("State must be a BaseModel instance or a dictionary.")
+
+    def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
+        """
+        Starts the execution of the flow synchronously.
+
+        Args:
+            inputs: Optional dictionary of inputs to initialize or update the state.
+
+        Returns:
+            The final output from the flow execution.
+        """
+        if inputs is not None:
+            self._initialize_state(inputs)
+        return asyncio.run(self.kickoff_async())
+
+    async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
+        """
+        Starts the execution of the flow asynchronously.
+
+        Args:
+            inputs: Optional dictionary of inputs to initialize or update the state.
+
+        Returns:
+            The final output from the flow execution.
+        """
+        if inputs is not None:
+            self._initialize_state(inputs)
         if not self._start_methods:
             raise ValueError("No start method defined")
 
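A sketch of the new input seeding — the model and field names here are illustrative, not from the diff:

    from pydantic import BaseModel

    from crewai.flow.flow import Flow, start


    class CounterState(BaseModel):
        count: int = 0


    class CounterFlow(Flow[CounterState]):
        @start()
        def bump(self):
            self.state.count += 1
            return self.state.count


    flow = CounterFlow()
    print(flow.kickoff(inputs={"count": 10}))  # 11; state is seeded before the start method runs
    # flow.kickoff(inputs={"missing": 1})      # would raise ValueError (extra="forbid")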
@@ -213,17 +291,27 @@ class Flow(Generic[T], metaclass=FlowMeta):
         else:
             return None  # Or raise an exception if no methods were executed
 
-    async def _execute_start_method(self, start_method: str) -> None:
-        result = await self._execute_method(self._methods[start_method])
-        await self._execute_listeners(start_method, result)
+    async def _execute_start_method(self, start_method_name: str) -> None:
+        result = await self._execute_method(
+            start_method_name, self._methods[start_method_name]
+        )
+        await self._execute_listeners(start_method_name, result)
 
-    async def _execute_method(self, method: Callable, *args: Any, **kwargs: Any) -> Any:
+    async def _execute_method(
+        self, method_name: str, method: Callable, *args: Any, **kwargs: Any
+    ) -> Any:
         result = (
             await method(*args, **kwargs)
             if asyncio.iscoroutinefunction(method)
             else method(*args, **kwargs)
         )
         self._method_outputs.append(result)  # Store the output
+
+        # Track method execution counts
+        self._method_execution_counts[method_name] = (
+            self._method_execution_counts.get(method_name, 0) + 1
+        )
+
         return result
 
     async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
@@ -231,32 +319,39 @@ class Flow(Generic[T], metaclass=FlowMeta):
 
         if trigger_method in self._routers:
             router_method = self._methods[self._routers[trigger_method]]
-            path = await self._execute_method(router_method)
-            # Use the path as the new trigger method
+            path = await self._execute_method(
+                self._routers[trigger_method], router_method
+            )
             trigger_method = path
 
-        for listener, (condition_type, methods) in self._listeners.items():
+        for listener_name, (condition_type, methods) in self._listeners.items():
             if condition_type == "OR":
                 if trigger_method in methods:
+                    # Schedule the listener without preventing re-execution
                     listener_tasks.append(
-                        self._execute_single_listener(listener, result)
+                        self._execute_single_listener(listener_name, result)
                     )
             elif condition_type == "AND":
-                if listener not in self._pending_and_listeners:
-                    self._pending_and_listeners[listener] = set()
-                self._pending_and_listeners[listener].add(trigger_method)
-                if set(methods) == self._pending_and_listeners[listener]:
+                # Initialize pending methods for this listener if not already done
+                if listener_name not in self._pending_and_listeners:
+                    self._pending_and_listeners[listener_name] = set(methods)
+                # Remove the trigger method from pending methods
+                self._pending_and_listeners[listener_name].discard(trigger_method)
+                if not self._pending_and_listeners[listener_name]:
+                    # All required methods have been executed
                     listener_tasks.append(
-                        self._execute_single_listener(listener, result)
+                        self._execute_single_listener(listener_name, result)
                     )
-                    del self._pending_and_listeners[listener]
+                    # Reset pending methods for this listener
+                    self._pending_and_listeners.pop(listener_name, None)
 
         # Run all listener tasks concurrently and wait for them to complete
+        if listener_tasks:
             await asyncio.gather(*listener_tasks)
 
-    async def _execute_single_listener(self, listener: str, result: Any) -> None:
+    async def _execute_single_listener(self, listener_name: str, result: Any) -> None:
         try:
-            method = self._methods[listener]
+            method = self._methods[listener_name]
             sig = inspect.signature(method)
             params = list(sig.parameters.values())
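The reworked AND bookkeeping seeds the pending set with every trigger and discards them as they fire, so a listener like the sketch below runs exactly once, after both start methods complete. This assumes the `and_` helper exported by crewai.flow.flow, which is not itself shown in this diff:

    from crewai.flow.flow import Flow, and_, listen, start


    class JoinFlow(Flow):
        @start()
        def first(self):
            return "a"

        @start()
        def second(self):
            return "b"

        @listen(and_(first, second))
        def both_done(self):
            print("fires once, after first AND second")


    JoinFlow().kickoff()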
@@ -265,15 +360,19 @@ class Flow(Generic[T], metaclass=FlowMeta):
 
             if method_params:
                 # If listener expects parameters, pass the result
-                listener_result = await self._execute_method(method, result)
+                listener_result = await self._execute_method(
+                    listener_name, method, result
+                )
             else:
                 # If listener does not expect parameters, call without arguments
-                listener_result = await self._execute_method(method)
+                listener_result = await self._execute_method(listener_name, method)
 
             # Execute listeners of this listener
-            await self._execute_listeners(listener, listener_result)
+            await self._execute_listeners(listener_name, listener_result)
         except Exception as e:
-            print(f"[Flow._execute_single_listener] Error in method {listener}: {e}")
+            print(
+                f"[Flow._execute_single_listener] Error in method {listener_name}: {e}"
+            )
             import traceback
 
             traceback.print_exc()
src/crewai/knowledge/embedder/base_embedder.py (new file, 55 lines)
@@ -0,0 +1,55 @@
+from abc import ABC, abstractmethod
+from typing import List
+
+import numpy as np
+
+
+class BaseEmbedder(ABC):
+    """
+    Abstract base class for text embedding models
+    """
+
+    @abstractmethod
+    def embed_chunks(self, chunks: List[str]) -> np.ndarray:
+        """
+        Generate embeddings for a list of text chunks
+
+        Args:
+            chunks: List of text chunks to embed
+
+        Returns:
+            Array of embeddings
+        """
+        pass
+
+    @abstractmethod
+    def embed_texts(self, texts: List[str]) -> np.ndarray:
+        """
+        Generate embeddings for a list of texts
+
+        Args:
+            texts: List of texts to embed
+
+        Returns:
+            Array of embeddings
+        """
+        pass
+
+    @abstractmethod
+    def embed_text(self, text: str) -> np.ndarray:
+        """
+        Generate embedding for a single text
+
+        Args:
+            text: Text to embed
+
+        Returns:
+            Embedding array
+        """
+        pass
+
+    @property
+    @abstractmethod
+    def dimension(self) -> int:
+        """Get the dimension of the embeddings"""
+        pass
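A minimal concrete subclass, just to show the contract — the hash-seeded fake vectors are obviously not a real embedding model:

    from typing import List

    import numpy as np

    from crewai.knowledge.embedder.base_embedder import BaseEmbedder


    class ToyEmbedder(BaseEmbedder):
        """Deterministic fake embeddings; illustrates the abstract interface only."""

        def embed_text(self, text: str) -> np.ndarray:
            rng = np.random.default_rng(abs(hash(text)) % (2**32))
            return rng.random(8)

        def embed_texts(self, texts: List[str]) -> np.ndarray:
            return np.stack([self.embed_text(t) for t in texts])

        def embed_chunks(self, chunks: List[str]) -> np.ndarray:
            return self.embed_texts(chunks)

        @property
        def dimension(self) -> int:
            return 8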
src/crewai/knowledge/embedder/fastembed.py (new file, 93 lines)
@@ -0,0 +1,93 @@
+from pathlib import Path
+from typing import List, Optional, Union
+
+import numpy as np
+
+from .base_embedder import BaseEmbedder
+
+try:
+    from fastembed_gpu import TextEmbedding  # type: ignore
+
+    FASTEMBED_AVAILABLE = True
+except ImportError:
+    try:
+        from fastembed import TextEmbedding
+
+        FASTEMBED_AVAILABLE = True
+    except ImportError:
+        FASTEMBED_AVAILABLE = False
+
+
+class FastEmbed(BaseEmbedder):
+    """
+    A wrapper class for text embedding models using FastEmbed
+    """
+
+    def __init__(
+        self,
+        model_name: str = "BAAI/bge-small-en-v1.5",
+        cache_dir: Optional[Union[str, Path]] = None,
+    ):
+        """
+        Initialize the embedding model
+
+        Args:
+            model_name: Name of the model to use
+            cache_dir: Directory to cache the model
+            gpu: Whether to use GPU acceleration
+        """
+        if not FASTEMBED_AVAILABLE:
+            raise ImportError(
+                "FastEmbed is not installed. Please install it with: "
+                "uv pip install fastembed or uv pip install fastembed-gpu for GPU support"
+            )
+
+        self.model = TextEmbedding(
+            model_name=model_name,
+            cache_dir=str(cache_dir) if cache_dir else None,
+        )
+
+    def embed_chunks(self, chunks: List[str]) -> List[np.ndarray]:
+        """
+        Generate embeddings for a list of text chunks
+
+        Args:
+            chunks: List of text chunks to embed
+
+        Returns:
+            List of embeddings
+        """
+        embeddings = list(self.model.embed(chunks))
+        return embeddings
+
+    def embed_texts(self, texts: List[str]) -> List[np.ndarray]:
+        """
+        Generate embeddings for a list of texts
+
+        Args:
+            texts: List of texts to embed
+
+        Returns:
+            List of embeddings
+        """
+        embeddings = list(self.model.embed(texts))
+        return embeddings
+
+    def embed_text(self, text: str) -> np.ndarray:
+        """
+        Generate embedding for a single text
+
+        Args:
+            text: Text to embed
+
+        Returns:
+            Embedding array
+        """
+        return self.embed_texts([text])[0]
+
+    @property
+    def dimension(self) -> int:
+        """Get the dimension of the embeddings"""
+        # Generate a test embedding to get dimensions
+        test_embed = self.embed_text("test")
+        return len(test_embed)
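Usage sketch, assuming `fastembed` (or `fastembed-gpu`) is installed alongside crewai:

    from crewai.knowledge.embedder.fastembed import FastEmbed

    embedder = FastEmbed()  # defaults to BAAI/bge-small-en-v1.5
    vectors = embedder.embed_texts(["hello world", "crewAI knowledge"])
    print(len(vectors), embedder.dimension)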
src/crewai/knowledge/knowledge.py (new file, 54 lines)
@@ -0,0 +1,54 @@
+import os
+
+from typing import List, Optional, Dict, Any
+from pydantic import BaseModel, ConfigDict, Field
+
+from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
+from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
+from crewai.utilities.logger import Logger
+from crewai.utilities.constants import DEFAULT_SCORE_THRESHOLD
+
+os.environ["TOKENIZERS_PARALLELISM"] = "false"  # removes logging from fastembed
+
+
+class Knowledge(BaseModel):
+    """
+    Knowledge is a collection of sources and setup for the vector store to save and query relevant context.
+    Args:
+        sources: List[BaseKnowledgeSource] = Field(default_factory=list)
+        storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
+        embedder_config: Optional[Dict[str, Any]] = None
+    """
+
+    sources: List[BaseKnowledgeSource] = Field(default_factory=list)
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
+    embedder_config: Optional[Dict[str, Any]] = None
+
+    def __init__(self, embedder_config: Optional[Dict[str, Any]] = None, **data):
+        super().__init__(**data)
+        self.storage = KnowledgeStorage(embedder_config=embedder_config or None)
+
+        try:
+            for source in self.sources:
+                source.add()
+        except Exception as e:
+            Logger(verbose=True).log(
+                "warning",
+                f"Failed to init knowledge: {e}",
+                color="yellow",
+            )
+
+    def query(
+        self, query: List[str], limit: int = 3, preference: Optional[str] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        Query across all knowledge sources to find the most relevant information.
+        Returns the top_k most relevant chunks.
+        """
+
+        results = self.storage.search(
+            query,
+            limit,
+            filter={"preference": preference} if preference else None,
+            score_threshold=DEFAULT_SCORE_THRESHOLD,
+        )
+        return results
0 src/crewai/knowledge/source/__init__.py Normal file

36 src/crewai/knowledge/source/base_file_knowledge_source.py Normal file
@@ -0,0 +1,36 @@
from pathlib import Path
from typing import Union, List

from pydantic import Field

from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
from typing import Dict, Any
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage


class BaseFileKnowledgeSource(BaseKnowledgeSource):
    """Base class for knowledge sources that load content from files."""

    file_path: Union[Path, List[Path]] = Field(...)
    content: Dict[Path, str] = Field(init=False, default_factory=dict)
    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)

    def model_post_init(self, _):
        """Post-initialization method to load content."""
        self.content = self.load_content()

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess file content. Should be overridden by subclasses."""
        paths = [self.file_path] if isinstance(self.file_path, Path) else self.file_path

        for path in paths:
            if not path.exists():
                raise FileNotFoundError(f"File not found: {path}")
            if not path.is_file():
                raise ValueError(f"Path is not a file: {path}")
        return {}

    def save_documents(self, metadata: Dict[str, Any]):
        """Save the documents to the storage."""
        chunk_metadatas = [metadata.copy() for _ in self.chunks]
        self.storage.save(self.chunks, chunk_metadatas)
48 src/crewai/knowledge/source/base_knowledge_source.py Normal file
@@ -0,0 +1,48 @@
from abc import ABC, abstractmethod
from typing import List, Dict, Any

import numpy as np
from pydantic import BaseModel, ConfigDict, Field

from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage


class BaseKnowledgeSource(BaseModel, ABC):
    """Abstract base class for knowledge sources."""

    chunk_size: int = 4000
    chunk_overlap: int = 200
    chunks: List[str] = Field(default_factory=list)
    chunk_embeddings: List[np.ndarray] = Field(default_factory=list)

    model_config = ConfigDict(arbitrary_types_allowed=True)
    storage: KnowledgeStorage = Field(default_factory=KnowledgeStorage)
    metadata: Dict[str, Any] = Field(default_factory=dict)

    @abstractmethod
    def load_content(self) -> Dict[Any, str]:
        """Load and preprocess content from the source."""
        pass

    @abstractmethod
    def add(self) -> None:
        """Process content, chunk it, compute embeddings, and save them."""
        pass

    def get_embeddings(self) -> List[np.ndarray]:
        """Return the list of embeddings for the chunks."""
        return self.chunk_embeddings

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]

    def save_documents(self, metadata: Dict[str, Any]):
        """
        Save the documents to the storage.
        This method should be called after the chunks and embeddings are generated.
        """
        self.storage.save(self.chunks, metadata)
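The `_chunk_text` helper above is a sliding window: each chunk starts `chunk_size - chunk_overlap` characters after the previous one, so with the defaults (4000/200) consecutive chunks share 200 characters. A small-number illustration (the values here are hypothetical, chosen to make the arithmetic visible):

    text = "abcdefghij" * 10  # 100 characters
    chunk_size, chunk_overlap = 40, 10
    chunks = [
        text[i : i + chunk_size]
        for i in range(0, len(text), chunk_size - chunk_overlap)
    ]
    # Start offsets are 0, 30, 60, 90; each chunk repeats the previous one's last 10 characters.
    print([len(c) for c in chunks])  # [40, 40, 40, 10]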
44 src/crewai/knowledge/source/csv_knowledge_source.py Normal file
@@ -0,0 +1,44 @@
import csv
from typing import Dict, List
from pathlib import Path

from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class CSVKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries CSV file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess CSV file content."""
        super().load_content()  # Validate the file path

        file_path = (
            self.file_path[0] if isinstance(self.file_path, list) else self.file_path
        )
        file_path = Path(file_path) if isinstance(file_path, str) else file_path

        with open(file_path, "r", encoding="utf-8") as csvfile:
            reader = csv.reader(csvfile)
            content = ""
            for row in reader:
                content += " ".join(row) + "\n"
        return {file_path: content}

    def add(self) -> None:
        """
        Add CSV file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        content_str = (
            str(self.content) if isinstance(self.content, dict) else self.content
        )
        new_chunks = self._chunk_text(content_str)
        self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
56 src/crewai/knowledge/source/excel_knowledge_source.py Normal file
@@ -0,0 +1,56 @@
from typing import Dict, List
from pathlib import Path
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class ExcelKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries Excel file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess Excel file content."""
        super().load_content()  # Validate the file path
        pd = self._import_dependencies()

        if isinstance(self.file_path, list):
            file_path = self.file_path[0]
        else:
            file_path = self.file_path

        df = pd.read_excel(file_path)
        content = df.to_csv(index=False)
        return {file_path: content}

    def _import_dependencies(self):
        """Dynamically import dependencies."""
        try:
            import openpyxl  # noqa
            import pandas as pd

            return pd
        except ImportError as e:
            missing_package = str(e).split()[-1]
            raise ImportError(
                f"{missing_package} is not installed. Please install it with: pip install {missing_package}"
            )

    def add(self) -> None:
        """
        Add Excel file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        # Convert dictionary values to a single string if content is a dictionary
        if isinstance(self.content, dict):
            content_str = "\n".join(str(value) for value in self.content.values())
        else:
            content_str = str(self.content)

        new_chunks = self._chunk_text(content_str)
        self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
54 src/crewai/knowledge/source/json_knowledge_source.py Normal file
@@ -0,0 +1,54 @@
import json
from typing import Any, Dict, List
from pathlib import Path

from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class JSONKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries JSON file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess JSON file content."""
        super().load_content()  # Validate the file path
        paths = [self.file_path] if isinstance(self.file_path, Path) else self.file_path

        content: Dict[Path, str] = {}
        for path in paths:
            with open(path, "r", encoding="utf-8") as json_file:
                data = json.load(json_file)
                content[path] = self._json_to_text(data)
        return content

    def _json_to_text(self, data: Any, level: int = 0) -> str:
        """Recursively convert JSON data to a text representation."""
        text = ""
        indent = "  " * level
        if isinstance(data, dict):
            for key, value in data.items():
                text += f"{indent}{key}: {self._json_to_text(value, level + 1)}\n"
        elif isinstance(data, list):
            for item in data:
                text += f"{indent}- {self._json_to_text(item, level + 1)}\n"
        else:
            text += f"{str(data)}"
        return text

    def add(self) -> None:
        """
        Add JSON file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        content_str = (
            str(self.content) if isinstance(self.content, dict) else self.content
        )
        new_chunks = self._chunk_text(content_str)
        self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
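To see what the recursion above produces, here is the same logic restated as a standalone function (for illustration only) applied to a tiny input. Note that nested structures land inline after their parent key, indented by two spaces per level:

    def json_to_text(data, level=0):
        # Same recursion as JSONKnowledgeSource._json_to_text, restated standalone.
        text, indent = "", "  " * level
        if isinstance(data, dict):
            for key, value in data.items():
                text += f"{indent}{key}: {json_to_text(value, level + 1)}\n"
        elif isinstance(data, list):
            for item in data:
                text += f"{indent}- {json_to_text(item, level + 1)}\n"
        else:
            text += f"{data}"
        return text

    print(json_to_text({"name": "crew", "tags": ["ai", "agents"]}))
    # name: crew
    # tags:   - ai
    #   - agents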
54 src/crewai/knowledge/source/pdf_knowledge_source.py Normal file
@@ -0,0 +1,54 @@
from typing import List, Dict
from pathlib import Path

from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class PDFKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries PDF file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess PDF file content."""
        super().load_content()  # Validate the file paths
        pdfplumber = self._import_pdfplumber()

        paths = [self.file_path] if isinstance(self.file_path, Path) else self.file_path
        content = {}

        for path in paths:
            text = ""
            with pdfplumber.open(path) as pdf:
                for page in pdf.pages:
                    page_text = page.extract_text()
                    if page_text:
                        text += page_text + "\n"
            content[path] = text
        return content

    def _import_pdfplumber(self):
        """Dynamically import pdfplumber."""
        try:
            import pdfplumber

            return pdfplumber
        except ImportError:
            raise ImportError(
                "pdfplumber is not installed. Please install it with: pip install pdfplumber"
            )

    def add(self) -> None:
        """
        Add PDF file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        for _, text in self.content.items():
            new_chunks = self._chunk_text(text)
            self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
33 src/crewai/knowledge/source/string_knowledge_source.py Normal file
@@ -0,0 +1,33 @@
from typing import List

from pydantic import Field

from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource


class StringKnowledgeSource(BaseKnowledgeSource):
    """A knowledge source that stores and queries plain text content using embeddings."""

    content: str = Field(...)

    def model_post_init(self, _):
        """Post-initialization method to validate content."""
        self.load_content()

    def load_content(self):
        """Validate string content."""
        if not isinstance(self.content, str):
            raise ValueError("StringKnowledgeSource only accepts string content")

    def add(self) -> None:
        """Add string content to the knowledge source, chunk it, compute embeddings, and save them."""
        new_chunks = self._chunk_text(self.content)
        self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
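Taken together with the Knowledge class above, the string source gives the shortest end-to-end path through the new API. A minimal sketch (assuming OPENAI_API_KEY is set, since KnowledgeStorage defaults to OpenAI's text-embedding-3-small):

    from crewai.knowledge.knowledge import Knowledge
    from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource

    source = StringKnowledgeSource(
        content="CrewAI knowledge sources cover CSV, Excel, JSON, PDF, text files and raw strings."
    )
    knowledge = Knowledge(sources=[source])  # chunks, embeds and stores the source
    print(knowledge.query(["which file formats are supported?"]))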
35 src/crewai/knowledge/source/text_file_knowledge_source.py Normal file
@@ -0,0 +1,35 @@
from typing import Dict, List
from pathlib import Path

from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource


class TextFileKnowledgeSource(BaseFileKnowledgeSource):
    """A knowledge source that stores and queries text file content using embeddings."""

    def load_content(self) -> Dict[Path, str]:
        """Load and preprocess text file content."""
        super().load_content()
        paths = [self.file_path] if isinstance(self.file_path, Path) else self.file_path
        content = {}
        for path in paths:
            with path.open("r", encoding="utf-8") as f:
                content[path] = f.read()  # type: ignore
        return content

    def add(self) -> None:
        """
        Add text file content to the knowledge source, chunk it, compute embeddings,
        and save the embeddings.
        """
        for _, text in self.content.items():
            new_chunks = self._chunk_text(text)
            self.chunks.extend(new_chunks)
        self.save_documents(metadata=self.metadata)

    def _chunk_text(self, text: str) -> List[str]:
        """Utility method to split text into chunks."""
        return [
            text[i : i + self.chunk_size]
            for i in range(0, len(text), self.chunk_size - self.chunk_overlap)
        ]
0 src/crewai/knowledge/storage/__init__.py Normal file

29 src/crewai/knowledge/storage/base_knowledge_storage.py Normal file
@@ -0,0 +1,29 @@
from abc import ABC, abstractmethod
from typing import Dict, Any, List, Optional


class BaseKnowledgeStorage(ABC):
    """Abstract base class for knowledge storage implementations."""

    @abstractmethod
    def search(
        self,
        query: List[str],
        limit: int = 3,
        filter: Optional[dict] = None,
        score_threshold: float = 0.35,
    ) -> List[Dict[str, Any]]:
        """Search for documents in the knowledge base."""
        pass

    @abstractmethod
    def save(
        self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]
    ) -> None:
        """Save documents to the knowledge base."""
        pass

    @abstractmethod
    def reset(self) -> None:
        """Reset the knowledge base."""
        pass
132 src/crewai/knowledge/storage/knowledge_storage.py Normal file
@@ -0,0 +1,132 @@
import contextlib
import io
import logging
import chromadb
import os
from crewai.utilities.paths import db_storage_path
from typing import Optional, List
from typing import Dict, Any
from crewai.utilities import EmbeddingConfigurator
from crewai.knowledge.storage.base_knowledge_storage import BaseKnowledgeStorage
import hashlib


@contextlib.contextmanager
def suppress_logging(
    logger_name="chromadb.segment.impl.vector.local_persistent_hnsw",
    level=logging.ERROR,
):
    logger = logging.getLogger(logger_name)
    original_level = logger.getEffectiveLevel()
    logger.setLevel(level)
    with (
        contextlib.redirect_stdout(io.StringIO()),
        contextlib.redirect_stderr(io.StringIO()),
        contextlib.suppress(UserWarning),
    ):
        yield
    logger.setLevel(original_level)


class KnowledgeStorage(BaseKnowledgeStorage):
    """
    Extends Storage to handle embeddings for memory entries, improving
    search efficiency.
    """

    collection: Optional[chromadb.Collection] = None

    def __init__(self, embedder_config: Optional[Dict[str, Any]] = None):
        self._initialize_app(embedder_config or {})

    def search(
        self,
        query: List[str],
        limit: int = 3,
        filter: Optional[dict] = None,
        score_threshold: float = 0.35,
    ) -> List[Dict[str, Any]]:
        with suppress_logging():
            if self.collection:
                fetched = self.collection.query(
                    query_texts=query,
                    n_results=limit,
                    where=filter,
                )
                results = []
                for i in range(len(fetched["ids"][0])):  # type: ignore
                    result = {
                        "id": fetched["ids"][0][i],  # type: ignore
                        "metadata": fetched["metadatas"][0][i],  # type: ignore
                        "context": fetched["documents"][0][i],  # type: ignore
                        "score": fetched["distances"][0][i],  # type: ignore
                    }
                    if result["score"] >= score_threshold:  # type: ignore
                        results.append(result)
                return results
            else:
                raise Exception("Collection not initialized")

    def _initialize_app(self, embedder_config: Optional[Dict[str, Any]] = None):
        import chromadb
        from chromadb.config import Settings

        self._set_embedder_config(embedder_config)

        chroma_client = chromadb.PersistentClient(
            path=f"{db_storage_path()}/knowledge",
            settings=Settings(allow_reset=True),
        )

        self.app = chroma_client

        try:
            self.collection = self.app.get_or_create_collection(name="knowledge")
        except Exception:
            raise Exception("Failed to create or get collection")

    def reset(self):
        if self.app:
            self.app.reset()

    def save(
        self, documents: List[str], metadata: Dict[str, Any] | List[Dict[str, Any]]
    ):
        if self.collection:
            metadatas = [metadata] if isinstance(metadata, dict) else metadata

            ids = [
                hashlib.sha256(doc.encode("utf-8")).hexdigest() for doc in documents
            ]

            self.collection.upsert(
                documents=documents,
                metadatas=metadatas,
                ids=ids,
            )
        else:
            raise Exception("Collection not initialized")

    def _create_default_embedding_function(self):
        from chromadb.utils.embedding_functions.openai_embedding_function import (
            OpenAIEmbeddingFunction,
        )

        return OpenAIEmbeddingFunction(
            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
        )

    def _set_embedder_config(
        self, embedder_config: Optional[Dict[str, Any]] = None
    ) -> None:
        """Set the embedding configuration for the knowledge storage.

        Args:
            embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
                If None or empty, defaults to the default embedding function.
        """
        self.embedder_config = (
            EmbeddingConfigurator().configure_embedder(embedder_config)
            if embedder_config
            else self._create_default_embedding_function()
        )
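One detail worth noting in save() above: document ids are a SHA-256 of the chunk text, so upserting the same chunk twice overwrites rather than duplicates it. For example:

    import hashlib

    chunk = "CrewAI supports knowledge sources."
    print(hashlib.sha256(chunk.encode("utf-8")).hexdigest())
    # Always the same hex digest for the same text, so re-running ingestion is idempotent.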
src/crewai/llm.py
@@ -1,7 +1,10 @@
+import io
+import logging
+import sys
+import warnings
 from contextlib import contextmanager
 from typing import Any, Dict, List, Optional, Union
-import logging
-import warnings
 import litellm
 from litellm import get_supported_openai_params

@@ -9,9 +12,6 @@ from crewai.utilities.exceptions.context_window_exceeding_exception import (
     LLMContextLengthExceededException,
 )
-import sys
-import io


 class FilteredStream(io.StringIO):
     def write(self, s):
@@ -118,12 +118,12 @@ class LLM:

         litellm.drop_params = True
         litellm.set_verbose = False
-        litellm.callbacks = callbacks
+        self.set_callbacks(callbacks)

     def call(self, messages: List[Dict[str, str]], callbacks: List[Any] = []) -> str:
         with suppress_warnings():
             if callbacks and len(callbacks) > 0:
-                litellm.callbacks = callbacks
+                self.set_callbacks(callbacks)

             try:
                 params = {
@@ -181,3 +181,15 @@ class LLM:
     def get_context_window_size(self) -> int:
         # Only using 75% of the context window size to avoid cutting the message in the middle
         return int(LLM_CONTEXT_WINDOW_SIZES.get(self.model, 8192) * 0.75)
+
+    def set_callbacks(self, callbacks: List[Any]):
+        callback_types = [type(callback) for callback in callbacks]
+        for callback in litellm.success_callback[:]:
+            if type(callback) in callback_types:
+                litellm.success_callback.remove(callback)
+
+        for callback in litellm._async_success_callback[:]:
+            if type(callback) in callback_types:
+                litellm._async_success_callback.remove(callback)
+
+        litellm.callbacks = callbacks
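The new set_callbacks helper replaces, rather than stacks, litellm callbacks of the same type, so constructing several LLM instances does not accumulate duplicate handlers. The dedup rule in isolation (plain lists and hypothetical callback classes standing in for litellm's globals):

    class TokenLogger: ...   # hypothetical callback types
    class CostTracker: ...

    registered = [TokenLogger(), CostTracker()]
    incoming = [TokenLogger()]
    incoming_types = [type(cb) for cb in incoming]
    registered = [cb for cb in registered if type(cb) not in incoming_types]
    # Only the CostTracker survives; the stale TokenLogger gives way to the incoming one.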
src/crewai/memory/__init__.py
@@ -1,5 +1,6 @@
 from .entity.entity_memory import EntityMemory
 from .long_term.long_term_memory import LongTermMemory
 from .short_term.short_term_memory import ShortTermMemory
+from .user.user_memory import UserMemory

-__all__ = ["EntityMemory", "LongTermMemory", "ShortTermMemory"]
+__all__ = ["UserMemory", "EntityMemory", "LongTermMemory", "ShortTermMemory"]
src/crewai/memory/contextual/contextual_memory.py
@@ -1,13 +1,25 @@
-from typing import Optional
+from typing import Optional, Dict, Any

-from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory
+from crewai.memory import EntityMemory, LongTermMemory, ShortTermMemory, UserMemory


 class ContextualMemory:
-    def __init__(self, stm: ShortTermMemory, ltm: LongTermMemory, em: EntityMemory):
+    def __init__(
+        self,
+        memory_config: Optional[Dict[str, Any]],
+        stm: ShortTermMemory,
+        ltm: LongTermMemory,
+        em: EntityMemory,
+        um: UserMemory,
+    ):
+        if memory_config is not None:
+            self.memory_provider = memory_config.get("provider")
+        else:
+            self.memory_provider = None
         self.stm = stm
         self.ltm = ltm
         self.em = em
+        self.um = um

     def build_context_for_task(self, task, context) -> str:
         """
@@ -23,6 +35,8 @@ class ContextualMemory:
         context.append(self._fetch_ltm_context(task.description))
         context.append(self._fetch_stm_context(query))
         context.append(self._fetch_entity_context(query))
+        if self.memory_provider == "mem0":
+            context.append(self._fetch_user_context(query))
         return "\n".join(filter(None, context))

     def _fetch_stm_context(self, query) -> str:
@@ -31,7 +45,12 @@ class ContextualMemory:
         formatted as bullet points.
         """
         stm_results = self.stm.search(query)
-        formatted_results = "\n".join([f"- {result}" for result in stm_results])
+        formatted_results = "\n".join(
+            [
+                f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
+                for result in stm_results
+            ]
+        )
         return f"Recent Insights:\n{formatted_results}" if stm_results else ""

     def _fetch_ltm_context(self, task) -> Optional[str]:
@@ -60,6 +79,26 @@ class ContextualMemory:
         """
         em_results = self.em.search(query)
         formatted_results = "\n".join(
-            [f"- {result['context']}" for result in em_results]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
+            [
+                f"- {result['memory'] if self.memory_provider == 'mem0' else result['context']}"
+                for result in em_results
+            ]  # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
         )
         return f"Entities:\n{formatted_results}" if em_results else ""
+
+    def _fetch_user_context(self, query: str) -> str:
+        """
+        Fetches and formats relevant user information from User Memory.
+        Args:
+            query (str): The search query to find relevant user memories.
+        Returns:
+            str: Formatted user memories as bullet points, or an empty string if none found.
+        """
+        user_memories = self.um.search(query)
+        if not user_memories:
+            return ""
+
+        formatted_memories = "\n".join(
+            f"- {result['memory']}" for result in user_memories
+        )
+        return f"User memories/preferences:\n{formatted_memories}"
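The memory_config consulted above is only sketched by this diff; the keys it reads imply a shape like the following (a hedged reconstruction inferred from the code, not taken verbatim from the PR):

    memory_config = {
        "provider": "mem0",          # read via memory_config.get("provider")
        "config": {
            "user_id": "john",       # required later for the "user" memory type
            "api_key": "m0-...",     # optional; falls back to the MEM0_API_KEY env var
        },
    }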
src/crewai/memory/entity/entity_memory.py
@@ -11,12 +11,26 @@ class EntityMemory(Memory):
     """

     def __init__(self, crew=None, embedder_config=None, storage=None):
+        if hasattr(crew, "memory_config") and crew.memory_config is not None:
+            self.memory_provider = crew.memory_config.get("provider")
+        else:
+            self.memory_provider = None
+
+        if self.memory_provider == "mem0":
+            try:
+                from crewai.memory.storage.mem0_storage import Mem0Storage
+            except ImportError:
+                raise ImportError(
+                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
+                )
+            storage = Mem0Storage(type="entities", crew=crew)
+        else:
             storage = (
                 storage
                 if storage
                 else RAGStorage(
                     type="entities",
-                    allow_reset=False,
+                    allow_reset=True,
                     embedder_config=embedder_config,
                     crew=crew,
                 )
@@ -25,6 +39,14 @@ class EntityMemory(Memory):

     def save(self, item: EntityMemoryItem) -> None:  # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
         """Saves an entity item into the SQLite storage."""
-        data = f"{item.name}({item.type}): {item.description}"
+        if self.memory_provider == "mem0":
+            data = f"""
+            Remember details about the following entity:
+            Name: {item.name}
+            Type: {item.type}
+            Entity Description: {item.description}
+            """
+        else:
+            data = f"{item.name}({item.type}): {item.description}"
         super().save(data, item.metadata)
src/crewai/memory/long_term/long_term_memory.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict
+from typing import Any, Dict, List

 from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
 from crewai.memory.memory import Memory
@@ -28,7 +28,7 @@ class LongTermMemory(Memory):
             datetime=item.datetime,
         )

-    def search(self, task: str, latest_n: int = 3) -> Dict[str, Any]:
+    def search(self, task: str, latest_n: int = 3) -> List[Dict[str, Any]]:  # type: ignore # signature of "search" incompatible with supertype "Memory"
         return self.storage.load(task, latest_n)  # type: ignore # BUG?: "Storage" has no attribute "load"

     def reset(self) -> None:
src/crewai/memory/memory.py
@@ -1,6 +1,6 @@
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional, List

-from crewai.memory.storage.interface import Storage
+from crewai.memory.storage.rag_storage import RAGStorage


 class Memory:
@@ -8,7 +8,7 @@ class Memory:
     Base class for memory, now supporting agent tags and generic metadata.
     """

-    def __init__(self, storage: Storage):
+    def __init__(self, storage: RAGStorage):
         self.storage = storage

     def save(
@@ -23,5 +23,12 @@ class Memory:

         self.storage.save(value, metadata)

-    def search(self, query: str) -> Dict[str, Any]:
-        return self.storage.search(query)
+    def search(
+        self,
+        query: str,
+        limit: int = 3,
+        score_threshold: float = 0.35,
+    ) -> List[Any]:
+        return self.storage.search(
+            query=query, limit=limit, score_threshold=score_threshold
+        )
src/crewai/memory/short_term/short_term_memory.py
@@ -14,6 +14,20 @@ class ShortTermMemory(Memory):
     """

     def __init__(self, crew=None, embedder_config=None, storage=None):
+        if hasattr(crew, "memory_config") and crew.memory_config is not None:
+            self.memory_provider = crew.memory_config.get("provider")
+        else:
+            self.memory_provider = None
+
+        if self.memory_provider == "mem0":
+            try:
+                from crewai.memory.storage.mem0_storage import Mem0Storage
+            except ImportError:
+                raise ImportError(
+                    "Mem0 is not installed. Please install it with `pip install mem0ai`."
+                )
+            storage = Mem0Storage(type="short_term", crew=crew)
+        else:
             storage = (
                 storage
                 if storage
@@ -30,11 +44,20 @@ class ShortTermMemory(Memory):
         agent: Optional[str] = None,
     ) -> None:
         item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
+        if self.memory_provider == "mem0":
+            item.data = f"Remember the following insights from Agent run: {item.data}"
+
         super().save(value=item.data, metadata=item.metadata, agent=item.agent)

-    def search(self, query: str, score_threshold: float = 0.35):
-        return self.storage.search(query=query, score_threshold=score_threshold)  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters
+    def search(
+        self,
+        query: str,
+        limit: int = 3,
+        score_threshold: float = 0.35,
+    ):
+        return self.storage.search(
+            query=query, limit=limit, score_threshold=score_threshold
+        )  # type: ignore # BUG? The reference is to the parent class, but the parent class does not have this parameters

     def reset(self) -> None:
         try:
76 src/crewai/memory/storage/base_rag_storage.py Normal file
@@ -0,0 +1,76 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional


class BaseRAGStorage(ABC):
    """
    Base class for RAG-based Storage implementations.
    """

    app: Any | None = None

    def __init__(
        self,
        type: str,
        allow_reset: bool = True,
        embedder_config: Optional[Any] = None,
        crew: Any = None,
    ):
        self.type = type
        self.allow_reset = allow_reset
        self.embedder_config = embedder_config
        self.crew = crew
        self.agents = self._initialize_agents()

    def _initialize_agents(self) -> str:
        if self.crew:
            return "_".join(
                [self._sanitize_role(agent.role) for agent in self.crew.agents]
            )
        return ""

    @abstractmethod
    def _sanitize_role(self, role: str) -> str:
        """Sanitizes agent roles to ensure valid directory names."""
        pass

    @abstractmethod
    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
        """Save a value with metadata to the storage."""
        pass

    @abstractmethod
    def search(
        self,
        query: str,
        limit: int = 3,
        filter: Optional[dict] = None,
        score_threshold: float = 0.35,
    ) -> List[Any]:
        """Search for entries in the storage."""
        pass

    @abstractmethod
    def reset(self) -> None:
        """Reset the storage."""
        pass

    @abstractmethod
    def _generate_embedding(
        self, text: str, metadata: Optional[Dict[str, Any]] = None
    ) -> Any:
        """Generate an embedding for the given text and metadata."""
        pass

    @abstractmethod
    def _initialize_app(self):
        """Initialize the vector db."""
        pass

    def setup_config(self, config: Dict[str, Any]):
        """Setup the config of the storage."""
        pass

    def initialize_client(self):
        """Initialize the client of the storage. This should setup the app and the db collection"""
        pass
src/crewai/memory/storage/interface.py
@@ -1,4 +1,4 @@
-from typing import Any, Dict
+from typing import Any, Dict, List


 class Storage:
@@ -7,8 +7,10 @@ class Storage:
     def save(self, value: Any, metadata: Dict[str, Any]) -> None:
         pass

-    def search(self, key: str) -> Dict[str, Any]:  # type: ignore
-        pass
+    def search(
+        self, query: str, limit: int, score_threshold: float
+    ) -> Dict[str, Any] | List[Any]:
+        return {}

     def reset(self) -> None:
         pass
src/crewai/memory/storage/kickoff_task_outputs_storage.py
@@ -70,7 +70,7 @@ class KickoffTaskOutputsSQLiteStorage:
                     task.expected_output,
                     json.dumps(output, cls=CrewJSONEncoder),
                     task_index,
-                    json.dumps(inputs),
+                    json.dumps(inputs, cls=CrewJSONEncoder),
                     was_replayed,
                 ),
             )
@@ -103,7 +103,7 @@ class KickoffTaskOutputsSQLiteStorage:
                 else value
             )

-        query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"
+        query = f"UPDATE latest_kickoff_task_outputs SET {', '.join(fields)} WHERE task_index = ?"  # nosec
         values.append(task_index)

         cursor.execute(query, tuple(values))
src/crewai/memory/storage/ltm_sqlite_storage.py
@@ -83,7 +83,7 @@ class LTMSQLiteStorage:
                 WHERE task_description = ?
                 ORDER BY datetime DESC, score ASC
                 LIMIT {latest_n}
-            """,
+            """,  # nosec
             (task_description,),
         )
         rows = cursor.fetchall()
104 src/crewai/memory/storage/mem0_storage.py Normal file
@@ -0,0 +1,104 @@
import os
from typing import Any, Dict, List

from mem0 import MemoryClient
from crewai.memory.storage.interface import Storage


class Mem0Storage(Storage):
    """
    Extends Storage to handle embedding and searching across entities using Mem0.
    """

    def __init__(self, type, crew=None):
        super().__init__()

        if type not in ["user", "short_term", "long_term", "entities"]:
            raise ValueError(
                "Invalid type for Mem0Storage. Must be one of: 'user', 'short_term', 'long_term', 'entities'."
            )

        self.memory_type = type
        self.crew = crew
        self.memory_config = crew.memory_config

        # User ID is required for user memory type "user" since it's used as a unique identifier for the user.
        user_id = self._get_user_id()
        if type == "user" and not user_id:
            raise ValueError("User ID is required for user memory type")

        # API key in memory config overrides the environment variable
        mem0_api_key = self.memory_config.get("config", {}).get("api_key") or os.getenv(
            "MEM0_API_KEY"
        )
        self.memory = MemoryClient(api_key=mem0_api_key)

    def _sanitize_role(self, role: str) -> str:
        """
        Sanitizes agent roles to ensure valid directory names.
        """
        return role.replace("\n", "").replace(" ", "_").replace("/", "_")

    def save(self, value: Any, metadata: Dict[str, Any]) -> None:
        user_id = self._get_user_id()
        agent_name = self._get_agent_name()
        if self.memory_type == "user":
            self.memory.add(value, user_id=user_id, metadata={**metadata})
        elif self.memory_type == "short_term":
            agent_name = self._get_agent_name()
            self.memory.add(
                value, agent_id=agent_name, metadata={"type": "short_term", **metadata}
            )
        elif self.memory_type == "long_term":
            agent_name = self._get_agent_name()
            self.memory.add(
                value,
                agent_id=agent_name,
                infer=False,
                metadata={"type": "long_term", **metadata},
            )
        elif self.memory_type == "entities":
            entity_name = None
            self.memory.add(
                value, user_id=entity_name, metadata={"type": "entity", **metadata}
            )

    def search(
        self,
        query: str,
        limit: int = 3,
        score_threshold: float = 0.35,
    ) -> List[Any]:
        params = {"query": query, "limit": limit}
        if self.memory_type == "user":
            user_id = self._get_user_id()
            params["user_id"] = user_id
        elif self.memory_type == "short_term":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "short_term"}
        elif self.memory_type == "long_term":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "long_term"}
        elif self.memory_type == "entities":
            agent_name = self._get_agent_name()
            params["agent_id"] = agent_name
            params["metadata"] = {"type": "entity"}

        # Discard the filters for now since we create the filters
        # automatically when the crew is created.
        results = self.memory.search(**params)
        return [r for r in results if r["score"] >= score_threshold]

    def _get_user_id(self):
        if self.memory_type == "user":
            if hasattr(self, "memory_config") and self.memory_config is not None:
                return self.memory_config.get("config", {}).get("user_id")
            else:
                return None
        return None

    def _get_agent_name(self):
        agents = self.crew.agents if self.crew else []
        agents = [self._sanitize_role(agent.role) for agent in agents]
        agents = "_".join(agents)
        return agents
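Wiring this storage in end-to-end is driven by the crew's memory_config, which the memory classes above read from crew.memory_config. A hedged sketch (assumes `mem0ai` is installed, MEM0_API_KEY is set, and that Crew accepts memory_config at construction; agents and tasks are elided):

    from crewai import Crew

    crew = Crew(
        agents=[...],
        tasks=[...],
        memory=True,
        memory_config={"provider": "mem0", "config": {"user_id": "john"}},
    )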
src/crewai/memory/storage/rag_storage.py
@@ -3,10 +3,13 @@ import io
 import logging
 import os
 import shutil
-from typing import Any, Dict, List, Optional
+import uuid

-from crewai.memory.storage.interface import Storage
+from typing import Any, Dict, List, Optional
+from chromadb.api import ClientAPI
+from crewai.memory.storage.base_rag_storage import BaseRAGStorage
 from crewai.utilities.paths import db_storage_path
+from crewai.utilities import EmbeddingConfigurator


 @contextlib.contextmanager
@@ -17,68 +20,59 @@ def suppress_logging(
     logger = logging.getLogger(logger_name)
     original_level = logger.getEffectiveLevel()
     logger.setLevel(level)
-    with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(
-        io.StringIO()
-    ), contextlib.suppress(UserWarning):
+    with (
+        contextlib.redirect_stdout(io.StringIO()),
+        contextlib.redirect_stderr(io.StringIO()),
+        contextlib.suppress(UserWarning),
+    ):
         yield
     logger.setLevel(original_level)


-class RAGStorage(Storage):
+class RAGStorage(BaseRAGStorage):
     """
     Extends Storage to handle embeddings for memory entries, improving
     search efficiency.
     """

-    def __init__(self, type, allow_reset=True, embedder_config=None, crew=None):
-        super().__init__()
-        if (
-            not os.getenv("OPENAI_API_KEY")
-            and not os.getenv("OPENAI_BASE_URL") == "https://api.openai.com/v1"
-        ):
-            os.environ["OPENAI_API_KEY"] = "fake"
+    app: ClientAPI | None = None

+    def __init__(self, type, allow_reset=True, embedder_config=None, crew=None):
+        super().__init__(type, allow_reset, embedder_config, crew)
         agents = crew.agents if crew else []
         agents = [self._sanitize_role(agent.role) for agent in agents]
         agents = "_".join(agents)
+        self.agents = agents

-        config = {
-            "app": {
-                "config": {"name": type, "collect_metrics": False, "log_level": "ERROR"}
-            },
-            "chunker": {
-                "chunk_size": 5000,
-                "chunk_overlap": 100,
-                "length_function": "len",
-                "min_chunk_size": 150,
-            },
-            "vectordb": {
-                "provider": "chroma",
-                "config": {
-                    "collection_name": type,
-                    "dir": f"{db_storage_path()}/{type}/{agents}",
-                    "allow_reset": allow_reset,
-                },
-            },
-        }
-
-        if embedder_config:
-            config["embedder"] = embedder_config
         self.type = type
-        self.config = config
         self.allow_reset = allow_reset
+        self._initialize_app()
+
+    def _set_embedder_config(self):
+        configurator = EmbeddingConfigurator()
+        self.embedder_config = configurator.configure_embedder(self.embedder_config)

     def _initialize_app(self):
-        from embedchain import App
-        from embedchain.llm.base import BaseLlm
+        import chromadb
+        from chromadb.config import Settings

-        class FakeLLM(BaseLlm):
-            pass
+        self._set_embedder_config()
+        chroma_client = chromadb.PersistentClient(
+            path=f"{db_storage_path()}/{self.type}/{self.agents}",
+            settings=Settings(allow_reset=self.allow_reset),
+        )

-        self.app = App.from_config(config=self.config)
-        self.app.llm = FakeLLM()
-        if self.allow_reset:
-            self.app.reset()
+        self.app = chroma_client
+
+        try:
+            self.collection = self.app.get_collection(
+                name=self.type, embedding_function=self.embedder_config
+            )
+        except Exception:
+            self.collection = self.app.create_collection(
+                name=self.type, embedding_function=self.embedder_config
+            )

     def _sanitize_role(self, role: str) -> str:
         """
@@ -87,11 +81,14 @@ class RAGStorage(Storage):
         return role.replace("\n", "").replace(" ", "_").replace("/", "_")

     def save(self, value: Any, metadata: Dict[str, Any]) -> None:
-        if not hasattr(self, "app"):
+        if not hasattr(self, "app") or not hasattr(self, "collection"):
             self._initialize_app()
-        self._generate_embedding(value, metadata)
+        try:
+            self._generate_embedding(value, metadata)
+        except Exception as e:
+            logging.error(f"Error during {self.type} save: {str(e)}")

-    def search(  # type: ignore # BUG?: Signature of "search" incompatible with supertype "Storage"
+    def search(
         self,
         query: str,
         limit: int = 3,
@@ -100,31 +97,56 @@ class RAGStorage(Storage):
     ) -> List[Any]:
         if not hasattr(self, "app"):
             self._initialize_app()
-        from embedchain.vectordb.chroma import InvalidDimensionException

-        with suppress_logging():
-            try:
-                results = (
-                    self.app.search(query, limit, where=filter)
-                    if filter
-                    else self.app.search(query, limit)
-                )
-            except InvalidDimensionException:
-                self.app.reset()
-                return []
-        return [r for r in results if r["metadata"]["score"] >= score_threshold]
+        try:
+            with suppress_logging():
+                response = self.collection.query(query_texts=query, n_results=limit)
+
+            results = []
+            for i in range(len(response["ids"][0])):
+                result = {
+                    "id": response["ids"][0][i],
+                    "metadata": response["metadatas"][0][i],
+                    "context": response["documents"][0][i],
+                    "score": response["distances"][0][i],
+                }
+                if result["score"] >= score_threshold:
+                    results.append(result)
+
+            return results
+        except Exception as e:
+            logging.error(f"Error during {self.type} search: {str(e)}")
+            return []

-    def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> Any:
-        if not hasattr(self, "app"):
+    def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> None:  # type: ignore
+        if not hasattr(self, "app") or not hasattr(self, "collection"):
             self._initialize_app()
-        from embedchain.models.data_type import DataType
-
-        self.app.add(text, data_type=DataType.TEXT, metadata=metadata)
+        self.collection.add(
+            documents=[text],
+            metadatas=[metadata or {}],
+            ids=[str(uuid.uuid4())],
+        )

     def reset(self) -> None:
         try:
             shutil.rmtree(f"{db_storage_path()}/{self.type}")
+            if self.app:
+                self.app.reset()
         except Exception as e:
-            raise Exception(
-                f"An error occurred while resetting the {self.type} memory: {e}"
-            )
+            if "attempt to write a readonly database" in str(e):
+                # Ignore this specific error
+                pass
+            else:
+                raise Exception(
+                    f"An error occurred while resetting the {self.type} memory: {e}"
+                )
+
+    def _create_default_embedding_function(self):
+        from chromadb.utils.embedding_functions.openai_embedding_function import (
+            OpenAIEmbeddingFunction,
+        )
+
+        return OpenAIEmbeddingFunction(
+            api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
+        )
0
src/crewai/memory/user/__init__.py
Normal file
0
src/crewai/memory/user/__init__.py
Normal file
45
src/crewai/memory/user/user_memory.py
Normal file
45
src/crewai/memory/user/user_memory.py
Normal file
@@ -0,0 +1,45 @@
+from typing import Any, Dict, Optional
+
+from crewai.memory.memory import Memory
+
+
+class UserMemory(Memory):
+    """
+    UserMemory class for handling user memory storage and retrieval.
+    Inherits from the Memory class and utilizes an instance of a class that
+    adheres to the Storage for data storage, specifically working with
+    MemoryItem instances.
+    """
+
+    def __init__(self, crew=None):
+        try:
+            from crewai.memory.storage.mem0_storage import Mem0Storage
+        except ImportError:
+            raise ImportError(
+                "Mem0 is not installed. Please install it with `pip install mem0ai`."
+            )
+        storage = Mem0Storage(type="user", crew=crew)
+        super().__init__(storage)
+
+    def save(
+        self,
+        value,
+        metadata: Optional[Dict[str, Any]] = None,
+        agent: Optional[str] = None,
+    ) -> None:
+        # TODO: Change this function since we want to take care of the case where we save memories for the usr
+        data = f"Remember the details about the user: {value}"
+        super().save(data, metadata)
+
+    def search(
+        self,
+        query: str,
+        limit: int = 3,
+        score_threshold: float = 0.35,
+    ):
+        results = super().search(
+            query=query,
+            limit=limit,
+            score_threshold=score_threshold,
+        )
+        return results
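A hedged usage sketch for the new class; `my_crew` is a placeholder for an existing Crew instance, and `mem0ai` must be installed or the constructor raises the ImportError shown above:

from crewai.memory.user.user_memory import UserMemory

user_memory = UserMemory(crew=my_crew)  # my_crew: assumed pre-built Crew
user_memory.save("Prefers concise answers", metadata={"source": "onboarding"})
hits = user_memory.search("user preferences", limit=3, score_threshold=0.35)

Note that `save()` prefixes the value with "Remember the details about the user:" before delegating to `Memory.save()`, so the stored text differs from the raw input.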
 8  src/crewai/memory/user/user_memory_item.py  (new file)
@@ -0,0 +1,8 @@
+from typing import Any, Dict, Optional
+
+
+class UserMemoryItem:
+    def __init__(self, data: Any, user: str, metadata: Optional[Dict[str, Any]] = None):
+        self.data = data
+        self.user = user
+        self.metadata = metadata if metadata is not None else {}
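The item class is a plain value holder; a one-line illustration (the values here are invented):

item = UserMemoryItem(data="Likes Python", user="alice", metadata={"channel": "chat"})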
@@ -1,5 +1,7 @@
 from .annotations import (
+    after_kickoff,
     agent,
+    before_kickoff,
     cache_handler,
     callback,
     crew,
@@ -26,4 +28,6 @@ __all__ = [
     "llm",
     "cache_handler",
     "pipeline",
+    "before_kickoff",
+    "after_kickoff",
 ]
@@ -5,6 +5,16 @@ from crewai import Crew
 from crewai.project.utils import memoize
 
 
+def before_kickoff(func):
+    func.is_before_kickoff = True
+    return func
+
+
+def after_kickoff(func):
+    func.is_after_kickoff = True
+    return func
+
+
 def task(func):
     func.is_task = True
 
@@ -76,27 +86,13 @@ def crew(func) -> Callable[..., Crew]:
         instantiated_agents = []
         agent_roles = set()
 
-        # Collect methods from crew in order
-        all_functions = [
-            (name, getattr(self, name))
-            for name, attr in self.__class__.__dict__.items()
-            if callable(attr)
-        ]
-        tasks = [
-            (name, method)
-            for name, method in all_functions
-            if hasattr(method, "is_task")
-        ]
-
-        agents = [
-            (name, method)
-            for name, method in all_functions
-            if hasattr(method, "is_agent")
-        ]
+        # Use the preserved task and agent information
+        tasks = self._original_tasks.items()
+        agents = self._original_agents.items()
 
         # Instantiate tasks in order
         for task_name, task_method in tasks:
-            task_instance = task_method()
+            task_instance = task_method(self)
             instantiated_tasks.append(task_instance)
             agent_instance = getattr(task_instance, "agent", None)
             if agent_instance and agent_instance.role not in agent_roles:
@@ -105,7 +101,7 @@ def crew(func) -> Callable[..., Crew]:
 
         # Instantiate agents not included by tasks
         for agent_name, agent_method in agents:
-            agent_instance = agent_method()
+            agent_instance = agent_method(self)
             if agent_instance.role not in agent_roles:
                 instantiated_agents.append(agent_instance)
                 agent_roles.add(agent_instance.role)
@@ -113,6 +109,19 @@ def crew(func) -> Callable[..., Crew]:
         self.agents = instantiated_agents
         self.tasks = instantiated_tasks
 
-        return func(self, *args, **kwargs)
+        crew = func(self, *args, **kwargs)
+
+        def callback_wrapper(callback, instance):
+            def wrapper(*args, **kwargs):
+                return callback(instance, *args, **kwargs)
+
+            return wrapper
+
+        for _, callback in self._before_kickoff.items():
+            crew.before_kickoff_callbacks.append(callback_wrapper(callback, self))
+        for _, callback in self._after_kickoff.items():
+            crew.after_kickoff_callbacks.append(callback_wrapper(callback, self))
+
+        return crew
 
     return memoize(wrapper)
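Taken together, the hunks above mark hook methods with `is_before_kickoff`/`is_after_kickoff` attributes and wire them into `crew.before_kickoff_callbacks`/`after_kickoff_callbacks`, bound to the class instance via `callback_wrapper`. A hedged sketch of the intended authoring pattern (class and method names are illustrative, and the `@agent`/`@task`/`@crew` methods a real crew needs are omitted):

from crewai.project import CrewBase, before_kickoff, after_kickoff

@CrewBase
class ResearchCrew:
    @before_kickoff
    def prepare_inputs(self, inputs):
        # Runs before Crew.kickoff(); can normalize or enrich the inputs dict.
        inputs["topic"] = inputs.get("topic", "").strip()
        return inputs

    @after_kickoff
    def log_result(self, output):
        # Runs after Crew.kickoff() with the crew's output.
        print("Crew finished:", output)
        return output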
@@ -34,6 +34,39 @@ def CrewBase(cls: T) -> T:
            self.map_all_agent_variables()
            self.map_all_task_variables()
 
+            # Preserve all decorated functions
+            self._original_functions = {
+                name: method
+                for name, method in cls.__dict__.items()
+                if any(
+                    hasattr(method, attr)
+                    for attr in [
+                        "is_task",
+                        "is_agent",
+                        "is_before_kickoff",
+                        "is_after_kickoff",
+                        "is_kickoff",
+                    ]
+                )
+            }
+
+            # Store specific function types
+            self._original_tasks = self._filter_functions(
+                self._original_functions, "is_task"
+            )
+            self._original_agents = self._filter_functions(
+                self._original_functions, "is_agent"
+            )
+            self._before_kickoff = self._filter_functions(
+                self._original_functions, "is_before_kickoff"
+            )
+            self._after_kickoff = self._filter_functions(
+                self._original_functions, "is_after_kickoff"
+            )
+            self._kickoff = self._filter_functions(
+                self._original_functions, "is_kickoff"
+            )
 
        @staticmethod
        def load_yaml(config_path: Path):
            try:
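The hunk relies on `self._filter_functions`, whose definition is not visible in this compare view. A plausible implementation, offered as an assumption rather than the repository's actual code, is a one-line dict filter over the marker attributes set by the decorators:

from typing import Callable, Dict

def _filter_functions(functions: Dict[str, Callable], attribute: str) -> Dict[str, Callable]:
    # Assumed helper: keep only functions carrying the given marker attribute
    # (is_task, is_agent, is_before_kickoff, ...).
    return {name: fn for name, fn in functions.items() if hasattr(fn, attribute)}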
@@ -20,6 +20,7 @@ from pydantic import (
 from pydantic_core import PydanticCustomError
 
 from crewai.agents.agent_builder.base_agent import BaseAgent
+from crewai.tools.base_tool import BaseTool
 from crewai.tasks.output_format import OutputFormat
 from crewai.tasks.task_output import TaskOutput
 from crewai.telemetry.telemetry import Telemetry
@@ -91,7 +92,7 @@ class Task(BaseModel):
     output: Optional[TaskOutput] = Field(
         description="Task output, it's final result after being executed", default=None
     )
-    tools: Optional[List[Any]] = Field(
+    tools: Optional[List[BaseTool]] = Field(
         default_factory=list,
         description="Tools the agent is limited to use for this task.",
     )
@@ -185,7 +186,7 @@ class Task(BaseModel):
         self,
         agent: Optional[BaseAgent] = None,
         context: Optional[str] = None,
-        tools: Optional[List[Any]] = None,
+        tools: Optional[List[BaseTool]] = None,
     ) -> TaskOutput:
         """Execute the task synchronously."""
         return self._execute_core(agent, context, tools)
@@ -202,7 +203,7 @@ class Task(BaseModel):
         self,
         agent: BaseAgent | None = None,
         context: Optional[str] = None,
-        tools: Optional[List[Any]] = None,
+        tools: Optional[List[BaseTool]] = None,
     ) -> Future[TaskOutput]:
         """Execute the task asynchronously."""
         future: Future[TaskOutput] = Future()
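With `tools` narrowed from `List[Any]` to `List[BaseTool]`, anything handed to a Task is now expected to subclass `crewai.tools.base_tool.BaseTool`. A minimal hypothetical tool for illustration (the class name and behavior are invented, not part of the diff):

from crewai.tools.base_tool import BaseTool

class EchoTool(BaseTool):
    name: str = "echo"
    description: str = "Returns its input text unchanged."

    def _run(self, text: str) -> str:
        return text

Passing `tools=[EchoTool()]` to `Task(...)` satisfies the new annotation, while arbitrary callables that previously slipped through `List[Any]` no longer type-check.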
@@ -48,6 +48,10 @@ class Telemetry:
    def __init__(self):
        self.ready = False
        self.trace_set = False
+
+        if os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true":
+            return
+
        try:
            telemetry_endpoint = "https://telemetry.crewai.com:4319"
            self.resource = Resource(
@@ -65,7 +69,7 @@ class Telemetry:
 
            self.provider.add_span_processor(processor)
            self.ready = True
-        except BaseException as e:
+        except Exception as e:
            if isinstance(
                e,
                (SystemExit, KeyboardInterrupt, GeneratorExit, asyncio.CancelledError),
@@ -83,10 +87,18 @@ class Telemetry:
            self.ready = False
            self.trace_set = False
 
+    def _safe_telemetry_operation(self, operation):
+        if not self.ready:
+            return
+        try:
+            operation()
+        except Exception:
+            pass
+
    def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None):
        """Records the creation of a crew."""
-        if self.ready:
-            try:
+
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Crew Created")
            self._add_attribute(
@@ -127,8 +139,7 @@ class Telemetry:
                        "allow_code_execution?": agent.allow_code_execution,
                        "max_retry_limit": agent.max_retry_limit,
                        "tools_names": [
-                            tool.name.casefold()
-                            for tool in agent.tools or []
+                            tool.name.casefold() for tool in agent.tools or []
                        ],
                    }
                    for agent in crew.agents
@@ -157,8 +168,7 @@ class Telemetry:
                            else None
                        ),
                        "tools_names": [
-                            tool.name.casefold()
-                            for tool in task.tools or []
+                            tool.name.casefold() for tool in task.tools or []
                        ],
                    }
                    for task in crew.tasks
@@ -196,8 +206,7 @@ class Telemetry:
                        "allow_code_execution?": agent.allow_code_execution,
                        "max_retry_limit": agent.max_retry_limit,
                        "tools_names": [
-                            tool.name.casefold()
-                            for tool in agent.tools or []
+                            tool.name.casefold() for tool in agent.tools or []
                        ],
                    }
                    for agent in crew.agents
@@ -219,8 +228,7 @@ class Telemetry:
                        ),
                        "agent_key": task.agent.key if task.agent else None,
                        "tools_names": [
-                            tool.name.casefold()
-                            for tool in task.tools or []
+                            tool.name.casefold() for tool in task.tools or []
                        ],
                    }
                    for task in crew.tasks
@@ -229,13 +237,13 @@ class Telemetry:
                )
                span.set_status(Status(StatusCode.OK))
                span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def task_started(self, crew: Crew, task: Task) -> Span | None:
        """Records task started in a crew."""
-        if self.ready:
-            try:
+
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
 
            created_span = tracer.start_span("Task Created")
@@ -270,15 +278,13 @@ class Telemetry:
            )
 
            return span
-        except Exception:
-            pass
-
-        return None
+
+        return self._safe_telemetry_operation(operation)
 
    def task_ended(self, span: Span, task: Task, crew: Crew):
        """Records task execution in a crew."""
-        if self.ready:
-            try:
+
+        def operation():
            if crew.share_crew:
                self._add_attribute(
                    span,
@@ -288,13 +294,13 @@ class Telemetry:
 
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
        """Records the repeated usage 'error' of a tool by an agent."""
-        if self.ready:
-            try:
+
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Tool Repeated Usage")
            self._add_attribute(
@@ -308,13 +314,13 @@ class Telemetry:
            self._add_attribute(span, "llm", llm.model)
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def tool_usage(self, llm: Any, tool_name: str, attempts: int):
        """Records the usage of a tool by an agent."""
-        if self.ready:
-            try:
+
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Tool Usage")
            self._add_attribute(
@@ -328,13 +334,13 @@ class Telemetry:
            self._add_attribute(span, "llm", llm.model)
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def tool_usage_error(self, llm: Any):
        """Records the usage of a tool by an agent."""
-        if self.ready:
-            try:
+
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Tool Usage Error")
            self._add_attribute(
@@ -346,14 +352,13 @@ class Telemetry:
            self._add_attribute(span, "llm", llm.model)
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def individual_test_result_span(
        self, crew: Crew, quality: float, exec_time: int, model_name: str
    ):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Crew Individual Test Result")
 
@@ -369,8 +374,8 @@ class Telemetry:
            self._add_attribute(span, "model_name", model_name)
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def test_execution_span(
        self,
@@ -379,8 +384,7 @@ class Telemetry:
        inputs: dict[str, Any] | None,
        model_name: str,
    ):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Crew Test Execution")
 
@@ -401,44 +405,40 @@ class Telemetry:
 
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def deploy_signup_error_span(self):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Deploy Signup Error")
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def start_deployment_span(self, uuid: Optional[str] = None):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Start Deployment")
            if uuid:
                self._add_attribute(span, "uuid", uuid)
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def create_crew_deployment_span(self):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Create Crew Deployment")
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Get Crew Logs")
            self._add_attribute(span, "log_type", log_type)
@@ -446,20 +446,19 @@ class Telemetry:
                self._add_attribute(span, "uuid", uuid)
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def remove_crew_span(self, uuid: Optional[str] = None):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Remove Crew")
            if uuid:
                self._add_attribute(span, "uuid", uuid)
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None):
        """Records the complete execution of a crew.
@@ -467,8 +466,7 @@ class Telemetry:
        """
        self.crew_creation(crew, inputs)
 
-        if (self.ready) and (crew.share_crew):
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Crew Execution")
            self._add_attribute(
@@ -533,12 +531,13 @@ class Telemetry:
                ),
            )
            return span
-        except Exception:
-            pass
+
+        if crew.share_crew:
+            return self._safe_telemetry_operation(operation)
+        return None
 
    def end_crew(self, crew, final_string_output):
-        if (self.ready) and (crew.share_crew):
-            try:
+        def operation():
            self._add_attribute(
                crew._execution_span,
                "crewai_version",
@@ -563,47 +562,46 @@ class Telemetry:
            )
            crew._execution_span.set_status(Status(StatusCode.OK))
            crew._execution_span.end()
-        except Exception:
-            pass
+
+        if crew.share_crew:
+            self._safe_telemetry_operation(operation)
 
    def _add_attribute(self, span, key, value):
        """Add an attribute to a span."""
-        try:
+
+        def operation():
            return span.set_attribute(key, value)
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def flow_creation_span(self, flow_name: str):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Flow Creation")
            self._add_attribute(span, "flow_name", flow_name)
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def flow_plotting_span(self, flow_name: str, node_names: list[str]):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Flow Plotting")
            self._add_attribute(span, "flow_name", flow_name)
            self._add_attribute(span, "node_names", json.dumps(node_names))
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
 
    def flow_execution_span(self, flow_name: str, node_names: list[str]):
-        if self.ready:
-            try:
+        def operation():
            tracer = trace.get_tracer("crewai.telemetry")
            span = tracer.start_span("Flow Execution")
            self._add_attribute(span, "flow_name", flow_name)
            self._add_attribute(span, "node_names", json.dumps(node_names))
            span.set_status(Status(StatusCode.OK))
            span.end()
-        except Exception:
-            pass
+
+        self._safe_telemetry_operation(operation)
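Every hunk in this file is the same refactor: the per-method `if self.ready: / try: / except Exception: pass` scaffolding collapses into a nested `operation()` closure handed to `_safe_telemetry_operation`. One detail visible in the `@@ -83,10 +87,18 @@` hunk: `_safe_telemetry_operation` invokes `operation()` without returning its value, so callers such as `task_started` that `return self._safe_telemetry_operation(operation)` will get `None` even when the closure returns a span. A stripped-down sketch of the guard pattern (the class here is illustrative; the real Telemetry class carries OpenTelemetry setup omitted from this sketch):

class SafeTelemetry:
    def __init__(self, ready: bool) -> None:
        self.ready = ready

    def _safe_telemetry_operation(self, operation):
        # Skip when telemetry never initialized; swallow runtime errors.
        if not self.ready:
            return
        try:
            operation()  # the result is discarded, mirroring the diff
        except Exception:
            pass

    def record_event(self, name: str) -> None:
        def operation():
            print(f"telemetry event: {name}")

        self._safe_telemetry_operation(operation)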
@@ -0,0 +1 @@
+from .base_tool import BaseTool, tool
@@ -1,25 +0,0 @@
-from crewai.agents.agent_builder.utilities.base_agent_tool import BaseAgentTools
-
-
-class AgentTools(BaseAgentTools):
-    """Default tools around agent delegation"""
-
-    def tools(self):
-        from langchain.tools import StructuredTool
-
-        coworkers = ", ".join([f"{agent.role}" for agent in self.agents])
-        tools = [
-            StructuredTool.from_function(
-                func=self.delegate_work,
-                name="Delegate work to coworker",
-                description=self.i18n.tools("delegate_work").format(
-                    coworkers=coworkers
-                ),
-            ),
-            StructuredTool.from_function(
-                func=self.ask_question,
-                name="Ask question to coworker",
-                description=self.i18n.tools("ask_question").format(coworkers=coworkers),
-            ),
-        ]
-        return tools
32  src/crewai/tools/agent_tools/agent_tools.py  (new file)
@@ -0,0 +1,32 @@
+from crewai.tools.base_tool import BaseTool
+from crewai.agents.agent_builder.base_agent import BaseAgent
+from crewai.utilities import I18N
+
+from .delegate_work_tool import DelegateWorkTool
+from .ask_question_tool import AskQuestionTool
+
+
+class AgentTools:
+    """Manager class for agent-related tools"""
+
+    def __init__(self, agents: list[BaseAgent], i18n: I18N = I18N()):
+        self.agents = agents
+        self.i18n = i18n
+
+    def tools(self) -> list[BaseTool]:
+        """Get all available agent tools"""
+        coworkers = ", ".join([f"{agent.role}" for agent in self.agents])
+
+        delegate_tool = DelegateWorkTool(
+            agents=self.agents,
+            i18n=self.i18n,
+            description=self.i18n.tools("delegate_work").format(coworkers=coworkers),
+        )
+
+        ask_tool = AskQuestionTool(
+            agents=self.agents,
+            i18n=self.i18n,
+            description=self.i18n.tools("ask_question").format(coworkers=coworkers),
+        )
+
+        return [delegate_tool, ask_tool]
26  src/crewai/tools/agent_tools/ask_question_tool.py  (new file)
@@ -0,0 +1,26 @@
+from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
+from typing import Optional
+from pydantic import BaseModel, Field
+
+
+class AskQuestionToolSchema(BaseModel):
+    question: str = Field(..., description="The question to ask")
+    context: str = Field(..., description="The context for the question")
+    coworker: str = Field(..., description="The role/name of the coworker to ask")
+
+
+class AskQuestionTool(BaseAgentTool):
+    """Tool for asking questions to coworkers"""
+
+    name: str = "Ask question to coworker"
+    args_schema: type[BaseModel] = AskQuestionToolSchema
+
+    def _run(
+        self,
+        question: str,
+        context: str,
+        coworker: Optional[str] = None,
+        **kwargs,
+    ) -> str:
+        coworker = self._get_coworker(coworker, **kwargs)
+        return self._execute(coworker, question, context)
@@ -1,22 +1,19 @@
-from abc import ABC, abstractmethod
-from typing import List, Optional, Union
+from typing import Optional, Union
 
-from pydantic import BaseModel, Field
+from pydantic import Field
 
+from crewai.tools.base_tool import BaseTool
 from crewai.agents.agent_builder.base_agent import BaseAgent
 from crewai.task import Task
 from crewai.utilities import I18N
 
 
-class BaseAgentTools(BaseModel, ABC):
-    """Default tools around agent delegation"""
+class BaseAgentTool(BaseTool):
+    """Base class for agent-related tools"""
 
-    agents: List[BaseAgent] = Field(description="List of agents in this crew.")
-    i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
+    agents: list[BaseAgent] = Field(description="List of available agents")
+    i18n: I18N = Field(
+        default_factory=I18N, description="Internationalization settings"
+    )
 
-    @abstractmethod
-    def tools(self):
-        pass
-
     def _get_coworker(self, coworker: Optional[str], **kwargs) -> Optional[str]:
         coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker")
@@ -24,27 +21,11 @@ class BaseAgentTools(BaseModel, ABC):
         is_list = coworker.startswith("[") and coworker.endswith("]")
         if is_list:
             coworker = coworker[1:-1].split(",")[0]
 
         return coworker
 
-    def delegate_work(
-        self, task: str, context: str, coworker: Optional[str] = None, **kwargs
-    ):
-        """Useful to delegate a specific task to a coworker passing all necessary context and names."""
-        coworker = self._get_coworker(coworker, **kwargs)
-        return self._execute(coworker, task, context)
-
-    def ask_question(
-        self, question: str, context: str, coworker: Optional[str] = None, **kwargs
-    ):
-        """Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
-        coworker = self._get_coworker(coworker, **kwargs)
-        return self._execute(coworker, question, context)
-
     def _execute(
         self, agent_name: Union[str, None], task: str, context: Union[str, None]
-    ):
-        """Execute the command."""
+    ) -> str:
         try:
             if agent_name is None:
                 agent_name = ""
@@ -57,7 +38,6 @@ class BaseAgentTools(BaseModel, ABC):
             # when it should look like this:
             # {"task": "....", "coworker": "...."}
             agent_name = agent_name.casefold().replace('"', "").replace("\n", "")
-
             agent = [  # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None")
                 available_agent
                 for available_agent in self.agents
29  src/crewai/tools/agent_tools/delegate_work_tool.py  (new file)
@@ -0,0 +1,29 @@
+from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
+from typing import Optional
+
+from pydantic import BaseModel, Field
+
+
+class DelegateWorkToolSchema(BaseModel):
+    task: str = Field(..., description="The task to delegate")
+    context: str = Field(..., description="The context for the task")
+    coworker: str = Field(
+        ..., description="The role/name of the coworker to delegate to"
+    )
+
+
+class DelegateWorkTool(BaseAgentTool):
+    """Tool for delegating work to coworkers"""
+
+    name: str = "Delegate work to coworker"
+    args_schema: type[BaseModel] = DelegateWorkToolSchema
+
+    def _run(
+        self,
+        task: str,
+        context: str,
+        coworker: Optional[str] = None,
+        **kwargs,
+    ) -> str:
+        coworker = self._get_coworker(coworker, **kwargs)
+        return self._execute(coworker, task, context)
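These files replace the deleted langchain `StructuredTool.from_function` wiring with native `BaseTool` subclasses plus pydantic argument schemas. A hedged usage sketch (the two agents are placeholders; a real call site would pass the crew's own agents):

from crewai import Agent
from crewai.tools.agent_tools.agent_tools import AgentTools

researcher = Agent(role="Researcher", goal="Find facts", backstory="...")
writer = Agent(role="Writer", goal="Write prose", backstory="...")

delegate_tool, ask_tool = AgentTools(agents=[researcher, writer]).tools()
# Each tool validates its arguments against its schema, then routes through
# BaseAgentTool._get_coworker/_execute, matching `coworker` against agent roles.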
Some files were not shown because too many files have changed in this diff.