Compare commits

..

70 Commits

Author SHA1 Message Date
Brandon Hancock
48e1505a0a Merge branch 'main' into undo-agentops-api-check 2024-10-16 11:18:19 -04:00
Braelyn Boynton
161e2e20a5 remove extra code 2024-09-05 14:50:01 +09:00
Braelyn Boynton
a68f2cec41 remove extra code 2024-09-05 14:48:54 +09:00
Braelyn Boynton
9db3a4ab23 remove extra code 2024-09-05 14:48:28 +09:00
Braelyn Boynton
7d4cf9a7bc undo agentops api key check 2024-09-05 14:45:01 +09:00
Braelyn Boynton
7af89abe53 Merge remote-tracking branch 'refs/remotes/upstream/main' into undo-agentops-api-check 2024-09-05 14:41:51 +09:00
Braelyn Boynton
b3ae127d2c Merge remote-tracking branch 'refs/remotes/upstream/main' 2024-08-08 16:56:49 -07:00
Braelyn Boynton
0543059dbe Merge remote-tracking branch 'upstream/main'
# Conflicts:
#	pyproject.toml
#	src/crewai/agent.py
#	src/crewai/crew.py
#	src/crewai/task.py
#	src/crewai/tools/tool_usage.py
#	src/crewai/utilities/evaluators/task_evaluator.py
2024-07-23 17:55:15 -04:00
Braelyn Boynton
c3b8ea21d3 deprecation messages 2024-07-08 13:56:17 -07:00
Braelyn Boynton
fa9a42cd89 fix crew logger bug 2024-06-06 18:28:11 -07:00
Braelyn Boynton
9b965d9e33 fix crew logger bug 2024-06-06 18:26:09 -07:00
Braelyn Boynton
45655a956a conditional protect agentops use 2024-06-06 17:58:34 -07:00
Braelyn Boynton
f2d2804854 Merge remote-tracking branch 'origin/main' 2024-06-06 17:09:05 -07:00
Braelyn Boynton
ae65622bd0 Merge remote-tracking branch 'upstream/main'
# Conflicts:
#	src/crewai/task.py
2024-06-06 17:08:39 -07:00
Braelyn Boynton
f516fba9b6 Merge branch 'main' into main 2024-06-06 17:07:28 -07:00
Braelyn Boynton
a4622bfce8 support skip auto end session 2024-05-29 14:28:24 -07:00
theCyberTech - Rip&Tear
0dd4f444ea Added timestamp to logger (#646)
* Added timestamp to logger

Updated the logger.py file to include timestamps when logging output. For example:

 [2024-05-20 15:32:48][DEBUG]: == Working Agent: Researcher
 [2024-05-20 15:32:48][INFO]: == Starting Task: Research the topic
 [2024-05-20 15:33:22][DEBUG]: == [Researcher] Task output:

* Update tool_usage.py

* Revert "Update tool_usage.py"

This reverts commit 95d18d5b6f.

incorrect branch for this commit
2024-05-28 16:45:50 -07:00
Saif Mahmud
e2dfba63cd fixes #665 (#666) 2024-05-28 16:45:50 -07:00
theCyberTech - Rip&Tear
3bba04ac71 Update crew.py (#644)
Fixed Typo on line 53
2024-05-28 16:45:50 -07:00
Mish Ushakov
b153bc1a80 Update BrowserbaseLoadTool.md (#647) 2024-05-28 16:45:50 -07:00
Mike Heavers
8e5bface29 Update README.md (#652)
Rework example so that if you use a custom LLM it doesn't throw code errors by uncommenting.
2024-05-28 16:45:50 -07:00
Anudeep Kolluri
9ac6752cbf Update agent.py (#655)
Changed default model value from gpt-4 to gpt-4o.
Reasoning.
gpt-4 costs 30$ per million tokens while gpt-4o costs 5$.
This is more cost friendly for default option.
2024-05-28 16:45:50 -07:00
Paul Sanders
a08d0dfe12 Clarify text in docstring (#662) 2024-05-28 16:45:50 -07:00
Paul Sanders
96e0dacfc1 Enable search in docs (#663) 2024-05-28 16:45:50 -07:00
Olivier Roberdet
f4ce482eb7 Fix typo in instruction en.json (#676) 2024-05-28 16:45:50 -07:00
Braelyn Boynton
c6471814b3 merge upstream 2024-05-28 16:45:20 -07:00
Howard Gil
2d88109cc3 Merge branch 'main' of https://github.com/joaomdmoura/crewAI 2024-05-21 12:18:03 -07:00
Braelyn Boynton
54237c9974 track task evaluator 2024-05-09 13:15:12 -07:00
Braelyn Boynton
b4241a892e agentops version bump 2024-05-06 21:28:47 -07:00
Braelyn Boynton
a6de5253d5 Merge remote-tracking branch 'upstream/main' 2024-05-06 11:50:31 -07:00
Braelyn Boynton
b9d6ec5721 use langchain callback handler to support all LLMs 2024-05-03 15:07:17 -07:00
Braelyn Boynton
498bf77f08 black formatting 2024-05-02 13:06:34 -07:00
Braelyn Boynton
be91c32488 Merge remote-tracking branch 'upstream/main'
# Conflicts:
#	pyproject.toml
#	src/crewai/agent.py
#	src/crewai/crew.py
#	src/crewai/tools/tool_usage.py
2024-05-02 12:52:31 -07:00
Braelyn Boynton
f2c2a625b0 add crew tag 2024-05-02 12:28:06 -07:00
Braelyn Boynton
b160a52139 Merge remote-tracking branch 'upstream/main'
# Conflicts:
#	pyproject.toml
#	src/crewai/agent.py
#	src/crewai/crew.py
#	src/crewai/tools/tool_usage.py
2024-04-30 01:09:16 -07:00
Braelyn Boynton
a19a37bd9a noop 2024-04-29 23:31:48 -07:00
Braelyn Boynton
2f789800b7 Revert "Revert "Revert "true dependency"""
This reverts commit e9335e89
2024-04-29 23:30:02 -07:00
Braelyn Boynton
8be18c8e11 agentops update 2024-04-19 20:05:47 -07:00
João Moura
e366f006ac Update pyproject.toml 2024-04-19 23:38:20 -03:00
João Moura
d678190850 Forcing version 0.1.5 2024-04-19 23:18:43 -03:00
Braelyn Boynton
9005dc7c59 cleanup 2024-04-19 19:10:26 -07:00
Braelyn Boynton
e9335e89a6 Revert "Revert "true dependency""
This reverts commit 4d1b460b
2024-04-19 19:09:20 -07:00
Braelyn Boynton
fd7de7f2eb Revert "Revert "cleanup""
This reverts commit cea33d9a5d.
2024-04-19 19:08:22 -07:00
Braelyn Boynton
c52b5e9690 agentops 0.1.5 2024-04-19 19:07:53 -07:00
Braelyn Boynton
7725e7c52e optional parent key 2024-04-19 19:04:21 -07:00
Braelyn Boynton
7f8573e6cb Merge remote-tracking branch 'origin/main' 2024-04-19 19:02:39 -07:00
Braelyn Boynton
cea33d9a5d Revert "cleanup"
This reverts commit 7f5635fb9e.
2024-04-19 19:02:20 -07:00
Braelyn Boynton
4d1b460b80 Revert "true dependency"
This reverts commit e52e8e9568.
2024-04-19 19:01:52 -07:00
João Moura
906a5bd8ec Update pyproject.toml 2024-04-19 22:54:57 -03:00
Braelyn Boynton
216cc832dc Merge remote-tracking branch 'upstream/main'
# Conflicts:
#	poetry.lock
2024-04-18 16:21:19 -07:00
Braelyn Boynton
7f5635fb9e cleanup 2024-04-17 17:19:38 -07:00
Braelyn Boynton
0ce8d14742 add crew org key to agentops 2024-04-17 14:48:58 -07:00
Braelyn Boynton
e52e8e9568 true dependency 2024-04-17 14:39:23 -07:00
Braelyn Boynton
4f7a9a5b4b Merge remote-tracking branch 'upstream/main'
# Conflicts:
#	src/crewai/crew.py
2024-04-17 14:27:31 -07:00
Braelyn Boynton
2af85c35b4 remove org key 2024-04-15 15:39:24 -04:00
Braelyn Boynton
e82149aaf9 Merge remote-tracking branch 'upstream/main' 2024-04-11 12:32:17 -07:00
Braelyn Boynton
de0ee8ce41 Merge remote-tracking branch 'upstream/main'
# Conflicts:
#	src/crewai/crew.py
2024-04-05 15:48:35 -07:00
Braelyn Boynton
b20ae847c4 agentops version bump 2024-04-05 15:47:01 -07:00
Braelyn Boynton
59f56324ea Merge remote-tracking branch 'upstream/main'
# Conflicts:
#	poetry.lock
#	src/crewai/tools/tool_usage.py
2024-04-05 15:18:40 -07:00
Braelyn Boynton
79a0d8b94d optional agentops 2024-04-04 14:34:20 -07:00
Braelyn Boynton
750085498f remove telemetry code 2024-04-04 13:23:20 -07:00
Braelyn Boynton
215e39833a optional dependency usage 2024-04-03 23:14:37 -07:00
Braelyn Boynton
67bc1de4d6 make agentops optional 2024-04-03 15:36:47 -07:00
Braelyn Boynton
45e307b98a code cleanup 2024-04-02 12:25:52 -07:00
Braelyn Boynton
4402c9be74 merge upstream 2024-04-02 12:22:49 -07:00
Braelyn Boynton
5e46514398 better tool and llm tracking 2024-03-29 17:45:58 -07:00
Braelyn Boynton
c44c2b6808 track tool usage time 2024-03-29 14:28:33 -07:00
Braelyn Boynton
a9339fcef6 end session after completion 2024-03-26 14:09:58 -07:00
Braelyn Boynton
f67d0a26f1 track tool usage 2024-03-20 18:25:41 -07:00
Braelyn Boynton
f6ee12dbc5 implements agentops with a langchain handler, agent tracking and tool call recording 2024-03-19 18:47:22 -07:00
99 changed files with 3231 additions and 4323 deletions

19
.github/security.md vendored
View File

@@ -1,19 +0,0 @@
CrewAI takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organization.
If you believe you have found a security vulnerability in any CrewAI product or service, please report it to us as described below.
## Reporting a Vulnerability
Please do not report security vulnerabilities through public GitHub issues.
To report a vulnerability, please email us at security@crewai.com.
Please include the requested information listed below so that we can triage your report more quickly
- Type of issue (e.g. SQL injection, cross-site scripting, etc.)
- Full paths of source file(s) related to the manifestation of the issue
- The location of the affected source code (tag/branch/commit or direct URL)
- Any special configuration required to reproduce the issue
- Step-by-step instructions to reproduce the issue (please include screenshots if needed)
- Proof-of-concept or exploit code (if possible)
- Impact of the issue, including how an attacker might exploit the issue
Once we have received your report, we will respond to you at the email address you provide. If the issue is confirmed, we will release a patch as soon as possible depending on the complexity of the issue.
At this time, we are not offering a bug bounty program. Any rewards will be at our discretion.

View File

@@ -252,12 +252,6 @@ or
python src/my_project/main.py python src/my_project/main.py
``` ```
If an error happens due to the usage of poetry, please run the following command to update your crewai package:
```bash
crewai update
```
You should see the output in the console and the `report.md` file should be created in the root of your project with the full final report. You should see the output in the console and the `report.md` file should be created in the root of your project with the full final report.
In addition to the sequential process, you can use the hierarchical process, which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results. [See more about the processes here](https://docs.crewai.com/core-concepts/Processes/). In addition to the sequential process, you can use the hierarchical process, which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results. [See more about the processes here](https://docs.crewai.com/core-concepts/Processes/).
@@ -351,7 +345,7 @@ pre-commit install
### Running Tests ### Running Tests
```bash ```bash
uv run pytest . uvx pytest
``` ```
### Running static type checks ### Running static type checks

View File

@@ -31,17 +31,16 @@ Think of an agent as a member of a team, with specific skills and a particular j
| **Max RPM** *(optional)* | `max_rpm` | Max RPM is the maximum number of requests per minute the agent can perform to avoid rate limits. It's optional and can be left unspecified, with a default value of `None`. | | **Max RPM** *(optional)* | `max_rpm` | Max RPM is the maximum number of requests per minute the agent can perform to avoid rate limits. It's optional and can be left unspecified, with a default value of `None`. |
| **Max Execution Time** *(optional)* | `max_execution_time` | Max Execution Time is the maximum execution time for an agent to execute a task. It's optional and can be left unspecified, with a default value of `None`, meaning no max execution time. | | **Max Execution Time** *(optional)* | `max_execution_time` | Max Execution Time is the maximum execution time for an agent to execute a task. It's optional and can be left unspecified, with a default value of `None`, meaning no max execution time. |
| **Verbose** *(optional)* | `verbose` | Setting this to `True` configures the internal logger to provide detailed execution logs, aiding in debugging and monitoring. Default is `False`. | | **Verbose** *(optional)* | `verbose` | Setting this to `True` configures the internal logger to provide detailed execution logs, aiding in debugging and monitoring. Default is `False`. |
| **Allow Delegation** *(optional)* | `allow_delegation` | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. Default is `False`. | | **Allow Delegation** *(optional)* | `allow_delegation` | Agents can delegate tasks or questions to one another, ensuring that each task is handled by the most suitable agent. Default is `False`.
| **Step Callback** *(optional)* | `step_callback` | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback`. | | **Step Callback** *(optional)* | `step_callback` | A function that is called after each step of the agent. This can be used to log the agent's actions or to perform other operations. It will overwrite the crew `step_callback`. |
| **Cache** *(optional)* | `cache` | Indicates if the agent should use a cache for tool usage. Default is `True`. | | **Cache** *(optional)* | `cache` | Indicates if the agent should use a cache for tool usage. Default is `True`. |
| **System Template** *(optional)* | `system_template` | Specifies the system format for the agent. Default is `None`. | | **System Template** *(optional)* | `system_template` | Specifies the system format for the agent. Default is `None`. |
| **Prompt Template** *(optional)* | `prompt_template` | Specifies the prompt format for the agent. Default is `None`. | | **Prompt Template** *(optional)* | `prompt_template` | Specifies the prompt format for the agent. Default is `None`. |
| **Response Template** *(optional)* | `response_template` | Specifies the response format for the agent. Default is `None`. | | **Response Template** *(optional)* | `response_template` | Specifies the response format for the agent. Default is `None`. |
| **Allow Code Execution** *(optional)* | `allow_code_execution` | Enable code execution for the agent. Default is `False`. | | **Allow Code Execution** *(optional)* | `allow_code_execution` | Enable code execution for the agent. Default is `False`. |
| **Max Retry Limit** *(optional)* | `max_retry_limit` | Maximum number of retries for an agent to execute a task when an error occurs. Default is `2`. | | **Max Retry Limit** *(optional)* | `max_retry_limit` | Maximum number of retries for an agent to execute a task when an error occurs. Default is `2`.
| **Use System Prompt** *(optional)* | `use_system_prompt` | Adds the ability to not use system prompt (to support o1 models). Default is `True`. | | **Use System Prompt** *(optional)* | `use_system_prompt` | Adds the ability to not use system prompt (to support o1 models). Default is `True`. |
| **Respect Context Window** *(optional)* | `respect_context_window` | Summary strategy to avoid overflowing the context window. Default is `True`. | | **Respect Context Window** *(optional)* | `respect_context_window` | Summary strategy to avoid overflowing the context window. Default is `True`. |
| **Code Execution Mode** *(optional)* | `code_execution_mode` | Determines the mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution on the host machine). Default is `safe`. |
## Creating an agent ## Creating an agent
@@ -84,7 +83,6 @@ agent = Agent(
max_retry_limit=2, # Optional max_retry_limit=2, # Optional
use_system_prompt=True, # Optional use_system_prompt=True, # Optional
respect_context_window=True, # Optional respect_context_window=True, # Optional
code_execution_mode='safe', # Optional, defaults to 'safe'
) )
``` ```
@@ -158,4 +156,4 @@ crew = my_crew.kickoff(inputs={"input": "Mark Twain"})
## Conclusion ## Conclusion
Agents are the building blocks of the CrewAI framework. By understanding how to define and interact with agents, Agents are the building blocks of the CrewAI framework. By understanding how to define and interact with agents,
you can create sophisticated AI systems that leverage the power of collaborative intelligence. The `code_execution_mode` attribute provides flexibility in how agents execute code, allowing for both secure and direct execution options. you can create sophisticated AI systems that leverage the power of collaborative intelligence.

View File

@@ -6,7 +6,7 @@ icon: terminal
# CrewAI CLI Documentation # CrewAI CLI Documentation
The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you to create, train, run, and manage crews & flows. The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you to create, train, run, and manage crews and pipelines.
## Installation ## Installation
@@ -146,34 +146,3 @@ crewai run
Make sure to run these commands from the directory where your CrewAI project is set up. Make sure to run these commands from the directory where your CrewAI project is set up.
Some commands may require additional configuration or setup within your project structure. Some commands may require additional configuration or setup within your project structure.
</Note> </Note>
### 9. API Keys
When running ```crewai create crew``` command, the CLI will first show you the top 5 most common LLM providers and ask you to select one.
Once you've selected an LLM provider, you will be prompted for API keys.
#### Initial API key providers
The CLI will initially prompt for API keys for the following services:
* OpenAI
* Groq
* Anthropic
* Google Gemini
When you select a provider, the CLI will prompt you to enter your API key.
#### Other Options
If you select option 6, you will be able to select from a list of LiteLLM supported providers.
When you select a provider, the CLI will prompt you to enter the Key name and the API key.
See the following link for each provider's key name:
* [LiteLLM Providers](https://docs.litellm.ai/docs/providers)

View File

@@ -18,71 +18,68 @@ Flows allow you to create structured, event-driven workflows. They provide a sea
4. **Flexible Control Flow**: Implement conditional logic, loops, and branching within your workflows. 4. **Flexible Control Flow**: Implement conditional logic, loops, and branching within your workflows.
5. **Input Flexibility**: Flows can accept inputs to initialize or update their state, with different handling for structured and unstructured state management.
## Getting Started ## Getting Started
Let's create a simple Flow where you will use OpenAI to generate a random city in one task and then use that city to generate a fun fact in another task. Let's create a simple Flow where you will use OpenAI to generate a random city in one task and then use that city to generate a fun fact in another task.
### Passing Inputs to Flows ```python Code
import asyncio
Flows can accept inputs to initialize or update their state before execution. The way inputs are handled depends on whether the flow uses structured or unstructured state management.
#### Structured State Management
In structured state management, the flow's state is defined using a Pydantic `BaseModel`. Inputs must match the model's schema, and any updates will overwrite the default values.
```python
from crewai.flow.flow import Flow, listen, start from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel from litellm import completion
class ExampleState(BaseModel):
counter: int = 0
message: str = ""
class StructuredExampleFlow(Flow[ExampleState]): class ExampleFlow(Flow):
model = "gpt-4o-mini"
@start() @start()
def first_method(self): def generate_city(self):
# Implementation print("Starting flow")
flow = StructuredExampleFlow() response = completion(
flow.kickoff(inputs={"counter": 10}) model=self.model,
``` messages=[
{
"role": "user",
"content": "Return the name of a random city in the world.",
},
],
)
In this example, the `counter` is initialized to `10`, while `message` retains its default value. random_city = response["choices"][0]["message"]["content"]
print(f"Random City: {random_city}")
#### Unstructured State Management return random_city
In unstructured state management, the flow's state is a dictionary. You can pass any dictionary to update the state. @listen(generate_city)
def generate_fun_fact(self, random_city):
response = completion(
model=self.model,
messages=[
{
"role": "user",
"content": f"Tell me a fun fact about {random_city}",
},
],
)
```python fun_fact = response["choices"][0]["message"]["content"]
from crewai.flow.flow import Flow, listen, start return fun_fact
class UnstructuredExampleFlow(Flow):
@start()
def first_method(self):
# Implementation
flow = UnstructuredExampleFlow() async def main():
flow.kickoff(inputs={"counter": 5, "message": "Initial message"}) flow = ExampleFlow()
``` result = await flow.kickoff()
Here, both `counter` and `message` are updated based on the provided inputs. print(f"Generated fun fact: {result}")
**Note:** Ensure that inputs for structured state management adhere to the defined schema to avoid validation errors. asyncio.run(main())
### Example Flow
```python
# Existing example code
``` ```
In the above example, we have created a simple Flow that generates a random city using OpenAI and then generates a fun fact about that city. The Flow consists of two tasks: `generate_city` and `generate_fun_fact`. The `generate_city` task is the starting point of the Flow, and the `generate_fun_fact` task listens for the output of the `generate_city` task. In the above example, we have created a simple Flow that generates a random city using OpenAI and then generates a fun fact about that city. The Flow consists of two tasks: `generate_city` and `generate_fun_fact`. The `generate_city` task is the starting point of the Flow, and the `generate_fun_fact` task listens for the output of the `generate_city` task.
When you run the Flow, it will generate a random city and then generate a fun fact about that city. The output will be printed to the console. When you run the Flow, it will generate a random city and then generate a fun fact about that city. The output will be printed to the console.
**Note:** Ensure you have set up your `.env` file to store your `OPENAI_API_KEY`. This key is necessary for authenticating requests to the OpenAI API.
### @start() ### @start()
The `@start()` decorator is used to mark a method as the starting point of a Flow. When a Flow is started, all the methods decorated with `@start()` are executed in parallel. You can have multiple start methods in a Flow, and they will all be executed when the Flow is started. The `@start()` decorator is used to mark a method as the starting point of a Flow. When a Flow is started, all the methods decorated with `@start()` are executed in parallel. You can have multiple start methods in a Flow, and they will all be executed when the Flow is started.
@@ -97,14 +94,14 @@ The `@listen()` decorator can be used in several ways:
1. **Listening to a Method by Name**: You can pass the name of the method you want to listen to as a string. When that method completes, the listener method will be triggered. 1. **Listening to a Method by Name**: You can pass the name of the method you want to listen to as a string. When that method completes, the listener method will be triggered.
```python ```python Code
@listen("generate_city") @listen("generate_city")
def generate_fun_fact(self, random_city): def generate_fun_fact(self, random_city):
# Implementation # Implementation
``` ```
2. **Listening to a Method Directly**: You can pass the method itself. When that method completes, the listener method will be triggered. 2. **Listening to a Method Directly**: You can pass the method itself. When that method completes, the listener method will be triggered.
```python ```python Code
@listen(generate_city) @listen(generate_city)
def generate_fun_fact(self, random_city): def generate_fun_fact(self, random_city):
# Implementation # Implementation
@@ -121,7 +118,8 @@ When you run a Flow, the final output is determined by the last method that comp
Here's how you can access the final output: Here's how you can access the final output:
<CodeGroup> <CodeGroup>
```python ```python Code
import asyncio
from crewai.flow.flow import Flow, listen, start from crewai.flow.flow import Flow, listen, start
class OutputExampleFlow(Flow): class OutputExampleFlow(Flow):
@@ -133,14 +131,16 @@ class OutputExampleFlow(Flow):
def second_method(self, first_output): def second_method(self, first_output):
return f"Second method received: {first_output}" return f"Second method received: {first_output}"
flow = OutputExampleFlow() async def main():
final_output = flow.kickoff() flow = OutputExampleFlow()
final_output = await flow.kickoff()
print("---- Final Output ----")
print(final_output)
print("---- Final Output ----") asyncio.run(main())
print(final_output)
``` ```
```text ``` text Output
---- Final Output ---- ---- Final Output ----
Second method received: Output from first_method Second method received: Output from first_method
``` ```
@@ -150,6 +150,7 @@ Second method received: Output from first_method
In this example, the `second_method` is the last method to complete, so its output will be the final output of the Flow. In this example, the `second_method` is the last method to complete, so its output will be the final output of the Flow.
The `kickoff()` method will return the final output, which is then printed to the console. The `kickoff()` method will return the final output, which is then printed to the console.
#### Accessing and Updating State #### Accessing and Updating State
In addition to retrieving the final output, you can also access and update the state within your Flow. The state can be used to store and share data between different methods in the Flow. After the Flow has run, you can access the state to retrieve any information that was added or updated during the execution. In addition to retrieving the final output, you can also access and update the state within your Flow. The state can be used to store and share data between different methods in the Flow. After the Flow has run, you can access the state to retrieve any information that was added or updated during the execution.
@@ -158,7 +159,8 @@ Here's an example of how to update and access the state:
<CodeGroup> <CodeGroup>
```python ```python Code
import asyncio
from crewai.flow.flow import Flow, listen, start from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel from pydantic import BaseModel
@@ -179,19 +181,21 @@ class StateExampleFlow(Flow[ExampleState]):
self.state.counter += 1 self.state.counter += 1
return self.state.message return self.state.message
flow = StateExampleFlow() async def main():
final_output = flow.kickoff() flow = StateExampleFlow()
print(f"Final Output: {final_output}") final_output = await flow.kickoff()
print("Final State:") print(f"Final Output: {final_output}")
print(flow.state) print("Final State:")
print(flow.state)
asyncio.run(main())
``` ```
```text ``` text Output
Final Output: Hello from first_method - updated by second_method Final Output: Hello from first_method - updated by second_method
Final State: Final State:
counter=2 message='Hello from first_method - updated by second_method' counter=2 message='Hello from first_method - updated by second_method'
``` ```
</CodeGroup> </CodeGroup>
In this example, the state is updated by both `first_method` and `second_method`. In this example, the state is updated by both `first_method` and `second_method`.
@@ -210,10 +214,12 @@ allowing developers to choose the approach that best fits their application's ne
In unstructured state management, all state is stored in the `state` attribute of the `Flow` class. In unstructured state management, all state is stored in the `state` attribute of the `Flow` class.
This approach offers flexibility, enabling developers to add or modify state attributes on the fly without defining a strict schema. This approach offers flexibility, enabling developers to add or modify state attributes on the fly without defining a strict schema.
```python ```python Code
import asyncio
from crewai.flow.flow import Flow, listen, start from crewai.flow.flow import Flow, listen, start
class UnstructuredExampleFlow(Flow): class UntructuredExampleFlow(Flow):
@start() @start()
def first_method(self): def first_method(self):
@@ -232,8 +238,13 @@ class UnstructuredExampleFlow(Flow):
print(f"State after third_method: {self.state}") print(f"State after third_method: {self.state}")
flow = UnstructuredExampleFlow()
flow.kickoff() async def main():
flow = UntructuredExampleFlow()
await flow.kickoff()
asyncio.run(main())
``` ```
**Key Points:** **Key Points:**
@@ -246,14 +257,18 @@ flow.kickoff()
Structured state management leverages predefined schemas to ensure consistency and type safety across the workflow. Structured state management leverages predefined schemas to ensure consistency and type safety across the workflow.
By using models like Pydantic's `BaseModel`, developers can define the exact shape of the state, enabling better validation and auto-completion in development environments. By using models like Pydantic's `BaseModel`, developers can define the exact shape of the state, enabling better validation and auto-completion in development environments.
```python ```python Code
import asyncio
from crewai.flow.flow import Flow, listen, start from crewai.flow.flow import Flow, listen, start
from pydantic import BaseModel from pydantic import BaseModel
class ExampleState(BaseModel): class ExampleState(BaseModel):
counter: int = 0 counter: int = 0
message: str = "" message: str = ""
class StructuredExampleFlow(Flow[ExampleState]): class StructuredExampleFlow(Flow[ExampleState]):
@start() @start()
@@ -272,8 +287,13 @@ class StructuredExampleFlow(Flow[ExampleState]):
print(f"State after third_method: {self.state}") print(f"State after third_method: {self.state}")
flow = StructuredExampleFlow()
flow.kickoff() async def main():
flow = StructuredExampleFlow()
await flow.kickoff()
asyncio.run(main())
``` ```
**Key Points:** **Key Points:**
@@ -305,7 +325,8 @@ The `or_` function in Flows allows you to listen to multiple methods and trigger
<CodeGroup> <CodeGroup>
```python ```python Code
import asyncio
from crewai.flow.flow import Flow, listen, or_, start from crewai.flow.flow import Flow, listen, or_, start
class OrExampleFlow(Flow): class OrExampleFlow(Flow):
@@ -322,11 +343,16 @@ class OrExampleFlow(Flow):
def logger(self, result): def logger(self, result):
print(f"Logger: {result}") print(f"Logger: {result}")
flow = OrExampleFlow()
flow.kickoff() async def main():
flow = OrExampleFlow()
await flow.kickoff()
asyncio.run(main())
``` ```
```text ``` text Output
Logger: Hello from the start method Logger: Hello from the start method
Logger: Hello from the second method Logger: Hello from the second method
``` ```
@@ -342,7 +368,8 @@ The `and_` function in Flows allows you to listen to multiple methods and trigge
<CodeGroup> <CodeGroup>
```python ```python Code
import asyncio
from crewai.flow.flow import Flow, and_, listen, start from crewai.flow.flow import Flow, and_, listen, start
class AndExampleFlow(Flow): class AndExampleFlow(Flow):
@@ -360,11 +387,16 @@ class AndExampleFlow(Flow):
print("---- Logger ----") print("---- Logger ----")
print(self.state) print(self.state)
flow = AndExampleFlow()
flow.kickoff() async def main():
flow = AndExampleFlow()
await flow.kickoff()
asyncio.run(main())
``` ```
```text ``` text Output
---- Logger ---- ---- Logger ----
{'greeting': 'Hello from the start method', 'joke': 'What do computers eat? Microchips.'} {'greeting': 'Hello from the start method', 'joke': 'What do computers eat? Microchips.'}
``` ```
@@ -381,7 +413,8 @@ You can specify different routes based on the output of the method, allowing you
<CodeGroup> <CodeGroup>
```python ```python Code
import asyncio
import random import random
from crewai.flow.flow import Flow, listen, router, start from crewai.flow.flow import Flow, listen, router, start
from pydantic import BaseModel from pydantic import BaseModel
@@ -412,11 +445,16 @@ class RouterFlow(Flow[ExampleState]):
def fourth_method(self): def fourth_method(self):
print("Fourth method running") print("Fourth method running")
flow = RouterFlow()
flow.kickoff() async def main():
flow = RouterFlow()
await flow.kickoff()
asyncio.run(main())
``` ```
```text ``` text Output
Starting the structured flow Starting the structured flow
Third method running Third method running
Fourth method running Fourth method running
@@ -447,21 +485,22 @@ This command will generate a new CrewAI project with the necessary folder struct
After running the `crewai create flow name_of_flow` command, you will see a folder structure similar to the following: After running the `crewai create flow name_of_flow` command, you will see a folder structure similar to the following:
| Directory/File          | Description                                                         |
| :---------------------- | :------------------------------------------------------------------ |
| `name_of_flow/`         | Root directory for the flow.                                        |
| ├── `crews/`            | Contains directories for specific crews.                            |
| │ └── `poem_crew/`      | Directory for the "poem_crew" with its configurations and scripts.  |
| │ ├── `config/`         | Configuration files directory for the "poem_crew".                  |
| │ ├── `agents.yaml`     | YAML file defining the agents for "poem_crew".                      |
| │ └── `tasks.yaml`      | YAML file defining the tasks for "poem_crew".                       |
| │ ├── `poem_crew.py`    | Script for "poem_crew" functionality.                               |
| ├── `tools/`            | Directory for additional tools used in the flow.                    |
| │ └── `custom_tool.py`  | Custom tool implementation.                                         |
| ├── `main.py`           | Main script for running the flow.                                   |
| ├── `README.md`         | Project description and instructions.                               |
| ├── `pyproject.toml`    | Configuration file for project dependencies and settings.           |
| └── `.gitignore`        | Specifies files and directories to ignore in version control.       |
### Building Your Crews ### Building Your Crews
@@ -479,8 +518,9 @@ The `main.py` file is where you create your flow and connect the crews together.
Here's an example of how you can connect the `poem_crew` in the `main.py` file: Here's an example of how you can connect the `poem_crew` in the `main.py` file:
```python ```python Code
#!/usr/bin/env python #!/usr/bin/env python
import asyncio
from random import randint from random import randint
from pydantic import BaseModel from pydantic import BaseModel
@@ -496,12 +536,14 @@ class PoemFlow(Flow[PoemState]):
@start() @start()
def generate_sentence_count(self): def generate_sentence_count(self):
print("Generating sentence count") print("Generating sentence count")
# Generate a number between 1 and 5
self.state.sentence_count = randint(1, 5) self.state.sentence_count = randint(1, 5)
@listen(generate_sentence_count) @listen(generate_sentence_count)
def generate_poem(self): def generate_poem(self):
print("Generating poem") print("Generating poem")
result = PoemCrew().crew().kickoff(inputs={"sentence_count": self.state.sentence_count}) poem_crew = PoemCrew().crew()
result = poem_crew.kickoff(inputs={"sentence_count": self.state.sentence_count})
print("Poem generated", result.raw) print("Poem generated", result.raw)
self.state.poem = result.raw self.state.poem = result.raw
@@ -512,17 +554,18 @@ class PoemFlow(Flow[PoemState]):
with open("poem.txt", "w") as f: with open("poem.txt", "w") as f:
f.write(self.state.poem) f.write(self.state.poem)
def kickoff(): async def run():
"""
Run the flow.
"""
poem_flow = PoemFlow() poem_flow = PoemFlow()
poem_flow.kickoff() await poem_flow.kickoff()
def main():
def plot(): asyncio.run(run())
poem_flow = PoemFlow()
poem_flow.plot()
if __name__ == "__main__": if __name__ == "__main__":
kickoff() main()
``` ```
In this example, the `PoemFlow` class defines a flow that generates a sentence count, uses the `PoemCrew` to generate a poem, and then saves the poem to a file. The flow is kicked off by calling the `kickoff()` method. In this example, the `PoemFlow` class defines a flow that generates a sentence count, uses the `PoemCrew` to generate a poem, and then saves the poem to a file. The flow is kicked off by calling the `kickoff()` method.
@@ -544,53 +587,17 @@ source .venv/bin/activate
After activating the virtual environment, you can run the flow by executing one of the following commands: After activating the virtual environment, you can run the flow by executing one of the following commands:
```bash ```bash
crewai flow kickoff crewai flow run
``` ```
or or
```bash ```bash
uv run kickoff uv run run_flow
``` ```
The flow will execute, and you should see the output in the console. The flow will execute, and you should see the output in the console.
### Adding Additional Crews Using the CLI
Once you have created your initial flow, you can easily add additional crews to your project using the CLI. This allows you to expand your flow's capabilities by integrating new crews without starting from scratch.
To add a new crew to your existing flow, use the following command:
```bash
crewai flow add-crew <crew_name>
```
This command will create a new directory for your crew within the `crews` folder of your flow project. It will include the necessary configuration files and a crew definition file, similar to the initial setup.
#### Folder Structure
After adding a new crew, your folder structure will look like this:
| Directory/File | Description |
| :--------------------- | :----------------------------------------------------------------- |
| `name_of_flow/` | Root directory for the flow. |
| ├── `crews/` | Contains directories for specific crews. |
| │ ├── `poem_crew/` | Directory for the "poem_crew" with its configurations and scripts. |
| │ │ ├── `config/` | Configuration files directory for the "poem_crew". |
| │ │ │ ├── `agents.yaml` | YAML file defining the agents for "poem_crew". |
| │ │ │ └── `tasks.yaml` | YAML file defining the tasks for "poem_crew". |
| │ │ └── `poem_crew.py` | Script for "poem_crew" functionality. |
| └── `name_of_crew/` | Directory for the new crew. |
| ├── `config/` | Configuration files directory for the new crew. |
| │ ├── `agents.yaml` | YAML file defining the agents for the new crew. |
| │ └── `tasks.yaml` | YAML file defining the tasks for the new crew. |
| └── `name_of_crew.py` | Script for the new crew functionality. |
You can then customize the `agents.yaml` and `tasks.yaml` files to define the agents and tasks for your new crew. The `name_of_crew.py` file will contain the crew's logic, which you can modify to suit your needs.
By using the CLI to add additional crews, you can efficiently build complex AI workflows that leverage multiple crews working together.
## Plot Flows ## Plot Flows
Visualizing your AI workflows can provide valuable insights into the structure and execution paths of your flows. CrewAI offers a powerful visualization tool that allows you to generate interactive plots of your flows, making it easier to understand and optimize your AI workflows. Visualizing your AI workflows can provide valuable insights into the structure and execution paths of your flows. CrewAI offers a powerful visualization tool that allows you to generate interactive plots of your flows, making it easier to understand and optimize your AI workflows.
@@ -607,7 +614,7 @@ CrewAI provides two convenient methods to generate plots of your flows:
If you are working directly with a flow instance, you can generate a plot by calling the `plot()` method on your flow object. This method will create an HTML file containing the interactive plot of your flow. If you are working directly with a flow instance, you can generate a plot by calling the `plot()` method on your flow object. This method will create an HTML file containing the interactive plot of your flow.
```python ```python Code
# Assuming you have a flow instance # Assuming you have a flow instance
flow.plot("my_flow_plot") flow.plot("my_flow_plot")
``` ```
@@ -630,114 +637,13 @@ The generated plot will display nodes representing the tasks in your flow, with
By visualizing your flows, you can gain a clearer understanding of the workflow's structure, making it easier to debug, optimize, and communicate your AI processes to others. By visualizing your flows, you can gain a clearer understanding of the workflow's structure, making it easier to debug, optimize, and communicate your AI processes to others.
### Conclusion
## Advanced

Plotting your flows is a powerful feature of CrewAI that enhances your ability to design and manage complex AI workflows. Whether you choose to use the `plot()` method or the command line, generating plots will provide you with a visual representation of your workflows, aiding in both development and presentation.
In this section, we explore more complex use cases of CrewAI Flows, starting with a self-evaluation loop. This pattern is crucial for developing AI systems that can iteratively improve their outputs through feedback.
### 1) Self-Evaluation Loop
The self-evaluation loop is a powerful pattern that allows AI workflows to automatically assess and refine their outputs. This example demonstrates how to set up a flow that generates content, evaluates it, and iterates based on feedback until the desired quality is achieved.
#### Overview
The self-evaluation loop involves two main Crews:
1. **ShakespeareanXPostCrew**: Generates a Shakespearean-style post on a given topic.
2. **XPostReviewCrew**: Evaluates the generated post, providing feedback on its validity and quality.
The process iterates until the post meets the criteria or a maximum retry limit is reached. This approach ensures high-quality outputs through iterative refinement.
#### Importance
This pattern is essential for building robust AI systems that can adapt and improve over time. By automating the evaluation and feedback loop, developers can ensure that their AI workflows produce reliable and high-quality results.
#### Main Code Highlights
Below is the `main.py` file for the self-evaluation loop flow:
```python
from typing import Optional
from crewai.flow.flow import Flow, listen, router, start
from pydantic import BaseModel
from self_evaluation_loop_flow.crews.shakespeare_crew.shakespeare_crew import (
ShakespeareanXPostCrew,
)
from self_evaluation_loop_flow.crews.x_post_review_crew.x_post_review_crew import (
XPostReviewCrew,
)
class ShakespeareXPostFlowState(BaseModel):
x_post: str = ""
feedback: Optional[str] = None
valid: bool = False
retry_count: int = 0
class ShakespeareXPostFlow(Flow[ShakespeareXPostFlowState]):
@start("retry")
def generate_shakespeare_x_post(self):
print("Generating Shakespearean X post")
topic = "Flying cars"
result = (
ShakespeareanXPostCrew()
.crew()
.kickoff(inputs={"topic": topic, "feedback": self.state.feedback})
)
print("X post generated", result.raw)
self.state.x_post = result.raw
@router(generate_shakespeare_x_post)
def evaluate_x_post(self):
if self.state.retry_count > 3:
return "max_retry_exceeded"
result = XPostReviewCrew().crew().kickoff(inputs={"x_post": self.state.x_post})
self.state.valid = result["valid"]
self.state.feedback = result["feedback"]
print("valid", self.state.valid)
print("feedback", self.state.feedback)
self.state.retry_count += 1
if self.state.valid:
return "complete"
return "retry"
@listen("complete")
def save_result(self):
print("X post is valid")
print("X post:", self.state.x_post)
with open("x_post.txt", "w") as file:
file.write(self.state.x_post)
@listen("max_retry_exceeded")
def max_retry_exceeded_exit(self):
print("Max retry count exceeded")
print("X post:", self.state.x_post)
print("Feedback:", self.state.feedback)
def kickoff():
shakespeare_flow = ShakespeareXPostFlow()
shakespeare_flow.kickoff()
def plot():
shakespeare_flow = ShakespeareXPostFlow()
shakespeare_flow.plot()
if __name__ == "__main__":
kickoff()
```
#### Code Highlights
- **Retry Mechanism**: The flow uses a retry mechanism to regenerate the post if it doesn't meet the criteria, up to a maximum of three retries.
- **Feedback Loop**: Feedback from the `XPostReviewCrew` is used to refine the post iteratively.
- **State Management**: The flow maintains state using a Pydantic model, ensuring type safety and clarity.
For a complete example and further details, please refer to the [Self Evaluation Loop Flow repository](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow).
## Next Steps ## Next Steps
If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are five specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example: If you're interested in exploring additional examples of flows, we have a variety of recommendations in our examples repository. Here are four specific flow examples, each showcasing unique use cases to help you match your current problem type to a specific example:
1. **Email Auto Responder Flow**: This example demonstrates an infinite loop where a background job continually runs to automate email responses. It's a great use case for tasks that need to be performed repeatedly without manual intervention. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/email_auto_responder_flow) 1. **Email Auto Responder Flow**: This example demonstrates an infinite loop where a background job continually runs to automate email responses. It's a great use case for tasks that need to be performed repeatedly without manual intervention. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/email_auto_responder_flow)
@@ -747,19 +653,4 @@ If you're interested in exploring additional examples of flows, we have a variet
4. **Meeting Assistant Flow**: This flow demonstrates how to broadcast one event to trigger multiple follow-up actions. For instance, after a meeting is completed, the flow can update a Trello board, send a Slack message, and save the results. It's a great example of handling multiple outcomes from a single event, making it ideal for comprehensive task management and notification systems. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/meeting_assistant_flow) 4. **Meeting Assistant Flow**: This flow demonstrates how to broadcast one event to trigger multiple follow-up actions. For instance, after a meeting is completed, the flow can update a Trello board, send a Slack message, and save the results. It's a great example of handling multiple outcomes from a single event, making it ideal for comprehensive task management and notification systems. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/meeting_assistant_flow)
5. **Self Evaluation Loop Flow**: This flow demonstrates a self-evaluation loop where AI workflows automatically assess and refine their outputs through feedback. It involves generating content, evaluating it, and iterating until the desired quality is achieved. This pattern is crucial for developing robust AI systems that can adapt and improve over time. [View Example](https://github.com/crewAIInc/crewAI-examples/tree/main/self_evaluation_loop_flow)
By exploring these examples, you can gain insights into how to leverage CrewAI Flows for various use cases, from automating repetitive tasks to managing complex, multi-step processes with dynamic decision-making and human feedback. By exploring these examples, you can gain insights into how to leverage CrewAI Flows for various use cases, from automating repetitive tasks to managing complex, multi-step processes with dynamic decision-making and human feedback.
Also, check out our YouTube video on how to use flows in CrewAI below!
<iframe
width="560"
height="315"
src="https://www.youtube.com/embed/MTb5my6VOT8"
title="YouTube video player"
frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
referrerpolicy="strict-origin-when-cross-origin"
allowfullscreen
></iframe>

View File

@@ -25,55 +25,50 @@ By default, CrewAI uses the `gpt-4o-mini` model. It uses environment variables i
- `OPENAI_API_BASE` - `OPENAI_API_BASE`
- `OPENAI_API_KEY` - `OPENAI_API_KEY`
### 2. Custom LLM Objects ### 2. String Identifier
```python Code
agent = Agent(llm="gpt-4o", ...)
```
### 3. LLM Instance
List of [more providers](https://docs.litellm.ai/docs/providers).
```python Code
from crewai import LLM
llm = LLM(model="gpt-4", temperature=0.7)
agent = Agent(llm=llm, ...)
```
### 4. Custom LLM Objects
Pass a custom LLM implementation or object from another library. Pass a custom LLM implementation or object from another library.
See below for examples.
<Tabs>
<Tab title="String Identifier">
```python Code
agent = Agent(llm="gpt-4o", ...)
```
</Tab>
<Tab title="LLM Instance">
```python Code
from crewai import LLM
llm = LLM(model="gpt-4", temperature=0.7)
agent = Agent(llm=llm, ...)
```
</Tab>
</Tabs>
## Connecting to OpenAI-Compatible LLMs ## Connecting to OpenAI-Compatible LLMs
You can connect to OpenAI-compatible LLMs using either environment variables or by setting specific attributes on the LLM class: You can connect to OpenAI-compatible LLMs using either environment variables or by setting specific attributes on the LLM class:
<Tabs> 1. Using environment variables:
<Tab title="Using Environment Variables">
```python Code
import os
os.environ["OPENAI_API_KEY"] = "your-api-key" ```python Code
os.environ["OPENAI_API_BASE"] = "https://api.your-provider.com/v1" import os
```
</Tab>
<Tab title="Using LLM Class Attributes">
```python Code
from crewai import LLM
llm = LLM( os.environ["OPENAI_API_KEY"] = "your-api-key"
model="custom-model-name", os.environ["OPENAI_API_BASE"] = "https://api.your-provider.com/v1"
api_key="your-api-key", ```
base_url="https://api.your-provider.com/v1"
) 2. Using LLM class attributes:
agent = Agent(llm=llm, ...)
``` ```python Code
</Tab> llm = LLM(
</Tabs> model="custom-model-name",
api_key="your-api-key",
base_url="https://api.your-provider.com/v1"
)
agent = Agent(llm=llm, ...)
```
## LLM Configuration Options ## LLM Configuration Options
@@ -100,165 +95,43 @@ When configuring an LLM for your agent, you have access to a wide range of param
| **api_key** | `str` | Your API key for authentication. | | **api_key** | `str` | Your API key for authentication. |
These are examples of how to configure LLMs for your agent. Example:
<AccordionGroup> ```python Code
<Accordion title="OpenAI"> llm = LLM(
model="gpt-4",
temperature=0.8,
max_tokens=150,
top_p=0.9,
frequency_penalty=0.1,
presence_penalty=0.1,
stop=["END"],
seed=42,
base_url="https://api.openai.com/v1",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
## Using Ollama (Local LLMs)
```python Code crewAI supports using Ollama for running open-source models locally:
from crewai import LLM
llm = LLM( 1. Install Ollama: [ollama.ai](https://ollama.ai/)
model="gpt-4", 2. Run a model: `ollama run llama2`
temperature=0.8, 3. Configure agent:
max_tokens=150,
top_p=0.9,
frequency_penalty=0.1,
presence_penalty=0.1,
stop=["END"],
seed=42,
base_url="https://api.openai.com/v1",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="Cerebras"> ```python Code
agent = Agent(
```python Code llm=LLM(model="ollama/llama3.1", base_url="http://localhost:11434"),
from crewai import LLM ...
)
llm = LLM( ```
model="cerebras/llama-3.1-70b",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="Ollama (Local LLMs)">
CrewAI supports using Ollama for running open-source models locally:
1. Install Ollama: [ollama.ai](https://ollama.ai/)
2. Run a model: `ollama run llama2`
3. Configure agent:
```python Code
from crewai import LLM
agent = Agent(
llm=LLM(
model="ollama/llama3.1",
base_url="http://localhost:11434"
),
...
)
```
</Accordion>
<Accordion title="Groq">
```python Code
from crewai import LLM
llm = LLM(
model="groq/llama3-8b-8192",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="Anthropic">
```python Code
from crewai import LLM
llm = LLM(
model="anthropic/claude-3-5-sonnet-20241022",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="Fireworks AI">
```python Code
from crewai import LLM
llm = LLM(
model="fireworks_ai/accounts/fireworks/models/llama-v3-70b-instruct",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="Gemini">
```python Code
from crewai import LLM
llm = LLM(
model="gemini/gemini-1.5-pro-002",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="Perplexity AI (pplx-api)">
```python Code
from crewai import LLM
llm = LLM(
model="perplexity/mistral-7b-instruct",
base_url="https://api.perplexity.ai/v1",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="IBM watsonx.ai">
```python Code
from crewai import LLM
llm = LLM(
model="watsonx/ibm/granite-13b-chat-v2",
base_url="https://api.watsonx.ai/v1",
api_key="your-api-key-here"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
<Accordion title="Hugging Face">
```python Code
from crewai import LLM
llm = LLM(
model="huggingface/meta-llama/Meta-Llama-3.1-8B-Instruct",
api_key="your-api-key-here",
base_url="your_api_endpoint"
)
agent = Agent(llm=llm, ...)
```
</Accordion>
</AccordionGroup>
## Changing the Base API URL ## Changing the Base API URL
You can change the base API URL for any LLM provider by setting the `base_url` parameter: You can change the base API URL for any LLM provider by setting the `base_url` parameter:
```python Code ```python Code
from crewai import LLM
llm = LLM( llm = LLM(
model="custom-model-name", model="custom-model-name",
base_url="https://api.your-provider.com/v1", base_url="https://api.your-provider.com/v1",

View File

@@ -34,7 +34,7 @@ By default, the memory system is disabled, and you can ensure it is active by se
The memory will use OpenAI embeddings by default, but you can change it by setting `embedder` to a different model. The memory will use OpenAI embeddings by default, but you can change it by setting `embedder` to a different model.
It's also possible to initialize the memory instance with your own instance. It's also possible to initialize the memory instance with your own instance.
The 'embedder' only applies to **Short-Term Memory** which uses Chroma for RAG. The 'embedder' only applies to **Short-Term Memory** which uses Chroma for RAG using the EmbedChain package.
The **Long-Term Memory** uses SQLite3 to store task results. Currently, there is no way to override these storage implementations. The **Long-Term Memory** uses SQLite3 to store task results. Currently, there is no way to override these storage implementations.
The data storage files are saved into a platform-specific location found using the appdirs package, The data storage files are saved into a platform-specific location found using the appdirs package,
and the name of the project can be overridden using the **CREWAI_STORAGE_DIR** environment variable. and the name of the project can be overridden using the **CREWAI_STORAGE_DIR** environment variable.
@@ -113,42 +113,6 @@ my_crew = Crew(
} }
) )
``` ```
Alternatively, you can directly pass the OpenAIEmbeddingFunction to the embedder parameter.
Example:
```python Code
from crewai import Crew, Agent, Task, Process
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
my_crew = Crew(
agents=[...],
tasks=[...],
process=Process.sequential,
memory=True,
verbose=True,
embedder=OpenAIEmbeddingFunction(api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"),
)
```
### Using Ollama embeddings
```python Code
from crewai import Crew, Agent, Task, Process
my_crew = Crew(
agents=[...],
tasks=[...],
process=Process.sequential,
memory=True,
verbose=True,
embedder={
"provider": "ollama",
"config": {
"model": "mxbai-embed-large"
}
}
)
```
### Using Google AI embeddings ### Using Google AI embeddings
@@ -164,8 +128,9 @@ my_crew = Crew(
embedder={ embedder={
"provider": "google", "provider": "google",
"config": { "config": {
"api_key": "<YOUR_API_KEY>", "model": 'models/embedding-001',
"model_name": "<model_name>" "task_type": "retrieval_document",
"title": "Embeddings for Embedchain"
} }
} }
) )
@@ -174,7 +139,6 @@ my_crew = Crew(
### Using Azure OpenAI embeddings ### Using Azure OpenAI embeddings
```python Code ```python Code
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
from crewai import Crew, Agent, Task, Process from crewai import Crew, Agent, Task, Process
my_crew = Crew( my_crew = Crew(
@@ -183,20 +147,36 @@ my_crew = Crew(
process=Process.sequential, process=Process.sequential,
memory=True, memory=True,
verbose=True, verbose=True,
embedder=OpenAIEmbeddingFunction( embedder={
api_key="YOUR_API_KEY", "provider": "azure_openai",
api_base="YOUR_API_BASE_PATH", "config": {
api_type="azure", "model": 'text-embedding-ada-002',
api_version="YOUR_API_VERSION", "deployment_name": "your_embedding_model_deployment_name"
model_name="text-embedding-3-small" }
) }
)
```
### Using GPT4ALL embeddings
```python Code
from crewai import Crew, Agent, Task, Process
my_crew = Crew(
agents=[...],
tasks=[...],
process=Process.sequential,
memory=True,
verbose=True,
embedder={
"provider": "gpt4all"
}
) )
``` ```
### Using Vertex AI embeddings ### Using Vertex AI embeddings
```python Code ```python Code
from chromadb.utils.embedding_functions import GoogleVertexEmbeddingFunction
from crewai import Crew, Agent, Task, Process from crewai import Crew, Agent, Task, Process
my_crew = Crew( my_crew = Crew(
@@ -205,12 +185,12 @@ my_crew = Crew(
process=Process.sequential, process=Process.sequential,
memory=True, memory=True,
verbose=True, verbose=True,
embedder=GoogleVertexEmbeddingFunction( embedder={
project_id="YOUR_PROJECT_ID", "provider": "vertexai",
region="YOUR_REGION", "config": {
api_key="YOUR_API_KEY", "model": 'textembedding-gecko'
model_name="textembedding-gecko" }
) }
) )
``` ```
@@ -228,52 +208,8 @@ my_crew = Crew(
embedder={ embedder={
"provider": "cohere", "provider": "cohere",
"config": { "config": {
"api_key": "YOUR_API_KEY", "model": "embed-english-v3.0",
"model_name": "<model_name>" "vector_dimension": 1024
}
}
)
```
### Using HuggingFace embeddings
```python Code
from crewai import Crew, Agent, Task, Process
my_crew = Crew(
agents=[...],
tasks=[...],
process=Process.sequential,
memory=True,
verbose=True,
embedder={
"provider": "huggingface",
"config": {
"api_url": "<api_url>",
}
}
)
```
### Using Watson embeddings
```python Code
from crewai import Crew, Agent, Task, Process
# Note: Ensure you have installed and imported `ibm_watsonx_ai` for Watson embeddings to work.
my_crew = Crew(
agents=[...],
tasks=[...],
process=Process.sequential,
memory=True,
verbose=True,
embedder={
"provider": "watson",
"config": {
"model": "<model_name>",
"api_url": "<api_url>",
"api_key": "<YOUR_API_KEY>",
"project_id": "<YOUR_PROJECT_ID>",
} }
} }
) )

277
docs/concepts/pipeline.mdx Normal file
View File

@@ -0,0 +1,277 @@
---
title: Pipelines
description: Understanding and utilizing pipelines in the crewAI framework for efficient multi-stage task processing.
icon: timeline-arrow
---
## What is a Pipeline?
A pipeline in CrewAI represents a structured workflow that allows for the sequential or parallel execution of multiple crews. It provides a way to organize complex processes involving multiple stages, where the output of one stage can serve as input for subsequent stages.
## Key Terminology
Understanding the following terms is crucial for working effectively with pipelines:
- **Stage**: A distinct part of the pipeline, which can be either sequential (a single crew) or parallel (multiple crews executing concurrently).
- **Kickoff**: A specific execution of the pipeline for a given set of inputs, representing a single instance of processing through the pipeline.
- **Branch**: Parallel executions within a stage (e.g., concurrent crew operations).
- **Trace**: The journey of an individual input through the entire pipeline, capturing the path and transformations it undergoes.
Example pipeline structure:
```bash Pipeline
crew1 >> [crew2, crew3] >> crew4
```
This represents a pipeline with three stages:
1. A sequential stage (crew1)
2. A parallel stage with two branches (crew2 and crew3 executing concurrently)
3. Another sequential stage (crew4)
Each input creates its own kickoff, flowing through all stages of the pipeline. Multiple kickoffs can be processed concurrently, each following the defined pipeline structure.
## Pipeline Attributes
| Attribute | Parameters | Description |
| :--------- | :---------- | :----------------------------------------------------------------------------------------------------------------- |
| **Stages** | `stages` | A list of `PipelineStage` (crews, lists of crews, or routers) representing the stages to be executed in sequence. |
## Creating a Pipeline
When creating a pipeline, you define a series of stages, each consisting of either a single crew or a list of crews for parallel execution.
The pipeline ensures that each stage is executed in order, with the output of one stage feeding into the next.
### Example: Assembling a Pipeline
```python
from crewai import Crew, Process, Pipeline
# Define your crews
research_crew = Crew(
agents=[researcher],
tasks=[research_task],
process=Process.sequential
)
analysis_crew = Crew(
agents=[analyst],
tasks=[analysis_task],
process=Process.sequential
)
writing_crew = Crew(
agents=[writer],
tasks=[writing_task],
process=Process.sequential
)
# Assemble the pipeline
my_pipeline = Pipeline(
stages=[research_crew, analysis_crew, writing_crew]
)
```
## Pipeline Methods
| Method | Description |
| :--------------- | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| **kickoff** | Executes the pipeline, processing all stages and returning the results. This method initiates one or more kickoffs through the pipeline, handling the flow of data between stages. |
| **process_runs** | Runs the pipeline for each input provided, handling the flow and transformation of data between stages. |
## Pipeline Output
The output of a pipeline in the CrewAI framework is encapsulated within the `PipelineKickoffResult` class.
This class provides a structured way to access the results of the pipeline's execution, including various formats such as raw strings, JSON, and Pydantic models.
### Pipeline Output Attributes
| Attribute | Parameters | Type | Description |
| :-------------- | :------------ | :------------------------ | :-------------------------------------------------------------------------------------------------------- |
| **ID** | `id` | `UUID4` | A unique identifier for the pipeline output. |
| **Run Results** | `run_results` | `List[PipelineRunResult]` | A list of `PipelineRunResult` objects, each representing the output of a single run through the pipeline. |
### Pipeline Output Methods
| Method/Property | Description |
| :----------------- | :----------------------------------------------------- |
| **add_run_result** | Adds a `PipelineRunResult` to the list of run results. |
### Pipeline Run Result Attributes
| Attribute | Parameters | Type | Description |
| :---------------- | :-------------- | :------------------------- | :-------------------------------------------------------------------------------------------- |
| **ID** | `id` | `UUID4` | A unique identifier for the run result. |
| **Raw** | `raw` | `str` | The raw output of the final stage in the pipeline kickoff. |
| **Pydantic** | `pydantic` | `Any` | A Pydantic model object representing the structured output of the final stage, if applicable. |
| **JSON Dict** | `json_dict` | `Union[Dict[str, Any], None]` | A dictionary representing the JSON output of the final stage, if applicable. |
| **Token Usage** | `token_usage` | `Dict[str, UsageMetrics]` | A summary of token usage across all stages of the pipeline kickoff. |
| **Trace** | `trace` | `List[Any]` | A trace of the journey of inputs through the pipeline kickoff. |
| **Crews Outputs** | `crews_outputs` | `List[CrewOutput]` | A list of `CrewOutput` objects, representing the outputs from each crew in the pipeline kickoff. |
### Pipeline Run Result Methods and Properties
| Method/Property | Description |
| :-------------- | :------------------------------------------------------------------------------------------------------- |
| **json** | Returns the JSON string representation of the run result if the output format of the final task is JSON. |
| **to_dict** | Converts the JSON and Pydantic outputs to a dictionary. |
| **str** | Returns the string representation of the run result, prioritizing Pydantic, then JSON, then raw. |
### Accessing Pipeline Outputs
Once a pipeline has been executed, its output can be accessed through the `PipelineOutput` object returned by the `process_runs` method.
The `PipelineOutput` class provides access to individual `PipelineRunResult` objects, each representing a single run through the pipeline.
#### Example
```python
# Define input data for the pipeline
input_data = [
{"initial_query": "Latest advancements in AI"},
{"initial_query": "Future of robotics"}
]
# Execute the pipeline
pipeline_output = await my_pipeline.process_runs(input_data)
# Access the results
for run_result in pipeline_output.run_results:
print(f"Run ID: {run_result.id}")
print(f"Final Raw Output: {run_result.raw}")
if run_result.json_dict:
print(f"JSON Output: {json.dumps(run_result.json_dict, indent=2)}")
if run_result.pydantic:
print(f"Pydantic Output: {run_result.pydantic}")
print(f"Token Usage: {run_result.token_usage}")
print(f"Trace: {run_result.trace}")
print("Crew Outputs:")
for crew_output in run_result.crews_outputs:
print(f" Crew: {crew_output.raw}")
print("\n")
```
This example demonstrates how to access and work with the pipeline output, including individual run results and their associated data.
## Using Pipelines
Pipelines are particularly useful for complex workflows that involve multiple stages of processing, analysis, or content generation. They allow you to:
1. **Sequence Operations**: Execute crews in a specific order, ensuring that the output of one crew is available as input to the next.
2. **Parallel Processing**: Run multiple crews concurrently within a stage for increased efficiency.
3. **Manage Complex Workflows**: Break down large tasks into smaller, manageable steps executed by specialized crews.
### Example: Running a Pipeline
```python
# Define input data for the pipeline
input_data = [{"initial_query": "Latest advancements in AI"}]
# Execute the pipeline, initiating a run for each input
results = await my_pipeline.process_runs(input_data)
# Access the results
for result in results:
print(f"Final Output: {result.raw}")
print(f"Token Usage: {result.token_usage}")
print(f"Trace: {result.trace}") # Shows the path of the input through all stages
```
## Advanced Features
### Parallel Execution within Stages
You can define parallel execution within a stage by providing a list of crews, creating multiple branches:
```python
parallel_analysis_crew = Crew(agents=[financial_analyst], tasks=[financial_analysis_task])
market_analysis_crew = Crew(agents=[market_analyst], tasks=[market_analysis_task])
my_pipeline = Pipeline(
stages=[
research_crew,
[parallel_analysis_crew, market_analysis_crew], # Parallel execution (branching)
writing_crew
]
)
```
### Routers in Pipelines
Routers are a powerful feature in crewAI pipelines that allow for dynamic decision-making and branching within your workflow.
They enable you to direct the flow of execution based on specific conditions or criteria, making your pipelines more flexible and adaptive.
#### What is a Router?
A router in crewAI is a special component that can be included as a stage in your pipeline. It evaluates the input data and determines which path the execution should take next.
This allows for conditional branching in your pipeline, where different crews or sub-pipelines can be executed based on the router's decision.
#### Key Components of a Router
1. **Routes**: A dictionary of named routes, each associated with a condition and a pipeline to execute if the condition is met.
2. **Default Route**: A fallback pipeline that is executed if none of the defined route conditions are met.
#### Creating a Router
Here's an example of how to create a router:
```python
from crewai import Router, Route, Pipeline, Crew, Agent, Task
# Define your agents
classifier = Agent(name="Classifier", role="Email Classifier")
urgent_handler = Agent(name="Urgent Handler", role="Urgent Email Processor")
normal_handler = Agent(name="Normal Handler", role="Normal Email Processor")
# Define your tasks
classify_task = Task(description="Classify the email based on its content and metadata.")
urgent_task = Task(description="Process and respond to urgent email quickly.")
normal_task = Task(description="Process and respond to normal email thoroughly.")
# Define your crews
classification_crew = Crew(agents=[classifier], tasks=[classify_task]) # classify email between high and low urgency 1-10
urgent_crew = Crew(agents=[urgent_handler], tasks=[urgent_task])
normal_crew = Crew(agents=[normal_handler], tasks=[normal_task])
# Create pipelines for different urgency levels
urgent_pipeline = Pipeline(stages=[urgent_crew])
normal_pipeline = Pipeline(stages=[normal_crew])
# Create a router
email_router = Router(
routes={
"high_urgency": Route(
condition=lambda x: x.get("urgency_score", 0) > 7,
pipeline=urgent_pipeline
),
"low_urgency": Route(
condition=lambda x: x.get("urgency_score", 0) <= 7,
pipeline=normal_pipeline
)
},
default=Pipeline(stages=[normal_pipeline]) # Default to just normal if no urgency score
)
# Use the router in a main pipeline
main_pipeline = Pipeline(stages=[classification_crew, email_router])
inputs = [{"email": "..."}, {"email": "..."}] # List of email data
main_pipeline.kickoff(inputs=inputs)
```
In this example, the router decides between an urgent pipeline and a normal pipeline based on the urgency score of the email. If the urgency score is greater than 7,
it routes to the urgent pipeline; otherwise, it uses the normal pipeline. If the input doesn't match any route condition, execution falls back to the default pipeline (normal processing).
#### Benefits of Using Routers
1. **Dynamic Workflow**: Adapt your pipeline's behavior based on input characteristics or intermediate results.
2. **Efficiency**: Route urgent tasks to quicker processes, reserving more thorough pipelines for less time-sensitive inputs.
3. **Flexibility**: Easily modify or extend your pipeline's logic without changing the core structure.
4. **Scalability**: Handle a wide range of email types and urgency levels with a single pipeline structure.
### Error Handling and Validation
The `Pipeline` class includes validation mechanisms to ensure the robustness of the pipeline structure:
- Validates that stages contain only Crew instances or lists of Crew instances.
- Prevents double nesting of stages to maintain a clear structure.

View File

@@ -5,7 +5,6 @@ icon: screwdriver-wrench
--- ---
## Introduction ## Introduction
CrewAI tools empower agents with capabilities ranging from web searching and data analysis to collaboration and delegating tasks among coworkers. CrewAI tools empower agents with capabilities ranging from web searching and data analysis to collaboration and delegating tasks among coworkers.
This documentation outlines how to create, integrate, and leverage these tools within the CrewAI framework, including a new focus on collaboration tools. This documentation outlines how to create, integrate, and leverage these tools within the CrewAI framework, including a new focus on collaboration tools.
@@ -104,53 +103,57 @@ crew.kickoff()
Here is a list of the available tools and their descriptions: Here is a list of the available tools and their descriptions:
| Tool | Description | | Tool | Description |
| :------------------------------- | :--------------------------------------------------------------------------------------------- | | :-------------------------- | :-------------------------------------------------------------------------------------------- |
| **BrowserbaseLoadTool** | A tool for interacting with and extracting data from web browsers. | | **BrowserbaseLoadTool** | A tool for interacting with and extracting data from web browsers. |
| **CodeDocsSearchTool** | A RAG tool optimized for searching through code documentation and related technical documents. | | **CodeDocsSearchTool** | A RAG tool optimized for searching through code documentation and related technical documents. |
| **CodeInterpreterTool** | A tool for interpreting python code. | | **CodeInterpreterTool** | A tool for interpreting python code. |
| **ComposioTool** | Enables use of Composio tools. | | **ComposioTool** | Enables use of Composio tools. |
| **CSVSearchTool** | A RAG tool designed for searching within CSV files, tailored to handle structured data. | | **CSVSearchTool** | A RAG tool designed for searching within CSV files, tailored to handle structured data. |
| **DALL-E Tool** | A tool for generating images using the DALL-E API. | | **DALL-E Tool** | A tool for generating images using the DALL-E API. |
| **DirectorySearchTool** | A RAG tool for searching within directories, useful for navigating through file systems. | | **DirectorySearchTool** | A RAG tool for searching within directories, useful for navigating through file systems. |
| **DOCXSearchTool** | A RAG tool aimed at searching within DOCX documents, ideal for processing Word files. | | **DOCXSearchTool** | A RAG tool aimed at searching within DOCX documents, ideal for processing Word files. |
| **DirectoryReadTool** | Facilitates reading and processing of directory structures and their contents. | | **DirectoryReadTool** | Facilitates reading and processing of directory structures and their contents. |
| **EXASearchTool** | A tool designed for performing exhaustive searches across various data sources. | | **EXASearchTool** | A tool designed for performing exhaustive searches across various data sources. |
| **FileReadTool** | Enables reading and extracting data from files, supporting various file formats. | | **FileReadTool** | Enables reading and extracting data from files, supporting various file formats. |
| **FirecrawlSearchTool** | A tool to search webpages using Firecrawl and return the results. | | **FirecrawlSearchTool** | A tool to search webpages using Firecrawl and return the results. |
| **FirecrawlCrawlWebsiteTool** | A tool for crawling webpages using Firecrawl. | | **FirecrawlCrawlWebsiteTool** | A tool for crawling webpages using Firecrawl. |
| **FirecrawlScrapeWebsiteTool** | A tool for scraping webpages URL using Firecrawl and returning its contents. | | **FirecrawlScrapeWebsiteTool** | A tool for scraping webpages URL using Firecrawl and returning its contents. |
| **GithubSearchTool** | A RAG tool for searching within GitHub repositories, useful for code and documentation search. | | **GithubSearchTool** | A RAG tool for searching within GitHub repositories, useful for code and documentation search.|
| **SerperDevTool** | A specialized tool for development purposes, with specific functionalities under development. | | **SerperDevTool** | A specialized tool for development purposes, with specific functionalities under development. |
| **TXTSearchTool** | A RAG tool focused on searching within text (.txt) files, suitable for unstructured data. | | **TXTSearchTool** | A RAG tool focused on searching within text (.txt) files, suitable for unstructured data. |
| **JSONSearchTool** | A RAG tool designed for searching within JSON files, catering to structured data handling. | | **JSONSearchTool** | A RAG tool designed for searching within JSON files, catering to structured data handling. |
| **LlamaIndexTool** | Enables the use of LlamaIndex tools. | | **LlamaIndexTool** | Enables the use of LlamaIndex tools. |
| **MDXSearchTool** | A RAG tool tailored for searching within Markdown (MDX) files, useful for documentation. | | **MDXSearchTool** | A RAG tool tailored for searching within Markdown (MDX) files, useful for documentation. |
| **PDFSearchTool** | A RAG tool aimed at searching within PDF documents, ideal for processing scanned documents. | | **PDFSearchTool** | A RAG tool aimed at searching within PDF documents, ideal for processing scanned documents. |
| **PGSearchTool** | A RAG tool optimized for searching within PostgreSQL databases, suitable for database queries. | | **PGSearchTool** | A RAG tool optimized for searching within PostgreSQL databases, suitable for database queries. |
| **Vision Tool** | A tool for generating images using the DALL-E API. | | **Vision Tool** | A tool for generating images using the DALL-E API. |
| **RagTool** | A general-purpose RAG tool capable of handling various data sources and types. | | **RagTool** | A general-purpose RAG tool capable of handling various data sources and types. |
| **ScrapeElementFromWebsiteTool** | Enables scraping specific elements from websites, useful for targeted data extraction. | | **ScrapeElementFromWebsiteTool** | Enables scraping specific elements from websites, useful for targeted data extraction. |
| **ScrapeWebsiteTool** | Facilitates scraping entire websites, ideal for comprehensive data collection. | | **ScrapeWebsiteTool** | Facilitates scraping entire websites, ideal for comprehensive data collection. |
| **WebsiteSearchTool** | A RAG tool for searching website content, optimized for web data extraction. | | **WebsiteSearchTool** | A RAG tool for searching website content, optimized for web data extraction. |
| **XMLSearchTool** | A RAG tool designed for searching within XML files, suitable for structured data formats. | | **XMLSearchTool** | A RAG tool designed for searching within XML files, suitable for structured data formats. |
| **YoutubeChannelSearchTool** | A RAG tool for searching within YouTube channels, useful for video content analysis. | | **YoutubeChannelSearchTool**| A RAG tool for searching within YouTube channels, useful for video content analysis. |
| **YoutubeVideoSearchTool** | A RAG tool aimed at searching within YouTube videos, ideal for video data extraction. | | **YoutubeVideoSearchTool** | A RAG tool aimed at searching within YouTube videos, ideal for video data extraction. |
## Creating your own Tools ## Creating your own Tools
<Tip> <Tip>
Developers can craft `custom tools` tailored for their agents needs or Developers can craft `custom tools` tailored for their agents needs or utilize pre-built options.
utilize pre-built options.
</Tip> </Tip>
There are two main ways for one to create a CrewAI tool: To create your own CrewAI tools you will need to install our extra tools package:
```bash
pip install 'crewai[tools]'
```
Once you do that there are two main ways for one to create a CrewAI tool:
### Subclassing `BaseTool` ### Subclassing `BaseTool`
```python Code ```python Code
from crewai.tools import BaseTool from crewai_tools import BaseTool
class MyCustomTool(BaseTool): class MyCustomTool(BaseTool):
name: str = "Name of my tool" name: str = "Name of my tool"
@@ -164,7 +167,7 @@ class MyCustomTool(BaseTool):
### Utilizing the `tool` Decorator ### Utilizing the `tool` Decorator
```python Code ```python Code
from crewai.tools import tool from crewai_tools import tool
@tool("Name of my tool") @tool("Name of my tool")
def my_tool(question: str) -> str: def my_tool(question: str) -> str:
"""Clear description for what this tool is useful for, your agent will need this information to use it.""" """Clear description for what this tool is useful for, your agent will need this information to use it."""
@@ -175,13 +178,11 @@ def my_tool(question: str) -> str:
### Custom Caching Mechanism ### Custom Caching Mechanism
<Tip> <Tip>
Tools can optionally implement a `cache_function` to fine-tune caching Tools can optionally implement a `cache_function` to fine-tune caching behavior. This function determines when to cache results based on specific conditions, offering granular control over caching logic.
behavior. This function determines when to cache results based on specific
conditions, offering granular control over caching logic.
</Tip> </Tip>
```python Code ```python Code
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def multiplication_tool(first_number: int, second_number: int) -> str: def multiplication_tool(first_number: int, second_number: int) -> str:

View File

@@ -0,0 +1,163 @@
# Creating a CrewAI Pipeline Project
Welcome to the comprehensive guide for creating a new CrewAI pipeline project. This document will walk you through the steps to create, customize, and run your CrewAI pipeline project, ensuring you have everything you need to get started.
To learn more about CrewAI pipelines, visit the [CrewAI documentation](https://docs.crewai.com/core-concepts/Pipeline/).
## Prerequisites
Before getting started with CrewAI pipelines, make sure that you have installed CrewAI via pip:
```shell
$ pip install crewai crewai-tools
```
The same prerequisites for virtual environments and Code IDEs apply as in regular CrewAI projects.
## Creating a New Pipeline Project
To create a new CrewAI pipeline project, you have two options:
1. For a basic pipeline template:
```shell
$ crewai create pipeline <project_name>
```
2. For a pipeline example that includes a router:
```shell
$ crewai create pipeline --router <project_name>
```
These commands will create a new project folder with the following structure:
```
<project_name>/
├── README.md
├── uv.lock
├── pyproject.toml
├── src/
│ └── <project_name>/
│ ├── __init__.py
│ ├── main.py
│ ├── crews/
│ │ ├── crew1/
│ │ │ ├── crew1.py
│ │ │ └── config/
│ │ │ ├── agents.yaml
│ │ │ └── tasks.yaml
│ │ ├── crew2/
│ │ │ ├── crew2.py
│ │ │ └── config/
│ │ │ ├── agents.yaml
│ │ │ └── tasks.yaml
│ ├── pipelines/
│ │ ├── __init__.py
│ │ ├── pipeline1.py
│ │ └── pipeline2.py
│ └── tools/
│ ├── __init__.py
│ └── custom_tool.py
└── tests/
```
## Customizing Your Pipeline Project
To customize your pipeline project, you can:
1. Modify the crew files in `src/<project_name>/crews/` to define your agents and tasks for each crew.
2. Modify the pipeline files in `src/<project_name>/pipelines/` to define your pipeline structure.
3. Modify `src/<project_name>/main.py` to set up and run your pipelines.
4. Add your environment variables into the `.env` file.
## Example 1: Defining a Two-Stage Sequential Pipeline
Here's an example of how to define a pipeline with sequential stages in `src/<project_name>/pipelines/pipeline.py`:
```python
from crewai import Pipeline
from crewai.project import PipelineBase
from ..crews.research_crew.research_crew import ResearchCrew
from ..crews.write_x_crew.write_x_crew import WriteXCrew
@PipelineBase
class SequentialPipeline:
def __init__(self):
# Initialize crews
self.research_crew = ResearchCrew().crew()
self.write_x_crew = WriteXCrew().crew()
def create_pipeline(self):
return Pipeline(
stages=[
self.research_crew,
self.write_x_crew
]
)
async def kickoff(self, inputs):
pipeline = self.create_pipeline()
results = await pipeline.kickoff(inputs)
return results
```
## Example 2: Defining a Two-Stage Pipeline with Parallel Execution
```python
from crewai import Pipeline
from crewai.project import PipelineBase
from ..crews.research_crew.research_crew import ResearchCrew
from ..crews.write_x_crew.write_x_crew import WriteXCrew
from ..crews.write_linkedin_crew.write_linkedin_crew import WriteLinkedInCrew
@PipelineBase
class ParallelExecutionPipeline:
def __init__(self):
# Initialize crews
self.research_crew = ResearchCrew().crew()
self.write_x_crew = WriteXCrew().crew()
self.write_linkedin_crew = WriteLinkedInCrew().crew()
def create_pipeline(self):
return Pipeline(
stages=[
self.research_crew,
[self.write_x_crew, self.write_linkedin_crew] # Parallel execution
]
)
async def kickoff(self, inputs):
pipeline = self.create_pipeline()
results = await pipeline.kickoff(inputs)
return results
```
### Annotations
The main annotation you'll use for pipelines is `@PipelineBase`. This annotation is used to decorate your pipeline classes, similar to how `@CrewBase` is used for crews.
## Installing Dependencies
To install the dependencies for your project, use `uv`. Running the install command is optional, because `crewai run` will automatically install the dependencies for you:
```shell
$ cd <project_name>
$ crewai install (optional)
```
## Running Your Pipeline Project
To run your pipeline project, use the following command:
```shell
$ crewai run
```
This will initialize your pipeline and begin task execution as defined in your `main.py` file.
## Deploying Your Pipeline Project
Pipelines can be deployed in the same way as regular CrewAI projects. The easiest way is through [CrewAI+](https://www.crewai.com/crewaiplus), where you can deploy your pipeline in a few clicks.
Remember, when working with pipelines, you're orchestrating multiple crews to work together in a sequence or parallel fashion. This allows for more complex workflows and information processing tasks.

View File

@@ -0,0 +1,236 @@
---
title: Starting a New CrewAI Project - Using Template
description: A comprehensive guide to starting a new CrewAI project, including the latest updates and project setup methods.
---
# Starting Your CrewAI Project
Welcome to the ultimate guide for starting a new CrewAI project. This document will walk you through the steps to create, customize, and run your CrewAI project, ensuring you have everything you need to get started.
Before we start, there are a couple of things to note:
1. CrewAI is a Python package and requires Python >=3.10 and <=3.13 to run.
2. The preferred way of setting up CrewAI is using the `crewai create crew` command. This will create a new project folder and install a skeleton template for you to work on.
## Prerequisites
Before getting started with CrewAI, make sure that you have installed it via pip:
```shell
$ pip install 'crewai[tools]'
```
## Creating a New Project
In this example, we will be using `uv` as our virtual environment manager.
To create a new CrewAI project, run the following CLI command:
```shell
$ crewai create crew <project_name>
```
This command will create a new project folder with the following structure:
```shell
my_project/
├── .gitignore
├── pyproject.toml
├── README.md
└── src/
└── my_project/
├── __init__.py
├── main.py
├── crew.py
├── tools/
│ ├── custom_tool.py
│ └── __init__.py
└── config/
├── agents.yaml
└── tasks.yaml
```
You can now start developing your project by editing the files in the `src/my_project` folder. The `main.py` file is the entry point of your project, and the `crew.py` file is where you define your agents and tasks.
## Customizing Your Project
To customize your project, you can:
- Modify `src/my_project/config/agents.yaml` to define your agents.
- Modify `src/my_project/config/tasks.yaml` to define your tasks.
- Modify `src/my_project/crew.py` to add your own logic, tools, and specific arguments.
- Modify `src/my_project/main.py` to add custom inputs for your agents and tasks.
- Add your environment variables into the `.env` file.
### Example: Defining Agents and Tasks
#### agents.yaml
```yaml
researcher:
role: >
Job Candidate Researcher
goal: >
Find potential candidates for the job
backstory: >
You are adept at finding the right candidates by exploring various online
resources. Your skill in identifying suitable candidates ensures the best
match for job positions.
```
#### tasks.yaml
```yaml
research_candidates_task:
description: >
Conduct thorough research to find potential candidates for the specified job.
Utilize various online resources and databases to gather a comprehensive list of potential candidates.
Ensure that the candidates meet the job requirements provided.
Job Requirements:
{job_requirements}
expected_output: >
A list of 10 potential candidates with their contact information and brief profiles highlighting their suitability.
agent: researcher # THIS NEEDS TO MATCH THE AGENT NAME IN THE AGENTS.YAML FILE AND THE AGENT DEFINED IN THE crew.py FILE
context: # THESE NEED TO MATCH THE TASK NAMES DEFINED ABOVE AND THE TASKS.YAML FILE AND THE TASK DEFINED IN THE crew.py FILE
- researcher
```
### Referencing Variables:
Annotated functions in `crew.py` are matched to YAML entries by name. For example, a task in `tasks.yaml` can reference an agent by the name of its annotated function. Ensure the annotated function name and the YAML reference are identical; otherwise, the task won't resolve the reference properly.
#### Example References
`agents.yaml`
```yaml
email_summarizer:
role: >
Email Summarizer
goal: >
Summarize emails into a concise and clear summary
backstory: >
You will create a 5 bullet point summary of the report
  llm: mixtral_llm
```
`tasks.yaml`
```yaml
email_summarizer_task:
description: >
Summarize the email into a 5 bullet point summary
expected_output: >
A 5 bullet point summary of the email
agent: email_summarizer
context:
- reporting_task
- research_task
```
Use the annotations to properly reference the agent and task in the `crew.py` file.
### Annotations include:
* `@agent`
* `@task`
* `@crew`
* `@tool`
* `@callback`
* `@output_json`
* `@output_pydantic`
* `@cache_handler`
`crew.py`
```python
# ...
@agent
def email_summarizer(self) -> Agent:
return Agent(
config=self.agents_config["email_summarizer"],
)
@task
def email_summarizer_task(self) -> Task:
return Task(
config=self.tasks_config["email_summarizer_task"],
)
# ...
```
## Installing Dependencies
To install the dependencies for your project, you can use `uv`. Running the following command is optional since when running `crewai run`, it will automatically install the dependencies for you.
```shell
$ cd my_project
$ crewai install (optional)
```
This will install the dependencies specified in the `pyproject.toml` file.
## Interpolating Variables
Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{variable}` will be replaced by the value of the variable in the `main.py` file.
#### tasks.yaml
```yaml
research_task:
description: >
Conduct a thorough research about the customer and competitors in the context
of {customer_domain}.
Make sure you find any interesting and relevant information given the
current year is 2024.
expected_output: >
A complete report on the customer and their customers and competitors,
including their demographics, preferences, market positioning and audience engagement.
```
#### main.py
```python
# main.py
def run():
inputs = {
"customer_domain": "crewai.com"
}
MyProjectCrew(inputs).crew().kickoff(inputs=inputs)
```
## Running Your Project
To run your project, use the following command:
```shell
$ crewai run
```
This will initialize your crew of AI agents and begin task execution as defined in your configuration in the `main.py` file.
### Replay Tasks from Latest Crew Kickoff
CrewAI now includes a replay feature that allows you to list the tasks from the last run and replay from a specific one. To use this feature, run:
```shell
$ crewai replay <task_id>
```
Replace `<task_id>` with the ID of the task you want to replay.
### Reset Crew Memory
If you need to reset the memory of your crew before running it again, you can do so by calling the reset memory feature:
```shell
$ crewai reset-memory
```
This will clear the crew's memory, allowing for a fresh start.
## Deploying Your Project
The easiest way to deploy your crew is through [CrewAI+](https://www.crewai.com/crewaiplus), where you can deploy your crew in a few clicks.

View File

@@ -25,9 +25,9 @@ It provides a dashboard for tracking agent performance, session replays, and cus
Additionally, AgentOps provides session drilldowns for viewing Crew agent interactions, LLM calls, and tool usage in real-time. Additionally, AgentOps provides session drilldowns for viewing Crew agent interactions, LLM calls, and tool usage in real-time.
This feature is useful for debugging and understanding how agents interact with users as well as other agents. This feature is useful for debugging and understanding how agents interact with users as well as other agents.
![Overview of a select series of agent session runs](/images/agentops-overview.png) ![Overview of a select series of agent session runs](images/agentops-overview.png)
![Overview of session drilldowns for examining agent runs](/images/agentops-session.png) ![Overview of session drilldowns for examining agent runs](images/agentops-session.png)
![Viewing a step-by-step agent replay execution graph](/images/agentops-replay.png) ![Viewing a step-by-step agent replay execution graph](images/agentops-replay.png)
### Features ### Features

View File

@@ -10,23 +10,24 @@ This guide provides detailed instructions on creating custom tools for the CrewA
incorporating the latest functionalities such as tool delegation, error handling, and dynamic tool calling. It also highlights the importance of collaboration tools, incorporating the latest functionalities such as tool delegation, error handling, and dynamic tool calling. It also highlights the importance of collaboration tools,
enabling agents to perform a wide range of actions. enabling agents to perform a wide range of actions.
### Prerequisites
Before creating your own tools, ensure you have the crewAI extra tools package installed:
```bash
pip install 'crewai[tools]'
```
### Subclassing `BaseTool` ### Subclassing `BaseTool`
To create a personalized tool, inherit from `BaseTool` and define the necessary attributes, including the `args_schema` for input validation, and the `_run` method. To create a personalized tool, inherit from `BaseTool` and define the necessary attributes and the `_run` method.
```python Code ```python Code
from typing import Type from crewai_tools import BaseTool
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class MyToolInput(BaseModel):
"""Input schema for MyCustomTool."""
argument: str = Field(..., description="Description of the argument.")
class MyCustomTool(BaseTool): class MyCustomTool(BaseTool):
name: str = "Name of my tool" name: str = "Name of my tool"
description: str = "What this tool does. It's vital for effective utilization." description: str = "What this tool does. It's vital for effective utilization."
args_schema: Type[BaseModel] = MyToolInput
def _run(self, argument: str) -> str: def _run(self, argument: str) -> str:
# Your tool's logic here # Your tool's logic here
@@ -39,7 +40,7 @@ Alternatively, you can use the tool decorator `@tool`. This approach allows you
offering a concise and efficient way to create specialized tools tailored to your needs. offering a concise and efficient way to create specialized tools tailored to your needs.
```python Code ```python Code
from crewai.tools import tool from crewai_tools import tool
@tool("Tool Name") @tool("Tool Name")
def my_simple_tool(question: str) -> str: def my_simple_tool(question: str) -> str:

View File

@@ -10,9 +10,9 @@ Langtrace is an open-source, external tool that helps you set up observability a
While not built directly into CrewAI, Langtrace can be used alongside CrewAI to gain deep visibility into the cost, latency, and performance of your CrewAI Agents. While not built directly into CrewAI, Langtrace can be used alongside CrewAI to gain deep visibility into the cost, latency, and performance of your CrewAI Agents.
This integration allows you to log hyperparameters, monitor performance regressions, and establish a process for continuous improvement of your Agents. This integration allows you to log hyperparameters, monitor performance regressions, and establish a process for continuous improvement of your Agents.
![Overview of a select series of agent session runs](/images/langtrace1.png) ![Overview of a select series of agent session runs](images/langtrace1.png)
![Overview of agent traces](/images/langtrace2.png) ![Overview of agent traces](images/langtrace2.png)
![Overview of llm traces in details](/images/langtrace3.png) ![Overview of llm traces in details](images/langtrace3.png)
## Setup Instructions ## Setup Instructions

Binary file not shown.

Before

Width:  |  Height:  |  Size: 104 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 50 KiB

View File

@@ -1,9 +1,11 @@
--- ---
title: Installation title: Installation & Setup
description: description:
icon: wrench icon: wrench
--- ---
## Install CrewAI
This guide will walk you through the installation process for CrewAI and its dependencies. This guide will walk you through the installation process for CrewAI and its dependencies.
CrewAI is a flexible and powerful AI framework that enables you to create and manage AI agents, tools, and tasks efficiently. CrewAI is a flexible and powerful AI framework that enables you to create and manage AI agents, tools, and tasks efficiently.
Let's get started! 🚀 Let's get started! 🚀
@@ -13,8 +15,17 @@ Let's get started! 🚀
</Tip> </Tip>
<Steps> <Steps>
<Step title="Install Poetry">
First, if you haven't already, install [Poetry](https://python-poetry.org/).
CrewAI uses Poetry for dependency management and package handling, offering a seamless setup and execution experience.
<CodeGroup>
```shell Terminal
pip install poetry
```
</CodeGroup>
</Step>
<Step title="Install CrewAI"> <Step title="Install CrewAI">
Install the main CrewAI package with the following command: Then, install the main CrewAI package:
<CodeGroup> <CodeGroup>
```shell Terminal ```shell Terminal
pip install crewai pip install crewai
@@ -34,29 +45,15 @@ Let's get started! 🚀
</CodeGroup> </CodeGroup>
</Step> </Step>
<Step title="Upgrade CrewAI"> <Step title="Upgrade CrewAI">
To upgrade CrewAI and CrewAI Tools to the latest version, run the following command To upgrade CrewAI and CrewAI Tools to the latest version, run the following command:
<CodeGroup> <CodeGroup>
```shell Terminal ```shell Terminal
pip install --upgrade crewai crewai-tools pip install --upgrade crewai crewai-tools
``` ```
</CodeGroup> </CodeGroup>
<Note>
1. If you're using an older version of CrewAI, you may receive a warning about using `Poetry` for dependency management.
![Error from older versions](./images/crewai-run-poetry-error.png)
2. In this case, you'll need to run the command below to update your project.
This command will migrate your project to use [UV](https://github.com/astral-sh/uv) and update the necessary files.
```shell Terminal
crewai update
```
3. After running the command above, you should see the following output:
![Successfully migrated to UV](./images/crewai-update.png)
4. You're all set! You can now proceed to the next step! 🎉
</Note>
</Step> </Step>
<Step title="Verify the installation"> <Step title="Verify the installation">
To verify that `crewai` and `crewai-tools` are installed correctly, run the following command To verify that `crewai` and `crewai-tools` are installed correctly, run the following command:
<CodeGroup> <CodeGroup>
```shell Terminal ```shell Terminal
pip freeze | grep crewai pip freeze | grep crewai

View File

@@ -45,5 +45,5 @@ By fostering collaborative intelligence, CrewAI empowers agents to work together
## Next Step ## Next Step
- [Install CrewAI](/installation) to get started with your first agent. - [Install CrewAI](/installation)

View File

@@ -66,17 +66,18 @@
"pages": [ "pages": [
"concepts/agents", "concepts/agents",
"concepts/tasks", "concepts/tasks",
"concepts/crews", "concepts/tools",
"concepts/flows",
"concepts/llms",
"concepts/processes", "concepts/processes",
"concepts/crews",
"concepts/collaboration", "concepts/collaboration",
"concepts/pipeline",
"concepts/training", "concepts/training",
"concepts/memory", "concepts/memory",
"concepts/planning", "concepts/planning",
"concepts/testing", "concepts/testing",
"concepts/flows",
"concepts/cli", "concepts/cli",
"concepts/tools", "concepts/llms",
"concepts/langchain-tools", "concepts/langchain-tools",
"concepts/llamaindex-tools" "concepts/llamaindex-tools"
] ]

View File

@@ -26,7 +26,6 @@ Follow the steps below to get crewing! 🚣‍♂️
<Step title="Modify your `agents.yaml` file"> <Step title="Modify your `agents.yaml` file">
<Tip> <Tip>
You can also modify the agents as needed to fit your use case or copy and paste as is to your project. You can also modify the agents as needed to fit your use case or copy and paste as is to your project.
Any variable interpolated in your `agents.yaml` and `tasks.yaml` files like `{topic}` will be replaced by the value of the variable in the `main.py` file.
</Tip> </Tip>
```yaml agents.yaml ```yaml agents.yaml
# src/latest_ai_development/config/agents.yaml # src/latest_ai_development/config/agents.yaml
@@ -125,7 +124,7 @@ Follow the steps below to get crewing! 🚣‍♂️
``` ```
</Step> </Step>
<Step title="Feel free to pass custom inputs to your crew"> <Step title="Feel free to pass custom inputs to your crew">
For example, you can pass the `topic` input to your crew to customize the research and reporting. For example, you can pass the `topic` input to your crew to customize the research and reporting to medical llms or any other topic.
```python main.py ```python main.py
#!/usr/bin/env python #!/usr/bin/env python
# src/latest_ai_development/main.py # src/latest_ai_development/main.py
@@ -234,74 +233,6 @@ Follow the steps below to get crewing! 🚣‍♂️
</Step> </Step>
</Steps> </Steps>
### Note on Consistency in Naming
The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
For example, you can reference the agent for specific tasks from `tasks.yaml` file.
This naming consistency allows CrewAI to automatically link your configurations with your code; otherwise, your task won't recognize the reference properly.
#### Example References
<Tip>
Note how we use the same name for the agent in the `agents.yaml` (`email_summarizer`) file as the method name in the `crew.py` (`email_summarizer`) file.
</Tip>
```yaml agents.yaml
email_summarizer:
role: >
Email Summarizer
goal: >
Summarize emails into a concise and clear summary
backstory: >
You will create a 5 bullet point summary of the report
llm: mixtal_llm
```
<Tip>
Note how we use the same name for the agent in the `tasks.yaml` (`email_summarizer_task`) file as the method name in the `crew.py` (`email_summarizer_task`) file.
</Tip>
```yaml tasks.yaml
email_summarizer_task:
description: >
Summarize the email into a 5 bullet point summary
expected_output: >
A 5 bullet point summary of the email
agent: email_summarizer
context:
- reporting_task
- research_task
```
Use the annotations to properly reference the agent and task in the `crew.py` file.
### Annotations include:
* `@agent`
* `@task`
* `@crew`
* `@tool`
* `@callback`
* `@output_json`
* `@output_pydantic`
* `@cache_handler`
```python crew.py
# ...
@agent
def email_summarizer(self) -> Agent:
return Agent(
config=self.agents_config["email_summarizer"],
)
@task
def email_summarizer_task(self) -> Task:
return Task(
config=self.tasks_config["email_summarizer_task"],
)
# ...
```
<Tip> <Tip>
In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process), In addition to the [sequential process](../how-to/sequential-process), you can use the [hierarchical process](../how-to/hierarchical-process),
which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results. which automatically assigns a manager to the defined crew to properly coordinate the planning and execution of tasks through delegation and validation of results.
@@ -310,7 +241,7 @@ You can learn more about the core concepts [here](/concepts).
### Replay Tasks from Latest Crew Kickoff ### Replay Tasks from Latest Crew Kickoff
CrewAI now includes a replay feature that allows you to list the tasks from the last run and replay from a specific one. To use this feature, run. CrewAI now includes a replay feature that allows you to list the tasks from the last run and replay from a specific one. To use this feature, run:
```shell ```shell
crewai replay <task_id> crewai replay <task_id>

View File

@@ -11,10 +11,10 @@ icon: eye
This tool is used to extract text from images. When passed to the agent it will extract the text from the image and then use it to generate a response, report or any other output. This tool is used to extract text from images. When passed to the agent it will extract the text from the image and then use it to generate a response, report or any other output.
The URL or the PATH of the image should be passed to the Agent. The URL or the PATH of the image should be passed to the Agent.
## Installation ## Installation
Install the crewai_tools package Install the crewai_tools package
```shell ```shell
pip install 'crewai[tools]' pip install 'crewai[tools]'
``` ```
@@ -44,6 +44,7 @@ def researcher(self) -> Agent:
The VisionTool requires the following arguments: The VisionTool requires the following arguments:
| Argument | Type | Description | | Argument | Type | Description |
| :----------------- | :------- | :------------------------------------------------------------------------------- | |:---------------|:---------|:-------------------------------------------------------------------------------------------------------------------------------------|
| **image_path_url** | `string` | **Mandatory**. The path to the image file from which text needs to be extracted. | | **image_path** | `string` | **Mandatory**. The path to the image file from which text needs to be extracted. |

View File

@@ -1,6 +1,6 @@
[project] [project]
name = "crewai" name = "crewai"
version = "0.76.9" version = "0.70.1"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks." description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md" readme = "README.md"
requires-python = ">=3.10,<=3.13" requires-python = ">=3.10,<=3.13"
@@ -16,19 +16,19 @@ dependencies = [
"opentelemetry-exporter-otlp-proto-http>=1.22.0", "opentelemetry-exporter-otlp-proto-http>=1.22.0",
"instructor>=1.3.3", "instructor>=1.3.3",
"regex>=2024.9.11", "regex>=2024.9.11",
"crewai-tools>=0.13.4", "crewai-tools>=0.12.1",
"click>=8.1.7", "click>=8.1.7",
"python-dotenv>=1.0.0", "python-dotenv>=1.0.0",
"appdirs>=1.4.4", "appdirs>=1.4.4",
"jsonref>=1.1.0", "jsonref>=1.1.0",
"agentops>=0.3.0",
"embedchain>=0.1.114",
"json-repair>=0.25.2", "json-repair>=0.25.2",
"auth0-python>=4.7.1", "auth0-python>=4.7.1",
"litellm>=1.44.22", "litellm>=1.44.22",
"pyvis>=0.3.2", "pyvis>=0.3.2",
"uv>=0.4.25", "uv>=0.4.18",
"tomli-w>=1.1.0", "tomli-w>=1.1.0",
"chromadb>=0.4.24",
"tomli>=2.0.2",
] ]
[project.urls] [project.urls]
@@ -37,7 +37,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI" Repository = "https://github.com/crewAIInc/crewAI"
[project.optional-dependencies] [project.optional-dependencies]
tools = ["crewai-tools>=0.13.4"] tools = ["crewai-tools>=0.12.1"]
agentops = ["agentops>=0.3.0"] agentops = ["agentops>=0.3.0"]
[tool.uv] [tool.uv]
@@ -52,7 +52,7 @@ dev-dependencies = [
"mkdocs-material-extensions>=1.3.1", "mkdocs-material-extensions>=1.3.1",
"pillow>=10.2.0", "pillow>=10.2.0",
"cairosvg>=2.7.1", "cairosvg>=2.7.1",
"crewai-tools>=0.13.4", "crewai-tools>=0.12.1",
"pytest>=8.0.0", "pytest>=8.0.0",
"pytest-vcr>=1.0.2", "pytest-vcr>=1.0.2",
"python-dotenv>=1.0.0", "python-dotenv>=1.0.0",

View File

@@ -14,5 +14,5 @@ warnings.filterwarnings(
category=UserWarning, category=UserWarning,
module="pydantic.main", module="pydantic.main",
) )
__version__ = "0.76.9" __version__ = "0.70.1"
__all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router", "LLM", "Flow"] __all__ = ["Agent", "Crew", "Process", "Task", "Pipeline", "Router", "LLM", "Flow"]

View File

@@ -1,7 +1,6 @@
import os import os
import shutil from inspect import signature
import subprocess from typing import Any, List, Optional, Union
from typing import Any, List, Literal, Optional, Union
from pydantic import Field, InstanceOf, PrivateAttr, model_validator from pydantic import Field, InstanceOf, PrivateAttr, model_validator
@@ -10,34 +9,24 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.crew_agent_executor import CrewAgentExecutor from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.llm import LLM from crewai.llm import LLM
from crewai.memory.contextual.contextual_memory import ContextualMemory from crewai.memory.contextual.contextual_memory import ContextualMemory
from crewai.tools.agent_tools.agent_tools import AgentTools from crewai.tools.agent_tools import AgentTools
from crewai.tools import BaseTool
from crewai.utilities import Converter, Prompts from crewai.utilities import Converter, Prompts
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
from crewai.utilities.token_counter_callback import TokenCalcHandler from crewai.utilities.token_counter_callback import TokenCalcHandler
from crewai.utilities.training_handler import CrewTrainingHandler from crewai.utilities.training_handler import CrewTrainingHandler
agentops = None
try:
import agentops # type: ignore # Name "agentops" already defined on line 21
from agentops import track_agent # type: ignore
except ImportError:
def mock_agent_ops_provider(): def track_agent():
def track_agent(*args, **kwargs):
def noop(f): def noop(f):
return f return f
return noop return noop
return track_agent
agentops = None
if os.environ.get("AGENTOPS_API_KEY"):
try:
from agentops import track_agent
except ImportError:
track_agent = mock_agent_ops_provider()
else:
track_agent = mock_agent_ops_provider()
@track_agent() @track_agent()
class Agent(BaseAgent): class Agent(BaseAgent):
@@ -114,10 +103,6 @@ class Agent(BaseAgent):
default=2, default=2,
description="Maximum number of retries for an agent to execute a task when an error occurs.", description="Maximum number of retries for an agent to execute a task when an error occurs.",
) )
code_execution_mode: Literal["safe", "unsafe"] = Field(
default="safe",
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
)
@model_validator(mode="after") @model_validator(mode="after")
def post_init_setup(self): def post_init_setup(self):
@@ -179,9 +164,6 @@ class Agent(BaseAgent):
if not self.agent_executor: if not self.agent_executor:
self._setup_agent_executor() self._setup_agent_executor()
if self.allow_code_execution:
self._validate_docker_installation()
return self return self
def _setup_agent_executor(self): def _setup_agent_executor(self):
@@ -193,7 +175,7 @@ class Agent(BaseAgent):
self, self,
task: Any, task: Any,
context: Optional[str] = None, context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None, tools: Optional[List[Any]] = None,
) -> str: ) -> str:
"""Execute a task with the agent. """Execute a task with the agent.
@@ -260,9 +242,7 @@ class Agent(BaseAgent):
return result return result
def create_agent_executor( def create_agent_executor(self, tools=None, task=None) -> None:
self, tools: Optional[List[BaseTool]] = None, task=None
) -> None:
"""Create an agent executor for the agent. """Create an agent executor for the agent.
Returns: Returns:
@@ -319,9 +299,7 @@ class Agent(BaseAgent):
try: try:
from crewai_tools import CodeInterpreterTool from crewai_tools import CodeInterpreterTool
# Set the unsafe_mode based on the code_execution_mode attribute return [CodeInterpreterTool()]
unsafe_mode = self.code_execution_mode == "unsafe"
return [CodeInterpreterTool(unsafe_mode=unsafe_mode)]
except ModuleNotFoundError: except ModuleNotFoundError:
self._logger.log( self._logger.log(
"info", "Coding tools not available. Install crewai_tools. " "info", "Coding tools not available. Install crewai_tools. "
@@ -335,7 +313,7 @@ class Agent(BaseAgent):
tools_list = [] tools_list = []
try: try:
# tentatively try to import from crewai_tools import BaseTool as CrewAITool # tentatively try to import from crewai_tools import BaseTool as CrewAITool
from crewai.tools import BaseTool as CrewAITool from crewai_tools import BaseTool as CrewAITool
for tool in tools: for tool in tools:
if isinstance(tool, CrewAITool): if isinstance(tool, CrewAITool):
@@ -394,42 +372,33 @@ class Agent(BaseAgent):
return description return description
def _render_text_description_and_args(self, tools: List[BaseTool]) -> str: def _render_text_description_and_args(self, tools: List[Any]) -> str:
"""Render the tool name, description, and args in plain text. """Render the tool name, description, and args in plain text.
Output will be in the format of: Output will be in the format of:
.. code-block:: markdown .. code-block:: markdown
search: This tool is used for search, args: {"query": {"type": "string"}} search: This tool is used for search, args: {"query": {"type": "string"}}
calculator: This tool is used for math, \ calculator: This tool is used for math, \
args: {"expression": {"type": "string"}} args: {"expression": {"type": "string"}}
""" """
tool_strings = [] tool_strings = []
for tool in tools: for tool in tools:
tool_strings.append(tool.description) args_schema = str(tool.args)
if hasattr(tool, "func") and tool.func:
sig = signature(tool.func)
description = (
f"Tool Name: {tool.name}{sig}\nTool Description: {tool.description}"
)
else:
description = (
f"Tool Name: {tool.name}\nTool Description: {tool.description}"
)
tool_strings.append(f"{description}\nTool Arguments: {args_schema}")
return "\n".join(tool_strings) return "\n".join(tool_strings)
def _validate_docker_installation(self) -> None:
"""Check if Docker is installed and running."""
if not shutil.which("docker"):
raise RuntimeError(
f"Docker is not installed. Please install Docker to use code execution with agent: {self.role}"
)
try:
subprocess.run(
["docker", "info"],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except subprocess.CalledProcessError:
raise RuntimeError(
f"Docker is not running. Please start Docker to use code execution with agent: {self.role}"
)
@staticmethod @staticmethod
def __tools_names(tools) -> str: def __tools_names(tools) -> str:
return ", ".join([t.name for t in tools]) return ", ".join([t.name for t in tools])

View File

@@ -18,7 +18,6 @@ from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
from crewai.agents.cache.cache_handler import CacheHandler from crewai.agents.cache.cache_handler import CacheHandler
from crewai.agents.tools_handler import ToolsHandler from crewai.agents.tools_handler import ToolsHandler
from crewai.tools import BaseTool
from crewai.utilities import I18N, Logger, RPMController from crewai.utilities import I18N, Logger, RPMController
from crewai.utilities.config import process_config from crewai.utilities.config import process_config
@@ -50,11 +49,11 @@ class BaseAgent(ABC, BaseModel):
Methods: Methods:
execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[BaseTool]] = None) -> str: execute_task(task: Any, context: Optional[str] = None, tools: Optional[List[Any]] = None) -> str:
Abstract method to execute a task. Abstract method to execute a task.
create_agent_executor(tools=None) -> None: create_agent_executor(tools=None) -> None:
Abstract method to create an agent executor. Abstract method to create an agent executor.
_parse_tools(tools: List[BaseTool]) -> List[Any]: _parse_tools(tools: List[Any]) -> List[Any]:
Abstract method to parse tools. Abstract method to parse tools.
get_delegation_tools(agents: List["BaseAgent"]): get_delegation_tools(agents: List["BaseAgent"]):
Abstract method to set the agents task tools for handling delegation and question asking to other agents in crew. Abstract method to set the agents task tools for handling delegation and question asking to other agents in crew.
@@ -106,7 +105,7 @@ class BaseAgent(ABC, BaseModel):
default=False, default=False,
description="Enable agent to delegate and ask questions among each other.", description="Enable agent to delegate and ask questions among each other.",
) )
tools: Optional[List[BaseTool]] = Field( tools: Optional[List[Any]] = Field(
default_factory=list, description="Tools at agents' disposal" default_factory=list, description="Tools at agents' disposal"
) )
max_iter: Optional[int] = Field( max_iter: Optional[int] = Field(
@@ -189,7 +188,7 @@ class BaseAgent(ABC, BaseModel):
self, self,
task: Any, task: Any,
context: Optional[str] = None, context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None, tools: Optional[List[Any]] = None,
) -> str: ) -> str:
pass pass
@@ -198,11 +197,11 @@ class BaseAgent(ABC, BaseModel):
pass pass
@abstractmethod @abstractmethod
def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]: def _parse_tools(self, tools: List[Any]) -> List[Any]:
pass pass
@abstractmethod @abstractmethod
def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[BaseTool]: def get_delegation_tools(self, agents: List["BaseAgent"]) -> List[Any]:
"""Set the task tools that init BaseAgenTools class.""" """Set the task tools that init BaseAgenTools class."""
pass pass

View File

@@ -17,7 +17,7 @@ if TYPE_CHECKING:
class CrewAgentExecutorMixin: class CrewAgentExecutorMixin:
crew: Optional["Crew"] crew: Optional["Crew"]
agent: Optional["BaseAgent"] crew_agent: Optional["BaseAgent"]
task: Optional["Task"] task: Optional["Task"]
iterations: int iterations: int
have_forced_answer: bool have_forced_answer: bool
@@ -33,9 +33,9 @@ class CrewAgentExecutorMixin:
"""Create and save a short-term memory item if conditions are met.""" """Create and save a short-term memory item if conditions are met."""
if ( if (
self.crew self.crew
and self.agent and self.crew_agent
and self.task and self.task
and "Action: Delegate work to coworker" not in output.text and "Action: Delegate work to coworker" not in output.log
): ):
try: try:
if ( if (
@@ -43,11 +43,11 @@ class CrewAgentExecutorMixin:
and self.crew._short_term_memory and self.crew._short_term_memory
): ):
self.crew._short_term_memory.save( self.crew._short_term_memory.save(
value=output.text, value=output.log,
metadata={ metadata={
"observation": self.task.description, "observation": self.task.description,
}, },
agent=self.agent.role, agent=self.crew_agent.role,
) )
except Exception as e: except Exception as e:
print(f"Failed to add to short term memory: {e}") print(f"Failed to add to short term memory: {e}")
@@ -61,18 +61,18 @@ class CrewAgentExecutorMixin:
and self.crew._long_term_memory and self.crew._long_term_memory
and self.crew._entity_memory and self.crew._entity_memory
and self.task and self.task
and self.agent and self.crew_agent
): ):
try: try:
ltm_agent = TaskEvaluator(self.agent) ltm_agent = TaskEvaluator(self.crew_agent)
evaluation = ltm_agent.evaluate(self.task, output.text) evaluation = ltm_agent.evaluate(self.task, output.log)
if isinstance(evaluation, ConverterError): if isinstance(evaluation, ConverterError):
return return
long_term_memory = LongTermMemoryItem( long_term_memory = LongTermMemoryItem(
task=self.task.description, task=self.task.description,
agent=self.agent.role, agent=self.crew_agent.role,
quality=evaluation.quality, quality=evaluation.quality,
datetime=str(time.time()), datetime=str(time.time()),
expected_output=self.task.expected_output, expected_output=self.task.expected_output,

View File

@@ -1,19 +1,22 @@
from typing import Optional, Union from abc import ABC, abstractmethod
from pydantic import Field from typing import List, Optional, Union
from pydantic import BaseModel, Field
from crewai.tools.base_tool import BaseTool
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.task import Task from crewai.task import Task
from crewai.utilities import I18N from crewai.utilities import I18N
class BaseAgentTool(BaseTool): class BaseAgentTools(BaseModel, ABC):
"""Base class for agent-related tools""" """Default tools around agent delegation"""
agents: list[BaseAgent] = Field(description="List of available agents") agents: List[BaseAgent] = Field(description="List of agents in this crew.")
i18n: I18N = Field( i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
default_factory=I18N, description="Internationalization settings"
) @abstractmethod
def tools(self):
pass
def _get_coworker(self, coworker: Optional[str], **kwargs) -> Optional[str]: def _get_coworker(self, coworker: Optional[str], **kwargs) -> Optional[str]:
coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker") coworker = coworker or kwargs.get("co_worker") or kwargs.get("coworker")
@@ -21,11 +24,27 @@ class BaseAgentTool(BaseTool):
is_list = coworker.startswith("[") and coworker.endswith("]") is_list = coworker.startswith("[") and coworker.endswith("]")
if is_list: if is_list:
coworker = coworker[1:-1].split(",")[0] coworker = coworker[1:-1].split(",")[0]
return coworker return coworker
def delegate_work(
self, task: str, context: str, coworker: Optional[str] = None, **kwargs
):
"""Useful to delegate a specific task to a coworker passing all necessary context and names."""
coworker = self._get_coworker(coworker, **kwargs)
return self._execute(coworker, task, context)
def ask_question(
self, question: str, context: str, coworker: Optional[str] = None, **kwargs
):
"""Useful to ask a question, opinion or take from a coworker passing all necessary context and names."""
coworker = self._get_coworker(coworker, **kwargs)
return self._execute(coworker, question, context)
def _execute( def _execute(
self, agent_name: Union[str, None], task: str, context: Union[str, None] self, agent_name: Union[str, None], task: str, context: Union[str, None]
) -> str: ):
"""Execute the command."""
try: try:
if agent_name is None: if agent_name is None:
agent_name = "" agent_name = ""
@@ -38,6 +57,7 @@ class BaseAgentTool(BaseTool):
# when it should look like this: # when it should look like this:
# {"task": "....", "coworker": "...."} # {"task": "....", "coworker": "...."}
agent_name = agent_name.casefold().replace('"', "").replace("\n", "") agent_name = agent_name.casefold().replace('"', "").replace("\n", "")
agent = [ # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None") agent = [ # type: ignore # Incompatible types in assignment (expression has type "list[BaseAgent]", variable has type "str | None")
available_agent available_agent
for available_agent in self.agents for available_agent in self.agents

View File

@@ -2,7 +2,6 @@ import json
import re import re
from typing import Any, Dict, List, Union from typing import Any, Dict, List, Union
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin from crewai.agents.agent_builder.base_agent_executor_mixin import CrewAgentExecutorMixin
from crewai.agents.parser import ( from crewai.agents.parser import (
FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE, FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE,
@@ -30,7 +29,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
llm: Any, llm: Any,
task: Any, task: Any,
crew: Any, crew: Any,
agent: BaseAgent, agent: Any,
prompt: dict[str, str], prompt: dict[str, str],
max_iter: int, max_iter: int,
tools: List[Any], tools: List[Any],
@@ -104,8 +103,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
if self.crew and self.crew._train: if self.crew and self.crew._train:
self._handle_crew_training_output(formatted_answer) self._handle_crew_training_output(formatted_answer)
self._create_short_term_memory(formatted_answer)
self._create_long_term_memory(formatted_answer)
return {"output": formatted_answer.output} return {"output": formatted_answer.output}
def _invoke_loop(self, formatted_answer=None): def _invoke_loop(self, formatted_answer=None):
@@ -178,8 +176,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
return formatted_answer return formatted_answer
def _show_start_logs(self): def _show_start_logs(self):
if self.agent is None:
raise ValueError("Agent cannot be None")
if self.agent.verbose or ( if self.agent.verbose or (
hasattr(self, "crew") and getattr(self.crew, "verbose", False) hasattr(self, "crew") and getattr(self.crew, "verbose", False)
): ):
@@ -192,8 +188,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
) )
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]): def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
if self.agent is None:
raise ValueError("Agent cannot be None")
if self.agent.verbose or ( if self.agent.verbose or (
hasattr(self, "crew") and getattr(self.crew, "verbose", False) hasattr(self, "crew") and getattr(self.crew, "verbose", False)
): ):
@@ -312,7 +306,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
self, result: AgentFinish, human_feedback: str | None = None self, result: AgentFinish, human_feedback: str | None = None
) -> None: ) -> None:
"""Function to handle the process of the training data.""" """Function to handle the process of the training data."""
agent_id = str(self.agent.id) # type: ignore agent_id = str(self.agent.id)
# Load training data # Load training data
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE) training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
@@ -323,9 +317,9 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
if self.crew is not None and hasattr(self.crew, "_train_iteration"): if self.crew is not None and hasattr(self.crew, "_train_iteration"):
train_iteration = self.crew._train_iteration train_iteration = self.crew._train_iteration
if agent_id in training_data and isinstance(train_iteration, int): if agent_id in training_data and isinstance(train_iteration, int):
training_data[agent_id][train_iteration][ training_data[agent_id][train_iteration]["improved_output"] = (
"improved_output" result.output
] = result.output )
training_handler.save(training_data) training_handler.save(training_data)
else: else:
self._logger.log( self._logger.log(
@@ -345,7 +339,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
"initial_output": result.output, "initial_output": result.output,
"human_feedback": human_feedback, "human_feedback": human_feedback,
"agent": agent_id, "agent": agent_id,
"agent_role": self.agent.role, # type: ignore "agent_role": self.agent.role,
} }
if self.crew is not None and hasattr(self.crew, "_train_iteration"): if self.crew is not None and hasattr(self.crew, "_train_iteration"):
train_iteration = self.crew._train_iteration train_iteration = self.crew._train_iteration

View File

@@ -1,6 +1,6 @@
from typing import Any, Optional, Union from typing import Any, Optional, Union
from ..tools.cache_tools.cache_tools import CacheTools from ..tools.cache_tools import CacheTools
from ..tools.tool_calling import InstructorToolCalling, ToolCalling from ..tools.tool_calling import InstructorToolCalling, ToolCalling
from .cache.cache_handler import CacheHandler from .cache.cache_handler import CacheHandler

View File

@@ -1,70 +0,0 @@
from pathlib import Path
import click
from crewai.cli.utils import copy_template
def add_crew_to_flow(crew_name: str) -> None:
"""Add a new crew to the current flow."""
# Check if pyproject.toml exists in the current directory
if not Path("pyproject.toml").exists():
print("This command must be run from the root of a flow project.")
raise click.ClickException(
"This command must be run from the root of a flow project."
)
# Determine the flow folder based on the current directory
flow_folder = Path.cwd()
crews_folder = flow_folder / "src" / flow_folder.name / "crews"
if not crews_folder.exists():
print("Crews folder does not exist in the current flow.")
raise click.ClickException("Crews folder does not exist in the current flow.")
# Create the crew within the flow's crews directory
create_embedded_crew(crew_name, parent_folder=crews_folder)
click.echo(
f"Crew {crew_name} added to the current flow successfully!",
)
def create_embedded_crew(crew_name: str, parent_folder: Path) -> None:
"""Create a new crew within an existing flow project."""
folder_name = crew_name.replace(" ", "_").replace("-", "_").lower()
class_name = crew_name.replace("_", " ").replace("-", " ").title().replace(" ", "")
crew_folder = parent_folder / folder_name
if crew_folder.exists():
if not click.confirm(
f"Crew {folder_name} already exists. Do you want to override it?"
):
click.secho("Operation cancelled.", fg="yellow")
return
click.secho(f"Overriding crew {folder_name}...", fg="green", bold=True)
else:
click.secho(f"Creating crew {folder_name}...", fg="green", bold=True)
crew_folder.mkdir(parents=True)
# Create config and crew.py files
config_folder = crew_folder / "config"
config_folder.mkdir(exist_ok=True)
templates_dir = Path(__file__).parent / "templates" / "crew"
config_template_files = ["agents.yaml", "tasks.yaml"]
crew_template_file = f"{folder_name}_crew.py" # Updated file name
for file_name in config_template_files:
src_file = templates_dir / "config" / file_name
dst_file = config_folder / file_name
copy_template(src_file, dst_file, crew_name, class_name, folder_name)
src_file = templates_dir / "crew.py"
dst_file = crew_folder / crew_template_file
copy_template(src_file, dst_file, crew_name, class_name, folder_name)
click.secho(
f"Crew {crew_name} added to the flow successfully!", fg="green", bold=True
)

View File

@@ -3,7 +3,6 @@ from typing import Optional
import click import click
import pkg_resources import pkg_resources
from crewai.cli.add_crew_to_flow import add_crew_to_flow
from crewai.cli.create_crew import create_crew from crewai.cli.create_crew import create_crew
from crewai.cli.create_flow import create_flow from crewai.cli.create_flow import create_flow
from crewai.cli.create_pipeline import create_pipeline from crewai.cli.create_pipeline import create_pipeline
@@ -15,11 +14,11 @@ from .authentication.main import AuthenticationCommand
from .deploy.main import DeployCommand from .deploy.main import DeployCommand
from .evaluate_crew import evaluate_crew from .evaluate_crew import evaluate_crew
from .install_crew import install_crew from .install_crew import install_crew
from .kickoff_flow import kickoff_flow
from .plot_flow import plot_flow from .plot_flow import plot_flow
from .replay_from_task import replay_task_command from .replay_from_task import replay_task_command
from .reset_memories_command import reset_memories_command from .reset_memories_command import reset_memories_command
from .run_crew import run_crew from .run_crew import run_crew
from .run_flow import run_flow
from .tools.main import ToolCommand from .tools.main import ToolCommand
from .train_crew import train_crew from .train_crew import train_crew
from .update_crew import update_crew from .update_crew import update_crew
@@ -33,12 +32,10 @@ def crewai():
@crewai.command() @crewai.command()
@click.argument("type", type=click.Choice(["crew", "pipeline", "flow"])) @click.argument("type", type=click.Choice(["crew", "pipeline", "flow"]))
@click.argument("name") @click.argument("name")
@click.option("--provider", type=str, help="The provider to use for the crew") def create(type, name):
@click.option("--skip_provider", is_flag=True, help="Skip provider validation")
def create(type, name, provider, skip_provider=False):
"""Create a new crew, pipeline, or flow.""" """Create a new crew, pipeline, or flow."""
if type == "crew": if type == "crew":
create_crew(name, provider, skip_provider) create_crew(name)
elif type == "pipeline": elif type == "pipeline":
create_pipeline(name) create_pipeline(name)
elif type == "flow": elif type == "flow":
@@ -179,16 +176,10 @@ def test(n_iterations: int, model: str):
evaluate_crew(n_iterations, model) evaluate_crew(n_iterations, model)
@crewai.command( @crewai.command()
context_settings=dict( def install():
ignore_unknown_options=True,
allow_extra_args=True,
)
)
@click.pass_context
def install(context):
"""Install the Crew.""" """Install the Crew."""
install_crew(context.args) install_crew()
@crewai.command() @crewai.command()
@@ -313,11 +304,11 @@ def flow():
pass pass
@flow.command(name="kickoff") @flow.command(name="run")
def flow_run(): def flow_run():
"""Kickoff the Flow.""" """Run the Flow."""
click.echo("Running the Flow") click.echo("Running the Flow")
kickoff_flow() run_flow()
@flow.command(name="plot") @flow.command(name="plot")
@@ -327,13 +318,5 @@ def flow_plot():
plot_flow() plot_flow()
@flow.command(name="add-crew")
@click.argument("crew_name")
def flow_add_crew(crew_name):
"""Add a crew to an existing flow."""
click.echo(f"Adding crew {crew_name} to the flow")
add_crew_to_flow(crew_name)
if __name__ == "__main__": if __name__ == "__main__":
crewai() crewai()

View File

@@ -1,38 +0,0 @@
import json
from pathlib import Path
from pydantic import BaseModel, Field
from typing import Optional
DEFAULT_CONFIG_PATH = Path.home() / ".config" / "crewai" / "settings.json"
class Settings(BaseModel):
tool_repository_username: Optional[str] = Field(None, description="Username for interacting with the Tool Repository")
tool_repository_password: Optional[str] = Field(None, description="Password for interacting with the Tool Repository")
config_path: Path = Field(default=DEFAULT_CONFIG_PATH, exclude=True)
def __init__(self, config_path: Path = DEFAULT_CONFIG_PATH, **data):
"""Load Settings from config path"""
config_path.parent.mkdir(parents=True, exist_ok=True)
file_data = {}
if config_path.is_file():
try:
with config_path.open("r") as f:
file_data = json.load(f)
except json.JSONDecodeError:
file_data = {}
merged_data = {**file_data, **data}
super().__init__(config_path=config_path, **merged_data)
def dump(self) -> None:
"""Save current settings to settings.json"""
if self.config_path.is_file():
with self.config_path.open("r") as f:
existing_data = json.load(f)
else:
existing_data = {}
updated_data = {**existing_data, **self.model_dump(exclude_unset=True)}
with self.config_path.open("w") as f:
json.dump(updated_data, f, indent=4)

View File

@@ -1,19 +0,0 @@
ENV_VARS = {
'openai': ['OPENAI_API_KEY'],
'anthropic': ['ANTHROPIC_API_KEY'],
'gemini': ['GEMINI_API_KEY'],
'groq': ['GROQ_API_KEY'],
'ollama': ['FAKE_KEY'],
}
PROVIDERS = ['openai', 'anthropic', 'gemini', 'groq', 'ollama']
MODELS = {
'openai': ['gpt-4', 'gpt-4o', 'gpt-4o-mini', 'o1-mini', 'o1-preview'],
'anthropic': ['claude-3-5-sonnet-20240620', 'claude-3-sonnet-20240229', 'claude-3-opus-20240229', 'claude-3-haiku-20240307'],
'gemini': ['gemini-1.5-flash', 'gemini-1.5-pro', 'gemini-gemma-2-9b-it', 'gemini-gemma-2-27b-it'],
'groq': ['llama-3.1-8b-instant', 'llama-3.1-70b-versatile', 'llama-3.1-405b-reasoning', 'gemma2-9b-it', 'gemma-7b-it'],
'ollama': ['llama3.1', 'mixtral'],
}
JSON_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"

View File

@@ -1,19 +1,12 @@
import sys
from pathlib import Path from pathlib import Path
import click import click
from crewai.cli.constants import ENV_VARS from crewai.cli.utils import copy_template
from crewai.cli.provider import (
PROVIDERS,
get_provider_data,
select_model,
select_provider,
)
from crewai.cli.utils import copy_template, load_env_vars, write_env_file
def create_folder_structure(name, parent_folder=None): def create_crew(name, parent_folder=None):
"""Create a new crew."""
folder_name = name.replace(" ", "_").replace("-", "_").lower() folder_name = name.replace(" ", "_").replace("-", "_").lower()
class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "") class_name = name.replace("_", " ").replace("-", " ").title().replace(" ", "")
@@ -22,19 +15,11 @@ def create_folder_structure(name, parent_folder=None):
else: else:
folder_path = Path(folder_name) folder_path = Path(folder_name)
if folder_path.exists(): click.secho(
if not click.confirm( f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
f"Folder {folder_name} already exists. Do you want to override it?" fg="green",
): bold=True,
click.secho("Operation cancelled.", fg="yellow") )
sys.exit(0)
click.secho(f"Overriding folder {folder_name}...", fg="green", bold=True)
else:
click.secho(
f"Creating {'crew' if parent_folder else 'folder'} {folder_name}...",
fg="green",
bold=True,
)
if not folder_path.exists(): if not folder_path.exists():
folder_path.mkdir(parents=True) folder_path.mkdir(parents=True)
@@ -43,126 +28,19 @@ def create_folder_structure(name, parent_folder=None):
(folder_path / "src" / folder_name).mkdir(parents=True) (folder_path / "src" / folder_name).mkdir(parents=True)
(folder_path / "src" / folder_name / "tools").mkdir(parents=True) (folder_path / "src" / folder_name / "tools").mkdir(parents=True)
(folder_path / "src" / folder_name / "config").mkdir(parents=True) (folder_path / "src" / folder_name / "config").mkdir(parents=True)
with open(folder_path / ".env", "w") as file:
return folder_path, folder_name, class_name file.write("OPENAI_API_KEY=YOUR_API_KEY")
else:
click.secho(
def copy_template_files(folder_path, name, class_name, parent_folder): f"\tFolder {folder_name} already exists. Please choose a different name.",
package_dir = Path(__file__).parent fg="red",
templates_dir = package_dir / "templates" / "crew"
root_template_files = (
[".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
)
tools_template_files = ["tools/custom_tool.py", "tools/__init__.py"]
config_template_files = ["config/agents.yaml", "config/tasks.yaml"]
src_template_files = (
["__init__.py", "main.py", "crew.py"] if not parent_folder else ["crew.py"]
)
for file_name in root_template_files:
src_file = templates_dir / file_name
dst_file = folder_path / file_name
copy_template(src_file, dst_file, name, class_name, folder_path.name)
src_folder = (
folder_path / "src" / folder_path.name if not parent_folder else folder_path
)
for file_name in src_template_files:
src_file = templates_dir / file_name
dst_file = src_folder / file_name
copy_template(src_file, dst_file, name, class_name, folder_path.name)
if not parent_folder:
for file_name in tools_template_files + config_template_files:
src_file = templates_dir / file_name
dst_file = src_folder / file_name
copy_template(src_file, dst_file, name, class_name, folder_path.name)
def create_crew(name, provider=None, skip_provider=False, parent_folder=None):
folder_path, folder_name, class_name = create_folder_structure(name, parent_folder)
env_vars = load_env_vars(folder_path)
if not skip_provider:
if not provider:
provider_models = get_provider_data()
if not provider_models:
return
existing_provider = None
for provider, env_keys in ENV_VARS.items():
if any(key in env_vars for key in env_keys):
existing_provider = provider
break
if existing_provider:
if not click.confirm(
f"Found existing environment variable configuration for {existing_provider.capitalize()}. Do you want to override it?"
):
click.secho("Keeping existing provider configuration.", fg="yellow")
return
provider_models = get_provider_data()
if not provider_models:
return
while True:
selected_provider = select_provider(provider_models)
if selected_provider is None: # User typed 'q'
click.secho("Exiting...", fg="yellow")
sys.exit(0)
if selected_provider: # Valid selection
break
click.secho(
"No provider selected. Please try again or press 'q' to exit.", fg="red"
)
while True:
selected_model = select_model(selected_provider, provider_models)
if selected_model is None: # User typed 'q'
click.secho("Exiting...", fg="yellow")
sys.exit(0)
if selected_model: # Valid selection
break
click.secho(
"No model selected. Please try again or press 'q' to exit.", fg="red"
)
if selected_provider in PROVIDERS:
api_key_var = ENV_VARS[selected_provider][0]
else:
api_key_var = click.prompt(
f"Enter the environment variable name for your {selected_provider.capitalize()} API key",
type=str,
default="",
)
api_key_value = ""
click.echo(
f"Enter your {selected_provider.capitalize()} API key (press Enter to skip): ",
nl=False,
) )
try: return
api_key_value = input()
except (KeyboardInterrupt, EOFError):
api_key_value = ""
if api_key_value.strip():
env_vars = {api_key_var: api_key_value}
write_env_file(folder_path, env_vars)
click.secho("API key saved to .env file", fg="green")
else:
click.secho(
"No API key provided. Skipping .env file creation.", fg="yellow"
)
env_vars["MODEL"] = selected_model
click.secho(f"Selected model: {selected_model}", fg="green")
package_dir = Path(__file__).parent package_dir = Path(__file__).parent
templates_dir = package_dir / "templates" / "crew" templates_dir = package_dir / "templates" / "crew"
# List of template files to copy
root_template_files = ( root_template_files = (
[".gitignore", "pyproject.toml", "README.md"] if not parent_folder else [] [".gitignore", "pyproject.toml", "README.md"] if not parent_folder else []
) )

View File

@@ -3,13 +3,12 @@ import subprocess
import click import click
def install_crew(proxy_options: list[str]) -> None: def install_crew() -> None:
""" """
Install the crew by running the UV command to lock and install. Install the crew by running the UV command to lock and install.
""" """
try: try:
command = ["uv", "sync"] + proxy_options subprocess.run(["uv", "sync"], check=True, capture_output=False, text=True)
subprocess.run(command, check=True, capture_output=False, text=True)
except subprocess.CalledProcessError as e: except subprocess.CalledProcessError as e:
click.echo(f"An error occurred while running the crew: {e}", err=True) click.echo(f"An error occurred while running the crew: {e}", err=True)

View File

@@ -7,7 +7,7 @@ def plot_flow() -> None:
""" """
Plot the flow by running a command in the UV environment. Plot the flow by running a command in the UV environment.
""" """
command = ["uv", "run", "plot"] command = ["uv", "run", "plot_flow"]
try: try:
result = subprocess.run(command, capture_output=False, text=True, check=True) result = subprocess.run(command, capture_output=False, text=True, check=True)

View File

@@ -1,227 +0,0 @@
import json
import time
from collections import defaultdict
from pathlib import Path
import click
import requests
from crewai.cli.constants import JSON_URL, MODELS, PROVIDERS
def select_choice(prompt_message, choices):
"""
Presents a list of choices to the user and prompts them to select one.
Args:
- prompt_message (str): The message to display to the user before presenting the choices.
- choices (list): A list of options to present to the user.
Returns:
- str: The selected choice from the list, or None if the user chooses to quit.
"""
provider_models = get_provider_data()
if not provider_models:
return
click.secho(prompt_message, fg="cyan")
for idx, choice in enumerate(choices, start=1):
click.secho(f"{idx}. {choice}", fg="cyan")
click.secho("q. Quit", fg="cyan")
while True:
choice = click.prompt(
"Enter the number of your choice or 'q' to quit", type=str
)
if choice.lower() == "q":
return None
try:
selected_index = int(choice) - 1
if 0 <= selected_index < len(choices):
return choices[selected_index]
except ValueError:
pass
click.secho(
"Invalid selection. Please select a number between 1 and 6 or 'q' to quit.",
fg="red",
)
def select_provider(provider_models):
"""
Presents a list of providers to the user and prompts them to select one.
Args:
- provider_models (dict): A dictionary of provider models.
Returns:
- str: The selected provider
- None: If user explicitly quits
"""
predefined_providers = [p.lower() for p in PROVIDERS]
all_providers = sorted(set(predefined_providers + list(provider_models.keys())))
provider = select_choice(
"Select a provider to set up:", predefined_providers + ["other"]
)
if provider is None: # User typed 'q'
return None
if provider == "other":
provider = select_choice("Select a provider from the full list:", all_providers)
if provider is None: # User typed 'q'
return None
return provider.lower() if provider else False
def select_model(provider, provider_models):
"""
Presents a list of models for a given provider to the user and prompts them to select one.
Args:
- provider (str): The provider for which to select a model.
- provider_models (dict): A dictionary of provider models.
Returns:
- str: The selected model, or None if the operation is aborted or an invalid selection is made.
"""
predefined_providers = [p.lower() for p in PROVIDERS]
if provider in predefined_providers:
available_models = MODELS.get(provider, [])
else:
available_models = provider_models.get(provider, [])
if not available_models:
click.secho(f"No models available for provider '{provider}'.", fg="red")
return None
selected_model = select_choice(
f"Select a model to use for {provider.capitalize()}:", available_models
)
return selected_model
def load_provider_data(cache_file, cache_expiry):
"""
Loads provider data from a cache file if it exists and is not expired. If the cache is expired or corrupted, it fetches the data from the web.
Args:
- cache_file (Path): The path to the cache file.
- cache_expiry (int): The cache expiry time in seconds.
Returns:
- dict or None: The loaded provider data or None if the operation fails.
"""
current_time = time.time()
if (
cache_file.exists()
and (current_time - cache_file.stat().st_mtime) < cache_expiry
):
data = read_cache_file(cache_file)
if data:
return data
click.secho(
"Cache is corrupted. Fetching provider data from the web...", fg="yellow"
)
else:
click.secho(
"Cache expired or not found. Fetching provider data from the web...",
fg="cyan",
)
return fetch_provider_data(cache_file)
def read_cache_file(cache_file):
"""
Reads and returns the JSON content from a cache file. Returns None if the file contains invalid JSON.
Args:
- cache_file (Path): The path to the cache file.
Returns:
- dict or None: The JSON content of the cache file or None if the JSON is invalid.
"""
try:
with open(cache_file, "r") as f:
return json.load(f)
except json.JSONDecodeError:
return None
def fetch_provider_data(cache_file):
"""
Fetches provider data from a specified URL and caches it to a file.
Args:
- cache_file (Path): The path to the cache file.
Returns:
- dict or None: The fetched provider data or None if the operation fails.
"""
try:
response = requests.get(JSON_URL, stream=True, timeout=10)
response.raise_for_status()
data = download_data(response)
with open(cache_file, "w") as f:
json.dump(data, f)
return data
except requests.RequestException as e:
click.secho(f"Error fetching provider data: {e}", fg="red")
except json.JSONDecodeError:
click.secho("Error parsing provider data. Invalid JSON format.", fg="red")
return None
def download_data(response):
"""
Downloads data from a given HTTP response and returns the JSON content.
Args:
- response (requests.Response): The HTTP response object.
Returns:
- dict: The JSON content of the response.
"""
total_size = int(response.headers.get("content-length", 0))
block_size = 8192
data_chunks = []
with click.progressbar(
length=total_size, label="Downloading", show_pos=True
) as progress_bar:
for chunk in response.iter_content(block_size):
if chunk:
data_chunks.append(chunk)
progress_bar.update(len(chunk))
data_content = b"".join(data_chunks)
return json.loads(data_content.decode("utf-8"))
def get_provider_data():
"""
Retrieves provider data from a cache file, filters out models based on provider criteria, and returns a dictionary of providers mapped to their models.
Returns:
- dict or None: A dictionary of providers mapped to their models or None if the operation fails.
"""
cache_dir = Path.home() / ".crewai"
cache_dir.mkdir(exist_ok=True)
cache_file = cache_dir / "provider_cache.json"
cache_expiry = 24 * 3600
data = load_provider_data(cache_file, cache_expiry)
if not data:
return None
provider_models = defaultdict(list)
for model_name, properties in data.items():
provider = properties.get("litellm_provider", "").strip().lower()
if "http" in provider or provider == "other":
continue
if provider:
provider_models[provider].append(model_name)
return provider_models

View File

@@ -1,9 +1,7 @@
import subprocess import subprocess
import click import click
from packaging import version import tomllib
from crewai.cli.utils import get_crewai_version, read_toml
def run_crew() -> None: def run_crew() -> None:
@@ -11,21 +9,6 @@ def run_crew() -> None:
Run the crew by running a command in the UV environment. Run the crew by running a command in the UV environment.
""" """
command = ["uv", "run", "run_crew"] command = ["uv", "run", "run_crew"]
crewai_version = get_crewai_version()
min_required_version = "0.71.0"
pyproject_data = read_toml()
if pyproject_data.get("tool", {}).get("poetry") and (
version.parse(crewai_version) < version.parse(min_required_version)
):
click.secho(
f"You are running an older version of crewAI ({crewai_version}) that uses poetry pyproject.toml. "
f"Please run `crewai update` to update your pyproject.toml to use uv.",
fg="red",
)
print()
try: try:
subprocess.run(command, capture_output=False, text=True, check=True) subprocess.run(command, capture_output=False, text=True, check=True)
@@ -33,7 +16,10 @@ def run_crew() -> None:
click.echo(f"An error occurred while running the crew: {e}", err=True) click.echo(f"An error occurred while running the crew: {e}", err=True)
click.echo(e.output, err=True, nl=True) click.echo(e.output, err=True, nl=True)
if pyproject_data.get("tool", {}).get("poetry"): with open("pyproject.toml", "rb") as f:
data = tomllib.load(f)
if data.get("tool", {}).get("poetry"):
click.secho( click.secho(
"It's possible that you are using an old version of crewAI that uses poetry, please run `crewai update` to update your pyproject.toml to use uv.", "It's possible that you are using an old version of crewAI that uses poetry, please run `crewai update` to update your pyproject.toml to use uv.",
fg="yellow", fg="yellow",

View File

@@ -3,11 +3,11 @@ import subprocess
import click import click
def kickoff_flow() -> None: def run_flow() -> None:
""" """
Kickoff the flow by running a command in the UV environment. Run the flow by running a command in the UV environment.
""" """
command = ["uv", "run", "kickoff"] command = ["uv", "run", "run_flow"]
try: try:
result = subprocess.run(command, capture_output=False, text=True, check=True) result = subprocess.run(command, capture_output=False, text=True, check=True)

View File

@@ -3,7 +3,7 @@ import sys
from {{folder_name}}.crew import {{crew_name}}Crew from {{folder_name}}.crew import {{crew_name}}Crew
# This main file is intended to be a way for you to run your # This main file is intended to be a way for you to run your
# crew locally, so refrain from adding unnecessary logic into this file. # crew locally, so refrain from adding necessary logic into this file.
# Replace with inputs you want to test with, it will automatically # Replace with inputs you want to test with, it will automatically
# interpolate any tasks and agents information # interpolate any tasks and agents information

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13" requires-python = ">=3.10,<=3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.76.9,<1.0.0" "crewai[tools]>=0.67.1,<1.0.0"
] ]
[project.scripts] [project.scripts]

View File

@@ -1,18 +1,11 @@
from crewai.tools import BaseTool from crewai_tools import BaseTool
from typing import Type
from pydantic import BaseModel, Field
class MyCustomToolInput(BaseModel):
"""Input schema for MyCustomTool."""
argument: str = Field(..., description="Description of the argument.")
class MyCustomTool(BaseTool): class MyCustomTool(BaseTool):
name: str = "Name of my tool" name: str = "Name of my tool"
description: str = ( description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it." "Clear description for what this tool is useful for, you agent will need this information to use it."
) )
args_schema: Type[BaseModel] = MyCustomToolInput
def _run(self, argument: str) -> str: def _run(self, argument: str) -> str:
# Implementation goes here # Implementation goes here

View File

@@ -1,53 +1,65 @@
#!/usr/bin/env python #!/usr/bin/env python
import asyncio
from random import randint from random import randint
from pydantic import BaseModel from pydantic import BaseModel
from crewai.flow.flow import Flow, listen, start from crewai.flow.flow import Flow, listen, start
from .crews.poem_crew.poem_crew import PoemCrew from .crews.poem_crew.poem_crew import PoemCrew
class PoemState(BaseModel): class PoemState(BaseModel):
sentence_count: int = 1 sentence_count: int = 1
poem: str = "" poem: str = ""
class PoemFlow(Flow[PoemState]): class PoemFlow(Flow[PoemState]):
@start() @start()
def generate_sentence_count(self): def generate_sentence_count(self):
print("Generating sentence count") print("Generating sentence count")
# Generate a number between 1 and 5
self.state.sentence_count = randint(1, 5) self.state.sentence_count = randint(1, 5)
@listen(generate_sentence_count) @listen(generate_sentence_count)
def generate_poem(self): def generate_poem(self):
print("Generating poem") print("Generating poem")
result = ( print(f"State before poem: {self.state}")
PoemCrew() result = PoemCrew().crew().kickoff(inputs={"sentence_count": self.state.sentence_count})
.crew()
.kickoff(inputs={"sentence_count": self.state.sentence_count})
)
print("Poem generated", result.raw) print("Poem generated", result.raw)
self.state.poem = result.raw self.state.poem = result.raw
print(f"State after generate_poem: {self.state}")
@listen(generate_poem) @listen(generate_poem)
def save_poem(self): def save_poem(self):
print("Saving poem") print("Saving poem")
print(f"State before save_poem: {self.state}")
with open("poem.txt", "w") as f: with open("poem.txt", "w") as f:
f.write(self.state.poem) f.write(self.state.poem)
print(f"State after save_poem: {self.state}")
async def run_flow():
def kickoff(): """
Run the flow.
"""
poem_flow = PoemFlow() poem_flow = PoemFlow()
poem_flow.kickoff() await poem_flow.kickoff()
async def plot_flow():
def plot(): """
Plot the flow.
"""
poem_flow = PoemFlow() poem_flow = PoemFlow()
poem_flow.plot() poem_flow.plot()
def main():
asyncio.run(run_flow())
def plot():
asyncio.run(plot_flow())
if __name__ == "__main__": if __name__ == "__main__":
kickoff() main()

View File

@@ -5,12 +5,14 @@ description = "{{name}} using crewAI"
authors = [{ name = "Your Name", email = "you@example.com" }] authors = [{ name = "Your Name", email = "you@example.com" }]
requires-python = ">=3.10,<=3.13" requires-python = ">=3.10,<=3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.76.9,<1.0.0", "crewai[tools]>=0.67.1,<1.0.0",
"asyncio"
] ]
[project.scripts] [project.scripts]
kickoff = "{{folder_name}}.main:kickoff" {{folder_name}} = "{{folder_name}}.main:main"
plot = "{{folder_name}}.main:plot" run_flow = "{{folder_name}}.main:main"
plot_flow = "{{folder_name}}.main:plot"
[build-system] [build-system]
requires = ["hatchling"] requires = ["hatchling"]

View File

@@ -1,13 +1,4 @@
from typing import Type from crewai_tools import BaseTool
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class MyCustomToolInput(BaseModel):
"""Input schema for MyCustomTool."""
argument: str = Field(..., description="Description of the argument.")
class MyCustomTool(BaseTool): class MyCustomTool(BaseTool):
@@ -15,7 +6,6 @@ class MyCustomTool(BaseTool):
description: str = ( description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it." "Clear description for what this tool is useful for, you agent will need this information to use it."
) )
args_schema: Type[BaseModel] = MyCustomToolInput
def _run(self, argument: str) -> str: def _run(self, argument: str) -> str:
# Implementation goes here # Implementation goes here

View File

@@ -6,7 +6,7 @@ authors = ["Your Name <you@example.com>"]
[tool.poetry.dependencies] [tool.poetry.dependencies]
python = ">=3.10,<=3.13" python = ">=3.10,<=3.13"
crewai = { extras = ["tools"], version = ">=0.76.9,<1.0.0" } crewai = { extras = ["tools"], version = ">=0.70.1,<1.0.0" }
asyncio = "*" asyncio = "*"
[tool.poetry.scripts] [tool.poetry.scripts]

View File

@@ -1,18 +1,11 @@
from typing import Type from crewai_tools import BaseTool
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class MyCustomToolInput(BaseModel):
"""Input schema for MyCustomTool."""
argument: str = Field(..., description="Description of the argument.")
class MyCustomTool(BaseTool): class MyCustomTool(BaseTool):
name: str = "Name of my tool" name: str = "Name of my tool"
description: str = ( description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it." "Clear description for what this tool is useful for, you agent will need this information to use it."
) )
args_schema: Type[BaseModel] = MyCustomToolInput
def _run(self, argument: str) -> str: def _run(self, argument: str) -> str:
# Implementation goes here # Implementation goes here

View File

@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
authors = ["Your Name <you@example.com>"] authors = ["Your Name <you@example.com>"]
requires-python = ">=3.10,<=3.13" requires-python = ">=3.10,<=3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.76.9,<1.0.0" "crewai[tools]>=0.67.1,<1.0.0"
] ]
[project.scripts] [project.scripts]

View File

@@ -1,18 +1,11 @@
from typing import Type from crewai_tools import BaseTool
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class MyCustomToolInput(BaseModel):
"""Input schema for MyCustomTool."""
argument: str = Field(..., description="Description of the argument.")
class MyCustomTool(BaseTool): class MyCustomTool(BaseTool):
name: str = "Name of my tool" name: str = "Name of my tool"
description: str = ( description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it." "Clear description for what this tool is useful for, you agent will need this information to use it."
) )
args_schema: Type[BaseModel] = MyCustomToolInput
def _run(self, argument: str) -> str: def _run(self, argument: str) -> str:
# Implementation goes here # Implementation goes here

View File

@@ -5,6 +5,6 @@ description = "Power up your crews with {{folder_name}}"
readme = "README.md" readme = "README.md"
requires-python = ">=3.10,<=3.13" requires-python = ">=3.10,<=3.13"
dependencies = [ dependencies = [
"crewai[tools]>=0.76.9" "crewai[tools]>=0.70.1"
] ]

View File

@@ -1,5 +1,4 @@
from crewai.tools import BaseTool from crewai_tools import BaseTool
class {{class_name}}(BaseTool): class {{class_name}}(BaseTool):
name: str = "Name of my tool" name: str = "Name of my tool"

View File

@@ -1,15 +1,17 @@
import base64 import base64
import os import os
import platform
import subprocess import subprocess
import tempfile import tempfile
from pathlib import Path from pathlib import Path
from netrc import netrc
import stat
import click import click
from rich.console import Console from rich.console import Console
from crewai.cli import git from crewai.cli import git
from crewai.cli.command import BaseCommand, PlusAPIMixin from crewai.cli.command import BaseCommand, PlusAPIMixin
from crewai.cli.config import Settings
from crewai.cli.utils import ( from crewai.cli.utils import (
get_project_description, get_project_description,
get_project_name, get_project_name,
@@ -26,6 +28,8 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
A class to handle tool repository related operations for CrewAI projects. A class to handle tool repository related operations for CrewAI projects.
""" """
BASE_URL = "https://app.crewai.com/pypi/"
def __init__(self): def __init__(self):
BaseCommand.__init__(self) BaseCommand.__init__(self)
PlusAPIMixin.__init__(self, telemetry=self._telemetry) PlusAPIMixin.__init__(self, telemetry=self._telemetry)
@@ -151,35 +155,39 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
raise SystemExit raise SystemExit
login_response_json = login_response.json() login_response_json = login_response.json()
self._set_netrc_credentials(login_response_json["credential"])
settings = Settings()
settings.tool_repository_username = login_response_json["credential"]["username"]
settings.tool_repository_password = login_response_json["credential"]["password"]
settings.dump()
console.print( console.print(
"Successfully authenticated to the tool repository.", style="bold green" "Successfully authenticated to the tool repository.", style="bold green"
) )
def _set_netrc_credentials(self, credentials, netrc_path=None):
if not netrc_path:
netrc_filename = "_netrc" if platform.system() == "Windows" else ".netrc"
netrc_path = Path.home() / netrc_filename
netrc_path.touch(mode=stat.S_IRUSR | stat.S_IWUSR, exist_ok=True)
netrc_instance = netrc(file=netrc_path)
netrc_instance.hosts["app.crewai.com"] = (credentials["username"], "", credentials["password"])
with open(netrc_path, 'w') as file:
file.write(str(netrc_instance))
console.print(f"Added credentials to {netrc_path}", style="bold green")
def _add_package(self, tool_details): def _add_package(self, tool_details):
tool_handle = tool_details["handle"] tool_handle = tool_details["handle"]
repository_handle = tool_details["repository"]["handle"] repository_handle = tool_details["repository"]["handle"]
repository_url = tool_details["repository"]["url"]
index = f"{repository_handle}={repository_url}"
add_package_command = [ add_package_command = [
"uv", "uv",
"add", "add",
"--index", "--extra-index-url",
index, self.BASE_URL + repository_handle,
tool_handle, tool_handle,
] ]
add_package_result = subprocess.run( add_package_result = subprocess.run(
add_package_command, add_package_command, capture_output=False, text=True, check=True
capture_output=False,
env=self._build_env_with_credentials(repository_handle),
text=True,
check=True
) )
if add_package_result.stderr: if add_package_result.stderr:
@@ -198,13 +206,3 @@ class ToolCommand(BaseCommand, PlusAPIMixin):
"[bold yellow]Tip:[/bold yellow] Navigate to a different directory and try again." "[bold yellow]Tip:[/bold yellow] Navigate to a different directory and try again."
) )
raise SystemExit raise SystemExit
def _build_env_with_credentials(self, repository_handle: str):
repository_handle = repository_handle.upper().replace("-", "_")
settings = Settings()
env = os.environ.copy()
env[f"UV_INDEX_{repository_handle}_USERNAME"] = str(settings.tool_repository_username or "")
env[f"UV_INDEX_{repository_handle}_PASSWORD"] = str(settings.tool_repository_password or "")
return env

View File

@@ -1,9 +1,7 @@
import os
import shutil import shutil
import tomli_w import tomli_w
import tomllib
from crewai.cli.utils import read_toml
def update_crew() -> None: def update_crew() -> None:
@@ -19,9 +17,10 @@ def migrate_pyproject(input_file, output_file):
And it will be used to migrate the pyproject.toml to the new format when uv is used. And it will be used to migrate the pyproject.toml to the new format when uv is used.
When the time comes that uv supports the new format, this function will be deprecated. When the time comes that uv supports the new format, this function will be deprecated.
""" """
poetry_data = {}
# Read the input pyproject.toml # Read the input pyproject.toml
pyproject_data = read_toml() with open(input_file, "rb") as f:
pyproject = tomllib.load(f)
# Initialize the new project structure # Initialize the new project structure
new_pyproject = { new_pyproject = {
@@ -30,30 +29,30 @@ def migrate_pyproject(input_file, output_file):
} }
# Migrate project metadata # Migrate project metadata
if "tool" in pyproject_data and "poetry" in pyproject_data["tool"]: if "tool" in pyproject and "poetry" in pyproject["tool"]:
poetry_data = pyproject_data["tool"]["poetry"] poetry = pyproject["tool"]["poetry"]
new_pyproject["project"]["name"] = poetry_data.get("name") new_pyproject["project"]["name"] = poetry.get("name")
new_pyproject["project"]["version"] = poetry_data.get("version") new_pyproject["project"]["version"] = poetry.get("version")
new_pyproject["project"]["description"] = poetry_data.get("description") new_pyproject["project"]["description"] = poetry.get("description")
new_pyproject["project"]["authors"] = [ new_pyproject["project"]["authors"] = [
{ {
"name": author.split("<")[0].strip(), "name": author.split("<")[0].strip(),
"email": author.split("<")[1].strip(">").strip(), "email": author.split("<")[1].strip(">").strip(),
} }
for author in poetry_data.get("authors", []) for author in poetry.get("authors", [])
] ]
new_pyproject["project"]["requires-python"] = poetry_data.get("python") new_pyproject["project"]["requires-python"] = poetry.get("python")
else: else:
# If it's already in the new format, just copy the project section # If it's already in the new format, just copy the project section
new_pyproject["project"] = pyproject_data.get("project", {}) new_pyproject["project"] = pyproject.get("project", {})
# Migrate or copy dependencies # Migrate or copy dependencies
if "dependencies" in new_pyproject["project"]: if "dependencies" in new_pyproject["project"]:
# If dependencies are already in the new format, keep them as is # If dependencies are already in the new format, keep them as is
pass pass
elif poetry_data and "dependencies" in poetry_data: elif "dependencies" in poetry:
new_pyproject["project"]["dependencies"] = [] new_pyproject["project"]["dependencies"] = []
for dep, version in poetry_data["dependencies"].items(): for dep, version in poetry["dependencies"].items():
if isinstance(version, dict): # Handle extras if isinstance(version, dict): # Handle extras
extras = ",".join(version.get("extras", [])) extras = ",".join(version.get("extras", []))
new_dep = f"{dep}[{extras}]" new_dep = f"{dep}[{extras}]"
@@ -67,10 +66,10 @@ def migrate_pyproject(input_file, output_file):
new_pyproject["project"]["dependencies"].append(new_dep) new_pyproject["project"]["dependencies"].append(new_dep)
# Migrate or copy scripts # Migrate or copy scripts
if poetry_data and "scripts" in poetry_data: if "scripts" in poetry:
new_pyproject["project"]["scripts"] = poetry_data["scripts"] new_pyproject["project"]["scripts"] = poetry["scripts"]
elif pyproject_data.get("project", {}) and "scripts" in pyproject_data["project"]: elif "scripts" in pyproject.get("project", {}):
new_pyproject["project"]["scripts"] = pyproject_data["project"]["scripts"] new_pyproject["project"]["scripts"] = pyproject["project"]["scripts"]
else: else:
new_pyproject["project"]["scripts"] = {} new_pyproject["project"]["scripts"] = {}
@@ -87,23 +86,14 @@ def migrate_pyproject(input_file, output_file):
new_pyproject["project"]["scripts"]["run_crew"] = f"{module_name}.main:run" new_pyproject["project"]["scripts"]["run_crew"] = f"{module_name}.main:run"
# Migrate optional dependencies # Migrate optional dependencies
if poetry_data and "extras" in poetry_data: if "extras" in poetry:
new_pyproject["project"]["optional-dependencies"] = poetry_data["extras"] new_pyproject["project"]["optional-dependencies"] = poetry["extras"]
# Backup the old pyproject.toml # Backup the old pyproject.toml
backup_file = "pyproject-old.toml" backup_file = "pyproject-old.toml"
shutil.copy2(input_file, backup_file) shutil.copy2(input_file, backup_file)
print(f"Original pyproject.toml backed up as {backup_file}") print(f"Original pyproject.toml backed up as {backup_file}")
# Rename the poetry.lock file
lock_file = "poetry.lock"
lock_backup = "poetry-old.lock"
if os.path.exists(lock_file):
os.rename(lock_file, lock_backup)
print(f"Original poetry.lock renamed to {lock_backup}")
else:
print("No poetry.lock file found to rename.")
# Write the new pyproject.toml # Write the new pyproject.toml
with open(output_file, "wb") as f: with open(output_file, "wb") as f:
tomli_w.dump(new_pyproject, f) tomli_w.dump(new_pyproject, f)

View File

@@ -6,11 +6,9 @@ from functools import reduce
from typing import Any, Dict, List from typing import Any, Dict, List
import click import click
import tomli
from rich.console import Console from rich.console import Console
from crewai.cli.authentication.utils import TokenManager from crewai.cli.authentication.utils import TokenManager
from crewai.cli.constants import ENV_VARS
if sys.version_info >= (3, 11): if sys.version_info >= (3, 11):
import tomllib import tomllib
@@ -55,13 +53,6 @@ def simple_toml_parser(content):
return result return result
def read_toml(file_path: str = "pyproject.toml"):
"""Read the content of a TOML file and return it as a dictionary."""
with open(file_path, "rb") as f:
toml_dict = tomli.load(f)
return toml_dict
def parse_toml(content): def parse_toml(content):
if sys.version_info >= (3, 11): if sys.version_info >= (3, 11):
return tomllib.loads(content) return tomllib.loads(content)
@@ -209,76 +200,3 @@ def tree_find_and_replace(directory, find, replace):
new_dirpath = os.path.join(path, new_dirname) new_dirpath = os.path.join(path, new_dirname)
old_dirpath = os.path.join(path, dirname) old_dirpath = os.path.join(path, dirname)
os.rename(old_dirpath, new_dirpath) os.rename(old_dirpath, new_dirpath)
def load_env_vars(folder_path):
"""
Loads environment variables from a .env file in the specified folder path.
Args:
- folder_path (Path): The path to the folder containing the .env file.
Returns:
- dict: A dictionary of environment variables.
"""
env_file_path = folder_path / ".env"
env_vars = {}
if env_file_path.exists():
with open(env_file_path, "r") as file:
for line in file:
key, _, value = line.strip().partition("=")
if key and value:
env_vars[key] = value
return env_vars
def update_env_vars(env_vars, provider, model):
"""
Updates environment variables with the API key for the selected provider and model.
Args:
- env_vars (dict): Environment variables dictionary.
- provider (str): Selected provider.
- model (str): Selected model.
Returns:
- None
"""
api_key_var = ENV_VARS.get(
provider,
[
click.prompt(
f"Enter the environment variable name for your {provider.capitalize()} API key",
type=str,
)
],
)[0]
if api_key_var not in env_vars:
try:
env_vars[api_key_var] = click.prompt(
f"Enter your {provider.capitalize()} API key", type=str, hide_input=True
)
except click.exceptions.Abort:
click.secho("Operation aborted by the user.", fg="red")
return None
else:
click.secho(f"API key already exists for {provider.capitalize()}.", fg="yellow")
env_vars["MODEL"] = model
click.secho(f"Selected model: {model}", fg="green")
return env_vars
def write_env_file(folder_path, env_vars):
"""
Writes environment variables to a .env file in the specified folder.
Args:
- folder_path (Path): The path to the folder where the .env file will be written.
- env_vars (dict): A dictionary of environment variables to write.
"""
env_file_path = folder_path / ".env"
with open(env_file_path, "w") as file:
for key, value in env_vars.items():
file.write(f"{key}={value}\n")

View File

@@ -1,6 +1,5 @@
import asyncio import asyncio
import json import json
import os
import uuid import uuid
import warnings import warnings
from concurrent.futures import Future from concurrent.futures import Future
@@ -32,7 +31,7 @@ from crewai.task import Task
from crewai.tasks.conditional_task import ConditionalTask from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput from crewai.tasks.task_output import TaskOutput
from crewai.telemetry import Telemetry from crewai.telemetry import Telemetry
from crewai.tools.agent_tools.agent_tools import AgentTools from crewai.tools.agent_tools import AgentTools
from crewai.types.usage_metrics import UsageMetrics from crewai.types.usage_metrics import UsageMetrics
from crewai.utilities import I18N, FileHandler, Logger, RPMController from crewai.utilities import I18N, FileHandler, Logger, RPMController
from crewai.utilities.constants import ( from crewai.utilities.constants import (
@@ -48,12 +47,10 @@ from crewai.utilities.planning_handler import CrewPlanner
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
from crewai.utilities.training_handler import CrewTrainingHandler from crewai.utilities.training_handler import CrewTrainingHandler
agentops = None try:
if os.environ.get("AGENTOPS_API_KEY"): import agentops
try: except ImportError:
import agentops # type: ignore agentops = None
except ImportError:
pass
if TYPE_CHECKING: if TYPE_CHECKING:
from crewai.pipeline.pipeline import Pipeline from crewai.pipeline.pipeline import Pipeline
@@ -126,8 +123,8 @@ class Crew(BaseModel):
default=None, default=None,
description="An Instance of the EntityMemory to be used by the Crew", description="An Instance of the EntityMemory to be used by the Crew",
) )
embedder: Optional[Any] = Field( embedder: Optional[dict] = Field(
default=None, default={"provider": "openai"},
description="Configuration for the embedder to be used for the crew.", description="Configuration for the embedder to be used for the crew.",
) )
usage_metrics: Optional[UsageMetrics] = Field( usage_metrics: Optional[UsageMetrics] = Field(
@@ -435,16 +432,15 @@ class Crew(BaseModel):
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {} self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
) -> None: ) -> None:
"""Trains the crew for a given number of iterations.""" """Trains the crew for a given number of iterations."""
train_crew = self.copy() self._setup_for_training(filename)
train_crew._setup_for_training(filename)
for n_iteration in range(n_iterations): for n_iteration in range(n_iterations):
train_crew._train_iteration = n_iteration self._train_iteration = n_iteration
train_crew.kickoff(inputs=inputs) self.kickoff(inputs=inputs)
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load() training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
for agent in train_crew.agents: for agent in self.agents:
result = TaskEvaluator(agent).evaluate_training_data( result = TaskEvaluator(agent).evaluate_training_data(
training_data=training_data, agent_id=str(agent.id) training_data=training_data, agent_id=str(agent.id)
) )
@@ -775,9 +771,7 @@ class Crew(BaseModel):
def _log_task_start(self, task: Task, role: str = "None"): def _log_task_start(self, task: Task, role: str = "None"):
if self.output_log_file: if self.output_log_file:
self._file_handler.log( self._file_handler.log(task_name=task.name, task=task.description, agent=role, status="started")
task_name=task.name, task=task.description, agent=role, status="started"
)
def _update_manager_tools(self, task: Task): def _update_manager_tools(self, task: Task):
if self.manager_agent: if self.manager_agent:
@@ -799,13 +793,7 @@ class Crew(BaseModel):
def _process_task_result(self, task: Task, output: TaskOutput) -> None: def _process_task_result(self, task: Task, output: TaskOutput) -> None:
role = task.agent.role if task.agent is not None else "None" role = task.agent.role if task.agent is not None else "None"
if self.output_log_file: if self.output_log_file:
self._file_handler.log( self._file_handler.log(task_name=task.name, task=task.description, agent=role, status="completed", output=output.raw)
task_name=task.name,
task=task.description,
agent=role,
status="completed",
output=output.raw,
)
def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput: def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
if len(task_outputs) != 1: if len(task_outputs) != 1:
@@ -988,19 +976,17 @@ class Crew(BaseModel):
inputs: Optional[Dict[str, Any]] = None, inputs: Optional[Dict[str, Any]] = None,
) -> None: ) -> None:
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures.""" """Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
test_crew = self.copy() self._test_execution_span = self._telemetry.test_execution_span(
self,
self._test_execution_span = test_crew._telemetry.test_execution_span(
test_crew,
n_iterations, n_iterations,
inputs, inputs,
openai_model_name, # type: ignore[arg-type] openai_model_name, # type: ignore[arg-type]
) # type: ignore[arg-type] ) # type: ignore[arg-type]
evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type] evaluator = CrewEvaluator(self, openai_model_name) # type: ignore[arg-type]
for i in range(1, n_iterations + 1): for i in range(1, n_iterations + 1):
evaluator.set_iteration(i) evaluator.set_iteration(i)
test_crew.kickoff(inputs=inputs) self.kickoff(inputs=inputs)
evaluator.print_crew_evaluation_result() evaluator.print_crew_evaluation_result()

View File

@@ -1,20 +1,10 @@
# flow.py
import asyncio import asyncio
import inspect import inspect
from typing import ( from typing import Any, Callable, Dict, Generic, List, Set, Type, TypeVar, Union
Any,
Callable,
Dict,
Generic,
List,
Optional,
Set,
Type,
TypeVar,
Union,
cast,
)
from pydantic import BaseModel, ValidationError from pydantic import BaseModel
from crewai.flow.flow_visualizer import plot_flow from crewai.flow.flow_visualizer import plot_flow
from crewai.flow.utils import get_possible_return_constants from crewai.flow.utils import get_possible_return_constants
@@ -130,8 +120,6 @@ class FlowMeta(type):
methods = attr_value.__trigger_methods__ methods = attr_value.__trigger_methods__
condition_type = getattr(attr_value, "__condition_type__", "OR") condition_type = getattr(attr_value, "__condition_type__", "OR")
listeners[attr_name] = (condition_type, methods) listeners[attr_name] = (condition_type, methods)
# TODO: should we add a check for __condition_type__ 'AND'?
elif hasattr(attr_value, "__is_router__"): elif hasattr(attr_value, "__is_router__"):
routers[attr_value.__router_for__] = attr_name routers[attr_value.__router_for__] = attr_name
possible_returns = get_possible_return_constants(attr_value) possible_returns = get_possible_return_constants(attr_value)
@@ -171,8 +159,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
def __init__(self) -> None: def __init__(self) -> None:
self._methods: Dict[str, Callable] = {} self._methods: Dict[str, Callable] = {}
self._state: T = self._create_initial_state() self._state: T = self._create_initial_state()
self._executed_methods: Set[str] = set() self._completed_methods: Set[str] = set()
self._scheduled_tasks: Set[str] = set()
self._pending_and_listeners: Dict[str, Set[str]] = {} self._pending_and_listeners: Dict[str, Set[str]] = {}
self._method_outputs: List[Any] = [] # List to store all method outputs self._method_outputs: List[Any] = [] # List to store all method outputs
@@ -203,74 +190,7 @@ class Flow(Generic[T], metaclass=FlowMeta):
"""Returns the list of all outputs from executed methods.""" """Returns the list of all outputs from executed methods."""
return self._method_outputs return self._method_outputs
def _initialize_state(self, inputs: Dict[str, Any]) -> None: async def kickoff(self) -> Any:
"""
Initializes or updates the state with the provided inputs.
Args:
inputs: Dictionary of inputs to initialize or update the state.
Raises:
ValueError: If inputs do not match the structured state model.
TypeError: If state is neither a BaseModel instance nor a dictionary.
"""
if isinstance(self._state, BaseModel):
# Structured state management
try:
# Define a function to create the dynamic class
def create_model_with_extra_forbid(
base_model: Type[BaseModel],
) -> Type[BaseModel]:
class ModelWithExtraForbid(base_model): # type: ignore
model_config = base_model.model_config.copy()
model_config["extra"] = "forbid"
return ModelWithExtraForbid
# Create the dynamic class
ModelWithExtraForbid = create_model_with_extra_forbid(
self._state.__class__
)
# Create a new instance using the combined state and inputs
self._state = cast(
T, ModelWithExtraForbid(**{**self._state.model_dump(), **inputs})
)
except ValidationError as e:
raise ValueError(f"Invalid inputs for structured state: {e}") from e
elif isinstance(self._state, dict):
# Unstructured state management
self._state.update(inputs)
else:
raise TypeError("State must be a BaseModel instance or a dictionary.")
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
"""
Starts the execution of the flow synchronously.
Args:
inputs: Optional dictionary of inputs to initialize or update the state.
Returns:
The final output from the flow execution.
"""
if inputs is not None:
self._initialize_state(inputs)
return asyncio.run(self.kickoff_async())
async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
"""
Starts the execution of the flow asynchronously.
Args:
inputs: Optional dictionary of inputs to initialize or update the state.
Returns:
The final output from the flow execution.
"""
if inputs is not None:
self._initialize_state(inputs)
if not self._start_methods: if not self._start_methods:
raise ValueError("No start method defined") raise ValueError("No start method defined")
@@ -293,24 +213,17 @@ class Flow(Generic[T], metaclass=FlowMeta):
else: else:
return None # Or raise an exception if no methods were executed return None # Or raise an exception if no methods were executed
async def _execute_start_method(self, start_method_name: str) -> None: async def _execute_start_method(self, start_method: str) -> None:
result = await self._execute_method( result = await self._execute_method(self._methods[start_method])
start_method_name, self._methods[start_method_name] await self._execute_listeners(start_method, result)
)
await self._execute_listeners(start_method_name, result)
async def _execute_method( async def _execute_method(self, method: Callable, *args: Any, **kwargs: Any) -> Any:
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
) -> Any:
result = ( result = (
await method(*args, **kwargs) await method(*args, **kwargs)
if asyncio.iscoroutinefunction(method) if asyncio.iscoroutinefunction(method)
else method(*args, **kwargs) else method(*args, **kwargs)
) )
self._method_outputs.append(result) # Store the output self._method_outputs.append(result) # Store the output
self._executed_methods.add(method_name)
return result return result
async def _execute_listeners(self, trigger_method: str, result: Any) -> None: async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
@@ -318,40 +231,32 @@ class Flow(Generic[T], metaclass=FlowMeta):
if trigger_method in self._routers: if trigger_method in self._routers:
router_method = self._methods[self._routers[trigger_method]] router_method = self._methods[self._routers[trigger_method]]
path = await self._execute_method( path = await self._execute_method(router_method)
trigger_method, router_method
) # TODO: Change or not?
# Use the path as the new trigger method # Use the path as the new trigger method
trigger_method = path trigger_method = path
for listener_name, (condition_type, methods) in self._listeners.items(): for listener, (condition_type, methods) in self._listeners.items():
if condition_type == "OR": if condition_type == "OR":
if trigger_method in methods: if trigger_method in methods:
if ( listener_tasks.append(
listener_name not in self._executed_methods self._execute_single_listener(listener, result)
and listener_name not in self._scheduled_tasks )
):
self._scheduled_tasks.add(listener_name)
listener_tasks.append(
self._execute_single_listener(listener_name, result)
)
elif condition_type == "AND": elif condition_type == "AND":
if all(method in self._executed_methods for method in methods): if listener not in self._pending_and_listeners:
if ( self._pending_and_listeners[listener] = set()
listener_name not in self._executed_methods self._pending_and_listeners[listener].add(trigger_method)
and listener_name not in self._scheduled_tasks if set(methods) == self._pending_and_listeners[listener]:
): listener_tasks.append(
self._scheduled_tasks.add(listener_name) self._execute_single_listener(listener, result)
listener_tasks.append( )
self._execute_single_listener(listener_name, result) del self._pending_and_listeners[listener]
)
# Run all listener tasks concurrently and wait for them to complete # Run all listener tasks concurrently and wait for them to complete
await asyncio.gather(*listener_tasks) await asyncio.gather(*listener_tasks)
async def _execute_single_listener(self, listener_name: str, result: Any) -> None: async def _execute_single_listener(self, listener: str, result: Any) -> None:
try: try:
method = self._methods[listener_name] method = self._methods[listener]
sig = inspect.signature(method) sig = inspect.signature(method)
params = list(sig.parameters.values()) params = list(sig.parameters.values())
@@ -360,22 +265,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
if method_params: if method_params:
# If listener expects parameters, pass the result # If listener expects parameters, pass the result
listener_result = await self._execute_method( listener_result = await self._execute_method(method, result)
listener_name, method, result
)
else: else:
# If listener does not expect parameters, call without arguments # If listener does not expect parameters, call without arguments
listener_result = await self._execute_method(listener_name, method) listener_result = await self._execute_method(method)
# Remove from scheduled tasks after execution
self._scheduled_tasks.discard(listener_name)
# Execute listeners of this listener # Execute listeners of this listener
await self._execute_listeners(listener_name, listener_result) await self._execute_listeners(listener, listener_result)
except Exception as e: except Exception as e:
print( print(f"[Flow._execute_single_listener] Error in method {listener}: {e}")
f"[Flow._execute_single_listener] Error in method {listener_name}: {e}"
)
import traceback import traceback
traceback.print_exc() traceback.print_exc()

View File

@@ -31,10 +31,7 @@ class ContextualMemory:
formatted as bullet points. formatted as bullet points.
""" """
stm_results = self.stm.search(query) stm_results = self.stm.search(query)
formatted_results = "\n".join( formatted_results = "\n".join([f"- {result}" for result in stm_results])
[f"- {result['context']}" for result in stm_results]
)
print("formatted_results stm", formatted_results)
return f"Recent Insights:\n{formatted_results}" if stm_results else "" return f"Recent Insights:\n{formatted_results}" if stm_results else ""
def _fetch_ltm_context(self, task) -> Optional[str]: def _fetch_ltm_context(self, task) -> Optional[str]:
@@ -54,8 +51,6 @@ class ContextualMemory:
formatted_results = list(dict.fromkeys(formatted_results)) formatted_results = list(dict.fromkeys(formatted_results))
formatted_results = "\n".join([f"- {result}" for result in formatted_results]) # type: ignore # Incompatible types in assignment (expression has type "str", variable has type "list[str]") formatted_results = "\n".join([f"- {result}" for result in formatted_results]) # type: ignore # Incompatible types in assignment (expression has type "str", variable has type "list[str]")
print("formatted_results ltm", formatted_results)
return f"Historical Data:\n{formatted_results}" if ltm_results else "" return f"Historical Data:\n{formatted_results}" if ltm_results else ""
def _fetch_entity_context(self, query) -> str: def _fetch_entity_context(self, query) -> str:
@@ -67,5 +62,4 @@ class ContextualMemory:
formatted_results = "\n".join( formatted_results = "\n".join(
[f"- {result['context']}" for result in em_results] # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice" [f"- {result['context']}" for result in em_results] # type: ignore # Invalid index type "str" for "str"; expected type "SupportsIndex | slice"
) )
print("formatted_results em", formatted_results)
return f"Entities:\n{formatted_results}" if em_results else "" return f"Entities:\n{formatted_results}" if em_results else ""

View File

@@ -16,7 +16,7 @@ class EntityMemory(Memory):
if storage if storage
else RAGStorage( else RAGStorage(
type="entities", type="entities",
allow_reset=True, allow_reset=False,
embedder_config=embedder_config, embedder_config=embedder_config,
crew=crew, crew=crew,
) )

View File

@@ -1,4 +1,4 @@
from typing import Any, Dict, List from typing import Any, Dict
from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem from crewai.memory.long_term.long_term_memory_item import LongTermMemoryItem
from crewai.memory.memory import Memory from crewai.memory.memory import Memory
@@ -28,7 +28,7 @@ class LongTermMemory(Memory):
datetime=item.datetime, datetime=item.datetime,
) )
def search(self, task: str, latest_n: int = 3) -> List[Dict[str, Any]]: # type: ignore # signature of "search" incompatible with supertype "Memory" def search(self, task: str, latest_n: int = 3) -> Dict[str, Any]:
return self.storage.load(task, latest_n) # type: ignore # BUG?: "Storage" has no attribute "load" return self.storage.load(task, latest_n) # type: ignore # BUG?: "Storage" has no attribute "load"
def reset(self) -> None: def reset(self) -> None:

View File

@@ -1,6 +1,6 @@
from typing import Any, Dict, Optional, List from typing import Any, Dict, Optional
from crewai.memory.storage.rag_storage import RAGStorage from crewai.memory.storage.interface import Storage
class Memory: class Memory:
@@ -8,7 +8,7 @@ class Memory:
Base class for memory, now supporting agent tags and generic metadata. Base class for memory, now supporting agent tags and generic metadata.
""" """
def __init__(self, storage: RAGStorage): def __init__(self, storage: Storage):
self.storage = storage self.storage = storage
def save( def save(
@@ -23,5 +23,5 @@ class Memory:
self.storage.save(value, metadata) self.storage.save(value, metadata)
def search(self, query: str) -> List[Dict[str, Any]]: def search(self, query: str) -> Dict[str, Any]:
return self.storage.search(query) return self.storage.search(query)

View File

@@ -1,76 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional
class BaseRAGStorage(ABC):
"""
Base class for RAG-based Storage implementations.
"""
app: Any | None = None
def __init__(
self,
type: str,
allow_reset: bool = True,
embedder_config: Optional[Any] = None,
crew: Any = None,
):
self.type = type
self.allow_reset = allow_reset
self.embedder_config = embedder_config
self.crew = crew
self.agents = self._initialize_agents()
def _initialize_agents(self) -> str:
if self.crew:
return "_".join(
[self._sanitize_role(agent.role) for agent in self.crew.agents]
)
return ""
@abstractmethod
def _sanitize_role(self, role: str) -> str:
"""Sanitizes agent roles to ensure valid directory names."""
pass
@abstractmethod
def save(self, value: Any, metadata: Dict[str, Any]) -> None:
"""Save a value with metadata to the storage."""
pass
@abstractmethod
def search(
self,
query: str,
limit: int = 3,
filter: Optional[dict] = None,
score_threshold: float = 0.35,
) -> List[Any]:
"""Search for entries in the storage."""
pass
@abstractmethod
def reset(self) -> None:
"""Reset the storage."""
pass
@abstractmethod
def _generate_embedding(
self, text: str, metadata: Optional[Dict[str, Any]] = None
) -> Any:
"""Generate an embedding for the given text and metadata."""
pass
@abstractmethod
def _initialize_app(self):
"""Initialize the vector db."""
pass
def setup_config(self, config: Dict[str, Any]):
"""Setup the config of the storage."""
pass
def initialize_client(self):
"""Initialize the client of the storage. This should setup the app and the db collection"""
pass

View File

@@ -1,4 +1,4 @@
from typing import Any, Dict, List from typing import Any, Dict
class Storage: class Storage:
@@ -7,7 +7,7 @@ class Storage:
def save(self, value: Any, metadata: Dict[str, Any]) -> None: def save(self, value: Any, metadata: Dict[str, Any]) -> None:
pass pass
def search(self, key: str) -> List[Dict[str, Any]]: # type: ignore def search(self, key: str) -> Dict[str, Any]: # type: ignore
pass pass
def reset(self) -> None: def reset(self) -> None:

View File

@@ -70,7 +70,7 @@ class KickoffTaskOutputsSQLiteStorage:
task.expected_output, task.expected_output,
json.dumps(output, cls=CrewJSONEncoder), json.dumps(output, cls=CrewJSONEncoder),
task_index, task_index,
json.dumps(inputs, cls=CrewJSONEncoder), json.dumps(inputs),
was_replayed, was_replayed,
), ),
) )

View File

@@ -3,13 +3,9 @@ import io
import logging import logging
import os import os
import shutil import shutil
import uuid from typing import Any, Dict, List, Optional
from typing import Any, Dict, List, Optional, cast
from chromadb import Documents, EmbeddingFunction, Embeddings from crewai.memory.storage.interface import Storage
from chromadb.api import ClientAPI
from chromadb.api.types import validate_embedding_function
from crewai.memory.storage.base_rag_storage import BaseRAGStorage
from crewai.utilities.paths import db_storage_path from crewai.utilities.paths import db_storage_path
@@ -21,168 +17,68 @@ def suppress_logging(
logger = logging.getLogger(logger_name) logger = logging.getLogger(logger_name)
original_level = logger.getEffectiveLevel() original_level = logger.getEffectiveLevel()
logger.setLevel(level) logger.setLevel(level)
with ( with contextlib.redirect_stdout(io.StringIO()), contextlib.redirect_stderr(
contextlib.redirect_stdout(io.StringIO()), io.StringIO()
contextlib.redirect_stderr(io.StringIO()), ), contextlib.suppress(UserWarning):
contextlib.suppress(UserWarning),
):
yield yield
logger.setLevel(original_level) logger.setLevel(original_level)
class RAGStorage(BaseRAGStorage): class RAGStorage(Storage):
""" """
Extends Storage to handle embeddings for memory entries, improving Extends Storage to handle embeddings for memory entries, improving
search efficiency. search efficiency.
""" """
app: ClientAPI | None = None
def __init__(self, type, allow_reset=True, embedder_config=None, crew=None): def __init__(self, type, allow_reset=True, embedder_config=None, crew=None):
super().__init__(type, allow_reset, embedder_config, crew) super().__init__()
if (
not os.getenv("OPENAI_API_KEY")
and not os.getenv("OPENAI_BASE_URL") == "https://api.openai.com/v1"
):
os.environ["OPENAI_API_KEY"] = "fake"
agents = crew.agents if crew else [] agents = crew.agents if crew else []
agents = [self._sanitize_role(agent.role) for agent in agents] agents = [self._sanitize_role(agent.role) for agent in agents]
agents = "_".join(agents) agents = "_".join(agents)
self.agents = agents
config = {
"app": {
"config": {"name": type, "collect_metrics": False, "log_level": "ERROR"}
},
"chunker": {
"chunk_size": 5000,
"chunk_overlap": 100,
"length_function": "len",
"min_chunk_size": 150,
},
"vectordb": {
"provider": "chroma",
"config": {
"collection_name": type,
"dir": f"{db_storage_path()}/{type}/{agents}",
"allow_reset": allow_reset,
},
},
}
if embedder_config:
config["embedder"] = embedder_config
self.type = type self.type = type
self.config = config
self.allow_reset = allow_reset self.allow_reset = allow_reset
self._initialize_app()
def _set_embedder_config(self):
import chromadb.utils.embedding_functions as embedding_functions
if self.embedder_config is None:
self.embedder_config = self._create_default_embedding_function()
if isinstance(self.embedder_config, dict):
provider = self.embedder_config.get("provider")
config = self.embedder_config.get("config", {})
model_name = config.get("model")
if provider == "openai":
self.embedder_config = embedding_functions.OpenAIEmbeddingFunction(
api_key=config.get("api_key") or os.getenv("OPENAI_API_KEY"),
model_name=model_name,
)
elif provider == "azure":
self.embedder_config = embedding_functions.OpenAIEmbeddingFunction(
api_key=config.get("api_key"),
api_base=config.get("api_base"),
api_type=config.get("api_type", "azure"),
api_version=config.get("api_version"),
model_name=model_name,
)
elif provider == "ollama":
from openai import OpenAI
class OllamaEmbeddingFunction(EmbeddingFunction):
def __call__(self, input: Documents) -> Embeddings:
client = OpenAI(
base_url="http://localhost:11434/v1",
api_key=config.get("api_key", "ollama"),
)
try:
response = client.embeddings.create(
input=input, model=model_name
)
embeddings = [item.embedding for item in response.data]
return cast(Embeddings, embeddings)
except Exception as e:
raise e
self.embedder_config = OllamaEmbeddingFunction()
elif provider == "vertexai":
self.embedder_config = (
embedding_functions.GoogleVertexEmbeddingFunction(
model_name=model_name,
api_key=config.get("api_key"),
)
)
elif provider == "google":
self.embedder_config = (
embedding_functions.GoogleGenerativeAiEmbeddingFunction(
model_name=model_name,
api_key=config.get("api_key"),
)
)
elif provider == "cohere":
self.embedder_config = embedding_functions.CohereEmbeddingFunction(
model_name=model_name,
api_key=config.get("api_key"),
)
elif provider == "huggingface":
self.embedder_config = embedding_functions.HuggingFaceEmbeddingServer(
url=config.get("api_url"),
)
elif provider == "watson":
try:
import ibm_watsonx_ai.foundation_models as watson_models
from ibm_watsonx_ai import Credentials
from ibm_watsonx_ai.metanames import (
EmbedTextParamsMetaNames as EmbedParams,
)
except ImportError as e:
raise ImportError(
"IBM Watson dependencies are not installed. Please install them to use Watson embedding."
) from e
class WatsonEmbeddingFunction(EmbeddingFunction):
def __call__(self, input: Documents) -> Embeddings:
if isinstance(input, str):
input = [input]
embed_params = {
EmbedParams.TRUNCATE_INPUT_TOKENS: 3,
EmbedParams.RETURN_OPTIONS: {"input_text": True},
}
embedding = watson_models.Embeddings(
model_id=config.get("model"),
params=embed_params,
credentials=Credentials(
api_key=config.get("api_key"), url=config.get("api_url")
),
project_id=config.get("project_id"),
)
try:
embeddings = embedding.embed_documents(input)
return cast(Embeddings, embeddings)
except Exception as e:
print("Error during Watson embedding:", e)
raise e
self.embedder_config = WatsonEmbeddingFunction()
else:
raise Exception(
f"Unsupported embedding provider: {provider}, supported providers: [openai, azure, ollama, vertexai, google, cohere, huggingface, watson]"
)
else:
validate_embedding_function(self.embedder_config)
self.embedder_config = self.embedder_config
def _initialize_app(self): def _initialize_app(self):
import chromadb from embedchain import App
from chromadb.config import Settings from embedchain.llm.base import BaseLlm
self._set_embedder_config() class FakeLLM(BaseLlm):
chroma_client = chromadb.PersistentClient( pass
path=f"{db_storage_path()}/{self.type}/{self.agents}",
settings=Settings(allow_reset=self.allow_reset),
)
self.app = chroma_client self.app = App.from_config(config=self.config)
self.app.llm = FakeLLM()
try: if self.allow_reset:
self.collection = self.app.get_collection( self.app.reset()
name=self.type, embedding_function=self.embedder_config
)
except Exception:
self.collection = self.app.create_collection(
name=self.type, embedding_function=self.embedder_config
)
def _sanitize_role(self, role: str) -> str: def _sanitize_role(self, role: str) -> str:
""" """
@@ -191,14 +87,11 @@ class RAGStorage(BaseRAGStorage):
return role.replace("\n", "").replace(" ", "_").replace("/", "_") return role.replace("\n", "").replace(" ", "_").replace("/", "_")
def save(self, value: Any, metadata: Dict[str, Any]) -> None: def save(self, value: Any, metadata: Dict[str, Any]) -> None:
if not hasattr(self, "app") or not hasattr(self, "collection"): if not hasattr(self, "app"):
self._initialize_app() self._initialize_app()
try: self._generate_embedding(value, metadata)
self._generate_embedding(value, metadata)
except Exception as e:
logging.error(f"Error during {self.type} save: {str(e)}")
def search( def search( # type: ignore # BUG?: Signature of "search" incompatible with supertype "Storage"
self, self,
query: str, query: str,
limit: int = 3, limit: int = 3,
@@ -207,54 +100,31 @@ class RAGStorage(BaseRAGStorage):
) -> List[Any]: ) -> List[Any]:
if not hasattr(self, "app"): if not hasattr(self, "app"):
self._initialize_app() self._initialize_app()
from embedchain.vectordb.chroma import InvalidDimensionException
try: with suppress_logging():
with suppress_logging(): try:
response = self.collection.query(query_texts=query, n_results=limit) results = (
self.app.search(query, limit, where=filter)
if filter
else self.app.search(query, limit)
)
except InvalidDimensionException:
self.app.reset()
return []
return [r for r in results if r["metadata"]["score"] >= score_threshold]
results = [] def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> Any:
for i in range(len(response["ids"][0])): if not hasattr(self, "app"):
result = {
"id": response["ids"][0][i],
"metadata": response["metadatas"][0][i],
"context": response["documents"][0][i],
"score": response["distances"][0][i],
}
if result["score"] >= score_threshold:
results.append(result)
return results
except Exception as e:
logging.error(f"Error during {self.type} search: {str(e)}")
return []
def _generate_embedding(self, text: str, metadata: Dict[str, Any]) -> None: # type: ignore
if not hasattr(self, "app") or not hasattr(self, "collection"):
self._initialize_app() self._initialize_app()
from embedchain.models.data_type import DataType
self.collection.add( self.app.add(text, data_type=DataType.TEXT, metadata=metadata)
documents=[text],
metadatas=[metadata or {}],
ids=[str(uuid.uuid4())],
)
def reset(self) -> None: def reset(self) -> None:
try: try:
shutil.rmtree(f"{db_storage_path()}/{self.type}") shutil.rmtree(f"{db_storage_path()}/{self.type}")
if self.app:
self.app.reset()
except Exception as e: except Exception as e:
if "attempt to write a readonly database" in str(e): raise Exception(
# Ignore this specific error f"An error occurred while resetting the {self.type} memory: {e}"
pass )
else:
raise Exception(
f"An error occurred while resetting the {self.type} memory: {e}"
)
def _create_default_embedding_function(self):
import chromadb.utils.embedding_functions as embedding_functions
return embedding_functions.OpenAIEmbeddingFunction(
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
)

View File

@@ -76,13 +76,27 @@ def crew(func) -> Callable[..., Crew]:
instantiated_agents = [] instantiated_agents = []
agent_roles = set() agent_roles = set()
# Use the preserved task and agent information # Collect methods from crew in order
tasks = self._original_tasks.items() all_functions = [
agents = self._original_agents.items() (name, getattr(self, name))
for name, attr in self.__class__.__dict__.items()
if callable(attr)
]
tasks = [
(name, method)
for name, method in all_functions
if hasattr(method, "is_task")
]
agents = [
(name, method)
for name, method in all_functions
if hasattr(method, "is_agent")
]
# Instantiate tasks in order # Instantiate tasks in order
for task_name, task_method in tasks: for task_name, task_method in tasks:
task_instance = task_method(self) task_instance = task_method()
instantiated_tasks.append(task_instance) instantiated_tasks.append(task_instance)
agent_instance = getattr(task_instance, "agent", None) agent_instance = getattr(task_instance, "agent", None)
if agent_instance and agent_instance.role not in agent_roles: if agent_instance and agent_instance.role not in agent_roles:
@@ -91,7 +105,7 @@ def crew(func) -> Callable[..., Crew]:
# Instantiate agents not included by tasks # Instantiate agents not included by tasks
for agent_name, agent_method in agents: for agent_name, agent_method in agents:
agent_instance = agent_method(self) agent_instance = agent_method()
if agent_instance.role not in agent_roles: if agent_instance.role not in agent_roles:
instantiated_agents.append(agent_instance) instantiated_agents.append(agent_instance)
agent_roles.add(agent_instance.role) agent_roles.add(agent_instance.role)

View File

@@ -34,18 +34,6 @@ def CrewBase(cls: T) -> T:
self.map_all_agent_variables() self.map_all_agent_variables()
self.map_all_task_variables() self.map_all_task_variables()
# Preserve task and agent information
self._original_tasks = {
name: method
for name, method in cls.__dict__.items()
if hasattr(method, "is_task") and method.is_task
}
self._original_agents = {
name: method
for name, method in cls.__dict__.items()
if hasattr(method, "is_agent") and method.is_agent
}
@staticmethod @staticmethod
def load_yaml(config_path: Path): def load_yaml(config_path: Path):
try: try:

View File

@@ -20,7 +20,6 @@ from pydantic import (
from pydantic_core import PydanticCustomError from pydantic_core import PydanticCustomError
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.base_tool import BaseTool
from crewai.tasks.output_format import OutputFormat from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry from crewai.telemetry.telemetry import Telemetry
@@ -92,7 +91,7 @@ class Task(BaseModel):
output: Optional[TaskOutput] = Field( output: Optional[TaskOutput] = Field(
description="Task output, it's final result after being executed", default=None description="Task output, it's final result after being executed", default=None
) )
tools: Optional[List[BaseTool]] = Field( tools: Optional[List[Any]] = Field(
default_factory=list, default_factory=list,
description="Tools the agent is limited to use for this task.", description="Tools the agent is limited to use for this task.",
) )
@@ -186,7 +185,7 @@ class Task(BaseModel):
self, self,
agent: Optional[BaseAgent] = None, agent: Optional[BaseAgent] = None,
context: Optional[str] = None, context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None, tools: Optional[List[Any]] = None,
) -> TaskOutput: ) -> TaskOutput:
"""Execute the task synchronously.""" """Execute the task synchronously."""
return self._execute_core(agent, context, tools) return self._execute_core(agent, context, tools)
@@ -203,7 +202,7 @@ class Task(BaseModel):
self, self,
agent: BaseAgent | None = None, agent: BaseAgent | None = None,
context: Optional[str] = None, context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None, tools: Optional[List[Any]] = None,
) -> Future[TaskOutput]: ) -> Future[TaskOutput]:
"""Execute the task asynchronously.""" """Execute the task asynchronously."""
future: Future[TaskOutput] = Future() future: Future[TaskOutput] = Future()

View File

@@ -21,7 +21,7 @@ with suppress_warnings():
from opentelemetry import trace # noqa: E402 from opentelemetry import trace # noqa: E402
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter # noqa: E402 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter # noqa: E402
from opentelemetry.sdk.resources import SERVICE_NAME, Resource # noqa: E402 from opentelemetry.sdk.resources import SERVICE_NAME, Resource # noqa: E402
from opentelemetry.sdk.trace import TracerProvider # noqa: E402 from opentelemetry.sdk.trace import TracerProvider # noqa: E402
from opentelemetry.sdk.trace.export import BatchSpanProcessor # noqa: E402 from opentelemetry.sdk.trace.export import BatchSpanProcessor # noqa: E402
@@ -48,10 +48,6 @@ class Telemetry:
def __init__(self): def __init__(self):
self.ready = False self.ready = False
self.trace_set = False self.trace_set = False
if os.getenv("OTEL_SDK_DISABLED", "false").lower() == "true":
return
try: try:
telemetry_endpoint = "https://telemetry.crewai.com:4319" telemetry_endpoint = "https://telemetry.crewai.com:4319"
self.resource = Resource( self.resource = Resource(
@@ -69,7 +65,7 @@ class Telemetry:
self.provider.add_span_processor(processor) self.provider.add_span_processor(processor)
self.ready = True self.ready = True
except Exception as e: except BaseException as e:
if isinstance( if isinstance(
e, e,
(SystemExit, KeyboardInterrupt, GeneratorExit, asyncio.CancelledError), (SystemExit, KeyboardInterrupt, GeneratorExit, asyncio.CancelledError),
@@ -87,33 +83,404 @@ class Telemetry:
self.ready = False self.ready = False
self.trace_set = False self.trace_set = False
def _safe_telemetry_operation(self, operation):
if not self.ready:
return
try:
operation()
except Exception:
pass
def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None): def crew_creation(self, crew: Crew, inputs: dict[str, Any] | None):
"""Records the creation of a crew.""" """Records the creation of a crew."""
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Created")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "python_version", platform.python_version())
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "crew_process", crew.process)
self._add_attribute(span, "crew_memory", crew.memory)
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks))
self._add_attribute(span, "crew_number_of_agents", len(crew.agents))
if crew.share_crew:
self._add_attribute(
span,
"crew_agents",
json.dumps(
[
{
"key": agent.key,
"id": str(agent.id),
"role": agent.role,
"goal": agent.goal,
"backstory": agent.backstory,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file,
"function_calling_llm": (
agent.function_calling_llm.model
if agent.function_calling_llm
else ""
),
"llm": agent.llm.model,
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution,
"max_retry_limit": agent.max_retry_limit,
"tools_names": [
tool.name.casefold()
for tool in agent.tools or []
],
}
for agent in crew.agents
]
),
)
self._add_attribute(
span,
"crew_tasks",
json.dumps(
[
{
"key": task.key,
"id": str(task.id),
"description": task.description,
"expected_output": task.expected_output,
"async_execution?": task.async_execution,
"human_input?": task.human_input,
"agent_role": (
task.agent.role if task.agent else "None"
),
"agent_key": task.agent.key if task.agent else None,
"context": (
[task.description for task in task.context]
if task.context
else None
),
"tools_names": [
tool.name.casefold()
for tool in task.tools or []
],
}
for task in crew.tasks
]
),
)
self._add_attribute(span, "platform", platform.platform())
self._add_attribute(span, "platform_release", platform.release())
self._add_attribute(span, "platform_system", platform.system())
self._add_attribute(span, "platform_version", platform.version())
self._add_attribute(span, "cpus", os.cpu_count())
self._add_attribute(
span, "crew_inputs", json.dumps(inputs) if inputs else None
)
else:
self._add_attribute(
span,
"crew_agents",
json.dumps(
[
{
"key": agent.key,
"id": str(agent.id),
"role": agent.role,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"function_calling_llm": (
agent.function_calling_llm.model
if agent.function_calling_llm
else ""
),
"llm": agent.llm.model,
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution,
"max_retry_limit": agent.max_retry_limit,
"tools_names": [
tool.name.casefold()
for tool in agent.tools or []
],
}
for agent in crew.agents
]
),
)
self._add_attribute(
span,
"crew_tasks",
json.dumps(
[
{
"key": task.key,
"id": str(task.id),
"async_execution?": task.async_execution,
"human_input?": task.human_input,
"agent_role": (
task.agent.role if task.agent else "None"
),
"agent_key": task.agent.key if task.agent else None,
"tools_names": [
tool.name.casefold()
for tool in task.tools or []
],
}
for task in crew.tasks
]
),
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def operation(): def task_started(self, crew: Crew, task: Task) -> Span | None:
tracer = trace.get_tracer("crewai.telemetry") """Records task started in a crew."""
span = tracer.start_span("Crew Created") if self.ready:
self._add_attribute( try:
span, tracer = trace.get_tracer("crewai.telemetry")
"crewai_version",
pkg_resources.get_distribution("crewai").version, created_span = tracer.start_span("Task Created")
)
self._add_attribute(span, "python_version", platform.python_version()) self._add_attribute(created_span, "crew_key", crew.key)
self._add_attribute(span, "crew_key", crew.key) self._add_attribute(created_span, "crew_id", str(crew.id))
self._add_attribute(span, "crew_id", str(crew.id)) self._add_attribute(created_span, "task_key", task.key)
self._add_attribute(span, "crew_process", crew.process) self._add_attribute(created_span, "task_id", str(task.id))
self._add_attribute(span, "crew_memory", crew.memory)
self._add_attribute(span, "crew_number_of_tasks", len(crew.tasks)) if crew.share_crew:
self._add_attribute(span, "crew_number_of_agents", len(crew.agents)) self._add_attribute(
if crew.share_crew: created_span, "formatted_description", task.description
)
self._add_attribute(
created_span, "formatted_expected_output", task.expected_output
)
created_span.set_status(Status(StatusCode.OK))
created_span.end()
span = tracer.start_span("Task Execution")
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "task_key", task.key)
self._add_attribute(span, "task_id", str(task.id))
if crew.share_crew:
self._add_attribute(span, "formatted_description", task.description)
self._add_attribute(
span, "formatted_expected_output", task.expected_output
)
return span
except Exception:
pass
return None
def task_ended(self, span: Span, task: Task, crew: Crew):
"""Records task execution in a crew."""
if self.ready:
try:
if crew.share_crew:
self._add_attribute(
span,
"task_output",
task.output.raw if task.output else "",
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
"""Records the repeated usage 'error' of a tool by an agent."""
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Repeated Usage")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
if llm:
self._add_attribute(span, "llm", llm.model)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def tool_usage(self, llm: Any, tool_name: str, attempts: int):
"""Records the usage of a tool by an agent."""
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Usage")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "tool_name", tool_name)
self._add_attribute(span, "attempts", attempts)
if llm:
self._add_attribute(span, "llm", llm.model)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def tool_usage_error(self, llm: Any):
"""Records the usage of a tool by an agent."""
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Tool Usage Error")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
if llm:
self._add_attribute(span, "llm", llm.model)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def individual_test_result_span(
self, crew: Crew, quality: float, exec_time: int, model_name: str
):
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Individual Test Result")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "quality", str(quality))
self._add_attribute(span, "exec_time", str(exec_time))
self._add_attribute(span, "model_name", model_name)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def test_execution_span(
self,
crew: Crew,
iterations: int,
inputs: dict[str, Any] | None,
model_name: str,
):
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Test Execution")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(span, "iterations", str(iterations))
self._add_attribute(span, "model_name", model_name)
if crew.share_crew:
self._add_attribute(
span, "inputs", json.dumps(inputs) if inputs else None
)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def deploy_signup_error_span(self):
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Deploy Signup Error")
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def start_deployment_span(self, uuid: Optional[str] = None):
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Start Deployment")
if uuid:
self._add_attribute(span, "uuid", uuid)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def create_crew_deployment_span(self):
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Create Crew Deployment")
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Get Crew Logs")
self._add_attribute(span, "log_type", log_type)
if uuid:
self._add_attribute(span, "uuid", uuid)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def remove_crew_span(self, uuid: Optional[str] = None):
if self.ready:
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Remove Crew")
if uuid:
self._add_attribute(span, "uuid", uuid)
span.set_status(Status(StatusCode.OK))
span.end()
except Exception:
pass
def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None):
"""Records the complete execution of a crew.
This is only collected if the user has opted-in to share the crew.
"""
self.crew_creation(crew, inputs)
if (self.ready) and (crew.share_crew):
try:
tracer = trace.get_tracer("crewai.telemetry")
span = tracer.start_span("Crew Execution")
self._add_attribute(
span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
)
self._add_attribute(span, "crew_key", crew.key)
self._add_attribute(span, "crew_id", str(crew.id))
self._add_attribute(
span, "crew_inputs", json.dumps(inputs) if inputs else None
)
self._add_attribute( self._add_attribute(
span, span,
"crew_agents", "crew_agents",
@@ -129,15 +496,8 @@ class Telemetry:
"max_iter": agent.max_iter, "max_iter": agent.max_iter,
"max_rpm": agent.max_rpm, "max_rpm": agent.max_rpm,
"i18n": agent.i18n.prompt_file, "i18n": agent.i18n.prompt_file,
"function_calling_llm": (
agent.function_calling_llm.model
if agent.function_calling_llm
else ""
),
"llm": agent.llm.model, "llm": agent.llm.model,
"delegation_enabled?": agent.allow_delegation, "delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution,
"max_retry_limit": agent.max_retry_limit,
"tools_names": [ "tools_names": [
tool.name.casefold() for tool in agent.tools or [] tool.name.casefold() for tool in agent.tools or []
], ],
@@ -152,15 +512,12 @@ class Telemetry:
json.dumps( json.dumps(
[ [
{ {
"key": task.key,
"id": str(task.id), "id": str(task.id),
"description": task.description, "description": task.description,
"expected_output": task.expected_output, "expected_output": task.expected_output,
"async_execution?": task.async_execution, "async_execution?": task.async_execution,
"human_input?": task.human_input, "human_input?": task.human_input,
"agent_role": ( "agent_role": task.agent.role if task.agent else "None",
task.agent.role if task.agent else "None"
),
"agent_key": task.agent.key if task.agent else None, "agent_key": task.agent.key if task.agent else None,
"context": ( "context": (
[task.description for task in task.context] [task.description for task in task.context]
@@ -175,433 +532,78 @@ class Telemetry:
] ]
), ),
) )
self._add_attribute(span, "platform", platform.platform()) return span
self._add_attribute(span, "platform_release", platform.release()) except Exception:
self._add_attribute(span, "platform_system", platform.system()) pass
self._add_attribute(span, "platform_version", platform.version())
self._add_attribute(span, "cpus", os.cpu_count()) def end_crew(self, crew, final_string_output):
if (self.ready) and (crew.share_crew):
try:
self._add_attribute( self._add_attribute(
span, "crew_inputs", json.dumps(inputs) if inputs else None crew._execution_span,
"crewai_version",
pkg_resources.get_distribution("crewai").version,
) )
else:
self._add_attribute( self._add_attribute(
span, crew._execution_span, "crew_output", final_string_output
"crew_agents", )
self._add_attribute(
crew._execution_span,
"crew_tasks_output",
json.dumps( json.dumps(
[ [
{ {
"key": agent.key,
"id": str(agent.id),
"role": agent.role,
"verbose?": agent.verbose,
"max_iter": agent.max_iter,
"max_rpm": agent.max_rpm,
"function_calling_llm": (
agent.function_calling_llm.model
if agent.function_calling_llm
else ""
),
"llm": agent.llm.model,
"delegation_enabled?": agent.allow_delegation,
"allow_code_execution?": agent.allow_code_execution,
"max_retry_limit": agent.max_retry_limit,
"tools_names": [
tool.name.casefold() for tool in agent.tools or []
],
}
for agent in crew.agents
]
),
)
self._add_attribute(
span,
"crew_tasks",
json.dumps(
[
{
"key": task.key,
"id": str(task.id), "id": str(task.id),
"async_execution?": task.async_execution, "description": task.description,
"human_input?": task.human_input, "output": task.output.raw_output,
"agent_role": (
task.agent.role if task.agent else "None"
),
"agent_key": task.agent.key if task.agent else None,
"tools_names": [
tool.name.casefold() for tool in task.tools or []
],
} }
for task in crew.tasks for task in crew.tasks
] ]
), ),
) )
span.set_status(Status(StatusCode.OK)) crew._execution_span.set_status(Status(StatusCode.OK))
span.end() crew._execution_span.end()
except Exception:
self._safe_telemetry_operation(operation) pass
def task_started(self, crew: Crew, task: Task) -> Span | None:
    """Emit a 'Task Created' span and open a 'Task Execution' span for a task.

    Returns the (still open) execution span, or None if telemetry is
    unavailable; the span is closed later by ``task_ended``.
    """

    def operation():
        tracer = trace.get_tracer("crewai.telemetry")
        identity = (
            ("crew_key", crew.key),
            ("crew_id", str(crew.id)),
            ("task_key", task.key),
            ("task_id", str(task.id)),
        )

        # Short-lived span marking the moment the task was created.
        creation = tracer.start_span("Task Created")
        for key, value in identity:
            self._add_attribute(creation, key, value)
        if crew.share_crew:
            # Task text is only shared when the user opted in via share_crew.
            self._add_attribute(creation, "formatted_description", task.description)
            self._add_attribute(
                creation, "formatted_expected_output", task.expected_output
            )
        creation.set_status(Status(StatusCode.OK))
        creation.end()

        # Long-lived span returned to the caller and ended by task_ended().
        execution = tracer.start_span("Task Execution")
        for key, value in identity:
            self._add_attribute(execution, key, value)
        if crew.share_crew:
            self._add_attribute(execution, "formatted_description", task.description)
            self._add_attribute(
                execution, "formatted_expected_output", task.expected_output
            )
        return execution

    return self._safe_telemetry_operation(operation)
def task_ended(self, span: Span, task: Task, crew: Crew):
    """Close the 'Task Execution' span opened by ``task_started``."""

    def operation():
        if crew.share_crew:
            # Raw output is only attached when the user opted in via share_crew.
            output_text = task.output.raw if task.output else ""
            self._add_attribute(span, "task_output", output_text)
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def tool_repeated_usage(self, llm: Any, tool_name: str, attempts: int):
    """Records the repeated usage 'error' of a tool by an agent."""

    def operation():
        tracer = trace.get_tracer("crewai.telemetry")
        span = tracer.start_span("Tool Repeated Usage")
        attributes = {
            "crewai_version": pkg_resources.get_distribution("crewai").version,
            "tool_name": tool_name,
            "attempts": attempts,
        }
        if llm:
            # Only the model identifier is recorded, never prompts or outputs.
            attributes["llm"] = llm.model
        for key, value in attributes.items():
            self._add_attribute(span, key, value)
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def tool_usage(self, llm: Any, tool_name: str, attempts: int):
    """Records the usage of a tool by an agent."""

    def operation():
        tracer = trace.get_tracer("crewai.telemetry")
        span = tracer.start_span("Tool Usage")
        attributes = {
            "crewai_version": pkg_resources.get_distribution("crewai").version,
            "tool_name": tool_name,
            "attempts": attempts,
        }
        if llm:
            # Only the model identifier is recorded, never prompts or outputs.
            attributes["llm"] = llm.model
        for key, value in attributes.items():
            self._add_attribute(span, key, value)
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def tool_usage_error(self, llm: Any):
    """Records an error that occurred while an agent was using a tool."""

    def operation():
        tracer = trace.get_tracer("crewai.telemetry")
        span = tracer.start_span("Tool Usage Error")
        self._add_attribute(
            span,
            "crewai_version",
            pkg_resources.get_distribution("crewai").version,
        )
        if llm:
            # Only the model identifier is recorded, never prompts or outputs.
            self._add_attribute(span, "llm", llm.model)
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def individual_test_result_span(
    self, crew: Crew, quality: float, exec_time: int, model_name: str
):
    """Record the result of a single iteration of a crew test run."""

    def operation():
        tracer = trace.get_tracer("crewai.telemetry")
        span = tracer.start_span("Crew Individual Test Result")
        attributes = {
            "crewai_version": pkg_resources.get_distribution("crewai").version,
            "crew_key": crew.key,
            "crew_id": str(crew.id),
            "quality": str(quality),
            "exec_time": str(exec_time),
            "model_name": model_name,
        }
        for key, value in attributes.items():
            self._add_attribute(span, key, value)
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def test_execution_span(
    self,
    crew: Crew,
    iterations: int,
    inputs: dict[str, Any] | None,
    model_name: str,
):
    """Record the start of a crew test run."""

    def operation():
        tracer = trace.get_tracer("crewai.telemetry")
        span = tracer.start_span("Crew Test Execution")
        attributes = {
            "crewai_version": pkg_resources.get_distribution("crewai").version,
            "crew_key": crew.key,
            "crew_id": str(crew.id),
            "iterations": str(iterations),
            "model_name": model_name,
        }
        for key, value in attributes.items():
            self._add_attribute(span, key, value)
        if crew.share_crew:
            # Raw inputs are only shared when the user opted in via share_crew.
            self._add_attribute(
                span, "inputs", json.dumps(inputs) if inputs else None
            )
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def deploy_signup_error_span(self):
    """Record that a signup attempt during deployment failed."""

    def operation():
        span = trace.get_tracer("crewai.telemetry").start_span("Deploy Signup Error")
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def start_deployment_span(self, uuid: Optional[str] = None):
    """Record the start of a deployment, optionally tagged with its uuid."""

    def operation():
        span = trace.get_tracer("crewai.telemetry").start_span("Start Deployment")
        # Truthiness check (not `is not None`) mirrors the rest of this class:
        # an empty-string uuid is treated the same as no uuid.
        if uuid:
            self._add_attribute(span, "uuid", uuid)
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def create_crew_deployment_span(self):
    """Record that a new crew deployment was created."""

    def operation():
        span = trace.get_tracer("crewai.telemetry").start_span(
            "Create Crew Deployment"
        )
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def get_crew_logs_span(self, uuid: Optional[str], log_type: str = "deployment"):
    """Record a request to fetch crew logs of the given type."""

    def operation():
        span = trace.get_tracer("crewai.telemetry").start_span("Get Crew Logs")
        self._add_attribute(span, "log_type", log_type)
        if uuid:
            self._add_attribute(span, "uuid", uuid)
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def remove_crew_span(self, uuid: Optional[str] = None):
    """Record the removal of a deployed crew, optionally tagged with its uuid."""

    def operation():
        span = trace.get_tracer("crewai.telemetry").start_span("Remove Crew")
        if uuid:
            self._add_attribute(span, "uuid", uuid)
        span.set_status(Status(StatusCode.OK))
        span.end()

    self._safe_telemetry_operation(operation)
def crew_execution_span(self, crew: Crew, inputs: dict[str, Any] | None):
    """Records the complete execution of a crew.

    This is only collected if the user has opted-in to share the crew.
    Returns the open span (closed later by end_crew) or None otherwise.
    """
    # Always record crew creation, regardless of the share_crew opt-in.
    self.crew_creation(crew, inputs)

    def operation():
        tracer = trace.get_tracer("crewai.telemetry")
        span = tracer.start_span("Crew Execution")

        self._add_attribute(
            span,
            "crewai_version",
            pkg_resources.get_distribution("crewai").version,
        )
        self._add_attribute(span, "crew_key", crew.key)
        self._add_attribute(span, "crew_id", str(crew.id))
        self._add_attribute(
            span, "crew_inputs", json.dumps(inputs) if inputs else None
        )

        agents_payload = [
            {
                "key": agent.key,
                "id": str(agent.id),
                "role": agent.role,
                "goal": agent.goal,
                "backstory": agent.backstory,
                "verbose?": agent.verbose,
                "max_iter": agent.max_iter,
                "max_rpm": agent.max_rpm,
                "i18n": agent.i18n.prompt_file,
                "llm": agent.llm.model,
                "delegation_enabled?": agent.allow_delegation,
                "tools_names": [
                    tool.name.casefold() for tool in agent.tools or []
                ],
            }
            for agent in crew.agents
        ]
        self._add_attribute(span, "crew_agents", json.dumps(agents_payload))

        tasks_payload = [
            {
                "id": str(task.id),
                "description": task.description,
                "expected_output": task.expected_output,
                "async_execution?": task.async_execution,
                "human_input?": task.human_input,
                "agent_role": task.agent.role if task.agent else "None",
                "agent_key": task.agent.key if task.agent else None,
                # Context is reported as the descriptions of the context tasks.
                "context": (
                    [context_task.description for context_task in task.context]
                    if task.context
                    else None
                ),
                "tools_names": [
                    tool.name.casefold() for tool in task.tools or []
                ],
            }
            for task in crew.tasks
        ]
        self._add_attribute(span, "crew_tasks", json.dumps(tasks_payload))
        return span

    if crew.share_crew:
        return self._safe_telemetry_operation(operation)
    return None
def end_crew(self, crew, final_string_output):
    """Attach the final output to the crew's execution span and close it.

    Only runs when the user opted in via share_crew; assumes
    crew._execution_span was set by crew_execution_span.
    """

    def operation():
        exec_span = crew._execution_span
        self._add_attribute(
            exec_span,
            "crewai_version",
            pkg_resources.get_distribution("crewai").version,
        )
        self._add_attribute(exec_span, "crew_output", final_string_output)

        tasks_output = [
            {
                "id": str(task.id),
                "description": task.description,
                "output": task.output.raw_output,
            }
            for task in crew.tasks
        ]
        self._add_attribute(exec_span, "crew_tasks_output", json.dumps(tasks_output))

        exec_span.set_status(Status(StatusCode.OK))
        exec_span.end()

    if crew.share_crew:
        self._safe_telemetry_operation(operation)
def _add_attribute(self, span, key, value): def _add_attribute(self, span, key, value):
"""Add an attribute to a span.""" """Add an attribute to a span."""
try:
def operation():
return span.set_attribute(key, value) return span.set_attribute(key, value)
except Exception:
self._safe_telemetry_operation(operation) pass
def flow_creation_span(self, flow_name: str): def flow_creation_span(self, flow_name: str):
def operation(): if self.ready:
tracer = trace.get_tracer("crewai.telemetry") try:
span = tracer.start_span("Flow Creation") tracer = trace.get_tracer("crewai.telemetry")
self._add_attribute(span, "flow_name", flow_name) span = tracer.start_span("Flow Creation")
span.set_status(Status(StatusCode.OK)) self._add_attribute(span, "flow_name", flow_name)
span.end() span.set_status(Status(StatusCode.OK))
span.end()
self._safe_telemetry_operation(operation) except Exception:
pass
def flow_plotting_span(self, flow_name: str, node_names: list[str]): def flow_plotting_span(self, flow_name: str, node_names: list[str]):
def operation(): if self.ready:
tracer = trace.get_tracer("crewai.telemetry") try:
span = tracer.start_span("Flow Plotting") tracer = trace.get_tracer("crewai.telemetry")
self._add_attribute(span, "flow_name", flow_name) span = tracer.start_span("Flow Plotting")
self._add_attribute(span, "node_names", json.dumps(node_names)) self._add_attribute(span, "flow_name", flow_name)
span.set_status(Status(StatusCode.OK)) self._add_attribute(span, "node_names", json.dumps(node_names))
span.end() span.set_status(Status(StatusCode.OK))
span.end()
self._safe_telemetry_operation(operation) except Exception:
pass
def flow_execution_span(self, flow_name: str, node_names: list[str]): def flow_execution_span(self, flow_name: str, node_names: list[str]):
def operation(): if self.ready:
tracer = trace.get_tracer("crewai.telemetry") try:
span = tracer.start_span("Flow Execution") tracer = trace.get_tracer("crewai.telemetry")
self._add_attribute(span, "flow_name", flow_name) span = tracer.start_span("Flow Execution")
self._add_attribute(span, "node_names", json.dumps(node_names)) self._add_attribute(span, "flow_name", flow_name)
span.set_status(Status(StatusCode.OK)) self._add_attribute(span, "node_names", json.dumps(node_names))
span.end() span.set_status(Status(StatusCode.OK))
span.end()
self._safe_telemetry_operation(operation) except Exception:
pass

View File

@@ -1 +0,0 @@
from .base_tool import BaseTool, tool

View File

@@ -0,0 +1,25 @@
from crewai.agents.agent_builder.utilities.base_agent_tool import BaseAgentTools
class AgentTools(BaseAgentTools):
    """Default tools around agent delegation"""

    def tools(self):
        # Imported lazily so langchain is only required when tools are built.
        from langchain.tools import StructuredTool

        coworkers = ", ".join(f"{agent.role}" for agent in self.agents)

        delegate = StructuredTool.from_function(
            func=self.delegate_work,
            name="Delegate work to coworker",
            description=self.i18n.tools("delegate_work").format(
                coworkers=coworkers
            ),
        )
        ask = StructuredTool.from_function(
            func=self.ask_question,
            name="Ask question to coworker",
            description=self.i18n.tools("ask_question").format(coworkers=coworkers),
        )
        return [delegate, ask]

View File

@@ -1,32 +0,0 @@
from crewai.tools.base_tool import BaseTool
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.utilities import I18N
from .delegate_work_tool import DelegateWorkTool
from .ask_question_tool import AskQuestionTool
class AgentTools:
    """Manager class for agent-related tools"""

    # NOTE(review): the I18N() default is evaluated once at class definition
    # and shared by every instance that omits i18n — confirm I18N is immutable.
    def __init__(self, agents: list[BaseAgent], i18n: I18N = I18N()):
        self.agents = agents
        self.i18n = i18n

    def tools(self) -> list[BaseTool]:
        """Get all available agent tools"""
        coworkers = ", ".join(f"{agent.role}" for agent in self.agents)
        return [
            DelegateWorkTool(
                agents=self.agents,
                i18n=self.i18n,
                description=self.i18n.tools("delegate_work").format(
                    coworkers=coworkers
                ),
            ),
            AskQuestionTool(
                agents=self.agents,
                i18n=self.i18n,
                description=self.i18n.tools("ask_question").format(
                    coworkers=coworkers
                ),
            ),
        ]

View File

@@ -1,26 +0,0 @@
from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
from typing import Optional
from pydantic import BaseModel, Field
# Input schema for AskQuestionTool: the validated arguments an agent must
# supply when asking a question of a coworker.
class AskQuestionToolSchema(BaseModel):
    question: str = Field(..., description="The question to ask")
    context: str = Field(..., description="The context for the question")
    coworker: str = Field(..., description="The role/name of the coworker to ask")
class AskQuestionTool(BaseAgentTool):
    """Tool for asking questions to coworkers"""

    name: str = "Ask question to coworker"
    args_schema: type[BaseModel] = AskQuestionToolSchema

    def _run(
        self,
        question: str,
        context: str,
        coworker: Optional[str] = None,
        **kwargs,
    ) -> str:
        # Normalize alternate argument spellings (e.g. "co_worker") first.
        resolved_coworker = self._get_coworker(coworker, **kwargs)
        return self._execute(resolved_coworker, question, context)

View File

@@ -1,29 +0,0 @@
from crewai.tools.agent_tools.base_agent_tools import BaseAgentTool
from typing import Optional
from pydantic import BaseModel, Field
# Input schema for DelegateWorkTool: the validated arguments an agent must
# supply when delegating a task to a coworker.
class DelegateWorkToolSchema(BaseModel):
    task: str = Field(..., description="The task to delegate")
    context: str = Field(..., description="The context for the task")
    coworker: str = Field(
        ..., description="The role/name of the coworker to delegate to"
    )
class DelegateWorkTool(BaseAgentTool):
    """Tool for delegating work to coworkers"""

    name: str = "Delegate work to coworker"
    args_schema: type[BaseModel] = DelegateWorkToolSchema

    def _run(
        self,
        task: str,
        context: str,
        coworker: Optional[str] = None,
        **kwargs,
    ) -> str:
        # Normalize alternate argument spellings (e.g. "co_worker") first.
        resolved_coworker = self._get_coworker(coworker, **kwargs)
        return self._execute(resolved_coworker, task, context)

View File

@@ -1,186 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, Callable, Type, get_args, get_origin
from langchain_core.tools import StructuredTool
from pydantic import BaseModel, ConfigDict, Field, validator
from pydantic import BaseModel as PydanticBaseModel
class BaseTool(BaseModel, ABC):
    # Sentinel type: marks that no explicit args_schema was supplied, so the
    # validator below should derive one from _run's annotations instead.
    class _ArgsSchemaPlaceholder(PydanticBaseModel):
        pass

    model_config = ConfigDict()

    name: str
    """The unique name of the tool that clearly communicates its purpose."""
    description: str
    """Used to tell the model how/when/why to use the tool."""
    args_schema: Type[PydanticBaseModel] = Field(default_factory=_ArgsSchemaPlaceholder)
    """The schema for the arguments that the tool accepts."""
    description_updated: bool = False
    """Flag to check if the description has been updated."""
    cache_function: Callable = lambda _args=None, _result=None: True
    """Function that will be used to determine if the tool should be cached, should return a boolean. If None, the tool will be cached."""
    result_as_answer: bool = False
    """Flag to check if the tool should be the final agent answer."""

    @validator("args_schema", always=True, pre=True)
    def _default_args_schema(
        cls, v: Type[PydanticBaseModel]
    ) -> Type[PydanticBaseModel]:
        # Keep a user-supplied schema; otherwise synthesize a pydantic model
        # from _run's type annotations (dropping the return annotation).
        if not isinstance(v, cls._ArgsSchemaPlaceholder):
            return v
        return type(
            f"{cls.__name__}Schema",
            (PydanticBaseModel,),
            {
                "__annotations__": {
                    k: v for k, v in cls._run.__annotations__.items() if k != "return"
                },
            },
        )

    def model_post_init(self, __context: Any) -> None:
        # Bake the argument summary into self.description once the model is built.
        self._generate_description()
        super().model_post_init(__context)

    def run(
        self,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """Public entry point: announce usage, then defer to the subclass _run."""
        print(f"Using Tool: {self.name}")
        return self._run(*args, **kwargs)

    @abstractmethod
    def _run(
        self,
        *args: Any,
        **kwargs: Any,
    ) -> Any:
        """Here goes the actual implementation of the tool."""

    def to_langchain(self) -> StructuredTool:
        # Ensure args_schema exists before handing the tool to LangChain.
        self._set_args_schema()
        return StructuredTool(
            name=self.name,
            description=self.description,
            args_schema=self.args_schema,
            func=self._run,
        )

    @classmethod
    def from_langchain(cls, tool: StructuredTool) -> "BaseTool":
        # Only the concrete Tool subclass can wrap a StructuredTool's function;
        # other subclasses must implement their own conversion.
        if cls == Tool:
            if tool.func is None:
                raise ValueError("StructuredTool must have a callable 'func'")
            return Tool(
                name=tool.name,
                description=tool.description,
                args_schema=tool.args_schema,
                func=tool.func,
            )
        raise NotImplementedError(f"from_langchain not implemented for {cls.__name__}")

    def _set_args_schema(self):
        # Fallback used by to_langchain(): derive a schema from _run's
        # annotations when none is set (mirrors _default_args_schema).
        if self.args_schema is None:
            class_name = f"{self.__class__.__name__}Schema"
            self.args_schema = type(
                class_name,
                (PydanticBaseModel,),
                {
                    "__annotations__": {
                        k: v
                        for k, v in self._run.__annotations__.items()
                        if k != "return"
                    },
                },
            )

    def _generate_description(self):
        # Summarize each argument's description and rendered type, then prepend
        # the tool name and that table to the human-written description.
        args_schema = {
            name: {
                "description": field.description,
                "type": BaseTool._get_arg_annotations(field.annotation),
            }
            for name, field in self.args_schema.model_fields.items()
        }
        self.description = f"Tool Name: {self.name}\nTool Arguments: {args_schema}\nTool Description: {self.description}"

    @staticmethod
    def _get_arg_annotations(annotation: type[Any] | None) -> str:
        # Render a type annotation as a readable string, recursing into
        # generic parameters (e.g. list[int] -> "list[int]").
        if annotation is None:
            return "None"
        origin = get_origin(annotation)
        args = get_args(annotation)
        if origin is None:
            return (
                annotation.__name__
                if hasattr(annotation, "__name__")
                else str(annotation)
            )
        if args:
            args_str = ", ".join(BaseTool._get_arg_annotations(arg) for arg in args)
            return f"{origin.__name__}[{args_str}]"
        return origin.__name__
# Concrete BaseTool backed by a plain callable.
class Tool(BaseTool):
    func: Callable
    """The function that will be executed when the tool is called."""

    def _run(self, *args: Any, **kwargs: Any) -> Any:
        # Forward all arguments untouched to the wrapped function.
        return self.func(*args, **kwargs)
def to_langchain(
    tools: list[BaseTool | StructuredTool],
) -> list[StructuredTool]:
    """Convert BaseTool items to StructuredTool; pass StructuredTool through."""
    converted = []
    for candidate in tools:
        if isinstance(candidate, BaseTool):
            converted.append(candidate.to_langchain())
        else:
            converted.append(candidate)
    return converted
def tool(*args):
    """
    Decorator to create a tool from a function.
    """

    def _make_with_name(tool_name: str) -> Callable:
        def _make_tool(f: Callable) -> BaseTool:
            # The docstring becomes the tool description and the annotations
            # become its schema, so both are mandatory.
            if f.__doc__ is None:
                raise ValueError("Function must have a docstring")
            if f.__annotations__ is None:
                raise ValueError("Function must have type annotations")

            class_name = "".join(tool_name.split()).title()
            arg_annotations = {
                k: v for k, v in f.__annotations__.items() if k != "return"
            }
            args_schema = type(
                class_name,
                (PydanticBaseModel,),
                {"__annotations__": arg_annotations},
            )
            return Tool(
                name=tool_name,
                description=f.__doc__,
                func=f,
                args_schema=args_schema,
            )

        return _make_tool

    if len(args) == 1 and callable(args[0]):
        # Bare usage: @tool — derive the name from the function itself.
        return _make_with_name(args[0].__name__)(args[0])
    if len(args) == 1 and isinstance(args[0], str):
        # Named usage: @tool("My Tool") — returns the actual decorator.
        return _make_with_name(args[0])
    raise ValueError("Invalid arguments")

View File

@@ -1,6 +1,5 @@
import ast import ast
import datetime import datetime
import os
import time import time
from difflib import SequenceMatcher from difflib import SequenceMatcher
from textwrap import dedent from textwrap import dedent
@@ -10,17 +9,14 @@ import crewai.utilities.events as events
from crewai.agents.tools_handler import ToolsHandler from crewai.agents.tools_handler import ToolsHandler
from crewai.task import Task from crewai.task import Task
from crewai.telemetry import Telemetry from crewai.telemetry import Telemetry
from crewai.tools import BaseTool
from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling from crewai.tools.tool_calling import InstructorToolCalling, ToolCalling
from crewai.tools.tool_usage_events import ToolUsageError, ToolUsageFinished from crewai.tools.tool_usage_events import ToolUsageError, ToolUsageFinished
from crewai.utilities import I18N, Converter, ConverterError, Printer from crewai.utilities import I18N, Converter, ConverterError, Printer
agentops = None try:
if os.environ.get("AGENTOPS_API_KEY"): import agentops
try: except ImportError:
import agentops # type: ignore agentops = None
except ImportError:
pass
OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o", "o1-preview", "o1-mini"] OPENAI_BIGGER_MODELS = ["gpt-4", "gpt-4o", "o1-preview", "o1-mini"]
@@ -50,7 +46,7 @@ class ToolUsage:
def __init__( def __init__(
self, self,
tools_handler: ToolsHandler, tools_handler: ToolsHandler,
tools: List[BaseTool], tools: List[Any],
original_tools: List[Any], original_tools: List[Any],
tools_description: str, tools_description: str,
tools_names: str, tools_names: str,
@@ -59,7 +55,7 @@ class ToolUsage:
agent: Any, agent: Any,
action: Any, action: Any,
) -> None: ) -> None:
self._i18n: I18N = agent.i18n self._i18n: I18N = I18N()
self._printer: Printer = Printer() self._printer: Printer = Printer()
self._telemetry: Telemetry = Telemetry() self._telemetry: Telemetry = Telemetry()
self._run_attempts: int = 1 self._run_attempts: int = 1
@@ -299,7 +295,19 @@ class ToolUsage:
"""Render the tool name and description in plain text.""" """Render the tool name and description in plain text."""
descriptions = [] descriptions = []
for tool in self.tools: for tool in self.tools:
descriptions.append(tool.description) args = {
k: {k2: v2 for k2, v2 in v.items() if k2 in ["description", "type"]}
for k, v in tool.args.items()
}
descriptions.append(
"\n".join(
[
f"Tool Name: {tool.name.lower()}",
f"Tool Description: {tool.description}",
f"Tool Arguments: {args}",
]
)
)
return "\n--\n".join(descriptions) return "\n--\n".join(descriptions)
def _function_calling(self, tool_string: str): def _function_calling(self, tool_string: str):

View File

@@ -2,14 +2,13 @@ from datetime import datetime, date
import json import json
from uuid import UUID from uuid import UUID
from pydantic import BaseModel from pydantic import BaseModel
from decimal import Decimal
class CrewJSONEncoder(json.JSONEncoder): class CrewJSONEncoder(json.JSONEncoder):
def default(self, obj): def default(self, obj):
if isinstance(obj, BaseModel): if isinstance(obj, BaseModel):
return self._handle_pydantic_model(obj) return self._handle_pydantic_model(obj)
elif isinstance(obj, UUID) or isinstance(obj, Decimal): elif isinstance(obj, UUID):
return str(obj) return str(obj)
elif isinstance(obj, datetime) or isinstance(obj, date): elif isinstance(obj, datetime) or isinstance(obj, date):

View File

@@ -1,4 +1,3 @@
import os
from typing import List from typing import List
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
@@ -7,26 +6,16 @@ from crewai.utilities import Converter
from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser from crewai.utilities.pydantic_schema_parser import PydanticSchemaParser
def mock_agent_ops_provider(): agentops = None
def track_agent(*args, **kwargs): try:
from agentops import track_agent
except ImportError:
def track_agent(name):
def noop(f): def noop(f):
return f return f
return noop return noop
return track_agent
agentops = None
if os.environ.get("AGENTOPS_API_KEY"):
try:
from agentops import track_agent
except ImportError:
track_agent = mock_agent_ops_provider()
else:
track_agent = mock_agent_ops_provider()
class Entity(BaseModel): class Entity(BaseModel):
name: str = Field(description="The name of the entity.") name: str = Field(description="The name of the entity.")

View File

@@ -5,6 +5,7 @@ from unittest import mock
from unittest.mock import patch from unittest.mock import patch
import pytest import pytest
from crewai_tools import tool
from crewai import Agent, Crew, Task from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler from crewai.agents.cache import CacheHandler
@@ -13,7 +14,6 @@ from crewai.agents.parser import AgentAction, CrewAgentParser, OutputParserExcep
from crewai.llm import LLM from crewai.llm import LLM
from crewai.tools.tool_calling import InstructorToolCalling from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage from crewai.tools.tool_usage import ToolUsage
from crewai.tools import tool
from crewai.tools.tool_usage_events import ToolUsageFinished from crewai.tools.tool_usage_events import ToolUsageFinished
from crewai.utilities import RPMController from crewai.utilities import RPMController
from crewai.utilities.events import Emitter from crewai.utilities.events import Emitter
@@ -277,10 +277,9 @@ def test_cache_hitting():
"multiplier-{'first_number': 12, 'second_number': 3}": 36, "multiplier-{'first_number': 12, 'second_number': 3}": 36,
} }
with ( with patch.object(CacheHandler, "read") as read, patch.object(
patch.object(CacheHandler, "read") as read, Emitter, "emit"
patch.object(Emitter, "emit") as emit, ) as emit:
):
read.return_value = "0" read.return_value = "0"
task = Task( task = Task(
description="What is 2 times 6? Ignore correctness and just return the result of the multiplication tool, you must use the tool.", description="What is 2 times 6? Ignore correctness and just return the result of the multiplication tool, you must use the tool.",
@@ -605,7 +604,7 @@ def test_agent_respect_the_max_rpm_set(capsys):
def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys): def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
from unittest.mock import patch from unittest.mock import patch
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def get_final_answer() -> float: def get_final_answer() -> float:
@@ -643,7 +642,7 @@ def test_agent_respect_the_max_rpm_set_over_crew_rpm(capsys):
def test_agent_without_max_rpm_respet_crew_rpm(capsys): def test_agent_without_max_rpm_respet_crew_rpm(capsys):
from unittest.mock import patch from unittest.mock import patch
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def get_final_answer() -> float: def get_final_answer() -> float:
@@ -697,7 +696,7 @@ def test_agent_without_max_rpm_respet_crew_rpm(capsys):
def test_agent_error_on_parsing_tool(capsys): def test_agent_error_on_parsing_tool(capsys):
from unittest.mock import patch from unittest.mock import patch
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def get_final_answer() -> float: def get_final_answer() -> float:
@@ -740,7 +739,7 @@ def test_agent_error_on_parsing_tool(capsys):
def test_agent_remembers_output_format_after_using_tools_too_many_times(): def test_agent_remembers_output_format_after_using_tools_too_many_times():
from unittest.mock import patch from unittest.mock import patch
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def get_final_answer() -> float: def get_final_answer() -> float:
@@ -864,16 +863,11 @@ def test_agent_function_calling_llm():
from crewai.tools.tool_usage import ToolUsage from crewai.tools.tool_usage import ToolUsage
with ( with patch.object(
patch.object( instructor, "from_litellm", wraps=instructor.from_litellm
instructor, "from_litellm", wraps=instructor.from_litellm ) as mock_from_litellm, patch.object(
) as mock_from_litellm, ToolUsage, "_original_tool_calling", side_effect=Exception("Forced exception")
patch.object( ) as mock_original_tool_calling:
ToolUsage,
"_original_tool_calling",
side_effect=Exception("Forced exception"),
) as mock_original_tool_calling,
):
crew.kickoff() crew.kickoff()
mock_from_litellm.assert_called() mock_from_litellm.assert_called()
mock_original_tool_calling.assert_called() mock_original_tool_calling.assert_called()
@@ -900,7 +894,7 @@ def test_agent_count_formatting_error():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_tool_result_as_answer_is_the_final_answer_for_the_agent(): def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
from crewai.tools import BaseTool from crewai_tools import BaseTool
class MyCustomTool(BaseTool): class MyCustomTool(BaseTool):
name: str = "Get Greetings" name: str = "Get Greetings"
@@ -930,7 +924,7 @@ def test_tool_result_as_answer_is_the_final_answer_for_the_agent():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_tool_usage_information_is_appended_to_agent(): def test_tool_usage_information_is_appended_to_agent():
from crewai.tools import BaseTool from crewai_tools import BaseTool
class MyCustomTool(BaseTool): class MyCustomTool(BaseTool):
name: str = "Decide Greetings" name: str = "Decide Greetings"

View File

@@ -3,7 +3,7 @@
import pytest import pytest
from crewai.agent import Agent from crewai.agent import Agent
from crewai.tools.agent_tools.agent_tools import AgentTools from crewai.tools.agent_tools import AgentTools
researcher = Agent( researcher = Agent(
role="researcher", role="researcher",
@@ -11,14 +11,12 @@ researcher = Agent(
backstory="You're an expert researcher, specialized in technology", backstory="You're an expert researcher, specialized in technology",
allow_delegation=False, allow_delegation=False,
) )
tools = AgentTools(agents=[researcher]).tools() tools = AgentTools(agents=[researcher])
delegate_tool = tools[0]
ask_tool = tools[1]
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_delegate_work(): def test_delegate_work():
result = delegate_tool.run( result = tools.delegate_work(
coworker="researcher", coworker="researcher",
task="share your take on AI Agents", task="share your take on AI Agents",
context="I heard you hate them", context="I heard you hate them",
@@ -32,8 +30,8 @@ def test_delegate_work():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_delegate_work_with_wrong_co_worker_variable(): def test_delegate_work_with_wrong_co_worker_variable():
result = delegate_tool.run( result = tools.delegate_work(
coworker="researcher", co_worker="researcher",
task="share your take on AI Agents", task="share your take on AI Agents",
context="I heard you hate them", context="I heard you hate them",
) )
@@ -46,7 +44,7 @@ def test_delegate_work_with_wrong_co_worker_variable():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_ask_question(): def test_ask_question():
result = ask_tool.run( result = tools.ask_question(
coworker="researcher", coworker="researcher",
question="do you hate AI Agents?", question="do you hate AI Agents?",
context="I heard you LOVE them", context="I heard you LOVE them",
@@ -60,8 +58,8 @@ def test_ask_question():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_ask_question_with_wrong_co_worker_variable(): def test_ask_question_with_wrong_co_worker_variable():
result = ask_tool.run( result = tools.ask_question(
coworker="researcher", co_worker="researcher",
question="do you hate AI Agents?", question="do you hate AI Agents?",
context="I heard you LOVE them", context="I heard you LOVE them",
) )
@@ -74,8 +72,8 @@ def test_ask_question_with_wrong_co_worker_variable():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_delegate_work_withwith_coworker_as_array(): def test_delegate_work_withwith_coworker_as_array():
result = delegate_tool.run( result = tools.delegate_work(
coworker="[researcher]", co_worker="[researcher]",
task="share your take on AI Agents", task="share your take on AI Agents",
context="I heard you hate them", context="I heard you hate them",
) )
@@ -88,8 +86,8 @@ def test_delegate_work_withwith_coworker_as_array():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_ask_question_with_coworker_as_array(): def test_ask_question_with_coworker_as_array():
result = ask_tool.run( result = tools.ask_question(
coworker="[researcher]", co_worker="[researcher]",
question="do you hate AI Agents?", question="do you hate AI Agents?",
context="I heard you LOVE them", context="I heard you LOVE them",
) )
@@ -101,7 +99,7 @@ def test_ask_question_with_coworker_as_array():
def test_delegate_work_to_wrong_agent(): def test_delegate_work_to_wrong_agent():
result = ask_tool.run( result = tools.ask_question(
coworker="writer", coworker="writer",
question="share your take on AI Agents", question="share your take on AI Agents",
context="I heard you hate them", context="I heard you hate them",
@@ -114,7 +112,7 @@ def test_delegate_work_to_wrong_agent():
def test_ask_question_to_wrong_agent(): def test_ask_question_to_wrong_agent():
result = ask_tool.run( result = tools.ask_question(
coworker="writer", coworker="writer",
question="do you hate AI Agents?", question="do you hate AI Agents?",
context="I heard you LOVE them", context="I heard you LOVE them",

0
tests/agent_tools/lol.py Normal file
View File

View File

@@ -2,7 +2,6 @@ import hashlib
from typing import Any, List, Optional from typing import Any, List, Optional
from crewai.agents.agent_builder.base_agent import BaseAgent from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tools.base_tool import BaseTool
from pydantic import BaseModel from pydantic import BaseModel
@@ -11,13 +10,13 @@ class TestAgent(BaseAgent):
self, self,
task: Any, task: Any,
context: Optional[str] = None, context: Optional[str] = None,
tools: Optional[List[BaseTool]] = None, tools: Optional[List[Any]] = None,
) -> str: ) -> str:
return "" return ""
def create_agent_executor(self, tools=None) -> None: ... def create_agent_executor(self, tools=None) -> None: ...
def _parse_tools(self, tools: List[BaseTool]) -> List[BaseTool]: def _parse_tools(self, tools: List[Any]) -> List[Any]:
return [] return []
def get_delegation_tools(self, agents: List["BaseAgent"]): ... def get_delegation_tools(self, agents: List["BaseAgent"]): ...

View File

@@ -1,9 +1,7 @@
from pathlib import Path
from unittest import mock from unittest import mock
import pytest import pytest
from click.testing import CliRunner from click.testing import CliRunner
from crewai.cli.cli import ( from crewai.cli.cli import (
deploy_create, deploy_create,
deploy_list, deploy_list,
@@ -11,7 +9,6 @@ from crewai.cli.cli import (
deploy_push, deploy_push,
deploy_remove, deploy_remove,
deply_status, deply_status,
flow_add_crew,
reset_memories, reset_memories,
signup, signup,
test, test,
@@ -280,42 +277,3 @@ def test_deploy_remove_no_uuid(command, runner):
assert result.exit_code == 0 assert result.exit_code == 0
mock_deploy.remove_crew.assert_called_once_with(uuid=None) mock_deploy.remove_crew.assert_called_once_with(uuid=None)
@mock.patch("crewai.cli.add_crew_to_flow.create_embedded_crew")
@mock.patch("pathlib.Path.exists", return_value=True) # Mock the existence check
def test_flow_add_crew(mock_path_exists, mock_create_embedded_crew, runner):
crew_name = "new_crew"
result = runner.invoke(flow_add_crew, [crew_name])
# Log the output for debugging
print(result.output)
assert result.exit_code == 0, f"Command failed with output: {result.output}"
assert f"Adding crew {crew_name} to the flow" in result.output
# Verify that create_embedded_crew was called with the correct arguments
mock_create_embedded_crew.assert_called_once()
call_args, call_kwargs = mock_create_embedded_crew.call_args
assert call_args[0] == crew_name
assert "parent_folder" in call_kwargs
assert isinstance(call_kwargs["parent_folder"], Path)
def test_add_crew_to_flow_not_in_root(runner):
# Simulate not being in the root of a flow project
with mock.patch("pathlib.Path.exists", autospec=True) as mock_exists:
# Mock Path.exists to return False when checking for pyproject.toml
def exists_side_effect(self):
if self.name == "pyproject.toml":
return False # Simulate that pyproject.toml does not exist
return True # All other paths exist
mock_exists.side_effect = exists_side_effect
result = runner.invoke(flow_add_crew, ["new_crew"])
assert result.exit_code != 0
assert "This command must be run from the root of a flow project." in str(
result.output
)

View File

@@ -1,109 +0,0 @@
import unittest
import json
import tempfile
import shutil
from pathlib import Path
from crewai.cli.config import Settings
class TestSettings(unittest.TestCase):
def setUp(self):
self.test_dir = Path(tempfile.mkdtemp())
self.config_path = self.test_dir / "settings.json"
def tearDown(self):
shutil.rmtree(self.test_dir)
def test_empty_initialization(self):
settings = Settings(config_path=self.config_path)
self.assertIsNone(settings.tool_repository_username)
self.assertIsNone(settings.tool_repository_password)
def test_initialization_with_data(self):
settings = Settings(
config_path=self.config_path,
tool_repository_username="user1"
)
self.assertEqual(settings.tool_repository_username, "user1")
self.assertIsNone(settings.tool_repository_password)
def test_initialization_with_existing_file(self):
self.config_path.parent.mkdir(parents=True, exist_ok=True)
with self.config_path.open("w") as f:
json.dump({"tool_repository_username": "file_user"}, f)
settings = Settings(config_path=self.config_path)
self.assertEqual(settings.tool_repository_username, "file_user")
def test_merge_file_and_input_data(self):
self.config_path.parent.mkdir(parents=True, exist_ok=True)
with self.config_path.open("w") as f:
json.dump({
"tool_repository_username": "file_user",
"tool_repository_password": "file_pass"
}, f)
settings = Settings(
config_path=self.config_path,
tool_repository_username="new_user"
)
self.assertEqual(settings.tool_repository_username, "new_user")
self.assertEqual(settings.tool_repository_password, "file_pass")
def test_dump_new_settings(self):
settings = Settings(
config_path=self.config_path,
tool_repository_username="user1"
)
settings.dump()
with self.config_path.open("r") as f:
saved_data = json.load(f)
self.assertEqual(saved_data["tool_repository_username"], "user1")
def test_update_existing_settings(self):
self.config_path.parent.mkdir(parents=True, exist_ok=True)
with self.config_path.open("w") as f:
json.dump({"existing_setting": "value"}, f)
settings = Settings(
config_path=self.config_path,
tool_repository_username="user1"
)
settings.dump()
with self.config_path.open("r") as f:
saved_data = json.load(f)
self.assertEqual(saved_data["existing_setting"], "value")
self.assertEqual(saved_data["tool_repository_username"], "user1")
def test_none_values(self):
settings = Settings(
config_path=self.config_path,
tool_repository_username=None
)
settings.dump()
with self.config_path.open("r") as f:
saved_data = json.load(f)
self.assertIsNone(saved_data.get("tool_repository_username"))
def test_invalid_json_in_config(self):
self.config_path.parent.mkdir(parents=True, exist_ok=True)
with self.config_path.open("w") as f:
f.write("invalid json")
try:
settings = Settings(config_path=self.config_path)
self.assertIsNone(settings.tool_repository_username)
except json.JSONDecodeError:
self.fail("Settings initialization should handle invalid JSON")
def test_empty_config_file(self):
self.config_path.parent.mkdir(parents=True, exist_ok=True)
self.config_path.touch()
settings = Settings(config_path=self.config_path)
self.assertIsNone(settings.tool_repository_username)

View File

@@ -75,14 +75,13 @@ def test_install_success(mock_get, mock_subprocess_run):
[ [
"uv", "uv",
"add", "add",
"--index", "--extra-index-url",
"sample-repo=https://example.com/repo", "https://app.crewai.com/pypi/sample-repo",
"sample-tool", "sample-tool",
], ],
capture_output=False, capture_output=False,
text=True, text=True,
check=True, check=True,
env=unittest.mock.ANY
) )
assert "Succesfully installed sample-tool" in output assert "Succesfully installed sample-tool" in output

View File

@@ -9,7 +9,6 @@ from unittest.mock import MagicMock, patch
import instructor import instructor
import pydantic_core import pydantic_core
import pytest import pytest
from crewai.agent import Agent from crewai.agent import Agent
from crewai.agents.cache import CacheHandler from crewai.agents.cache import CacheHandler
from crewai.crew import Crew from crewai.crew import Crew
@@ -456,7 +455,7 @@ def test_crew_verbose_output(capsys):
def test_cache_hitting_between_agents(): def test_cache_hitting_between_agents():
from unittest.mock import call, patch from unittest.mock import call, patch
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def multiplier(first_number: int, second_number: int) -> float: def multiplier(first_number: int, second_number: int) -> float:
@@ -498,8 +497,7 @@ def test_cache_hitting_between_agents():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_api_calls_throttling(capsys): def test_api_calls_throttling(capsys):
from unittest.mock import patch from unittest.mock import patch
from crewai_tools import tool
from crewai.tools import tool
@tool @tool
def get_final_answer() -> float: def get_final_answer() -> float:
@@ -781,14 +779,11 @@ def test_async_task_execution_call_count():
list_important_history.output = mock_task_output list_important_history.output = mock_task_output
write_article.output = mock_task_output write_article.output = mock_task_output
with ( with patch.object(
patch.object( Task, "execute_sync", return_value=mock_task_output
Task, "execute_sync", return_value=mock_task_output ) as mock_execute_sync, patch.object(
) as mock_execute_sync, Task, "execute_async", return_value=mock_future
patch.object( ) as mock_execute_async:
Task, "execute_async", return_value=mock_future
) as mock_execute_async,
):
crew.kickoff() crew.kickoff()
assert mock_execute_async.call_count == 2 assert mock_execute_async.call_count == 2
@@ -1110,8 +1105,7 @@ def test_dont_set_agents_step_callback_if_already_set():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_crew_function_calling_llm(): def test_crew_function_calling_llm():
from unittest.mock import patch from unittest.mock import patch
from crewai_tools import tool
from crewai.tools import tool
llm = "gpt-4o" llm = "gpt-4o"
@@ -1146,7 +1140,7 @@ def test_crew_function_calling_llm():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_task_with_no_arguments(): def test_task_with_no_arguments():
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def return_data() -> str: def return_data() -> str:
@@ -1309,9 +1303,8 @@ def test_hierarchical_crew_creation_tasks_with_agents():
assert crew.manager_agent is not None assert crew.manager_agent is not None
assert crew.manager_agent.tools is not None assert crew.manager_agent.tools is not None
assert ( assert crew.manager_agent.tools[0].description.startswith(
"Delegate a specific task to one of the following coworkers: Senior Writer\n" "Delegate a specific task to one of the following coworkers: Senior Writer"
in crew.manager_agent.tools[0].description
) )
@@ -1338,9 +1331,8 @@ def test_hierarchical_crew_creation_tasks_with_async_execution():
crew.kickoff() crew.kickoff()
assert crew.manager_agent is not None assert crew.manager_agent is not None
assert crew.manager_agent.tools is not None assert crew.manager_agent.tools is not None
assert ( assert crew.manager_agent.tools[0].description.startswith(
"Delegate a specific task to one of the following coworkers: Senior Writer\n" "Delegate a specific task to one of the following coworkers: Senior Writer\n"
in crew.manager_agent.tools[0].description
) )
@@ -1372,9 +1364,8 @@ def test_hierarchical_crew_creation_tasks_with_sync_last():
crew.kickoff() crew.kickoff()
assert crew.manager_agent is not None assert crew.manager_agent is not None
assert crew.manager_agent.tools is not None assert crew.manager_agent.tools is not None
assert ( assert crew.manager_agent.tools[0].description.startswith(
"Delegate a specific task to one of the following coworkers: Senior Writer, Researcher, CEO\n" "Delegate a specific task to one of the following coworkers: Senior Writer, Researcher, CEO\n"
in crew.manager_agent.tools[0].description
) )
@@ -1457,6 +1448,52 @@ def test_crew_does_not_interpolate_without_inputs():
interpolate_task_inputs.assert_not_called() interpolate_task_inputs.assert_not_called()
# def test_crew_partial_inputs():
# agent = Agent(
# role="{topic} Researcher",
# goal="Express hot takes on {topic}.",
# backstory="You have a lot of experience with {topic}.",
# )
# task = Task(
# description="Give me an analysis around {topic}.",
# expected_output="{points} bullet points about {topic}.",
# )
# crew = Crew(agents=[agent], tasks=[task], inputs={"topic": "AI"})
# inputs = {"topic": "AI"}
# crew._interpolate_inputs(inputs=inputs) # Manual call for now
# assert crew.tasks[0].description == "Give me an analysis around AI."
# assert crew.tasks[0].expected_output == "{points} bullet points about AI."
# assert crew.agents[0].role == "AI Researcher"
# assert crew.agents[0].goal == "Express hot takes on AI."
# assert crew.agents[0].backstory == "You have a lot of experience with AI."
# def test_crew_invalid_inputs():
# agent = Agent(
# role="{topic} Researcher",
# goal="Express hot takes on {topic}.",
# backstory="You have a lot of experience with {topic}.",
# )
# task = Task(
# description="Give me an analysis around {topic}.",
# expected_output="{points} bullet points about {topic}.",
# )
# crew = Crew(agents=[agent], tasks=[task], inputs={"subject": "AI"})
# inputs = {"subject": "AI"}
# crew._interpolate_inputs(inputs=inputs) # Manual call for now
# assert crew.tasks[0].description == "Give me an analysis around {topic}."
# assert crew.tasks[0].expected_output == "{points} bullet points about {topic}."
# assert crew.agents[0].role == "{topic} Researcher"
# assert crew.agents[0].goal == "Express hot takes on {topic}."
# assert crew.agents[0].backstory == "You have a lot of experience with {topic}."
def test_task_callback_on_crew(): def test_task_callback_on_crew():
from unittest.mock import MagicMock, patch from unittest.mock import MagicMock, patch
@@ -1497,7 +1534,7 @@ def test_task_callback_on_crew():
def test_tools_with_custom_caching(): def test_tools_with_custom_caching():
from unittest.mock import patch from unittest.mock import patch
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def multiplcation_tool(first_number: int, second_number: int) -> int: def multiplcation_tool(first_number: int, second_number: int) -> int:
@@ -1699,7 +1736,7 @@ def test_manager_agent_in_agents_raises_exception():
def test_manager_agent_with_tools_raises_exception(): def test_manager_agent_with_tools_raises_exception():
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def testing_tool(first_number: int, second_number: int) -> int: def testing_tool(first_number: int, second_number: int) -> int:
@@ -1733,10 +1770,7 @@ def test_manager_agent_with_tools_raises_exception():
@patch("crewai.crew.Crew.kickoff") @patch("crewai.crew.Crew.kickoff")
@patch("crewai.crew.CrewTrainingHandler") @patch("crewai.crew.CrewTrainingHandler")
@patch("crewai.crew.TaskEvaluator") @patch("crewai.crew.TaskEvaluator")
@patch("crewai.crew.Crew.copy") def test_crew_train_success(task_evaluator, crew_training_handler, kickoff):
def test_crew_train_success(
copy_mock, task_evaluator, crew_training_handler, kickoff_mock
):
task = Task( task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
expected_output="5 bullet points with a paragraph for each idea.", expected_output="5 bullet points with a paragraph for each idea.",
@@ -1747,19 +1781,9 @@ def test_crew_train_success(
agents=[researcher, writer], agents=[researcher, writer],
tasks=[task], tasks=[task],
) )
# Create a mock for the copied crew
copy_mock.return_value = crew
crew.train( crew.train(
n_iterations=2, inputs={"topic": "AI"}, filename="trained_agents_data.pkl" n_iterations=2, inputs={"topic": "AI"}, filename="trained_agents_data.pkl"
) )
# Ensure kickoff is called on the copied crew
kickoff_mock.assert_has_calls(
[mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
)
task_evaluator.assert_has_calls( task_evaluator.assert_has_calls(
[ [
mock.call(researcher), mock.call(researcher),
@@ -1798,6 +1822,10 @@ def test_crew_train_success(
] ]
) )
kickoff.assert_has_calls(
[mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
)
def test_crew_train_error(): def test_crew_train_error():
task = Task( task = Task(
@@ -1812,7 +1840,7 @@ def test_crew_train_error():
) )
with pytest.raises(TypeError) as e: with pytest.raises(TypeError) as e:
crew.train() # type: ignore purposefully throwing err crew.train()
assert "train() missing 1 required positional argument: 'n_iterations'" in str( assert "train() missing 1 required positional argument: 'n_iterations'" in str(
e e
) )
@@ -2508,9 +2536,8 @@ def test_conditional_should_execute():
@mock.patch("crewai.crew.CrewEvaluator") @mock.patch("crewai.crew.CrewEvaluator")
@mock.patch("crewai.crew.Crew.copy")
@mock.patch("crewai.crew.Crew.kickoff") @mock.patch("crewai.crew.Crew.kickoff")
def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator): def test_crew_testing_function(mock_kickoff, crew_evaluator):
task = Task( task = Task(
description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.", description="Come up with a list of 5 interesting ideas to explore for an article, then write one amazing paragraph highlight for each idea that showcases how good an article about this topic could be. Return the list of ideas with their paragraph and your notes.",
expected_output="5 bullet points with a paragraph for each idea.", expected_output="5 bullet points with a paragraph for each idea.",
@@ -2521,15 +2548,11 @@ def test_crew_testing_function(kickoff_mock, copy_mock, crew_evaluator):
agents=[researcher], agents=[researcher],
tasks=[task], tasks=[task],
) )
# Create a mock for the copied crew
copy_mock.return_value = crew
n_iterations = 2 n_iterations = 2
crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"}) crew.test(n_iterations, openai_model_name="gpt-4o-mini", inputs={"topic": "AI"})
# Ensure kickoff is called on the copied crew assert len(mock_kickoff.mock_calls) == n_iterations
kickoff_mock.assert_has_calls( mock_kickoff.assert_has_calls(
[mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})] [mock.call(inputs={"topic": "AI"}), mock.call(inputs={"topic": "AI"})]
) )

File diff suppressed because one or more lines are too long

View File

@@ -1,5 +1,5 @@
import pytest import pytest
from unittest.mock import patch
from crewai.agent import Agent from crewai.agent import Agent
from crewai.crew import Crew from crewai.crew import Crew
from crewai.memory.short_term.short_term_memory import ShortTermMemory from crewai.memory.short_term.short_term_memory import ShortTermMemory
@@ -26,6 +26,7 @@ def short_term_memory():
return ShortTermMemory(crew=Crew(agents=[agent], tasks=[task])) return ShortTermMemory(crew=Crew(agents=[agent], tasks=[task]))
@pytest.mark.vcr(filter_headers=["authorization"])
def test_save_and_search(short_term_memory): def test_save_and_search(short_term_memory):
memory = ShortTermMemoryItem( memory = ShortTermMemoryItem(
data="""test value test value test value test value test value test value data="""test value test value test value test value test value test value
@@ -34,28 +35,12 @@ def test_save_and_search(short_term_memory):
agent="test_agent", agent="test_agent",
metadata={"task": "test_task"}, metadata={"task": "test_task"},
) )
short_term_memory.save(
value=memory.data,
metadata=memory.metadata,
agent=memory.agent,
)
with patch.object(ShortTermMemory, "save") as mock_save: find = short_term_memory.search("test value", score_threshold=0.01)[0]
short_term_memory.save( assert find["context"] == memory.data, "Data value mismatch."
value=memory.data, assert find["metadata"]["agent"] == "test_agent", "Agent value mismatch."
metadata=memory.metadata,
agent=memory.agent,
)
mock_save.assert_called_once_with(
value=memory.data,
metadata=memory.metadata,
agent=memory.agent,
)
expected_result = [
{
"context": memory.data,
"metadata": {"agent": "test_agent"},
"score": 0.95,
}
]
with patch.object(ShortTermMemory, "search", return_value=expected_result):
find = short_term_memory.search("test value", score_threshold=0.01)[0]
assert find["context"] == memory.data, "Data value mismatch."
assert find["metadata"]["agent"] == "test_agent", "Agent value mismatch."

View File

@@ -15,7 +15,7 @@ from pydantic_core import ValidationError
def test_task_tool_reflect_agent_tools(): def test_task_tool_reflect_agent_tools():
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def fake_tool() -> None: def fake_tool() -> None:
@@ -39,7 +39,7 @@ def test_task_tool_reflect_agent_tools():
def test_task_tool_takes_precedence_over_agent_tools(): def test_task_tool_takes_precedence_over_agent_tools():
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def fake_tool() -> None: def fake_tool() -> None:
@@ -656,7 +656,7 @@ def test_increment_delegations_for_sequential_process():
@pytest.mark.vcr(filter_headers=["authorization"]) @pytest.mark.vcr(filter_headers=["authorization"])
def test_increment_tool_errors(): def test_increment_tool_errors():
from crewai.tools import tool from crewai_tools import tool
@tool @tool
def scoring_examples() -> None: def scoring_examples() -> None:

View File

@@ -1,109 +0,0 @@
from typing import Callable
from crewai.tools import BaseTool, tool
def test_creating_a_tool_using_annotation():
@tool("Name of my tool")
def my_tool(question: str) -> str:
"""Clear description for what this tool is useful for, you agent will need this information to use it."""
return question
# Assert all the right attributes were defined
assert my_tool.name == "Name of my tool"
assert (
my_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it."
)
assert my_tool.args_schema.schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
assert (
my_tool.func("What is the meaning of life?") == "What is the meaning of life?"
)
# Assert the langchain tool conversion worked as expected
converted_tool = my_tool.to_langchain()
assert converted_tool.name == "Name of my tool"
assert (
converted_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it."
)
assert converted_tool.args_schema.schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
assert (
converted_tool.func("What is the meaning of life?")
== "What is the meaning of life?"
)
def test_creating_a_tool_using_baseclass():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
def _run(self, question: str) -> str:
return question
my_tool = MyCustomTool()
# Assert all the right attributes were defined
assert my_tool.name == "Name of my tool"
assert (
my_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it."
)
assert my_tool.args_schema.schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
assert my_tool.run("What is the meaning of life?") == "What is the meaning of life?"
# Assert the langchain tool conversion worked as expected
converted_tool = my_tool.to_langchain()
assert converted_tool.name == "Name of my tool"
assert (
converted_tool.description
== "Tool Name: Name of my tool\nTool Arguments: {'question': {'description': None, 'type': 'str'}}\nTool Description: Clear description for what this tool is useful for, you agent will need this information to use it."
)
assert converted_tool.args_schema.schema()["properties"] == {
"question": {"title": "Question", "type": "string"}
}
assert (
converted_tool.run("What is the meaning of life?")
== "What is the meaning of life?"
)
def test_setting_cache_function():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
cache_function: Callable = lambda: False
def _run(self, question: str) -> str:
return question
my_tool = MyCustomTool()
# Assert all the right attributes were defined
assert not my_tool.cache_function()
def test_default_cache_function_is_true():
class MyCustomTool(BaseTool):
name: str = "Name of my tool"
description: str = (
"Clear description for what this tool is useful for, you agent will need this information to use it."
)
def _run(self, question: str) -> str:
return question
my_tool = MyCustomTool()
# Assert all the right attributes were defined
assert my_tool.cache_function()

View File

@@ -1,123 +0,0 @@
import json
import random
from unittest.mock import MagicMock
import pytest
from pydantic import BaseModel, Field
from crewai import Agent, Task
from crewai.tools.tool_usage import ToolUsage
from crewai.tools import BaseTool
class RandomNumberToolInput(BaseModel):
min_value: int = Field(
..., description="The minimum value of the range (inclusive)"
)
max_value: int = Field(
..., description="The maximum value of the range (inclusive)"
)
class RandomNumberTool(BaseTool):
name: str = "Random Number Generator"
description: str = "Generates a random number within a specified range"
args_schema: type[BaseModel] = RandomNumberToolInput
def _run(self, min_value: int, max_value: int) -> int:
return random.randint(min_value, max_value)
# Example agent and task
example_agent = Agent(
role="Number Generator",
goal="Generate random numbers for various purposes",
backstory="You are an AI agent specialized in generating random numbers within specified ranges.",
tools=[RandomNumberTool()],
verbose=True,
)
example_task = Task(
description="Generate a random number between 1 and 100",
expected_output="A random number between 1 and 100",
agent=example_agent,
)
def test_random_number_tool_range():
tool = RandomNumberTool()
result = tool._run(1, 10)
assert 1 <= result <= 10
def test_random_number_tool_invalid_range():
tool = RandomNumberTool()
with pytest.raises(ValueError):
tool._run(10, 1) # min_value > max_value
def test_random_number_tool_schema():
tool = RandomNumberTool()
# Get the schema using model_json_schema()
schema = tool.args_schema.model_json_schema()
# Convert the schema to a string
schema_str = json.dumps(schema)
# Check if the schema string contains the expected fields
assert "min_value" in schema_str
assert "max_value" in schema_str
# Parse the schema string back to a dictionary
schema_dict = json.loads(schema_str)
# Check if the schema contains the correct field types
assert schema_dict["properties"]["min_value"]["type"] == "integer"
assert schema_dict["properties"]["max_value"]["type"] == "integer"
# Check if the schema contains the field descriptions
assert (
"minimum value" in schema_dict["properties"]["min_value"]["description"].lower()
)
assert (
"maximum value" in schema_dict["properties"]["max_value"]["description"].lower()
)
def test_tool_usage_render():
tool = RandomNumberTool()
tool_usage = ToolUsage(
tools_handler=MagicMock(),
tools=[tool],
original_tools=[tool],
tools_description="Sample tool for testing",
tools_names="random_number_generator",
task=MagicMock(),
function_calling_llm=MagicMock(),
agent=MagicMock(),
action=MagicMock(),
)
rendered = tool_usage._render()
# Updated checks to match the actual output
assert "Tool Name: Random Number Generator" in rendered
assert "Tool Arguments:" in rendered
assert (
"'min_value': {'description': 'The minimum value of the range (inclusive)', 'type': 'int'}"
in rendered
)
assert (
"'max_value': {'description': 'The maximum value of the range (inclusive)', 'type': 'int'}"
in rendered
)
assert (
"Tool Description: Generates a random number within a specified range"
in rendered
)
assert (
"Tool Name: Random Number Generator\nTool Arguments: {'min_value': {'description': 'The minimum value of the range (inclusive)', 'type': 'int'}, 'max_value': {'description': 'The maximum value of the range (inclusive)', 'type': 'int'}}\nTool Description: Generates a random number within a specified range"
in rendered
)

1980
uv.lock generated

File diff suppressed because it is too large Load Diff