Mirror of https://github.com/crewAIInc/crewAI.git, synced 2025-12-16 20:38:29 +00:00.

Compare commits: tony-docs...bugfix/con (117 commits)
Commits in this comparison (SHA1):

1eb4717352, a1f35e768f, 00eede0d5d, a3d5c86218, 60d13bf7e8, 86825e1769, 7afc531fbb, ed0490112b, 66c66e3d84, b9b625a70d,
b58253cacc, fbf8732784, 8fedbe49cb, 1e8ee247ca, 34d2993456, e3c5c174ee, b4e2db0306, 9cc759ba32, ac9f8b9d5a, 3d4a1e4b18,
123f302744, 5bae78639e, 5235442a5b, c62fb615b1, 78797c64b0, 8a7584798b, b50772a38b, 96a7e8038f, ec050e5d33, e2ce65fc5b,
14503bc43b, 00c2f5043e, bcd90e26b0, 4eaa8755eb, ba66910fbd, 90f1bee602, 1cb5f57864, 7dc47adb5c, ac819bcb6e, b6d668fc66,
1b488b6da7, d3b398ed52, d52fd09602, d6800d8957, 2fd7506ed9, 161084aff2, b145cb3247, 1adbcf697d, e51355200a, 47818f4f41,
9b10fd47b0, c408368267, 90b3145e92, fbd0e015d5, 17e25fb842, d6d98ee969, e0600e3bb9, a79d77dfd7, 56ec9bc224, 8eef02739a,
6f4ad532e6, 74a1de8550, e529766391, a7f5d574dc, 0cc02d9492, fa26f6ebae, f6c2982619, 5a8649a97f, e6100debac, abee94d056,
92731544ae, 77c7b7dfa1, ea64c29fee, f4bb040ad8, 515478473a, 9cf3fadd0f, 89c4b3fe88, 9e5c599f58, a950e67c7d, de6933b2d2,
748383d74c, 23b9e10323, ddb7958da7, 477cce321f, 7bed63a693, 2709a9205a, d19d7b01ec, a3ad2c1957, c3e7a3ec19, cba8c9faec,
bcb7fb27d0, c310044bec, 5263df24b6, dea6ed7ef0, d3a0dad323, 67bf4aea56, 8c76bad50f, e27a15023c, a836f466f4, 67f0de1f90,
c642ebf97e, a21e310d78, aba68da542, e254f11933, ab2274caf0, 3e4f112f39, cc018bf128, 46d3e4d4d9, 627bb3f5f6, 4a44245de9,
30d027158a, 3fecde49b6, cc129a0bce, b5779dca12, 42311d9c7a, 294f2cc3a9, 3dc442801f
.gitignore (vendored, 3 changes)

````diff
@@ -21,4 +21,5 @@ crew_tasks_output.json
 .mypy_cache
 .ruff_cache
 .venv
-agentops.log
+agentops.log
+test_flow.html
````
README.md (18 changes)

````diff
@@ -1,10 +1,18 @@
 <div align="center">
 
-
+
 
 # **CrewAI**
 
-🤖 **CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
+**CrewAI**: Production-grade framework for orchestrating sophisticated AI agent systems. From simple automations to complex real-world applications, CrewAI provides precise control and deep customization. By fostering collaborative intelligence through flexible, production-ready architecture, CrewAI empowers agents to work together seamlessly, tackling complex business challenges with predictable, consistent results.
+
+**CrewAI Enterprise**
+Want to plan, build (+ no code), deploy, monitor and iterate your agents: [CrewAI Enterprise](https://www.crewai.com/enterprise). Designed for complex, real-world applications, our enterprise solution offers:
+
+- **Seamless Integrations**
+- **Scalable & Secure Deployment**
+- **Actionable Insights**
+- **24/7 Support**
 
 <h3>
@@ -190,7 +198,7 @@ research_task:
   description: >
     Conduct a thorough research about {topic}
     Make sure you find any interesting and relevant information given
-    the current year is 2024.
+    the current year is 2025.
   expected_output: >
     A list with 10 bullet points of the most relevant information about {topic}
   agent: researcher
@@ -392,7 +400,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
             goal="Gather and validate supporting market data",
             backstory="You excel at finding and correlating multiple data sources"
         )
 
         analysis_task = Task(
             description="Analyze {sector} sector data for the past {timeframe}",
             expected_output="Detailed market analysis with confidence score",
@@ -403,7 +411,7 @@ class AdvancedAnalysisFlow(Flow[MarketState]):
             expected_output="Corroborating evidence and potential contradictions",
             agent=researcher
         )
 
         # Demonstrate crew autonomy
         analysis_crew = Crew(
             agents=[analyst, researcher],
````
````diff
@@ -43,7 +43,7 @@ Think of an agent as a specialized team member with specific skills, expertise,
 | **Max Retry Limit** _(optional)_ | `max_retry_limit` | `int` | Maximum number of retries when an error occurs. Default is 2. |
 | **Respect Context Window** _(optional)_ | `respect_context_window` | `bool` | Keep messages under context window size by summarizing. Default is True. |
 | **Code Execution Mode** _(optional)_ | `code_execution_mode` | `Literal["safe", "unsafe"]` | Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct). Default is 'safe'. |
-| **Embedder Config** _(optional)_ | `embedder_config` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
+| **Embedder** _(optional)_ | `embedder` | `Optional[Dict[str, Any]]` | Configuration for the embedder used by the agent. |
 | **Knowledge Sources** _(optional)_ | `knowledge_sources` | `Optional[List[BaseKnowledgeSource]]` | Knowledge sources available to the agent. |
 | **Use System Prompt** _(optional)_ | `use_system_prompt` | `Optional[bool]` | Whether to use system prompt (for o1 model support). Default is True. |
@@ -152,7 +152,7 @@ agent = Agent(
     use_system_prompt=True,  # Default: True
     tools=[SerperDevTool()],  # Optional: List of tools
     knowledge_sources=None,  # Optional: List of knowledge sources
-    embedder_config=None,  # Optional: Custom embedder configuration
+    embedder=None,  # Optional: Custom embedder configuration
     system_template=None,  # Optional: Custom system prompt template
     prompt_template=None,  # Optional: Custom prompt template
     response_template=None,  # Optional: Custom response template
````
````diff
@@ -12,7 +12,7 @@ The CrewAI CLI provides a set of commands to interact with CrewAI, allowing you
 
 To use the CrewAI CLI, make sure you have CrewAI installed:
 
-```shell
+```shell Terminal
 pip install crewai
 ```
 
@@ -20,7 +20,7 @@ pip install crewai
 
 The basic structure of a CrewAI CLI command is:
 
-```shell
+```shell Terminal
 crewai [COMMAND] [OPTIONS] [ARGUMENTS]
 ```
 
@@ -30,7 +30,7 @@ crewai [COMMAND] [OPTIONS] [ARGUMENTS]
 
 Create a new crew or flow.
 
-```shell
+```shell Terminal
 crewai create [OPTIONS] TYPE NAME
 ```
 
@@ -38,7 +38,7 @@ crewai create [OPTIONS] TYPE NAME
 - `NAME`: Name of the crew or flow
 
 Example:
-```shell
+```shell Terminal
 crewai create crew my_new_crew
 crewai create flow my_new_flow
 ```
@@ -47,14 +47,14 @@ crewai create flow my_new_flow
 
 Show the installed version of CrewAI.
 
-```shell
+```shell Terminal
 crewai version [OPTIONS]
 ```
 
 - `--tools`: (Optional) Show the installed version of CrewAI tools
 
 Example:
-```shell
+```shell Terminal
 crewai version
 crewai version --tools
 ```
@@ -63,7 +63,7 @@ crewai version --tools
 
 Train the crew for a specified number of iterations.
 
-```shell
+```shell Terminal
 crewai train [OPTIONS]
 ```
 
@@ -71,7 +71,7 @@ crewai train [OPTIONS]
 - `-f, --filename TEXT`: Path to a custom file for training (default: "trained_agents_data.pkl")
 
 Example:
-```shell
+```shell Terminal
 crewai train -n 10 -f my_training_data.pkl
 ```
 
@@ -79,14 +79,14 @@ crewai train -n 10 -f my_training_data.pkl
 
 Replay the crew execution from a specific task.
 
-```shell
+```shell Terminal
 crewai replay [OPTIONS]
 ```
 
 - `-t, --task_id TEXT`: Replay the crew from this task ID, including all subsequent tasks
 
 Example:
-```shell
+```shell Terminal
 crewai replay -t task_123456
 ```
 
@@ -94,7 +94,7 @@ crewai replay -t task_123456
 
 Retrieve your latest crew.kickoff() task outputs.
 
-```shell
+```shell Terminal
 crewai log-tasks-outputs
 ```
 
@@ -102,7 +102,7 @@ crewai log-tasks-outputs
 
 Reset the crew memories (long, short, entity, latest_crew_kickoff_outputs).
 
-```shell
+```shell Terminal
 crewai reset-memories [OPTIONS]
 ```
 
@@ -113,7 +113,7 @@ crewai reset-memories [OPTIONS]
 - `-a, --all`: Reset ALL memories
 
 Example:
-```shell
+```shell Terminal
 crewai reset-memories --long --short
 crewai reset-memories --all
 ```
@@ -122,7 +122,7 @@ crewai reset-memories --all
 
 Test the crew and evaluate the results.
 
-```shell
+```shell Terminal
 crewai test [OPTIONS]
 ```
 
@@ -130,24 +130,56 @@ crewai test [OPTIONS]
 - `-m, --model TEXT`: LLM Model to run the tests on the Crew (default: "gpt-4o-mini")
 
 Example:
-```shell
+```shell Terminal
 crewai test -n 5 -m gpt-3.5-turbo
 ```
 
 ### 8. Run
 
-Run the crew.
+Run the crew or flow.
 
-```shell
+```shell Terminal
 crewai run
 ```
+
+<Note>
+Starting from version 0.103.0, the `crewai run` command can be used to run both standard crews and flows. For flows, it automatically detects the type from pyproject.toml and runs the appropriate command. This is now the recommended way to run both crews and flows.
+</Note>
 
 <Note>
 Make sure to run these commands from the directory where your CrewAI project is set up.
 Some commands may require additional configuration or setup within your project structure.
 </Note>
 
-### 9. API Keys
+### 9. Chat
+
+Starting in version `0.98.0`, when you run the `crewai chat` command, you start an interactive session with your crew. The AI assistant will guide you by asking for necessary inputs to execute the crew. Once all inputs are provided, the crew will execute its tasks.
+
+After receiving the results, you can continue interacting with the assistant for further instructions or questions.
+
+```shell Terminal
+crewai chat
+```
+<Note>
+Ensure you execute these commands from your CrewAI project's root directory.
+</Note>
+<Note>
+IMPORTANT: Set the `chat_llm` property in your `crew.py` file to enable this command.
+
+```python
+@crew
+def crew(self) -> Crew:
+    return Crew(
+        agents=self.agents,
+        tasks=self.tasks,
+        process=Process.sequential,
+        verbose=True,
+        chat_llm="gpt-4o",  # LLM for chat orchestration
+    )
+```
+</Note>
+
+### 10. API Keys
 
 When running ```crewai create crew``` command, the CLI will first show you the top 5 most common LLM providers and ask you to select one.
````
````diff
@@ -23,14 +23,14 @@ A crew in crewAI represents a collaborative group of agents working together to
 | **Language** _(optional)_ | `language` | Language used for the crew, defaults to English. |
 | **Language File** _(optional)_ | `language_file` | Path to the language file to be used for the crew. |
 | **Memory** _(optional)_ | `memory` | Utilized for storing execution memories (short-term, long-term, entity memory). |
 | **Memory Config** _(optional)_ | `memory_config` | Configuration for the memory provider to be used by the crew. |
 | **Cache** _(optional)_ | `cache` | Specifies whether to use a cache for storing the results of tools' execution. Defaults to `True`. |
 | **Embedder** _(optional)_ | `embedder` | Configuration for the embedder to be used by the crew. Mostly used by memory for now. Default is `{"provider": "openai"}`. |
 | **Full Output** _(optional)_ | `full_output` | Whether the crew should return the full output with all tasks outputs or just the final output. Defaults to `False`. |
 | **Step Callback** _(optional)_ | `step_callback` | A function that is called after each step of every agent. This can be used to log the agent's actions or to perform other operations; it won't override the agent-specific `step_callback`. |
 | **Task Callback** _(optional)_ | `task_callback` | A function that is called after the completion of each task. Useful for monitoring or additional operations post-task execution. |
 | **Share Crew** _(optional)_ | `share_crew` | Whether you want to share the complete crew information and execution with the crewAI team to make the library better, and allow us to train models. |
-| **Output Log File** _(optional)_ | `output_log_file` | Whether you want to have a file with the complete crew output and execution. You can set it using True and it will default to the folder you are currently in and it will be called logs.txt or passing a string with the full path and name of the file. |
+| **Output Log File** _(optional)_ | `output_log_file` | Set to True to save logs as logs.txt in the current directory or provide a file path. Logs will be in JSON format if the filename ends in .json, otherwise .txt. Defaults to `None`. |
 | **Manager Agent** _(optional)_ | `manager_agent` | `manager` sets a custom agent that will be used as a manager. |
 | **Prompt File** _(optional)_ | `prompt_file` | Path to the prompt JSON file to be used for the crew. |
 | **Planning** *(optional)* | `planning` | Adds planning ability to the Crew. When activated before each Crew iteration, all Crew data is sent to an AgentPlanner that will plan the tasks and this plan will be added to each task description. |
````
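The `planning` attribute in the table above pairs the crew with a planner model. The sketch below is a minimal, hedged illustration; the `planning_llm` parameter name and the placeholder agents/tasks are assumptions to verify against the installed crewai version.

```python
from crewai import Crew, Process

# Hypothetical crew; agents and tasks are placeholders
planned_crew = Crew(
    agents=[...],
    tasks=[...],
    process=Process.sequential,
    planning=True,          # Crew data is sent to an AgentPlanner before each iteration
    planning_llm="gpt-4o",  # Assumed parameter for the planner's LLM
)
```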
````diff
@@ -240,6 +240,23 @@ print(f"Tasks Output: {crew_output.tasks_output}")
 print(f"Token Usage: {crew_output.token_usage}")
 ```
 
+## Accessing Crew Logs
+
+You can see real-time logs of the crew execution by setting `output_log_file` to `True` (boolean) or to a file name (string). Events can be logged to either `file_name.txt` or `file_name.json`.
+When set to `True`, logs are saved as `logs.txt`.
+
+When `output_log_file` is set to `False` or `None`, no logs are saved.
+
+```python Code
+# Save crew logs
+crew = Crew(output_log_file=True)              # Logs will be saved as logs.txt
+crew = Crew(output_log_file="file_name")       # Logs will be saved as file_name.txt
+crew = Crew(output_log_file="file_name.txt")   # Logs will be saved as file_name.txt
+crew = Crew(output_log_file="file_name.json")  # Logs will be saved as file_name.json
+```
+
 
 ## Memory Utilization
 
 Crews can utilize memory (short-term, long-term, and entity memory) to enhance their execution and learning over time. This feature allows crews to store and recall execution memories, aiding in decision-making and task execution strategies.
@@ -279,9 +296,9 @@ print(result)
 Once your crew is assembled, initiate the workflow with the appropriate kickoff method. CrewAI provides several methods for better control over the kickoff process: `kickoff()`, `kickoff_for_each()`, `kickoff_async()`, and `kickoff_for_each_async()`.
 
 - `kickoff()`: Starts the execution process according to the defined process flow.
-- `kickoff_for_each()`: Executes tasks for each agent individually.
+- `kickoff_for_each()`: Executes tasks sequentially for each provided input event or item in the collection.
 - `kickoff_async()`: Initiates the workflow asynchronously.
-- `kickoff_for_each_async()`: Executes tasks for each agent individually in an asynchronous manner.
+- `kickoff_for_each_async()`: Executes tasks concurrently for each provided input event or item, leveraging asynchronous processing.
 
 ```python Code
 # Start the crew's task execution
````
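To illustrate the `kickoff_for_each()` variant described above, here is a minimal sketch; it assumes `my_crew` is an already assembled crew whose task descriptions use a `{topic}` placeholder.

```python
# One kickoff per input dict; results come back in the same order
inputs = [
    {"topic": "AI in healthcare"},
    {"topic": "AI in finance"},
]

results = my_crew.kickoff_for_each(inputs=inputs)
for output in results:
    print(output.raw)  # Raw text of each run's final output
```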
docs/concepts/event-listner.mdx (new file, 350 lines)

---
title: 'Event Listeners'
description: 'Tap into CrewAI events to build custom integrations and monitoring'
---

# Event Listeners

CrewAI provides a powerful event system that allows you to listen for and react to various events that occur during the execution of your Crew. This feature enables you to build custom integrations, monitoring solutions, logging systems, or any other functionality that needs to be triggered based on CrewAI's internal events.

## How It Works

CrewAI uses an event bus architecture to emit events throughout the execution lifecycle. The event system is built on the following components:

1. **CrewAIEventsBus**: A singleton event bus that manages event registration and emission
2. **CrewEvent**: Base class for all events in the system
3. **BaseEventListener**: Abstract base class for creating custom event listeners

When specific actions occur in CrewAI (like a Crew starting execution, an Agent completing a task, or a tool being used), the system emits corresponding events. You can register handlers for these events to execute custom code when they occur.

## Creating a Custom Event Listener

To create a custom event listener, you need to:

1. Create a class that inherits from `BaseEventListener`
2. Implement the `setup_listeners` method
3. Register handlers for the events you're interested in
4. Create an instance of your listener in the appropriate file

Here's a simple example of a custom event listener class:

```python
from crewai.utilities.events import (
    CrewKickoffStartedEvent,
    CrewKickoffCompletedEvent,
    AgentExecutionCompletedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener

class MyCustomListener(BaseEventListener):
    def __init__(self):
        super().__init__()

    def setup_listeners(self, crewai_event_bus):
        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_started(source, event):
            print(f"Crew '{event.crew_name}' has started execution!")

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_completed(source, event):
            print(f"Crew '{event.crew_name}' has completed execution!")
            print(f"Output: {event.output}")

        @crewai_event_bus.on(AgentExecutionCompletedEvent)
        def on_agent_execution_completed(source, event):
            print(f"Agent '{event.agent.role}' completed task")
            print(f"Output: {event.output}")
```

## Properly Registering Your Listener

Simply defining your listener class isn't enough. You need to create an instance of it and ensure it's imported in your application. This ensures that:

1. The event handlers are registered with the event bus
2. The listener instance remains in memory (not garbage collected)
3. The listener is active when events are emitted

### Option 1: Import and Instantiate in Your Crew or Flow Implementation

The most important thing is to create an instance of your listener in the file where your Crew or Flow is defined and executed:

#### For Crew-based Applications

Create and import your listener at the top of your Crew implementation file:

```python
# In your crew.py file
from crewai import Agent, Crew, Task
from my_listeners import MyCustomListener

# Create an instance of your listener
my_listener = MyCustomListener()

class MyCustomCrew:
    # Your crew implementation...

    def crew(self):
        return Crew(
            agents=[...],
            tasks=[...],
            # ...
        )
```

#### For Flow-based Applications

Create and import your listener at the top of your Flow implementation file:

```python
# In your main.py or flow.py file
from crewai.flow import Flow, listen, start
from my_listeners import MyCustomListener

# Create an instance of your listener
my_listener = MyCustomListener()

class MyCustomFlow(Flow):
    # Your flow implementation...

    @start()
    def first_step(self):
        # ...
```

This ensures that your listener is loaded and active when your Crew or Flow is executed.

### Option 2: Create a Package for Your Listeners

For a more structured approach, especially if you have multiple listeners:

1. Create a package for your listeners:

```
my_project/
├── listeners/
│   ├── __init__.py
│   ├── my_custom_listener.py
│   └── another_listener.py
```

2. In `my_custom_listener.py`, define your listener class and create an instance:

```python
# my_custom_listener.py
from crewai.utilities.events.base_event_listener import BaseEventListener
# ... import events ...

class MyCustomListener(BaseEventListener):
    # ... implementation ...

# Create an instance of your listener
my_custom_listener = MyCustomListener()
```

3. In `__init__.py`, import the listener instances to ensure they're loaded:

```python
# __init__.py
from .my_custom_listener import my_custom_listener
from .another_listener import another_listener

# Optionally export them if you need to access them elsewhere
__all__ = ['my_custom_listener', 'another_listener']
```

4. Import your listeners package in your Crew or Flow file:

```python
# In your crew.py or flow.py file
import my_project.listeners  # This loads all your listeners

class MyCustomCrew:
    # Your crew implementation...
```

This is exactly how CrewAI's built-in `agentops_listener` is registered. In the CrewAI codebase, you'll find:

```python
# src/crewai/utilities/events/third_party/__init__.py
from .agentops_listener import agentops_listener
```

This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.

## Available Event Types

CrewAI provides a wide range of events that you can listen for:

### Crew Events

- **CrewKickoffStartedEvent**: Emitted when a Crew starts execution
- **CrewKickoffCompletedEvent**: Emitted when a Crew completes execution
- **CrewKickoffFailedEvent**: Emitted when a Crew fails to complete execution
- **CrewTestStartedEvent**: Emitted when a Crew starts testing
- **CrewTestCompletedEvent**: Emitted when a Crew completes testing
- **CrewTestFailedEvent**: Emitted when a Crew fails to complete testing
- **CrewTrainStartedEvent**: Emitted when a Crew starts training
- **CrewTrainCompletedEvent**: Emitted when a Crew completes training
- **CrewTrainFailedEvent**: Emitted when a Crew fails to complete training

### Agent Events

- **AgentExecutionStartedEvent**: Emitted when an Agent starts executing a task
- **AgentExecutionCompletedEvent**: Emitted when an Agent completes executing a task
- **AgentExecutionErrorEvent**: Emitted when an Agent encounters an error during execution

### Task Events

- **TaskStartedEvent**: Emitted when a Task starts execution
- **TaskCompletedEvent**: Emitted when a Task completes execution
- **TaskFailedEvent**: Emitted when a Task fails to complete execution
- **TaskEvaluationEvent**: Emitted when a Task is evaluated

### Tool Usage Events

- **ToolUsageStartedEvent**: Emitted when a tool execution is started
- **ToolUsageFinishedEvent**: Emitted when a tool execution is completed
- **ToolUsageErrorEvent**: Emitted when a tool execution encounters an error
- **ToolValidateInputErrorEvent**: Emitted when a tool input validation encounters an error
- **ToolExecutionErrorEvent**: Emitted when a tool execution encounters an error
- **ToolSelectionErrorEvent**: Emitted when there's an error selecting a tool

### Flow Events

- **FlowCreatedEvent**: Emitted when a Flow is created
- **FlowStartedEvent**: Emitted when a Flow starts execution
- **FlowFinishedEvent**: Emitted when a Flow completes execution
- **FlowPlotEvent**: Emitted when a Flow is plotted
- **MethodExecutionStartedEvent**: Emitted when a Flow method starts execution
- **MethodExecutionFinishedEvent**: Emitted when a Flow method completes execution
- **MethodExecutionFailedEvent**: Emitted when a Flow method fails to complete execution

### LLM Events

- **LLMCallStartedEvent**: Emitted when an LLM call starts
- **LLMCallCompletedEvent**: Emitted when an LLM call completes
- **LLMCallFailedEvent**: Emitted when an LLM call fails
- **LLMStreamChunkEvent**: Emitted for each chunk received during streaming LLM responses

## Event Handler Structure

Each event handler receives two parameters:

1. **source**: The object that emitted the event
2. **event**: The event instance, containing event-specific data

The structure of the event object depends on the event type, but all events inherit from `CrewEvent` and include:

- **timestamp**: The time when the event was emitted
- **type**: A string identifier for the event type

Additional fields vary by event type. For example, `CrewKickoffCompletedEvent` includes `crew_name` and `output` fields.
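As a small illustration of these shared fields, a handler registered inside `setup_listeners` (as shown earlier on this page) could log just the base metadata. This is a sketch; the exact representation of `timestamp` may vary between versions.

```python
@crewai_event_bus.on(CrewKickoffCompletedEvent)
def log_event_metadata(source, event):
    # timestamp and type come from the CrewEvent base class;
    # crew_name is specific to crew kickoff events
    print(f"[{event.timestamp}] {event.type}: crew '{event.crew_name}' finished")
```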
## Real-World Example: Integration with AgentOps

CrewAI includes an example of a third-party integration with [AgentOps](https://github.com/AgentOps-AI/agentops), a monitoring and observability platform for AI agents. Here's how it's implemented:

```python
from typing import Optional

from crewai.utilities.events import (
    CrewKickoffCompletedEvent,
    ToolUsageErrorEvent,
    ToolUsageStartedEvent,
)
from crewai.utilities.events.base_event_listener import BaseEventListener
from crewai.utilities.events.crew_events import CrewKickoffStartedEvent
from crewai.utilities.events.task_events import TaskEvaluationEvent

try:
    import agentops
    AGENTOPS_INSTALLED = True
except ImportError:
    AGENTOPS_INSTALLED = False

class AgentOpsListener(BaseEventListener):
    tool_event: Optional["agentops.ToolEvent"] = None
    session: Optional["agentops.Session"] = None

    def __init__(self):
        super().__init__()

    def setup_listeners(self, crewai_event_bus):
        if not AGENTOPS_INSTALLED:
            return

        @crewai_event_bus.on(CrewKickoffStartedEvent)
        def on_crew_kickoff_started(source, event: CrewKickoffStartedEvent):
            self.session = agentops.init()
            for agent in source.agents:
                if self.session:
                    self.session.create_agent(
                        name=agent.role,
                        agent_id=str(agent.id),
                    )

        @crewai_event_bus.on(CrewKickoffCompletedEvent)
        def on_crew_kickoff_completed(source, event: CrewKickoffCompletedEvent):
            if self.session:
                self.session.end_session(
                    end_state="Success",
                    end_state_reason="Finished Execution",
                )

        @crewai_event_bus.on(ToolUsageStartedEvent)
        def on_tool_usage_started(source, event: ToolUsageStartedEvent):
            self.tool_event = agentops.ToolEvent(name=event.tool_name)
            if self.session:
                self.session.record(self.tool_event)

        @crewai_event_bus.on(ToolUsageErrorEvent)
        def on_tool_usage_error(source, event: ToolUsageErrorEvent):
            agentops.ErrorEvent(exception=event.error, trigger_event=self.tool_event)
```

This listener initializes an AgentOps session when a Crew starts, registers agents with AgentOps, tracks tool usage, and ends the session when the Crew completes.

The AgentOps listener is registered in CrewAI's event system through the import in `src/crewai/utilities/events/third_party/__init__.py`:

```python
from .agentops_listener import agentops_listener
```

This ensures the `agentops_listener` is loaded when the `crewai.utilities.events` package is imported.

## Advanced Usage: Scoped Handlers

For temporary event handling (useful for testing or specific operations), you can use the `scoped_handlers` context manager:

```python
from crewai.utilities.events import crewai_event_bus, CrewKickoffStartedEvent

with crewai_event_bus.scoped_handlers():
    @crewai_event_bus.on(CrewKickoffStartedEvent)
    def temp_handler(source, event):
        print("This handler only exists within this context")

    # Do something that emits events

# Outside the context, the temporary handler is removed
```

## Use Cases

Event listeners can be used for a variety of purposes:

1. **Logging and Monitoring**: Track the execution of your Crew and log important events
2. **Analytics**: Collect data about your Crew's performance and behavior
3. **Debugging**: Set up temporary listeners to debug specific issues
4. **Integration**: Connect CrewAI with external systems like monitoring platforms, databases, or notification services
5. **Custom Behavior**: Trigger custom actions based on specific events

## Best Practices

1. **Keep Handlers Light**: Event handlers should be lightweight and avoid blocking operations
2. **Error Handling**: Include proper error handling in your event handlers to prevent exceptions from affecting the main execution
3. **Cleanup**: If your listener allocates resources, ensure they're properly cleaned up
4. **Selective Listening**: Only listen for events you actually need to handle
5. **Testing**: Test your event listeners in isolation to ensure they behave as expected

By leveraging CrewAI's event system, you can extend its functionality and integrate it seamlessly with your existing infrastructure.
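As a rough sketch of the "Error Handling" best practice above, a handler can swallow its own failures so they never interrupt the crew run. `send_to_dashboard` is a hypothetical helper standing in for your own integration code.

```python
@crewai_event_bus.on(ToolUsageStartedEvent)
def on_tool_usage_started(source, event):
    try:
        send_to_dashboard({"tool": event.tool_name})  # hypothetical helper
    except Exception as exc:
        # Log and move on; a listener failure should not affect execution
        print(f"Listener error ignored: {exc}")
```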
````diff
@@ -150,12 +150,12 @@ final_output = flow.kickoff()
 
 print("---- Final Output ----")
 print(final_output)
-````
+```
 
 ```text Output
 ---- Final Output ----
 Second method received: Output from first_method
-````
+```
 
 </CodeGroup>
@@ -232,18 +232,18 @@ class UnstructuredExampleFlow(Flow):
     def first_method(self):
         # The state automatically includes an 'id' field
         print(f"State ID: {self.state['id']}")
-        self.state.message = "Hello from structured flow"
-        self.state.counter = 0
+        self.state['counter'] = 0
+        self.state['message'] = "Hello from structured flow"
 
     @listen(first_method)
     def second_method(self):
-        self.state.counter += 1
-        self.state.message += " - updated"
+        self.state['counter'] += 1
+        self.state['message'] += " - updated"
 
     @listen(second_method)
     def third_method(self):
-        self.state.counter += 1
-        self.state.message += " - updated again"
+        self.state['counter'] += 1
+        self.state['message'] += " - updated again"
 
         print(f"State after third_method: {self.state}")
@@ -323,6 +323,91 @@ flow.kickoff()
 
 By providing both unstructured and structured state management options, CrewAI Flows empowers developers to build AI workflows that are both flexible and robust, catering to a wide range of application requirements.
+
+## Flow Persistence
+
+The @persist decorator enables automatic state persistence in CrewAI Flows, allowing you to maintain flow state across restarts or different workflow executions. This decorator can be applied at either the class level or method level, providing flexibility in how you manage state persistence.
+
+### Class-Level Persistence
+
+When applied at the class level, the @persist decorator automatically persists all flow method states:
+
+```python
+@persist  # Using SQLiteFlowPersistence by default
+class MyFlow(Flow[MyState]):
+    @start()
+    def initialize_flow(self):
+        # This method will automatically have its state persisted
+        self.state.counter = 1
+        print("Initialized flow. State ID:", self.state.id)
+
+    @listen(initialize_flow)
+    def next_step(self):
+        # The state (including self.state.id) is automatically reloaded
+        self.state.counter += 1
+        print("Flow state is persisted. Counter:", self.state.counter)
+```
+
+### Method-Level Persistence
+
+For more granular control, you can apply @persist to specific methods:
+
+```python
+class AnotherFlow(Flow[dict]):
+    @persist  # Persists only this method's state
+    @start()
+    def begin(self):
+        if "runs" not in self.state:
+            self.state["runs"] = 0
+        self.state["runs"] += 1
+        print("Method-level persisted runs:", self.state["runs"])
+```
+
+### How It Works
+
+1. **Unique State Identification**
+   - Each flow state automatically receives a unique UUID
+   - The ID is preserved across state updates and method calls
+   - Supports both structured (Pydantic BaseModel) and unstructured (dictionary) states
+
+2. **Default SQLite Backend**
+   - SQLiteFlowPersistence is the default storage backend
+   - States are automatically saved to a local SQLite database
+   - Robust error handling ensures clear messages if database operations fail
+
+3. **Error Handling**
+   - Comprehensive error messages for database operations
+   - Automatic state validation during save and load
+   - Clear feedback when persistence operations encounter issues
+
+### Important Considerations
+
+- **State Types**: Both structured (Pydantic BaseModel) and unstructured (dictionary) states are supported
+- **Automatic ID**: The `id` field is automatically added if not present
+- **State Recovery**: Failed or restarted flows can automatically reload their previous state
+- **Custom Implementation**: You can provide your own FlowPersistence implementation for specialized storage needs
+
+### Technical Advantages
+
+1. **Precise Control Through Low-Level Access**
+   - Direct access to persistence operations for advanced use cases
+   - Fine-grained control via method-level persistence decorators
+   - Built-in state inspection and debugging capabilities
+   - Full visibility into state changes and persistence operations
+
+2. **Enhanced Reliability**
+   - Automatic state recovery after system failures or restarts
+   - Transaction-based state updates for data integrity
+   - Comprehensive error handling with clear error messages
+   - Robust validation during state save and load operations
+
+3. **Extensible Architecture**
+   - Customizable persistence backend through FlowPersistence interface
+   - Support for specialized storage solutions beyond SQLite
+   - Compatible with both structured (Pydantic) and unstructured (dict) states
+   - Seamless integration with existing CrewAI flow patterns
+
+The persistence system's architecture emphasizes technical precision and customization options, allowing developers to maintain full control over state management while benefiting from built-in reliability features.
 
 ## Flow Control
 
 ### Conditional Logic: `or`
````
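Expanding on the "Custom Implementation" point in the persistence section above, a custom backend could look roughly like the sketch below. The import path and the `init_db`/`save_state`/`load_state` signatures are assumptions about the `FlowPersistence` interface and should be checked against the installed crewai version.

```python
from typing import Any, Dict, Optional

from crewai.flow.persistence.base import FlowPersistence  # assumed module path


class InMemoryFlowPersistence(FlowPersistence):
    """Toy backend that keeps flow states in a dict instead of SQLite."""

    def __init__(self):
        self._states: Dict[str, Dict[str, Any]] = {}

    def init_db(self) -> None:
        # Nothing to provision for an in-memory store
        self._states = {}

    def save_state(self, flow_uuid: str, method_name: str, state_data: Any) -> None:
        # Accept both Pydantic models and plain dicts
        data = state_data.model_dump() if hasattr(state_data, "model_dump") else dict(state_data)
        self._states[flow_uuid] = data

    def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
        return self._states.get(flow_uuid)
```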
````diff
@@ -653,3 +738,34 @@ Also, check out our YouTube video on how to use flows in CrewAI below!
   referrerpolicy="strict-origin-when-cross-origin"
   allowfullscreen
 ></iframe>
+
+## Running Flows
+
+There are two ways to run a flow:
+
+### Using the Flow API
+
+You can run a flow programmatically by creating an instance of your flow class and calling the `kickoff()` method:
+
+```python
+flow = ExampleFlow()
+result = flow.kickoff()
+```
+
+### Using the CLI
+
+Starting from version 0.103.0, you can run flows using the `crewai run` command:
+
+```shell
+crewai run
+```
+
+This command automatically detects if your project is a flow (based on the `type = "flow"` setting in your pyproject.toml) and runs it accordingly. This is the recommended way to run flows from the command line.
+
+For backward compatibility, you can also use:
+
+```shell
+crewai flow kickoff
+```
+
+However, the `crewai run` command is now the preferred method as it works for both crews and flows.
````
````diff
@@ -91,7 +91,13 @@ result = crew.kickoff(inputs={"question": "What city does John live in and how o
 ```
 
-Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including TXT, PDF, DOCX, HTML, and more.
+Here's another example with the `CrewDoclingSource`. The CrewDoclingSource is actually quite versatile and can handle multiple file formats including MD, PDF, DOCX, HTML, and more.
+
+<Note>
+You need to install `docling` for the following example to work: `uv add docling`
+</Note>
 
 ```python Code
 from crewai import LLM, Agent, Crew, Process, Task
@@ -146,10 +152,10 @@ Here are examples of how to use different types of knowledge sources:
 
 ### Text File Knowledge Source
 ```python
-from crewai.knowledge.source.crew_docling_source import CrewDoclingSource
+from crewai.knowledge.source.text_file_knowledge_source import TextFileKnowledgeSource
 
 # Create a text file knowledge source
-text_source = CrewDoclingSource(
+text_source = TextFileKnowledgeSource(
     file_paths=["document.txt", "another.txt"]
 )
 
@@ -282,6 +288,7 @@ The `embedder` parameter supports various embedding model providers that include
 - `ollama`: Local embeddings with Ollama
 - `vertexai`: Google Cloud VertexAI embeddings
 - `cohere`: Cohere's embedding models
+- `voyageai`: VoyageAI's embedding models
 - `bedrock`: AWS Bedrock embeddings
 - `huggingface`: Hugging Face models
 - `watson`: IBM Watson embeddings
@@ -317,6 +324,13 @@ agent = Agent(
     verbose=True,
     allow_delegation=False,
     llm=gemini_llm,
+    embedder={
+        "provider": "google",
+        "config": {
+            "model": "models/text-embedding-004",
+            "api_key": GEMINI_API_KEY,
+        }
+    }
 )
 
 task = Task(
````
File diff suppressed because it is too large
````diff
@@ -58,41 +58,107 @@ my_crew = Crew(
 ### Example: Use Custom Memory Instances e.g FAISS as the VectorDB
 
 ```python Code
-from crewai import Crew, Agent, Task, Process
+from crewai import Crew, Process
+from crewai.memory import LongTermMemory, ShortTermMemory, EntityMemory
+from crewai.memory.storage import LTMSQLiteStorage, RAGStorage
+from typing import List, Optional
 
 # Assemble your crew with memory capabilities
-my_crew = Crew(
-    agents=[...],
-    tasks=[...],
-    process="Process.sequential",
-    memory=True,
-    long_term_memory=EnhanceLongTermMemory(
+my_crew: Crew = Crew(
+    agents = [...],
+    tasks = [...],
+    process = Process.sequential,
+    memory = True,
+    # Long-term memory for persistent storage across sessions
+    long_term_memory = LongTermMemory(
         storage=LTMSQLiteStorage(
-            db_path="/my_data_dir/my_crew1/long_term_memory_storage.db"
+            db_path="/my_crew1/long_term_memory_storage.db"
         )
     ),
-    short_term_memory=EnhanceShortTermMemory(
-        storage=CustomRAGStorage(
-            crew_name="my_crew",
-            storage_type="short_term",
-            data_dir="//my_data_dir",
-            model=embedder["model"],
-            dimension=embedder["dimension"],
+    # Short-term memory for current context using RAG
+    short_term_memory = ShortTermMemory(
+        storage = RAGStorage(
+            embedder_config={
+                "provider": "openai",
+                "config": {
+                    "model": 'text-embedding-3-small'
+                }
+            },
+            type="short_term",
+            path="/my_crew1/"
         )
     ),
-    entity_memory=EnhanceEntityMemory(
-        storage=CustomRAGStorage(
-            crew_name="my_crew",
-            storage_type="entities",
-            data_dir="//my_data_dir",
-            model=embedder["model"],
-            dimension=embedder["dimension"],
-        ),
+    # Entity memory for tracking key information about entities
+    entity_memory = EntityMemory(
+        storage=RAGStorage(
+            embedder_config={
+                "provider": "openai",
+                "config": {
+                    "model": 'text-embedding-3-small'
+                }
+            },
+            type="short_term",
+            path="/my_crew1/"
+        )
+    ),
     verbose=True,
 )
 ```
+
+## Security Considerations
+
+When configuring memory storage:
+- Use environment variables for storage paths (e.g., `CREWAI_STORAGE_DIR`)
+- Never hardcode sensitive information like database credentials
+- Consider access permissions for storage directories
+- Use relative paths when possible to maintain portability
+
+Example using environment variables:
+```python
+import os
+from crewai import Crew
+from crewai.memory import LongTermMemory
+from crewai.memory.storage import LTMSQLiteStorage
+
+# Configure storage path using environment variable
+storage_path = os.getenv("CREWAI_STORAGE_DIR", "./storage")
+crew = Crew(
+    memory=True,
+    long_term_memory=LongTermMemory(
+        storage=LTMSQLiteStorage(
+            db_path="{storage_path}/memory.db".format(storage_path=storage_path)
+        )
+    )
+)
+```
+
+## Configuration Examples
+
+### Basic Memory Configuration
+```python
+from crewai import Crew
+from crewai.memory import LongTermMemory
+
+# Simple memory configuration
+crew = Crew(memory=True)  # Uses default storage locations
+```
+
+### Custom Storage Configuration
+```python
+from crewai import Crew
+from crewai.memory import LongTermMemory
+from crewai.memory.storage import LTMSQLiteStorage
+
+# Configure custom storage paths
+crew = Crew(
+    memory=True,
+    long_term_memory=LongTermMemory(
+        storage=LTMSQLiteStorage(db_path="./memory.db")
+    )
+)
+```
 
 ## Integrating Mem0 for Enhanced User Memory
 
 [Mem0](https://mem0.ai/) is a self-improving memory layer for LLM applications, enabling personalized AI experiences.
@@ -185,7 +251,12 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=OpenAIEmbeddingFunction(api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"),
+    embedder={
+        "provider": "openai",
+        "config": {
+            "model": 'text-embedding-3-small'
+        }
+    }
 )
 ```
@@ -211,6 +282,19 @@ my_crew = Crew(
 
 ### Using Google AI embeddings
 
+#### Prerequisites
+Before using Google AI embeddings, ensure you have:
+- Access to the Gemini API
+- The necessary API keys and permissions
+
+You will need to update your *pyproject.toml* dependencies:
+```YAML
+dependencies = [
+    "google-generativeai>=0.8.4", #main version in January/2025 - crewai v.0.100.0 and crewai-tools 0.33.0
+    "crewai[tools]>=0.100.0,<1.0.0"
+]
+```
+
 ```python Code
 from crewai import Crew, Agent, Task, Process
@@ -224,7 +308,7 @@ my_crew = Crew(
         "provider": "google",
         "config": {
             "api_key": "<YOUR_API_KEY>",
-            "model_name": "<model_name>"
+            "model": "<model_name>"
         }
     }
 )
@@ -242,13 +326,15 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=OpenAIEmbeddingFunction(
-        api_key="YOUR_API_KEY",
-        api_base="YOUR_API_BASE_PATH",
-        api_type="azure",
-        api_version="YOUR_API_VERSION",
-        model_name="text-embedding-3-small"
-    )
+    embedder={
+        "provider": "openai",
+        "config": {
+            "api_key": "YOUR_API_KEY",
+            "api_base": "YOUR_API_BASE_PATH",
+            "api_version": "YOUR_API_VERSION",
+            "model_name": 'text-embedding-3-small'
+        }
+    }
 )
 ```
@@ -264,12 +350,15 @@ my_crew = Crew(
     process=Process.sequential,
     memory=True,
     verbose=True,
-    embedder=GoogleVertexEmbeddingFunction(
-        project_id="YOUR_PROJECT_ID",
-        region="YOUR_REGION",
-        api_key="YOUR_API_KEY",
-        model_name="textembedding-gecko"
-    )
+    embedder={
+        "provider": "vertexai",
+        "config": {
+            "project_id": "YOUR_PROJECT_ID",
+            "region": "YOUR_REGION",
+            "api_key": "YOUR_API_KEY",
+            "model_name": "textembedding-gecko"
+        }
+    }
 )
 ```
@@ -288,7 +377,27 @@ my_crew = Crew(
         "provider": "cohere",
         "config": {
             "api_key": "YOUR_API_KEY",
-            "model_name": "<model_name>"
+            "model": "<model_name>"
         }
     }
 )
 ```
+### Using VoyageAI embeddings
+
+```python Code
+from crewai import Crew, Agent, Task, Process
+
+my_crew = Crew(
+    agents=[...],
+    tasks=[...],
+    process=Process.sequential,
+    memory=True,
+    verbose=True,
+    embedder={
+        "provider": "voyageai",
+        "config": {
+            "api_key": "YOUR_API_KEY",
+            "model": "<model_name>"
+        }
+    }
+)
@@ -338,7 +447,66 @@ my_crew = Crew(
 )
 ```
 
-### Resetting Memory
+### Using Amazon Bedrock embeddings
+
+```python Code
+# Note: Ensure you have installed `boto3` for Bedrock embeddings to work.
+
+import os
+import boto3
+from crewai import Crew, Agent, Task, Process
+
+boto3_session = boto3.Session(
+    region_name=os.environ.get("AWS_REGION_NAME"),
+    aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
+    aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY")
+)
+
+my_crew = Crew(
+    agents=[...],
+    tasks=[...],
+    process=Process.sequential,
+    memory=True,
+    embedder={
+        "provider": "bedrock",
+        "config":{
+            "session": boto3_session,
+            "model": "amazon.titan-embed-text-v2:0",
+            "vector_dimension": 1024
+        }
+    },
+    verbose=True
+)
+```
+
+### Adding Custom Embedding Function
+
+```python Code
+from crewai import Crew, Agent, Task, Process
+from chromadb import Documents, EmbeddingFunction, Embeddings
+
+# Create a custom embedding function
+class CustomEmbedder(EmbeddingFunction):
+    def __call__(self, input: Documents) -> Embeddings:
+        # generate embeddings
+        return [1, 2, 3]  # this is a dummy embedding
+
+my_crew = Crew(
+    agents=[...],
+    tasks=[...],
+    process=Process.sequential,
+    memory=True,
+    verbose=True,
+    embedder={
+        "provider": "custom",
+        "config": {
+            "embedder": CustomEmbedder()
+        }
+    }
+)
+```
+
+### Resetting Memory via cli
 
 ```shell
 crewai reset-memories [OPTIONS]
@@ -352,8 +520,46 @@ crewai reset-memories [OPTIONS]
 | `-s`, `--short` | Reset SHORT TERM memory. | Flag (boolean) | False |
 | `-e`, `--entities` | Reset ENTITIES memory. | Flag (boolean) | False |
 | `-k`, `--kickoff-outputs` | Reset LATEST KICKOFF TASK OUTPUTS. | Flag (boolean) | False |
+| `-kn`, `--knowledge` | Reset KNOWLEDGE storage | Flag (boolean) | False |
 | `-a`, `--all` | Reset ALL memories. | Flag (boolean) | False |
 
+Note: To use the cli command you need to have your crew in a file called crew.py in the same directory.
+
+### Resetting Memory via crew object
+
+```python
+my_crew = Crew(
+    agents=[...],
+    tasks=[...],
+    process=Process.sequential,
+    memory=True,
+    verbose=True,
+    embedder={
+        "provider": "custom",
+        "config": {
+            "embedder": CustomEmbedder()
+        }
+    }
+)
+
+my_crew.reset_memories(command_type = 'all')  # Resets all the memory
+```
+
+#### Resetting Memory Options
+
+| Command Type | Description |
+| :----------------- | :------------------------------- |
+| `long` | Reset LONG TERM memory. |
+| `short` | Reset SHORT TERM memory. |
+| `entities` | Reset ENTITIES memory. |
+| `kickoff_outputs` | Reset LATEST KICKOFF TASK OUTPUTS. |
+| `knowledge` | Reset KNOWLEDGE memory. |
+| `all` | Reset ALL memories. |
+
 ## Benefits of Using CrewAI's Memory System
````
````diff
@@ -81,8 +81,8 @@ my_crew.kickoff()
 
 3. **Collect Data:**
 
-   - Search for the latest papers, articles, and reports published in 2023 and early 2024.
-   - Use keywords like "Large Language Models 2024", "AI LLM advancements", "AI ethics 2024", etc.
+   - Search for the latest papers, articles, and reports published in 2024 and early 2025.
+   - Use keywords like "Large Language Models 2025", "AI LLM advancements", "AI ethics 2025", etc.
 
 4. **Analyze Findings:**
````
````diff
@@ -33,11 +33,12 @@ crew = Crew(
 | :------------------------------- | :---------------- | :---------------------------- | :------------------------------------------------------------------------------------------------------------------- |
 | **Description** | `description` | `str` | A clear, concise statement of what the task entails. |
 | **Expected Output** | `expected_output` | `str` | A detailed description of what the task's completion looks like. |
 | **Name** _(optional)_ | `name` | `Optional[str]` | A name identifier for the task. |
 | **Agent** _(optional)_ | `agent` | `Optional[BaseAgent]` | The agent responsible for executing the task. |
 | **Tools** _(optional)_ | `tools` | `List[BaseTool]` | The tools/resources the agent is limited to use for this task. |
 | **Context** _(optional)_ | `context` | `Optional[List["Task"]]` | Other tasks whose outputs will be used as context for this task. |
 | **Async Execution** _(optional)_ | `async_execution` | `Optional[bool]` | Whether the task should be executed asynchronously. Defaults to False. |
+| **Human Input** _(optional)_ | `human_input` | `Optional[bool]` | Whether the task should have a human review the final answer of the agent. Defaults to False. |
 | **Config** _(optional)_ | `config` | `Optional[Dict[str, Any]]` | Task-specific configuration parameters. |
 | **Output File** _(optional)_ | `output_file` | `Optional[str]` | File path for storing the task output. |
 | **Output JSON** _(optional)_ | `output_json` | `Optional[Type[BaseModel]]` | A Pydantic model to structure the JSON output. |
````
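To make the `output_json` attribute in the table above concrete, here is a short, hedged sketch; the `ResearchFindings` schema and the `researcher` agent are placeholders, not part of the library.

```python
from pydantic import BaseModel

from crewai import Task


class ResearchFindings(BaseModel):  # hypothetical output schema
    topic: str
    bullet_points: list[str]


research_task = Task(
    description="Summarize the most relevant information about {topic}",
    expected_output="A JSON object with the topic and a list of bullet points",
    output_json=ResearchFindings,  # structures the final output as JSON
    agent=researcher,              # assumes a previously defined agent
)
```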
````diff
@@ -68,7 +69,7 @@ research_task:
   description: >
     Conduct a thorough research about {topic}
     Make sure you find any interesting and relevant information given
-    the current year is 2024.
+    the current year is 2025.
   expected_output: >
     A list with 10 bullet points of the most relevant information about {topic}
   agent: researcher
@@ -154,7 +155,7 @@ research_task = Task(
     description="""
         Conduct a thorough research about AI Agents.
         Make sure you find any interesting and relevant information given
-        the current year is 2024.
+        the current year is 2025.
     """,
     expected_output="""
         A list with 10 bullet points of the most relevant information about AI Agents
@@ -267,7 +268,7 @@ analysis_task = Task(
 
 Task guardrails provide a way to validate and transform task outputs before they
 are passed to the next task. This feature helps ensure data quality and provides
-efeedback to agents when their output doesn't meet specific criteria.
+feedback to agents when their output doesn't meet specific criteria.
 
 ### Using Task Guardrails
````
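A minimal sketch of such a guardrail follows. It assumes the common pattern of a callable that receives the task output and returns a `(success, data)` tuple; the `TaskOutput` import path and the `researcher` agent are assumptions to verify against the installed version.

```python
from typing import Any, Tuple

from crewai import Task
from crewai.tasks.task_output import TaskOutput  # assumed import path


def validate_word_count(result: TaskOutput) -> Tuple[bool, Any]:
    """Reject overly long outputs so the next task gets a concise summary."""
    if len(result.raw.split()) > 500:
        return (False, "Summary exceeds 500 words; please shorten it.")
    return (True, result.raw)


summary_task = Task(
    description="Summarize the research findings in under 500 words",
    expected_output="A concise summary",
    agent=researcher,               # assumes a previously defined agent
    guardrail=validate_word_count,  # runs before the output reaches the next task
)
```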
````diff
@@ -875,6 +876,19 @@ save_output_task = Task(
 #...
 ```
 
+Check out the video below to see how to use structured outputs in CrewAI:
+
+<iframe
+  width="560"
+  height="315"
+  src="https://www.youtube.com/embed/dNpKQk5uxHw"
+  title="YouTube video player"
+  frameborder="0"
+  allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
+  referrerpolicy="strict-origin-when-cross-origin"
+  allowfullscreen
+></iframe>
+
 ## Conclusion
 
 Tasks are the driving force behind the actions of agents in CrewAI.
````
````diff
@@ -48,7 +48,6 @@ Define a crew with a designated manager and establish a clear chain of command.
 </Tip>
 
 ```python Code
-from langchain_openai import ChatOpenAI
 from crewai import Crew, Process, Agent
 
 # Agents are defined with attributes for backstory, cache, and verbose mode
@@ -56,38 +55,51 @@ researcher = Agent(
     role='Researcher',
     goal='Conduct in-depth analysis',
     backstory='Experienced data analyst with a knack for uncovering hidden trends.',
     cache=True,
     verbose=False,
     # tools=[]  # This can be optionally specified; defaults to an empty list
     use_system_prompt=True,  # Enable or disable system prompts for this agent
     max_rpm=30,  # Limit on the number of requests per minute
     max_iter=5  # Maximum number of iterations for a final answer
 )
 writer = Agent(
     role='Writer',
     goal='Create engaging content',
     backstory='Creative writer passionate about storytelling in technical domains.',
     cache=True,
     verbose=False,
     # tools=[]  # Optionally specify tools; defaults to an empty list
     use_system_prompt=True,  # Enable or disable system prompts for this agent
     max_rpm=30,  # Limit on the number of requests per minute
     max_iter=5  # Maximum number of iterations for a final answer
 )
 
 # Establishing the crew with a hierarchical process and additional configurations
 project_crew = Crew(
     tasks=[...],  # Tasks to be delegated and executed under the manager's supervision
     agents=[researcher, writer],
-    manager_llm=ChatOpenAI(temperature=0, model="gpt-4"),  # Mandatory if manager_agent is not set
-    process=Process.hierarchical,  # Specifies the hierarchical management approach
-    respect_context_window=True,  # Enable respect of the context window for tasks
-    memory=True,  # Enable memory usage for enhanced task execution
-    manager_agent=None,  # Optional: explicitly set a specific agent as manager instead of the manager_llm
-    planning=True,  # Enable planning feature for pre-execution strategy
+    manager_llm="gpt-4o",  # Specify which LLM the manager should use
+    process=Process.hierarchical,
+    planning=True,
 )
 ```
 
+### Using a Custom Manager Agent
+
+Alternatively, you can create a custom manager agent with specific attributes tailored to your project's management needs. This gives you more control over the manager's behavior and capabilities.
+
+```python
+# Define a custom manager agent
+manager = Agent(
+    role="Project Manager",
+    goal="Efficiently manage the crew and ensure high-quality task completion",
+    backstory="You're an experienced project manager, skilled in overseeing complex projects and guiding teams to success.",
+    allow_delegation=True,
+)
+
+# Use the custom manager in your crew
+project_crew = Crew(
+    tasks=[...],
+    agents=[researcher, writer],
+    manager_agent=manager,  # Use your custom manager agent
+    process=Process.hierarchical,
+    planning=True,
+)
+```
+
+<Tip>
+For more details on creating and customizing a manager agent, check out the [Custom Manager Agent documentation](https://docs.crewai.com/how-to/custom-manager-agent#custom-manager-agent).
+</Tip>
 
 ### Workflow in Action
 
 1. **Task Assignment**: The manager assigns tasks strategically, considering each agent's capabilities and available tools.
@@ -97,4 +109,4 @@ project_crew = Crew(
````
## Conclusion
|
||||
|
||||
Adopting the hierarchical process in CrewAI, with the correct configurations and understanding of the system's capabilities, facilitates an organized and efficient approach to project management.
|
||||
Utilize the advanced features and customizations to tailor the workflow to your specific needs, ensuring optimal task execution and project success.
|
||||
Utilize the advanced features and customizations to tailor the workflow to your specific needs, ensuring optimal task execution and project success.
|
||||
|
||||
@@ -60,12 +60,12 @@ writer = Agent(
|
||||
# Create tasks for your agents
|
||||
task1 = Task(
|
||||
description=(
|
||||
"Conduct a comprehensive analysis of the latest advancements in AI in 2024. "
|
||||
"Conduct a comprehensive analysis of the latest advancements in AI in 2025. "
|
||||
"Identify key trends, breakthrough technologies, and potential industry impacts. "
|
||||
"Compile your findings in a detailed report. "
|
||||
"Make sure to check with a human if the draft is good before finalizing your answer."
|
||||
),
|
||||
expected_output='A comprehensive full report on the latest AI advancements in 2024, leave nothing out',
|
||||
expected_output='A comprehensive full report on the latest AI advancements in 2025, leave nothing out',
|
||||
agent=researcher,
|
||||
human_input=True
|
||||
)
|
||||
@@ -76,7 +76,7 @@ task2 = Task(
|
||||
"Your post should be informative yet accessible, catering to a tech-savvy audience. "
|
||||
"Aim for a narrative that captures the essence of these breakthroughs and their implications for the future."
|
||||
),
|
||||
expected_output='A compelling 3 paragraphs blog post formatted as markdown about the latest AI advancements in 2024',
|
||||
expected_output='A compelling 3 paragraphs blog post formatted as markdown about the latest AI advancements in 2025',
|
||||
agent=writer,
|
||||
human_input=True
|
||||
)
|
||||
|
||||
@@ -54,7 +54,8 @@ coding_agent = Agent(
|
||||
# Create a task that requires code execution
|
||||
data_analysis_task = Task(
|
||||
description="Analyze the given dataset and calculate the average age of participants. Ages: {ages}",
|
||||
agent=coding_agent
|
||||
agent=coding_agent,
|
||||
expected_output="The average age of the participants."
|
||||
)
|
||||
|
||||
# Create a crew and add the task
|
||||
@@ -116,4 +117,4 @@ async def async_multiple_crews():
|
||||
|
||||
# Run the async function
|
||||
asyncio.run(async_multiple_crews())
|
||||
```
|
||||
```
|
||||
|
||||
100
docs/how-to/langfuse-observability.mdx
Normal file
@@ -0,0 +1,100 @@
|
||||
---
|
||||
title: Agent Monitoring with Langfuse
|
||||
description: Learn how to integrate Langfuse with CrewAI via OpenTelemetry using OpenLit
|
||||
icon: magnifying-glass-chart
|
||||
---
|
||||
|
||||
# Integrate Langfuse with CrewAI
|
||||
|
||||
This notebook demonstrates how to integrate **Langfuse** with **CrewAI** using OpenTelemetry via the **OpenLit** SDK. By the end of this notebook, you will be able to trace your CrewAI applications with Langfuse for improved observability and debugging.
|
||||
|
||||
> **What is Langfuse?** [Langfuse](https://langfuse.com) is an open-source LLM engineering platform. It provides tracing and monitoring capabilities for LLM applications, helping developers debug, analyze, and optimize their AI systems. Langfuse integrates with various tools and frameworks via native integrations, OpenTelemetry, and APIs/SDKs.
|
||||
|
||||
[](https://langfuse.com/watch-demo)
|
||||
|
||||
## Get Started
|
||||
|
||||
We'll walk through a simple example of using CrewAI and integrating it with Langfuse via OpenTelemetry using OpenLit.
|
||||
|
||||
### Step 1: Install Dependencies
|
||||
|
||||
|
||||
```python
|
||||
%pip install langfuse openlit crewai crewai_tools
|
||||
```
|
||||
|
||||
### Step 2: Set Up Environment Variables
|
||||
|
||||
Set your Langfuse API keys and configure OpenTelemetry export settings to send traces to Langfuse. Please refer to the [Langfuse OpenTelemetry Docs](https://langfuse.com/docs/opentelemetry/get-started) for more information on the Langfuse OpenTelemetry endpoint `/api/public/otel` and authentication.
|
||||
|
||||
|
||||
```python
|
||||
import os
|
||||
import base64
|
||||
|
||||
LANGFUSE_PUBLIC_KEY="pk-lf-..."
|
||||
LANGFUSE_SECRET_KEY="sk-lf-..."
|
||||
LANGFUSE_AUTH=base64.b64encode(f"{LANGFUSE_PUBLIC_KEY}:{LANGFUSE_SECRET_KEY}".encode()).decode()
|
||||
|
||||
os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://cloud.langfuse.com/api/public/otel" # EU data region
|
||||
# os.environ["OTEL_EXPORTER_OTLP_ENDPOINT"] = "https://us.cloud.langfuse.com/api/public/otel" # US data region
|
||||
os.environ["OTEL_EXPORTER_OTLP_HEADERS"] = f"Authorization=Basic {LANGFUSE_AUTH}"
|
||||
|
||||
# your openai key
|
||||
os.environ["OPENAI_API_KEY"] = "sk-..."
|
||||
```
|
||||
|
||||
### Step 3: Initialize OpenLit
|
||||
|
||||
Initialize the OpenLit OpenTelemetry instrumentation SDK to start capturing OpenTelemetry traces.
|
||||
|
||||
|
||||
```python
|
||||
import openlit
|
||||
|
||||
openlit.init()
|
||||
```
|
||||
|
||||
### Step 4: Create a Simple CrewAI Application
|
||||
|
||||
We'll create a simple CrewAI application where multiple agents collaborate to answer a user's question.
|
||||
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
|
||||
from crewai_tools import (
|
||||
WebsiteSearchTool
|
||||
)
|
||||
|
||||
web_rag_tool = WebsiteSearchTool()
|
||||
|
||||
writer = Agent(
|
||||
role="Writer",
|
||||
goal="You make math engaging and understandable for young children through poetry",
|
||||
backstory="You're an expert in writing haikus but you know nothing of math.",
|
||||
tools=[web_rag_tool],
|
||||
)
|
||||
|
||||
task = Task(description=("What is {multiplication}?"),
|
||||
expected_output=("Compose a haiku that includes the answer."),
|
||||
agent=writer)
|
||||
|
||||
crew = Crew(
|
||||
agents=[writer],
|
||||
tasks=[task],
|
||||
share_crew=False
|
||||
)
|
||||
```
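Before moving on, kick off the crew so there is something to trace. This is a minimal sketch; the input key matches the `{multiplication}` placeholder used in the task description above:

```python
result = crew.kickoff(inputs={"multiplication": "3 * 3"})
print(result)
```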
|
||||
|
||||
### Step 5: See Traces in Langfuse
|
||||
|
||||
After running the agent, you can view the traces generated by your CrewAI application in [Langfuse](https://cloud.langfuse.com). You should see detailed steps of the LLM interactions, which can help you debug and optimize your AI agent.
|
||||
|
||||

|
||||
|
||||
_[Public example trace in Langfuse](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/e2cf380ffc8d47d28da98f136140642b?timestamp=2025-02-05T15%3A12%3A02.717Z&observation=3b32338ee6a5d9af)_
|
||||
|
||||
## References
|
||||
|
||||
- [Langfuse OpenTelemetry Docs](https://langfuse.com/docs/opentelemetry/get-started)
|
||||
@@ -23,6 +23,7 @@ LiteLLM supports a wide range of providers, including but not limited to:
|
||||
- Azure OpenAI
|
||||
- AWS (Bedrock, SageMaker)
|
||||
- Cohere
|
||||
- VoyageAI
|
||||
- Hugging Face
|
||||
- Ollama
|
||||
- Mistral AI
|
||||
|
||||
206
docs/how-to/mlflow-observability.mdx
Normal file
@@ -0,0 +1,206 @@
|
||||
---
|
||||
title: Agent Monitoring with MLflow
|
||||
description: Quickly start monitoring your Agents with MLflow.
|
||||
icon: bars-staggered
|
||||
---
|
||||
|
||||
# MLflow Overview
|
||||
|
||||
[MLflow](https://mlflow.org/) is an open-source platform to assist machine learning practitioners and teams in handling the complexities of the machine learning process.
|
||||
|
||||
It provides a tracing feature that enhances LLM observability in your Generative AI applications by capturing detailed information about the execution of your application’s services.
|
||||
Tracing provides a way to record the inputs, outputs, and metadata associated with each intermediate step of a request, enabling you to easily pinpoint the source of bugs and unexpected behaviors.
|
||||
|
||||

|
||||
|
||||
### Features
|
||||
|
||||
- **Tracing Dashboard**: Monitor activities of your crewAI agents with detailed dashboards that include inputs, outputs and metadata of spans.
|
||||
- **Automated Tracing**: A fully automated integration with crewAI, which can be enabled by running `mlflow.crewai.autolog()`.
|
||||
- **Manual Trace Instrumentation with minor efforts**: Customize trace instrumentation through MLflow's high-level fluent APIs such as decorators, function wrappers and context managers.
|
||||
- **OpenTelemetry Compatibility**: MLflow Tracing supports exporting traces to an OpenTelemetry Collector, which can then be used to export traces to various backends such as Jaeger, Zipkin, and AWS X-Ray.
|
||||
- **Package and Deploy Agents**: Package and deploy your crewAI agents to an inference server with a variety of deployment targets.
|
||||
- **Securely Host LLMs**: Host multiple LLMs from various providers in one unified endpoint through the MLflow gateway.
|
||||
- **Evaluation**: Evaluate your crewAI agents with a wide range of metrics using a convenient API `mlflow.evaluate()`.
|
||||
|
||||
## Setup Instructions
|
||||
|
||||
<Steps>
|
||||
<Step title="Install MLflow package">
|
||||
```shell
|
||||
# The crewAI integration is available in mlflow>=2.19.0
|
||||
pip install mlflow
|
||||
```
|
||||
</Step>
|
||||
<Step title="Start MFflow tracking server">
|
||||
```shell
|
||||
# This process is optional, but it is recommended to use MLflow tracking server for better visualization and broader features.
|
||||
mlflow server
|
||||
```
|
||||
</Step>
|
||||
<Step title="Initialize MLflow in Your Application">
|
||||
Add the following two lines to your application code:
|
||||
|
||||
```python
|
||||
import mlflow
|
||||
|
||||
mlflow.crewai.autolog()
|
||||
|
||||
# Optional: Set a tracking URI and an experiment name if you have a tracking server
|
||||
mlflow.set_tracking_uri("http://localhost:5000")
|
||||
mlflow.set_experiment("CrewAI")
|
||||
```
|
||||
|
||||
Example Usage for tracing CrewAI Agents:
|
||||
|
||||
```python
|
||||
from crewai import Agent, Crew, Task
|
||||
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
|
||||
from crewai_tools import SerperDevTool, WebsiteSearchTool
|
||||
|
||||
from textwrap import dedent
|
||||
|
||||
content = "Users name is John. He is 30 years old and lives in San Francisco."
|
||||
string_source = StringKnowledgeSource(
|
||||
content=content, metadata={"preference": "personal"}
|
||||
)
|
||||
|
||||
search_tool = WebsiteSearchTool()
|
||||
|
||||
|
||||
class TripAgents:
|
||||
def city_selection_agent(self):
|
||||
return Agent(
|
||||
role="City Selection Expert",
|
||||
goal="Select the best city based on weather, season, and prices",
|
||||
backstory="An expert in analyzing travel data to pick ideal destinations",
|
||||
tools=[
|
||||
search_tool,
|
||||
],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
def local_expert(self):
|
||||
return Agent(
|
||||
role="Local Expert at this city",
|
||||
goal="Provide the BEST insights about the selected city",
|
||||
backstory="""A knowledgeable local guide with extensive information
|
||||
about the city, it's attractions and customs""",
|
||||
tools=[search_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
|
||||
class TripTasks:
|
||||
def identify_task(self, agent, origin, cities, interests, range):
|
||||
return Task(
|
||||
description=dedent(
|
||||
f"""
|
||||
Analyze and select the best city for the trip based
|
||||
on specific criteria such as weather patterns, seasonal
|
||||
events, and travel costs. This task involves comparing
|
||||
multiple cities, considering factors like current weather
|
||||
conditions, upcoming cultural or seasonal events, and
|
||||
overall travel expenses.
|
||||
Your final answer must be a detailed
|
||||
report on the chosen city, and everything you found out
|
||||
about it, including the actual flight costs, weather
|
||||
forecast and attractions.
|
||||
|
||||
Traveling from: {origin}
|
||||
City Options: {cities}
|
||||
Trip Date: {range}
|
||||
Traveler Interests: {interests}
|
||||
"""
|
||||
),
|
||||
agent=agent,
|
||||
expected_output="Detailed report on the chosen city including flight costs, weather forecast, and attractions",
|
||||
)
|
||||
|
||||
def gather_task(self, agent, origin, interests, range):
|
||||
return Task(
|
||||
description=dedent(
|
||||
f"""
|
||||
As a local expert on this city you must compile an
|
||||
in-depth guide for someone traveling there and wanting
|
||||
to have THE BEST trip ever!
|
||||
Gather information about key attractions, local customs,
|
||||
special events, and daily activity recommendations.
|
||||
Find the best spots to go to, the kind of place only a
|
||||
local would know.
|
||||
This guide should provide a thorough overview of what
|
||||
the city has to offer, including hidden gems, cultural
|
||||
hotspots, must-visit landmarks, weather forecasts, and
|
||||
high level costs.
|
||||
The final answer must be a comprehensive city guide,
|
||||
rich in cultural insights and practical tips,
|
||||
tailored to enhance the travel experience.
|
||||
|
||||
Trip Date: {range}
|
||||
Traveling from: {origin}
|
||||
Traveler Interests: {interests}
|
||||
"""
|
||||
),
|
||||
agent=agent,
|
||||
expected_output="Comprehensive city guide including hidden gems, cultural hotspots, and practical travel tips",
|
||||
)
|
||||
|
||||
|
||||
class TripCrew:
|
||||
def __init__(self, origin, cities, date_range, interests):
|
||||
self.cities = cities
|
||||
self.origin = origin
|
||||
self.interests = interests
|
||||
self.date_range = date_range
|
||||
|
||||
def run(self):
|
||||
agents = TripAgents()
|
||||
tasks = TripTasks()
|
||||
|
||||
city_selector_agent = agents.city_selection_agent()
|
||||
local_expert_agent = agents.local_expert()
|
||||
|
||||
identify_task = tasks.identify_task(
|
||||
city_selector_agent,
|
||||
self.origin,
|
||||
self.cities,
|
||||
self.interests,
|
||||
self.date_range,
|
||||
)
|
||||
gather_task = tasks.gather_task(
|
||||
local_expert_agent, self.origin, self.interests, self.date_range
|
||||
)
|
||||
|
||||
crew = Crew(
|
||||
agents=[city_selector_agent, local_expert_agent],
|
||||
tasks=[identify_task, gather_task],
|
||||
verbose=True,
|
||||
memory=True,
|
||||
knowledge={
|
||||
"sources": [string_source],
|
||||
"metadata": {"preference": "personal"},
|
||||
},
|
||||
)
|
||||
|
||||
result = crew.kickoff()
|
||||
return result
|
||||
|
||||
|
||||
trip_crew = TripCrew("California", "Tokyo", "Dec 12 - Dec 20", "sports")
|
||||
result = trip_crew.run()
|
||||
|
||||
print(result)
|
||||
```
|
||||
Refer to [MLflow Tracing Documentation](https://mlflow.org/docs/latest/llms/tracing/index.html) for more configurations and use cases.
|
||||
</Step>
|
||||
<Step title="Visualize Activities of Agents">
|
||||
Now traces for your crewAI agents are captured by MLflow.
|
||||
Let's visit the MLflow tracking server to view the traces and get insights into your agents.
|
||||
|
||||
Open `127.0.0.1:5000` in your browser to visit the MLflow tracking server.
|
||||
<Frame caption="MLflow Tracing Dashboard">
|
||||
<img src="/images/mlflow1.png" alt="MLflow tracing example with crewai" />
|
||||
</Frame>
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
@@ -45,6 +45,7 @@ image_analyst = Agent(
|
||||
# Create a task for image analysis
|
||||
task = Task(
|
||||
description="Analyze the product image at https://example.com/product.jpg and provide a detailed description",
|
||||
expected_output="A detailed description of the product image",
|
||||
agent=image_analyst
|
||||
)
|
||||
|
||||
@@ -81,6 +82,7 @@ inspection_task = Task(
|
||||
3. Compliance with standards
|
||||
Provide a detailed report highlighting any issues found.
|
||||
""",
|
||||
expected_output="A detailed report highlighting any issues found",
|
||||
agent=expert_analyst
|
||||
)
|
||||
|
||||
|
||||
@@ -1,211 +0,0 @@
|
||||
# Portkey Integration with CrewAI
|
||||
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-CrewAI.png" alt="Portkey CrewAI Header Image" width="70%" />
|
||||
|
||||
|
||||
[Portkey](https://portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) is a 2-line upgrade to make your CrewAI agents reliable, cost-efficient, and fast.
|
||||
|
||||
Portkey adds 4 core production capabilities to any CrewAI agent:
|
||||
1. Routing to **200+ LLMs**
|
||||
2. Making each LLM call more robust
|
||||
3. Full-stack tracing with cost and performance analytics
|
||||
4. Real-time guardrails to enforce behavior
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## Getting Started
|
||||
|
||||
1. **Install Required Packages:**
|
||||
|
||||
```bash
|
||||
pip install -qU crewai portkey-ai
|
||||
```
|
||||
|
||||
2. **Configure the LLM Client:**
|
||||
|
||||
To build CrewAI Agents with Portkey, you'll need two keys:
|
||||
- **Portkey API Key**: Sign up on the [Portkey app](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai) and copy your API key
|
||||
- **Virtual Key**: Virtual Keys securely manage your LLM API keys in one place. Store your LLM provider API keys securely in Portkey's vault
|
||||
|
||||
```python
|
||||
from crewai import LLM
|
||||
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL
|
||||
|
||||
gpt_llm = LLM(
|
||||
model="gpt-4",
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
api_key="dummy", # We are using Virtual key
|
||||
extra_headers=createHeaders(
|
||||
api_key="YOUR_PORTKEY_API_KEY",
|
||||
virtual_key="YOUR_VIRTUAL_KEY", # Enter your Virtual key from Portkey
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
3. **Create and Run Your First Agent:**
|
||||
|
||||
```python
|
||||
from crewai import Agent, Task, Crew
|
||||
|
||||
# Define your agents with roles and goals
|
||||
coder = Agent(
|
||||
role='Software developer',
|
||||
goal='Write clear, concise code on demand',
|
||||
backstory='An expert coder with a keen eye for software trends.',
|
||||
llm=gpt_llm
|
||||
)
|
||||
|
||||
# Create tasks for your agents
|
||||
task1 = Task(
|
||||
description="Define the HTML for making a simple website with heading- Hello World! Portkey is working!",
|
||||
expected_output="A clear and concise HTML code",
|
||||
agent=coder
|
||||
)
|
||||
|
||||
# Instantiate your crew
|
||||
crew = Crew(
|
||||
agents=[coder],
|
||||
tasks=[task1],
|
||||
)
|
||||
|
||||
result = crew.kickoff()
|
||||
print(result)
|
||||
```
|
||||
|
||||
|
||||
## Key Features
|
||||
|
||||
| Feature | Description |
|
||||
|---------|-------------|
|
||||
| 🌐 Multi-LLM Support | Access OpenAI, Anthropic, Gemini, Azure, and 250+ providers through a unified interface |
|
||||
| 🛡️ Production Reliability | Implement retries, timeouts, load balancing, and fallbacks |
|
||||
| 📊 Advanced Observability | Track 40+ metrics including costs, tokens, latency, and custom metadata |
|
||||
| 🔍 Comprehensive Logging | Debug with detailed execution traces and function call logs |
|
||||
| 🚧 Security Controls | Set budget limits and implement role-based access control |
|
||||
| 🔄 Performance Analytics | Capture and analyze feedback for continuous improvement |
|
||||
| 💾 Intelligent Caching | Reduce costs and latency with semantic or simple caching |
|
||||
|
||||
|
||||
## Production Features with Portkey Configs
|
||||
|
||||
All of the features below are configured through Portkey's Config system, which lets you define routing strategies using simple JSON objects in your LLM API calls. You can create and manage Configs directly in your code or through the Portkey Dashboard. Each Config has a unique ID for easy reference.
|
||||
|
||||
<Frame>
|
||||
<img src="https://raw.githubusercontent.com/Portkey-AI/docs-core/refs/heads/main/images/libraries/libraries-3.avif"/>
|
||||
</Frame>
|
||||
|
||||
|
||||
### 1. Use 250+ LLMs
|
||||
Access various LLMs like Anthropic, Gemini, Mistral, Azure OpenAI, and more with minimal code changes. Switch between providers or use them together seamlessly. [Learn more about Universal API](https://portkey.ai/docs/product/ai-gateway/universal-api)
|
||||
|
||||
|
||||
Easily switch between different LLM providers:
|
||||
|
||||
```python
|
||||
# Anthropic Configuration
|
||||
anthropic_llm = LLM(
|
||||
model="claude-3-5-sonnet-latest",
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
api_key="dummy",
|
||||
extra_headers=createHeaders(
|
||||
api_key="YOUR_PORTKEY_API_KEY",
|
||||
virtual_key="YOUR_ANTHROPIC_VIRTUAL_KEY", #You don't need provider when using Virtual keys
|
||||
trace_id="anthropic_agent"
|
||||
)
|
||||
)
|
||||
|
||||
# Azure OpenAI Configuration
|
||||
azure_llm = LLM(
|
||||
model="gpt-4",
|
||||
base_url=PORTKEY_GATEWAY_URL,
|
||||
api_key="dummy",
|
||||
extra_headers=createHeaders(
|
||||
api_key="YOUR_PORTKEY_API_KEY",
|
||||
virtual_key="YOUR_AZURE_VIRTUAL_KEY", #You don't need provider when using Virtual keys
|
||||
trace_id="azure_agent"
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
### 2. Caching
|
||||
Improve response times and reduce costs with two powerful caching modes:
|
||||
- **Simple Cache**: Perfect for exact matches
|
||||
- **Semantic Cache**: Matches responses for requests that are semantically similar
|
||||
[Learn more about Caching](https://portkey.ai/docs/product/ai-gateway/cache-simple-and-semantic)
|
||||
|
||||
```py
|
||||
config = {
|
||||
"cache": {
|
||||
"mode": "semantic", # or "simple" for exact matching
|
||||
}
|
||||
}
|
||||
```
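To apply such a config to your CrewAI LLM, you can pass it through `createHeaders`. This is a hedged sketch; the `config` parameter follows Portkey's Config docs and may differ slightly by SDK version:

```py
from crewai import LLM
from portkey_ai import createHeaders, PORTKEY_GATEWAY_URL

cached_llm = LLM(
    model="gpt-4",
    base_url=PORTKEY_GATEWAY_URL,
    api_key="dummy",  # using a Virtual Key, so this is a placeholder
    extra_headers=createHeaders(
        api_key="YOUR_PORTKEY_API_KEY",
        virtual_key="YOUR_VIRTUAL_KEY",
        config=config,  # the caching config defined above (or a saved Config ID string)
    )
)
```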
|
||||
|
||||
### 3. Production Reliability
|
||||
Portkey provides comprehensive reliability features:
|
||||
- **Automatic Retries**: Handle temporary failures gracefully
|
||||
- **Request Timeouts**: Prevent hanging operations
|
||||
- **Conditional Routing**: Route requests based on specific conditions
|
||||
- **Fallbacks**: Set up automatic provider failovers
|
||||
- **Load Balancing**: Distribute requests efficiently
|
||||
|
||||
[Learn more about Reliability Features](https://portkey.ai/docs/product/ai-gateway/)
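A hedged sketch of what a retry-plus-fallback Config might look like (key names follow Portkey's Config schema; verify against the current docs before relying on them):

```py
config = {
    "retry": {"attempts": 3},           # retry transient failures
    "strategy": {"mode": "fallback"},   # fall back to the next target on error
    "targets": [
        {"virtual_key": "YOUR_OPENAI_VIRTUAL_KEY"},
        {"virtual_key": "YOUR_ANTHROPIC_VIRTUAL_KEY"},
    ],
}
```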
|
||||
|
||||
|
||||
|
||||
### 4. Metrics
|
||||
|
||||
Agent runs are complex. Portkey automatically logs **40+ comprehensive metrics** for your AI agents, including cost, tokens used, latency, etc. Whether you need a broad overview or granular insights into your agent runs, Portkey's customizable filters provide the metrics you need.
|
||||
|
||||
|
||||
- Cost per agent interaction
|
||||
- Response times and latency
|
||||
- Token usage and efficiency
|
||||
- Success/failure rates
|
||||
- Cache hit rates
|
||||
|
||||
<img src="https://github.com/siddharthsambharia-portkey/Portkey-Product-Images/blob/main/Portkey-Dashboard.png?raw=true" width="70%" alt="Portkey Dashboard" />
|
||||
|
||||
### 5. Detailed Logging
|
||||
Logs are essential for understanding agent behavior, diagnosing issues, and improving performance. They provide a detailed record of agent activities and tool use, which is crucial for debugging and optimizing processes.
|
||||
|
||||
|
||||
Access a dedicated section to view records of agent executions, including parameters, outcomes, function calls, and errors. Filter logs based on multiple parameters such as trace ID, model, tokens used, and metadata.
|
||||
|
||||
<details>
|
||||
<summary><b>Traces</b></summary>
|
||||
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Traces.png" alt="Portkey Traces" width="70%" />
|
||||
</details>
|
||||
|
||||
<details>
|
||||
<summary><b>Logs</b></summary>
|
||||
<img src="https://raw.githubusercontent.com/siddharthsambharia-portkey/Portkey-Product-Images/main/Portkey-Logs.png" alt="Portkey Logs" width="70%" />
|
||||
</details>
|
||||
|
||||
### 6. Enterprise Security Features
|
||||
- Set budget limits and rate limits per Virtual Key (disposable API keys)
|
||||
- Implement role-based access control
|
||||
- Track system changes with audit logs
|
||||
- Configure data retention policies
|
||||
|
||||
|
||||
|
||||
For detailed information on creating and managing Configs, visit the [Portkey documentation](https://docs.portkey.ai/product/ai-gateway/configs).
|
||||
|
||||
## Resources
|
||||
|
||||
- [📘 Portkey Documentation](https://docs.portkey.ai)
|
||||
- [📊 Portkey Dashboard](https://app.portkey.ai/?utm_source=crewai&utm_medium=crewai&utm_campaign=crewai)
|
||||
- [🐦 Twitter](https://twitter.com/portkeyai)
|
||||
- [💬 Discord Community](https://discord.gg/DD7vgKK299)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
title: Portkey Observability and Guardrails
|
||||
title: Agent Monitoring with Portkey
|
||||
description: How to use Portkey with CrewAI
|
||||
icon: key
|
||||
---
|
||||
|
||||
BIN
docs/images/mlflow-tracing.gif
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 16 MiB |
BIN
docs/images/mlflow1.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 382 KiB |
@@ -15,113 +15,124 @@ icon: wrench
|
||||
If you need to update Python, visit [python.org/downloads](https://python.org/downloads)
|
||||
</Note>
|
||||
|
||||
# Installing CrewAI
|
||||
CrewAI uses `uv` as its dependency management and package handling tool. It simplifies project setup and execution, offering a seamless experience.
|
||||
|
||||
CrewAI is a flexible and powerful AI framework that enables you to create and manage AI agents, tools, and tasks efficiently.
|
||||
Let's get you set up! 🚀
|
||||
If you haven't installed `uv` yet, follow **step 1** to quickly get it set up on your system; otherwise, you can skip ahead to **step 2**.
|
||||
|
||||
<Steps>
|
||||
<Step title="Install CrewAI">
|
||||
Install CrewAI with all recommended tools using either method:
|
||||
```shell Terminal
|
||||
pip install 'crewai[tools]'
|
||||
<Step title="Install uv">
|
||||
- **On macOS/Linux:**
|
||||
|
||||
Use `curl` to download the script and execute it with `sh`:
|
||||
|
||||
```shell
|
||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
```
|
||||
or
|
||||
```shell Terminal
|
||||
pip install crewai crewai-tools
|
||||
If your system doesn't have `curl`, you can use `wget`:
|
||||
|
||||
```shell
|
||||
wget -qO- https://astral.sh/uv/install.sh | sh
|
||||
```
|
||||
|
||||
<Note>
|
||||
Both methods install the core package and additional tools needed for most use cases.
|
||||
</Note>
|
||||
- **On Windows:**
|
||||
|
||||
Use `irm` to download the script and `iex` to execute it:
|
||||
|
||||
```shell
|
||||
powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
|
||||
```
|
||||
If you run into any issues, refer to [UV's installation guide](https://docs.astral.sh/uv/getting-started/installation/) for more information.
|
||||
</Step>
|
||||
|
||||
<Step title="Upgrade CrewAI (Existing Installations Only)">
|
||||
If you have an older version of CrewAI installed, you can upgrade it:
|
||||
```shell Terminal
|
||||
pip install --upgrade crewai crewai-tools
|
||||
<Step title="Install CrewAI 🚀">
|
||||
- Run the following command to install `crewai` CLI:
|
||||
```shell
|
||||
uv tool install crewai
|
||||
```
|
||||
|
||||
<Warning>
|
||||
If you see a Poetry-related warning, you'll need to migrate to our new dependency manager:
|
||||
```shell Terminal
|
||||
crewai update
|
||||
<Warning>
|
||||
If you encounter a `PATH` warning, run this command to update your shell:
|
||||
```shell
|
||||
uv tool update-shell
|
||||
```
|
||||
This will update your project to use [UV](https://github.com/astral-sh/uv), our new faster dependency manager.
|
||||
</Warning>
|
||||
</Warning>
|
||||
|
||||
<Note>
|
||||
Skip this step if you're doing a fresh installation.
|
||||
</Note>
|
||||
</Step>
|
||||
|
||||
<Step title="Verify Installation">
|
||||
Check your installed versions:
|
||||
```shell Terminal
|
||||
pip freeze | grep crewai
|
||||
- To verify that `crewai` is installed, run:
|
||||
```shell
|
||||
uv tools list
|
||||
```
|
||||
|
||||
You should see something like:
|
||||
```markdown Output
|
||||
crewai==X.X.X
|
||||
crewai-tools==X.X.X
|
||||
- You should see something like:
|
||||
```markdown
|
||||
crewai v0.102.0
|
||||
- crewai
|
||||
```
|
||||
<Check>Installation successful! You're ready to create your first crew.</Check>
|
||||
<Check>Installation successful! You're ready to create your first crew! 🎉</Check>
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
# Creating a New Project
|
||||
# Creating a CrewAI Project
|
||||
|
||||
<Info>
|
||||
We recommend using the YAML Template scaffolding for a structured approach to defining agents and tasks.
|
||||
</Info>
|
||||
We recommend using the `YAML` template scaffolding for a structured approach to defining agents and tasks. Here's how to get started:
|
||||
|
||||
<Steps>
|
||||
<Step title="Generate Project Structure">
|
||||
Run the CrewAI CLI command:
|
||||
```shell Terminal
|
||||
crewai create crew <project_name>
|
||||
```
|
||||
<Step title="Generate Project Scaffolding">
|
||||
- Run the `crewai` CLI command:
|
||||
```shell
|
||||
crewai create crew <your_project_name>
|
||||
```
|
||||
|
||||
This creates a new project with the following structure:
|
||||
<Frame>
|
||||
```
|
||||
my_project/
|
||||
├── .gitignore
|
||||
├── pyproject.toml
|
||||
├── README.md
|
||||
├── .env
|
||||
└── src/
|
||||
└── my_project/
|
||||
├── __init__.py
|
||||
├── main.py
|
||||
├── crew.py
|
||||
├── tools/
|
||||
│ ├── custom_tool.py
|
||||
│ └── __init__.py
|
||||
└── config/
|
||||
├── agents.yaml
|
||||
└── tasks.yaml
|
||||
```
|
||||
</Frame>
|
||||
</Step>
|
||||
- This creates a new project with the following structure:
|
||||
<Frame>
|
||||
```
|
||||
my_project/
|
||||
├── .gitignore
|
||||
├── knowledge/
|
||||
├── pyproject.toml
|
||||
├── README.md
|
||||
├── .env
|
||||
└── src/
|
||||
└── my_project/
|
||||
├── __init__.py
|
||||
├── main.py
|
||||
├── crew.py
|
||||
├── tools/
|
||||
│ ├── custom_tool.py
|
||||
│ └── __init__.py
|
||||
└── config/
|
||||
├── agents.yaml
|
||||
└── tasks.yaml
|
||||
```
|
||||
</Frame>
|
||||
</Step>
|
||||
|
||||
<Step title="Customize Your Project">
|
||||
Your project will contain these essential files:
|
||||
- Your project will contain these essential files:
|
||||
| File | Purpose |
|
||||
| --- | --- |
|
||||
| `agents.yaml` | Define your AI agents and their roles |
|
||||
| `tasks.yaml` | Set up agent tasks and workflows |
|
||||
| `.env` | Store API keys and environment variables |
|
||||
| `main.py` | Project entry point and execution flow |
|
||||
| `crew.py` | Crew orchestration and coordination |
|
||||
| `tools/` | Directory for custom agent tools |
|
||||
| `knowledge/` | Directory for knowledge base |
|
||||
|
||||
| File | Purpose |
|
||||
| --- | --- |
|
||||
| `agents.yaml` | Define your AI agents and their roles |
|
||||
| `tasks.yaml` | Set up agent tasks and workflows |
|
||||
| `.env` | Store API keys and environment variables |
|
||||
| `main.py` | Project entry point and execution flow |
|
||||
| `crew.py` | Crew orchestration and coordination |
|
||||
| `tools/` | Directory for custom agent tools |
|
||||
- Start by editing `agents.yaml` and `tasks.yaml` to define your crew's behavior.
|
||||
- Keep sensitive information like API keys in `.env`.
|
||||
</Step>
|
||||
|
||||
<Tip>
|
||||
Start by editing `agents.yaml` and `tasks.yaml` to define your crew's behavior.
|
||||
Keep sensitive information like API keys in `.env`.
|
||||
</Tip>
|
||||
<Step title="Run your Crew">
|
||||
- Before you run your crew, make sure to run:
|
||||
```bash
|
||||
crewai install
|
||||
```
|
||||
- If you need to install additional packages, use:
|
||||
```shell
|
||||
uv add <package-name>
|
||||
```
|
||||
- To run your crew, execute the following command in the root of your project:
|
||||
```bash
|
||||
crewai run
|
||||
```
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
|
||||
@@ -101,8 +101,10 @@
|
||||
"how-to/conditional-tasks",
|
||||
"how-to/agentops-observability",
|
||||
"how-to/langtrace-observability",
|
||||
"how-to/mlflow-observability",
|
||||
"how-to/openlit-observability",
|
||||
"how-to/portkey-observability"
|
||||
"how-to/portkey-observability",
|
||||
"how-to/langfuse-observability"
|
||||
]
|
||||
},
|
||||
{
|
||||
@@ -114,6 +116,8 @@
|
||||
{
|
||||
"group": "Tools",
|
||||
"pages": [
|
||||
"tools/aimindtool",
|
||||
"tools/bravesearchtool",
|
||||
"tools/browserbaseloadtool",
|
||||
"tools/codedocssearchtool",
|
||||
"tools/codeinterpretertool",
|
||||
@@ -130,18 +134,32 @@
|
||||
"tools/firecrawlscrapewebsitetool",
|
||||
"tools/firecrawlsearchtool",
|
||||
"tools/githubsearchtool",
|
||||
"tools/hyperbrowserloadtool",
|
||||
"tools/linkupsearchtool",
|
||||
"tools/llamaindextool",
|
||||
"tools/serperdevtool",
|
||||
"tools/s3readertool",
|
||||
"tools/s3writertool",
|
||||
"tools/scrapegraphscrapetool",
|
||||
"tools/scrapeelementfromwebsitetool",
|
||||
"tools/jsonsearchtool",
|
||||
"tools/mdxsearchtool",
|
||||
"tools/mysqltool",
|
||||
"tools/multiontool",
|
||||
"tools/nl2sqltool",
|
||||
"tools/patronustools",
|
||||
"tools/pdfsearchtool",
|
||||
"tools/pgsearchtool",
|
||||
"tools/qdrantvectorsearchtool",
|
||||
"tools/ragtool",
|
||||
"tools/scrapewebsitetool",
|
||||
"tools/scrapflyscrapetool",
|
||||
"tools/seleniumscrapingtool",
|
||||
"tools/snowflakesearchtool",
|
||||
"tools/spidertool",
|
||||
"tools/txtsearchtool",
|
||||
"tools/visiontool",
|
||||
"tools/weaviatevectorsearchtool",
|
||||
"tools/websitesearchtool",
|
||||
"tools/xmlsearchtool",
|
||||
"tools/youtubechannelsearchtool",
|
||||
|
||||
@@ -8,10 +8,10 @@ icon: rocket
|
||||
|
||||
Let's create a simple crew that will help us `research` and `report` on the `latest AI developments` for a given topic or subject.
|
||||
|
||||
Before we proceed, make sure you have `crewai` and `crewai-tools` installed.
|
||||
Before we proceed, make sure you have finished installing CrewAI.
|
||||
If you haven't installed them yet, you can do so by following the [installation guide](/installation).
|
||||
|
||||
Follow the steps below to get crewing! 🚣♂️
|
||||
Follow the steps below to get Crewing! 🚣♂️
|
||||
|
||||
<Steps>
|
||||
<Step title="Create your crew">
|
||||
@@ -23,6 +23,13 @@ Follow the steps below to get crewing! 🚣♂️
|
||||
```
|
||||
</CodeGroup>
|
||||
</Step>
|
||||
<Step title="Navigate to your new crew project">
|
||||
<CodeGroup>
|
||||
```shell Terminal
|
||||
cd latest-ai-development
|
||||
```
|
||||
</CodeGroup>
|
||||
</Step>
|
||||
<Step title="Modify your `agents.yaml` file">
|
||||
<Tip>
|
||||
You can also modify the agents as needed to fit your use case or copy and paste as is to your project.
|
||||
@@ -58,7 +65,7 @@ Follow the steps below to get crewing! 🚣♂️
|
||||
description: >
|
||||
Conduct a thorough research about {topic}
|
||||
Make sure you find any interesting and relevant information given
|
||||
the current year is 2024.
|
||||
the current year is 2025.
|
||||
expected_output: >
|
||||
A list with 10 bullet points of the most relevant information about {topic}
|
||||
agent: researcher
|
||||
@@ -172,21 +179,26 @@ Follow the steps below to get crewing! 🚣♂️
|
||||
- A [Serper.dev](https://serper.dev/) API key: `SERPER_API_KEY=YOUR_KEY_HERE`
|
||||
</Step>
|
||||
<Step title="Lock and install the dependencies">
|
||||
Lock the dependencies and install them by using the CLI command but first, navigate to your project directory:
|
||||
<CodeGroup>
|
||||
```shell Terminal
|
||||
cd latest-ai-development
|
||||
crewai install
|
||||
```
|
||||
</CodeGroup>
|
||||
- Lock the dependencies and install them by using the CLI command:
|
||||
<CodeGroup>
|
||||
```shell Terminal
|
||||
crewai install
|
||||
```
|
||||
</CodeGroup>
|
||||
- If you have additional packages that you want to install, you can do so by running:
|
||||
<CodeGroup>
|
||||
```shell Terminal
|
||||
uv add <package-name>
|
||||
```
|
||||
</CodeGroup>
|
||||
</Step>
|
||||
<Step title="Run your crew">
|
||||
To run your crew, execute the following command in the root of your project:
|
||||
<CodeGroup>
|
||||
```bash Terminal
|
||||
crewai run
|
||||
```
|
||||
</CodeGroup>
|
||||
- To run your crew, execute the following command in the root of your project:
|
||||
<CodeGroup>
|
||||
```bash Terminal
|
||||
crewai run
|
||||
```
|
||||
</CodeGroup>
|
||||
</Step>
|
||||
<Step title="View your final report">
|
||||
You should see the output in the console and the `report.md` file should be created in the root of your project with the final report.
|
||||
@@ -195,10 +207,10 @@ Follow the steps below to get crewing! 🚣♂️
|
||||
|
||||
<CodeGroup>
|
||||
```markdown output/report.md
|
||||
# Comprehensive Report on the Rise and Impact of AI Agents in 2024
|
||||
# Comprehensive Report on the Rise and Impact of AI Agents in 2025
|
||||
|
||||
## 1. Introduction to AI Agents
|
||||
In 2024, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce.
|
||||
In 2025, Artificial Intelligence (AI) agents are at the forefront of innovation across various industries. As intelligent systems that can perform tasks typically requiring human cognition, AI agents are paving the way for significant advancements in operational efficiency, decision-making, and overall productivity within sectors like Human Resources (HR) and Finance. This report aims to detail the rise of AI agents, their frameworks, applications, and potential implications on the workforce.
|
||||
|
||||
## 2. Benefits of AI Agents
|
||||
AI agents bring numerous advantages that are transforming traditional work environments. Key benefits include:
|
||||
@@ -252,12 +264,18 @@ Follow the steps below to get crewing! 🚣♂️
|
||||
To stay competitive and harness the full potential of AI agents, organizations must remain vigilant about latest developments in AI technology and consider continuous learning and adaptation in their strategic planning.
|
||||
|
||||
## 8. Conclusion
|
||||
The emergence of AI agents is undeniably reshaping the workplace landscape in 2024. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment.
|
||||
The emergence of AI agents is undeniably reshaping the workplace landscape in 2025. With their ability to automate tasks, enhance efficiency, and improve decision-making, AI agents are critical in driving operational success. Organizations must embrace and adapt to AI developments to thrive in an increasingly digital business environment.
|
||||
```
|
||||
</CodeGroup>
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
<Check>
|
||||
Congratulations!
|
||||
|
||||
You have successfully set up your crew project and are ready to start building your own agentic workflows!
|
||||
</Check>
|
||||
|
||||
### Note on Consistency in Naming
|
||||
|
||||
The names you use in your YAML files (`agents.yaml` and `tasks.yaml`) should match the method names in your Python code.
|
||||
@@ -278,7 +296,7 @@ email_summarizer:
|
||||
Summarize emails into a concise and clear summary
|
||||
backstory: >
|
||||
You will create a 5 bullet point summary of the report
|
||||
llm: mixtal_llm
|
||||
llm: openai/gpt-4o
|
||||
```
|
||||
|
||||
<Tip>
|
||||
@@ -297,194 +315,9 @@ email_summarizer_task:
|
||||
- research_task
|
||||
```
|
||||
|
||||
Use the annotations to properly reference the agent and task in the `crew.py` file.
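For instance, a rough sketch of how the `email_summarizer` entries above would be referenced inside `crew.py` using the `@agent`/`@task` annotations described below (method names mirror the YAML keys):

```python
@agent
def email_summarizer(self) -> Agent:
    return Agent(
        config=self.agents_config["email_summarizer"],  # key matches agents.yaml
    )

@task
def email_summarizer_task(self) -> Task:
    return Task(
        config=self.tasks_config["email_summarizer_task"],  # key matches tasks.yaml
    )
```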
|
||||
|
||||
### Annotations include:
|
||||
|
||||
Here are examples of how to use each annotation in your CrewAI project, and when you should use them:
|
||||
|
||||
#### @agent
|
||||
Used to define an agent in your crew. Use this when:
|
||||
- You need to create a specialized AI agent with a specific role
|
||||
- You want the agent to be automatically collected and managed by the crew
|
||||
- You need to reuse the same agent configuration across multiple tasks
|
||||
|
||||
```python
|
||||
@agent
|
||||
def research_agent(self) -> Agent:
|
||||
return Agent(
|
||||
role="Research Analyst",
|
||||
goal="Conduct thorough research on given topics",
|
||||
backstory="Expert researcher with years of experience in data analysis",
|
||||
tools=[SerperDevTool()],
|
||||
verbose=True
|
||||
)
|
||||
```
|
||||
|
||||
#### @task
|
||||
Used to define a task that can be executed by agents. Use this when:
|
||||
- You need to define a specific piece of work for an agent
|
||||
- You want tasks to be automatically sequenced and managed
|
||||
- You need to establish dependencies between different tasks
|
||||
|
||||
```python
|
||||
@task
|
||||
def research_task(self) -> Task:
|
||||
return Task(
|
||||
description="Research the latest developments in AI technology",
|
||||
expected_output="A comprehensive report on AI advancements",
|
||||
agent=self.research_agent(),
|
||||
output_file="output/research.md"
|
||||
)
|
||||
```
|
||||
|
||||
#### @crew
|
||||
Used to define your crew configuration. Use this when:
|
||||
- You want to automatically collect all @agent and @task definitions
|
||||
- You need to specify how tasks should be processed (sequential or hierarchical)
|
||||
- You want to set up crew-wide configurations
|
||||
|
||||
```python
|
||||
@crew
|
||||
def research_crew(self) -> Crew:
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically collected from @agent methods
|
||||
tasks=self.tasks, # Automatically collected from @task methods
|
||||
process=Process.sequential,
|
||||
verbose=True
|
||||
)
|
||||
```
|
||||
|
||||
#### @tool
|
||||
Used to create custom tools for your agents. Use this when:
|
||||
- You need to give agents specific capabilities (like web search, data analysis)
|
||||
- You want to encapsulate external API calls or complex operations
|
||||
- You need to share functionality across multiple agents
|
||||
|
||||
```python
|
||||
@tool
|
||||
def web_search_tool(query: str, max_results: int = 5) -> list[str]:
|
||||
"""
|
||||
Search the web for information.
|
||||
|
||||
Args:
|
||||
query: The search query
|
||||
max_results: Maximum number of results to return
|
||||
|
||||
Returns:
|
||||
List of search results
|
||||
"""
|
||||
# Implement your search logic here
|
||||
return [f"Result {i} for: {query}" for i in range(max_results)]
|
||||
```
|
||||
|
||||
#### @before_kickoff
|
||||
Used to execute logic before the crew starts. Use this when:
|
||||
- You need to validate or preprocess input data
|
||||
- You want to set up resources or configurations before execution
|
||||
- You need to perform any initialization logic
|
||||
|
||||
```python
|
||||
@before_kickoff
|
||||
def validate_inputs(self, inputs: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
|
||||
"""Validate and preprocess inputs before the crew starts."""
|
||||
if inputs is None:
|
||||
return None
|
||||
|
||||
if 'topic' not in inputs:
|
||||
raise ValueError("Topic is required")
|
||||
|
||||
# Add additional context
|
||||
inputs['timestamp'] = datetime.now().isoformat()
|
||||
inputs['topic'] = inputs['topic'].strip().lower()
|
||||
return inputs
|
||||
```
|
||||
|
||||
#### @after_kickoff
|
||||
Used to process results after the crew completes. Use this when:
|
||||
- You need to format or transform the final output
|
||||
- You want to perform cleanup operations
|
||||
- You need to save or log the results in a specific way
|
||||
|
||||
```python
|
||||
@after_kickoff
|
||||
def process_results(self, result: CrewOutput) -> CrewOutput:
|
||||
"""Process and format the results after the crew completes."""
|
||||
result.raw = result.raw.strip()
|
||||
result.raw = f"""
|
||||
# Research Results
|
||||
Generated on: {datetime.now().isoformat()}
|
||||
|
||||
{result.raw}
|
||||
"""
|
||||
return result
|
||||
```
|
||||
|
||||
#### @callback
|
||||
Used to handle events during crew execution. Use this when:
|
||||
- You need to monitor task progress
|
||||
- You want to log intermediate results
|
||||
- You need to implement custom progress tracking or metrics
|
||||
|
||||
```python
|
||||
@callback
|
||||
def log_task_completion(self, task: Task, output: str):
|
||||
"""Log task completion details for monitoring."""
|
||||
print(f"Task '{task.description}' completed")
|
||||
print(f"Output length: {len(output)} characters")
|
||||
print(f"Agent used: {task.agent.role}")
|
||||
print("-" * 50)
|
||||
```
|
||||
|
||||
#### @cache_handler
|
||||
Used to implement custom caching for task results. Use this when:
|
||||
- You want to avoid redundant expensive operations
|
||||
- You need to implement custom cache storage or expiration logic
|
||||
- You want to persist results between runs
|
||||
|
||||
```python
|
||||
@cache_handler
|
||||
def custom_cache(self, key: str) -> Optional[str]:
|
||||
"""Custom cache implementation for storing task results."""
|
||||
cache_file = f"cache/{key}.json"
|
||||
|
||||
if os.path.exists(cache_file):
|
||||
with open(cache_file, 'r') as f:
|
||||
data = json.load(f)
|
||||
# Check if cache is still valid (e.g., not expired)
|
||||
if datetime.fromisoformat(data['timestamp']) > datetime.now() - timedelta(days=1):
|
||||
return data['result']
|
||||
return None
|
||||
```
|
||||
|
||||
<Note>
|
||||
These decorators are part of the CrewAI framework and help organize your crew's structure by automatically collecting agents, tasks, and handling various lifecycle events.
|
||||
They should be used within a class decorated with `@CrewBase`.
|
||||
</Note>
|
||||
|
||||
### Replay Tasks from Latest Crew Kickoff
|
||||
|
||||
CrewAI now includes a replay feature that allows you to list the tasks from the last run and replay from a specific one. To use this feature, run:
|
||||
|
||||
```shell
|
||||
crewai replay <task_id>
|
||||
```
|
||||
|
||||
Replace `<task_id>` with the ID of the task you want to replay.
|
||||
|
||||
### Reset Crew Memory
|
||||
|
||||
If you need to reset the memory of your crew before running it again, you can do so by calling the reset memory feature:
|
||||
|
||||
```shell
|
||||
crewai reset-memories --all
|
||||
```
|
||||
|
||||
This will clear the crew's memory, allowing for a fresh start.
|
||||
|
||||
## Deploying Your Project
|
||||
|
||||
The easiest way to deploy your crew is through CrewAI Enterprise, where you can deploy your crew in a few clicks.
|
||||
The easiest way to deploy your crew is through [CrewAI Enterprise](http://app.crewai.com), where you can deploy your crew in a few clicks.
|
||||
|
||||
<CardGroup cols={2}>
|
||||
<Card
|
||||
|
||||
118
docs/tools/aimindtool.mdx
Normal file
@@ -0,0 +1,118 @@
|
||||
---
|
||||
title: AI Mind Tool
|
||||
description: The `AIMindTool` is designed to query data sources in natural language.
|
||||
icon: brain
|
||||
---
|
||||
|
||||
# `AIMindTool`
|
||||
|
||||
## Description
|
||||
|
||||
The `AIMindTool` is a wrapper around [AI-Minds](https://mindsdb.com/minds) provided by [MindsDB](https://mindsdb.com/). It allows you to query data sources in natural language by simply configuring their connection parameters. This tool is useful when you need answers to questions from your data stored in various data sources including PostgreSQL, MySQL, MariaDB, ClickHouse, Snowflake, and Google BigQuery.
|
||||
|
||||
Minds are AI systems that work similarly to large language models (LLMs) but go beyond by answering any question from any data. This is accomplished by:
|
||||
- Selecting the most relevant data for an answer using parametric search
|
||||
- Understanding the meaning and providing responses within the correct context through semantic search
|
||||
- Delivering precise answers by analyzing data and using machine learning (ML) models
|
||||
|
||||
## Installation
|
||||
|
||||
To incorporate this tool into your project, you need to install the Minds SDK:
|
||||
|
||||
```shell
|
||||
uv add minds-sdk
|
||||
```
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
To effectively use the `AIMindTool`, follow these steps:
|
||||
|
||||
1. **Package Installation**: Confirm that the `crewai[tools]` and `minds-sdk` packages are installed in your Python environment.
|
||||
2. **API Key Acquisition**: Sign up for a Minds account [here](https://mdb.ai/register), and obtain an API key.
|
||||
3. **Environment Configuration**: Store your obtained API key in an environment variable named `MINDS_API_KEY` to facilitate its use by the tool.
|
||||
|
||||
## Example
|
||||
|
||||
The following example demonstrates how to initialize the tool and execute a query:
|
||||
|
||||
```python Code
|
||||
from crewai_tools import AIMindTool
|
||||
|
||||
# Initialize the AIMindTool
|
||||
aimind_tool = AIMindTool(
|
||||
datasources=[
|
||||
{
|
||||
"description": "house sales data",
|
||||
"engine": "postgres",
|
||||
"connection_data": {
|
||||
"user": "demo_user",
|
||||
"password": "demo_password",
|
||||
"host": "samples.mindsdb.com",
|
||||
"port": 5432,
|
||||
"database": "demo",
|
||||
"schema": "demo_data"
|
||||
},
|
||||
"tables": ["house_sales"]
|
||||
}
|
||||
]
|
||||
)
|
||||
|
||||
# Run a natural language query
|
||||
result = aimind_tool.run("How many 3 bedroom houses were sold in 2008?")
|
||||
print(result)
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
The `AIMindTool` accepts the following parameters:
|
||||
|
||||
- **api_key**: Optional. Your Minds API key. If not provided, it will be read from the `MINDS_API_KEY` environment variable.
|
||||
- **datasources**: A list of dictionaries, each containing the following keys:
|
||||
- **description**: A description of the data contained in the datasource.
|
||||
- **engine**: The engine (or type) of the datasource.
|
||||
- **connection_data**: A dictionary containing the connection parameters for the datasource.
|
||||
- **tables**: A list of tables that the data source will use. This is optional and can be omitted if all tables in the data source are to be used.
|
||||
|
||||
A list of supported data sources and their connection parameters can be found [here](https://docs.mdb.ai/docs/data_sources).
|
||||
|
||||
## Agent Integration Example
|
||||
|
||||
Here's how to integrate the `AIMindTool` with a CrewAI agent:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent
|
||||
from crewai.project import agent
|
||||
from crewai_tools import AIMindTool
|
||||
|
||||
# Initialize the tool
|
||||
aimind_tool = AIMindTool(
|
||||
datasources=[
|
||||
{
|
||||
"description": "sales data",
|
||||
"engine": "postgres",
|
||||
"connection_data": {
|
||||
"user": "your_user",
|
||||
"password": "your_password",
|
||||
"host": "your_host",
|
||||
"port": 5432,
|
||||
"database": "your_db",
|
||||
"schema": "your_schema"
|
||||
},
|
||||
"tables": ["sales"]
|
||||
}
|
||||
]
|
||||
)
|
||||
|
||||
# Define an agent with the AIMindTool
|
||||
@agent
|
||||
def data_analyst(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config["data_analyst"],
|
||||
allow_delegation=False,
|
||||
tools=[aimind_tool]
|
||||
)
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `AIMindTool` provides a powerful way to query your data sources using natural language, making it easier to extract insights without writing complex SQL queries. By connecting to various data sources and leveraging AI-Minds technology, this tool enables agents to access and analyze data efficiently.
|
||||
96
docs/tools/bravesearchtool.mdx
Normal file
@@ -0,0 +1,96 @@
|
||||
---
|
||||
title: Brave Search
|
||||
description: The `BraveSearchTool` is designed to search the internet using the Brave Search API.
|
||||
icon: searchengin
|
||||
---
|
||||
|
||||
# `BraveSearchTool`
|
||||
|
||||
## Description
|
||||
|
||||
This tool is designed to perform web searches using the Brave Search API. It allows you to search the internet with a specified query and retrieve relevant results. The tool supports customizable result counts and country-specific searches.
|
||||
|
||||
## Installation
|
||||
|
||||
To incorporate this tool into your project, follow the installation instructions below:
|
||||
|
||||
```shell
|
||||
pip install 'crewai[tools]'
|
||||
```
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
To effectively use the `BraveSearchTool`, follow these steps:
|
||||
|
||||
1. **Package Installation**: Confirm that the `crewai[tools]` package is installed in your Python environment.
|
||||
2. **API Key Acquisition**: Acquire a Brave Search API key by registering at [Brave Search API](https://api.search.brave.com/app/keys).
|
||||
3. **Environment Configuration**: Store your obtained API key in an environment variable named `BRAVE_API_KEY` to facilitate its use by the tool.

## Example

The following example demonstrates how to initialize the tool and execute a search with a given query:

```python Code
from crewai_tools import BraveSearchTool

# Initialize the tool for internet searching capabilities
tool = BraveSearchTool()

# Execute a search
results = tool.run(search_query="CrewAI agent framework")
print(results)
```

## Parameters

The `BraveSearchTool` accepts the following parameters:

- **search_query**: Mandatory. The search query you want to use to search the internet.
- **country**: Optional. Specify the country for the search results. Default is an empty string.
- **n_results**: Optional. Number of search results to return. Default is `10`.
- **save_file**: Optional. Whether to save the search results to a file. Default is `False`.

## Example with Parameters

Here is an example demonstrating how to use the tool with additional parameters:

```python Code
from crewai_tools import BraveSearchTool

# Initialize the tool with custom parameters
tool = BraveSearchTool(
    country="US",
    n_results=5,
    save_file=True
)

# Execute a search
results = tool.run(search_query="Latest AI developments")
print(results)
```

## Agent Integration Example

Here's how to integrate the `BraveSearchTool` with a CrewAI agent:

```python Code
from crewai import Agent
from crewai.project import agent
from crewai_tools import BraveSearchTool

# Initialize the tool
brave_search_tool = BraveSearchTool()

# Define an agent with the BraveSearchTool
@agent
def researcher(self) -> Agent:
    return Agent(
        config=self.agents_config["researcher"],
        allow_delegation=False,
        tools=[brave_search_tool]
    )
```

## Conclusion

By integrating the `BraveSearchTool` into Python projects, users gain the ability to conduct real-time, relevant searches across the internet directly from their applications. The tool provides a simple interface to the powerful Brave Search API, making it easy to retrieve and process search results programmatically. By adhering to the setup and usage guidelines provided, incorporating this tool into projects is streamlined and straightforward.

@@ -8,18 +8,15 @@ icon: code-simple

## Description

This tool enables the Agent to execute Python 3 code that it has generated autonomously. The code is run in a secure, isolated environment, ensuring safety regardless of the content.

This functionality is particularly valuable as it allows the Agent to create code, execute it within the same ecosystem,
obtain the results, and utilize that information to inform subsequent decisions and actions.
The `CodeInterpreterTool` enables CrewAI agents to execute Python 3 code that they generate autonomously. The code is run in a secure, isolated Docker container, ensuring safety regardless of the content. This functionality is particularly valuable as it allows agents to create code, execute it, obtain the results, and utilize that information to inform subsequent decisions and actions.

## Requirements

- Docker
- Docker must be installed and running on your system. If you don't have it, you can install it from [here](https://docs.docker.com/get-docker/).

## Installation

Install the `crewai_tools` package
To use this tool, you need to install the CrewAI tools package:

```shell
pip install 'crewai[tools]'
```

@@ -27,27 +24,153 @@ pip install 'crewai[tools]'

## Example

Remember that when using this tool, the code must be generated by the Agent itself.
The code must be Python 3 code, and the first run will take some time because it needs to build the Docker image.
The following example demonstrates how to use the `CodeInterpreterTool` with a CrewAI agent:

```python Code
from crewai import Agent
from crewai import Agent, Task, Crew, Process
from crewai_tools import CodeInterpreterTool

Agent(
    ...
    tools=[CodeInterpreterTool()],
# Initialize the tool
code_interpreter = CodeInterpreterTool()

# Define an agent that uses the tool
programmer_agent = Agent(
    role="Python Programmer",
    goal="Write and execute Python code to solve problems",
    backstory="An expert Python programmer who can write efficient code to solve complex problems.",
    tools=[code_interpreter],
    verbose=True,
)

# Example task to generate and execute code
coding_task = Task(
    description="Write a Python function to calculate the Fibonacci sequence up to the 10th number and print the result.",
    expected_output="The Fibonacci sequence up to the 10th number.",
    agent=programmer_agent,
)

# Create and run the crew
crew = Crew(
    agents=[programmer_agent],
    tasks=[coding_task],
    verbose=True,
    process=Process.sequential,
)
result = crew.kickoff()
```

We also provide a simple way to use it directly from the Agent.
You can also enable code execution directly when creating an agent:

```python Code
from crewai import Agent

agent = Agent(
    ...
    allow_code_execution=True,
# Create an agent with code execution enabled
programmer_agent = Agent(
    role="Python Programmer",
    goal="Write and execute Python code to solve problems",
    backstory="An expert Python programmer who can write efficient code to solve complex problems.",
    allow_code_execution=True,  # This automatically adds the CodeInterpreterTool
    verbose=True,
)
```

## Parameters

The `CodeInterpreterTool` accepts the following parameters during initialization:

- **user_dockerfile_path**: Optional. Path to a custom Dockerfile to use for the code interpreter container.
- **user_docker_base_url**: Optional. URL to the Docker daemon to use for running the container.
- **unsafe_mode**: Optional. Whether to run code directly on the host machine instead of in a Docker container. Default is `False`. Use with caution! See the brief initialization sketch below.
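
These initialization options can be combined as in this short sketch (the Dockerfile path is a placeholder):

```python Code
from crewai_tools import CodeInterpreterTool

# Default: code runs inside an isolated Docker container
code_interpreter = CodeInterpreterTool()

# Build the container from your own Dockerfile (path is a placeholder)
custom_interpreter = CodeInterpreterTool(user_dockerfile_path="./docker/Dockerfile")

# Run code directly on the host machine; only for trusted environments
unsafe_interpreter = CodeInterpreterTool(unsafe_mode=True)
```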

When using the tool with an agent, the agent will need to provide:

- **code**: Required. The Python 3 code to execute.
- **libraries_used**: Required. A list of libraries used in the code that need to be installed.

## Agent Integration Example

Here's a more detailed example of how to integrate the `CodeInterpreterTool` with a CrewAI agent:

```python Code
from crewai import Agent, Task, Crew, Process
from crewai_tools import CodeInterpreterTool

# Initialize the tool
code_interpreter = CodeInterpreterTool()

# Define an agent that uses the tool
data_analyst = Agent(
    role="Data Analyst",
    goal="Analyze data using Python code",
    backstory="""You are an expert data analyst who specializes in using Python
    to analyze and visualize data. You can write efficient code to process
    large datasets and extract meaningful insights.""",
    tools=[code_interpreter],
    verbose=True,
)

# Create a task for the agent
analysis_task = Task(
    description="""
    Write Python code to:
    1. Generate a random dataset of 100 points with x and y coordinates
    2. Calculate the correlation coefficient between x and y
    3. Create a scatter plot of the data
    4. Print the correlation coefficient and save the plot as 'scatter.png'

    Make sure to handle any necessary imports and print the results.
    """,
    expected_output="The correlation coefficient and confirmation that the scatter plot has been saved.",
    agent=data_analyst,
)

# Run the task
crew = Crew(
    agents=[data_analyst],
    tasks=[analysis_task],
    verbose=True,
    process=Process.sequential,
)
result = crew.kickoff()
```

## Implementation Details

The `CodeInterpreterTool` uses Docker to create a secure environment for code execution:

```python Code
class CodeInterpreterTool(BaseTool):
    name: str = "Code Interpreter"
    description: str = "Interprets Python3 code strings with a final print statement."
    args_schema: Type[BaseModel] = CodeInterpreterSchema
    default_image_tag: str = "code-interpreter:latest"

    def _run(self, **kwargs) -> str:
        code = kwargs.get("code", self.code)
        libraries_used = kwargs.get("libraries_used", [])

        if self.unsafe_mode:
            return self.run_code_unsafe(code, libraries_used)
        else:
            return self.run_code_in_docker(code, libraries_used)
```

The tool performs the following steps:
1. Verifies that the Docker image exists or builds it if necessary
2. Creates a Docker container with the current working directory mounted
3. Installs any required libraries specified by the agent
4. Executes the Python code in the container
5. Returns the output of the code execution
6. Cleans up by stopping and removing the container

## Security Considerations

By default, the `CodeInterpreterTool` runs code in an isolated Docker container, which provides a layer of security. However, there are still some security considerations to keep in mind:

1. The Docker container has access to the current working directory, so sensitive files could potentially be accessed.
2. The `unsafe_mode` parameter allows code to be executed directly on the host machine, which should only be used in trusted environments.
3. Be cautious when allowing agents to install arbitrary libraries, as they could potentially include malicious code.

## Conclusion

The `CodeInterpreterTool` provides a powerful way for CrewAI agents to execute Python code in a relatively secure environment. By enabling agents to write and run code, it significantly expands their problem-solving capabilities, especially for tasks involving data analysis, calculations, or other computational work. This tool is particularly useful for agents that need to perform complex operations that are more efficiently expressed in code than in natural language.

@@ -1,78 +1,118 @@

---
title: Composio Tool
description: The `ComposioTool` is a wrapper around the composio set of tools and gives your agent access to a wide variety of tools from the Composio SDK.
description: Composio provides 250+ production-ready tools for AI agents with flexible authentication management.
icon: gear-code
---

# `ComposioTool`
# `ComposioToolSet`

## Description

Composio is an integration platform that allows you to connect your AI agents to 250+ tools. Key features include:

This tool is a wrapper around the Composio set of tools and gives your agent access to a wide variety of tools from the Composio SDK.
- **Enterprise-Grade Authentication**: Built-in support for OAuth, API Keys, JWT with automatic token refresh
- **Full Observability**: Detailed tool usage logs, execution timestamps, and more

## Installation

To incorporate this tool into your project, follow the installation instructions below:
To incorporate Composio tools into your project, follow the instructions below:

```shell
pip install composio-core
pip install 'crewai[tools]'
pip install composio-crewai
pip install crewai
```

After the installation is complete, either run `composio login` or export your Composio API key as `COMPOSIO_API_KEY`.
After the installation is complete, either run `composio login` or export your Composio API key as `COMPOSIO_API_KEY`. Get your Composio API key from [here](https://app.composio.dev).

## Example

The following example demonstrates how to initialize the tool and execute a github action:

1. Initialize Composio tools
1. Initialize Composio toolset

```python Code
from composio import App
from crewai_tools import ComposioTool
from crewai import Agent, Task
from composio_crewai import ComposioToolSet, App, Action
from crewai import Agent, Task, Crew

tools = [ComposioTool.from_action(action=Action.GITHUB_ACTIVITY_STAR_REPO_FOR_AUTHENTICATED_USER)]
toolset = ComposioToolSet()
```

If you don't know what action you want to use, use `from_app` and `tags` filter to get relevant actions

2. Connect your GitHub account
<CodeGroup>
```shell CLI
composio add github
```
```python Code
tools = ComposioTool.from_app(App.GITHUB, tags=["important"])
request = toolset.initiate_connection(app=App.GITHUB)
print(f"Open this URL to authenticate: {request.redirectUrl}")
```
</CodeGroup>

or use `use_case` to search relevant actions

3. Get Tools

- Retrieving all the tools from an app (not recommended for production):
```python Code
tools = ComposioTool.from_app(App.GITHUB, use_case="Star a github repository")
tools = toolset.get_tools(apps=[App.GITHUB])
```

2. Define agent

- Filtering tools based on tags:
```python Code
tag = "users"

filtered_action_enums = toolset.find_actions_by_tags(
    App.GITHUB,
    tags=[tag],
)

tools = toolset.get_tools(actions=filtered_action_enums)
```

- Filtering tools based on use case:
```python Code
use_case = "Star a repository on GitHub"

filtered_action_enums = toolset.find_actions_by_use_case(
    App.GITHUB, use_case=use_case, advanced=False
)

tools = toolset.get_tools(actions=filtered_action_enums)
```
<Tip>Set `advanced` to True to get actions for complex use cases</Tip>

- Using specific tools:

In this demo, we will use the `GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER` action from the GitHub app.
```python Code
tools = toolset.get_tools(
    actions=[Action.GITHUB_STAR_A_REPOSITORY_FOR_THE_AUTHENTICATED_USER]
)
```
Learn more about filtering actions [here](https://docs.composio.dev/patterns/tools/use-tools/use-specific-actions)

4. Define agent

```python Code
crewai_agent = Agent(
    role="Github Agent",
    goal="You take action on Github using Github APIs",
    backstory=(
        "You are an AI agent that is responsible for taking actions on Github "
        "on users' behalf. You need to take action on Github using Github APIs"
    ),
    role="GitHub Agent",
    goal="You take action on GitHub using GitHub APIs",
    backstory="You are an AI agent that is responsible for taking actions on GitHub on behalf of users using GitHub APIs",
    verbose=True,
    tools=tools,
    llm= # pass an llm
)
```

3. Execute task
5. Execute task

```python Code
task = Task(
    description="Star a repo ComposioHQ/composio on GitHub",
    description="Star a repo composiohq/composio on GitHub",
    agent=crewai_agent,
    expected_output="if the star happened",
    expected_output="Status of the operation",
)

task.execute()
crew = Crew(agents=[crewai_agent], tasks=[task])

crew.kickoff()
```

* A more detailed list of tools can be found [here](https://app.composio.dev)

@@ -8,9 +8,9 @@ icon: file-pen

## Description

The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files.
The `FileWriterTool` is a component of the crewai_tools package, designed to simplify the process of writing content to files with cross-platform compatibility (Windows, Linux, macOS).
It is particularly useful in scenarios such as generating reports, saving logs, creating configuration files, and more.
This tool supports creating new directories if they don't exist, making it easier to organize your output.
This tool handles path differences across operating systems, supports UTF-8 encoding, and automatically creates directories if they don't exist, making it easier to organize your output reliably across different platforms.
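
As a rough usage sketch of the behaviour described above (the argument names `filename`, `content`, and `directory` are assumptions here and should be checked against the full tool reference for your `crewai_tools` version):

```python Code
from crewai_tools import FileWriterTool

# Initialize the tool
file_writer_tool = FileWriterTool()

# Write content to a file inside a directory that is created if it does not exist.
# NOTE: argument names are assumed for illustration; verify against your installed version.
result = file_writer_tool.run(
    filename="report.txt",
    content="Quarterly sales summary...",
    directory="output/reports"
)
print(result)
```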

## Installation

@@ -43,6 +43,8 @@ print(result)

## Conclusion

By integrating the `FileWriterTool` into your crews, the agents can execute the process of writing content to files and creating directories.
This tool is essential for tasks that require saving output data, creating structured file systems, and more. By adhering to the setup and usage guidelines provided,
incorporating this tool into projects is straightforward and efficient.
By integrating the `FileWriterTool` into your crews, the agents can reliably write content to files across different operating systems.
This tool is essential for tasks that require saving output data, creating structured file systems, and handling cross-platform file operations.
It's particularly recommended for Windows users who may encounter file writing issues with standard Python file operations.

By adhering to the setup and usage guidelines provided, incorporating this tool into projects is straightforward and ensures consistent file writing behavior across all platforms.

docs/tools/hyperbrowserloadtool.mdx (Normal file, 86 lines)
@@ -0,0 +1,86 @@

---
title: Hyperbrowser Load Tool
description: The `HyperbrowserLoadTool` enables web scraping and crawling using Hyperbrowser.
icon: globe
---

# `HyperbrowserLoadTool`

## Description

The `HyperbrowserLoadTool` enables web scraping and crawling using [Hyperbrowser](https://hyperbrowser.ai), a platform for running and scaling headless browsers. This tool allows you to scrape a single page or crawl an entire site, returning the content in properly formatted markdown or HTML.

Key Features:
- Instant Scalability - Spin up hundreds of browser sessions in seconds without infrastructure headaches
- Simple Integration - Works seamlessly with popular tools like Puppeteer and Playwright
- Powerful APIs - Easy to use APIs for scraping/crawling any site
- Bypass Anti-Bot Measures - Built-in stealth mode, ad blocking, automatic CAPTCHA solving, and rotating proxies

## Installation

To use this tool, you need to install the Hyperbrowser SDK:

```shell
uv add hyperbrowser
```

## Steps to Get Started

To effectively use the `HyperbrowserLoadTool`, follow these steps:

1. **Sign Up**: Head to [Hyperbrowser](https://app.hyperbrowser.ai/) to sign up and generate an API key.
2. **API Key**: Set the `HYPERBROWSER_API_KEY` environment variable or pass it directly to the tool constructor.
3. **Install SDK**: Install the Hyperbrowser SDK using the command above.

## Example

The following example demonstrates how to initialize the tool and use it to scrape a website:

```python Code
from crewai_tools import HyperbrowserLoadTool
from crewai import Agent
from crewai.project import agent

# Initialize the tool with your API key
tool = HyperbrowserLoadTool(api_key="your_api_key")  # Or use environment variable

# Define an agent that uses the tool
@agent
def web_researcher(self) -> Agent:
    '''
    This agent uses the HyperbrowserLoadTool to scrape websites
    and extract information.
    '''
    return Agent(
        config=self.agents_config["web_researcher"],
        tools=[tool]
    )
```

## Parameters

The `HyperbrowserLoadTool` accepts the following parameters:

### Constructor Parameters
- **api_key**: Optional. Your Hyperbrowser API key. If not provided, it will be read from the `HYPERBROWSER_API_KEY` environment variable.

### Run Parameters
- **url**: Required. The website URL to scrape or crawl.
- **operation**: Optional. The operation to perform on the website. Either 'scrape' or 'crawl'. Default is 'scrape'.
- **params**: Optional. Additional parameters for the scrape or crawl operation.
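
The tool can also be invoked directly with these run parameters. A minimal sketch (the URL is a placeholder, and the `max_pages` option is an assumed crawl parameter; see the parameter links below):

```python Code
# Scrape a single page (default operation)
page_markdown = tool.run(url="https://example.com")

# Crawl a site, passing extra options through `params`
# NOTE: "max_pages" is an assumed option; check the crawl parameter docs linked below
crawl_output = tool.run(
    url="https://example.com",
    operation="crawl",
    params={"max_pages": 5}
)
```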

## Supported Parameters

For detailed information on all supported parameters, visit:
- [Scrape Parameters](https://docs.hyperbrowser.ai/reference/sdks/python/scrape#start-scrape-job-and-wait)
- [Crawl Parameters](https://docs.hyperbrowser.ai/reference/sdks/python/crawl#start-crawl-job-and-wait)

## Return Format

The tool returns content in the following format:

- For **scrape** operations: The content of the page in markdown or HTML format.
- For **crawl** operations: The content of each page separated by dividers, including the URL of each page.

## Conclusion

The `HyperbrowserLoadTool` provides a powerful way to scrape and crawl websites, handling complex scenarios like anti-bot measures, CAPTCHAs, and more. By leveraging Hyperbrowser's platform, this tool enables agents to access and extract web content efficiently.

docs/tools/linkupsearchtool.mdx (Normal file, 112 lines)
@@ -0,0 +1,112 @@

---
title: Linkup Search Tool
description: The `LinkupSearchTool` enables querying the Linkup API for contextual information.
icon: link
---

# `LinkupSearchTool`

## Description

The `LinkupSearchTool` provides the ability to query the Linkup API for contextual information and retrieve structured results. This tool is ideal for enriching workflows with up-to-date and reliable information from Linkup, allowing agents to access relevant data during their tasks.

## Installation

To use this tool, you need to install the Linkup SDK:

```shell
uv add linkup-sdk
```

## Steps to Get Started

To effectively use the `LinkupSearchTool`, follow these steps:

1. **API Key**: Obtain a Linkup API key.
2. **Environment Setup**: Set the `LINKUP_API_KEY` environment variable (or pass the key directly to the tool) so it can be read at runtime.
3. **Install SDK**: Install the Linkup SDK using the command above.

## Example

The following example demonstrates how to initialize the tool and use it in an agent:

```python Code
from crewai_tools import LinkupSearchTool
from crewai import Agent
from crewai.project import agent
import os

# Initialize the tool with your API key
linkup_tool = LinkupSearchTool(api_key=os.getenv("LINKUP_API_KEY"))

# Define an agent that uses the tool
@agent
def researcher(self) -> Agent:
    '''
    This agent uses the LinkupSearchTool to retrieve contextual information
    from the Linkup API.
    '''
    return Agent(
        config=self.agents_config["researcher"],
        tools=[linkup_tool]
    )
```

## Parameters

The `LinkupSearchTool` accepts the following parameters:

### Constructor Parameters
- **api_key**: Required. Your Linkup API key.

### Run Parameters
- **query**: Required. The search term or phrase.
- **depth**: Optional. The search depth. Default is "standard".
- **output_type**: Optional. The type of output. Default is "searchResults".

## Advanced Usage

You can customize the search parameters for more specific results:

```python Code
# Perform a search with custom parameters
results = linkup_tool.run(
    query="Women Nobel Prize Physics",
    depth="deep",
    output_type="searchResults"
)
```

## Return Format

The tool returns results in the following format:

```json
{
  "success": true,
  "results": [
    {
      "name": "Result Title",
      "url": "https://example.com/result",
      "content": "Content of the result..."
    },
    // Additional results...
  ]
}
```

If an error occurs, the response will be:

```json
{
  "success": false,
  "error": "Error message"
}
```

## Error Handling

The tool gracefully handles API errors and provides structured feedback. If the API request fails, the tool will return a dictionary with `success: false` and an error message.

## Conclusion

The `LinkupSearchTool` provides a seamless way to integrate Linkup's contextual information retrieval capabilities into your CrewAI agents. By leveraging this tool, agents can access relevant and up-to-date information to enhance their decision-making and task execution.

docs/tools/llamaindextool.mdx (Normal file, 146 lines)
@@ -0,0 +1,146 @@

---
title: LlamaIndex Tool
description: The `LlamaIndexTool` is a wrapper for LlamaIndex tools and query engines.
icon: address-book
---

# `LlamaIndexTool`

## Description

The `LlamaIndexTool` is designed to be a general wrapper around LlamaIndex tools and query engines, enabling you to leverage LlamaIndex resources in terms of RAG/agentic pipelines as tools to plug into CrewAI agents. This tool allows you to seamlessly integrate LlamaIndex's powerful data processing and retrieval capabilities into your CrewAI workflows.

## Installation

To use this tool, you need to install LlamaIndex:

```shell
uv add llama-index
```

## Steps to Get Started

To effectively use the `LlamaIndexTool`, follow these steps:

1. **Install LlamaIndex**: Install the LlamaIndex package using the command above.
2. **Set Up LlamaIndex**: Follow the [LlamaIndex documentation](https://docs.llamaindex.ai/) to set up a RAG/agent pipeline.
3. **Create a Tool or Query Engine**: Create a LlamaIndex tool or query engine that you want to use with CrewAI.

## Example

The following examples demonstrate how to initialize the tool from different LlamaIndex components:

### From a LlamaIndex Tool

```python Code
from crewai_tools import LlamaIndexTool
from crewai import Agent
from crewai.project import agent
from llama_index.core.tools import FunctionTool

# Example 1: Initialize from FunctionTool
def search_data(query: str) -> str:
    """Search for information in the data."""
    # Your implementation here
    return f"Results for: {query}"

# Create a LlamaIndex FunctionTool
og_tool = FunctionTool.from_defaults(
    search_data,
    name="DataSearchTool",
    description="Search for information in the data"
)

# Wrap it with LlamaIndexTool
tool = LlamaIndexTool.from_tool(og_tool)

# Define an agent that uses the tool
@agent
def researcher(self) -> Agent:
    '''
    This agent uses the LlamaIndexTool to search for information.
    '''
    return Agent(
        config=self.agents_config["researcher"],
        tools=[tool]
    )
```

### From LlamaHub Tools

```python Code
from crewai_tools import LlamaIndexTool
from llama_index.tools.wolfram_alpha import WolframAlphaToolSpec

# Initialize from LlamaHub Tools
wolfram_spec = WolframAlphaToolSpec(app_id="your_app_id")
wolfram_tools = wolfram_spec.to_tool_list()
tools = [LlamaIndexTool.from_tool(t) for t in wolfram_tools]
```

### From a LlamaIndex Query Engine

```python Code
from crewai_tools import LlamaIndexTool
from llama_index.core import VectorStoreIndex
from llama_index.core.readers import SimpleDirectoryReader

# Load documents
documents = SimpleDirectoryReader("./data").load_data()

# Create an index
index = VectorStoreIndex.from_documents(documents)

# Create a query engine
query_engine = index.as_query_engine()

# Create a LlamaIndexTool from the query engine
query_tool = LlamaIndexTool.from_query_engine(
    query_engine,
    name="Company Data Query Tool",
    description="Use this tool to lookup information in company documents"
)
```

## Class Methods

The `LlamaIndexTool` provides two main class methods for creating instances:

### from_tool

Creates a `LlamaIndexTool` from a LlamaIndex tool.

```python Code
@classmethod
def from_tool(cls, tool: Any, **kwargs: Any) -> "LlamaIndexTool":
    # Implementation details
```

### from_query_engine

Creates a `LlamaIndexTool` from a LlamaIndex query engine.

```python Code
@classmethod
def from_query_engine(
    cls,
    query_engine: Any,
    name: Optional[str] = None,
    description: Optional[str] = None,
    return_direct: bool = False,
    **kwargs: Any,
) -> "LlamaIndexTool":
    # Implementation details
```

## Parameters

The `from_query_engine` method accepts the following parameters:

- **query_engine**: Required. The LlamaIndex query engine to wrap.
- **name**: Optional. The name of the tool.
- **description**: Optional. The description of the tool.
- **return_direct**: Optional. Whether to return the response directly. Default is `False`.
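
Once wrapped, the resulting tool is attached to an agent like any other CrewAI tool. A brief sketch reusing the `query_tool` created in the query engine example above (the role, goal, and backstory strings are illustrative):

```python Code
from crewai import Agent

research_agent = Agent(
    role="Company Researcher",
    goal="Answer questions using the indexed company documents",
    backstory="An analyst who relies on the company document index for answers.",
    tools=[query_tool],  # the LlamaIndexTool created in the example above
    verbose=True,
)
```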

## Conclusion

The `LlamaIndexTool` provides a powerful way to integrate LlamaIndex's capabilities into CrewAI agents. By wrapping LlamaIndex tools and query engines, it enables agents to leverage sophisticated data retrieval and processing functionalities, enhancing their ability to work with complex information sources.

docs/tools/multiontool.mdx (Normal file, 128 lines)
@@ -0,0 +1,128 @@

---
title: MultiOn Tool
description: The `MultiOnTool` empowers CrewAI agents with the capability to navigate and interact with the web through natural language instructions.
icon: globe
---

# `MultiOnTool`

## Description

The `MultiOnTool` is designed to wrap [MultiOn's](https://docs.multion.ai/welcome) web browsing capabilities, enabling CrewAI agents to control web browsers using natural language instructions. This tool facilitates seamless web browsing, making it an essential asset for projects requiring dynamic web data interaction and automation of web-based tasks.

## Installation

To use this tool, you need to install the MultiOn package:

```shell
uv add multion
```

You'll also need to install the MultiOn browser extension and enable API usage.

## Steps to Get Started

To effectively use the `MultiOnTool`, follow these steps:

1. **Install CrewAI**: Ensure that the `crewai[tools]` package is installed in your Python environment.
2. **Install and use MultiOn**: Follow [MultiOn documentation](https://docs.multion.ai/learn/browser-extension) for installing the MultiOn Browser Extension.
3. **Enable API Usage**: Click on the MultiOn extension in the extensions folder of your browser (not the hovering MultiOn icon on the web page) to open the extension configurations. Click the API Enabled toggle to enable the API.

## Example

The following example demonstrates how to initialize the tool and execute a web browsing task:

```python Code
from crewai import Agent, Task, Crew
from crewai_tools import MultiOnTool

# Initialize the tool
multion_tool = MultiOnTool(api_key="YOUR_MULTION_API_KEY", local=False)

# Define an agent that uses the tool
browser_agent = Agent(
    role="Browser Agent",
    goal="Control web browsers using natural language",
    backstory="An expert browsing agent.",
    tools=[multion_tool],
    verbose=True,
)

# Example task to search and summarize news
browse_task = Task(
    description="Summarize the top 3 trending AI News headlines",
    expected_output="A summary of the top 3 trending AI News headlines",
    agent=browser_agent,
)

# Create and run the crew
crew = Crew(agents=[browser_agent], tasks=[browse_task])
result = crew.kickoff()
```

## Parameters

The `MultiOnTool` accepts the following parameters during initialization:

- **api_key**: Optional. Specifies the MultiOn API key. If not provided, it will look for the `MULTION_API_KEY` environment variable.
- **local**: Optional. Set to `True` to run the agent locally on your browser. Make sure the MultiOn browser extension is installed and API Enabled is checked. Default is `False`.
- **max_steps**: Optional. Sets the maximum number of steps the MultiOn agent can take for a command. Default is `3`.
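
A brief initialization sketch combining these parameters (the API key value is a placeholder):

```python Code
from crewai_tools import MultiOnTool

# Run locally against the browser extension, with a larger step budget
multion_tool = MultiOnTool(
    api_key="YOUR_MULTION_API_KEY",
    local=True,   # requires the extension with API usage enabled
    max_steps=5,  # allow up to 5 steps per command instead of the default 3
)
```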

## Usage

When using the `MultiOnTool`, the agent will provide natural language instructions that the tool translates into web browsing actions. The tool returns the results of the browsing session along with a status.

```python Code
# Example of using the tool with an agent
browser_agent = Agent(
    role="Web Browser Agent",
    goal="Search for and summarize information from the web",
    backstory="An expert at finding and extracting information from websites.",
    tools=[multion_tool],
    verbose=True,
)

# Create a task for the agent
search_task = Task(
    description="Search for the latest AI news on TechCrunch and summarize the top 3 headlines",
    expected_output="A summary of the top 3 AI news headlines from TechCrunch",
    agent=browser_agent,
)

# Run the task
crew = Crew(agents=[browser_agent], tasks=[search_task])
result = crew.kickoff()
```

If the status returned is `CONTINUE`, the agent should be instructed to reissue the same instruction to continue execution.

## Implementation Details

The `MultiOnTool` is implemented as a subclass of `BaseTool` from CrewAI. It wraps the MultiOn client to provide web browsing capabilities:

```python Code
class MultiOnTool(BaseTool):
    """Tool to wrap MultiOn Browse Capabilities."""

    name: str = "Multion Browse Tool"
    description: str = """Multion gives the ability for LLMs to control web browsers using natural language instructions.
    If the status is 'CONTINUE', reissue the same instruction to continue execution
    """

    # Implementation details...

    def _run(self, cmd: str, *args: Any, **kwargs: Any) -> str:
        """
        Run the Multion client with the given command.

        Args:
            cmd (str): The detailed and specific natural language instruction for web browsing
            *args (Any): Additional arguments to pass to the Multion client
            **kwargs (Any): Additional keyword arguments to pass to the Multion client
        """
        # Implementation details...
```

## Conclusion

The `MultiOnTool` provides a powerful way to integrate web browsing capabilities into CrewAI agents. By enabling agents to interact with websites through natural language instructions, it opens up a wide range of possibilities for web-based tasks, from data collection and research to automated interactions with web services.

docs/tools/patronustools.mdx (Normal file, 195 lines)
@@ -0,0 +1,195 @@

---
title: Patronus Evaluation Tools
description: The Patronus evaluation tools enable CrewAI agents to evaluate and score model inputs and outputs using the Patronus AI platform.
icon: check
---

# `Patronus Evaluation Tools`

## Description

The [Patronus evaluation tools](https://patronus.ai) are designed to enable CrewAI agents to evaluate and score model inputs and outputs using the Patronus AI platform. These tools provide different levels of control over the evaluation process, from allowing agents to select the most appropriate evaluator and criteria to using predefined criteria or custom local evaluators.

There are three main Patronus evaluation tools:

1. **PatronusEvalTool**: Allows agents to select the most appropriate evaluator and criteria for the evaluation task.
2. **PatronusPredefinedCriteriaEvalTool**: Uses predefined evaluator and criteria specified by the user.
3. **PatronusLocalEvaluatorTool**: Uses custom function evaluators defined by the user.

## Installation

To use these tools, you need to install the Patronus package:

```shell
uv add patronus
```

You'll also need to set up your Patronus API key as an environment variable:

```shell
export PATRONUS_API_KEY="your_patronus_api_key"
```

## Steps to Get Started

To effectively use the Patronus evaluation tools, follow these steps:

1. **Install Patronus**: Install the Patronus package using the command above.
2. **Set Up API Key**: Set your Patronus API key as an environment variable.
3. **Choose the Right Tool**: Select the appropriate Patronus evaluation tool based on your needs.
4. **Configure the Tool**: Configure the tool with the necessary parameters.

## Examples

### Using PatronusEvalTool

The following example demonstrates how to use the `PatronusEvalTool`, which allows agents to select the most appropriate evaluator and criteria:

```python Code
from crewai import Agent, Task, Crew
from crewai_tools import PatronusEvalTool

# Initialize the tool
patronus_eval_tool = PatronusEvalTool()

# Define an agent that uses the tool
coding_agent = Agent(
    role="Coding Agent",
    goal="Generate high quality code and verify that the output is code",
    backstory="An experienced coder who can generate high quality python code.",
    tools=[patronus_eval_tool],
    verbose=True,
)

# Example task to generate and evaluate code
generate_code_task = Task(
    description="Create a simple program to generate the first N numbers in the Fibonacci sequence. Select the most appropriate evaluator and criteria for evaluating your output.",
    expected_output="Program that generates the first N numbers in the Fibonacci sequence.",
    agent=coding_agent,
)

# Create and run the crew
crew = Crew(agents=[coding_agent], tasks=[generate_code_task])
result = crew.kickoff()
```

### Using PatronusPredefinedCriteriaEvalTool

The following example demonstrates how to use the `PatronusPredefinedCriteriaEvalTool`, which uses predefined evaluator and criteria:

```python Code
from crewai import Agent, Task, Crew
from crewai_tools import PatronusPredefinedCriteriaEvalTool

# Initialize the tool with predefined criteria
patronus_eval_tool = PatronusPredefinedCriteriaEvalTool(
    evaluators=[{"evaluator": "judge", "criteria": "contains-code"}]
)

# Define an agent that uses the tool
coding_agent = Agent(
    role="Coding Agent",
    goal="Generate high quality code",
    backstory="An experienced coder who can generate high quality python code.",
    tools=[patronus_eval_tool],
    verbose=True,
)

# Example task to generate code
generate_code_task = Task(
    description="Create a simple program to generate the first N numbers in the Fibonacci sequence.",
    expected_output="Program that generates the first N numbers in the Fibonacci sequence.",
    agent=coding_agent,
)

# Create and run the crew
crew = Crew(agents=[coding_agent], tasks=[generate_code_task])
result = crew.kickoff()
```

### Using PatronusLocalEvaluatorTool

The following example demonstrates how to use the `PatronusLocalEvaluatorTool`, which uses custom function evaluators:

```python Code
from crewai import Agent, Task, Crew
from crewai_tools import PatronusLocalEvaluatorTool
from patronus import Client, EvaluationResult
import random

# Initialize the Patronus client
client = Client()

# Register a custom evaluator
@client.register_local_evaluator("random_evaluator")
def random_evaluator(**kwargs):
    score = random.random()
    return EvaluationResult(
        score_raw=score,
        pass_=score >= 0.5,
        explanation="example explanation",
    )

# Initialize the tool with the custom evaluator
patronus_eval_tool = PatronusLocalEvaluatorTool(
    patronus_client=client,
    evaluator="random_evaluator",
    evaluated_model_gold_answer="example label",
)

# Define an agent that uses the tool
coding_agent = Agent(
    role="Coding Agent",
    goal="Generate high quality code",
    backstory="An experienced coder who can generate high quality python code.",
    tools=[patronus_eval_tool],
    verbose=True,
)

# Example task to generate code
generate_code_task = Task(
    description="Create a simple program to generate the first N numbers in the Fibonacci sequence.",
    expected_output="Program that generates the first N numbers in the Fibonacci sequence.",
    agent=coding_agent,
)

# Create and run the crew
crew = Crew(agents=[coding_agent], tasks=[generate_code_task])
result = crew.kickoff()
```

## Parameters

### PatronusEvalTool

The `PatronusEvalTool` does not require any parameters during initialization. It automatically fetches available evaluators and criteria from the Patronus API.

### PatronusPredefinedCriteriaEvalTool

The `PatronusPredefinedCriteriaEvalTool` accepts the following parameters during initialization:

- **evaluators**: Required. A list of dictionaries containing the evaluator and criteria to use. For example: `[{"evaluator": "judge", "criteria": "contains-code"}]`.

### PatronusLocalEvaluatorTool

The `PatronusLocalEvaluatorTool` accepts the following parameters during initialization:

- **patronus_client**: Required. The Patronus client instance.
- **evaluator**: Optional. The name of the registered local evaluator to use. Default is an empty string.
- **evaluated_model_gold_answer**: Optional. The gold answer to use for evaluation. Default is an empty string.

## Usage

When using the Patronus evaluation tools, you provide the model input, output, and context, and the tool returns the evaluation results from the Patronus API.

For the `PatronusEvalTool` and `PatronusPredefinedCriteriaEvalTool`, the following parameters are required when calling the tool:

- **evaluated_model_input**: The agent's task description in simple text.
- **evaluated_model_output**: The agent's output of the task.
- **evaluated_model_retrieved_context**: The agent's context.

For the `PatronusLocalEvaluatorTool`, the same parameters are required, but the evaluator and gold answer are specified during initialization.
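
For illustration, here is a rough sketch of invoking one of these tools directly with the parameters listed above; in a crew, the agent normally supplies these arguments itself when it calls the tool, and the strings below are placeholders:

```python Code
# Direct call sketch (arguments mirror the parameters documented above)
evaluation = patronus_eval_tool.run(
    evaluated_model_input="Create a program that generates the first N Fibonacci numbers.",
    evaluated_model_output="def fibonacci(n): ...",
    evaluated_model_retrieved_context="The agent was asked to produce Python code.",
)
print(evaluation)
```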

## Conclusion

The Patronus evaluation tools provide a powerful way to evaluate and score model inputs and outputs using the Patronus AI platform. By enabling agents to evaluate their own outputs or the outputs of other agents, these tools can help improve the quality and reliability of CrewAI workflows.

docs/tools/qdrantvectorsearchtool.mdx (Normal file, 271 lines)
@@ -0,0 +1,271 @@

---
title: 'Qdrant Vector Search Tool'
description: 'Semantic search capabilities for CrewAI agents using Qdrant vector database'
icon: magnifying-glass-plus
---

# `QdrantVectorSearchTool`

The Qdrant Vector Search Tool enables semantic search capabilities in your CrewAI agents by leveraging [Qdrant](https://qdrant.tech/), a vector similarity search engine. This tool allows your agents to search through documents stored in a Qdrant collection using semantic similarity.

## Installation

Install the required packages:

```bash
uv add qdrant-client
```

## Basic Usage

Here's a minimal example of how to use the tool:

```python
from crewai import Agent
from crewai_tools import QdrantVectorSearchTool

# Initialize the tool
qdrant_tool = QdrantVectorSearchTool(
    qdrant_url="your_qdrant_url",
    qdrant_api_key="your_qdrant_api_key",
    collection_name="your_collection"
)

# Create an agent that uses the tool
agent = Agent(
    role="Research Assistant",
    goal="Find relevant information in documents",
    tools=[qdrant_tool]
)

# The tool will automatically use OpenAI embeddings
# and return the 3 most relevant results with scores > 0.35
```

## Complete Working Example

Here's a complete example showing how to:
1. Extract text from a PDF
2. Generate embeddings using OpenAI
3. Store in Qdrant
4. Create a CrewAI agentic RAG workflow for semantic search

```python
import os
import uuid
import pdfplumber
from openai import OpenAI
from dotenv import load_dotenv
from crewai import Agent, Task, Crew, Process, LLM
from crewai_tools import QdrantVectorSearchTool
from qdrant_client import QdrantClient
from qdrant_client.models import PointStruct, Distance, VectorParams

# Load environment variables
load_dotenv()

# Initialize OpenAI client
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# Extract text from PDF
def extract_text_from_pdf(pdf_path):
    text = []
    with pdfplumber.open(pdf_path) as pdf:
        for page in pdf.pages:
            page_text = page.extract_text()
            if page_text:
                text.append(page_text.strip())
    return text

# Generate OpenAI embeddings
def get_openai_embedding(text):
    response = client.embeddings.create(
        input=text,
        model="text-embedding-3-small"
    )
    return response.data[0].embedding

# Store text and embeddings in Qdrant
def load_pdf_to_qdrant(pdf_path, qdrant, collection_name):
    # Extract text from PDF
    text_chunks = extract_text_from_pdf(pdf_path)

    # Create Qdrant collection
    if qdrant.collection_exists(collection_name):
        qdrant.delete_collection(collection_name)
    qdrant.create_collection(
        collection_name=collection_name,
        vectors_config=VectorParams(size=1536, distance=Distance.COSINE)
    )

    # Store embeddings
    points = []
    for chunk in text_chunks:
        embedding = get_openai_embedding(chunk)
        points.append(PointStruct(
            id=str(uuid.uuid4()),
            vector=embedding,
            payload={"text": chunk}
        ))
    qdrant.upsert(collection_name=collection_name, points=points)

# Initialize Qdrant client and load data
qdrant = QdrantClient(
    url=os.getenv("QDRANT_URL"),
    api_key=os.getenv("QDRANT_API_KEY")
)
collection_name = "example_collection"
pdf_path = "path/to/your/document.pdf"
load_pdf_to_qdrant(pdf_path, qdrant, collection_name)

# Initialize Qdrant search tool
qdrant_tool = QdrantVectorSearchTool(
    qdrant_url=os.getenv("QDRANT_URL"),
    qdrant_api_key=os.getenv("QDRANT_API_KEY"),
    collection_name=collection_name,
    limit=3,
    score_threshold=0.35
)

# Create CrewAI agents
search_agent = Agent(
    role="Senior Semantic Search Agent",
    goal="Find and analyze documents based on semantic search",
    backstory="""You are an expert research assistant who can find relevant
    information using semantic search in a Qdrant database.""",
    tools=[qdrant_tool],
    verbose=True
)

answer_agent = Agent(
    role="Senior Answer Assistant",
    goal="Generate answers to questions based on the context provided",
    backstory="""You are an expert answer assistant who can generate
    answers to questions based on the context provided.""",
    tools=[qdrant_tool],
    verbose=True
)

# Define tasks
search_task = Task(
    description="""Search for relevant documents about the {query}.
    Your final answer should include:
    - The relevant information found
    - The similarity scores of the results
    - The metadata of the relevant documents""",
    expected_output="Relevant document excerpts with similarity scores and metadata.",  # expected_output is required by crewai Tasks
    agent=search_agent
)

answer_task = Task(
    description="""Given the context and metadata of relevant documents,
    generate a final answer based on the context.""",
    expected_output="A final answer to the query grounded in the retrieved context.",  # expected_output is required by crewai Tasks
    agent=answer_agent
)

# Run CrewAI workflow
crew = Crew(
    agents=[search_agent, answer_agent],
    tasks=[search_task, answer_task],
    process=Process.sequential,
    verbose=True
)

result = crew.kickoff(
    inputs={"query": "What is the role of X in the document?"}
)
print(result)
```

## Tool Parameters

### Required Parameters
- `qdrant_url` (str): The URL of your Qdrant server
- `qdrant_api_key` (str): API key for authentication with Qdrant
- `collection_name` (str): Name of the Qdrant collection to search

### Optional Parameters
- `limit` (int): Maximum number of results to return (default: 3)
- `score_threshold` (float): Minimum similarity score threshold (default: 0.35)
- `custom_embedding_fn` (Callable[[str], list[float]]): Custom function for text vectorization

## Search Parameters

The tool accepts these parameters in its schema:
- `query` (str): The search query to find similar documents
- `filter_by` (str, optional): Metadata field to filter on
- `filter_value` (str, optional): Value to filter by
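
For example, assuming documents were stored with a `source` metadata field (the field name and value below are hypothetical), a filtered search looks like this:

```python
# Search only within documents whose "source" metadata equals "annual_report"
# ("source" and "annual_report" are placeholder values for illustration)
results = qdrant_tool.run(
    query="What were the key revenue drivers?",
    filter_by="source",
    filter_value="annual_report"
)
print(results)
```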

## Return Format

The tool returns results in JSON format:

```json
[
  {
    "metadata": {
      // Any metadata stored with the document
    },
    "context": "The actual text content of the document",
    "distance": 0.95 // Similarity score
  }
]
```

## Default Embedding

By default, the tool uses OpenAI's `text-embedding-3-small` model for vectorization. This requires:
- OpenAI API key set in environment: `OPENAI_API_KEY`

## Custom Embeddings

Instead of using the default embedding model, you might want to use your own embedding function in cases where you:

1. Want to use a different embedding model (e.g., Cohere, HuggingFace, Ollama models)
2. Need to reduce costs by using open-source embedding models
3. Have specific requirements for vector dimensions or embedding quality
4. Want to use domain-specific embeddings (e.g., for medical or legal text)

Here's an example using a HuggingFace model:

```python
from transformers import AutoTokenizer, AutoModel
import torch

# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')

def custom_embeddings(text: str) -> list[float]:
    # Tokenize and get model outputs
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)
    outputs = model(**inputs)

    # Use mean pooling to get text embedding
    embeddings = outputs.last_hidden_state.mean(dim=1)

    # Convert to list of floats and return
    return embeddings[0].tolist()

# Use custom embeddings with the tool
tool = QdrantVectorSearchTool(
    qdrant_url="your_url",
    qdrant_api_key="your_key",
    collection_name="your_collection",
    custom_embedding_fn=custom_embeddings  # Pass your custom function
)
```

## Error Handling

The tool handles these specific errors:
- Raises ImportError if `qdrant-client` is not installed (with option to auto-install)
- Raises ValueError if `QDRANT_URL` is not set
- Prompts to install `qdrant-client` if missing using `uv add qdrant-client`

## Environment Variables

Required environment variables:
```bash
export QDRANT_URL="your_qdrant_url"  # If not provided in constructor
export QDRANT_API_KEY="your_api_key"  # If not provided in constructor
export OPENAI_API_KEY="your_openai_key"  # If using default embeddings
```

docs/tools/ragtool.mdx (Normal file, 154 lines)
@@ -0,0 +1,154 @@

---
title: RAG Tool
description: The `RagTool` is a dynamic knowledge base tool for answering questions using Retrieval-Augmented Generation.
icon: vector-square
---

# `RagTool`

## Description

The `RagTool` is designed to answer questions by leveraging the power of Retrieval-Augmented Generation (RAG) through EmbedChain.
It provides a dynamic knowledge base that can be queried to retrieve relevant information from various data sources.
This tool is particularly useful for applications that require access to a vast array of information and need to provide contextually relevant answers.

## Example

The following example demonstrates how to initialize the tool and use it with different data sources:

```python Code
from crewai import Agent
from crewai.project import agent
from crewai_tools import RagTool

# Create a RAG tool with default settings
rag_tool = RagTool()

# Add content from a file
rag_tool.add(data_type="file", path="path/to/your/document.pdf")

# Add content from a web page
rag_tool.add(data_type="web_page", url="https://example.com")

# Define an agent with the RagTool
@agent
def knowledge_expert(self) -> Agent:
    '''
    This agent uses the RagTool to answer questions about the knowledge base.
    '''
    return Agent(
        config=self.agents_config["knowledge_expert"],
        allow_delegation=False,
        tools=[rag_tool]
    )
```

## Supported Data Sources

The `RagTool` can be used with a wide variety of data sources, including:

- 📰 PDF files
- 📊 CSV files
- 📃 JSON files
- 📝 Text
- 📁 Directories/Folders
- 🌐 HTML Web pages
- 📽️ YouTube Channels
- 📺 YouTube Videos
- 📚 Documentation websites
- 📝 MDX files
- 📄 DOCX files
- 🧾 XML files
- 📬 Gmail
- 📝 GitHub repositories
- 🐘 PostgreSQL databases
- 🐬 MySQL databases
- 🤖 Slack conversations
- 💬 Discord messages
- 🗨️ Discourse forums
- 📝 Substack newsletters
- 🐝 Beehiiv content
- 💾 Dropbox files
- 🖼️ Images
- ⚙️ Custom data sources

## Parameters

The `RagTool` accepts the following parameters:

- **summarize**: Optional. Whether to summarize the retrieved content. Default is `False`.
- **adapter**: Optional. A custom adapter for the knowledge base. If not provided, an EmbedchainAdapter will be used.
- **config**: Optional. Configuration for the underlying EmbedChain App.

## Adding Content

You can add content to the knowledge base using the `add` method:

```python Code
# Add a PDF file
rag_tool.add(data_type="file", path="path/to/your/document.pdf")

# Add a web page
rag_tool.add(data_type="web_page", url="https://example.com")

# Add a YouTube video
rag_tool.add(data_type="youtube_video", url="https://www.youtube.com/watch?v=VIDEO_ID")

# Add a directory of files
rag_tool.add(data_type="directory", path="path/to/your/directory")
```

## Agent Integration Example

Here's how to integrate the `RagTool` with a CrewAI agent:

```python Code
from crewai import Agent
from crewai.project import agent
from crewai_tools import RagTool

# Initialize the tool and add content
rag_tool = RagTool()
rag_tool.add(data_type="web_page", url="https://docs.crewai.com")
rag_tool.add(data_type="file", path="company_data.pdf")

# Define an agent with the RagTool
@agent
def knowledge_expert(self) -> Agent:
    return Agent(
        config=self.agents_config["knowledge_expert"],
        allow_delegation=False,
        tools=[rag_tool]
    )
```

## Advanced Configuration

You can customize the behavior of the `RagTool` by providing a configuration dictionary:

```python Code
from crewai_tools import RagTool

# Create a RAG tool with custom configuration
config = {
    "app": {
        "name": "custom_app",
    },
    "llm": {
        "provider": "openai",
        "config": {
            "model": "gpt-4",
        }
    },
    "embedder": {
        "provider": "openai",
        "config": {
            "model": "text-embedding-ada-002"
        }
    }
}

rag_tool = RagTool(config=config, summarize=True)
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `RagTool` provides a powerful way to create and query knowledge bases from various data sources. By leveraging Retrieval-Augmented Generation, it enables agents to access and retrieve relevant information efficiently, enhancing their ability to provide accurate and contextually appropriate responses.
|
||||
144
docs/tools/s3readertool.mdx
Normal file
144
docs/tools/s3readertool.mdx
Normal file
@@ -0,0 +1,144 @@
|
||||
---
|
||||
title: S3 Reader Tool
|
||||
description: The `S3ReaderTool` enables CrewAI agents to read files from Amazon S3 buckets.
|
||||
icon: aws
|
||||
---
|
||||
|
||||
# `S3ReaderTool`
|
||||
|
||||
## Description
|
||||
|
||||
The `S3ReaderTool` is designed to read files from Amazon S3 buckets. This tool allows CrewAI agents to access and retrieve content stored in S3, making it ideal for workflows that require reading data, configuration files, or any other content stored in AWS S3 storage.
|
||||
|
||||
## Installation
|
||||
|
||||
To use this tool, you need to install the required dependencies:
|
||||
|
||||
```shell
|
||||
uv add boto3
|
||||
```
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
To effectively use the `S3ReaderTool`, follow these steps:
|
||||
|
||||
1. **Install Dependencies**: Install the required packages using the command above.
|
||||
2. **Configure AWS Credentials**: Set up your AWS credentials as environment variables.
|
||||
3. **Initialize the Tool**: Create an instance of the tool.
|
||||
4. **Specify S3 Path**: Provide the S3 path to the file you want to read.
|
||||
|
||||
## Example
|
||||
|
||||
The following example demonstrates how to use the `S3ReaderTool` to read a file from an S3 bucket:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools.aws.s3 import S3ReaderTool
|
||||
|
||||
# Initialize the tool
|
||||
s3_reader_tool = S3ReaderTool()
|
||||
|
||||
# Define an agent that uses the tool
|
||||
file_reader_agent = Agent(
|
||||
role="File Reader",
|
||||
goal="Read files from S3 buckets",
|
||||
backstory="An expert in retrieving and processing files from cloud storage.",
|
||||
tools=[s3_reader_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Example task to read a configuration file
|
||||
read_task = Task(
|
||||
description="Read the configuration file from {my_bucket} and summarize its contents.",
|
||||
expected_output="A summary of the configuration file contents.",
|
||||
agent=file_reader_agent,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(agents=[file_reader_agent], tasks=[read_task])
|
||||
result = crew.kickoff(inputs={"my_bucket": "s3://my-bucket/config/app-config.json"})
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
The `S3ReaderTool` accepts the following parameter when used by an agent:
|
||||
|
||||
- **file_path**: Required. The S3 file path in the format `s3://bucket-name/file-name`.
|
||||
|
||||
## AWS Credentials
|
||||
|
||||
The tool requires AWS credentials to access S3 buckets. You can configure these credentials using environment variables:
|
||||
|
||||
- **CREW_AWS_REGION**: The AWS region where your S3 bucket is located. Default is `us-east-1`.
|
||||
- **CREW_AWS_ACCESS_KEY_ID**: Your AWS access key ID.
|
||||
- **CREW_AWS_SEC_ACCESS_KEY**: Your AWS secret access key.
|
||||
|
||||
## Usage
|
||||
|
||||
When using the `S3ReaderTool` with an agent, the agent will need to provide the S3 file path:
|
||||
|
||||
```python Code
|
||||
# Example of using the tool with an agent
|
||||
file_reader_agent = Agent(
|
||||
role="File Reader",
|
||||
goal="Read files from S3 buckets",
|
||||
backstory="An expert in retrieving and processing files from cloud storage.",
|
||||
tools=[s3_reader_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create a task for the agent to read a specific file
|
||||
read_config_task = Task(
|
||||
description="Read the application configuration file from {my_bucket} and extract the database connection settings.",
|
||||
expected_output="The database connection settings from the configuration file.",
|
||||
agent=file_reader_agent,
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(agents=[file_reader_agent], tasks=[read_config_task])
|
||||
result = crew.kickoff(inputs={"my_bucket": "s3://my-bucket/config/app-config.json"})
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The `S3ReaderTool` includes error handling for common S3 issues:
|
||||
|
||||
- Invalid S3 path format
|
||||
- Missing or inaccessible files
|
||||
- Permission issues
|
||||
- AWS credential problems
|
||||
|
||||
When an error occurs, the tool will return an error message that includes details about the issue.
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The `S3ReaderTool` uses the AWS SDK for Python (boto3) to interact with S3:
|
||||
|
||||
```python Code
|
||||
class S3ReaderTool(BaseTool):
|
||||
name: str = "S3 Reader Tool"
|
||||
description: str = "Reads a file from Amazon S3 given an S3 file path"
|
||||
|
||||
def _run(self, file_path: str) -> str:
|
||||
try:
|
||||
bucket_name, object_key = self._parse_s3_path(file_path)
|
||||
|
||||
s3 = boto3.client(
|
||||
's3',
|
||||
region_name=os.getenv('CREW_AWS_REGION', 'us-east-1'),
|
||||
aws_access_key_id=os.getenv('CREW_AWS_ACCESS_KEY_ID'),
|
||||
aws_secret_access_key=os.getenv('CREW_AWS_SEC_ACCESS_KEY')
|
||||
)
|
||||
|
||||
# Read file content from S3
|
||||
response = s3.get_object(Bucket=bucket_name, Key=object_key)
|
||||
file_content = response['Body'].read().decode('utf-8')
|
||||
|
||||
return file_content
|
||||
except ClientError as e:
|
||||
return f"Error reading file from S3: {str(e)}"
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `S3ReaderTool` provides a straightforward way to read files from Amazon S3 buckets. By enabling agents to access content stored in S3, it facilitates workflows that require cloud-based file access. This tool is particularly useful for data processing, configuration management, and any task that involves retrieving information from AWS S3 storage.
|
||||
150
docs/tools/s3writertool.mdx
Normal file
150
docs/tools/s3writertool.mdx
Normal file
@@ -0,0 +1,150 @@
|
||||
---
|
||||
title: S3 Writer Tool
|
||||
description: The `S3WriterTool` enables CrewAI agents to write content to files in Amazon S3 buckets.
|
||||
icon: aws
|
||||
---
|
||||
|
||||
# `S3WriterTool`
|
||||
|
||||
## Description
|
||||
|
||||
The `S3WriterTool` is designed to write content to files in Amazon S3 buckets. This tool allows CrewAI agents to create or update files in S3, making it ideal for workflows that require storing data, saving configuration files, or persisting any other content to AWS S3 storage.
|
||||
|
||||
## Installation
|
||||
|
||||
To use this tool, you need to install the required dependencies:
|
||||
|
||||
```shell
|
||||
uv add boto3
|
||||
```
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
To effectively use the `S3WriterTool`, follow these steps:
|
||||
|
||||
1. **Install Dependencies**: Install the required packages using the command above.
|
||||
2. **Configure AWS Credentials**: Set up your AWS credentials as environment variables.
|
||||
3. **Initialize the Tool**: Create an instance of the tool.
|
||||
4. **Specify S3 Path and Content**: Provide the S3 path where you want to write the file and the content to be written.
|
||||
|
||||
## Example
|
||||
|
||||
The following example demonstrates how to use the `S3WriterTool` to write content to a file in an S3 bucket:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools.aws.s3 import S3WriterTool
|
||||
|
||||
# Initialize the tool
|
||||
s3_writer_tool = S3WriterTool()
|
||||
|
||||
# Define an agent that uses the tool
|
||||
file_writer_agent = Agent(
|
||||
role="File Writer",
|
||||
goal="Write content to files in S3 buckets",
|
||||
backstory="An expert in storing and managing files in cloud storage.",
|
||||
tools=[s3_writer_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Example task to write a report
|
||||
write_task = Task(
|
||||
description="Generate a summary report of the quarterly sales data and save it to {my_bucket}.",
|
||||
expected_output="Confirmation that the report was successfully saved to S3.",
|
||||
agent=file_writer_agent,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(agents=[file_writer_agent], tasks=[write_task])
|
||||
result = crew.kickoff(inputs={"my_bucket": "s3://my-bucket/reports/quarterly-summary.txt"})
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
The `S3WriterTool` accepts the following parameters when used by an agent:
|
||||
|
||||
- **file_path**: Required. The S3 file path in the format `s3://bucket-name/file-name`.
|
||||
- **content**: Required. The content to write to the file.
|
||||
|
||||
## AWS Credentials
|
||||
|
||||
The tool requires AWS credentials to access S3 buckets. You can configure these credentials using environment variables:
|
||||
|
||||
- **CREW_AWS_REGION**: The AWS region where your S3 bucket is located. Default is `us-east-1`.
|
||||
- **CREW_AWS_ACCESS_KEY_ID**: Your AWS access key ID.
|
||||
- **CREW_AWS_SEC_ACCESS_KEY**: Your AWS secret access key.
|
||||
|
||||
## Usage
|
||||
|
||||
When using the `S3WriterTool` with an agent, the agent will need to provide both the S3 file path and the content to write:
|
||||
|
||||
```python Code
|
||||
# Example of using the tool with an agent
|
||||
file_writer_agent = Agent(
|
||||
role="File Writer",
|
||||
goal="Write content to files in S3 buckets",
|
||||
backstory="An expert in storing and managing files in cloud storage.",
|
||||
tools=[s3_writer_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create a task for the agent to write a specific file
|
||||
write_config_task = Task(
|
||||
description="""
|
||||
Create a configuration file with the following database settings:
|
||||
- host: db.example.com
|
||||
- port: 5432
|
||||
- username: app_user
|
||||
- password: secure_password
|
||||
|
||||
Save this configuration as JSON to {my_bucket}.
|
||||
""",
|
||||
expected_output="Confirmation that the configuration file was successfully saved to S3.",
|
||||
agent=file_writer_agent,
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(agents=[file_writer_agent], tasks=[write_config_task])
|
||||
result = crew.kickoff(inputs={"my_bucket": "s3://my-bucket/config/db-config.json"})
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The `S3WriterTool` includes error handling for common S3 issues:
|
||||
|
||||
- Invalid S3 path format
|
||||
- Permission issues (e.g., no write access to the bucket)
|
||||
- AWS credential problems
|
||||
- Bucket does not exist
|
||||
|
||||
When an error occurs, the tool will return an error message that includes details about the issue.
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The `S3WriterTool` uses the AWS SDK for Python (boto3) to interact with S3:
|
||||
|
||||
```python Code
|
||||
class S3WriterTool(BaseTool):
|
||||
name: str = "S3 Writer Tool"
|
||||
description: str = "Writes content to a file in Amazon S3 given an S3 file path"
|
||||
|
||||
def _run(self, file_path: str, content: str) -> str:
|
||||
try:
|
||||
bucket_name, object_key = self._parse_s3_path(file_path)
|
||||
|
||||
s3 = boto3.client(
|
||||
's3',
|
||||
region_name=os.getenv('CREW_AWS_REGION', 'us-east-1'),
|
||||
aws_access_key_id=os.getenv('CREW_AWS_ACCESS_KEY_ID'),
|
||||
aws_secret_access_key=os.getenv('CREW_AWS_SEC_ACCESS_KEY')
|
||||
)
|
||||
|
||||
s3.put_object(Bucket=bucket_name, Key=object_key, Body=content.encode('utf-8'))
|
||||
return f"Successfully wrote content to {file_path}"
|
||||
except ClientError as e:
|
||||
return f"Error writing file to S3: {str(e)}"
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `S3WriterTool` provides a straightforward way to write content to files in Amazon S3 buckets. By enabling agents to create and update files in S3, it facilitates workflows that require cloud-based file storage. This tool is particularly useful for data persistence, configuration management, report generation, and any task that involves storing information in AWS S3 storage.
|
||||
139
docs/tools/scrapeelementfromwebsitetool.mdx
Normal file
139
docs/tools/scrapeelementfromwebsitetool.mdx
Normal file
@@ -0,0 +1,139 @@
|
||||
---
|
||||
title: Scrape Element From Website Tool
|
||||
description: The `ScrapeElementFromWebsiteTool` enables CrewAI agents to extract specific elements from websites using CSS selectors.
|
||||
icon: code
|
||||
---
|
||||
|
||||
# `ScrapeElementFromWebsiteTool`
|
||||
|
||||
## Description
|
||||
|
||||
The `ScrapeElementFromWebsiteTool` is designed to extract specific elements from websites using CSS selectors. This tool allows CrewAI agents to scrape targeted content from web pages, making it useful for data extraction tasks where only specific parts of a webpage are needed.
|
||||
|
||||
## Installation
|
||||
|
||||
To use this tool, you need to install the required dependencies:
|
||||
|
||||
```shell
|
||||
uv add requests beautifulsoup4
|
||||
```
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
To effectively use the `ScrapeElementFromWebsiteTool`, follow these steps:
|
||||
|
||||
1. **Install Dependencies**: Install the required packages using the command above.
|
||||
2. **Identify CSS Selectors**: Determine the CSS selectors for the elements you want to extract from the website.
|
||||
3. **Initialize the Tool**: Create an instance of the tool with the necessary parameters.
|
||||
|
||||
## Example
|
||||
|
||||
The following example demonstrates how to use the `ScrapeElementFromWebsiteTool` to extract specific elements from a website:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import ScrapeElementFromWebsiteTool
|
||||
|
||||
# Initialize the tool
|
||||
scrape_tool = ScrapeElementFromWebsiteTool()
|
||||
|
||||
# Define an agent that uses the tool
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract specific information from websites",
|
||||
backstory="An expert in web scraping who can extract targeted content from web pages.",
|
||||
tools=[scrape_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Example task to extract headlines from a news website
|
||||
scrape_task = Task(
|
||||
description="Extract the main headlines from the CNN homepage. Use the CSS selector '.headline' to target the headline elements.",
|
||||
expected_output="A list of the main headlines from CNN.",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(agents=[web_scraper_agent], tasks=[scrape_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
You can also initialize the tool with predefined parameters:
|
||||
|
||||
```python Code
|
||||
# Initialize the tool with predefined parameters
|
||||
scrape_tool = ScrapeElementFromWebsiteTool(
|
||||
website_url="https://www.example.com",
|
||||
css_element=".main-content"
|
||||
)
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
The `ScrapeElementFromWebsiteTool` accepts the following parameters during initialization:
|
||||
|
||||
- **website_url**: Optional. The URL of the website to scrape. If provided during initialization, the agent won't need to specify it when using the tool.
|
||||
- **css_element**: Optional. The CSS selector for the elements to extract. If provided during initialization, the agent won't need to specify it when using the tool.
|
||||
- **cookies**: Optional. A dictionary containing cookies to be sent with the request. This can be useful for websites that require authentication.
|
||||
|
||||
## Usage
|
||||
|
||||
When using the `ScrapeElementFromWebsiteTool` with an agent, the agent will need to provide the following parameters (unless they were specified during initialization):
|
||||
|
||||
- **website_url**: The URL of the website to scrape.
|
||||
- **css_element**: The CSS selector for the elements to extract.
|
||||
|
||||
The tool will return the text content of all elements matching the CSS selector, joined by newlines.
|
||||
|
||||
```python Code
|
||||
# Example of using the tool with an agent
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract specific elements from websites",
|
||||
backstory="An expert in web scraping who can extract targeted content using CSS selectors.",
|
||||
tools=[scrape_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create a task for the agent to extract specific elements
|
||||
extract_task = Task(
|
||||
description="""
|
||||
Extract all product titles from the featured products section on example.com.
|
||||
Use the CSS selector '.product-title' to target the title elements.
|
||||
""",
|
||||
expected_output="A list of product titles from the website",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
|
||||
# Run the task through a crew
|
||||
crew = Crew(agents=[web_scraper_agent], tasks=[extract_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The `ScrapeElementFromWebsiteTool` uses the `requests` library to fetch the web page and `BeautifulSoup` to parse the HTML and extract the specified elements:
|
||||
|
||||
```python Code
|
||||
class ScrapeElementFromWebsiteTool(BaseTool):
|
||||
name: str = "Read a website content"
|
||||
description: str = "A tool that can be used to read a website content."
|
||||
|
||||
# Implementation details...
|
||||
|
||||
def _run(self, **kwargs: Any) -> Any:
|
||||
website_url = kwargs.get("website_url", self.website_url)
|
||||
css_element = kwargs.get("css_element", self.css_element)
|
||||
page = requests.get(
|
||||
website_url,
|
||||
headers=self.headers,
|
||||
cookies=self.cookies if self.cookies else {},
|
||||
)
|
||||
parsed = BeautifulSoup(page.content, "html.parser")
|
||||
elements = parsed.select(css_element)
|
||||
return "\n".join([element.get_text() for element in elements])
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `ScrapeElementFromWebsiteTool` provides a powerful way to extract specific elements from websites using CSS selectors. By enabling agents to target only the content they need, it makes web scraping tasks more efficient and focused. This tool is particularly useful for data extraction, content monitoring, and research tasks where specific information needs to be extracted from web pages.
|
||||
196
docs/tools/scrapegraphscrapetool.mdx
Normal file
196
docs/tools/scrapegraphscrapetool.mdx
Normal file
@@ -0,0 +1,196 @@
|
||||
---
|
||||
title: Scrapegraph Scrape Tool
|
||||
description: The `ScrapegraphScrapeTool` leverages Scrapegraph AI's SmartScraper API to intelligently extract content from websites.
|
||||
icon: chart-area
|
||||
---
|
||||
|
||||
# `ScrapegraphScrapeTool`
|
||||
|
||||
## Description
|
||||
|
||||
The `ScrapegraphScrapeTool` is designed to leverage Scrapegraph AI's SmartScraper API to intelligently extract content from websites. This tool provides advanced web scraping capabilities with AI-powered content extraction, making it ideal for targeted data collection and content analysis tasks. Unlike traditional web scrapers, it can understand the context and structure of web pages to extract the most relevant information based on natural language prompts.
|
||||
|
||||
## Installation
|
||||
|
||||
To use this tool, you need to install the Scrapegraph Python client:
|
||||
|
||||
```shell
|
||||
uv add scrapegraph-py
|
||||
```
|
||||
|
||||
You'll also need to set up your Scrapegraph API key as an environment variable:
|
||||
|
||||
```shell
|
||||
export SCRAPEGRAPH_API_KEY="your_api_key"
|
||||
```
|
||||
|
||||
You can obtain an API key from [Scrapegraph AI](https://scrapegraphai.com).
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
To effectively use the `ScrapegraphScrapeTool`, follow these steps:
|
||||
|
||||
1. **Install Dependencies**: Install the required package using the command above.
|
||||
2. **Set Up API Key**: Set your Scrapegraph API key as an environment variable or provide it during initialization.
|
||||
3. **Initialize the Tool**: Create an instance of the tool with the necessary parameters.
|
||||
4. **Define Extraction Prompts**: Create natural language prompts to guide the extraction of specific content.
|
||||
|
||||
## Example
|
||||
|
||||
The following example demonstrates how to use the `ScrapegraphScrapeTool` to extract content from a website:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import ScrapegraphScrapeTool
|
||||
|
||||
# Initialize the tool
|
||||
scrape_tool = ScrapegraphScrapeTool(api_key="your_api_key")
|
||||
|
||||
# Define an agent that uses the tool
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract specific information from websites",
|
||||
backstory="An expert in web scraping who can extract targeted content from web pages.",
|
||||
tools=[scrape_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Example task to extract product information from an e-commerce site
|
||||
scrape_task = Task(
|
||||
description="Extract product names, prices, and descriptions from the featured products section of example.com.",
|
||||
expected_output="A structured list of product information including names, prices, and descriptions.",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(agents=[web_scraper_agent], tasks=[scrape_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
You can also initialize the tool with predefined parameters:
|
||||
|
||||
```python Code
|
||||
# Initialize the tool with predefined parameters
|
||||
scrape_tool = ScrapegraphScrapeTool(
|
||||
website_url="https://www.example.com",
|
||||
user_prompt="Extract all product prices and descriptions",
|
||||
api_key="your_api_key"
|
||||
)
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
The `ScrapegraphScrapeTool` accepts the following parameters during initialization:
|
||||
|
||||
- **api_key**: Optional. Your Scrapegraph API key. If not provided, it will look for the `SCRAPEGRAPH_API_KEY` environment variable.
|
||||
- **website_url**: Optional. The URL of the website to scrape. If provided during initialization, the agent won't need to specify it when using the tool.
|
||||
- **user_prompt**: Optional. Custom instructions for content extraction. If provided during initialization, the agent won't need to specify it when using the tool.
|
||||
- **enable_logging**: Optional. Whether to enable logging for the Scrapegraph client. Default is `False`.
|
||||
|
||||
## Usage
|
||||
|
||||
When using the `ScrapegraphScrapeTool` with an agent, the agent will need to provide the following parameters (unless they were specified during initialization):
|
||||
|
||||
- **website_url**: The URL of the website to scrape.
|
||||
- **user_prompt**: Optional. Custom instructions for content extraction. Default is "Extract the main content of the webpage".
|
||||
|
||||
The tool will return the extracted content based on the provided prompt.
|
||||
|
||||
```python Code
|
||||
# Example of using the tool with an agent
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract specific information from websites",
|
||||
backstory="An expert in web scraping who can extract targeted content from web pages.",
|
||||
tools=[scrape_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create a task for the agent to extract specific content
|
||||
extract_task = Task(
|
||||
description="Extract the main heading and summary from example.com",
|
||||
expected_output="The main heading and summary from the website",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(agents=[web_scraper_agent], tasks=[extract_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The `ScrapegraphScrapeTool` may raise the following exceptions:
|
||||
|
||||
- **ValueError**: When API key is missing or URL format is invalid.
|
||||
- **RateLimitError**: When API rate limits are exceeded.
|
||||
- **RuntimeError**: When scraping operation fails (network issues, API errors).
|
||||
|
||||
It's recommended to instruct agents to handle potential errors gracefully:
|
||||
|
||||
```python Code
|
||||
# Create a task that includes error handling instructions
|
||||
robust_extract_task = Task(
|
||||
description="""
|
||||
Extract the main heading from example.com.
|
||||
Be aware that you might encounter errors such as:
|
||||
- Invalid URL format
|
||||
- Missing API key
|
||||
- Rate limit exceeded
|
||||
- Network or API errors
|
||||
|
||||
If you encounter any errors, provide a clear explanation of what went wrong
|
||||
and suggest possible solutions.
|
||||
""",
|
||||
expected_output="Either the extracted heading or a clear error explanation",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
```
|
||||
|
||||
## Rate Limiting
|
||||
|
||||
The Scrapegraph API has rate limits that vary based on your subscription plan. Consider the following best practices:
|
||||
|
||||
- Implement appropriate delays between requests when processing multiple URLs.
|
||||
- Handle rate limit errors gracefully in your application.
|
||||
- Check your API plan limits on the Scrapegraph dashboard.
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The `ScrapegraphScrapeTool` uses the Scrapegraph Python client to interact with the SmartScraper API:
|
||||
|
||||
```python Code
|
||||
class ScrapegraphScrapeTool(BaseTool):
|
||||
"""
|
||||
A tool that uses Scrapegraph AI to intelligently scrape website content.
|
||||
"""
|
||||
|
||||
# Implementation details...
|
||||
|
||||
def _run(self, **kwargs: Any) -> Any:
|
||||
website_url = kwargs.get("website_url", self.website_url)
|
||||
user_prompt = (
|
||||
kwargs.get("user_prompt", self.user_prompt)
|
||||
or "Extract the main content of the webpage"
|
||||
)
|
||||
|
||||
if not website_url:
|
||||
raise ValueError("website_url is required")
|
||||
|
||||
# Validate URL format
|
||||
self._validate_url(website_url)
|
||||
|
||||
try:
|
||||
# Make the SmartScraper request
|
||||
response = self._client.smartscraper(
|
||||
website_url=website_url,
|
||||
user_prompt=user_prompt,
|
||||
)
|
||||
|
||||
return response
|
||||
# Error handling...
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `ScrapegraphScrapeTool` provides a powerful way to extract content from websites using AI-powered understanding of web page structure. By enabling agents to target specific information using natural language prompts, it makes web scraping tasks more efficient and focused. This tool is particularly useful for data extraction, content monitoring, and research tasks where specific information needs to be extracted from web pages.
|
||||
220
docs/tools/scrapflyscrapetool.mdx
Normal file
220
docs/tools/scrapflyscrapetool.mdx
Normal file
@@ -0,0 +1,220 @@
|
||||
---
|
||||
title: Scrapfly Scrape Website Tool
|
||||
description: The `ScrapflyScrapeWebsiteTool` leverages Scrapfly's web scraping API to extract content from websites in various formats.
|
||||
icon: spider
|
||||
---
|
||||
|
||||
# `ScrapflyScrapeWebsiteTool`
|
||||
|
||||
## Description
|
||||
|
||||
The `ScrapflyScrapeWebsiteTool` is designed to leverage [Scrapfly](https://scrapfly.io/)'s web scraping API to extract content from websites. This tool provides advanced web scraping capabilities with headless browser support, proxies, and anti-bot bypass features. It allows for extracting web page data in various formats, including raw HTML, markdown, and plain text, making it ideal for a wide range of web scraping tasks.
|
||||
|
||||
## Installation
|
||||
|
||||
To use this tool, you need to install the Scrapfly SDK:
|
||||
|
||||
```shell
|
||||
uv add scrapfly-sdk
|
||||
```
|
||||
|
||||
You'll also need to obtain a Scrapfly API key by registering at [scrapfly.io/register](https://www.scrapfly.io/register/).
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
To effectively use the `ScrapflyScrapeWebsiteTool`, follow these steps:
|
||||
|
||||
1. **Install Dependencies**: Install the Scrapfly SDK using the command above.
|
||||
2. **Obtain API Key**: Register at Scrapfly to get your API key.
|
||||
3. **Initialize the Tool**: Create an instance of the tool with your API key.
|
||||
4. **Configure Scraping Parameters**: Customize the scraping parameters based on your needs.
|
||||
|
||||
## Example
|
||||
|
||||
The following example demonstrates how to use the `ScrapflyScrapeWebsiteTool` to extract content from a website:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import ScrapflyScrapeWebsiteTool
|
||||
|
||||
# Initialize the tool
|
||||
scrape_tool = ScrapflyScrapeWebsiteTool(api_key="your_scrapfly_api_key")
|
||||
|
||||
# Define an agent that uses the tool
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract information from websites",
|
||||
backstory="An expert in web scraping who can extract content from any website.",
|
||||
tools=[scrape_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Example task to extract content from a website
|
||||
scrape_task = Task(
|
||||
description="Extract the main content from the product page at https://web-scraping.dev/products and summarize the available products.",
|
||||
expected_output="A summary of the products available on the website.",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(agents=[web_scraper_agent], tasks=[scrape_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
You can also customize the scraping parameters:
|
||||
|
||||
```python Code
|
||||
# Example with custom scraping parameters
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract information from websites with custom parameters",
|
||||
backstory="An expert in web scraping who can extract content from any website.",
|
||||
tools=[scrape_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# The agent will use the tool with parameters like:
|
||||
# url="https://web-scraping.dev/products"
|
||||
# scrape_format="markdown"
|
||||
# ignore_scrape_failures=True
|
||||
# scrape_config={
|
||||
# "asp": True, # Bypass scraping blocking solutions, like Cloudflare
|
||||
# "render_js": True, # Enable JavaScript rendering with a cloud headless browser
|
||||
# "proxy_pool": "public_residential_pool", # Select a proxy pool
|
||||
# "country": "us", # Select a proxy location
|
||||
# "auto_scroll": True, # Auto scroll the page
|
||||
# }
|
||||
|
||||
scrape_task = Task(
|
||||
description="Extract the main content from the product page at https://web-scraping.dev/products using advanced scraping options including JavaScript rendering and proxy settings.",
|
||||
expected_output="A detailed summary of the products with all available information.",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
The `ScrapflyScrapeWebsiteTool` accepts the following parameters:
|
||||
|
||||
### Initialization Parameters
|
||||
|
||||
- **api_key**: Required. Your Scrapfly API key.
|
||||
|
||||
### Run Parameters
|
||||
|
||||
- **url**: Required. The URL of the website to scrape.
|
||||
- **scrape_format**: Optional. The format in which to extract the web page content. Options are "raw" (HTML), "markdown", or "text". Default is "markdown".
|
||||
- **scrape_config**: Optional. A dictionary containing additional Scrapfly scraping configuration options.
|
||||
- **ignore_scrape_failures**: Optional. Whether to ignore failures during scraping. If set to `True`, the tool will return `None` instead of raising an exception when scraping fails.
|
||||
|
||||
## Scrapfly Configuration Options
|
||||
|
||||
The `scrape_config` parameter allows you to customize the scraping behavior with the following options:
|
||||
|
||||
- **asp**: Enable anti-scraping protection bypass.
|
||||
- **render_js**: Enable JavaScript rendering with a cloud headless browser.
|
||||
- **proxy_pool**: Select a proxy pool (e.g., "public_residential_pool", "datacenter").
|
||||
- **country**: Select a proxy location (e.g., "us", "uk").
|
||||
- **auto_scroll**: Automatically scroll the page to load lazy-loaded content.
|
||||
- **js**: Execute custom JavaScript code by the headless browser.
|
||||
|
||||
For a complete list of configuration options, refer to the [Scrapfly API documentation](https://scrapfly.io/docs/scrape-api/getting-started).
|
||||
|
||||
## Usage
|
||||
|
||||
When using the `ScrapflyScrapeWebsiteTool` with an agent, the agent will need to provide the URL of the website to scrape and can optionally specify the format and additional configuration options:
|
||||
|
||||
```python Code
|
||||
# Example of using the tool with an agent
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract information from websites",
|
||||
backstory="An expert in web scraping who can extract content from any website.",
|
||||
tools=[scrape_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create a task for the agent
|
||||
scrape_task = Task(
|
||||
description="Extract the main content from example.com in markdown format.",
|
||||
expected_output="The main content of example.com in markdown format.",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(agents=[web_scraper_agent], tasks=[scrape_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
For more advanced usage with custom configuration:
|
||||
|
||||
```python Code
|
||||
# Create a task with more specific instructions
|
||||
advanced_scrape_task = Task(
|
||||
description="""
|
||||
Extract content from example.com with the following requirements:
|
||||
- Convert the content to plain text format
|
||||
- Enable JavaScript rendering
|
||||
- Use a US-based proxy
|
||||
- Handle any scraping failures gracefully
|
||||
""",
|
||||
expected_output="The extracted content from example.com",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
By default, the `ScrapflyScrapeWebsiteTool` will raise an exception if scraping fails. Agents can be instructed to handle failures gracefully by specifying the `ignore_scrape_failures` parameter:
|
||||
|
||||
```python Code
|
||||
# Create a task that instructs the agent to handle errors
|
||||
error_handling_task = Task(
|
||||
description="""
|
||||
Extract content from a potentially problematic website and make sure to handle any
|
||||
scraping failures gracefully by setting ignore_scrape_failures to True.
|
||||
""",
|
||||
expected_output="Either the extracted content or a graceful error message",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The `ScrapflyScrapeWebsiteTool` uses the Scrapfly SDK to interact with the Scrapfly API:
|
||||
|
||||
```python Code
|
||||
class ScrapflyScrapeWebsiteTool(BaseTool):
|
||||
name: str = "Scrapfly web scraping API tool"
|
||||
description: str = (
|
||||
"Scrape a webpage url using Scrapfly and return its content as markdown or text"
|
||||
)
|
||||
|
||||
# Implementation details...
|
||||
|
||||
def _run(
|
||||
self,
|
||||
url: str,
|
||||
scrape_format: str = "markdown",
|
||||
scrape_config: Optional[Dict[str, Any]] = None,
|
||||
ignore_scrape_failures: Optional[bool] = None,
|
||||
):
|
||||
from scrapfly import ScrapeApiResponse, ScrapeConfig
|
||||
|
||||
scrape_config = scrape_config if scrape_config is not None else {}
|
||||
try:
|
||||
response: ScrapeApiResponse = self.scrapfly.scrape(
|
||||
ScrapeConfig(url, format=scrape_format, **scrape_config)
|
||||
)
|
||||
return response.scrape_result["content"]
|
||||
except Exception as e:
|
||||
if ignore_scrape_failures:
|
||||
logger.error(f"Error fetching data from {url}, exception: {e}")
|
||||
return None
|
||||
else:
|
||||
raise e
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `ScrapflyScrapeWebsiteTool` provides a powerful way to extract content from websites using Scrapfly's advanced web scraping capabilities. With features like headless browser support, proxies, and anti-bot bypass, it can handle complex websites and extract content in various formats. This tool is particularly useful for data extraction, content monitoring, and research tasks where reliable web scraping is required.
|
||||
@@ -13,64 +13,183 @@ icon: clipboard-user
|
||||
|
||||
## Description
|
||||
|
||||
The SeleniumScrapingTool is crafted for high-efficiency web scraping tasks.
|
||||
The `SeleniumScrapingTool` is crafted for high-efficiency web scraping tasks.
|
||||
It allows for precise extraction of content from web pages by using CSS selectors to target specific elements.
|
||||
Its design caters to a wide range of scraping needs, offering flexibility to work with any provided website URL.
|
||||
|
||||
## Installation
|
||||
|
||||
To get started with the SeleniumScrapingTool, install the crewai_tools package using pip:
|
||||
To use this tool, you need to install the CrewAI tools package and Selenium:
|
||||
|
||||
```shell
|
||||
pip install 'crewai[tools]'
|
||||
uv add selenium webdriver-manager
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
You'll also need to have Chrome installed on your system, as the tool uses Chrome WebDriver for browser automation.
|
||||
|
||||
Below are some scenarios where the SeleniumScrapingTool can be utilized:
|
||||
## Example
|
||||
|
||||
The following example demonstrates how to use the `SeleniumScrapingTool` with a CrewAI agent:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew, Process
|
||||
from crewai_tools import SeleniumScrapingTool
|
||||
|
||||
# Example 1:
|
||||
# Initialize the tool without any parameters to scrape
|
||||
# the current page it navigates to
|
||||
tool = SeleniumScrapingTool()
|
||||
# Initialize the tool
|
||||
selenium_tool = SeleniumScrapingTool()
|
||||
|
||||
# Example 2:
|
||||
# Scrape the entire webpage of a given URL
|
||||
tool = SeleniumScrapingTool(website_url='https://example.com')
|
||||
|
||||
# Example 3:
|
||||
# Target and scrape a specific CSS element from a webpage
|
||||
tool = SeleniumScrapingTool(
|
||||
website_url='https://example.com',
|
||||
css_element='.main-content'
|
||||
# Define an agent that uses the tool
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract information from websites using Selenium",
|
||||
backstory="An expert web scraper who can extract content from dynamic websites.",
|
||||
tools=[selenium_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Example 4:
|
||||
# Perform scraping with additional parameters for a customized experience
|
||||
tool = SeleniumScrapingTool(
|
||||
# Example task to scrape content from a website
|
||||
scrape_task = Task(
|
||||
description="Extract the main content from the homepage of example.com. Use the CSS selector 'main' to target the main content area.",
|
||||
expected_output="The main content from example.com's homepage.",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(
|
||||
agents=[web_scraper_agent],
|
||||
tasks=[scrape_task],
|
||||
verbose=True,
|
||||
process=Process.sequential,
|
||||
)
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
You can also initialize the tool with predefined parameters:
|
||||
|
||||
```python Code
|
||||
# Initialize the tool with predefined parameters
|
||||
selenium_tool = SeleniumScrapingTool(
|
||||
website_url='https://example.com',
|
||||
css_element='.main-content',
|
||||
cookie={'name': 'user', 'value': 'John Doe'},
|
||||
wait_time=10
|
||||
wait_time=5
|
||||
)
|
||||
|
||||
# Define an agent that uses the tool
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract information from websites using Selenium",
|
||||
backstory="An expert web scraper who can extract content from dynamic websites.",
|
||||
tools=[selenium_tool],
|
||||
verbose=True,
|
||||
)
|
||||
```
|
||||
|
||||
## Arguments
|
||||
## Parameters
|
||||
|
||||
The following parameters can be used to customize the SeleniumScrapingTool's scraping process:
|
||||
The `SeleniumScrapingTool` accepts the following parameters during initialization:
|
||||
|
||||
| Argument | Type | Description |
|
||||
|:---------------|:---------|:-------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **website_url** | `string` | **Mandatory**. Specifies the URL of the website from which content is to be scraped. |
|
||||
| **css_element** | `string` | **Mandatory**. The CSS selector for a specific element to target on the website, enabling focused scraping of a particular part of a webpage. |
|
||||
| **cookie** | `object` | **Optional**. A dictionary containing cookie information, useful for simulating a logged-in session to access restricted content. |
|
||||
| **wait_time** | `int` | **Optional**. Specifies the delay (in seconds) before scraping, allowing the website and any dynamic content to fully load. |
|
||||
- **website_url**: Optional. The URL of the website to scrape. If provided during initialization, the agent won't need to specify it when using the tool.
|
||||
- **css_element**: Optional. The CSS selector for the elements to extract. If provided during initialization, the agent won't need to specify it when using the tool.
|
||||
- **cookie**: Optional. A dictionary containing cookie information, useful for simulating a logged-in session to access restricted content.
|
||||
- **wait_time**: Optional. Specifies the delay (in seconds) before scraping, allowing the website and any dynamic content to fully load. Default is `3` seconds.
|
||||
- **return_html**: Optional. Whether to return the HTML content instead of just the text. Default is `False`.
|
||||
|
||||
When using the tool with an agent, the agent will need to provide the following parameters (unless they were specified during initialization):
|
||||
|
||||
<Warning>
|
||||
Since the `SeleniumScrapingTool` is under active development, the parameters and functionality may evolve over time.
|
||||
Users are encouraged to keep the tool updated and report any issues or suggestions for enhancements.
|
||||
</Warning>
|
||||
- **website_url**: Required. The URL of the website to scrape.
|
||||
- **css_element**: Required. The CSS selector for the elements to extract.
|
||||
|
||||
## Agent Integration Example
|
||||
|
||||
Here's a more detailed example of how to integrate the `SeleniumScrapingTool` with a CrewAI agent:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew, Process
|
||||
from crewai_tools import SeleniumScrapingTool
|
||||
|
||||
# Initialize the tool
|
||||
selenium_tool = SeleniumScrapingTool()
|
||||
|
||||
# Define an agent that uses the tool
|
||||
web_scraper_agent = Agent(
|
||||
role="Web Scraper",
|
||||
goal="Extract and analyze information from dynamic websites",
|
||||
backstory="""You are an expert web scraper who specializes in extracting
|
||||
content from dynamic websites that require browser automation. You have
|
||||
extensive knowledge of CSS selectors and can identify the right selectors
|
||||
to target specific content on any website.""",
|
||||
tools=[selenium_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create a task for the agent
|
||||
scrape_task = Task(
|
||||
description="""
|
||||
Extract the following information from the news website at {website_url}:
|
||||
|
||||
1. The headlines of all featured articles (CSS selector: '.headline')
|
||||
2. The publication dates of these articles (CSS selector: '.pub-date')
|
||||
3. The author names where available (CSS selector: '.author')
|
||||
|
||||
Compile this information into a structured format with each article's details grouped together.
|
||||
""",
|
||||
expected_output="A structured list of articles with their headlines, publication dates, and authors.",
|
||||
agent=web_scraper_agent,
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(
|
||||
agents=[web_scraper_agent],
|
||||
tasks=[scrape_task],
|
||||
verbose=True,
|
||||
process=Process.sequential,
|
||||
)
|
||||
result = crew.kickoff(inputs={"website_url": "https://news-example.com"})
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The `SeleniumScrapingTool` uses Selenium WebDriver to automate browser interactions:
|
||||
|
||||
```python Code
|
||||
class SeleniumScrapingTool(BaseTool):
|
||||
name: str = "Read a website content"
|
||||
description: str = "A tool that can be used to read a website content."
|
||||
args_schema: Type[BaseModel] = SeleniumScrapingToolSchema
|
||||
|
||||
def _run(self, **kwargs: Any) -> Any:
|
||||
website_url = kwargs.get("website_url", self.website_url)
|
||||
css_element = kwargs.get("css_element", self.css_element)
|
||||
return_html = kwargs.get("return_html", self.return_html)
|
||||
driver = self._create_driver(website_url, self.cookie, self.wait_time)
|
||||
|
||||
content = self._get_content(driver, css_element, return_html)
|
||||
driver.close()
|
||||
|
||||
return "\n".join(content)
|
||||
```
|
||||
|
||||
The tool performs the following steps:
|
||||
1. Creates a headless Chrome browser instance
|
||||
2. Navigates to the specified URL
|
||||
3. Waits for the specified time to allow the page to load
|
||||
4. Adds any cookies if provided
|
||||
5. Extracts content based on the CSS selector
|
||||
6. Returns the extracted content as text or HTML
|
||||
7. Closes the browser instance
|
||||
|
||||
## Handling Dynamic Content
|
||||
|
||||
The `SeleniumScrapingTool` is particularly useful for scraping websites with dynamic content that is loaded via JavaScript. By using a real browser instance, it can:
|
||||
|
||||
1. Execute JavaScript on the page
|
||||
2. Wait for dynamic content to load
|
||||
3. Interact with elements if needed
|
||||
4. Extract content that would not be available with simple HTTP requests
|
||||
|
||||
You can adjust the `wait_time` parameter to ensure that all dynamic content has loaded before extraction.
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `SeleniumScrapingTool` provides a powerful way to extract content from websites using browser automation. By enabling agents to interact with websites as a real user would, it facilitates scraping of dynamic content that would be difficult or impossible to extract using simpler methods. This tool is particularly useful for research, data collection, and monitoring tasks that involve modern web applications with JavaScript-rendered content.
|
||||
|
||||
202
docs/tools/snowflakesearchtool.mdx
Normal file
202
docs/tools/snowflakesearchtool.mdx
Normal file
@@ -0,0 +1,202 @@
|
||||
---
|
||||
title: Snowflake Search Tool
|
||||
description: The `SnowflakeSearchTool` enables CrewAI agents to execute SQL queries and perform semantic search on Snowflake data warehouses.
|
||||
icon: snowflake
|
||||
---
|
||||
|
||||
# `SnowflakeSearchTool`
|
||||
|
||||
## Description
|
||||
|
||||
The `SnowflakeSearchTool` is designed to connect to Snowflake data warehouses and execute SQL queries with advanced features like connection pooling, retry logic, and asynchronous execution. This tool allows CrewAI agents to interact with Snowflake databases, making it ideal for data analysis, reporting, and business intelligence tasks that require access to enterprise data stored in Snowflake.
|
||||
|
||||
## Installation
|
||||
|
||||
To use this tool, you need to install the required dependencies:
|
||||
|
||||
```shell
|
||||
uv add cryptography snowflake-connector-python snowflake-sqlalchemy
|
||||
```
|
||||
|
||||
Or alternatively:
|
||||
|
||||
```shell
|
||||
uv sync --extra snowflake
|
||||
```
|
||||
|
||||
## Steps to Get Started
|
||||
|
||||
To effectively use the `SnowflakeSearchTool`, follow these steps:
|
||||
|
||||
1. **Install Dependencies**: Install the required packages using one of the commands above.
|
||||
2. **Configure Snowflake Connection**: Create a `SnowflakeConfig` object with your Snowflake credentials.
|
||||
3. **Initialize the Tool**: Create an instance of the tool with the necessary configuration.
|
||||
4. **Execute Queries**: Use the tool to run SQL queries against your Snowflake database.
|
||||
|
||||
## Example
|
||||
|
||||
The following example demonstrates how to use the `SnowflakeSearchTool` to query data from a Snowflake database:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import SnowflakeSearchTool, SnowflakeConfig
|
||||
|
||||
# Create Snowflake configuration
|
||||
config = SnowflakeConfig(
|
||||
account="your_account",
|
||||
user="your_username",
|
||||
password="your_password",
|
||||
warehouse="COMPUTE_WH",
|
||||
database="your_database",
|
||||
snowflake_schema="your_schema"
|
||||
)
|
||||
|
||||
# Initialize the tool
|
||||
snowflake_tool = SnowflakeSearchTool(config=config)
|
||||
|
||||
# Define an agent that uses the tool
|
||||
data_analyst_agent = Agent(
|
||||
role="Data Analyst",
|
||||
goal="Analyze data from Snowflake database",
|
||||
backstory="An expert data analyst who can extract insights from enterprise data.",
|
||||
tools=[snowflake_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Example task to query sales data
|
||||
query_task = Task(
|
||||
description="Query the sales data for the last quarter and summarize the top 5 products by revenue.",
|
||||
expected_output="A summary of the top 5 products by revenue for the last quarter.",
|
||||
agent=data_analyst_agent,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(agents=[data_analyst_agent],
|
||||
tasks=[query_task])
|
||||
result = crew.kickoff()
|
||||
```
|
||||
|
||||
You can also customize the tool with additional parameters:
|
||||
|
||||
```python Code
|
||||
# Initialize the tool with custom parameters
|
||||
snowflake_tool = SnowflakeSearchTool(
|
||||
config=config,
|
||||
pool_size=10,
|
||||
max_retries=5,
|
||||
retry_delay=2.0,
|
||||
enable_caching=True
|
||||
)
|
||||
```
|
||||
|
||||
## Parameters
|
||||
|
||||
### SnowflakeConfig Parameters
|
||||
|
||||
The `SnowflakeConfig` class accepts the following parameters:
|
||||
|
||||
- **account**: Required. Snowflake account identifier.
|
||||
- **user**: Required. Snowflake username.
|
||||
- **password**: Optional*. Snowflake password.
|
||||
- **private_key_path**: Optional*. Path to private key file (alternative to password).
|
||||
- **warehouse**: Required. Snowflake warehouse name.
|
||||
- **database**: Required. Default database.
|
||||
- **snowflake_schema**: Required. Default schema.
|
||||
- **role**: Optional. Snowflake role.
|
||||
- **session_parameters**: Optional. Custom session parameters as a dictionary.
|
||||
|
||||
*Either `password` or `private_key_path` must be provided.
|
||||
|
||||
### SnowflakeSearchTool Parameters
|
||||
|
||||
The `SnowflakeSearchTool` accepts the following parameters during initialization:
|
||||
|
||||
- **config**: Required. A `SnowflakeConfig` object containing connection details.
|
||||
- **pool_size**: Optional. Number of connections in the pool. Default is 5.
|
||||
- **max_retries**: Optional. Maximum retry attempts for failed queries. Default is 3.
|
||||
- **retry_delay**: Optional. Delay between retries in seconds. Default is 1.0.
|
||||
- **enable_caching**: Optional. Whether to enable query result caching. Default is True.
|
||||
|
||||
## Usage
|
||||
|
||||
When using the `SnowflakeSearchTool`, you need to provide the following parameters:
|
||||
|
||||
- **query**: Required. The SQL query to execute.
|
||||
- **database**: Optional. Override the default database specified in the config.
|
||||
- **snowflake_schema**: Optional. Override the default schema specified in the config.
|
||||
- **timeout**: Optional. Query timeout in seconds. Default is 300.
|
||||
|
||||
The tool will return the query results as a list of dictionaries, where each dictionary represents a row with column names as keys.
|
||||
|
||||
```python Code
# Example of using the tool with an agent
data_analyst = Agent(
    role="Data Analyst",
    goal="Analyze sales data from Snowflake",
    backstory="An expert data analyst with experience in SQL and data visualization.",
    tools=[snowflake_tool],
    verbose=True
)

# The agent will use the tool with parameters like:
# query="SELECT product_name, SUM(revenue) as total_revenue FROM sales GROUP BY product_name ORDER BY total_revenue DESC LIMIT 5"
# timeout=600

# Create a task for the agent
analysis_task = Task(
    description="Query the sales database and identify the top 5 products by revenue for the last quarter.",
    expected_output="A detailed analysis of the top 5 products by revenue.",
    agent=data_analyst
)

# Run the task
crew = Crew(
    agents=[data_analyst],
    tasks=[analysis_task]
)
result = crew.kickoff()
```

## Advanced Features

### Connection Pooling

The `SnowflakeSearchTool` implements connection pooling to improve performance by reusing database connections. You can control the pool size with the `pool_size` parameter.

### Automatic Retries

The tool automatically retries failed queries with exponential backoff. You can configure the retry behavior with the `max_retries` and `retry_delay` parameters.
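As a sketch of what exponential backoff means in practice, the wait between attempts roughly doubles each time. The exact formula used internally is not spelled out here, so treat the following as an illustration rather than the tool's implementation:

```python Code
# Illustration only: how an exponential backoff schedule grows
retry_delay = 1.0   # seconds, as passed to SnowflakeSearchTool
max_retries = 3

for attempt in range(max_retries):
    wait_seconds = retry_delay * (2 ** attempt)  # 1.0s, 2.0s, 4.0s
    print(f"Attempt {attempt + 1} failed, retrying in {wait_seconds:.1f}s")
```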
### Query Result Caching

To improve performance for repeated queries, the tool can cache query results. This feature is enabled by default but can be disabled by setting `enable_caching=False`.

### Key-Pair Authentication

In addition to password authentication, the tool supports key-pair authentication for enhanced security:

```python Code
config = SnowflakeConfig(
    account="your_account",
    user="your_username",
    private_key_path="/path/to/your/private/key.p8",
    warehouse="COMPUTE_WH",
    database="your_database",
    snowflake_schema="your_schema"
)
```
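If you do not yet have a key pair, one common way to generate a PKCS#8 private key with OpenSSL is shown below. This is a general-purpose sketch, not a command taken from this repository; follow Snowflake's key-pair authentication guide for the full procedure, including registering the public key with your Snowflake user.

```shell
# Generate an RSA key and convert it to unencrypted PKCS#8 format
openssl genrsa -out rsa_key.pem 2048
openssl pkcs8 -topk8 -inform PEM -in rsa_key.pem -out rsa_key.p8 -nocrypt

# Derive the public key to register with your Snowflake user
openssl rsa -in rsa_key.pem -pubout -out rsa_key.pub
```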
## Error Handling

The `SnowflakeSearchTool` includes comprehensive error handling for common Snowflake issues:

- Connection failures
- Query timeouts
- Authentication errors
- Database and schema errors

When an error occurs, the tool will attempt to retry the operation (if configured) and provide detailed error information.

## Conclusion

The `SnowflakeSearchTool` provides a powerful way to integrate Snowflake data warehouses with CrewAI agents. With features like connection pooling, automatic retries, and query caching, it enables efficient and reliable access to enterprise data. This tool is particularly useful for data analysis, reporting, and business intelligence tasks that require access to structured data stored in Snowflake.
164 docs/tools/weaviatevectorsearchtool.mdx Normal file
@@ -0,0 +1,164 @@
---
title: Weaviate Vector Search
description: The `WeaviateVectorSearchTool` is designed to search a Weaviate vector database for semantically similar documents.
icon: database
---

# `WeaviateVectorSearchTool`

## Description

The `WeaviateVectorSearchTool` is specifically crafted for conducting semantic searches within documents stored in a Weaviate vector database. This tool allows you to find semantically similar documents to a given query, leveraging the power of vector embeddings for more accurate and contextually relevant search results.

[Weaviate](https://weaviate.io/) is a vector database that stores and queries vector embeddings, enabling semantic search capabilities.

## Installation

To incorporate this tool into your project, you need to install the Weaviate client:

```shell
uv add weaviate-client
```

## Steps to Get Started

To effectively use the `WeaviateVectorSearchTool`, follow these steps:

1. **Package Installation**: Confirm that the `crewai[tools]` and `weaviate-client` packages are installed in your Python environment.
2. **Weaviate Setup**: Set up a Weaviate cluster. You can follow the [Weaviate documentation](https://weaviate.io/developers/wcs/connect) for instructions.
3. **API Keys**: Obtain your Weaviate cluster URL and API key.
4. **OpenAI API Key**: Ensure you have an OpenAI API key set in your environment variables as `OPENAI_API_KEY` (one way to set this, together with your Weaviate credentials, is shown below).
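If you prefer to keep credentials out of your code, you can export them as environment variables and read them with `os.environ` when initializing the tool. The steps above only require `OPENAI_API_KEY`; the Weaviate variable names below are simply a convention for your own code, not something the tool reads automatically.

```shell
export OPENAI_API_KEY="sk-..."
export WEAVIATE_CLUSTER_URL="https://your-weaviate-cluster-url.com"
export WEAVIATE_API_KEY="your-weaviate-api-key"
```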
## Example

The following example demonstrates how to initialize the tool and execute a search:

```python Code
from crewai import Agent
from crewai.project import agent
from crewai_tools import WeaviateVectorSearchTool

# Initialize the tool
tool = WeaviateVectorSearchTool(
    collection_name='example_collections',
    limit=3,
    weaviate_cluster_url="https://your-weaviate-cluster-url.com",
    weaviate_api_key="your-weaviate-api-key",
)

# Inside a class decorated with @CrewBase:
@agent
def search_agent(self) -> Agent:
    '''
    This agent uses the WeaviateVectorSearchTool to search for
    semantically similar documents in a Weaviate vector database.
    '''
    return Agent(
        config=self.agents_config["search_agent"],
        tools=[tool]
    )
```

## Parameters

The `WeaviateVectorSearchTool` accepts the following parameters:

- **collection_name**: Required. The name of the collection to search within.
- **weaviate_cluster_url**: Required. The URL of the Weaviate cluster.
- **weaviate_api_key**: Required. The API key for the Weaviate cluster.
- **limit**: Optional. The number of results to return. Default is `3`.
- **vectorizer**: Optional. The vectorizer to use. If not provided, it will use `text2vec_openai` with the `nomic-embed-text` model.
- **generative_model**: Optional. The generative model to use. If not provided, it will use OpenAI's `gpt-4o`.
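Outside of a crew, you can also call the tool directly to sanity-check your connection, assuming `tool` is the instance created in the example above. The keyword argument name `query` below is an assumption based on the tool's purpose rather than a documented schema, so verify it against the args schema of your installed `crewai_tools` version:

```python Code
# Direct invocation sketch (argument name is an assumption)
results = tool.run(query="documents about machine learning frameworks")
print(results)
```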
## Advanced Configuration

You can customize the vectorizer and generative model used by the tool:

```python Code
from crewai_tools import WeaviateVectorSearchTool
from weaviate.classes.config import Configure

# Setup custom model for vectorizer and generative model
tool = WeaviateVectorSearchTool(
    collection_name='example_collections',
    limit=3,
    vectorizer=Configure.Vectorizer.text2vec_openai(model="nomic-embed-text"),
    generative_model=Configure.Generative.openai(model="gpt-4o-mini"),
    weaviate_cluster_url="https://your-weaviate-cluster-url.com",
    weaviate_api_key="your-weaviate-api-key",
)
```

## Preloading Documents

You can preload your Weaviate database with documents before using the tool:

```python Code
import os
from crewai_tools import WeaviateVectorSearchTool
import weaviate
from weaviate.classes.config import Configure
from weaviate.classes.init import Auth

# Connect to Weaviate
client = weaviate.connect_to_weaviate_cloud(
    cluster_url="https://your-weaviate-cluster-url.com",
    auth_credentials=Auth.api_key("your-weaviate-api-key"),
    headers={"X-OpenAI-Api-Key": "your-openai-api-key"}
)

# Get or create collection
test_docs = client.collections.get("example_collections")
if not test_docs:
    test_docs = client.collections.create(
        name="example_collections",
        vectorizer_config=Configure.Vectorizer.text2vec_openai(model="nomic-embed-text"),
        generative_config=Configure.Generative.openai(model="gpt-4o"),
    )

# Load documents
docs_to_load = os.listdir("knowledge")
with test_docs.batch.dynamic() as batch:
    for d in docs_to_load:
        with open(os.path.join("knowledge", d), "r") as f:
            content = f.read()
        batch.add_object(
            {
                "content": content,
                "year": d.split("_")[0],
            }
        )

# Initialize the tool
tool = WeaviateVectorSearchTool(
    collection_name='example_collections',
    limit=3,
    weaviate_cluster_url="https://your-weaviate-cluster-url.com",
    weaviate_api_key="your-weaviate-api-key",
)
```

## Agent Integration Example

Here's how to integrate the `WeaviateVectorSearchTool` with a CrewAI agent:

```python Code
from crewai import Agent
from crewai_tools import WeaviateVectorSearchTool

# Initialize the tool
weaviate_tool = WeaviateVectorSearchTool(
    collection_name='example_collections',
    limit=3,
    weaviate_cluster_url="https://your-weaviate-cluster-url.com",
    weaviate_api_key="your-weaviate-api-key",
)

# Create an agent with the tool
rag_agent = Agent(
    name="rag_agent",
    role="You are a helpful assistant that can answer questions with the help of the WeaviateVectorSearchTool.",
    llm="gpt-4o-mini",
    tools=[weaviate_tool],
)
```

## Conclusion

The `WeaviateVectorSearchTool` provides a powerful way to search for semantically similar documents in a Weaviate vector database. By leveraging vector embeddings, it enables more accurate and contextually relevant search results compared to traditional keyword-based searches. This tool is particularly useful for applications that require finding information based on meaning rather than exact matches.
@@ -27,31 +27,73 @@ pip install 'crewai[tools]'
|
||||
|
||||
## Example
|
||||
|
||||
To begin using the YoutubeChannelSearchTool, follow the example below.
|
||||
This demonstrates initializing the tool with a specific Youtube channel handle and conducting a search within that channel's content.
|
||||
The following example demonstrates how to use the `YoutubeChannelSearchTool` with a CrewAI agent:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import YoutubeChannelSearchTool
|
||||
|
||||
# Initialize the tool to search within any Youtube channel's content the agent learns about during its execution
|
||||
tool = YoutubeChannelSearchTool()
|
||||
# Initialize the tool for general YouTube channel searches
|
||||
youtube_channel_tool = YoutubeChannelSearchTool()
|
||||
|
||||
# OR
|
||||
# Define an agent that uses the tool
|
||||
channel_researcher = Agent(
|
||||
role="Channel Researcher",
|
||||
goal="Extract relevant information from YouTube channels",
|
||||
backstory="An expert researcher who specializes in analyzing YouTube channel content.",
|
||||
tools=[youtube_channel_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Initialize the tool with a specific Youtube channel handle to target your search
|
||||
tool = YoutubeChannelSearchTool(youtube_channel_handle='@exampleChannel')
|
||||
# Example task to search for information in a specific channel
|
||||
research_task = Task(
|
||||
description="Search for information about machine learning tutorials in the YouTube channel {youtube_channel_handle}",
|
||||
expected_output="A summary of the key machine learning tutorials available on the channel.",
|
||||
agent=channel_researcher,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(agents=[channel_researcher], tasks=[research_task])
|
||||
result = crew.kickoff(inputs={"youtube_channel_handle": "@exampleChannel"})
|
||||
```
|
||||
|
||||
## Arguments
|
||||
You can also initialize the tool with a specific YouTube channel handle:
|
||||
|
||||
- `youtube_channel_handle` : A mandatory string representing the Youtube channel handle. This parameter is crucial for initializing the tool to specify the channel you want to search within. The tool is designed to only search within the content of the provided channel handle.
|
||||
```python Code
|
||||
# Initialize the tool with a specific YouTube channel handle
|
||||
youtube_channel_tool = YoutubeChannelSearchTool(
|
||||
youtube_channel_handle='@exampleChannel'
|
||||
)
|
||||
|
||||
## Custom model and embeddings
|
||||
# Define an agent that uses the tool
|
||||
channel_researcher = Agent(
|
||||
role="Channel Researcher",
|
||||
goal="Extract relevant information from a specific YouTube channel",
|
||||
backstory="An expert researcher who specializes in analyzing YouTube channel content.",
|
||||
tools=[youtube_channel_tool],
|
||||
verbose=True,
|
||||
)
|
||||
```
|
||||
|
||||
## Parameters

The `YoutubeChannelSearchTool` accepts the following parameters:

- **youtube_channel_handle**: Optional. The handle of the YouTube channel to search within. If provided during initialization, the agent won't need to specify it when using the tool. If the handle doesn't start with '@', it will be automatically added.
- **config**: Optional. Configuration for the underlying RAG system, including LLM and embedder settings.
- **summarize**: Optional. Whether to summarize the retrieved content. Default is `False`.

When using the tool with an agent, the agent will need to provide:

- **search_query**: Required. The search query to find relevant information in the channel content.
- **youtube_channel_handle**: Required only if not provided during initialization. The handle of the YouTube channel to search within.
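Concretely, when the handle is not fixed at initialization, each tool call needs both arguments described above. The direct invocation below is only a sketch of that shape (agents normally issue this call for you), and it assumes the `youtube_channel_tool` instance from the earlier example:

```python Code
# Sketch: supplying both arguments at call time
results = youtube_channel_tool.run(
    search_query="machine learning tutorials",
    youtube_channel_handle="@exampleChannel",
)
print(results)
```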
## Custom Model and Embeddings
|
||||
|
||||
By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows:
|
||||
|
||||
```python Code
|
||||
tool = YoutubeChannelSearchTool(
|
||||
```python Code
|
||||
youtube_channel_tool = YoutubeChannelSearchTool(
|
||||
config=dict(
|
||||
llm=dict(
|
||||
provider="ollama", # or google, openai, anthropic, llama2, ...
|
||||
@@ -72,4 +114,81 @@ tool = YoutubeChannelSearchTool(
|
||||
),
|
||||
)
|
||||
)
|
||||
```
|
||||
```
|
||||
|
||||
## Agent Integration Example
|
||||
|
||||
Here's a more detailed example of how to integrate the `YoutubeChannelSearchTool` with a CrewAI agent:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import YoutubeChannelSearchTool
|
||||
|
||||
# Initialize the tool
|
||||
youtube_channel_tool = YoutubeChannelSearchTool()
|
||||
|
||||
# Define an agent that uses the tool
|
||||
channel_researcher = Agent(
|
||||
role="Channel Researcher",
|
||||
goal="Extract and analyze information from YouTube channels",
|
||||
backstory="""You are an expert channel researcher who specializes in extracting
|
||||
and analyzing information from YouTube channels. You have a keen eye for detail
|
||||
and can quickly identify key points and insights from video content across an entire channel.""",
|
||||
tools=[youtube_channel_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create a task for the agent
|
||||
research_task = Task(
|
||||
description="""
|
||||
Search for information about data science projects and tutorials
|
||||
in the YouTube channel {youtube_channel_handle}.
|
||||
|
||||
Focus on:
|
||||
1. Key data science techniques covered
|
||||
2. Popular tutorial series
|
||||
3. Most viewed or recommended videos
|
||||
|
||||
Provide a comprehensive summary of these points.
|
||||
""",
|
||||
expected_output="A detailed summary of data science content available on the channel.",
|
||||
agent=channel_researcher,
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(agents=[channel_researcher], tasks=[research_task])
|
||||
result = crew.kickoff(inputs={"youtube_channel_handle": "@exampleDataScienceChannel"})
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The `YoutubeChannelSearchTool` is implemented as a subclass of `RagTool`, which provides the base functionality for Retrieval-Augmented Generation:
|
||||
|
||||
```python Code
|
||||
class YoutubeChannelSearchTool(RagTool):
|
||||
name: str = "Search a Youtube Channels content"
|
||||
description: str = "A tool that can be used to semantic search a query from a Youtube Channels content."
|
||||
args_schema: Type[BaseModel] = YoutubeChannelSearchToolSchema
|
||||
|
||||
def __init__(self, youtube_channel_handle: Optional[str] = None, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
if youtube_channel_handle is not None:
|
||||
kwargs["data_type"] = DataType.YOUTUBE_CHANNEL
|
||||
self.add(youtube_channel_handle)
|
||||
self.description = f"A tool that can be used to semantic search a query the {youtube_channel_handle} Youtube Channels content."
|
||||
self.args_schema = FixedYoutubeChannelSearchToolSchema
|
||||
self._generate_description()
|
||||
|
||||
def add(
|
||||
self,
|
||||
youtube_channel_handle: str,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
if not youtube_channel_handle.startswith("@"):
|
||||
youtube_channel_handle = f"@{youtube_channel_handle}"
|
||||
super().add(youtube_channel_handle, **kwargs)
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `YoutubeChannelSearchTool` provides a powerful way to search and extract information from YouTube channel content using RAG techniques. By enabling agents to search across an entire channel's videos, it facilitates information extraction and analysis tasks that would otherwise be difficult to perform. This tool is particularly useful for research, content analysis, and knowledge extraction from YouTube channels.
|
||||
@@ -29,35 +29,73 @@ pip install 'crewai[tools]'
|
||||
|
||||
## Example
|
||||
|
||||
To integrate the YoutubeVideoSearchTool into your Python projects, follow the example below.
|
||||
This demonstrates how to use the tool both for general Youtube content searches and for targeted searches within a specific video's content.
|
||||
The following example demonstrates how to use the `YoutubeVideoSearchTool` with a CrewAI agent:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import YoutubeVideoSearchTool
|
||||
|
||||
# General search across Youtube content without specifying a video URL,
|
||||
# so the agent can search within any Youtube video content
|
||||
# it learns about its url during its operation
|
||||
tool = YoutubeVideoSearchTool()
|
||||
# Initialize the tool for general YouTube video searches
|
||||
youtube_search_tool = YoutubeVideoSearchTool()
|
||||
|
||||
# Targeted search within a specific Youtube video's content
|
||||
tool = YoutubeVideoSearchTool(
|
||||
# Define an agent that uses the tool
|
||||
video_researcher = Agent(
|
||||
role="Video Researcher",
|
||||
goal="Extract relevant information from YouTube videos",
|
||||
backstory="An expert researcher who specializes in analyzing video content.",
|
||||
tools=[youtube_search_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Example task to search for information in a specific video
|
||||
research_task = Task(
|
||||
description="Search for information about machine learning frameworks in the YouTube video at {youtube_video_url}",
|
||||
expected_output="A summary of the key machine learning frameworks mentioned in the video.",
|
||||
agent=video_researcher,
|
||||
)
|
||||
|
||||
# Create and run the crew
|
||||
crew = Crew(agents=[video_researcher], tasks=[research_task])
|
||||
result = crew.kickoff(inputs={"youtube_video_url": "https://youtube.com/watch?v=example"})
|
||||
```
|
||||
|
||||
You can also initialize the tool with a specific YouTube video URL:
|
||||
|
||||
```python Code
|
||||
# Initialize the tool with a specific YouTube video URL
|
||||
youtube_search_tool = YoutubeVideoSearchTool(
|
||||
youtube_video_url='https://youtube.com/watch?v=example'
|
||||
)
|
||||
|
||||
# Define an agent that uses the tool
|
||||
video_researcher = Agent(
|
||||
role="Video Researcher",
|
||||
goal="Extract relevant information from a specific YouTube video",
|
||||
backstory="An expert researcher who specializes in analyzing video content.",
|
||||
tools=[youtube_search_tool],
|
||||
verbose=True,
|
||||
)
|
||||
```
|
||||
|
||||
## Arguments
|
||||
## Parameters
|
||||
|
||||
The YoutubeVideoSearchTool accepts the following initialization arguments:
|
||||
The `YoutubeVideoSearchTool` accepts the following parameters:
|
||||
|
||||
- `youtube_video_url`: An optional argument at initialization but required if targeting a specific Youtube video. It specifies the Youtube video URL path you want to search within.
|
||||
- **youtube_video_url**: Optional. The URL of the YouTube video to search within. If provided during initialization, the agent won't need to specify it when using the tool.
|
||||
- **config**: Optional. Configuration for the underlying RAG system, including LLM and embedder settings.
|
||||
- **summarize**: Optional. Whether to summarize the retrieved content. Default is `False`.
|
||||
|
||||
## Custom model and embeddings
|
||||
When using the tool with an agent, the agent will need to provide:
|
||||
|
||||
- **search_query**: Required. The search query to find relevant information in the video content.
|
||||
- **youtube_video_url**: Required only if not provided during initialization. The URL of the YouTube video to search within.
|
||||
|
||||
## Custom Model and Embeddings
|
||||
|
||||
By default, the tool uses OpenAI for both embeddings and summarization. To customize the model, you can use a config dictionary as follows:
|
||||
|
||||
```python Code
|
||||
tool = YoutubeVideoSearchTool(
|
||||
youtube_search_tool = YoutubeVideoSearchTool(
|
||||
config=dict(
|
||||
llm=dict(
|
||||
provider="ollama", # or google, openai, anthropic, llama2, ...
|
||||
@@ -78,4 +116,72 @@ tool = YoutubeVideoSearchTool(
|
||||
),
|
||||
)
|
||||
)
|
||||
```
|
||||
```
|
||||
|
||||
## Agent Integration Example
|
||||
|
||||
Here's a more detailed example of how to integrate the `YoutubeVideoSearchTool` with a CrewAI agent:
|
||||
|
||||
```python Code
|
||||
from crewai import Agent, Task, Crew
|
||||
from crewai_tools import YoutubeVideoSearchTool
|
||||
|
||||
# Initialize the tool
|
||||
youtube_search_tool = YoutubeVideoSearchTool()
|
||||
|
||||
# Define an agent that uses the tool
|
||||
video_researcher = Agent(
|
||||
role="Video Researcher",
|
||||
goal="Extract and analyze information from YouTube videos",
|
||||
backstory="""You are an expert video researcher who specializes in extracting
|
||||
and analyzing information from YouTube videos. You have a keen eye for detail
|
||||
and can quickly identify key points and insights from video content.""",
|
||||
tools=[youtube_search_tool],
|
||||
verbose=True,
|
||||
)
|
||||
|
||||
# Create a task for the agent
|
||||
research_task = Task(
|
||||
description="""
|
||||
Search for information about recent advancements in artificial intelligence
|
||||
in the YouTube video at {youtube_video_url}.
|
||||
|
||||
Focus on:
|
||||
1. Key AI technologies mentioned
|
||||
2. Real-world applications discussed
|
||||
3. Future predictions made by the speaker
|
||||
|
||||
Provide a comprehensive summary of these points.
|
||||
""",
|
||||
expected_output="A detailed summary of AI advancements, applications, and future predictions from the video.",
|
||||
agent=video_researcher,
|
||||
)
|
||||
|
||||
# Run the task
|
||||
crew = Crew(agents=[video_researcher], tasks=[research_task])
|
||||
result = crew.kickoff(inputs={"youtube_video_url": "https://youtube.com/watch?v=example"})
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
The `YoutubeVideoSearchTool` is implemented as a subclass of `RagTool`, which provides the base functionality for Retrieval-Augmented Generation:
|
||||
|
||||
```python Code
|
||||
class YoutubeVideoSearchTool(RagTool):
|
||||
name: str = "Search a Youtube Video content"
|
||||
description: str = "A tool that can be used to semantic search a query from a Youtube Video content."
|
||||
args_schema: Type[BaseModel] = YoutubeVideoSearchToolSchema
|
||||
|
||||
def __init__(self, youtube_video_url: Optional[str] = None, **kwargs):
|
||||
super().__init__(**kwargs)
|
||||
if youtube_video_url is not None:
|
||||
kwargs["data_type"] = DataType.YOUTUBE_VIDEO
|
||||
self.add(youtube_video_url)
|
||||
self.description = f"A tool that can be used to semantic search a query the {youtube_video_url} Youtube Video content."
|
||||
self.args_schema = FixedYoutubeVideoSearchToolSchema
|
||||
self._generate_description()
|
||||
```
|
||||
|
||||
## Conclusion
|
||||
|
||||
The `YoutubeVideoSearchTool` provides a powerful way to search and extract information from YouTube video content using RAG techniques. By enabling agents to search within video content, it facilitates information extraction and analysis tasks that would otherwise be difficult to perform. This tool is particularly useful for research, content analysis, and knowledge extraction from video sources.
|
||||
@@ -152,6 +152,7 @@ nav:
- Agent Monitoring with AgentOps: 'how-to/AgentOps-Observability.md'
- Agent Monitoring with LangTrace: 'how-to/Langtrace-Observability.md'
- Agent Monitoring with OpenLIT: 'how-to/openlit-Observability.md'
- Agent Monitoring with MLflow: 'how-to/mlflow-Observability.md'
- Tools Docs:
- Browserbase Web Loader: 'tools/BrowserbaseLoadTool.md'
- Code Docs RAG Search: 'tools/CodeDocsSearchTool.md'
@@ -1,6 +1,6 @@
[project]
name = "crewai"
version = "0.95.0"
version = "0.102.0"
description = "Cutting-edge framework for orchestrating role-playing, autonomous AI agents. By fostering collaborative intelligence, CrewAI empowers agents to work together seamlessly, tackling complex tasks."
readme = "README.md"
requires-python = ">=3.10,<3.13"
@@ -11,27 +11,22 @@ dependencies = [
# Core Dependencies
"pydantic>=2.4.2",
"openai>=1.13.3",
"litellm==1.57.4",
"litellm==1.60.2",
"instructor>=1.3.3",

# Text Processing
"pdfplumber>=0.11.4",
"regex>=2024.9.11",

# Telemetry and Monitoring
"opentelemetry-api>=1.22.0",
"opentelemetry-sdk>=1.22.0",
"opentelemetry-exporter-otlp-proto-http>=1.22.0",

# Data Handling
"chromadb>=0.5.23",
"openpyxl>=3.1.5",
"pyvis>=0.3.2",

# Authentication and Security
"auth0-python>=4.7.1",
"python-dotenv>=1.0.0",

# Configuration and Utils
"click>=8.1.7",
"appdirs>=1.4.4",
@@ -40,7 +35,8 @@ dependencies = [
"uv>=0.4.25",
"tomli-w>=1.1.0",
"tomli>=2.0.2",
"blinker>=1.9.0"
"blinker>=1.9.0",
"json5>=0.10.0",
]

[project.urls]
@@ -49,7 +45,7 @@ Documentation = "https://docs.crewai.com"
Repository = "https://github.com/crewAIInc/crewAI"

[project.optional-dependencies]
tools = ["crewai-tools>=0.25.5"]
tools = ["crewai-tools>=0.36.0"]
embeddings = [
"tiktoken~=0.7.0"
]
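With the `tools` extra now pinned to `crewai-tools>=0.36.0`, installing CrewAI together with the bundled tools documented above looks like this:

```shell
pip install 'crewai[tools]'
# or, with uv
uv add 'crewai[tools]'
```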
@@ -14,7 +14,7 @@ warnings.filterwarnings(
category=UserWarning,
module="pydantic.main",
)
__version__ = "0.95.0"
__version__ = "0.102.0"
__all__ = [
"Agent",
"Crew",
@@ -1,14 +1,13 @@
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
from typing import Any, Dict, List, Literal, Optional, Union
|
||||
from typing import Any, Dict, List, Literal, Optional, Sequence, Union
|
||||
|
||||
from pydantic import Field, InstanceOf, PrivateAttr, model_validator
|
||||
|
||||
from crewai.agents import CacheHandler
|
||||
from crewai.agents.agent_builder.base_agent import BaseAgent
|
||||
from crewai.agents.crew_agent_executor import CrewAgentExecutor
|
||||
from crewai.cli.constants import ENV_VARS, LITELLM_PARAMS
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
|
||||
@@ -17,29 +16,20 @@ from crewai.memory.contextual.contextual_memory import ContextualMemory
|
||||
from crewai.task import Task
|
||||
from crewai.tools import BaseTool
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.tools.base_tool import Tool
|
||||
from crewai.utilities import Converter, Prompts
|
||||
from crewai.utilities.constants import TRAINED_AGENTS_DATA_FILE, TRAINING_DATA_FILE
|
||||
from crewai.utilities.converter import generate_model_description
|
||||
from crewai.utilities.events.agent_events import (
|
||||
AgentExecutionCompletedEvent,
|
||||
AgentExecutionErrorEvent,
|
||||
AgentExecutionStartedEvent,
|
||||
)
|
||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
||||
from crewai.utilities.llm_utils import create_llm
|
||||
from crewai.utilities.token_counter_callback import TokenCalcHandler
|
||||
from crewai.utilities.training_handler import CrewTrainingHandler
|
||||
|
||||
agentops = None
|
||||
|
||||
try:
|
||||
import agentops # type: ignore # Name "agentops" is already defined
|
||||
from agentops import track_agent # type: ignore
|
||||
except ImportError:
|
||||
|
||||
def track_agent():
|
||||
def noop(f):
|
||||
return f
|
||||
|
||||
return noop
|
||||
|
||||
|
||||
@track_agent()
|
||||
class Agent(BaseAgent):
|
||||
"""Represents an agent in a system.
|
||||
|
||||
@@ -56,13 +46,13 @@ class Agent(BaseAgent):
|
||||
llm: The language model that will run the agent.
|
||||
function_calling_llm: The language model that will handle the tool calling for this agent, it overrides the crew function_calling_llm.
|
||||
max_iter: Maximum number of iterations for an agent to execute a task.
|
||||
memory: Whether the agent should have memory or not.
|
||||
max_rpm: Maximum number of requests per minute for the agent execution to be respected.
|
||||
verbose: Whether the agent execution should be in verbose mode.
|
||||
allow_delegation: Whether the agent is allowed to delegate tasks to other agents.
|
||||
tools: Tools at agents disposal
|
||||
step_callback: Callback to be executed after each step of the agent execution.
|
||||
knowledge_sources: Knowledge sources for the agent.
|
||||
embedder: Embedder configuration for the agent.
|
||||
"""
|
||||
|
||||
_times_executed: int = PrivateAttr(default=0)
|
||||
@@ -72,9 +62,6 @@ class Agent(BaseAgent):
|
||||
)
|
||||
agent_ops_agent_name: str = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
|
||||
agent_ops_agent_id: str = None # type: ignore # Incompatible types in assignment (expression has type "None", variable has type "str")
|
||||
cache_handler: InstanceOf[CacheHandler] = Field(
|
||||
default=None, description="An instance of the CacheHandler class."
|
||||
)
|
||||
step_callback: Optional[Any] = Field(
|
||||
default=None,
|
||||
description="Callback to be executed after each step of the agent execution.",
|
||||
@@ -108,10 +95,6 @@ class Agent(BaseAgent):
|
||||
default=True,
|
||||
description="Keep messages under the context window size by summarizing content.",
|
||||
)
|
||||
max_iter: int = Field(
|
||||
default=20,
|
||||
description="Maximum number of iterations for an agent to execute a task before giving it's best answer",
|
||||
)
|
||||
max_retry_limit: int = Field(
|
||||
default=2,
|
||||
description="Maximum number of retries for an agent to execute a task when an error occurs.",
|
||||
@@ -124,21 +107,13 @@ class Agent(BaseAgent):
|
||||
default="safe",
|
||||
description="Mode for code execution: 'safe' (using Docker) or 'unsafe' (direct execution).",
|
||||
)
|
||||
embedder_config: Optional[Dict[str, Any]] = Field(
|
||||
embedder: Optional[Dict[str, Any]] = Field(
|
||||
default=None,
|
||||
description="Embedder configuration for the agent.",
|
||||
)
|
||||
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
|
||||
default=None,
|
||||
description="Knowledge sources for the agent.",
|
||||
)
|
||||
_knowledge: Optional[Knowledge] = PrivateAttr(
|
||||
default=None,
|
||||
)
|
||||
|
||||
@model_validator(mode="after")
|
||||
def post_init_setup(self):
|
||||
self._set_knowledge()
|
||||
self.agent_ops_agent_name = self.role
|
||||
|
||||
self.llm = create_llm(self.llm)
|
||||
@@ -158,17 +133,22 @@ class Agent(BaseAgent):
|
||||
self.cache_handler = CacheHandler()
|
||||
self.set_cache_handler(self.cache_handler)
|
||||
|
||||
def _set_knowledge(self):
|
||||
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
||||
try:
|
||||
if self.embedder is None and crew_embedder:
|
||||
self.embedder = crew_embedder
|
||||
|
||||
if self.knowledge_sources:
|
||||
knowledge_agent_name = f"{self.role.replace(' ', '_')}"
|
||||
full_pattern = re.compile(r"[^a-zA-Z0-9\-_\r\n]|(\.\.)")
|
||||
knowledge_agent_name = f"{re.sub(full_pattern, '_', self.role)}"
|
||||
if isinstance(self.knowledge_sources, list) and all(
|
||||
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
|
||||
):
|
||||
self._knowledge = Knowledge(
|
||||
self.knowledge = Knowledge(
|
||||
sources=self.knowledge_sources,
|
||||
embedder_config=self.embedder_config,
|
||||
embedder=self.embedder,
|
||||
collection_name=knowledge_agent_name,
|
||||
storage=self.knowledge_storage or None,
|
||||
)
|
||||
except (TypeError, ValueError) as e:
|
||||
raise ValueError(f"Invalid Knowledge Configuration: {str(e)}")
|
||||
@@ -202,13 +182,15 @@ class Agent(BaseAgent):
|
||||
if task.output_json:
|
||||
# schema = json.dumps(task.output_json, indent=2)
|
||||
schema = generate_model_description(task.output_json)
|
||||
task_prompt += "\n" + self.i18n.slice(
|
||||
"formatted_task_instructions"
|
||||
).format(output_format=schema)
|
||||
|
||||
elif task.output_pydantic:
|
||||
schema = generate_model_description(task.output_pydantic)
|
||||
|
||||
task_prompt += "\n" + self.i18n.slice("formatted_task_instructions").format(
|
||||
output_format=schema
|
||||
)
|
||||
task_prompt += "\n" + self.i18n.slice(
|
||||
"formatted_task_instructions"
|
||||
).format(output_format=schema)
|
||||
|
||||
if context:
|
||||
task_prompt = self.i18n.slice("task_with_context").format(
|
||||
@@ -227,8 +209,8 @@ class Agent(BaseAgent):
|
||||
if memory.strip() != "":
|
||||
task_prompt += self.i18n.slice("memory").format(memory=memory)
|
||||
|
||||
if self._knowledge:
|
||||
agent_knowledge_snippets = self._knowledge.query([task.prompt()])
|
||||
if self.knowledge:
|
||||
agent_knowledge_snippets = self.knowledge.query([task.prompt()])
|
||||
if agent_knowledge_snippets:
|
||||
agent_knowledge_context = extract_knowledge_context(
|
||||
agent_knowledge_snippets
|
||||
@@ -252,6 +234,15 @@ class Agent(BaseAgent):
|
||||
task_prompt = self._use_trained_data(task_prompt=task_prompt)
|
||||
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=AgentExecutionStartedEvent(
|
||||
agent=self,
|
||||
tools=self.tools,
|
||||
task_prompt=task_prompt,
|
||||
task=task,
|
||||
),
|
||||
)
|
||||
result = self.agent_executor.invoke(
|
||||
{
|
||||
"input": task_prompt,
|
||||
@@ -261,8 +252,27 @@ class Agent(BaseAgent):
|
||||
}
|
||||
)["output"]
|
||||
except Exception as e:
|
||||
if e.__class__.__module__.startswith("litellm"):
|
||||
# Do not retry on litellm errors
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=AgentExecutionErrorEvent(
|
||||
agent=self,
|
||||
task=task,
|
||||
error=str(e),
|
||||
),
|
||||
)
|
||||
raise e
|
||||
self._times_executed += 1
|
||||
if self._times_executed > self.max_retry_limit:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=AgentExecutionErrorEvent(
|
||||
agent=self,
|
||||
task=task,
|
||||
error=str(e),
|
||||
),
|
||||
)
|
||||
raise e
|
||||
result = self.execute_task(task, context, tools)
|
||||
|
||||
@@ -275,7 +285,10 @@ class Agent(BaseAgent):
|
||||
for tool_result in self.tools_results: # type: ignore # Item "None" of "list[Any] | None" has no attribute "__iter__" (not iterable)
|
||||
if tool_result.get("result_as_answer", False):
|
||||
result = tool_result["result"]
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=AgentExecutionCompletedEvent(agent=self, task=task, output=result),
|
||||
)
|
||||
return result
|
||||
|
||||
def create_agent_executor(
|
||||
@@ -333,14 +346,14 @@ class Agent(BaseAgent):
|
||||
tools = agent_tools.tools()
|
||||
return tools
|
||||
|
||||
def get_multimodal_tools(self) -> List[Tool]:
|
||||
def get_multimodal_tools(self) -> Sequence[BaseTool]:
|
||||
from crewai.tools.agent_tools.add_image_tool import AddImageTool
|
||||
|
||||
return [AddImageTool()]
|
||||
|
||||
def get_code_execution_tools(self):
|
||||
try:
|
||||
from crewai_tools import CodeInterpreterTool
|
||||
from crewai_tools import CodeInterpreterTool # type: ignore
|
||||
|
||||
# Set the unsafe_mode based on the code_execution_mode attribute
|
||||
unsafe_mode = self.code_execution_mode == "unsafe"
|
||||
|
||||
@@ -18,10 +18,12 @@ from pydantic_core import PydanticCustomError
|
||||
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess
|
||||
from crewai.agents.cache.cache_handler import CacheHandler
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
from crewai.tools import BaseTool
|
||||
from crewai.tools.base_tool import Tool
|
||||
from crewai.knowledge.knowledge import Knowledge
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.tools.base_tool import BaseTool, Tool
|
||||
from crewai.utilities import I18N, Logger, RPMController
|
||||
from crewai.utilities.config import process_config
|
||||
from crewai.utilities.converter import Converter
|
||||
|
||||
T = TypeVar("T", bound="BaseAgent")
|
||||
|
||||
@@ -40,7 +42,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
max_rpm (Optional[int]): Maximum number of requests per minute for the agent execution.
|
||||
allow_delegation (bool): Allow delegation of tasks to agents.
|
||||
tools (Optional[List[Any]]): Tools at the agent's disposal.
|
||||
max_iter (Optional[int]): Maximum iterations for an agent to execute a task.
|
||||
max_iter (int): Maximum iterations for an agent to execute a task.
|
||||
agent_executor (InstanceOf): An instance of the CrewAgentExecutor class.
|
||||
llm (Any): Language model that will run the agent.
|
||||
crew (Any): Crew to which the agent belongs.
|
||||
@@ -48,6 +50,8 @@ class BaseAgent(ABC, BaseModel):
|
||||
cache_handler (InstanceOf[CacheHandler]): An instance of the CacheHandler class.
|
||||
tools_handler (InstanceOf[ToolsHandler]): An instance of the ToolsHandler class.
|
||||
max_tokens: Maximum number of tokens for the agent to generate in a response.
|
||||
knowledge_sources: Knowledge sources for the agent.
|
||||
knowledge_storage: Custom knowledge storage for the agent.
|
||||
|
||||
|
||||
Methods:
|
||||
@@ -107,10 +111,10 @@ class BaseAgent(ABC, BaseModel):
|
||||
default=False,
|
||||
description="Enable agent to delegate and ask questions among each other.",
|
||||
)
|
||||
tools: Optional[List[Any]] = Field(
|
||||
tools: Optional[List[BaseTool]] = Field(
|
||||
default_factory=list, description="Tools at agents' disposal"
|
||||
)
|
||||
max_iter: Optional[int] = Field(
|
||||
max_iter: int = Field(
|
||||
default=25, description="Maximum iterations for an agent to execute a task"
|
||||
)
|
||||
agent_executor: InstanceOf = Field(
|
||||
@@ -121,15 +125,27 @@ class BaseAgent(ABC, BaseModel):
|
||||
)
|
||||
crew: Any = Field(default=None, description="Crew to which the agent belongs.")
|
||||
i18n: I18N = Field(default=I18N(), description="Internationalization settings.")
|
||||
cache_handler: InstanceOf[CacheHandler] = Field(
|
||||
cache_handler: Optional[InstanceOf[CacheHandler]] = Field(
|
||||
default=None, description="An instance of the CacheHandler class."
|
||||
)
|
||||
tools_handler: InstanceOf[ToolsHandler] = Field(
|
||||
default=None, description="An instance of the ToolsHandler class."
|
||||
default_factory=ToolsHandler,
|
||||
description="An instance of the ToolsHandler class.",
|
||||
)
|
||||
max_tokens: Optional[int] = Field(
|
||||
default=None, description="Maximum number of tokens for the agent's execution."
|
||||
)
|
||||
knowledge: Optional[Knowledge] = Field(
|
||||
default=None, description="Knowledge for the agent."
|
||||
)
|
||||
knowledge_sources: Optional[List[BaseKnowledgeSource]] = Field(
|
||||
default=None,
|
||||
description="Knowledge sources for the agent.",
|
||||
)
|
||||
knowledge_storage: Optional[Any] = Field(
|
||||
default=None,
|
||||
description="Custom knowledge storage for the agent.",
|
||||
)
|
||||
|
||||
@model_validator(mode="before")
|
||||
@classmethod
|
||||
@@ -239,7 +255,7 @@ class BaseAgent(ABC, BaseModel):
|
||||
@abstractmethod
|
||||
def get_output_converter(
|
||||
self, llm: Any, text: str, model: type[BaseModel] | None, instructions: str
|
||||
):
|
||||
) -> Converter:
|
||||
"""Get the converter class for the agent to create json/pydantic outputs."""
|
||||
pass
|
||||
|
||||
@@ -256,13 +272,44 @@ class BaseAgent(ABC, BaseModel):
|
||||
"tools_handler",
|
||||
"cache_handler",
|
||||
"llm",
|
||||
"knowledge_sources",
|
||||
"knowledge_storage",
|
||||
"knowledge",
|
||||
}
|
||||
|
||||
# Copy llm and clear callbacks
|
||||
# Copy llm
|
||||
existing_llm = shallow_copy(self.llm)
|
||||
copied_knowledge = shallow_copy(self.knowledge)
|
||||
copied_knowledge_storage = shallow_copy(self.knowledge_storage)
|
||||
# Properly copy knowledge sources if they exist
|
||||
existing_knowledge_sources = None
|
||||
if self.knowledge_sources:
|
||||
# Create a shared storage instance for all knowledge sources
|
||||
shared_storage = (
|
||||
self.knowledge_sources[0].storage if self.knowledge_sources else None
|
||||
)
|
||||
|
||||
existing_knowledge_sources = []
|
||||
for source in self.knowledge_sources:
|
||||
copied_source = (
|
||||
source.model_copy()
|
||||
if hasattr(source, "model_copy")
|
||||
else shallow_copy(source)
|
||||
)
|
||||
# Ensure all copied sources use the same storage instance
|
||||
copied_source.storage = shared_storage
|
||||
existing_knowledge_sources.append(copied_source)
|
||||
|
||||
copied_data = self.model_dump(exclude=exclude)
|
||||
copied_data = {k: v for k, v in copied_data.items() if v is not None}
|
||||
copied_agent = type(self)(**copied_data, llm=existing_llm, tools=self.tools)
|
||||
copied_agent = type(self)(
|
||||
**copied_data,
|
||||
llm=existing_llm,
|
||||
tools=self.tools,
|
||||
knowledge_sources=existing_knowledge_sources,
|
||||
knowledge=copied_knowledge,
|
||||
knowledge_storage=copied_knowledge_storage,
|
||||
)
|
||||
|
||||
return copied_agent
|
||||
|
||||
@@ -304,3 +351,6 @@ class BaseAgent(ABC, BaseModel):
|
||||
if not self._rpm_controller:
|
||||
self._rpm_controller = rpm_controller
|
||||
self.create_agent_executor()
|
||||
|
||||
def set_knowledge(self, crew_embedder: Optional[Dict[str, Any]] = None):
|
||||
pass
|
||||
|
||||
@@ -95,18 +95,34 @@ class CrewAgentExecutorMixin:
|
||||
pass
|
||||
|
||||
def _ask_human_input(self, final_answer: str) -> str:
|
||||
"""Prompt human input for final decision making."""
|
||||
"""Prompt human input with mode-appropriate messaging."""
|
||||
self._printer.print(
|
||||
content=f"\033[1m\033[95m ## Final Result:\033[00m \033[92m{final_answer}\033[00m"
|
||||
)
|
||||
|
||||
self._printer.print(
|
||||
content=(
|
||||
# Training mode prompt (single iteration)
|
||||
if self.crew and getattr(self.crew, "_train", False):
|
||||
prompt = (
|
||||
"\n\n=====\n"
|
||||
"## Please provide feedback on the Final Result and the Agent's actions. "
|
||||
"Respond with 'looks good' or a similar phrase when you're satisfied.\n"
|
||||
"## TRAINING MODE: Provide feedback to improve the agent's performance.\n"
|
||||
"This will be used to train better versions of the agent.\n"
|
||||
"Please provide detailed feedback about the result quality and reasoning process.\n"
|
||||
"=====\n"
|
||||
),
|
||||
color="bold_yellow",
|
||||
)
|
||||
return input()
|
||||
)
|
||||
# Regular human-in-the-loop prompt (multiple iterations)
|
||||
else:
|
||||
prompt = (
|
||||
"\n\n=====\n"
|
||||
"## HUMAN FEEDBACK: Provide feedback on the Final Result and Agent's actions.\n"
|
||||
"Please follow these guidelines:\n"
|
||||
" - If you are happy with the result, simply hit Enter without typing anything.\n"
|
||||
" - Otherwise, provide specific improvement requests.\n"
|
||||
" - You can provide multiple rounds of feedback until satisfied.\n"
|
||||
"=====\n"
|
||||
)
|
||||
|
||||
self._printer.print(content=prompt, color="bold_yellow")
|
||||
response = input()
|
||||
if response.strip() != "":
|
||||
self._printer.print(content="\nProcessing your feedback...", color="cyan")
|
||||
return response
|
||||
|
||||
@@ -25,17 +25,17 @@ class OutputConverter(BaseModel, ABC):
|
||||
llm: Any = Field(description="The language model to be used to convert the text.")
|
||||
model: Any = Field(description="The model to be used to convert the text.")
|
||||
instructions: str = Field(description="Conversion instructions to the LLM.")
|
||||
max_attempts: Optional[int] = Field(
|
||||
max_attempts: int = Field(
|
||||
description="Max number of attempts to try to get the output formatted.",
|
||||
default=3,
|
||||
)
|
||||
|
||||
@abstractmethod
|
||||
def to_pydantic(self, current_attempt=1):
|
||||
def to_pydantic(self, current_attempt=1) -> BaseModel:
|
||||
"""Convert text to pydantic."""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def to_json(self, current_attempt=1):
|
||||
def to_json(self, current_attempt=1) -> dict:
|
||||
"""Convert text to json."""
|
||||
pass
|
||||
|
||||
@@ -2,25 +2,26 @@ from crewai.types.usage_metrics import UsageMetrics
|
||||
|
||||
|
||||
class TokenProcess:
|
||||
total_tokens: int = 0
|
||||
prompt_tokens: int = 0
|
||||
cached_prompt_tokens: int = 0
|
||||
completion_tokens: int = 0
|
||||
successful_requests: int = 0
|
||||
def __init__(self) -> None:
|
||||
self.total_tokens: int = 0
|
||||
self.prompt_tokens: int = 0
|
||||
self.cached_prompt_tokens: int = 0
|
||||
self.completion_tokens: int = 0
|
||||
self.successful_requests: int = 0
|
||||
|
||||
def sum_prompt_tokens(self, tokens: int):
|
||||
self.prompt_tokens = self.prompt_tokens + tokens
|
||||
self.total_tokens = self.total_tokens + tokens
|
||||
def sum_prompt_tokens(self, tokens: int) -> None:
|
||||
self.prompt_tokens += tokens
|
||||
self.total_tokens += tokens
|
||||
|
||||
def sum_completion_tokens(self, tokens: int):
|
||||
self.completion_tokens = self.completion_tokens + tokens
|
||||
self.total_tokens = self.total_tokens + tokens
|
||||
def sum_completion_tokens(self, tokens: int) -> None:
|
||||
self.completion_tokens += tokens
|
||||
self.total_tokens += tokens
|
||||
|
||||
def sum_cached_prompt_tokens(self, tokens: int):
|
||||
self.cached_prompt_tokens = self.cached_prompt_tokens + tokens
|
||||
def sum_cached_prompt_tokens(self, tokens: int) -> None:
|
||||
self.cached_prompt_tokens += tokens
|
||||
|
||||
def sum_successful_requests(self, requests: int):
|
||||
self.successful_requests = self.successful_requests + requests
|
||||
def sum_successful_requests(self, requests: int) -> None:
|
||||
self.successful_requests += requests
|
||||
|
||||
def get_summary(self) -> UsageMetrics:
|
||||
return UsageMetrics(
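The hunk above turns the `TokenProcess` counters into instance attributes, so separate executors no longer share mutable class state. A small illustrative exercise of the counters (not taken from the repository's tests):

```python
from crewai.agents.agent_builder.utilities.base_token_process import TokenProcess

process = TokenProcess()
process.sum_prompt_tokens(120)      # adds to prompt_tokens and total_tokens
process.sum_completion_tokens(45)   # adds to completion_tokens and total_tokens
process.sum_successful_requests(1)

metrics = process.get_summary()     # returns a UsageMetrics instance
print(metrics.total_tokens)         # 165
```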
|
||||
|
||||
@@ -13,10 +13,17 @@ from crewai.agents.parser import (
|
||||
OutputParserException,
|
||||
)
|
||||
from crewai.agents.tools_handler import ToolsHandler
|
||||
from crewai.llm import LLM
|
||||
from crewai.tools.base_tool import BaseTool
|
||||
from crewai.tools.tool_usage import ToolUsage, ToolUsageErrorException
|
||||
from crewai.utilities import I18N, Printer
|
||||
from crewai.utilities.constants import MAX_LLM_RETRY, TRAINING_DATA_FILE
|
||||
from crewai.utilities.events import (
|
||||
ToolUsageErrorEvent,
|
||||
ToolUsageStartedEvent,
|
||||
crewai_event_bus,
|
||||
)
|
||||
from crewai.utilities.events.tool_usage_events import ToolUsageStartedEvent
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededException,
|
||||
)
|
||||
@@ -54,7 +61,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
callbacks: List[Any] = [],
|
||||
):
|
||||
self._i18n: I18N = I18N()
|
||||
self.llm = llm
|
||||
self.llm: LLM = llm
|
||||
self.task = task
|
||||
self.agent = agent
|
||||
self.crew = crew
|
||||
@@ -80,10 +87,8 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self.tool_name_to_tool_map: Dict[str, BaseTool] = {
|
||||
tool.name: tool for tool in self.tools
|
||||
}
|
||||
if self.llm.stop:
|
||||
self.llm.stop = list(set(self.llm.stop + self.stop))
|
||||
else:
|
||||
self.llm.stop = self.stop
|
||||
self.stop = stop_words
|
||||
self.llm.stop = list(set(self.llm.stop + self.stop))
|
||||
|
||||
def invoke(self, inputs: Dict[str, str]) -> Dict[str, Any]:
|
||||
if "system" in self.prompt:
|
||||
@@ -98,7 +103,22 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self._show_start_logs()
|
||||
|
||||
self.ask_for_human_input = bool(inputs.get("ask_for_human_input", False))
|
||||
formatted_answer = self._invoke_loop()
|
||||
|
||||
try:
|
||||
formatted_answer = self._invoke_loop()
|
||||
except AssertionError:
|
||||
self._printer.print(
|
||||
content="Agent failed to reach a final answer. This is likely a bug - please report it.",
|
||||
color="red",
|
||||
)
|
||||
raise
|
||||
except Exception as e:
|
||||
self._handle_unknown_error(e)
|
||||
if e.__class__.__module__.startswith("litellm"):
|
||||
# Do not retry on litellm errors
|
||||
raise e
|
||||
else:
|
||||
raise e
|
||||
|
||||
if self.ask_for_human_input:
|
||||
formatted_answer = self._handle_human_feedback(formatted_answer)
|
||||
@@ -107,7 +127,7 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self._create_long_term_memory(formatted_answer)
|
||||
return {"output": formatted_answer.output}
|
||||
|
||||
def _invoke_loop(self):
|
||||
def _invoke_loop(self) -> AgentFinish:
|
||||
"""
|
||||
Main loop to invoke the agent's thought process until it reaches a conclusion
|
||||
or the maximum number of iterations is reached.
|
||||
@@ -124,7 +144,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self._enforce_rpm_limit()
|
||||
|
||||
answer = self._get_llm_response()
|
||||
|
||||
formatted_answer = self._process_llm_response(answer)
|
||||
|
||||
if isinstance(formatted_answer, AgentAction):
|
||||
@@ -142,13 +161,37 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
formatted_answer = self._handle_output_parser_exception(e)
|
||||
|
||||
except Exception as e:
|
||||
if e.__class__.__module__.startswith("litellm"):
|
||||
# Do not retry on litellm errors
|
||||
raise e
|
||||
if self._is_context_length_exceeded(e):
|
||||
self._handle_context_length()
|
||||
continue
|
||||
else:
|
||||
self._handle_unknown_error(e)
|
||||
raise e
|
||||
finally:
|
||||
self.iterations += 1
|
||||
|
||||
# During the invoke loop, formatted_answer alternates between AgentAction
|
||||
# (when the agent is using tools) and eventually becomes AgentFinish
|
||||
# (when the agent reaches a final answer). This assertion confirms we've
|
||||
# reached a final answer and helps type checking understand this transition.
|
||||
assert isinstance(formatted_answer, AgentFinish)
|
||||
self._show_logs(formatted_answer)
|
||||
return formatted_answer
|
||||
|
||||
def _handle_unknown_error(self, exception: Exception) -> None:
|
||||
"""Handle unknown errors by informing the user."""
|
||||
self._printer.print(
|
||||
content="An unknown error occurred. Please check the details below.",
|
||||
color="red",
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"Error details: {exception}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
def _has_reached_max_iterations(self) -> bool:
|
||||
"""Check if the maximum number of iterations has been reached."""
|
||||
return self.iterations >= self.max_iter
|
||||
@@ -160,10 +203,17 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
|
||||
def _get_llm_response(self) -> str:
|
||||
"""Call the LLM and return the response, handling any invalid responses."""
|
||||
answer = self.llm.call(
|
||||
self.messages,
|
||||
callbacks=self.callbacks,
|
||||
)
|
||||
try:
|
||||
answer = self.llm.call(
|
||||
self.messages,
|
||||
callbacks=self.callbacks,
|
||||
)
|
||||
except Exception as e:
|
||||
self._printer.print(
|
||||
content=f"Error during LLM call: {e}",
|
||||
color="red",
|
||||
)
|
||||
raise e
|
||||
|
||||
if not answer:
|
||||
self._printer.print(
|
||||
@@ -184,7 +234,6 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
if FINAL_ANSWER_AND_PARSABLE_ACTION_ERROR_MESSAGE in e.error:
|
||||
answer = answer.split("Observation:")[0].strip()
|
||||
|
||||
self.iterations += 1
|
||||
return self._format_answer(answer)
|
||||
|
||||
def _handle_agent_action(
|
||||
@@ -260,8 +309,11 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
self._printer.print(
|
||||
content=f"\033[1m\033[95m# Agent:\033[00m \033[1m\033[92m{agent_role}\033[00m"
|
||||
)
|
||||
description = (
|
||||
getattr(self.task, "description") if self.task else "Not Found"
|
||||
)
|
||||
self._printer.print(
|
||||
content=f"\033[95m## Task:\033[00m \033[92m{self.task.description}\033[00m"
|
||||
content=f"\033[95m## Task:\033[00m \033[92m{description}\033[00m"
|
||||
)
|
||||
|
||||
def _show_logs(self, formatted_answer: Union[AgentAction, AgentFinish]):
|
||||
@@ -303,40 +355,68 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
)
|
||||
|
||||
def _execute_tool_and_check_finality(self, agent_action: AgentAction) -> ToolResult:
|
||||
tool_usage = ToolUsage(
|
||||
tools_handler=self.tools_handler,
|
||||
tools=self.tools,
|
||||
original_tools=self.original_tools,
|
||||
tools_description=self.tools_description,
|
||||
tools_names=self.tools_names,
|
||||
function_calling_llm=self.function_calling_llm,
|
||||
task=self.task, # type: ignore[arg-type]
|
||||
agent=self.agent,
|
||||
action=agent_action,
|
||||
)
|
||||
tool_calling = tool_usage.parse_tool_calling(agent_action.text)
|
||||
|
||||
if isinstance(tool_calling, ToolUsageErrorException):
|
||||
tool_result = tool_calling.message
|
||||
return ToolResult(result=tool_result, result_as_answer=False)
|
||||
else:
|
||||
if tool_calling.tool_name.casefold().strip() in [
|
||||
name.casefold().strip() for name in self.tool_name_to_tool_map
|
||||
] or tool_calling.tool_name.casefold().replace("_", " ") in [
|
||||
name.casefold().strip() for name in self.tool_name_to_tool_map
|
||||
]:
|
||||
tool_result = tool_usage.use(tool_calling, agent_action.text)
|
||||
tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
|
||||
if tool:
|
||||
return ToolResult(
|
||||
result=tool_result, result_as_answer=tool.result_as_answer
|
||||
)
|
||||
else:
|
||||
tool_result = self._i18n.errors("wrong_tool_name").format(
|
||||
tool=tool_calling.tool_name,
|
||||
tools=", ".join([tool.name.casefold() for tool in self.tools]),
|
||||
try:
|
||||
if self.agent:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageStartedEvent(
|
||||
agent_key=self.agent.key,
|
||||
agent_role=self.agent.role,
|
||||
tool_name=agent_action.tool,
|
||||
tool_args=agent_action.tool_input,
|
||||
tool_class=agent_action.tool,
|
||||
),
|
||||
)
|
||||
return ToolResult(result=tool_result, result_as_answer=False)
|
||||
tool_usage = ToolUsage(
|
||||
tools_handler=self.tools_handler,
|
||||
tools=self.tools,
|
||||
original_tools=self.original_tools,
|
||||
tools_description=self.tools_description,
|
||||
tools_names=self.tools_names,
|
||||
function_calling_llm=self.function_calling_llm,
|
||||
task=self.task, # type: ignore[arg-type]
|
||||
agent=self.agent,
|
||||
action=agent_action,
|
||||
)
|
||||
tool_calling = tool_usage.parse_tool_calling(agent_action.text)
|
||||
|
||||
if isinstance(tool_calling, ToolUsageErrorException):
|
||||
tool_result = tool_calling.message
|
||||
return ToolResult(result=tool_result, result_as_answer=False)
|
||||
else:
|
||||
if tool_calling.tool_name.casefold().strip() in [
|
||||
name.casefold().strip() for name in self.tool_name_to_tool_map
|
||||
] or tool_calling.tool_name.casefold().replace("_", " ") in [
|
||||
name.casefold().strip() for name in self.tool_name_to_tool_map
|
||||
]:
|
||||
tool_result = tool_usage.use(tool_calling, agent_action.text)
|
||||
tool = self.tool_name_to_tool_map.get(tool_calling.tool_name)
|
||||
if tool:
|
||||
return ToolResult(
|
||||
result=tool_result, result_as_answer=tool.result_as_answer
|
||||
)
|
||||
else:
|
||||
tool_result = self._i18n.errors("wrong_tool_name").format(
|
||||
tool=tool_calling.tool_name,
|
||||
tools=", ".join([tool.name.casefold() for tool in self.tools]),
|
||||
)
|
||||
return ToolResult(result=tool_result, result_as_answer=False)
|
||||
|
||||
except Exception as e:
|
||||
# TODO: drop
|
||||
if self.agent:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolUsageErrorEvent( # validation error
|
||||
agent_key=self.agent.key,
|
||||
agent_role=self.agent.role,
|
||||
tool_name=agent_action.tool,
|
||||
tool_args=agent_action.tool_input,
|
||||
tool_class=agent_action.tool,
|
||||
error=str(e),
|
||||
),
|
||||
)
|
||||
raise e
|
||||
|
||||
def _summarize_messages(self) -> None:
|
||||
messages_groups = []
|
||||
@@ -386,58 +466,50 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
)
|
||||
|
||||
def _handle_crew_training_output(
|
||||
self, result: AgentFinish, human_feedback: str | None = None
|
||||
self, result: AgentFinish, human_feedback: Optional[str] = None
|
||||
) -> None:
|
||||
"""Function to handle the process of the training data."""
|
||||
"""Handle the process of saving training data."""
|
||||
agent_id = str(self.agent.id) # type: ignore
|
||||
train_iteration = (
|
||||
getattr(self.crew, "_train_iteration", None) if self.crew else None
|
||||
)
|
||||
|
||||
if train_iteration is None or not isinstance(train_iteration, int):
|
||||
self._printer.print(
|
||||
content="Invalid or missing train iteration. Cannot save training data.",
|
||||
color="red",
|
||||
)
|
||||
return
|
||||
|
||||
# Load training data
|
||||
training_handler = CrewTrainingHandler(TRAINING_DATA_FILE)
|
||||
training_data = training_handler.load()
|
||||
training_data = training_handler.load() or {}
|
||||
|
||||
# Check if training data exists, human input is not requested, and self.crew is valid
|
||||
if training_data and not self.ask_for_human_input:
|
||||
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
||||
train_iteration = self.crew._train_iteration
|
||||
if agent_id in training_data and isinstance(train_iteration, int):
|
||||
training_data[agent_id][train_iteration][
|
||||
"improved_output"
|
||||
] = result.output
|
||||
training_handler.save(training_data)
|
||||
else:
|
||||
self._printer.print(
|
||||
content="Invalid train iteration type or agent_id not in training data.",
|
||||
color="red",
|
||||
)
|
||||
else:
|
||||
self._printer.print(
|
||||
content="Crew is None or does not have _train_iteration attribute.",
|
||||
color="red",
|
||||
)
|
||||
# Initialize or retrieve agent's training data
|
||||
agent_training_data = training_data.get(agent_id, {})
|
||||
|
||||
if self.ask_for_human_input and human_feedback is not None:
|
||||
training_data = {
|
||||
if human_feedback is not None:
|
||||
# Save initial output and human feedback
|
||||
agent_training_data[train_iteration] = {
|
||||
"initial_output": result.output,
|
||||
"human_feedback": human_feedback,
|
||||
"agent": agent_id,
|
||||
"agent_role": self.agent.role, # type: ignore
|
||||
}
|
||||
if self.crew is not None and hasattr(self.crew, "_train_iteration"):
|
||||
train_iteration = self.crew._train_iteration
|
||||
if isinstance(train_iteration, int):
|
||||
CrewTrainingHandler(TRAINING_DATA_FILE).append(
|
||||
train_iteration, agent_id, training_data
|
||||
)
|
||||
else:
|
||||
self._printer.print(
|
||||
content="Invalid train iteration type. Expected int.",
|
||||
color="red",
|
||||
)
|
||||
else:
|
||||
# Save improved output
|
||||
if train_iteration in agent_training_data:
|
||||
agent_training_data[train_iteration]["improved_output"] = result.output
|
||||
else:
|
||||
self._printer.print(
|
||||
content="Crew is None or does not have _train_iteration attribute.",
|
||||
content=(
|
||||
f"No existing training data for agent {agent_id} and iteration "
|
||||
f"{train_iteration}. Cannot save improved output."
|
||||
),
|
||||
color="red",
|
||||
)
|
||||
return
|
||||
|
||||
# Update the training data and save
|
||||
training_data[agent_id] = agent_training_data
|
||||
training_handler.save(training_data)
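Taken together, the reworked `_handle_crew_training_output` keeps all training data in one nested mapping keyed by agent id and train iteration. A minimal sketch of the resulting shape, with illustrative values only (the field names come from the diff above; the handler is assumed to persist a plain dict):

```python
# Illustrative only: the layout built before training_handler.save() is called.
# Keys and field names follow the diff; every value here is made up.
training_data = {
    "agent-uuid-123": {                     # agent_id
        0: {                                # train_iteration
            "initial_output": "first draft of the report",
            "human_feedback": "Please be more concise.",
            "agent": "agent-uuid-123",
            "agent_role": "Researcher",
            "improved_output": "revised, shorter report",
        },
    },
}
```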
|
||||
|
||||
def _format_prompt(self, prompt: str, inputs: Dict[str, str]) -> str:
|
||||
prompt = prompt.replace("{input}", inputs["input"])
|
||||
@@ -453,82 +525,85 @@ class CrewAgentExecutor(CrewAgentExecutorMixin):
|
||||
return {"role": role, "content": prompt}
|
||||
|
||||
def _handle_human_feedback(self, formatted_answer: AgentFinish) -> AgentFinish:
|
||||
"""
|
||||
Handles the human feedback loop, allowing the user to provide feedback
|
||||
on the agent's output and determining if additional iterations are needed.
|
||||
"""Handle human feedback with different flows for training vs regular use.
|
||||
|
||||
Parameters:
|
||||
formatted_answer (AgentFinish): The initial output from the agent.
|
||||
Args:
|
||||
formatted_answer: The initial AgentFinish result to get feedback on
|
||||
|
||||
Returns:
|
||||
AgentFinish: The final output after incorporating human feedback.
|
||||
AgentFinish: The final answer after processing feedback
|
||||
"""
|
||||
human_feedback = self._ask_human_input(formatted_answer.output)
|
||||
|
||||
if self._is_training_mode():
|
||||
return self._handle_training_feedback(formatted_answer, human_feedback)
|
||||
|
||||
return self._handle_regular_feedback(formatted_answer, human_feedback)
|
||||
|
||||
def _is_training_mode(self) -> bool:
|
||||
"""Check if crew is in training mode."""
|
||||
return bool(self.crew and self.crew._train)
|
||||
|
||||
def _handle_training_feedback(
|
||||
self, initial_answer: AgentFinish, feedback: str
|
||||
) -> AgentFinish:
|
||||
"""Process feedback for training scenarios with single iteration."""
|
||||
self._handle_crew_training_output(initial_answer, feedback)
|
||||
self.messages.append(
|
||||
self._format_msg(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
)
|
||||
improved_answer = self._invoke_loop()
|
||||
self._handle_crew_training_output(improved_answer)
|
||||
self.ask_for_human_input = False
|
||||
return improved_answer
|
||||
|
||||
def _handle_regular_feedback(
|
||||
self, current_answer: AgentFinish, initial_feedback: str
|
||||
) -> AgentFinish:
|
||||
"""Process feedback for regular use with potential multiple iterations."""
|
||||
feedback = initial_feedback
|
||||
answer = current_answer
|
||||
|
||||
while self.ask_for_human_input:
|
||||
human_feedback = self._ask_human_input(formatted_answer.output)
|
||||
|
||||
if self.crew and self.crew._train:
|
||||
self._handle_crew_training_output(formatted_answer, human_feedback)
|
||||
|
||||
# Make an LLM call to verify if additional changes are requested based on human feedback
|
||||
additional_changes_prompt = self._i18n.slice(
|
||||
"human_feedback_classification"
|
||||
).format(feedback=human_feedback)
|
||||
|
||||
retry_count = 0
|
||||
llm_call_successful = False
|
||||
additional_changes_response = None
|
||||
|
||||
while retry_count < MAX_LLM_RETRY and not llm_call_successful:
|
||||
try:
|
||||
additional_changes_response = (
|
||||
self.llm.call(
|
||||
[
|
||||
self._format_msg(
|
||||
additional_changes_prompt, role="system"
|
||||
)
|
||||
],
|
||||
callbacks=self.callbacks,
|
||||
)
|
||||
.strip()
|
||||
.lower()
|
||||
)
|
||||
llm_call_successful = True
|
||||
except Exception as e:
|
||||
retry_count += 1
|
||||
|
||||
self._printer.print(
|
||||
content=f"Error during LLM call to classify human feedback: {e}. Retrying... ({retry_count}/{MAX_LLM_RETRY})",
|
||||
color="red",
|
||||
)
|
||||
|
||||
if not llm_call_successful:
|
||||
self._printer.print(
|
||||
content="Error processing feedback after multiple attempts.",
|
||||
color="red",
|
||||
)
|
||||
# If the user provides a blank response, assume they are happy with the result
|
||||
if feedback.strip() == "":
|
||||
self.ask_for_human_input = False
|
||||
break
|
||||
|
||||
if additional_changes_response == "false":
|
||||
self.ask_for_human_input = False
|
||||
elif additional_changes_response == "true":
|
||||
self.ask_for_human_input = True
|
||||
# Add human feedback to messages
|
||||
self.messages.append(self._format_msg(f"Feedback: {human_feedback}"))
|
||||
# Invoke the loop again with updated messages
|
||||
formatted_answer = self._invoke_loop()
|
||||
|
||||
if self.crew and self.crew._train:
|
||||
self._handle_crew_training_output(formatted_answer)
|
||||
else:
|
||||
# Unexpected response
|
||||
self._printer.print(
|
||||
content=f"Unexpected response from LLM: '{additional_changes_response}'. Assuming no additional changes requested.",
|
||||
color="red",
|
||||
)
|
||||
self.ask_for_human_input = False
|
||||
answer = self._process_feedback_iteration(feedback)
|
||||
feedback = self._ask_human_input(answer.output)
|
||||
|
||||
return formatted_answer
|
||||
return answer
|
||||
|
||||
def _process_feedback_iteration(self, feedback: str) -> AgentFinish:
|
||||
"""Process a single feedback iteration."""
|
||||
self.messages.append(
|
||||
self._format_msg(
|
||||
self._i18n.slice("feedback_instructions").format(feedback=feedback)
|
||||
)
|
||||
)
|
||||
return self._invoke_loop()
|
||||
|
||||
def _log_feedback_error(self, retry_count: int, error: Exception) -> None:
|
||||
"""Log feedback processing errors."""
|
||||
self._printer.print(
|
||||
content=(
|
||||
f"Error processing feedback: {error}. "
|
||||
f"Retrying... ({retry_count + 1}/{MAX_LLM_RETRY})"
|
||||
),
|
||||
color="red",
|
||||
)
|
||||
|
||||
def _log_max_retries_exceeded(self) -> None:
|
||||
"""Log when max retries for feedback processing are exceeded."""
|
||||
self._printer.print(
|
||||
content=(
|
||||
f"Failed to process feedback after {MAX_LLM_RETRY} attempts. "
|
||||
"Ending feedback loop."
|
||||
),
|
||||
color="red",
|
||||
)
|
||||
|
||||
def _handle_max_iterations_exceeded(self, formatted_answer):
|
||||
"""
|
||||
|
||||
@@ -94,6 +94,13 @@ class CrewAgentParser:
|
||||
|
||||
elif includes_answer:
|
||||
final_answer = text.split(FINAL_ANSWER_ACTION)[-1].strip()
|
||||
# Check whether the final answer ends with triple backticks.
|
||||
if final_answer.endswith("```"):
|
||||
# Count occurrences of triple backticks in the final answer.
|
||||
count = final_answer.count("```")
|
||||
# If count is odd then it's an unmatched trailing set; remove it.
|
||||
if count % 2 != 0:
|
||||
final_answer = final_answer[:-3].rstrip()
|
||||
return AgentFinish(thought, final_answer, text)
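The intent of the backtick check is to strip only an unmatched trailing fence while leaving balanced fences alone; a short standalone illustration (the answer text is an example, not taken from the diff):

```python
# An answer that ends with an unpaired ``` fence: three occurrences in total.
final_answer = "```json\n{\"score\": 10}\n```\n```"

if final_answer.endswith("```") and final_answer.count("```") % 2 != 0:
    # Odd count means the trailing fence has no opener, so drop it.
    final_answer = final_answer[:-3].rstrip()

print(final_answer)  # ```json\n{"score": 10}\n```
```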
|
||||
|
||||
if not re.search(r"Action\s*\d*\s*:[\s]*(.*?)", text, re.DOTALL):
|
||||
@@ -117,11 +124,15 @@ class CrewAgentParser:
|
||||
)
|
||||
|
||||
def _extract_thought(self, text: str) -> str:
|
||||
regex = r"(.*?)(?:\n\nAction|\n\nFinal Answer)"
|
||||
thought_match = re.search(regex, text, re.DOTALL)
|
||||
if thought_match:
|
||||
return thought_match.group(1).strip()
|
||||
return ""
|
||||
thought_index = text.find("\n\nAction")
|
||||
if thought_index == -1:
|
||||
thought_index = text.find("\n\nFinal Answer")
|
||||
if thought_index == -1:
|
||||
return ""
|
||||
thought = text[:thought_index].strip()
|
||||
# Remove any triple backticks from the thought string
|
||||
thought = thought.replace("```", "").strip()
|
||||
return thought
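The new implementation is a plain string scan rather than a regex; a quick illustration of what it returns (the agent text is an example):

```python
text = (
    "I should look this up first```\n\n"
    "Action: search\n"
    "Action Input: {\"query\": \"crewAI\"}"
)

# Mirror of the logic above: everything before the Action block, backticks removed.
thought_index = text.find("\n\nAction")
thought = text[:thought_index].strip().replace("```", "").strip()
print(thought)  # I should look this up first
```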
|
||||
|
||||
def _clean_action(self, text: str) -> str:
|
||||
"""Clean action string by removing non-essential formatting characters."""
|
||||
|
||||
@@ -203,7 +203,6 @@ def install(context):
|
||||
@crewai.command()
|
||||
def run():
|
||||
"""Run the Crew."""
|
||||
click.echo("Running the Crew")
|
||||
run_crew()
|
||||
|
||||
|
||||
@@ -350,7 +349,10 @@ def chat():
|
||||
Start a conversation with the Crew, collecting user-supplied inputs,
|
||||
and using the Chat LLM to generate responses.
|
||||
"""
|
||||
click.echo("Starting a conversation with the Crew")
|
||||
click.secho(
|
||||
"\nStarting a conversation with the Crew\n" "Type 'exit' or Ctrl+C to quit.\n",
|
||||
)
|
||||
|
||||
run_chat()
|
||||
|
||||
|
||||
|
||||
@@ -216,10 +216,43 @@ MODELS = {
|
||||
"watsonx/ibm/granite-3-8b-instruct",
|
||||
],
|
||||
"bedrock": [
|
||||
"bedrock/us.amazon.nova-pro-v1:0",
|
||||
"bedrock/us.amazon.nova-micro-v1:0",
|
||||
"bedrock/us.amazon.nova-lite-v1:0",
|
||||
"bedrock/us.anthropic.claude-3-5-sonnet-20240620-v1:0",
|
||||
"bedrock/us.anthropic.claude-3-5-haiku-20241022-v1:0",
|
||||
"bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
|
||||
"bedrock/us.anthropic.claude-3-7-sonnet-20250219-v1:0",
|
||||
"bedrock/us.anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
"bedrock/us.anthropic.claude-3-opus-20240229-v1:0",
|
||||
"bedrock/us.anthropic.claude-3-haiku-20240307-v1:0",
|
||||
"bedrock/us.meta.llama3-2-11b-instruct-v1:0",
|
||||
"bedrock/us.meta.llama3-2-3b-instruct-v1:0",
|
||||
"bedrock/us.meta.llama3-2-90b-instruct-v1:0",
|
||||
"bedrock/us.meta.llama3-2-1b-instruct-v1:0",
|
||||
"bedrock/us.meta.llama3-1-8b-instruct-v1:0",
|
||||
"bedrock/us.meta.llama3-1-70b-instruct-v1:0",
|
||||
"bedrock/us.meta.llama3-3-70b-instruct-v1:0",
|
||||
"bedrock/us.meta.llama3-1-405b-instruct-v1:0",
|
||||
"bedrock/eu.anthropic.claude-3-5-sonnet-20240620-v1:0",
|
||||
"bedrock/eu.anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
"bedrock/eu.anthropic.claude-3-haiku-20240307-v1:0",
|
||||
"bedrock/eu.meta.llama3-2-3b-instruct-v1:0",
|
||||
"bedrock/eu.meta.llama3-2-1b-instruct-v1:0",
|
||||
"bedrock/apac.anthropic.claude-3-5-sonnet-20240620-v1:0",
|
||||
"bedrock/apac.anthropic.claude-3-5-sonnet-20241022-v2:0",
|
||||
"bedrock/apac.anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
"bedrock/apac.anthropic.claude-3-haiku-20240307-v1:0",
|
||||
"bedrock/amazon.nova-pro-v1:0",
|
||||
"bedrock/amazon.nova-micro-v1:0",
|
||||
"bedrock/amazon.nova-lite-v1:0",
|
||||
"bedrock/anthropic.claude-3-5-sonnet-20240620-v1:0",
|
||||
"bedrock/anthropic.claude-3-5-haiku-20241022-v1:0",
|
||||
"bedrock/anthropic.claude-3-5-sonnet-20241022-v2:0",
|
||||
"bedrock/anthropic.claude-3-7-sonnet-20250219-v1:0",
|
||||
"bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
|
||||
"bedrock/anthropic.claude-3-haiku-20240307-v1:0",
|
||||
"bedrock/anthropic.claude-3-opus-20240229-v1:0",
|
||||
"bedrock/anthropic.claude-3-haiku-20240307-v1:0",
|
||||
"bedrock/anthropic.claude-v2:1",
|
||||
"bedrock/anthropic.claude-v2",
|
||||
"bedrock/anthropic.claude-instant-v1",
|
||||
@@ -234,8 +267,6 @@ MODELS = {
|
||||
"bedrock/ai21.j2-mid-v1",
|
||||
"bedrock/ai21.j2-ultra-v1",
|
||||
"bedrock/ai21.jamba-instruct-v1:0",
|
||||
"bedrock/meta.llama2-13b-chat-v1",
|
||||
"bedrock/meta.llama2-70b-chat-v1",
|
||||
"bedrock/mistral.mistral-7b-instruct-v0:2",
|
||||
"bedrock/mistral.mixtral-8x7b-instruct-v0:1",
|
||||
],
|
||||
|
||||
@@ -1,17 +1,52 @@
|
||||
import json
|
||||
import platform
|
||||
import re
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple
|
||||
|
||||
import click
|
||||
import tomli
|
||||
from packaging import version
|
||||
|
||||
from crewai.cli.utils import read_toml
|
||||
from crewai.cli.version import get_crewai_version
|
||||
from crewai.crew import Crew
|
||||
from crewai.llm import LLM
|
||||
from crewai.types.crew_chat import ChatInputField, ChatInputs
|
||||
from crewai.utilities.llm_utils import create_llm
|
||||
|
||||
MIN_REQUIRED_VERSION = "0.98.0"
|
||||
|
||||
|
||||
def check_conversational_crews_version(
|
||||
crewai_version: str, pyproject_data: dict
|
||||
) -> bool:
|
||||
"""
|
||||
Check if the installed crewAI version supports conversational crews.
|
||||
|
||||
Args:
|
||||
crewai_version: The current version of crewAI.
|
||||
pyproject_data: Dictionary containing pyproject.toml data.
|
||||
|
||||
Returns:
|
||||
bool: True if version check passes, False otherwise.
|
||||
"""
|
||||
try:
|
||||
if version.parse(crewai_version) < version.parse(MIN_REQUIRED_VERSION):
|
||||
click.secho(
|
||||
"You are using an older version of crewAI that doesn't support conversational crews. "
|
||||
"Run 'uv upgrade crewai' to get the latest version.",
|
||||
fg="red",
|
||||
)
|
||||
return False
|
||||
except version.InvalidVersion:
|
||||
click.secho("Invalid crewAI version format detected.", fg="red")
|
||||
return False
|
||||
return True
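The version gate relies only on `packaging.version`; a small standalone sketch of the same comparison (the asserted versions are examples, not part of the diff):

```python
from packaging import version

MIN_REQUIRED_VERSION = "0.98.0"

def supports_conversational_crews(installed: str) -> bool:
    """True only when the installed crewAI version is at least 0.98.0."""
    try:
        return version.parse(installed) >= version.parse(MIN_REQUIRED_VERSION)
    except version.InvalidVersion:
        return False

assert supports_conversational_crews("0.102.0")
assert not supports_conversational_crews("0.95.0")
```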
|
||||
|
||||
|
||||
def run_chat():
|
||||
"""
|
||||
@@ -19,20 +54,47 @@ def run_chat():
|
||||
Incorporates crew_name, crew_description, and input fields to build a tool schema.
|
||||
Exits if crew_name or crew_description are missing.
|
||||
"""
|
||||
crewai_version = get_crewai_version()
|
||||
pyproject_data = read_toml()
|
||||
|
||||
if not check_conversational_crews_version(crewai_version, pyproject_data):
|
||||
return
|
||||
|
||||
crew, crew_name = load_crew_and_name()
|
||||
chat_llm = initialize_chat_llm(crew)
|
||||
if not chat_llm:
|
||||
return
|
||||
|
||||
crew_chat_inputs = generate_crew_chat_inputs(crew, crew_name, chat_llm)
|
||||
crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
|
||||
system_message = build_system_message(crew_chat_inputs)
|
||||
|
||||
# Call the LLM to generate the introductory message
|
||||
introductory_message = chat_llm.call(
|
||||
messages=[{"role": "system", "content": system_message}]
|
||||
# Indicate that the crew is being analyzed
|
||||
click.secho(
|
||||
"\nAnalyzing crew and required inputs - this may take 3 to 30 seconds "
|
||||
"depending on the complexity of your crew.",
|
||||
fg="white",
|
||||
)
|
||||
click.secho(f"\nAssistant: {introductory_message}\n", fg="green")
|
||||
|
||||
# Start loading indicator
|
||||
loading_complete = threading.Event()
|
||||
loading_thread = threading.Thread(target=show_loading, args=(loading_complete,))
|
||||
loading_thread.start()
|
||||
|
||||
try:
|
||||
crew_chat_inputs = generate_crew_chat_inputs(crew, crew_name, chat_llm)
|
||||
crew_tool_schema = generate_crew_tool_schema(crew_chat_inputs)
|
||||
system_message = build_system_message(crew_chat_inputs)
|
||||
|
||||
# Call the LLM to generate the introductory message
|
||||
introductory_message = chat_llm.call(
|
||||
messages=[{"role": "system", "content": system_message}]
|
||||
)
|
||||
finally:
|
||||
# Stop loading indicator
|
||||
loading_complete.set()
|
||||
loading_thread.join()
|
||||
|
||||
# Indicate that the analysis is complete
|
||||
click.secho("\nFinished analyzing crew.\n", fg="white")
|
||||
|
||||
click.secho(f"Assistant: {introductory_message}\n", fg="green")
|
||||
|
||||
messages = [
|
||||
{"role": "system", "content": system_message},
|
||||
@@ -43,15 +105,17 @@ def run_chat():
|
||||
crew_chat_inputs.crew_name: create_tool_function(crew, messages),
|
||||
}
|
||||
|
||||
click.secho(
|
||||
"\nEntering an interactive chat loop with function-calling.\n"
|
||||
"Type 'exit' or Ctrl+C to quit.\n",
|
||||
fg="cyan",
|
||||
)
|
||||
|
||||
chat_loop(chat_llm, messages, crew_tool_schema, available_functions)
|
||||
|
||||
|
||||
def show_loading(event: threading.Event):
    """Display animated loading dots while processing."""
    while not event.is_set():
        print(".", end="", flush=True)
        time.sleep(1)
    print()
|
||||
|
||||
|
||||
def initialize_chat_llm(crew: Crew) -> Optional[LLM]:
|
||||
"""Initializes the chat LLM and handles exceptions."""
|
||||
try:
|
||||
@@ -85,7 +149,7 @@ def build_system_message(crew_chat_inputs: ChatInputs) -> str:
|
||||
"Please keep your responses concise and friendly. "
|
||||
"If a user asks a question outside the crew's scope, provide a brief answer and remind them of the crew's purpose. "
|
||||
"After calling the tool, be prepared to take user feedback and make adjustments as needed. "
|
||||
"If you are ever unsure about a user's request or need clarification, ask the user for more information."
|
||||
"If you are ever unsure about a user's request or need clarification, ask the user for more information. "
|
||||
"Before doing anything else, introduce yourself with a friendly message like: 'Hey! I'm here to help you with [crew's purpose]. Could you please provide me with [inputs] so we can get started?' "
|
||||
"For example: 'Hey! I'm here to help you with uncovering and reporting cutting-edge developments through thorough research and detailed analysis. Could you please provide me with a topic you're interested in? This will help us generate a comprehensive research report and detailed analysis.'"
|
||||
f"\nCrew Name: {crew_chat_inputs.crew_name}"
|
||||
@@ -102,25 +166,33 @@ def create_tool_function(crew: Crew, messages: List[Dict[str, str]]) -> Any:
|
||||
return run_crew_tool_with_messages
|
||||
|
||||
|
||||
def flush_input():
    """Flush any pending input from the user."""
    if platform.system() == "Windows":
        # Windows platform
        import msvcrt

        while msvcrt.kbhit():
            msvcrt.getch()
    else:
        # Unix-like platforms (Linux, macOS)
        import termios

        termios.tcflush(sys.stdin, termios.TCIFLUSH)
|
||||
|
||||
|
||||
def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
|
||||
"""Main chat loop for interacting with the user."""
|
||||
while True:
|
||||
try:
|
||||
user_input = click.prompt("You", type=str)
|
||||
if user_input.strip().lower() in ["exit", "quit"]:
|
||||
click.echo("Exiting chat. Goodbye!")
|
||||
break
|
||||
# Flush any pending input before accepting new input
|
||||
flush_input()
|
||||
|
||||
messages.append({"role": "user", "content": user_input})
|
||||
final_response = chat_llm.call(
|
||||
messages=messages,
|
||||
tools=[crew_tool_schema],
|
||||
available_functions=available_functions,
|
||||
user_input = get_user_input()
|
||||
handle_user_input(
|
||||
user_input, chat_llm, messages, crew_tool_schema, available_functions
|
||||
)
|
||||
|
||||
messages.append({"role": "assistant", "content": final_response})
|
||||
click.secho(f"\nAssistant: {final_response}\n", fg="green")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
click.echo("\nExiting chat. Goodbye!")
|
||||
break
|
||||
@@ -129,6 +201,55 @@ def chat_loop(chat_llm, messages, crew_tool_schema, available_functions):
|
||||
break
|
||||
|
||||
|
||||
def get_user_input() -> str:
    """Collect multi-line user input with exit handling."""
    click.secho(
        "\nYou (type your message below. Press 'Enter' twice when you're done):",
        fg="blue",
    )
    user_input_lines = []
    while True:
        line = input()
        if line.strip().lower() == "exit":
            return "exit"
        if line == "":
            break
        user_input_lines.append(line)
    return "\n".join(user_input_lines)
|
||||
|
||||
|
||||
def handle_user_input(
|
||||
user_input: str,
|
||||
chat_llm: LLM,
|
||||
messages: List[Dict[str, str]],
|
||||
crew_tool_schema: Dict[str, Any],
|
||||
available_functions: Dict[str, Any],
|
||||
) -> None:
|
||||
if user_input.strip().lower() == "exit":
|
||||
click.echo("Exiting chat. Goodbye!")
|
||||
return
|
||||
|
||||
if not user_input.strip():
|
||||
click.echo("Empty message. Please provide input or type 'exit' to quit.")
|
||||
return
|
||||
|
||||
messages.append({"role": "user", "content": user_input})
|
||||
|
||||
# Indicate that assistant is processing
|
||||
click.echo()
|
||||
click.secho("Assistant is processing your input. Please wait...", fg="green")
|
||||
|
||||
# Process assistant's response
|
||||
final_response = chat_llm.call(
|
||||
messages=messages,
|
||||
tools=[crew_tool_schema],
|
||||
available_functions=available_functions,
|
||||
)
|
||||
|
||||
messages.append({"role": "assistant", "content": final_response})
|
||||
click.secho(f"\nAssistant: {final_response}\n", fg="green")
|
||||
|
||||
|
||||
def generate_crew_tool_schema(crew_inputs: ChatInputs) -> dict:
|
||||
"""
|
||||
Dynamically build a LiteLLM 'function' schema for the given crew.
|
||||
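For context, a function schema in the OpenAI/LiteLLM style that this builder is expected to emit might look roughly like the following; the names and descriptions are illustrative, not taken from the diff:

```python
# Hypothetical example of the tool schema shape passed to chat_llm.call(tools=[...]).
crew_tool_schema = {
    "type": "function",
    "function": {
        "name": "research_crew",                 # crew_chat_inputs.crew_name
        "description": "Generates a research report on a given topic.",
        "parameters": {
            "type": "object",
            "properties": {
                "topic": {
                    "type": "string",
                    "description": "The subject the crew should research.",
                },
            },
            "required": ["topic"],
        },
    },
}
```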
@@ -323,10 +444,10 @@ def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) ->
|
||||
):
|
||||
# Replace placeholders with input names
|
||||
task_description = placeholder_pattern.sub(
|
||||
lambda m: m.group(1), task.description
|
||||
lambda m: m.group(1), task.description or ""
|
||||
)
|
||||
expected_output = placeholder_pattern.sub(
|
||||
lambda m: m.group(1), task.expected_output
|
||||
lambda m: m.group(1), task.expected_output or ""
|
||||
)
|
||||
context_texts.append(f"Task Description: {task_description}")
|
||||
context_texts.append(f"Expected Output: {expected_output}")
|
||||
@@ -337,10 +458,10 @@ def generate_input_description_with_ai(input_name: str, crew: Crew, chat_llm) ->
|
||||
or f"{{{input_name}}}" in agent.backstory
|
||||
):
|
||||
# Replace placeholders with input names
|
||||
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role)
|
||||
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal)
|
||||
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role or "")
|
||||
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal or "")
|
||||
agent_backstory = placeholder_pattern.sub(
|
||||
lambda m: m.group(1), agent.backstory
|
||||
lambda m: m.group(1), agent.backstory or ""
|
||||
)
|
||||
context_texts.append(f"Agent Role: {agent_role}")
|
||||
context_texts.append(f"Agent Goal: {agent_goal}")
|
||||
@@ -381,18 +502,20 @@ def generate_crew_description_with_ai(crew: Crew, chat_llm) -> str:
|
||||
for task in crew.tasks:
|
||||
# Replace placeholders with input names
|
||||
task_description = placeholder_pattern.sub(
|
||||
lambda m: m.group(1), task.description
|
||||
lambda m: m.group(1), task.description or ""
|
||||
)
|
||||
expected_output = placeholder_pattern.sub(
|
||||
lambda m: m.group(1), task.expected_output
|
||||
lambda m: m.group(1), task.expected_output or ""
|
||||
)
|
||||
context_texts.append(f"Task Description: {task_description}")
|
||||
context_texts.append(f"Expected Output: {expected_output}")
|
||||
for agent in crew.agents:
|
||||
# Replace placeholders with input names
|
||||
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role)
|
||||
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal)
|
||||
agent_backstory = placeholder_pattern.sub(lambda m: m.group(1), agent.backstory)
|
||||
agent_role = placeholder_pattern.sub(lambda m: m.group(1), agent.role or "")
|
||||
agent_goal = placeholder_pattern.sub(lambda m: m.group(1), agent.goal or "")
|
||||
agent_backstory = placeholder_pattern.sub(
|
||||
lambda m: m.group(1), agent.backstory or ""
|
||||
)
|
||||
context_texts.append(f"Agent Role: {agent_role}")
|
||||
context_texts.append(f"Agent Goal: {agent_goal}")
|
||||
context_texts.append(f"Agent Backstory: {agent_backstory}")
|
||||
|
||||
@@ -2,11 +2,7 @@ import subprocess
|
||||
|
||||
import click
|
||||
|
||||
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
|
||||
from crewai.memory.entity.entity_memory import EntityMemory
|
||||
from crewai.memory.long_term.long_term_memory import LongTermMemory
|
||||
from crewai.memory.short_term.short_term_memory import ShortTermMemory
|
||||
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
|
||||
from crewai.cli.utils import get_crew
|
||||
|
||||
|
||||
def reset_memories_command(
|
||||
@@ -30,30 +26,35 @@ def reset_memories_command(
|
||||
"""
|
||||
|
||||
try:
|
||||
crew = get_crew()
|
||||
if not crew:
|
||||
raise ValueError("No crew found.")
|
||||
if all:
|
||||
ShortTermMemory().reset()
|
||||
EntityMemory().reset()
|
||||
LongTermMemory().reset()
|
||||
TaskOutputStorageHandler().reset()
|
||||
KnowledgeStorage().reset()
|
||||
crew.reset_memories(command_type="all")
|
||||
click.echo("All memories have been reset.")
|
||||
else:
|
||||
if long:
|
||||
LongTermMemory().reset()
|
||||
click.echo("Long term memory has been reset.")
|
||||
return
|
||||
|
||||
if short:
|
||||
ShortTermMemory().reset()
|
||||
click.echo("Short term memory has been reset.")
|
||||
if entity:
|
||||
EntityMemory().reset()
|
||||
click.echo("Entity memory has been reset.")
|
||||
if kickoff_outputs:
|
||||
TaskOutputStorageHandler().reset()
|
||||
click.echo("Latest Kickoff outputs stored has been reset.")
|
||||
if knowledge:
|
||||
KnowledgeStorage().reset()
|
||||
click.echo("Knowledge has been reset.")
|
||||
if not any([long, short, entity, kickoff_outputs, knowledge]):
|
||||
click.echo(
|
||||
"No memory type specified. Please specify at least one type to reset."
|
||||
)
|
||||
return
|
||||
|
||||
if long:
|
||||
crew.reset_memories(command_type="long")
|
||||
click.echo("Long term memory has been reset.")
|
||||
if short:
|
||||
crew.reset_memories(command_type="short")
|
||||
click.echo("Short term memory has been reset.")
|
||||
if entity:
|
||||
crew.reset_memories(command_type="entity")
|
||||
click.echo("Entity memory has been reset.")
|
||||
if kickoff_outputs:
|
||||
crew.reset_memories(command_type="kickoff_outputs")
|
||||
click.echo("Latest Kickoff outputs stored has been reset.")
|
||||
if knowledge:
|
||||
crew.reset_memories(command_type="knowledge")
|
||||
click.echo("Knowledge has been reset.")
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
click.echo(f"An error occurred while resetting the memories: {e}", err=True)
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
import subprocess
|
||||
from enum import Enum
|
||||
from typing import List, Optional
|
||||
|
||||
import click
|
||||
from packaging import version
|
||||
@@ -7,16 +9,24 @@ from crewai.cli.utils import read_toml
|
||||
from crewai.cli.version import get_crewai_version
|
||||
|
||||
|
||||
class CrewType(Enum):
|
||||
STANDARD = "standard"
|
||||
FLOW = "flow"
|
||||
|
||||
|
||||
def run_crew() -> None:
|
||||
"""
|
||||
Run the crew by running a command in the UV environment.
|
||||
Run the crew or flow by running a command in the UV environment.
|
||||
|
||||
Starting from version 0.103.0, this command can be used to run both
|
||||
standard crews and flows. For flows, it detects the type from pyproject.toml
|
||||
and automatically runs the appropriate command.
|
||||
"""
|
||||
command = ["uv", "run", "run_crew"]
|
||||
crewai_version = get_crewai_version()
|
||||
min_required_version = "0.71.0"
|
||||
|
||||
pyproject_data = read_toml()
|
||||
|
||||
# Check for legacy poetry configuration
|
||||
if pyproject_data.get("tool", {}).get("poetry") and (
|
||||
version.parse(crewai_version) < version.parse(min_required_version)
|
||||
):
|
||||
@@ -26,18 +36,54 @@ def run_crew() -> None:
|
||||
fg="red",
|
||||
)
|
||||
|
||||
# Determine crew type
|
||||
is_flow = pyproject_data.get("tool", {}).get("crewai", {}).get("type") == "flow"
|
||||
crew_type = CrewType.FLOW if is_flow else CrewType.STANDARD
|
||||
|
||||
# Display appropriate message
|
||||
click.echo(f"Running the {'Flow' if is_flow else 'Crew'}")
|
||||
|
||||
# Execute the appropriate command
|
||||
execute_command(crew_type)
|
||||
|
||||
|
||||
def execute_command(crew_type: CrewType) -> None:
|
||||
"""
|
||||
Execute the appropriate command based on crew type.
|
||||
|
||||
Args:
|
||||
crew_type: The type of crew to run
|
||||
"""
|
||||
command = ["uv", "run", "kickoff" if crew_type == CrewType.FLOW else "run_crew"]
|
||||
|
||||
try:
|
||||
subprocess.run(command, capture_output=False, text=True, check=True)
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
click.echo(f"An error occurred while running the crew: {e}", err=True)
|
||||
click.echo(e.output, err=True, nl=True)
|
||||
|
||||
if pyproject_data.get("tool", {}).get("poetry"):
|
||||
click.secho(
|
||||
"It's possible that you are using an old version of crewAI that uses poetry, please run `crewai update` to update your pyproject.toml to use uv.",
|
||||
fg="yellow",
|
||||
)
|
||||
handle_error(e, crew_type)
|
||||
|
||||
except Exception as e:
|
||||
click.echo(f"An unexpected error occurred: {e}", err=True)
|
||||
|
||||
|
||||
def handle_error(error: subprocess.CalledProcessError, crew_type: CrewType) -> None:
|
||||
"""
|
||||
Handle subprocess errors with appropriate messaging.
|
||||
|
||||
Args:
|
||||
error: The subprocess error that occurred
|
||||
crew_type: The type of crew that was being run
|
||||
"""
|
||||
entity_type = "flow" if crew_type == CrewType.FLOW else "crew"
|
||||
click.echo(f"An error occurred while running the {entity_type}: {error}", err=True)
|
||||
|
||||
if error.output:
|
||||
click.echo(error.output, err=True, nl=True)
|
||||
|
||||
pyproject_data = read_toml()
|
||||
if pyproject_data.get("tool", {}).get("poetry"):
|
||||
click.secho(
|
||||
"It's possible that you are using an old version of crewAI that uses poetry, "
|
||||
"please run `crewai update` to update your pyproject.toml to use uv.",
|
||||
fg="yellow",
|
||||
)
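The flow/crew decision hinges on a single key in pyproject.toml; a small sketch of the detection (using the stdlib `tomllib` here, whereas the CLI reads the file through its own `read_toml` helper):

```python
import tomllib  # Python 3.11+; older interpreters would need the tomli backport

# A project generated as a flow is expected to carry this marker in pyproject.toml.
pyproject = tomllib.loads('[tool.crewai]\ntype = "flow"\n')

is_flow = pyproject.get("tool", {}).get("crewai", {}).get("type") == "flow"
command = ["uv", "run", "kickoff" if is_flow else "run_crew"]
print(command)  # ['uv', 'run', 'kickoff']
```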
|
||||
|
||||
1
src/crewai/cli/templates/crew/.gitignore
vendored
@@ -1,2 +1,3 @@
|
||||
.env
|
||||
__pycache__/
|
||||
.DS_Store
|
||||
|
||||
@@ -1,62 +1,62 @@
|
||||
from crewai import Agent, Crew, Process, Task
|
||||
from crewai.project import CrewBase, agent, crew, task
|
||||
|
||||
# If you want to run a snippet of code before or after the crew starts,
|
||||
# If you want to run a snippet of code before or after the crew starts,
|
||||
# you can use the @before_kickoff and @after_kickoff decorators
|
||||
# https://docs.crewai.com/concepts/crews#example-crew-class-with-decorators
|
||||
|
||||
@CrewBase
|
||||
class {{crew_name}}():
|
||||
"""{{crew_name}} crew"""
|
||||
"""{{crew_name}} crew"""
|
||||
|
||||
# Learn more about YAML configuration files here:
|
||||
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
|
||||
# Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
|
||||
agents_config = 'config/agents.yaml'
|
||||
tasks_config = 'config/tasks.yaml'
|
||||
# Learn more about YAML configuration files here:
|
||||
# Agents: https://docs.crewai.com/concepts/agents#yaml-configuration-recommended
|
||||
# Tasks: https://docs.crewai.com/concepts/tasks#yaml-configuration-recommended
|
||||
agents_config = 'config/agents.yaml'
|
||||
tasks_config = 'config/tasks.yaml'
|
||||
|
||||
# If you would like to add tools to your agents, you can learn more about it here:
|
||||
# https://docs.crewai.com/concepts/agents#agent-tools
|
||||
@agent
|
||||
def researcher(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['researcher'],
|
||||
verbose=True
|
||||
)
|
||||
# If you would like to add tools to your agents, you can learn more about it here:
|
||||
# https://docs.crewai.com/concepts/agents#agent-tools
|
||||
@agent
|
||||
def researcher(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['researcher'],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
@agent
|
||||
def reporting_analyst(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['reporting_analyst'],
|
||||
verbose=True
|
||||
)
|
||||
@agent
|
||||
def reporting_analyst(self) -> Agent:
|
||||
return Agent(
|
||||
config=self.agents_config['reporting_analyst'],
|
||||
verbose=True
|
||||
)
|
||||
|
||||
# To learn more about structured task outputs,
|
||||
# task dependencies, and task callbacks, check out the documentation:
|
||||
# https://docs.crewai.com/concepts/tasks#overview-of-a-task
|
||||
@task
|
||||
def research_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['research_task'],
|
||||
)
|
||||
# To learn more about structured task outputs,
|
||||
# task dependencies, and task callbacks, check out the documentation:
|
||||
# https://docs.crewai.com/concepts/tasks#overview-of-a-task
|
||||
@task
|
||||
def research_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['research_task'],
|
||||
)
|
||||
|
||||
@task
|
||||
def reporting_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['reporting_task'],
|
||||
output_file='report.md'
|
||||
)
|
||||
@task
|
||||
def reporting_task(self) -> Task:
|
||||
return Task(
|
||||
config=self.tasks_config['reporting_task'],
|
||||
output_file='report.md'
|
||||
)
|
||||
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the {{crew_name}} crew"""
|
||||
# To learn how to add knowledge sources to your crew, check out the documentation:
|
||||
# https://docs.crewai.com/concepts/knowledge#what-is-knowledge
|
||||
@crew
|
||||
def crew(self) -> Crew:
|
||||
"""Creates the {{crew_name}} crew"""
|
||||
# To learn how to add knowledge sources to your crew, check out the documentation:
|
||||
# https://docs.crewai.com/concepts/knowledge#what-is-knowledge
|
||||
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically created by the @agent decorator
|
||||
tasks=self.tasks, # Automatically created by the @task decorator
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
# process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
|
||||
)
|
||||
return Crew(
|
||||
agents=self.agents, # Automatically created by the @agent decorator
|
||||
tasks=self.tasks, # Automatically created by the @task decorator
|
||||
process=Process.sequential,
|
||||
verbose=True,
|
||||
# process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
|
||||
)
|
||||
|
||||
@@ -56,7 +56,8 @@ def test():
|
||||
Test the crew execution and return the results.
|
||||
"""
|
||||
inputs = {
|
||||
"topic": "AI LLMs"
|
||||
"topic": "AI LLMs",
|
||||
"current_year": str(datetime.now().year)
|
||||
}
|
||||
try:
|
||||
{{crew_name}}().crew().test(n_iterations=int(sys.argv[1]), openai_model_name=sys.argv[2], inputs=inputs)
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.95.0,<1.0.0"
|
||||
"crewai[tools]>=0.102.0,<1.0.0"
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
1
src/crewai/cli/templates/flow/.gitignore
vendored
@@ -1,3 +1,4 @@
|
||||
.env
|
||||
__pycache__/
|
||||
lib/
|
||||
.DS_Store
|
||||
|
||||
@@ -30,13 +30,13 @@ crewai install
|
||||
|
||||
## Running the Project
|
||||
|
||||
To kickstart your crew of AI agents and begin task execution, run this from the root folder of your project:
|
||||
To kickstart your flow and begin execution, run this from the root folder of your project:
|
||||
|
||||
```bash
|
||||
crewai run
|
||||
```
|
||||
|
||||
This command initializes the {{name}} Crew, assembling the agents and assigning them tasks as defined in your configuration.
|
||||
This command initializes the {{name}} Flow as defined in your configuration.
|
||||
|
||||
This example, unmodified, will create a `report.md` file with the output of research on LLMs in the root folder.
|
||||
|
||||
|
||||
@@ -3,7 +3,7 @@ from random import randint
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.flow.flow import Flow, listen, start
|
||||
from crewai.flow import Flow, listen, start
|
||||
|
||||
from {{folder_name}}.crews.poem_crew.poem_crew import PoemCrew
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "{{name}} using crewAI"
|
||||
authors = [{ name = "Your Name", email = "you@example.com" }]
|
||||
requires-python = ">=3.10,<3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.95.0,<1.0.0",
|
||||
"crewai[tools]>=0.102.0,<1.0.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
|
||||
@@ -5,7 +5,7 @@ description = "Power up your crews with {{folder_name}}"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10,<3.13"
|
||||
dependencies = [
|
||||
"crewai[tools]>=0.95.0"
|
||||
"crewai[tools]>=0.102.0"
|
||||
]
|
||||
|
||||
[tool.crewai]
|
||||
|
||||
@@ -9,6 +9,7 @@ import tomli
|
||||
from rich.console import Console
|
||||
|
||||
from crewai.cli.constants import ENV_VARS
|
||||
from crewai.crew import Crew
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
import tomllib
|
||||
@@ -247,3 +248,66 @@ def write_env_file(folder_path, env_vars):
|
||||
with open(env_file_path, "w") as file:
|
||||
for key, value in env_vars.items():
|
||||
file.write(f"{key}={value}\n")
|
||||
|
||||
|
||||
def get_crew(crew_path: str = "crew.py", require: bool = False) -> Crew | None:
|
||||
"""Get the crew instance from the crew.py file."""
|
||||
try:
|
||||
import importlib.util
|
||||
import os
|
||||
|
||||
for root, _, files in os.walk("."):
|
||||
if crew_path in files:
|
||||
crew_os_path = os.path.join(root, crew_path)
|
||||
try:
|
||||
spec = importlib.util.spec_from_file_location(
|
||||
"crew_module", crew_os_path
|
||||
)
|
||||
if not spec or not spec.loader:
|
||||
continue
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
try:
|
||||
sys.modules[spec.name] = module
|
||||
spec.loader.exec_module(module)
|
||||
|
||||
for attr_name in dir(module):
|
||||
attr = getattr(module, attr_name)
|
||||
try:
|
||||
if isinstance(attr, Crew) and hasattr(attr, "kickoff"):
|
||||
print(
|
||||
f"Found valid crew object in attribute '{attr_name}' at {crew_os_path}."
|
||||
)
|
||||
return attr
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error processing attribute {attr_name}: {e}")
|
||||
continue
|
||||
|
||||
except Exception as exec_error:
|
||||
print(f"Error executing module: {exec_error}")
|
||||
import traceback
|
||||
|
||||
print(f"Traceback: {traceback.format_exc()}")
|
||||
|
||||
except (ImportError, AttributeError) as e:
|
||||
if require:
|
||||
console.print(
|
||||
f"Error importing crew from {crew_path}: {str(e)}",
|
||||
style="bold red",
|
||||
)
|
||||
continue
|
||||
|
||||
break
|
||||
|
||||
if require:
|
||||
console.print("No valid Crew instance found in crew.py", style="bold red")
|
||||
raise SystemExit
|
||||
return None
|
||||
|
||||
except Exception as e:
|
||||
if require:
|
||||
console.print(
|
||||
f"Unexpected error while loading crew: {str(e)}", style="bold red"
|
||||
)
|
||||
raise SystemExit
|
||||
return None
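Usage of the new helper is straightforward; both calls below are illustrative:

```python
from crewai.cli.utils import get_crew

crew = get_crew()              # best-effort lookup: first Crew found in a crew.py, else None
crew = get_crew(require=True)  # same lookup, but prints an error and exits when nothing is found
```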
|
||||
|
||||
@@ -4,6 +4,7 @@ import re
|
||||
import uuid
|
||||
import warnings
|
||||
from concurrent.futures import Future
|
||||
from copy import copy as shallow_copy
|
||||
from hashlib import md5
|
||||
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
|
||||
|
||||
@@ -34,15 +35,25 @@ from crewai.process import Process
|
||||
from crewai.task import Task
|
||||
from crewai.tasks.conditional_task import ConditionalTask
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
from crewai.telemetry import Telemetry
|
||||
from crewai.tools.agent_tools.agent_tools import AgentTools
|
||||
from crewai.tools.base_tool import Tool
|
||||
from crewai.types.crew_chat import ChatInputs
|
||||
from crewai.types.usage_metrics import UsageMetrics
|
||||
from crewai.utilities import I18N, FileHandler, Logger, RPMController
|
||||
from crewai.utilities.constants import TRAINING_DATA_FILE
|
||||
from crewai.utilities.evaluators.crew_evaluator_handler import CrewEvaluator
|
||||
from crewai.utilities.evaluators.task_evaluator import TaskEvaluator
|
||||
from crewai.utilities.events.crew_events import (
|
||||
CrewKickoffCompletedEvent,
|
||||
CrewKickoffFailedEvent,
|
||||
CrewKickoffStartedEvent,
|
||||
CrewTestCompletedEvent,
|
||||
CrewTestFailedEvent,
|
||||
CrewTestStartedEvent,
|
||||
CrewTrainCompletedEvent,
|
||||
CrewTrainFailedEvent,
|
||||
CrewTrainStartedEvent,
|
||||
)
|
||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
||||
from crewai.utilities.formatter import (
|
||||
aggregate_raw_outputs_from_task_outputs,
|
||||
aggregate_raw_outputs_from_tasks,
|
||||
@@ -52,12 +63,6 @@ from crewai.utilities.planning_handler import CrewPlanner
|
||||
from crewai.utilities.task_output_storage_handler import TaskOutputStorageHandler
|
||||
from crewai.utilities.training_handler import CrewTrainingHandler
|
||||
|
||||
try:
|
||||
import agentops # type: ignore
|
||||
except ImportError:
|
||||
agentops = None
|
||||
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning, module="pysbd")
|
||||
|
||||
|
||||
@@ -84,6 +89,7 @@ class Crew(BaseModel):
|
||||
step_callback: Callback to be executed after each step for every agents execution.
|
||||
share_crew: Whether you want to share the complete crew information and execution with crewAI to make the library better, and allow us to train models.
|
||||
planning: Plan the crew execution and add the plan to the crew.
|
||||
chat_llm: The language model used for orchestrating chat interactions with the crew.
|
||||
"""
|
||||
|
||||
__hash__ = object.__hash__ # type: ignore
|
||||
@@ -178,13 +184,13 @@ class Crew(BaseModel):
|
||||
default=None,
|
||||
description="Maximum number of requests per minute for the crew execution to be respected.",
|
||||
)
|
||||
prompt_file: str = Field(
|
||||
prompt_file: Optional[str] = Field(
|
||||
default=None,
|
||||
description="Path to the prompt json file to be used for the crew.",
|
||||
)
|
||||
output_log_file: Optional[str] = Field(
|
||||
output_log_file: Optional[Union[bool, str]] = Field(
|
||||
default=None,
|
||||
description="output_log_file",
|
||||
description="Path to the log file to be saved",
|
||||
)
|
||||
planning: Optional[bool] = Field(
|
||||
default=False,
|
||||
@@ -210,8 +216,9 @@ class Crew(BaseModel):
|
||||
default=None,
|
||||
description="LLM used to handle chatting with the crew.",
|
||||
)
|
||||
_knowledge: Optional[Knowledge] = PrivateAttr(
|
||||
knowledge: Optional[Knowledge] = Field(
|
||||
default=None,
|
||||
description="Knowledge for the crew.",
|
||||
)
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@@ -249,8 +256,6 @@ class Crew(BaseModel):
|
||||
if self.function_calling_llm and not isinstance(self.function_calling_llm, LLM):
|
||||
self.function_calling_llm = create_llm(self.function_calling_llm)
|
||||
|
||||
self._telemetry = Telemetry()
|
||||
self._telemetry.set_tracer()
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
@@ -273,12 +278,26 @@ class Crew(BaseModel):
|
||||
if self.entity_memory
|
||||
else EntityMemory(crew=self, embedder_config=self.embedder)
|
||||
)
|
||||
if hasattr(self, "memory_config") and self.memory_config is not None:
|
||||
self._user_memory = (
|
||||
self.user_memory if self.user_memory else UserMemory(crew=self)
|
||||
)
|
||||
if (
|
||||
self.memory_config and "user_memory" in self.memory_config
|
||||
): # Check for user_memory in config
|
||||
user_memory_config = self.memory_config["user_memory"]
|
||||
if isinstance(
|
||||
user_memory_config, UserMemory
|
||||
): # Check if it is already an instance
|
||||
self._user_memory = user_memory_config
|
||||
elif isinstance(
|
||||
user_memory_config, dict
|
||||
): # Check if it's a configuration dict
|
||||
self._user_memory = UserMemory(
|
||||
crew=self, **user_memory_config
|
||||
) # Initialize with config
|
||||
else:
|
||||
raise TypeError(
|
||||
"user_memory must be a UserMemory instance or a configuration dictionary"
|
||||
)
|
||||
else:
|
||||
self._user_memory = None
|
||||
self._user_memory = None # No user memory if not in config
|
||||
return self
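The validator accepts `memory_config["user_memory"]` in exactly two shapes; a brief illustration (the surrounding objects are hypothetical):

```python
# Either pass an existing instance...
memory_config = {"user_memory": existing_user_memory}   # a UserMemory built elsewhere
# ...or a plain dict of kwargs, which becomes UserMemory(crew=self, **config).
memory_config = {"user_memory": {}}
# Anything else raises:
# TypeError("user_memory must be a UserMemory instance or a configuration dictionary")
```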
|
||||
|
||||
@model_validator(mode="after")
|
||||
@@ -289,9 +308,9 @@ class Crew(BaseModel):
|
||||
if isinstance(self.knowledge_sources, list) and all(
|
||||
isinstance(k, BaseKnowledgeSource) for k in self.knowledge_sources
|
||||
):
|
||||
self._knowledge = Knowledge(
|
||||
self.knowledge = Knowledge(
|
||||
sources=self.knowledge_sources,
|
||||
embedder_config=self.embedder,
|
||||
embedder=self.embedder,
|
||||
collection_name="crew",
|
||||
)
|
||||
|
||||
@@ -378,6 +397,22 @@ class Crew(BaseModel):
|
||||
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_must_have_non_conditional_task(self) -> "Crew":
|
||||
"""Ensure that a crew has at least one non-conditional task."""
|
||||
if not self.tasks:
|
||||
return self
|
||||
non_conditional_count = sum(
|
||||
1 for task in self.tasks if not isinstance(task, ConditionalTask)
|
||||
)
|
||||
if non_conditional_count == 0:
|
||||
raise PydanticCustomError(
|
||||
"only_conditional_tasks",
|
||||
"Crew must include at least one non-conditional task",
|
||||
{},
|
||||
)
|
||||
return self
|
||||
|
||||
@model_validator(mode="after")
|
||||
def validate_first_task(self) -> "Crew":
|
||||
"""Ensure the first task is not a ConditionalTask."""
|
||||
@@ -489,83 +524,121 @@ class Crew(BaseModel):
|
||||
self, n_iterations: int, filename: str, inputs: Optional[Dict[str, Any]] = {}
|
||||
) -> None:
|
||||
"""Trains the crew for a given number of iterations."""
|
||||
train_crew = self.copy()
|
||||
train_crew._setup_for_training(filename)
|
||||
try:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewTrainStartedEvent(
|
||||
crew_name=self.name or "crew",
|
||||
n_iterations=n_iterations,
|
||||
filename=filename,
|
||||
inputs=inputs,
|
||||
),
|
||||
)
|
||||
train_crew = self.copy()
|
||||
train_crew._setup_for_training(filename)
|
||||
|
||||
for n_iteration in range(n_iterations):
|
||||
train_crew._train_iteration = n_iteration
|
||||
train_crew.kickoff(inputs=inputs)
|
||||
for n_iteration in range(n_iterations):
|
||||
train_crew._train_iteration = n_iteration
|
||||
train_crew.kickoff(inputs=inputs)
|
||||
|
||||
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
|
||||
training_data = CrewTrainingHandler(TRAINING_DATA_FILE).load()
|
||||
|
||||
for agent in train_crew.agents:
|
||||
if training_data.get(str(agent.id)):
|
||||
result = TaskEvaluator(agent).evaluate_training_data(
|
||||
training_data=training_data, agent_id=str(agent.id)
|
||||
)
|
||||
for agent in train_crew.agents:
|
||||
if training_data.get(str(agent.id)):
|
||||
result = TaskEvaluator(agent).evaluate_training_data(
|
||||
training_data=training_data, agent_id=str(agent.id)
|
||||
)
|
||||
CrewTrainingHandler(filename).save_trained_data(
|
||||
agent_id=str(agent.role), trained_data=result.model_dump()
|
||||
)
|
||||
|
||||
CrewTrainingHandler(filename).save_trained_data(
|
||||
agent_id=str(agent.role), trained_data=result.model_dump()
|
||||
)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewTrainCompletedEvent(
|
||||
crew_name=self.name or "crew",
|
||||
n_iterations=n_iterations,
|
||||
filename=filename,
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewTrainFailedEvent(error=str(e), crew_name=self.name or "crew"),
|
||||
)
|
||||
self._logger.log("error", f"Training failed: {e}", color="red")
|
||||
CrewTrainingHandler(TRAINING_DATA_FILE).clear()
|
||||
CrewTrainingHandler(filename).clear()
|
||||
raise
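A hedged usage sketch of the wrapped training flow (the crew object and filename below are hypothetical): each iteration runs the copied crew and collects human feedback, on success the evaluated results are written to the given file, and any failure clears the intermediate training data before re-raising.

```python
my_crew.train(                              # my_crew defined elsewhere
    n_iterations=2,
    filename="trained_agents_data.pkl",     # hypothetical output file
    inputs={"topic": "AI LLMs"},
)
```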
|
||||
|
||||
def kickoff(
|
||||
self,
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
) -> CrewOutput:
|
||||
for before_callback in self.before_kickoff_callbacks:
|
||||
if inputs is None:
|
||||
inputs = {}
|
||||
inputs = before_callback(inputs)
|
||||
try:
|
||||
for before_callback in self.before_kickoff_callbacks:
|
||||
if inputs is None:
|
||||
inputs = {}
|
||||
inputs = before_callback(inputs)
|
||||
|
||||
"""Starts the crew to work on its assigned tasks."""
|
||||
self._execution_span = self._telemetry.crew_execution_span(self, inputs)
|
||||
self._task_output_handler.reset()
|
||||
self._logging_color = "bold_purple"
|
||||
|
||||
if inputs is not None:
|
||||
self._inputs = inputs
|
||||
self._interpolate_inputs(inputs)
|
||||
self._set_tasks_callbacks()
|
||||
|
||||
i18n = I18N(prompt_file=self.prompt_file)
|
||||
|
||||
for agent in self.agents:
|
||||
agent.i18n = i18n
|
||||
# type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
|
||||
agent.crew = self # type: ignore[attr-defined]
|
||||
# TODO: Create an AgentFunctionCalling protocol for future refactoring
|
||||
if not agent.function_calling_llm: # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
|
||||
agent.function_calling_llm = self.function_calling_llm # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
|
||||
|
||||
if not agent.step_callback: # type: ignore # "BaseAgent" has no attribute "step_callback"
|
||||
agent.step_callback = self.step_callback # type: ignore # "BaseAgent" has no attribute "step_callback"
|
||||
|
||||
agent.create_agent_executor()
|
||||
|
||||
if self.planning:
|
||||
self._handle_crew_planning()
|
||||
|
||||
metrics: List[UsageMetrics] = []
|
||||
|
||||
if self.process == Process.sequential:
|
||||
result = self._run_sequential_process()
|
||||
elif self.process == Process.hierarchical:
|
||||
result = self._run_hierarchical_process()
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"The process '{self.process}' is not implemented yet."
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewKickoffStartedEvent(crew_name=self.name or "crew", inputs=inputs),
|
||||
)
|
||||
|
||||
for after_callback in self.after_kickoff_callbacks:
|
||||
result = after_callback(result)
|
||||
# Starts the crew to work on its assigned tasks.
|
||||
self._task_output_handler.reset()
|
||||
self._logging_color = "bold_purple"
|
||||
|
||||
metrics += [agent._token_process.get_summary() for agent in self.agents]
|
||||
if inputs is not None:
|
||||
self._inputs = inputs
|
||||
self._interpolate_inputs(inputs)
|
||||
self._set_tasks_callbacks()
|
||||
|
||||
self.usage_metrics = UsageMetrics()
|
||||
for metric in metrics:
|
||||
self.usage_metrics.add_usage_metrics(metric)
|
||||
i18n = I18N(prompt_file=self.prompt_file)
|
||||
|
||||
return result
|
||||
for agent in self.agents:
|
||||
agent.i18n = i18n
|
||||
# type: ignore[attr-defined] # Argument 1 to "_interpolate_inputs" of "Crew" has incompatible type "dict[str, Any] | None"; expected "dict[str, Any]"
|
||||
agent.crew = self # type: ignore[attr-defined]
|
||||
agent.set_knowledge(crew_embedder=self.embedder)
|
||||
# TODO: Create an AgentFunctionCalling protocol for future refactoring
|
||||
if not agent.function_calling_llm: # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
|
||||
agent.function_calling_llm = self.function_calling_llm # type: ignore # "BaseAgent" has no attribute "function_calling_llm"
|
||||
|
||||
if not agent.step_callback: # type: ignore # "BaseAgent" has no attribute "step_callback"
|
||||
agent.step_callback = self.step_callback # type: ignore # "BaseAgent" has no attribute "step_callback"
|
||||
|
||||
agent.create_agent_executor()
|
||||
|
||||
if self.planning:
|
||||
self._handle_crew_planning()
|
||||
|
||||
metrics: List[UsageMetrics] = []
|
||||
|
||||
if self.process == Process.sequential:
|
||||
result = self._run_sequential_process()
|
||||
elif self.process == Process.hierarchical:
|
||||
result = self._run_hierarchical_process()
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"The process '{self.process}' is not implemented yet."
|
||||
)
|
||||
|
||||
for after_callback in self.after_kickoff_callbacks:
|
||||
result = after_callback(result)
|
||||
|
||||
metrics += [agent._token_process.get_summary() for agent in self.agents]
|
||||
|
||||
self.usage_metrics = UsageMetrics()
|
||||
for metric in metrics:
|
||||
self.usage_metrics.add_usage_metrics(metric)
|
||||
return result
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewKickoffFailedEvent(error=str(e), crew_name=self.name or "crew"),
|
||||
)
|
||||
raise
|
||||
|
||||
def kickoff_for_each(self, inputs: List[Dict[str, Any]]) -> List[CrewOutput]:
|
||||
"""Executes the Crew's workflow for each input in the list and aggregates results."""
|
||||
@@ -674,12 +747,7 @@ class Crew(BaseModel):
|
||||
manager.tools = []
|
||||
raise Exception("Manager agent should not have tools")
|
||||
else:
|
||||
self.manager_llm = (
|
||||
getattr(self.manager_llm, "model_name", None)
|
||||
or getattr(self.manager_llm, "model", None)
|
||||
or getattr(self.manager_llm, "deployment_name", None)
|
||||
or self.manager_llm
|
||||
)
|
||||
self.manager_llm = create_llm(self.manager_llm)
|
||||
manager = Agent(
|
||||
role=i18n.retrieve("hierarchical_manager_agent", "role"),
|
||||
goal=i18n.retrieve("hierarchical_manager_agent", "goal"),
|
||||
@@ -739,6 +807,8 @@ class Crew(BaseModel):
|
||||
task, task_outputs, futures, task_index, was_replayed
|
||||
)
|
||||
if skipped_task_output:
|
||||
task_outputs.append(skipped_task_output)
|
||||
last_sync_output = skipped_task_output
|
||||
continue
|
||||
|
||||
if task.async_execution:
|
||||
@@ -752,8 +822,10 @@ class Crew(BaseModel):
|
||||
)
|
||||
futures.append((task, future, task_index))
|
||||
else:
|
||||
# Process any pending async tasks before executing a sync task
|
||||
if futures:
|
||||
task_outputs = self._process_async_tasks(futures, was_replayed)
|
||||
processed_outputs = self._process_async_tasks(futures, was_replayed)
|
||||
task_outputs.extend(processed_outputs)
|
||||
futures.clear()
|
||||
|
||||
context = self._get_context(task, task_outputs)
|
||||
@@ -762,12 +834,15 @@ class Crew(BaseModel):
|
||||
context=context,
|
||||
tools=tools_for_task,
|
||||
)
|
||||
task_outputs = [task_output]
|
||||
task_outputs.append(task_output)
|
||||
last_sync_output = task_output
|
||||
self._process_task_result(task, task_output)
|
||||
self._store_execution_log(task, task_output, task_index, was_replayed)
|
||||
|
||||
# Process any remaining async tasks at the end
|
||||
if futures:
|
||||
task_outputs = self._process_async_tasks(futures, was_replayed)
|
||||
processed_outputs = self._process_async_tasks(futures, was_replayed)
|
||||
task_outputs.extend(processed_outputs)
|
||||
|
||||
return self._create_crew_output(task_outputs)
|
||||
|
||||
@@ -779,12 +854,17 @@ class Crew(BaseModel):
|
||||
task_index: int,
|
||||
was_replayed: bool,
|
||||
) -> Optional[TaskOutput]:
|
||||
# Process any pending async tasks to ensure we have the most up-to-date context
|
||||
if futures:
|
||||
task_outputs = self._process_async_tasks(futures, was_replayed)
|
||||
processed_outputs = self._process_async_tasks(futures, was_replayed)
|
||||
task_outputs.extend(processed_outputs)
|
||||
futures.clear()
|
||||
|
||||
previous_output = task_outputs[task_index - 1] if task_outputs else None
|
||||
if previous_output is not None and not task.should_execute(previous_output):
|
||||
# Get the previous output to evaluate the condition
|
||||
previous_output = task_outputs[-1] if task_outputs else None
|
||||
|
||||
# If there's no previous output or the condition evaluates to False, skip the task
|
||||
if previous_output is None or not task.should_execute(previous_output):
|
||||
self._logger.log(
|
||||
"debug",
|
||||
f"Skipping conditional task: {task.description}",
|
||||
@@ -792,8 +872,13 @@ class Crew(BaseModel):
|
||||
)
|
||||
skipped_task_output = task.get_skipped_task_output()
|
||||
|
||||
# Store the execution log for the skipped task
|
||||
if not was_replayed:
|
||||
self._store_execution_log(task, skipped_task_output, task_index)
|
||||
|
||||
# Set the output on the task itself so it can be referenced later
|
||||
task.output = skipped_task_output
|
||||
|
||||
return skipped_task_output
|
||||
return None
|
||||
|
||||
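The block above skips a conditional task when the previous output fails its condition and records a skipped `TaskOutput` instead. A hedged sketch of what that looks like from the caller's side; the `ConditionalTask` and `TaskOutput` import paths are assumptions based on the package layout:

```python
from crewai import Agent, Crew, Task
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput

def has_findings(output: TaskOutput) -> bool:
    # Condition evaluated against the previous task's output.
    return "finding" in output.raw.lower()

writer = Agent(role="Writer", goal="Write a brief", backstory="Example agent")
research = Task(description="List findings about X", expected_output="Findings", agent=writer)
summary = ConditionalTask(
    description="Summarize the findings",
    expected_output="Summary",
    agent=writer,
    condition=has_findings,
)

Crew(agents=[writer], tasks=[research, summary]).kickoff()
```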
@@ -905,20 +990,29 @@ class Crew(BaseModel):
|
||||
)
|
||||
|
||||
def _create_crew_output(self, task_outputs: List[TaskOutput]) -> CrewOutput:
|
||||
if len(task_outputs) != 1:
|
||||
raise ValueError(
|
||||
"Something went wrong. Kickoff should return only one task output."
|
||||
)
|
||||
final_task_output = task_outputs[0]
|
||||
if not task_outputs:
|
||||
raise ValueError("No task outputs available to create crew output.")
|
||||
|
||||
# Filter out empty outputs and get the last valid one as the main output
|
||||
valid_outputs = [t for t in task_outputs if t.raw]
|
||||
if not valid_outputs:
|
||||
raise ValueError("No valid task outputs available to create crew output.")
|
||||
final_task_output = valid_outputs[-1]
|
||||
|
||||
final_string_output = final_task_output.raw
|
||||
self._finish_execution(final_string_output)
|
||||
token_usage = self.calculate_usage_metrics()
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewKickoffCompletedEvent(
|
||||
crew_name=self.name or "crew", output=final_task_output
|
||||
),
|
||||
)
|
||||
return CrewOutput(
|
||||
raw=final_task_output.raw,
|
||||
pydantic=final_task_output.pydantic,
|
||||
json_dict=final_task_output.json_dict,
|
||||
tasks_output=[task.output for task in self.tasks if task.output],
|
||||
tasks_output=task_outputs,
|
||||
token_usage=token_usage,
|
||||
)
|
||||
|
||||
@@ -991,8 +1085,8 @@ class Crew(BaseModel):
|
||||
return result
|
||||
|
||||
def query_knowledge(self, query: List[str]) -> Union[List[Dict[str, Any]], None]:
|
||||
if self._knowledge:
|
||||
return self._knowledge.query(query)
|
||||
if self.knowledge:
|
||||
return self.knowledge.query(query)
|
||||
return None
|
||||
|
||||
def fetch_inputs(self) -> Set[str]:
|
||||
@@ -1033,9 +1127,10 @@ class Crew(BaseModel):
|
||||
"_short_term_memory",
|
||||
"_long_term_memory",
|
||||
"_entity_memory",
|
||||
"_telemetry",
|
||||
"agents",
|
||||
"tasks",
|
||||
"knowledge_sources",
|
||||
"knowledge",
|
||||
}
|
||||
|
||||
cloned_agents = [agent.copy() for agent in self.agents]
|
||||
@@ -1043,6 +1138,9 @@ class Crew(BaseModel):
|
||||
task_mapping = {}
|
||||
|
||||
cloned_tasks = []
|
||||
existing_knowledge_sources = shallow_copy(self.knowledge_sources)
|
||||
existing_knowledge = shallow_copy(self.knowledge)
|
||||
|
||||
for task in self.tasks:
|
||||
cloned_task = task.copy(cloned_agents, task_mapping)
|
||||
cloned_tasks.append(cloned_task)
|
||||
@@ -1062,7 +1160,13 @@ class Crew(BaseModel):
|
||||
copied_data.pop("agents", None)
|
||||
copied_data.pop("tasks", None)
|
||||
|
||||
copied_crew = Crew(**copied_data, agents=cloned_agents, tasks=cloned_tasks)
|
||||
copied_crew = Crew(
|
||||
**copied_data,
|
||||
agents=cloned_agents,
|
||||
tasks=cloned_tasks,
|
||||
knowledge_sources=existing_knowledge_sources,
|
||||
knowledge=existing_knowledge,
|
||||
)
|
||||
|
||||
return copied_crew
|
||||
|
||||
@@ -1088,13 +1192,6 @@ class Crew(BaseModel):
|
||||
def _finish_execution(self, final_string_output: str) -> None:
|
||||
if self.max_rpm:
|
||||
self._rpm_controller.stop_rpm_counter()
|
||||
if agentops:
|
||||
agentops.end_session(
|
||||
end_state="Success",
|
||||
end_state_reason="Finished Execution",
|
||||
is_auto_end=True,
|
||||
)
|
||||
self._telemetry.end_crew(self, final_string_output)
|
||||
|
||||
def calculate_usage_metrics(self) -> UsageMetrics:
|
||||
"""Calculates and returns the usage metrics."""
|
||||
@@ -1112,25 +1209,122 @@ class Crew(BaseModel):
|
||||
def test(
|
||||
self,
|
||||
n_iterations: int,
|
||||
openai_model_name: Optional[str] = None,
|
||||
eval_llm: Union[str, InstanceOf[LLM]],
|
||||
inputs: Optional[Dict[str, Any]] = None,
|
||||
) -> None:
|
||||
"""Test and evaluate the Crew with the given inputs for n iterations concurrently using concurrent.futures."""
|
||||
test_crew = self.copy()
|
||||
try:
|
||||
eval_llm = create_llm(eval_llm)
|
||||
if not eval_llm:
|
||||
raise ValueError("Failed to create LLM instance.")
|
||||
|
||||
self._test_execution_span = test_crew._telemetry.test_execution_span(
|
||||
test_crew,
|
||||
n_iterations,
|
||||
inputs,
|
||||
openai_model_name, # type: ignore[arg-type]
|
||||
) # type: ignore[arg-type]
|
||||
evaluator = CrewEvaluator(test_crew, openai_model_name) # type: ignore[arg-type]
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewTestStartedEvent(
|
||||
crew_name=self.name or "crew",
|
||||
n_iterations=n_iterations,
|
||||
eval_llm=eval_llm,
|
||||
inputs=inputs,
|
||||
),
|
||||
)
|
||||
test_crew = self.copy()
|
||||
evaluator = CrewEvaluator(test_crew, eval_llm) # type: ignore[arg-type]
|
||||
|
||||
for i in range(1, n_iterations + 1):
|
||||
evaluator.set_iteration(i)
|
||||
test_crew.kickoff(inputs=inputs)
|
||||
for i in range(1, n_iterations + 1):
|
||||
evaluator.set_iteration(i)
|
||||
test_crew.kickoff(inputs=inputs)
|
||||
|
||||
evaluator.print_crew_evaluation_result()
|
||||
evaluator.print_crew_evaluation_result()
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewTestCompletedEvent(
|
||||
crew_name=self.name or "crew",
|
||||
),
|
||||
)
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
CrewTestFailedEvent(error=str(e), crew_name=self.name or "crew"),
|
||||
)
|
||||
raise
|
||||
|
||||
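The hunk above appears to replace the `openai_model_name` parameter of `test()` with an `eval_llm` that accepts either a model name or an `LLM` instance. A hedged call sketch (the model name is illustrative):

```python
crew.test(n_iterations=2, eval_llm="gpt-4o-mini", inputs={"topic": "flows"})
```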
def __repr__(self):
|
||||
return f"Crew(id={self.id}, process={self.process}, number_of_agents={len(self.agents)}, number_of_tasks={len(self.tasks)})"
|
||||
|
||||
def reset_memories(self, command_type: str) -> None:
|
||||
"""Reset specific or all memories for the crew.
|
||||
|
||||
Args:
|
||||
command_type: Type of memory to reset.
|
||||
Valid options: 'long', 'short', 'entity', 'knowledge',
|
||||
'kickoff_outputs', or 'all'
|
||||
|
||||
Raises:
|
||||
ValueError: If an invalid command type is provided.
|
||||
RuntimeError: If memory reset operation fails.
|
||||
"""
|
||||
VALID_TYPES = frozenset(
|
||||
["long", "short", "entity", "knowledge", "kickoff_outputs", "all"]
|
||||
)
|
||||
|
||||
if command_type not in VALID_TYPES:
|
||||
raise ValueError(
|
||||
f"Invalid command type. Must be one of: {', '.join(sorted(VALID_TYPES))}"
|
||||
)
|
||||
|
||||
try:
|
||||
if command_type == "all":
|
||||
self._reset_all_memories()
|
||||
else:
|
||||
self._reset_specific_memory(command_type)
|
||||
|
||||
self._logger.log("info", f"{command_type} memory has been reset")
|
||||
|
||||
except Exception as e:
|
||||
error_msg = f"Failed to reset {command_type} memory: {str(e)}"
|
||||
self._logger.log("error", error_msg)
|
||||
raise RuntimeError(error_msg) from e
|
||||
|
||||
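A short sketch of `reset_memories()` as documented above; only the listed command types are accepted:

```python
crew.reset_memories("short")   # clear only short-term memory
crew.reset_memories("all")     # clear every initialized memory system

try:
    crew.reset_memories("episodic")  # not a valid type
except ValueError as err:
    print(err)
```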
def _reset_all_memories(self) -> None:
|
||||
"""Reset all available memory systems."""
|
||||
memory_systems = [
|
||||
("short term", getattr(self, "_short_term_memory", None)),
|
||||
("entity", getattr(self, "_entity_memory", None)),
|
||||
("long term", getattr(self, "_long_term_memory", None)),
|
||||
("task output", getattr(self, "_task_output_handler", None)),
|
||||
("knowledge", getattr(self, "knowledge", None)),
|
||||
]
|
||||
|
||||
for name, system in memory_systems:
|
||||
if system is not None:
|
||||
try:
|
||||
system.reset()
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Failed to reset {name} memory") from e
|
||||
|
||||
def _reset_specific_memory(self, memory_type: str) -> None:
|
||||
"""Reset a specific memory system.
|
||||
|
||||
Args:
|
||||
memory_type: Type of memory to reset
|
||||
|
||||
Raises:
|
||||
RuntimeError: If the specified memory system fails to reset
|
||||
"""
|
||||
reset_functions = {
|
||||
"long": (self._long_term_memory, "long term"),
|
||||
"short": (self._short_term_memory, "short term"),
|
||||
"entity": (self._entity_memory, "entity"),
|
||||
"knowledge": (self.knowledge, "knowledge"),
|
||||
"kickoff_outputs": (self._task_output_handler, "task output"),
|
||||
}
|
||||
|
||||
memory_system, name = reset_functions[memory_type]
|
||||
if memory_system is None:
|
||||
raise RuntimeError(f"{name} memory system is not initialized")
|
||||
|
||||
try:
|
||||
memory_system.reset()
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Failed to reset {name} memory") from e
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
from crewai.flow.flow import Flow
|
||||
from crewai.flow.flow import Flow, start, listen, or_, and_, router
|
||||
from crewai.flow.persistence import persist
|
||||
|
||||
__all__ = ["Flow", "start", "listen", "or_", "and_", "router", "persist"]
|
||||
|
||||
__all__ = ["Flow"]
|
||||
|
||||
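With `start`, `listen`, `or_`, `and_`, `router`, and `persist` re-exported from `crewai.flow`, a minimal flow can be written against the package root. A hedged sketch:

```python
from crewai.flow import Flow, start, listen

class GreetingFlow(Flow):
    @start()
    def begin(self):
        return "hello"

    @listen(begin)
    def shout(self, previous_result):
        return previous_result.upper()

result = GreetingFlow().kickoff()
print(result)  # "HELLO"
```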
@@ -1,5 +1,7 @@
|
||||
import asyncio
|
||||
import copy
|
||||
import inspect
|
||||
import logging
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
@@ -15,25 +17,82 @@ from typing import (
|
||||
)
|
||||
from uuid import uuid4
|
||||
|
||||
from blinker import Signal
|
||||
from pydantic import BaseModel, Field, ValidationError
|
||||
|
||||
from crewai.flow.flow_events import (
|
||||
from crewai.flow.flow_visualizer import plot_flow
|
||||
from crewai.flow.persistence.base import FlowPersistence
|
||||
from crewai.flow.utils import get_possible_return_constants
|
||||
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
|
||||
from crewai.utilities.events.flow_events import (
|
||||
FlowCreatedEvent,
|
||||
FlowFinishedEvent,
|
||||
FlowPlotEvent,
|
||||
FlowStartedEvent,
|
||||
MethodExecutionFailedEvent,
|
||||
MethodExecutionFinishedEvent,
|
||||
MethodExecutionStartedEvent,
|
||||
)
|
||||
from crewai.flow.flow_visualizer import plot_flow
|
||||
from crewai.flow.utils import get_possible_return_constants
|
||||
from crewai.telemetry import Telemetry
|
||||
from crewai.utilities.printer import Printer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FlowState(BaseModel):
|
||||
"""Base model for all flow states, ensuring each state has a unique ID."""
|
||||
id: str = Field(default_factory=lambda: str(uuid4()), description="Unique identifier for the flow state")
|
||||
|
||||
T = TypeVar("T", bound=Union[FlowState, Dict[str, Any]])
|
||||
id: str = Field(
|
||||
default_factory=lambda: str(uuid4()),
|
||||
description="Unique identifier for the flow state",
|
||||
)
|
||||
|
||||
|
||||
# Type variables with explicit bounds
|
||||
T = TypeVar(
|
||||
"T", bound=Union[Dict[str, Any], BaseModel]
|
||||
) # Generic flow state type parameter
|
||||
StateT = TypeVar(
|
||||
"StateT", bound=Union[Dict[str, Any], BaseModel]
|
||||
) # State validation type parameter
|
||||
|
||||
|
||||
def ensure_state_type(state: Any, expected_type: Type[StateT]) -> StateT:
|
||||
"""Ensure state matches expected type with proper validation.
|
||||
|
||||
Args:
|
||||
state: State instance to validate
|
||||
expected_type: Expected type for the state
|
||||
|
||||
Returns:
|
||||
Validated state instance
|
||||
|
||||
Raises:
|
||||
TypeError: If state doesn't match expected type
|
||||
ValueError: If state validation fails
|
||||
"""
if expected_type is dict:
|
||||
if not isinstance(state, dict):
|
||||
raise TypeError(f"Expected dict, got {type(state).__name__}")
|
||||
return cast(StateT, state)
|
||||
if isinstance(expected_type, type) and issubclass(expected_type, BaseModel):
|
||||
if not isinstance(state, expected_type):
|
||||
raise TypeError(
|
||||
f"Expected {expected_type.__name__}, got {type(state).__name__}"
|
||||
)
|
||||
return cast(StateT, state)
|
||||
raise TypeError(f"Invalid expected_type: {expected_type}")
|
||||
|
||||
|
||||
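A hedged sketch of `ensure_state_type`: dicts pass through when `dict` is expected, `BaseModel` subclasses are checked with `isinstance`, and mismatches raise `TypeError` (assuming the helper is imported from `crewai.flow.flow`):

```python
from pydantic import BaseModel
from crewai.flow.flow import ensure_state_type

class CounterState(BaseModel):
    count: int = 0

ensure_state_type({"id": "abc"}, dict)           # returns the dict unchanged
ensure_state_type(CounterState(), CounterState)  # returns the validated model
# ensure_state_type({"count": 1}, CounterState)  # would raise TypeError
```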
def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
|
||||
@@ -77,6 +136,7 @@ def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
|
||||
>>> def complex_start(self):
|
||||
... pass
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
func.__is_start_method__ = True
|
||||
if condition is not None:
|
||||
@@ -101,6 +161,7 @@ def start(condition: Optional[Union[str, dict, Callable]] = None) -> Callable:
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def listen(condition: Union[str, dict, Callable]) -> Callable:
|
||||
"""
|
||||
Creates a listener that executes when specified conditions are met.
|
||||
@@ -137,6 +198,7 @@ def listen(condition: Union[str, dict, Callable]) -> Callable:
|
||||
>>> def handle_completion(self):
|
||||
... pass
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
if isinstance(condition, str):
|
||||
func.__trigger_methods__ = [condition]
|
||||
@@ -201,6 +263,7 @@ def router(condition: Union[str, dict, Callable]) -> Callable:
|
||||
... return CONTINUE
|
||||
... return STOP
|
||||
"""
|
||||
|
||||
def decorator(func):
|
||||
func.__is_router__ = True
|
||||
if isinstance(condition, str):
|
||||
@@ -224,6 +287,7 @@ def router(condition: Union[str, dict, Callable]) -> Callable:
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def or_(*conditions: Union[str, dict, Callable]) -> dict:
|
||||
"""
|
||||
Combines multiple conditions with OR logic for flow control.
|
||||
@@ -326,21 +390,31 @@ class FlowMeta(type):
|
||||
routers = set()
|
||||
|
||||
for attr_name, attr_value in dct.items():
|
||||
if hasattr(attr_value, "__is_start_method__"):
|
||||
start_methods.append(attr_name)
|
||||
# Check for any flow-related attributes
|
||||
if (
|
||||
hasattr(attr_value, "__is_flow_method__")
|
||||
or hasattr(attr_value, "__is_start_method__")
|
||||
or hasattr(attr_value, "__trigger_methods__")
|
||||
or hasattr(attr_value, "__is_router__")
|
||||
):
|
||||
# Register start methods
|
||||
if hasattr(attr_value, "__is_start_method__"):
|
||||
start_methods.append(attr_name)
|
||||
|
||||
# Register listeners and routers
|
||||
if hasattr(attr_value, "__trigger_methods__"):
|
||||
methods = attr_value.__trigger_methods__
|
||||
condition_type = getattr(attr_value, "__condition_type__", "OR")
|
||||
listeners[attr_name] = (condition_type, methods)
|
||||
elif hasattr(attr_value, "__trigger_methods__"):
|
||||
methods = attr_value.__trigger_methods__
|
||||
condition_type = getattr(attr_value, "__condition_type__", "OR")
|
||||
listeners[attr_name] = (condition_type, methods)
|
||||
if hasattr(attr_value, "__is_router__") and attr_value.__is_router__:
|
||||
routers.add(attr_name)
|
||||
possible_returns = get_possible_return_constants(attr_value)
|
||||
if possible_returns:
|
||||
router_paths[attr_name] = possible_returns
|
||||
|
||||
if (
|
||||
hasattr(attr_value, "__is_router__")
|
||||
and attr_value.__is_router__
|
||||
):
|
||||
routers.add(attr_name)
|
||||
possible_returns = get_possible_return_constants(attr_value)
|
||||
if possible_returns:
|
||||
router_paths[attr_name] = possible_returns
|
||||
|
||||
setattr(cls, "_start_methods", start_methods)
|
||||
setattr(cls, "_listeners", listeners)
|
||||
@@ -351,14 +425,17 @@ class FlowMeta(type):
|
||||
|
||||
|
||||
class Flow(Generic[T], metaclass=FlowMeta):
|
||||
_telemetry = Telemetry()
|
||||
"""Base class for all flows.
|
||||
|
||||
Type parameter T must be either Dict[str, Any] or a subclass of BaseModel."""
|
||||
|
||||
_printer = Printer()
|
||||
|
||||
_start_methods: List[str] = []
|
||||
_listeners: Dict[str, tuple[str, List[str]]] = {}
|
||||
_routers: Set[str] = set()
|
||||
_router_paths: Dict[str, List[str]] = {}
|
||||
initial_state: Union[Type[T], T, None] = None
|
||||
event_emitter = Signal("event_emitter")
|
||||
|
||||
def __class_getitem__(cls: Type["Flow"], item: Type[T]) -> Type["Flow"]:
|
||||
class _FlowGeneric(cls): # type: ignore
|
||||
@@ -367,53 +444,139 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
_FlowGeneric.__name__ = f"{cls.__name__}[{item.__name__}]"
|
||||
return _FlowGeneric
|
||||
|
||||
def __init__(self) -> None:
|
||||
def __init__(
|
||||
self,
|
||||
persistence: Optional[FlowPersistence] = None,
|
||||
**kwargs: Any,
|
||||
) -> None:
|
||||
"""Initialize a new Flow instance.
|
||||
|
||||
Args:
|
||||
persistence: Optional persistence backend for storing flow states
|
||||
**kwargs: Additional state values to initialize or override
|
||||
"""
|
||||
# Initialize basic instance attributes
|
||||
self._methods: Dict[str, Callable] = {}
|
||||
self._state: T = self._create_initial_state()
|
||||
self._method_execution_counts: Dict[str, int] = {}
|
||||
self._pending_and_listeners: Dict[str, Set[str]] = {}
|
||||
self._method_outputs: List[Any] = [] # List to store all method outputs
|
||||
self._persistence: Optional[FlowPersistence] = persistence
|
||||
|
||||
self._telemetry.flow_creation_span(self.__class__.__name__)
|
||||
# Initialize state with initial values
|
||||
self._state = self._create_initial_state()
|
||||
|
||||
# Apply any additional kwargs
|
||||
if kwargs:
|
||||
self._initialize_state(kwargs)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowCreatedEvent(
|
||||
type="flow_created",
|
||||
flow_name=self.__class__.__name__,
|
||||
),
|
||||
)
|
||||
|
||||
# Register all flow-related methods
|
||||
for method_name in dir(self):
|
||||
if callable(getattr(self, method_name)) and not method_name.startswith(
|
||||
"__"
|
||||
):
|
||||
self._methods[method_name] = getattr(self, method_name)
|
||||
if not method_name.startswith("_"):
|
||||
method = getattr(self, method_name)
|
||||
# Check for any flow-related attributes
|
||||
if (
|
||||
hasattr(method, "__is_flow_method__")
|
||||
or hasattr(method, "__is_start_method__")
|
||||
or hasattr(method, "__trigger_methods__")
|
||||
or hasattr(method, "__is_router__")
|
||||
):
|
||||
# Ensure method is bound to this instance
|
||||
if not hasattr(method, "__self__"):
|
||||
method = method.__get__(self, self.__class__)
|
||||
self._methods[method_name] = method
|
||||
|
||||
def _create_initial_state(self) -> T:
|
||||
"""Create and initialize flow state with UUID and default values.
|
||||
|
||||
Returns:
|
||||
New state instance with UUID and default values initialized
|
||||
|
||||
Raises:
|
||||
ValueError: If structured state model lacks 'id' field
|
||||
TypeError: If state is neither BaseModel nor dictionary
|
||||
"""
|
||||
# Handle case where initial_state is None but we have a type parameter
|
||||
if self.initial_state is None and hasattr(self, "_initial_state_T"):
|
||||
state_type = getattr(self, "_initial_state_T")
|
||||
if isinstance(state_type, type):
|
||||
if issubclass(state_type, FlowState):
|
||||
return state_type() # type: ignore
|
||||
# Create instance without id, then set it
|
||||
instance = state_type()
|
||||
if not hasattr(instance, "id"):
|
||||
setattr(instance, "id", str(uuid4()))
|
||||
return cast(T, instance)
|
||||
elif issubclass(state_type, BaseModel):
|
||||
# Create a new type that includes the ID field
|
||||
class StateWithId(state_type, FlowState): # type: ignore
|
||||
pass
|
||||
return StateWithId() # type: ignore
|
||||
|
||||
instance = StateWithId()
|
||||
if not hasattr(instance, "id"):
|
||||
setattr(instance, "id", str(uuid4()))
|
||||
return cast(T, instance)
|
||||
elif state_type is dict:
|
||||
return cast(T, {"id": str(uuid4())})
|
||||
|
||||
# Handle case where no initial state is provided
|
||||
if self.initial_state is None:
|
||||
return {"id": str(uuid4())} # type: ignore
|
||||
return cast(T, {"id": str(uuid4())})
|
||||
|
||||
# Handle case where initial_state is a type (class)
|
||||
if isinstance(self.initial_state, type):
|
||||
if issubclass(self.initial_state, FlowState):
|
||||
return self.initial_state() # type: ignore
|
||||
return cast(T, self.initial_state()) # Uses model defaults
|
||||
elif issubclass(self.initial_state, BaseModel):
|
||||
# Create a new type that includes the ID field
|
||||
class StateWithId(self.initial_state, FlowState): # type: ignore
|
||||
pass
|
||||
return StateWithId() # type: ignore
|
||||
# Validate that the model has an id field
|
||||
model_fields = getattr(self.initial_state, "model_fields", None)
|
||||
if not model_fields or "id" not in model_fields:
|
||||
raise ValueError("Flow state model must have an 'id' field")
|
||||
return cast(T, self.initial_state()) # Uses model defaults
|
||||
elif self.initial_state is dict:
|
||||
return cast(T, {"id": str(uuid4())})
|
||||
|
||||
# Handle dictionary case
|
||||
if isinstance(self.initial_state, dict) and "id" not in self.initial_state:
|
||||
self.initial_state["id"] = str(uuid4())
|
||||
# Handle dictionary instance case
|
||||
if isinstance(self.initial_state, dict):
|
||||
new_state = dict(self.initial_state) # Copy to avoid mutations
|
||||
if "id" not in new_state:
|
||||
new_state["id"] = str(uuid4())
|
||||
return cast(T, new_state)
|
||||
|
||||
return self.initial_state # type: ignore
|
||||
# Handle BaseModel instance case
|
||||
if isinstance(self.initial_state, BaseModel):
|
||||
model = cast(BaseModel, self.initial_state)
|
||||
if not hasattr(model, "id"):
|
||||
raise ValueError("Flow state model must have an 'id' field")
|
||||
|
||||
# Create new instance with same values to avoid mutations
|
||||
if hasattr(model, "model_dump"):
|
||||
# Pydantic v2
|
||||
state_dict = model.model_dump()
|
||||
elif hasattr(model, "dict"):
|
||||
# Pydantic v1
|
||||
state_dict = model.dict()
|
||||
else:
|
||||
# Fallback for other BaseModel implementations
|
||||
state_dict = {
|
||||
k: v for k, v in model.__dict__.items() if not k.startswith("_")
|
||||
}
|
||||
|
||||
# Create new instance of the same class
|
||||
model_class = type(model)
|
||||
return cast(T, model_class(**state_dict))
|
||||
raise TypeError(
|
||||
f"Initial state must be dict or BaseModel, got {type(self.initial_state)}"
|
||||
)
|
||||
|
||||
def _copy_state(self) -> T:
|
||||
return copy.deepcopy(self._state)
|
||||
|
||||
@property
|
||||
def state(self) -> T:
|
||||
@@ -424,72 +587,198 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
"""Returns the list of all outputs from executed methods."""
|
||||
return self._method_outputs
|
||||
|
||||
@property
|
||||
def flow_id(self) -> str:
|
||||
"""Returns the unique identifier of this flow instance.
|
||||
|
||||
This property provides a consistent way to access the flow's unique identifier
|
||||
regardless of the underlying state implementation (dict or BaseModel).
|
||||
|
||||
Returns:
|
||||
str: The flow's unique identifier, or an empty string if not found
|
||||
|
||||
Note:
|
||||
This property safely handles both dictionary and BaseModel state types,
|
||||
returning an empty string if the ID cannot be retrieved rather than raising
|
||||
an exception.
|
||||
|
||||
Example:
|
||||
```python
|
||||
flow = MyFlow()
|
||||
print(f"Current flow ID: {flow.flow_id}") # Safely get flow ID
|
||||
```
|
||||
"""
|
||||
try:
|
||||
if not hasattr(self, "_state"):
|
||||
return ""
|
||||
|
||||
if isinstance(self._state, dict):
|
||||
return str(self._state.get("id", ""))
|
||||
elif isinstance(self._state, BaseModel):
|
||||
return str(getattr(self._state, "id", ""))
|
||||
return ""
|
||||
except (AttributeError, TypeError):
|
||||
return "" # Safely handle any unexpected attribute access issues
|
||||
|
||||
def _initialize_state(self, inputs: Dict[str, Any]) -> None:
|
||||
"""Initialize or update flow state with new inputs.
|
||||
|
||||
Args:
|
||||
inputs: Dictionary of state values to set/update
|
||||
|
||||
Raises:
|
||||
ValueError: If validation fails for structured state
|
||||
TypeError: If state is neither BaseModel nor dictionary
|
||||
"""
|
||||
if isinstance(self._state, dict):
|
||||
# Preserve the ID when updating unstructured state
|
||||
# For dict states, preserve existing fields unless overridden
|
||||
current_id = self._state.get("id")
|
||||
self._state.update(inputs)
|
||||
# Only update specified fields
|
||||
for k, v in inputs.items():
|
||||
self._state[k] = v
|
||||
# Ensure ID is preserved or generated
|
||||
if current_id:
|
||||
self._state["id"] = current_id
|
||||
elif "id" not in self._state:
|
||||
self._state["id"] = str(uuid4())
|
||||
elif isinstance(self._state, BaseModel):
|
||||
# Structured state
|
||||
# For BaseModel states, preserve existing fields unless overridden
|
||||
try:
|
||||
def create_model_with_extra_forbid(
|
||||
base_model: Type[BaseModel],
|
||||
) -> Type[BaseModel]:
|
||||
class ModelWithExtraForbid(base_model): # type: ignore
|
||||
model_config = base_model.model_config.copy()
|
||||
model_config["extra"] = "forbid"
|
||||
|
||||
return ModelWithExtraForbid
|
||||
|
||||
# Get current state as dict, preserving the ID if it exists
|
||||
state_model = cast(BaseModel, self._state)
|
||||
current_state = (
|
||||
state_model.model_dump()
|
||||
if hasattr(state_model, "model_dump")
|
||||
else state_model.dict()
|
||||
if hasattr(state_model, "dict")
|
||||
else {
|
||||
k: v
|
||||
for k, v in state_model.__dict__.items()
|
||||
if not k.startswith("_")
|
||||
model = cast(BaseModel, self._state)
|
||||
# Get current state as dict
|
||||
if hasattr(model, "model_dump"):
|
||||
current_state = model.model_dump()
|
||||
elif hasattr(model, "dict"):
|
||||
current_state = model.dict()
|
||||
else:
|
||||
current_state = {
|
||||
k: v for k, v in model.__dict__.items() if not k.startswith("_")
|
||||
}
|
||||
)
|
||||
|
||||
ModelWithExtraForbid = create_model_with_extra_forbid(
|
||||
self._state.__class__
|
||||
)
|
||||
self._state = cast(
|
||||
T, ModelWithExtraForbid(**{**current_state, **inputs})
|
||||
)
|
||||
# Create new state with preserved fields and updates
|
||||
new_state = {**current_state, **inputs}
|
||||
|
||||
# Create new instance with merged state
|
||||
model_class = type(model)
|
||||
if hasattr(model_class, "model_validate"):
|
||||
# Pydantic v2
|
||||
self._state = cast(T, model_class.model_validate(new_state))
|
||||
elif hasattr(model_class, "parse_obj"):
|
||||
# Pydantic v1
|
||||
self._state = cast(T, model_class.parse_obj(new_state))
|
||||
else:
|
||||
# Fallback for other BaseModel implementations
|
||||
self._state = cast(T, model_class(**new_state))
|
||||
except ValidationError as e:
|
||||
raise ValueError(f"Invalid inputs for structured state: {e}") from e
|
||||
else:
|
||||
raise TypeError("State must be a BaseModel instance or a dictionary.")
|
||||
|
||||
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
self.event_emitter.send(
|
||||
self,
|
||||
event=FlowStartedEvent(
|
||||
type="flow_started",
|
||||
flow_name=self.__class__.__name__,
|
||||
),
|
||||
)
|
||||
def _restore_state(self, stored_state: Dict[str, Any]) -> None:
|
||||
"""Restore flow state from persistence.
|
||||
|
||||
if inputs is not None:
|
||||
self._initialize_state(inputs)
|
||||
return asyncio.run(self.kickoff_async())
|
||||
Args:
|
||||
stored_state: Previously stored state to restore
|
||||
|
||||
Raises:
|
||||
ValueError: If validation fails for structured state
|
||||
TypeError: If state is neither BaseModel nor dictionary
|
||||
"""
|
||||
# When restoring from persistence, use the stored ID
|
||||
stored_id = stored_state.get("id")
|
||||
if not stored_id:
|
||||
raise ValueError("Stored state must have an 'id' field")
|
||||
|
||||
if isinstance(self._state, dict):
|
||||
# For dict states, update all fields from stored state
|
||||
self._state.clear()
|
||||
self._state.update(stored_state)
|
||||
elif isinstance(self._state, BaseModel):
|
||||
# For BaseModel states, create new instance with stored values
|
||||
model = cast(BaseModel, self._state)
|
||||
if hasattr(model, "model_validate"):
|
||||
# Pydantic v2
|
||||
self._state = cast(T, type(model).model_validate(stored_state))
|
||||
elif hasattr(model, "parse_obj"):
|
||||
# Pydantic v1
|
||||
self._state = cast(T, type(model).parse_obj(stored_state))
|
||||
else:
|
||||
# Fallback for other BaseModel implementations
|
||||
self._state = cast(T, type(model)(**stored_state))
|
||||
else:
|
||||
raise TypeError(f"State must be dict or BaseModel, got {type(self._state)}")
|
||||
|
||||
def kickoff(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
"""
|
||||
Start the flow execution in a synchronous context.
|
||||
|
||||
This method wraps kickoff_async so that all state initialization and event
|
||||
emission is handled in the asynchronous method.
|
||||
"""
|
||||
|
||||
async def run_flow():
|
||||
return await self.kickoff_async(inputs)
|
||||
|
||||
return asyncio.run(run_flow())
|
||||
|
||||
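A hedged sketch of the restoration path documented below: with a persistence backend attached (here via the class-level `@persist` decorator introduced later in this changeset), passing the same `id` back into `kickoff` reloads the stored state before any other inputs are applied. The database filename is illustrative.

```python
from crewai.flow import Flow, start
from crewai.flow.persistence import SQLiteFlowPersistence, persist

@persist(SQLiteFlowPersistence("flow_states.db"), verbose=True)
class ResumableFlow(Flow):
    @start()
    def begin(self):
        self.state["note"] = "first run"
        return self.state["note"]

run = ResumableFlow()
run.kickoff()
saved_id = run.flow_id  # UUID generated for the dict state

# A later process resumes from the persisted state by passing the same id.
ResumableFlow().kickoff(inputs={"id": saved_id})
```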
async def kickoff_async(self, inputs: Optional[Dict[str, Any]] = None) -> Any:
|
||||
if not self._start_methods:
|
||||
raise ValueError("No start method defined")
|
||||
"""
|
||||
Start the flow execution asynchronously.
|
||||
|
||||
self._telemetry.flow_execution_span(
|
||||
self.__class__.__name__, list(self._methods.keys())
|
||||
This method performs state restoration (if an 'id' is provided and persistence is available)
|
||||
and updates the flow state with any additional inputs. It then emits the FlowStartedEvent,
|
||||
logs the flow startup, and executes all start methods. Once completed, it emits the
|
||||
FlowFinishedEvent and returns the final output.
|
||||
|
||||
Args:
|
||||
inputs: Optional dictionary containing input values and/or a state ID for restoration.
|
||||
|
||||
Returns:
|
||||
The final output from the flow, which is the result of the last executed method.
|
||||
"""
|
||||
if inputs:
|
||||
# Override the id in the state if it exists in inputs
|
||||
if "id" in inputs:
|
||||
if isinstance(self._state, dict):
|
||||
self._state["id"] = inputs["id"]
|
||||
elif isinstance(self._state, BaseModel):
|
||||
setattr(self._state, "id", inputs["id"])
|
||||
|
||||
# If persistence is enabled, attempt to restore the stored state using the provided id.
|
||||
if "id" in inputs and self._persistence is not None:
|
||||
restore_uuid = inputs["id"]
|
||||
stored_state = self._persistence.load_state(restore_uuid)
|
||||
if stored_state:
|
||||
self._log_flow_event(
|
||||
f"Loading flow state from memory for UUID: {restore_uuid}",
|
||||
color="yellow",
|
||||
)
|
||||
self._restore_state(stored_state)
|
||||
else:
|
||||
self._log_flow_event(
|
||||
f"No flow state found for UUID: {restore_uuid}", color="red"
|
||||
)
|
||||
|
||||
# Update state with any additional inputs (ignoring the 'id' key)
|
||||
filtered_inputs = {k: v for k, v in inputs.items() if k != "id"}
|
||||
if filtered_inputs:
|
||||
self._initialize_state(filtered_inputs)
|
||||
|
||||
# Emit FlowStartedEvent and log the start of the flow.
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowStartedEvent(
|
||||
type="flow_started",
|
||||
flow_name=self.__class__.__name__,
|
||||
inputs=inputs,
|
||||
),
|
||||
)
|
||||
self._log_flow_event(
|
||||
f"Flow started with ID: {self.flow_id}", color="bold_magenta"
|
||||
)
|
||||
|
||||
if inputs is not None and "id" not in inputs:
|
||||
self._initialize_state(inputs)
|
||||
|
||||
tasks = [
|
||||
self._execute_start_method(start_method)
|
||||
@@ -499,14 +788,15 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
final_output = self._method_outputs[-1] if self._method_outputs else None
|
||||
|
||||
self.event_emitter.send(
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=FlowFinishedEvent(
|
||||
FlowFinishedEvent(
|
||||
type="flow_finished",
|
||||
flow_name=self.__class__.__name__,
|
||||
result=final_output,
|
||||
),
|
||||
)
|
||||
|
||||
return final_output
|
||||
|
||||
async def _execute_start_method(self, start_method_name: str) -> None:
|
||||
@@ -535,16 +825,55 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
async def _execute_method(
|
||||
self, method_name: str, method: Callable, *args: Any, **kwargs: Any
|
||||
) -> Any:
|
||||
result = (
|
||||
await method(*args, **kwargs)
|
||||
if asyncio.iscoroutinefunction(method)
|
||||
else method(*args, **kwargs)
|
||||
)
|
||||
self._method_outputs.append(result)
|
||||
self._method_execution_counts[method_name] = (
|
||||
self._method_execution_counts.get(method_name, 0) + 1
|
||||
)
|
||||
return result
|
||||
try:
|
||||
dumped_params = {f"_{i}": arg for i, arg in enumerate(args)} | (
|
||||
kwargs or {}
|
||||
)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MethodExecutionStartedEvent(
|
||||
type="method_execution_started",
|
||||
method_name=method_name,
|
||||
flow_name=self.__class__.__name__,
|
||||
params=dumped_params,
|
||||
state=self._copy_state(),
|
||||
),
|
||||
)
|
||||
|
||||
result = (
|
||||
await method(*args, **kwargs)
|
||||
if asyncio.iscoroutinefunction(method)
|
||||
else method(*args, **kwargs)
|
||||
)
|
||||
|
||||
self._method_outputs.append(result)
|
||||
self._method_execution_counts[method_name] = (
|
||||
self._method_execution_counts.get(method_name, 0) + 1
|
||||
)
|
||||
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MethodExecutionFinishedEvent(
|
||||
type="method_execution_finished",
|
||||
method_name=method_name,
|
||||
flow_name=self.__class__.__name__,
|
||||
state=self._copy_state(),
|
||||
result=result,
|
||||
),
|
||||
)
|
||||
|
||||
return result
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
MethodExecutionFailedEvent(
|
||||
type="method_execution_failed",
|
||||
method_name=method_name,
|
||||
flow_name=self.__class__.__name__,
|
||||
error=e,
|
||||
),
|
||||
)
|
||||
raise e
|
||||
|
||||
async def _execute_listeners(self, trigger_method: str, result: Any) -> None:
|
||||
"""
|
||||
@@ -565,35 +894,45 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
Notes
|
||||
-----
|
||||
- Routers are executed sequentially to maintain flow control
|
||||
- Each router's result becomes the new trigger_method
|
||||
- Each router's result becomes a new trigger_method
|
||||
- Normal listeners are executed in parallel for efficiency
|
||||
- Listeners can receive the trigger method's result as a parameter
|
||||
"""
|
||||
# First, handle routers repeatedly until no router triggers anymore
|
||||
router_results = []
|
||||
current_trigger = trigger_method
|
||||
|
||||
while True:
|
||||
routers_triggered = self._find_triggered_methods(
|
||||
trigger_method, router_only=True
|
||||
current_trigger, router_only=True
|
||||
)
|
||||
if not routers_triggered:
|
||||
break
|
||||
|
||||
for router_name in routers_triggered:
|
||||
await self._execute_single_listener(router_name, result)
|
||||
# After executing router, the router's result is the path
|
||||
# The last router executed sets the trigger_method
|
||||
# The router result is the last element in self._method_outputs
|
||||
trigger_method = self._method_outputs[-1]
|
||||
router_result = self._method_outputs[-1]
|
||||
if router_result: # Only add non-None results
|
||||
router_results.append(router_result)
|
||||
current_trigger = (
|
||||
router_result # Update for next iteration of router chain
|
||||
)
|
||||
|
||||
# Now that no more routers are triggered by current trigger_method,
|
||||
# execute normal listeners
|
||||
listeners_triggered = self._find_triggered_methods(
|
||||
trigger_method, router_only=False
|
||||
)
|
||||
if listeners_triggered:
|
||||
tasks = [
|
||||
self._execute_single_listener(listener_name, result)
|
||||
for listener_name in listeners_triggered
|
||||
]
|
||||
await asyncio.gather(*tasks)
|
||||
# Now execute normal listeners for all router results and the original trigger
|
||||
all_triggers = [trigger_method] + router_results
|
||||
|
||||
for current_trigger in all_triggers:
|
||||
if current_trigger: # Skip None results
|
||||
listeners_triggered = self._find_triggered_methods(
|
||||
current_trigger, router_only=False
|
||||
)
|
||||
if listeners_triggered:
|
||||
tasks = [
|
||||
self._execute_single_listener(listener_name, result)
|
||||
for listener_name in listeners_triggered
|
||||
]
|
||||
await asyncio.gather(*tasks)
|
||||
|
||||
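A hedged sketch of the router chaining described above: the router's string result becomes a trigger, so listeners registered on that string run next.

```python
from crewai.flow import Flow, start, listen, router

class ApprovalFlow(Flow):
    @start()
    def score(self):
        self.state["score"] = 42
        return self.state["score"]

    @router(score)
    def route(self):
        return "approved" if self.state["score"] > 10 else "rejected"

    @listen("approved")
    def on_approved(self):
        return "shipping"

    @listen("rejected")
    def on_rejected(self):
        return "archiving"

print(ApprovalFlow().kickoff())  # expected: "shipping"
```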
def _find_triggered_methods(
|
||||
self, trigger_method: str, router_only: bool
|
||||
@@ -683,15 +1022,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
try:
|
||||
method = self._methods[listener_name]
|
||||
|
||||
self.event_emitter.send(
|
||||
self,
|
||||
event=MethodExecutionStartedEvent(
|
||||
type="method_execution_started",
|
||||
method_name=listener_name,
|
||||
flow_name=self.__class__.__name__,
|
||||
),
|
||||
)
|
||||
|
||||
sig = inspect.signature(method)
|
||||
params = list(sig.parameters.values())
|
||||
method_params = [p for p in params if p.name != "self"]
|
||||
@@ -703,15 +1033,6 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
else:
|
||||
listener_result = await self._execute_method(listener_name, method)
|
||||
|
||||
self.event_emitter.send(
|
||||
self,
|
||||
event=MethodExecutionFinishedEvent(
|
||||
type="method_execution_finished",
|
||||
method_name=listener_name,
|
||||
flow_name=self.__class__.__name__,
|
||||
),
|
||||
)
|
||||
|
||||
# Execute listeners (and possibly routers) of this listener
|
||||
await self._execute_listeners(listener_name, listener_result)
|
||||
|
||||
@@ -723,8 +1044,38 @@ class Flow(Generic[T], metaclass=FlowMeta):
|
||||
|
||||
traceback.print_exc()
|
||||
|
||||
def _log_flow_event(
|
||||
self, message: str, color: str = "yellow", level: str = "info"
|
||||
) -> None:
|
||||
"""Centralized logging method for flow events.
|
||||
|
||||
This method provides a consistent interface for logging flow-related events,
|
||||
combining colored console output with proper logging levels.
|
||||
|
||||
Args:
|
||||
message: The message to log
|
||||
color: Color to use for console output (default: yellow)
|
||||
Available colors: purple, red, bold_green, bold_purple,
|
||||
bold_blue, yellow
|
||||
level: Log level to use (default: info)
|
||||
Supported levels: info, warning
|
||||
|
||||
Note:
|
||||
This method uses the Printer utility for colored console output
|
||||
and the standard logging module for log level support.
|
||||
"""
|
||||
self._printer.print(message, color=color)
|
||||
if level == "info":
|
||||
logger.info(message)
|
||||
elif level == "warning":
|
||||
logger.warning(message)
|
||||
|
||||
def plot(self, filename: str = "crewai_flow") -> None:
|
||||
self._telemetry.flow_plotting_span(
|
||||
self.__class__.__name__, list(self._methods.keys())
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
FlowPlotEvent(
|
||||
type="flow_plot",
|
||||
flow_name=self.__class__.__name__,
|
||||
),
|
||||
)
|
||||
plot_flow(self, filename)
|
||||
|
||||
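A one-line sketch of `plot()`, reusing the `ApprovalFlow` example above; it emits a `FlowPlotEvent` and hands the flow to `plot_flow`, which (as far as this changeset shows) renders the method graph to a file named after the `filename` argument:

```python
ApprovalFlow().plot("approval_flow")
```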
@@ -1,33 +0,0 @@
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
@dataclass
|
||||
class Event:
|
||||
type: str
|
||||
flow_name: str
|
||||
timestamp: datetime = field(init=False)
|
||||
|
||||
def __post_init__(self):
|
||||
self.timestamp = datetime.now()
|
||||
|
||||
|
||||
@dataclass
|
||||
class FlowStartedEvent(Event):
|
||||
pass
|
||||
|
||||
|
||||
@dataclass
|
||||
class MethodExecutionStartedEvent(Event):
|
||||
method_name: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class MethodExecutionFinishedEvent(Event):
|
||||
method_name: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class FlowFinishedEvent(Event):
|
||||
result: Optional[Any] = None
|
||||
18  src/crewai/flow/persistence/__init__.py  Normal file
@@ -0,0 +1,18 @@
"""
CrewAI Flow Persistence.

This module provides interfaces and implementations for persisting flow states.
"""

from typing import Any, Dict, TypeVar, Union

from pydantic import BaseModel

from crewai.flow.persistence.base import FlowPersistence
from crewai.flow.persistence.decorators import persist
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence

__all__ = ["FlowPersistence", "persist", "SQLiteFlowPersistence"]

StateType = TypeVar('StateType', bound=Union[Dict[str, Any], BaseModel])
DictStateType = Dict[str, Any]
53  src/crewai/flow/persistence/base.py  Normal file
@@ -0,0 +1,53 @@
"""Base class for flow state persistence."""

import abc
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel


class FlowPersistence(abc.ABC):
    """Abstract base class for flow state persistence.

    This class defines the interface that all persistence implementations must follow.
    It supports both structured (Pydantic BaseModel) and unstructured (dict) states.
    """

    @abc.abstractmethod
    def init_db(self) -> None:
        """Initialize the persistence backend.

        This method should handle any necessary setup, such as:
        - Creating tables
        - Establishing connections
        - Setting up indexes
        """
        pass

    @abc.abstractmethod
    def save_state(
        self,
        flow_uuid: str,
        method_name: str,
        state_data: Union[Dict[str, Any], BaseModel]
    ) -> None:
        """Persist the flow state after method completion.

        Args:
            flow_uuid: Unique identifier for the flow instance
            method_name: Name of the method that just completed
            state_data: Current state data (either dict or Pydantic model)
        """
        pass

    @abc.abstractmethod
    def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
        """Load the most recent state for a given flow UUID.

        Args:
            flow_uuid: Unique identifier for the flow instance

        Returns:
            The most recent state as a dictionary, or None if no state exists
        """
        pass
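A hedged sketch of a custom backend built against the `FlowPersistence` interface above: an in-memory store that keeps only the latest state per flow UUID, which can be handy in tests where SQLite is unnecessary.

```python
from typing import Any, Dict, Optional, Union

from pydantic import BaseModel

from crewai.flow.persistence.base import FlowPersistence


class InMemoryFlowPersistence(FlowPersistence):
    """Volatile persistence backend holding states in a plain dict."""

    def __init__(self) -> None:
        self._states: Dict[str, Dict[str, Any]] = {}
        self.init_db()

    def init_db(self) -> None:
        # Nothing to set up for an in-memory dict.
        pass

    def save_state(
        self,
        flow_uuid: str,
        method_name: str,
        state_data: Union[Dict[str, Any], BaseModel],
    ) -> None:
        data = (
            state_data.model_dump()
            if isinstance(state_data, BaseModel)
            else dict(state_data)
        )
        self._states[flow_uuid] = data

    def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
        return self._states.get(flow_uuid)
```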
254  src/crewai/flow/persistence/decorators.py  Normal file
@@ -0,0 +1,254 @@
|
||||
"""
|
||||
Decorators for flow state persistence.
|
||||
|
||||
Example:
|
||||
```python
|
||||
from crewai.flow.flow import Flow, start
|
||||
from crewai.flow.persistence import persist, SQLiteFlowPersistence
|
||||
|
||||
class MyFlow(Flow):
|
||||
@start()
|
||||
@persist(SQLiteFlowPersistence())
|
||||
def sync_method(self):
|
||||
# Synchronous method implementation
|
||||
pass
|
||||
|
||||
@start()
|
||||
@persist(SQLiteFlowPersistence())
|
||||
async def async_method(self):
|
||||
# Asynchronous method implementation
|
||||
await some_async_operation()
|
||||
```
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import functools
|
||||
import logging
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Optional,
|
||||
Type,
|
||||
TypeVar,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.flow.persistence.base import FlowPersistence
|
||||
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence
|
||||
from crewai.utilities.printer import Printer
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
T = TypeVar("T")
|
||||
|
||||
# Constants for log messages
|
||||
LOG_MESSAGES = {
|
||||
"save_state": "Saving flow state to memory for ID: {}",
|
||||
"save_error": "Failed to persist state for method {}: {}",
|
||||
"state_missing": "Flow instance has no state",
|
||||
"id_missing": "Flow state must have an 'id' field for persistence"
|
||||
}
|
||||
|
||||
|
||||
class PersistenceDecorator:
|
||||
"""Class to handle flow state persistence with consistent logging."""
|
||||
|
||||
_printer = Printer() # Class-level printer instance
|
||||
|
||||
@classmethod
|
||||
def persist_state(cls, flow_instance: Any, method_name: str, persistence_instance: FlowPersistence, verbose: bool = False) -> None:
|
||||
"""Persist flow state with proper error handling and logging.
|
||||
|
||||
This method handles the persistence of flow state data, including proper
|
||||
error handling and colored console output for status updates.
|
||||
|
||||
Args:
|
||||
flow_instance: The flow instance whose state to persist
|
||||
method_name: Name of the method that triggered persistence
|
||||
persistence_instance: The persistence backend to use
|
||||
verbose: Whether to log persistence operations
|
||||
|
||||
Raises:
|
||||
ValueError: If flow has no state or state lacks an ID
|
||||
RuntimeError: If state persistence fails
|
||||
AttributeError: If flow instance lacks required state attributes
|
||||
"""
|
||||
try:
|
||||
state = getattr(flow_instance, 'state', None)
|
||||
if state is None:
|
||||
raise ValueError("Flow instance has no state")
|
||||
|
||||
flow_uuid: Optional[str] = None
|
||||
if isinstance(state, dict):
|
||||
flow_uuid = state.get('id')
|
||||
elif isinstance(state, BaseModel):
|
||||
flow_uuid = getattr(state, 'id', None)
|
||||
|
||||
if not flow_uuid:
|
||||
raise ValueError("Flow state must have an 'id' field for persistence")
|
||||
|
||||
# Log state saving only if verbose is True
|
||||
if verbose:
|
||||
cls._printer.print(LOG_MESSAGES["save_state"].format(flow_uuid), color="cyan")
|
||||
logger.info(LOG_MESSAGES["save_state"].format(flow_uuid))
|
||||
|
||||
try:
|
||||
persistence_instance.save_state(
|
||||
flow_uuid=flow_uuid,
|
||||
method_name=method_name,
|
||||
state_data=state,
|
||||
)
|
||||
except Exception as e:
|
||||
error_msg = LOG_MESSAGES["save_error"].format(method_name, str(e))
|
||||
cls._printer.print(error_msg, color="red")
|
||||
logger.error(error_msg)
|
||||
raise RuntimeError(f"State persistence failed: {str(e)}") from e
|
||||
except AttributeError:
|
||||
error_msg = LOG_MESSAGES["state_missing"]
|
||||
cls._printer.print(error_msg, color="red")
|
||||
logger.error(error_msg)
|
||||
raise ValueError(error_msg)
|
||||
except (TypeError, ValueError) as e:
|
||||
error_msg = LOG_MESSAGES["id_missing"]
|
||||
cls._printer.print(error_msg, color="red")
|
||||
logger.error(error_msg)
|
||||
raise ValueError(error_msg) from e
|
||||
|
||||
|
||||
def persist(persistence: Optional[FlowPersistence] = None, verbose: bool = False):
|
||||
"""Decorator to persist flow state.
|
||||
|
||||
This decorator can be applied at either the class level or method level.
|
||||
When applied at the class level, it automatically persists all flow method
|
||||
states. When applied at the method level, it persists only that method's
|
||||
state.
|
||||
|
||||
Args:
|
||||
persistence: Optional FlowPersistence implementation to use.
|
||||
If not provided, uses SQLiteFlowPersistence.
|
||||
verbose: Whether to log persistence operations. Defaults to False.
|
||||
|
||||
Returns:
|
||||
A decorator that can be applied to either a class or method
|
||||
|
||||
Raises:
|
||||
ValueError: If the flow state doesn't have an 'id' field
|
||||
RuntimeError: If state persistence fails
|
||||
|
||||
Example:
|
||||
@persist(verbose=True) # Class-level persistence with logging
|
||||
class MyFlow(Flow[MyState]):
|
||||
@start()
|
||||
def begin(self):
|
||||
pass
|
||||
"""
|
||||
def decorator(target: Union[Type, Callable[..., T]]) -> Union[Type, Callable[..., T]]:
|
||||
"""Decorator that handles both class and method decoration."""
|
||||
actual_persistence = persistence or SQLiteFlowPersistence()
|
||||
|
||||
if isinstance(target, type):
|
||||
# Class decoration
|
||||
original_init = getattr(target, "__init__")
|
||||
|
||||
@functools.wraps(original_init)
|
||||
def new_init(self: Any, *args: Any, **kwargs: Any) -> None:
|
||||
if 'persistence' not in kwargs:
|
||||
kwargs['persistence'] = actual_persistence
|
||||
original_init(self, *args, **kwargs)
|
||||
|
||||
setattr(target, "__init__", new_init)
|
||||
|
||||
# Store original methods to preserve their decorators
|
||||
original_methods = {}
|
||||
|
||||
for name, method in target.__dict__.items():
|
||||
if callable(method) and (
|
||||
hasattr(method, "__is_start_method__") or
|
||||
hasattr(method, "__trigger_methods__") or
|
||||
hasattr(method, "__condition_type__") or
|
||||
hasattr(method, "__is_flow_method__") or
|
||||
hasattr(method, "__is_router__")
|
||||
):
|
||||
original_methods[name] = method
|
||||
|
||||
# Create wrapped versions of the methods that include persistence
|
||||
for name, method in original_methods.items():
|
||||
if asyncio.iscoroutinefunction(method):
|
||||
# Create a closure to capture the current name and method
|
||||
def create_async_wrapper(method_name: str, original_method: Callable):
|
||||
@functools.wraps(original_method)
|
||||
async def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
|
||||
result = await original_method(self, *args, **kwargs)
|
||||
PersistenceDecorator.persist_state(self, method_name, actual_persistence, verbose)
|
||||
return result
|
||||
return method_wrapper
|
||||
|
||||
wrapped = create_async_wrapper(name, method)
|
||||
|
||||
# Preserve all original decorators and attributes
|
||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
||||
if hasattr(method, attr):
|
||||
setattr(wrapped, attr, getattr(method, attr))
|
||||
setattr(wrapped, "__is_flow_method__", True)
|
||||
|
||||
# Update the class with the wrapped method
|
||||
setattr(target, name, wrapped)
|
||||
else:
|
||||
# Create a closure to capture the current name and method
|
||||
def create_sync_wrapper(method_name: str, original_method: Callable):
|
||||
@functools.wraps(original_method)
|
||||
def method_wrapper(self: Any, *args: Any, **kwargs: Any) -> Any:
|
||||
result = original_method(self, *args, **kwargs)
|
||||
PersistenceDecorator.persist_state(self, method_name, actual_persistence, verbose)
|
||||
return result
|
||||
return method_wrapper
|
||||
|
||||
wrapped = create_sync_wrapper(name, method)
|
||||
|
||||
# Preserve all original decorators and attributes
|
||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
||||
if hasattr(method, attr):
|
||||
setattr(wrapped, attr, getattr(method, attr))
|
||||
setattr(wrapped, "__is_flow_method__", True)
|
||||
|
||||
# Update the class with the wrapped method
|
||||
setattr(target, name, wrapped)
|
||||
|
||||
return target
|
||||
else:
|
||||
# Method decoration
|
||||
method = target
|
||||
setattr(method, "__is_flow_method__", True)
|
||||
|
||||
if asyncio.iscoroutinefunction(method):
|
||||
@functools.wraps(method)
|
||||
async def method_async_wrapper(flow_instance: Any, *args: Any, **kwargs: Any) -> T:
|
||||
method_coro = method(flow_instance, *args, **kwargs)
|
||||
if asyncio.iscoroutine(method_coro):
|
||||
result = await method_coro
|
||||
else:
|
||||
result = method_coro
|
||||
PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence, verbose)
|
||||
return result
|
||||
|
||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
||||
if hasattr(method, attr):
|
||||
setattr(method_async_wrapper, attr, getattr(method, attr))
|
||||
setattr(method_async_wrapper, "__is_flow_method__", True)
|
||||
return cast(Callable[..., T], method_async_wrapper)
|
||||
else:
|
||||
@functools.wraps(method)
|
||||
def method_sync_wrapper(flow_instance: Any, *args: Any, **kwargs: Any) -> T:
|
||||
result = method(flow_instance, *args, **kwargs)
|
||||
PersistenceDecorator.persist_state(flow_instance, method.__name__, actual_persistence, verbose)
|
||||
return result
|
||||
|
||||
for attr in ["__is_start_method__", "__trigger_methods__", "__condition_type__", "__is_router__"]:
|
||||
if hasattr(method, attr):
|
||||
setattr(method_sync_wrapper, attr, getattr(method, attr))
|
||||
setattr(method_sync_wrapper, "__is_flow_method__", True)
|
||||
return cast(Callable[..., T], method_sync_wrapper)
|
||||
|
||||
return decorator
|
||||
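A hedged sketch of method-level `@persist` from the decorator above: only the decorated method's completion writes state, using the default `SQLiteFlowPersistence()` location when no backend is passed.

```python
from crewai.flow import Flow, start, listen
from crewai.flow.persistence import persist

class CheckpointedFlow(Flow):
    @start()
    def gather(self):
        self.state["items"] = ["a", "b"]

    @listen(gather)
    @persist()  # defaults to SQLiteFlowPersistence()
    def checkpoint(self):
        return self.state["items"]

CheckpointedFlow().kickoff()
```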
134  src/crewai/flow/persistence/sqlite.py  Normal file
@@ -0,0 +1,134 @@
|
||||
"""
|
||||
SQLite-based implementation of flow state persistence.
|
||||
"""
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional, Union
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.flow.persistence.base import FlowPersistence
|
||||
|
||||
|
||||
class SQLiteFlowPersistence(FlowPersistence):
|
||||
"""SQLite-based implementation of flow state persistence.
|
||||
|
||||
This class provides a simple, file-based persistence implementation using SQLite.
|
||||
It's suitable for development and testing, or for production use cases with
|
||||
moderate performance requirements.
|
||||
"""
|
||||
|
||||
db_path: str # Type annotation for instance variable
|
||||
|
||||
def __init__(self, db_path: Optional[str] = None):
|
||||
"""Initialize SQLite persistence.
|
||||
|
||||
Args:
|
||||
db_path: Path to the SQLite database file. If not provided, uses
|
||||
db_storage_path() from utilities.paths.
|
||||
|
||||
Raises:
|
||||
ValueError: If db_path is invalid
|
||||
"""
|
||||
from crewai.utilities.paths import db_storage_path
|
||||
|
||||
# Get path from argument or default location
|
||||
path = db_path or str(Path(db_storage_path()) / "flow_states.db")
|
||||
|
||||
if not path:
|
||||
raise ValueError("Database path must be provided")
|
||||
|
||||
self.db_path = path # Now mypy knows this is str
|
||||
self.init_db()
|
||||
|
||||
def init_db(self) -> None:
|
||||
"""Create the necessary tables if they don't exist."""
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS flow_states (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
flow_uuid TEXT NOT NULL,
|
||||
method_name TEXT NOT NULL,
|
||||
timestamp DATETIME NOT NULL,
|
||||
state_json TEXT NOT NULL
|
||||
)
|
||||
"""
|
||||
)
|
||||
# Add index for faster UUID lookups
|
||||
conn.execute(
|
||||
"""
|
||||
CREATE INDEX IF NOT EXISTS idx_flow_states_uuid
|
||||
ON flow_states(flow_uuid)
|
||||
"""
|
||||
)
|
||||
|
||||
def save_state(
|
||||
self,
|
||||
flow_uuid: str,
|
||||
method_name: str,
|
||||
state_data: Union[Dict[str, Any], BaseModel],
|
||||
) -> None:
|
||||
"""Save the current flow state to SQLite.
|
||||
|
||||
Args:
|
||||
flow_uuid: Unique identifier for the flow instance
|
||||
method_name: Name of the method that just completed
|
||||
state_data: Current state data (either dict or Pydantic model)
|
||||
"""
|
||||
# Convert state_data to dict, handling both Pydantic and dict cases
|
||||
if isinstance(state_data, BaseModel):
|
||||
state_dict = dict(state_data) # Use dict() for better type compatibility
|
||||
elif isinstance(state_data, dict):
|
||||
state_dict = state_data
|
||||
else:
|
||||
raise ValueError(
|
||||
f"state_data must be either a Pydantic BaseModel or dict, got {type(state_data)}"
|
||||
)
|
||||
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO flow_states (
|
||||
flow_uuid,
|
||||
method_name,
|
||||
timestamp,
|
||||
state_json
|
||||
) VALUES (?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
flow_uuid,
|
||||
method_name,
|
||||
datetime.now(timezone.utc).isoformat(),
|
||||
json.dumps(state_dict),
|
||||
),
|
||||
)
|
||||
|
||||
def load_state(self, flow_uuid: str) -> Optional[Dict[str, Any]]:
|
||||
"""Load the most recent state for a given flow UUID.
|
||||
|
||||
Args:
|
||||
flow_uuid: Unique identifier for the flow instance
|
||||
|
||||
Returns:
|
||||
The most recent state as a dictionary, or None if no state exists
|
||||
"""
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT state_json
|
||||
FROM flow_states
|
||||
WHERE flow_uuid = ?
|
||||
ORDER BY id DESC
|
||||
LIMIT 1
|
||||
""",
|
||||
(flow_uuid,),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
|
||||
if row:
|
||||
return json.loads(row[0])
|
||||
return None
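A minimal usage sketch of the persistence class above; the database path, flow UUID, and state dict are illustrative stand-ins for whatever a real flow would snapshot.

```python
from crewai.flow.persistence.sqlite import SQLiteFlowPersistence

# Illustrative path and identifiers; any writable location and UUID work.
persistence = SQLiteFlowPersistence(db_path="/tmp/flow_states.db")

# Record a snapshot after a (hypothetical) flow method finishes.
persistence.save_state(
    flow_uuid="demo-flow-123",
    method_name="generate_report",
    state_data={"step": 1, "status": "done"},
)

# Reload the most recent snapshot for that flow instance.
print(persistence.load_state("demo-flow-123"))  # {'step': 1, 'status': 'done'}
```

Because every save inserts a new row, load_state always returns the latest snapshot via the ORDER BY id DESC LIMIT 1 query.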
|
||||
src/crewai/flow/state_utils.py (new file, 91 lines)
@@ -0,0 +1,91 @@
|
||||
import json
|
||||
from datetime import date, datetime
|
||||
from typing import Any, Dict, List, Union
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.flow import Flow
|
||||
|
||||
SerializablePrimitive = Union[str, int, float, bool, None]
|
||||
Serializable = Union[
|
||||
SerializablePrimitive, List["Serializable"], Dict[str, "Serializable"]
|
||||
]
|
||||
|
||||
|
||||
def export_state(flow: Flow) -> dict[str, Serializable]:
|
||||
"""Exports the Flow's internal state as JSON-compatible data structures.
|
||||
|
||||
Performs a one-way transformation of a Flow's state into basic Python types
|
||||
that can be safely serialized to JSON. To prevent infinite recursion with
|
||||
circular references, the conversion is limited to a depth of 5 levels.
|
||||
|
||||
Args:
|
||||
flow: The Flow object whose state needs to be exported
|
||||
|
||||
Returns:
|
||||
dict[str, Any]: The transformed state using JSON-compatible Python
|
||||
types.
|
||||
"""
|
||||
result = to_serializable(flow._state)
|
||||
assert isinstance(result, dict)
|
||||
return result
|
||||
|
||||
|
||||
def to_serializable(
|
||||
obj: Any, max_depth: int = 5, _current_depth: int = 0
|
||||
) -> Serializable:
|
||||
"""Converts a Python object into a JSON-compatible representation.
|
||||
|
||||
Supports primitives, datetime objects, collections, dictionaries, and
|
||||
Pydantic models. Recursion depth is limited to prevent infinite nesting.
|
||||
Non-convertible objects default to their string representations.
|
||||
|
||||
Args:
|
||||
obj (Any): Object to transform.
|
||||
max_depth (int, optional): Maximum recursion depth. Defaults to 5.
|
||||
|
||||
Returns:
|
||||
Serializable: A JSON-compatible structure.
|
||||
"""
|
||||
if _current_depth >= max_depth:
|
||||
return repr(obj)
|
||||
|
||||
if isinstance(obj, (str, int, float, bool, type(None))):
|
||||
return obj
|
||||
elif isinstance(obj, (date, datetime)):
|
||||
return obj.isoformat()
|
||||
elif isinstance(obj, (list, tuple, set)):
|
||||
return [to_serializable(item, max_depth, _current_depth + 1) for item in obj]
|
||||
elif isinstance(obj, dict):
|
||||
return {
|
||||
_to_serializable_key(key): to_serializable(
|
||||
value, max_depth, _current_depth + 1
|
||||
)
|
||||
for key, value in obj.items()
|
||||
}
|
||||
elif isinstance(obj, BaseModel):
|
||||
return to_serializable(obj.model_dump(), max_depth, _current_depth + 1)
|
||||
else:
|
||||
return repr(obj)
|
||||
|
||||
|
||||
def _to_serializable_key(key: Any) -> str:
|
||||
if isinstance(key, (str, int)):
|
||||
return str(key)
|
||||
return f"key_{id(key)}_{repr(key)}"
|
||||
|
||||
|
||||
def to_string(obj: Any) -> str | None:
|
||||
"""Serializes an object into a JSON string.
|
||||
|
||||
Args:
|
||||
obj (Any): Object to serialize.
|
||||
|
||||
Returns:
|
||||
str | None: A JSON-formatted string or `None` if empty.
|
||||
"""
|
||||
serializable = to_serializable(obj)
|
||||
if serializable is None:
|
||||
return None
|
||||
else:
|
||||
return json.dumps(serializable)
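A short sketch of how the helpers above behave on mixed state; the Pydantic model and values below are made up for illustration.

```python
from datetime import datetime

from pydantic import BaseModel

from crewai.flow.state_utils import to_serializable, to_string


class Item(BaseModel):  # illustrative model, not part of crewAI
    name: str
    due: datetime


state = {
    "items": [Item(name="draft", due=datetime(2025, 1, 1))],
    "counts": {"done": 2, "open": (1, 3)},  # tuples become lists
}

print(to_serializable(state))
# {'items': [{'name': 'draft', 'due': '2025-01-01T00:00:00'}],
#  'counts': {'done': 2, 'open': [1, 3]}}

print(to_string(state))  # same structure, rendered as a JSON string
```

Anything nested deeper than five levels, or not otherwise convertible, falls back to repr(), so the output stays JSON-safe.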
|
||||
src/crewai/flow/task_decorator.py (new file, 50 lines)
@@ -0,0 +1,50 @@
|
||||
from functools import wraps
|
||||
from typing import Any, Callable, Optional, Union, cast
|
||||
|
||||
from crewai.tasks.conditional_task import ConditionalTask
|
||||
from crewai.tasks.task_output import TaskOutput
|
||||
|
||||
|
||||
def task(func: Callable) -> Callable:
|
||||
"""
|
||||
Decorator for Flow methods that return a Task.
|
||||
|
||||
This decorator ensures that when a method returns a ConditionalTask,
|
||||
the condition is properly evaluated based on the previous task's output.
|
||||
|
||||
Args:
|
||||
func: The method to decorate
|
||||
|
||||
Returns:
|
||||
The decorated method
|
||||
"""
|
||||
setattr(func, "is_task", True)
|
||||
|
||||
@wraps(func)
|
||||
def wrapper(self, *args, **kwargs):
|
||||
result = func(self, *args, **kwargs)
|
||||
|
||||
# Set the task name if not already set
|
||||
if hasattr(result, "name") and not result.name:
|
||||
result.name = func.__name__
|
||||
|
||||
# If this is a ConditionalTask, ensure it has a valid condition
|
||||
if isinstance(result, ConditionalTask):
|
||||
# If the condition is a boolean, wrap it in a function
|
||||
if isinstance(result.condition, bool):
|
||||
bool_value = result.condition
|
||||
result.condition = lambda _: bool_value
|
||||
|
||||
# Get the previous task output if available
|
||||
previous_outputs = getattr(self, "_method_outputs", [])
|
||||
previous_output = previous_outputs[-1] if previous_outputs else None
|
||||
|
||||
# If there's a previous output and it's a TaskOutput, check if we should execute
|
||||
if previous_output and isinstance(previous_output, TaskOutput):
|
||||
if not result.should_execute(previous_output):
|
||||
# Return a skipped task output instead of the task
|
||||
return result.get_skipped_task_output()
|
||||
|
||||
return result
|
||||
|
||||
return wrapper
|
||||
@@ -16,7 +16,8 @@ Example
|
||||
import ast
|
||||
import inspect
|
||||
import textwrap
|
||||
from typing import Any, Dict, List, Optional, Set, Union
|
||||
from collections import defaultdict, deque
|
||||
from typing import Any, Deque, Dict, List, Optional, Set, Union
|
||||
|
||||
|
||||
def get_possible_return_constants(function: Any) -> Optional[List[str]]:
|
||||
@@ -118,7 +119,7 @@ def calculate_node_levels(flow: Any) -> Dict[str, int]:
|
||||
- Processes router paths separately
|
||||
"""
|
||||
levels: Dict[str, int] = {}
|
||||
queue: List[str] = []
|
||||
queue: Deque[str] = deque()
|
||||
visited: Set[str] = set()
|
||||
pending_and_listeners: Dict[str, Set[str]] = {}
|
||||
|
||||
@@ -128,28 +129,35 @@ def calculate_node_levels(flow: Any) -> Dict[str, int]:
|
||||
levels[method_name] = 0
|
||||
queue.append(method_name)
|
||||
|
||||
# Precompute listener dependencies
|
||||
or_listeners = defaultdict(list)
|
||||
and_listeners = defaultdict(set)
|
||||
for listener_name, (condition_type, trigger_methods) in flow._listeners.items():
|
||||
if condition_type == "OR":
|
||||
for method in trigger_methods:
|
||||
or_listeners[method].append(listener_name)
|
||||
elif condition_type == "AND":
|
||||
and_listeners[listener_name] = set(trigger_methods)
|
||||
|
||||
# Breadth-first traversal to assign levels
|
||||
while queue:
|
||||
current = queue.pop(0)
|
||||
current = queue.popleft()
|
||||
current_level = levels[current]
|
||||
visited.add(current)
|
||||
|
||||
for listener_name, (condition_type, trigger_methods) in flow._listeners.items():
|
||||
if condition_type == "OR":
|
||||
if current in trigger_methods:
|
||||
if (
|
||||
listener_name not in levels
|
||||
or levels[listener_name] > current_level + 1
|
||||
):
|
||||
levels[listener_name] = current_level + 1
|
||||
if listener_name not in visited:
|
||||
queue.append(listener_name)
|
||||
elif condition_type == "AND":
|
||||
for listener_name in or_listeners[current]:
|
||||
if listener_name not in levels or levels[listener_name] > current_level + 1:
|
||||
levels[listener_name] = current_level + 1
|
||||
if listener_name not in visited:
|
||||
queue.append(listener_name)
|
||||
|
||||
for listener_name, required_methods in and_listeners.items():
|
||||
if current in required_methods:
|
||||
if listener_name not in pending_and_listeners:
|
||||
pending_and_listeners[listener_name] = set()
|
||||
if current in trigger_methods:
|
||||
pending_and_listeners[listener_name].add(current)
|
||||
if set(trigger_methods) == pending_and_listeners[listener_name]:
|
||||
pending_and_listeners[listener_name].add(current)
|
||||
|
||||
if required_methods == pending_and_listeners[listener_name]:
|
||||
if (
|
||||
listener_name not in levels
|
||||
or levels[listener_name] > current_level + 1
|
||||
@@ -159,22 +167,7 @@ def calculate_node_levels(flow: Any) -> Dict[str, int]:
|
||||
queue.append(listener_name)
|
||||
|
||||
# Handle router connections
|
||||
if current in flow._routers:
|
||||
router_method_name = current
|
||||
paths = flow._router_paths.get(router_method_name, [])
|
||||
for path in paths:
|
||||
for listener_name, (
|
||||
condition_type,
|
||||
trigger_methods,
|
||||
) in flow._listeners.items():
|
||||
if path in trigger_methods:
|
||||
if (
|
||||
listener_name not in levels
|
||||
or levels[listener_name] > current_level + 1
|
||||
):
|
||||
levels[listener_name] = current_level + 1
|
||||
if listener_name not in visited:
|
||||
queue.append(listener_name)
|
||||
process_router_paths(flow, current, current_level, levels, queue)
|
||||
|
||||
return levels
|
||||
|
||||
@@ -227,10 +220,7 @@ def build_ancestor_dict(flow: Any) -> Dict[str, Set[str]]:
|
||||
|
||||
|
||||
def dfs_ancestors(
|
||||
node: str,
|
||||
ancestors: Dict[str, Set[str]],
|
||||
visited: Set[str],
|
||||
flow: Any
|
||||
node: str, ancestors: Dict[str, Set[str]], visited: Set[str], flow: Any
|
||||
) -> None:
|
||||
"""
|
||||
Perform depth-first search to build ancestor relationships.
|
||||
@@ -274,7 +264,9 @@ def dfs_ancestors(
|
||||
dfs_ancestors(listener_name, ancestors, visited, flow)
|
||||
|
||||
|
||||
def is_ancestor(node: str, ancestor_candidate: str, ancestors: Dict[str, Set[str]]) -> bool:
|
||||
def is_ancestor(
|
||||
node: str, ancestor_candidate: str, ancestors: Dict[str, Set[str]]
|
||||
) -> bool:
|
||||
"""
|
||||
Check if one node is an ancestor of another.
|
||||
|
||||
@@ -339,7 +331,9 @@ def build_parent_children_dict(flow: Any) -> Dict[str, List[str]]:
|
||||
return parent_children
|
||||
|
||||
|
||||
def get_child_index(parent: str, child: str, parent_children: Dict[str, List[str]]) -> int:
|
||||
def get_child_index(
|
||||
parent: str, child: str, parent_children: Dict[str, List[str]]
|
||||
) -> int:
|
||||
"""
|
||||
Get the index of a child node in its parent's sorted children list.
|
||||
|
||||
@@ -360,3 +354,23 @@ def get_child_index(parent: str, child: str, parent_children: Dict[str, List[str
|
||||
children = parent_children.get(parent, [])
|
||||
children.sort()
|
||||
return children.index(child)
|
||||
|
||||
|
||||
def process_router_paths(flow, current, current_level, levels, queue):
|
||||
"""
|
||||
Handle the router connections for the current node.
|
||||
"""
|
||||
if current in flow._routers:
|
||||
paths = flow._router_paths.get(current, [])
|
||||
for path in paths:
|
||||
for listener_name, (
|
||||
condition_type,
|
||||
trigger_methods,
|
||||
) in flow._listeners.items():
|
||||
if path in trigger_methods:
|
||||
if (
|
||||
listener_name not in levels
|
||||
or levels[listener_name] > current_level + 1
|
||||
):
|
||||
levels[listener_name] = current_level + 1
|
||||
queue.append(listener_name)
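A toy illustration of the level assignment that calculate_node_levels performs, and of why the deque swap matters: popleft() is O(1) while list.pop(0) is O(n). The listener graph below is invented for the example.

```python
from collections import deque

# Hypothetical OR-trigger graph: start_method fires two listeners,
# one of which fires a third.
listeners = {
    "summarize": ("OR", ["start_method"]),
    "notify": ("OR", ["start_method"]),
    "archive": ("OR", ["summarize"]),
}

levels = {"start_method": 0}
queue = deque(["start_method"])

while queue:
    current = queue.popleft()  # O(1), unlike list.pop(0)
    for name, (_, triggers) in listeners.items():
        if current in triggers and (name not in levels or levels[name] > levels[current] + 1):
            levels[name] = levels[current] + 1
            queue.append(name)

print(levels)  # {'start_method': 0, 'summarize': 1, 'notify': 1, 'archive': 2}
```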
|
||||
|
||||
@@ -15,20 +15,20 @@ class Knowledge(BaseModel):
|
||||
Args:
|
||||
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
||||
storage: Optional[KnowledgeStorage] = Field(default=None)
|
||||
embedder_config: Optional[Dict[str, Any]] = None
|
||||
embedder: Optional[Dict[str, Any]] = None
|
||||
"""
|
||||
|
||||
sources: List[BaseKnowledgeSource] = Field(default_factory=list)
|
||||
model_config = ConfigDict(arbitrary_types_allowed=True)
|
||||
storage: Optional[KnowledgeStorage] = Field(default=None)
|
||||
embedder_config: Optional[Dict[str, Any]] = None
|
||||
embedder: Optional[Dict[str, Any]] = None
|
||||
collection_name: Optional[str] = None
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
collection_name: str,
|
||||
sources: List[BaseKnowledgeSource],
|
||||
embedder_config: Optional[Dict[str, Any]] = None,
|
||||
embedder: Optional[Dict[str, Any]] = None,
|
||||
storage: Optional[KnowledgeStorage] = None,
|
||||
**data,
|
||||
):
|
||||
@@ -37,25 +37,23 @@ class Knowledge(BaseModel):
|
||||
self.storage = storage
|
||||
else:
|
||||
self.storage = KnowledgeStorage(
|
||||
embedder_config=embedder_config, collection_name=collection_name
|
||||
embedder=embedder, collection_name=collection_name
|
||||
)
|
||||
self.sources = sources
|
||||
self.storage.initialize_knowledge_storage()
|
||||
for source in sources:
|
||||
source.storage = self.storage
|
||||
source.add()
|
||||
self._add_sources()
|
||||
|
||||
def query(self, query: List[str], limit: int = 3) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Query across all knowledge sources to find the most relevant information.
|
||||
Returns the top_k most relevant chunks.
|
||||
|
||||
|
||||
Raises:
|
||||
ValueError: If storage is not initialized.
|
||||
"""
|
||||
if self.storage is None:
|
||||
raise ValueError("Storage is not initialized.")
|
||||
|
||||
|
||||
results = self.storage.search(
|
||||
query,
|
||||
limit,
|
||||
@@ -63,6 +61,15 @@ class Knowledge(BaseModel):
|
||||
return results
|
||||
|
||||
def _add_sources(self):
|
||||
for source in self.sources:
|
||||
source.storage = self.storage
|
||||
source.add()
|
||||
try:
|
||||
for source in self.sources:
|
||||
source.storage = self.storage
|
||||
source.add()
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
def reset(self) -> None:
|
||||
if self.storage:
|
||||
self.storage.reset()
|
||||
else:
|
||||
raise ValueError("Storage is not initialized.")
|
||||
|
||||
@@ -29,7 +29,13 @@ class BaseFileKnowledgeSource(BaseKnowledgeSource, ABC):
|
||||
def validate_file_path(cls, v, info):
|
||||
"""Validate that at least one of file_path or file_paths is provided."""
|
||||
# Single check if both are None, O(1) instead of nested conditions
|
||||
if v is None and info.data.get("file_path" if info.field_name == "file_paths" else "file_paths") is None:
|
||||
if (
|
||||
v is None
|
||||
and info.data.get(
|
||||
"file_path" if info.field_name == "file_paths" else "file_paths"
|
||||
)
|
||||
is None
|
||||
):
|
||||
raise ValueError("Either file_path or file_paths must be provided")
|
||||
return v
|
||||
|
||||
|
||||
@@ -8,6 +8,7 @@ try:
|
||||
from docling.exceptions import ConversionError
|
||||
from docling_core.transforms.chunker.hierarchical_chunker import HierarchicalChunker
|
||||
from docling_core.types.doc.document import DoclingDocument
|
||||
|
||||
DOCLING_AVAILABLE = True
|
||||
except ImportError:
|
||||
DOCLING_AVAILABLE = False
|
||||
@@ -38,8 +39,8 @@ class CrewDoclingSource(BaseKnowledgeSource):
|
||||
file_paths: List[Union[Path, str]] = Field(default_factory=list)
|
||||
chunks: List[str] = Field(default_factory=list)
|
||||
safe_file_paths: List[Union[Path, str]] = Field(default_factory=list)
|
||||
content: List[DoclingDocument] = Field(default_factory=list)
|
||||
document_converter: DocumentConverter = Field(
|
||||
content: List["DoclingDocument"] = Field(default_factory=list)
|
||||
document_converter: "DocumentConverter" = Field(
|
||||
default_factory=lambda: DocumentConverter(
|
||||
allowed_formats=[
|
||||
InputFormat.MD,
|
||||
@@ -65,7 +66,7 @@ class CrewDoclingSource(BaseKnowledgeSource):
|
||||
self.safe_file_paths = self.validate_content()
|
||||
self.content = self._load_content()
|
||||
|
||||
def _load_content(self) -> List[DoclingDocument]:
|
||||
def _load_content(self) -> List["DoclingDocument"]:
|
||||
try:
|
||||
return self._convert_source_to_docling_documents()
|
||||
except ConversionError as e:
|
||||
@@ -87,11 +88,11 @@ class CrewDoclingSource(BaseKnowledgeSource):
|
||||
self.chunks.extend(list(new_chunks_iterable))
|
||||
self._save_documents()
|
||||
|
||||
def _convert_source_to_docling_documents(self) -> List[DoclingDocument]:
|
||||
def _convert_source_to_docling_documents(self) -> List["DoclingDocument"]:
|
||||
conv_results_iter = self.document_converter.convert_all(self.safe_file_paths)
|
||||
return [result.document for result in conv_results_iter]
|
||||
|
||||
def _chunk_doc(self, doc: DoclingDocument) -> Iterator[str]:
|
||||
def _chunk_doc(self, doc: "DoclingDocument") -> Iterator[str]:
|
||||
chunker = HierarchicalChunker()
|
||||
for chunk in chunker.chunk(doc):
|
||||
yield chunk.text
|
||||
|
||||
@@ -1,28 +1,138 @@
|
||||
from pathlib import Path
|
||||
from typing import Dict, List
|
||||
from typing import Dict, Iterator, List, Optional, Union
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from crewai.knowledge.source.base_file_knowledge_source import BaseFileKnowledgeSource
|
||||
from pydantic import Field, field_validator
|
||||
|
||||
from crewai.knowledge.source.base_knowledge_source import BaseKnowledgeSource
|
||||
from crewai.utilities.constants import KNOWLEDGE_DIRECTORY
|
||||
from crewai.utilities.logger import Logger
|
||||
|
||||
|
||||
class ExcelKnowledgeSource(BaseFileKnowledgeSource):
|
||||
class ExcelKnowledgeSource(BaseKnowledgeSource):
|
||||
"""A knowledge source that stores and queries Excel file content using embeddings."""
|
||||
|
||||
def load_content(self) -> Dict[Path, str]:
|
||||
"""Load and preprocess Excel file content."""
|
||||
pd = self._import_dependencies()
|
||||
# override content to be a dict of file paths to sheet names to csv content
|
||||
|
||||
_logger: Logger = Logger(verbose=True)
|
||||
|
||||
file_path: Optional[Union[Path, List[Path], str, List[str]]] = Field(
|
||||
default=None,
|
||||
description="[Deprecated] The path to the file. Use file_paths instead.",
|
||||
)
|
||||
file_paths: Optional[Union[Path, List[Path], str, List[str]]] = Field(
|
||||
default_factory=list, description="The path to the file"
|
||||
)
|
||||
chunks: List[str] = Field(default_factory=list)
|
||||
content: Dict[Path, Dict[str, str]] = Field(default_factory=dict)
|
||||
safe_file_paths: List[Path] = Field(default_factory=list)
|
||||
|
||||
@field_validator("file_path", "file_paths", mode="before")
|
||||
def validate_file_path(cls, v, info):
|
||||
"""Validate that at least one of file_path or file_paths is provided."""
|
||||
# Single check if both are None, O(1) instead of nested conditions
|
||||
if (
|
||||
v is None
|
||||
and info.data.get(
|
||||
"file_path" if info.field_name == "file_paths" else "file_paths"
|
||||
)
|
||||
is None
|
||||
):
|
||||
raise ValueError("Either file_path or file_paths must be provided")
|
||||
return v
|
||||
|
||||
def _process_file_paths(self) -> List[Path]:
|
||||
"""Convert file_path to a list of Path objects."""
|
||||
|
||||
if hasattr(self, "file_path") and self.file_path is not None:
|
||||
self._logger.log(
|
||||
"warning",
|
||||
"The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
|
||||
color="yellow",
|
||||
)
|
||||
self.file_paths = self.file_path
|
||||
|
||||
if self.file_paths is None:
|
||||
raise ValueError("Your source must be provided with a file_paths: []")
|
||||
|
||||
# Convert single path to list
|
||||
path_list: List[Union[Path, str]] = (
|
||||
[self.file_paths]
|
||||
if isinstance(self.file_paths, (str, Path))
|
||||
else list(self.file_paths)
|
||||
if isinstance(self.file_paths, list)
|
||||
else []
|
||||
)
|
||||
|
||||
if not path_list:
|
||||
raise ValueError(
|
||||
"file_path/file_paths must be a Path, str, or a list of these types"
|
||||
)
|
||||
|
||||
return [self.convert_to_path(path) for path in path_list]
|
||||
|
||||
def validate_content(self):
|
||||
"""Validate the paths."""
|
||||
for path in self.safe_file_paths:
|
||||
if not path.exists():
|
||||
self._logger.log(
|
||||
"error",
|
||||
f"File not found: {path}. Try adding sources to the knowledge directory. If it's inside the knowledge directory, use the relative path.",
|
||||
color="red",
|
||||
)
|
||||
raise FileNotFoundError(f"File not found: {path}")
|
||||
if not path.is_file():
|
||||
self._logger.log(
|
||||
"error",
|
||||
f"Path is not a file: {path}",
|
||||
color="red",
|
||||
)
|
||||
|
||||
def model_post_init(self, _) -> None:
|
||||
if self.file_path:
|
||||
self._logger.log(
|
||||
"warning",
|
||||
"The 'file_path' attribute is deprecated and will be removed in a future version. Please use 'file_paths' instead.",
|
||||
color="yellow",
|
||||
)
|
||||
self.file_paths = self.file_path
|
||||
self.safe_file_paths = self._process_file_paths()
|
||||
self.validate_content()
|
||||
self.content = self._load_content()
|
||||
|
||||
def _load_content(self) -> Dict[Path, Dict[str, str]]:
|
||||
"""Load and preprocess Excel file content from multiple sheets.
|
||||
|
||||
Each sheet's content is converted to CSV format and stored.
|
||||
|
||||
Returns:
|
||||
Dict[Path, Dict[str, str]]: A mapping of file paths to their respective sheet contents.
|
||||
|
||||
Raises:
|
||||
ImportError: If required dependencies are missing.
|
||||
FileNotFoundError: If the specified Excel file cannot be opened.
|
||||
"""
|
||||
pd = self._import_dependencies()
|
||||
content_dict = {}
|
||||
for file_path in self.safe_file_paths:
|
||||
file_path = self.convert_to_path(file_path)
|
||||
df = pd.read_excel(file_path)
|
||||
content = df.to_csv(index=False)
|
||||
content_dict[file_path] = content
|
||||
with pd.ExcelFile(file_path) as xl:
|
||||
sheet_dict = {
|
||||
str(sheet_name): str(
|
||||
pd.read_excel(xl, sheet_name).to_csv(index=False)
|
||||
)
|
||||
for sheet_name in xl.sheet_names
|
||||
}
|
||||
content_dict[file_path] = sheet_dict
|
||||
return content_dict
|
||||
|
||||
def convert_to_path(self, path: Union[Path, str]) -> Path:
|
||||
"""Convert a path to a Path object."""
|
||||
return Path(KNOWLEDGE_DIRECTORY + "/" + path) if isinstance(path, str) else path
|
||||
|
||||
def _import_dependencies(self):
|
||||
"""Dynamically import dependencies."""
|
||||
try:
|
||||
import openpyxl # noqa
|
||||
import pandas as pd
|
||||
|
||||
return pd
|
||||
@@ -38,10 +148,14 @@ class ExcelKnowledgeSource(BaseFileKnowledgeSource):
|
||||
and save the embeddings.
|
||||
"""
|
||||
# Convert dictionary values to a single string if content is a dictionary
|
||||
if isinstance(self.content, dict):
|
||||
content_str = "\n".join(str(value) for value in self.content.values())
|
||||
else:
|
||||
content_str = str(self.content)
|
||||
# Updated to account for .xlsx workbooks with multiple tabs/sheets
|
||||
content_str = ""
|
||||
for value in self.content.values():
|
||||
if isinstance(value, dict):
|
||||
for sheet_value in value.values():
|
||||
content_str += str(sheet_value) + "\n"
|
||||
else:
|
||||
content_str += str(value) + "\n"
|
||||
|
||||
new_chunks = self._chunk_text(content_str)
|
||||
self.chunks.extend(new_chunks)
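The multi-sheet handling above boils down to the pandas pattern sketched here; the workbook name is illustrative and openpyxl must be installed for .xlsx files.

```python
import pandas as pd  # requires openpyxl for .xlsx workbooks

# Illustrative workbook; each tab is flattened to a CSV string keyed by sheet name.
with pd.ExcelFile("reports.xlsx") as xl:
    sheets = {
        str(name): pd.read_excel(xl, name).to_csv(index=False)
        for name in xl.sheet_names
    }

# Concatenate all sheets before chunking/embedding, mirroring add() above.
content_str = "\n".join(sheets.values())
```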
|
||||
|
||||
@@ -48,11 +48,11 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
embedder_config: Optional[Dict[str, Any]] = None,
|
||||
embedder: Optional[Dict[str, Any]] = None,
|
||||
collection_name: Optional[str] = None,
|
||||
):
|
||||
self.collection_name = collection_name
|
||||
self._set_embedder_config(embedder_config)
|
||||
self._set_embedder_config(embedder)
|
||||
|
||||
def search(
|
||||
self,
|
||||
@@ -76,7 +76,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
"context": fetched["documents"][0][i], # type: ignore
|
||||
"score": fetched["distances"][0][i], # type: ignore
|
||||
}
|
||||
if result["score"] >= score_threshold: # type: ignore
|
||||
if result["score"] >= score_threshold:
|
||||
results.append(result)
|
||||
return results
|
||||
else:
|
||||
@@ -99,7 +99,7 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
)
|
||||
if self.app:
|
||||
self.collection = self.app.get_or_create_collection(
|
||||
name=collection_name, embedding_function=self.embedder_config
|
||||
name=collection_name, embedding_function=self.embedder
|
||||
)
|
||||
else:
|
||||
raise Exception("Vector Database Client not initialized")
|
||||
@@ -187,17 +187,15 @@ class KnowledgeStorage(BaseKnowledgeStorage):
|
||||
api_key=os.getenv("OPENAI_API_KEY"), model_name="text-embedding-3-small"
|
||||
)
|
||||
|
||||
def _set_embedder_config(
|
||||
self, embedder_config: Optional[Dict[str, Any]] = None
|
||||
) -> None:
|
||||
def _set_embedder_config(self, embedder: Optional[Dict[str, Any]] = None) -> None:
|
||||
"""Set the embedding configuration for the knowledge storage.
|
||||
|
||||
Args:
|
||||
embedder_config (Optional[Dict[str, Any]]): Configuration dictionary for the embedder.
|
||||
If None or empty, defaults to the default embedding function.
|
||||
"""
|
||||
self.embedder_config = (
|
||||
EmbeddingConfigurator().configure_embedder(embedder_config)
|
||||
if embedder_config
|
||||
self.embedder = (
|
||||
EmbeddingConfigurator().configure_embedder(embedder)
|
||||
if embedder
|
||||
else self._create_default_embedding_function()
|
||||
)
|
||||
|
||||
@@ -5,17 +5,42 @@ import sys
|
||||
import threading
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
from typing import Any, Dict, List, Optional, Union, cast
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
List,
|
||||
Literal,
|
||||
Optional,
|
||||
Type,
|
||||
TypedDict,
|
||||
Union,
|
||||
cast,
|
||||
)
|
||||
|
||||
from dotenv import load_dotenv
|
||||
from pydantic import BaseModel
|
||||
|
||||
from crewai.utilities.events.llm_events import (
|
||||
LLMCallCompletedEvent,
|
||||
LLMCallFailedEvent,
|
||||
LLMCallStartedEvent,
|
||||
LLMCallType,
|
||||
LLMStreamChunkEvent,
|
||||
)
|
||||
from crewai.utilities.events.tool_usage_events import ToolExecutionErrorEvent
|
||||
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore", UserWarning)
|
||||
import litellm
|
||||
from litellm import Choices, get_supported_openai_params
|
||||
from litellm import Choices
|
||||
from litellm.litellm_core_utils.get_supported_openai_params import (
|
||||
get_supported_openai_params,
|
||||
)
|
||||
from litellm.types.utils import ModelResponse
|
||||
from litellm.utils import supports_response_schema
|
||||
|
||||
|
||||
from crewai.utilities.events import crewai_event_bus
|
||||
from crewai.utilities.exceptions.context_window_exceeding_exception import (
|
||||
LLMContextLengthExceededException,
|
||||
)
|
||||
@@ -53,6 +78,7 @@ LLM_CONTEXT_WINDOW_SIZES = {
|
||||
"gpt-4-turbo": 128000,
|
||||
"o1-preview": 128000,
|
||||
"o1-mini": 128000,
|
||||
"o3-mini": 200000, # Based on official o3-mini specifications
|
||||
# gemini
|
||||
"gemini-2.0-flash": 1048576,
|
||||
"gemini-1.5-pro": 2097152,
|
||||
@@ -114,6 +140,17 @@ def suppress_warnings():
|
||||
sys.stderr = old_stderr
|
||||
|
||||
|
||||
class Delta(TypedDict):
|
||||
content: Optional[str]
|
||||
role: Optional[str]
|
||||
|
||||
|
||||
class StreamingChoices(TypedDict):
|
||||
delta: Delta
|
||||
index: int
|
||||
finish_reason: Optional[str]
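For reference, a streaming chunk choice shaped like these TypedDicts could look as follows; the values are purely illustrative.

```python
chunk_choice: StreamingChoices = {
    "delta": {"content": "Hel", "role": "assistant"},
    "index": 0,
    "finish_reason": None,
}
```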
|
||||
|
||||
|
||||
class LLM:
|
||||
def __init__(
|
||||
self,
|
||||
@@ -128,21 +165,24 @@ class LLM:
|
||||
presence_penalty: Optional[float] = None,
|
||||
frequency_penalty: Optional[float] = None,
|
||||
logit_bias: Optional[Dict[int, float]] = None,
|
||||
response_format: Optional[Dict[str, Any]] = None,
|
||||
response_format: Optional[Type[BaseModel]] = None,
|
||||
seed: Optional[int] = None,
|
||||
logprobs: Optional[int] = None,
|
||||
top_logprobs: Optional[int] = None,
|
||||
base_url: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
api_version: Optional[str] = None,
|
||||
api_key: Optional[str] = None,
|
||||
callbacks: List[Any] = [],
|
||||
reasoning_effort: Optional[Literal["none", "low", "medium", "high"]] = None,
|
||||
stream: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
self.model = model
|
||||
self.timeout = timeout
|
||||
self.temperature = temperature
|
||||
self.top_p = top_p
|
||||
self.n = n
|
||||
self.stop = stop
|
||||
self.max_completion_tokens = max_completion_tokens
|
||||
self.max_tokens = max_tokens
|
||||
self.presence_penalty = presence_penalty
|
||||
@@ -153,129 +193,657 @@ class LLM:
|
||||
self.logprobs = logprobs
|
||||
self.top_logprobs = top_logprobs
|
||||
self.base_url = base_url
|
||||
self.api_base = api_base
|
||||
self.api_version = api_version
|
||||
self.api_key = api_key
|
||||
self.callbacks = callbacks
|
||||
self.context_window_size = 0
|
||||
self.reasoning_effort = reasoning_effort
|
||||
self.additional_params = kwargs
|
||||
self.is_anthropic = self._is_anthropic_model(model)
|
||||
self.stream = stream
|
||||
|
||||
litellm.drop_params = True
|
||||
|
||||
# Normalize self.stop to always be a List[str]
|
||||
if stop is None:
|
||||
self.stop: List[str] = []
|
||||
elif isinstance(stop, str):
|
||||
self.stop = [stop]
|
||||
else:
|
||||
self.stop = stop
|
||||
|
||||
self.set_callbacks(callbacks)
|
||||
self.set_env_callbacks()
|
||||
|
||||
def call(
|
||||
def _is_anthropic_model(self, model: str) -> bool:
|
||||
"""Determine if the model is from Anthropic provider.
|
||||
|
||||
Args:
|
||||
model: The model identifier string.
|
||||
|
||||
Returns:
|
||||
bool: True if the model is from Anthropic, False otherwise.
|
||||
"""
|
||||
ANTHROPIC_PREFIXES = ("anthropic/", "claude-", "claude/")
|
||||
return any(prefix in model.lower() for prefix in ANTHROPIC_PREFIXES)
|
||||
|
||||
def _prepare_completion_params(
|
||||
self,
|
||||
messages: List[Dict[str, str]],
|
||||
messages: Union[str, List[Dict[str, str]]],
|
||||
tools: Optional[List[dict]] = None,
|
||||
) -> Dict[str, Any]:
|
||||
"""Prepare parameters for the completion call.
|
||||
|
||||
Args:
|
||||
messages: Input messages for the LLM
|
||||
tools: Optional list of tool schemas
|
||||
callbacks: Optional list of callback functions
|
||||
available_functions: Optional dict of available functions
|
||||
|
||||
Returns:
|
||||
Dict[str, Any]: Parameters for the completion call
|
||||
"""
|
||||
# --- 1) Format messages according to provider requirements
|
||||
if isinstance(messages, str):
|
||||
messages = [{"role": "user", "content": messages}]
|
||||
formatted_messages = self._format_messages_for_provider(messages)
|
||||
|
||||
# --- 2) Prepare the parameters for the completion call
|
||||
params = {
|
||||
"model": self.model,
|
||||
"messages": formatted_messages,
|
||||
"timeout": self.timeout,
|
||||
"temperature": self.temperature,
|
||||
"top_p": self.top_p,
|
||||
"n": self.n,
|
||||
"stop": self.stop,
|
||||
"max_tokens": self.max_tokens or self.max_completion_tokens,
|
||||
"presence_penalty": self.presence_penalty,
|
||||
"frequency_penalty": self.frequency_penalty,
|
||||
"logit_bias": self.logit_bias,
|
||||
"response_format": self.response_format,
|
||||
"seed": self.seed,
|
||||
"logprobs": self.logprobs,
|
||||
"top_logprobs": self.top_logprobs,
|
||||
"api_base": self.api_base,
|
||||
"base_url": self.base_url,
|
||||
"api_version": self.api_version,
|
||||
"api_key": self.api_key,
|
||||
"stream": self.stream,
|
||||
"tools": tools,
|
||||
"reasoning_effort": self.reasoning_effort,
|
||||
**self.additional_params,
|
||||
}
|
||||
|
||||
# Remove None values from params
|
||||
return {k: v for k, v in params.items() if v is not None}
|
||||
|
||||
def _handle_streaming_response(
|
||||
self,
|
||||
params: Dict[str, Any],
|
||||
callbacks: Optional[List[Any]] = None,
|
||||
available_functions: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""
|
||||
High-level call method that:
|
||||
1) Calls litellm.completion
|
||||
2) Checks for function/tool calls
|
||||
3) If a tool call is found:
|
||||
a) executes the function
|
||||
b) returns the result
|
||||
4) If no tool call, returns the text response
|
||||
"""Handle a streaming response from the LLM.
|
||||
|
||||
:param messages: The conversation messages
|
||||
:param tools: Optional list of function schemas for function calling
|
||||
:param callbacks: Optional list of callbacks
|
||||
:param available_functions: A dictionary mapping function_name -> actual Python function
|
||||
:return: Final text response from the LLM or the tool result
|
||||
Args:
|
||||
params: Parameters for the completion call
|
||||
callbacks: Optional list of callback functions
|
||||
available_functions: Dict of available functions
|
||||
|
||||
Returns:
|
||||
str: The complete response text
|
||||
|
||||
Raises:
|
||||
Exception: If no content is received from the streaming response
|
||||
"""
|
||||
# --- 1) Initialize response tracking
|
||||
full_response = ""
|
||||
last_chunk = None
|
||||
chunk_count = 0
|
||||
usage_info = None
|
||||
|
||||
# --- 2) Make sure stream is set to True and include usage metrics
|
||||
params["stream"] = True
|
||||
params["stream_options"] = {"include_usage": True}
|
||||
|
||||
try:
|
||||
# --- 3) Process each chunk in the stream
|
||||
for chunk in litellm.completion(**params):
|
||||
chunk_count += 1
|
||||
last_chunk = chunk
|
||||
|
||||
# Extract content from the chunk
|
||||
chunk_content = None
|
||||
|
||||
# Safely extract content from various chunk formats
|
||||
try:
|
||||
# Try to access choices safely
|
||||
choices = None
|
||||
if isinstance(chunk, dict) and "choices" in chunk:
|
||||
choices = chunk["choices"]
|
||||
elif hasattr(chunk, "choices"):
|
||||
# Check if choices is not a type but an actual attribute with value
|
||||
if not isinstance(getattr(chunk, "choices"), type):
|
||||
choices = getattr(chunk, "choices")
|
||||
|
||||
# Try to extract usage information if available
|
||||
if isinstance(chunk, dict) and "usage" in chunk:
|
||||
usage_info = chunk["usage"]
|
||||
elif hasattr(chunk, "usage"):
|
||||
# Check if usage is not a type but an actual attribute with value
|
||||
if not isinstance(getattr(chunk, "usage"), type):
|
||||
usage_info = getattr(chunk, "usage")
|
||||
|
||||
if choices and len(choices) > 0:
|
||||
choice = choices[0]
|
||||
|
||||
# Handle different delta formats
|
||||
delta = None
|
||||
if isinstance(choice, dict) and "delta" in choice:
|
||||
delta = choice["delta"]
|
||||
elif hasattr(choice, "delta"):
|
||||
delta = getattr(choice, "delta")
|
||||
|
||||
# Extract content from delta
|
||||
if delta:
|
||||
# Handle dict format
|
||||
if isinstance(delta, dict):
|
||||
if "content" in delta and delta["content"] is not None:
|
||||
chunk_content = delta["content"]
|
||||
# Handle object format
|
||||
elif hasattr(delta, "content"):
|
||||
chunk_content = getattr(delta, "content")
|
||||
|
||||
# Handle case where content might be None or empty
|
||||
if chunk_content is None and isinstance(delta, dict):
|
||||
# Some models might send empty content chunks
|
||||
chunk_content = ""
|
||||
except Exception as e:
|
||||
logging.debug(f"Error extracting content from chunk: {e}")
|
||||
logging.debug(f"Chunk format: {type(chunk)}, content: {chunk}")
|
||||
|
||||
# Only add non-None content to the response
|
||||
if chunk_content is not None:
|
||||
# Add the chunk content to the full response
|
||||
full_response += chunk_content
|
||||
|
||||
# Emit the chunk event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LLMStreamChunkEvent(chunk=chunk_content),
|
||||
)
|
||||
|
||||
# --- 4) Fallback to non-streaming if no content received
|
||||
if not full_response.strip() and chunk_count == 0:
|
||||
logging.warning(
|
||||
"No chunks received in streaming response, falling back to non-streaming"
|
||||
)
|
||||
non_streaming_params = params.copy()
|
||||
non_streaming_params["stream"] = False
|
||||
non_streaming_params.pop(
|
||||
"stream_options", None
|
||||
) # Remove stream_options for non-streaming call
|
||||
return self._handle_non_streaming_response(
|
||||
non_streaming_params, callbacks, available_functions
|
||||
)
|
||||
|
||||
# --- 5) Handle empty response with chunks
|
||||
if not full_response.strip() and chunk_count > 0:
|
||||
logging.warning(
|
||||
f"Received {chunk_count} chunks but no content was extracted"
|
||||
)
|
||||
if last_chunk is not None:
|
||||
try:
|
||||
# Try to extract content from the last chunk's message
|
||||
choices = None
|
||||
if isinstance(last_chunk, dict) and "choices" in last_chunk:
|
||||
choices = last_chunk["choices"]
|
||||
elif hasattr(last_chunk, "choices"):
|
||||
if not isinstance(getattr(last_chunk, "choices"), type):
|
||||
choices = getattr(last_chunk, "choices")
|
||||
|
||||
if choices and len(choices) > 0:
|
||||
choice = choices[0]
|
||||
|
||||
# Try to get content from message
|
||||
message = None
|
||||
if isinstance(choice, dict) and "message" in choice:
|
||||
message = choice["message"]
|
||||
elif hasattr(choice, "message"):
|
||||
message = getattr(choice, "message")
|
||||
|
||||
if message:
|
||||
content = None
|
||||
if isinstance(message, dict) and "content" in message:
|
||||
content = message["content"]
|
||||
elif hasattr(message, "content"):
|
||||
content = getattr(message, "content")
|
||||
|
||||
if content:
|
||||
full_response = content
|
||||
logging.info(
|
||||
f"Extracted content from last chunk message: {full_response}"
|
||||
)
|
||||
except Exception as e:
|
||||
logging.debug(f"Error extracting content from last chunk: {e}")
|
||||
logging.debug(
|
||||
f"Last chunk format: {type(last_chunk)}, content: {last_chunk}"
|
||||
)
|
||||
|
||||
# --- 6) If still empty, raise an error instead of using a default response
|
||||
if not full_response.strip():
|
||||
raise Exception(
|
||||
"No content received from streaming response. Received empty chunks or failed to extract content."
|
||||
)
|
||||
|
||||
# --- 7) Check for tool calls in the final response
|
||||
tool_calls = None
|
||||
try:
|
||||
if last_chunk:
|
||||
choices = None
|
||||
if isinstance(last_chunk, dict) and "choices" in last_chunk:
|
||||
choices = last_chunk["choices"]
|
||||
elif hasattr(last_chunk, "choices"):
|
||||
if not isinstance(getattr(last_chunk, "choices"), type):
|
||||
choices = getattr(last_chunk, "choices")
|
||||
|
||||
if choices and len(choices) > 0:
|
||||
choice = choices[0]
|
||||
|
||||
message = None
|
||||
if isinstance(choice, dict) and "message" in choice:
|
||||
message = choice["message"]
|
||||
elif hasattr(choice, "message"):
|
||||
message = getattr(choice, "message")
|
||||
|
||||
if message:
|
||||
if isinstance(message, dict) and "tool_calls" in message:
|
||||
tool_calls = message["tool_calls"]
|
||||
elif hasattr(message, "tool_calls"):
|
||||
tool_calls = getattr(message, "tool_calls")
|
||||
except Exception as e:
|
||||
logging.debug(f"Error checking for tool calls: {e}")
|
||||
|
||||
# --- 8) If no tool calls or no available functions, return the text response directly
|
||||
if not tool_calls or not available_functions:
|
||||
# Log token usage if available in streaming mode
|
||||
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
|
||||
# Emit completion event and return response
|
||||
self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
|
||||
return full_response
|
||||
|
||||
# --- 9) Handle tool calls if present
|
||||
tool_result = self._handle_tool_call(tool_calls, available_functions)
|
||||
if tool_result is not None:
|
||||
return tool_result
|
||||
|
||||
# --- 10) Log token usage if available in streaming mode
|
||||
self._handle_streaming_callbacks(callbacks, usage_info, last_chunk)
|
||||
|
||||
# --- 11) Emit completion event and return response
|
||||
self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
|
||||
return full_response
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error in streaming response: {str(e)}")
|
||||
if full_response.strip():
|
||||
logging.warning(f"Returning partial response despite error: {str(e)}")
|
||||
self._handle_emit_call_events(full_response, LLMCallType.LLM_CALL)
|
||||
return full_response
|
||||
|
||||
# Emit failed event and re-raise the exception
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LLMCallFailedEvent(error=str(e)),
|
||||
)
|
||||
raise Exception(f"Failed to get streaming response: {str(e)}")
|
||||
|
||||
def _handle_streaming_callbacks(
|
||||
self,
|
||||
callbacks: Optional[List[Any]],
|
||||
usage_info: Optional[Dict[str, Any]],
|
||||
last_chunk: Optional[Any],
|
||||
) -> None:
|
||||
"""Handle callbacks with usage info for streaming responses.
|
||||
|
||||
Args:
|
||||
callbacks: Optional list of callback functions
|
||||
usage_info: Usage information collected during streaming
|
||||
last_chunk: The last chunk received from the streaming response
|
||||
"""
|
||||
if callbacks and len(callbacks) > 0:
|
||||
for callback in callbacks:
|
||||
if hasattr(callback, "log_success_event"):
|
||||
# Use the usage_info we've been tracking
|
||||
if not usage_info:
|
||||
# Try to get usage from the last chunk if we haven't already
|
||||
try:
|
||||
if last_chunk:
|
||||
if (
|
||||
isinstance(last_chunk, dict)
|
||||
and "usage" in last_chunk
|
||||
):
|
||||
usage_info = last_chunk["usage"]
|
||||
elif hasattr(last_chunk, "usage"):
|
||||
if not isinstance(
|
||||
getattr(last_chunk, "usage"), type
|
||||
):
|
||||
usage_info = getattr(last_chunk, "usage")
|
||||
except Exception as e:
|
||||
logging.debug(f"Error extracting usage info: {e}")
|
||||
|
||||
if usage_info:
|
||||
callback.log_success_event(
|
||||
kwargs={}, # We don't have the original params here
|
||||
response_obj={"usage": usage_info},
|
||||
start_time=0,
|
||||
end_time=0,
|
||||
)
|
||||
|
||||
def _handle_non_streaming_response(
|
||||
self,
|
||||
params: Dict[str, Any],
|
||||
callbacks: Optional[List[Any]] = None,
|
||||
available_functions: Optional[Dict[str, Any]] = None,
|
||||
) -> str:
|
||||
"""Handle a non-streaming response from the LLM.
|
||||
|
||||
Args:
|
||||
params: Parameters for the completion call
|
||||
callbacks: Optional list of callback functions
|
||||
available_functions: Dict of available functions
|
||||
|
||||
Returns:
|
||||
str: The response text
|
||||
"""
|
||||
# --- 1) Make the completion call
|
||||
response = litellm.completion(**params)
|
||||
|
||||
# --- 2) Extract response message and content
|
||||
response_message = cast(Choices, cast(ModelResponse, response).choices)[
|
||||
0
|
||||
].message
|
||||
text_response = response_message.content or ""
|
||||
|
||||
# --- 3) Handle callbacks with usage info
|
||||
if callbacks and len(callbacks) > 0:
|
||||
for callback in callbacks:
|
||||
if hasattr(callback, "log_success_event"):
|
||||
usage_info = getattr(response, "usage", None)
|
||||
if usage_info:
|
||||
callback.log_success_event(
|
||||
kwargs=params,
|
||||
response_obj={"usage": usage_info},
|
||||
start_time=0,
|
||||
end_time=0,
|
||||
)
|
||||
|
||||
# --- 4) Check for tool calls
|
||||
tool_calls = getattr(response_message, "tool_calls", [])
|
||||
|
||||
# --- 5) If no tool calls or no available functions, return the text response directly
|
||||
if not tool_calls or not available_functions:
|
||||
self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL)
|
||||
return text_response
|
||||
|
||||
# --- 6) Handle tool calls if present
|
||||
tool_result = self._handle_tool_call(tool_calls, available_functions)
|
||||
if tool_result is not None:
|
||||
return tool_result
|
||||
|
||||
# --- 7) If tool call handling didn't return a result, emit completion event and return text response
|
||||
self._handle_emit_call_events(text_response, LLMCallType.LLM_CALL)
|
||||
return text_response
|
||||
|
||||
def _handle_tool_call(
|
||||
self,
|
||||
tool_calls: List[Any],
|
||||
available_functions: Optional[Dict[str, Any]] = None,
|
||||
) -> Optional[str]:
|
||||
"""Handle a tool call from the LLM.
|
||||
|
||||
Args:
|
||||
tool_calls: List of tool calls from the LLM
|
||||
available_functions: Dict of available functions
|
||||
|
||||
Returns:
|
||||
Optional[str]: The result of the tool call, or None if no tool call was made
|
||||
"""
|
||||
# --- 1) Validate tool calls and available functions
|
||||
if not tool_calls or not available_functions:
|
||||
return None
|
||||
|
||||
# --- 2) Extract function name from first tool call
|
||||
tool_call = tool_calls[0]
|
||||
function_name = tool_call.function.name
|
||||
function_args = {} # Initialize to empty dict to avoid unbound variable
|
||||
|
||||
# --- 3) Check if function is available
|
||||
if function_name in available_functions:
|
||||
try:
|
||||
# --- 3.1) Parse function arguments
|
||||
function_args = json.loads(tool_call.function.arguments)
|
||||
fn = available_functions[function_name]
|
||||
|
||||
# --- 3.2) Execute function
|
||||
result = fn(**function_args)
|
||||
|
||||
# --- 3.3) Emit success event
|
||||
self._handle_emit_call_events(result, LLMCallType.TOOL_CALL)
|
||||
return result
|
||||
except Exception as e:
|
||||
# --- 3.4) Handle execution errors
|
||||
fn = available_functions.get(
|
||||
function_name, lambda: None
|
||||
) # Ensure fn is always a callable
|
||||
logging.error(f"Error executing function '{function_name}': {e}")
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=ToolExecutionErrorEvent(
|
||||
tool_name=function_name,
|
||||
tool_args=function_args,
|
||||
tool_class=fn,
|
||||
error=str(e),
|
||||
),
|
||||
)
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LLMCallFailedEvent(error=f"Tool execution error: {str(e)}"),
|
||||
)
|
||||
return None
|
||||
|
||||
def call(
|
||||
self,
|
||||
messages: Union[str, List[Dict[str, str]]],
|
||||
tools: Optional[List[dict]] = None,
|
||||
callbacks: Optional[List[Any]] = None,
|
||||
available_functions: Optional[Dict[str, Any]] = None,
|
||||
) -> Union[str, Any]:
|
||||
"""High-level LLM call method.
|
||||
|
||||
Args:
|
||||
messages: Input messages for the LLM.
|
||||
Can be a string or list of message dictionaries.
|
||||
If string, it will be converted to a single user message.
|
||||
If list, each dict must have 'role' and 'content' keys.
|
||||
tools: Optional list of tool schemas for function calling.
|
||||
Each tool should define its name, description, and parameters.
|
||||
callbacks: Optional list of callback functions to be executed
|
||||
during and after the LLM call.
|
||||
available_functions: Optional dict mapping function names to callables
|
||||
that can be invoked by the LLM.
|
||||
|
||||
Returns:
|
||||
Union[str, Any]: Either a text response from the LLM (str) or
|
||||
the result of a tool function call (Any).
|
||||
|
||||
Raises:
|
||||
TypeError: If messages format is invalid
|
||||
ValueError: If response format is not supported
|
||||
LLMContextLengthExceededException: If input exceeds model's context limit
|
||||
"""
|
||||
# --- 1) Emit call started event
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LLMCallStartedEvent(
|
||||
messages=messages,
|
||||
tools=tools,
|
||||
callbacks=callbacks,
|
||||
available_functions=available_functions,
|
||||
),
|
||||
)
|
||||
|
||||
# --- 2) Validate parameters before proceeding with the call
|
||||
self._validate_call_params()
|
||||
|
||||
# --- 3) Convert string messages to proper format if needed
|
||||
if isinstance(messages, str):
|
||||
messages = [{"role": "user", "content": messages}]
|
||||
|
||||
# --- 4) Handle O1 model special case (system messages not supported)
|
||||
if "o1" in self.model.lower():
|
||||
for message in messages:
|
||||
if message.get("role") == "system":
|
||||
message["role"] = "assistant"
|
||||
|
||||
# --- 5) Set up callbacks if provided
|
||||
with suppress_warnings():
|
||||
if callbacks and len(callbacks) > 0:
|
||||
self.set_callbacks(callbacks)
|
||||
|
||||
try:
|
||||
# --- 1) Make the completion call
|
||||
params = {
|
||||
"model": self.model,
|
||||
"messages": messages,
|
||||
"timeout": self.timeout,
|
||||
"temperature": self.temperature,
|
||||
"top_p": self.top_p,
|
||||
"n": self.n,
|
||||
"stop": self.stop,
|
||||
"max_tokens": self.max_tokens or self.max_completion_tokens,
|
||||
"presence_penalty": self.presence_penalty,
|
||||
"frequency_penalty": self.frequency_penalty,
|
||||
"logit_bias": self.logit_bias,
|
||||
"response_format": self.response_format,
|
||||
"seed": self.seed,
|
||||
"logprobs": self.logprobs,
|
||||
"top_logprobs": self.top_logprobs,
|
||||
"api_base": self.base_url,
|
||||
"api_version": self.api_version,
|
||||
"api_key": self.api_key,
|
||||
"stream": False,
|
||||
"tools": tools, # pass the tool schema
|
||||
}
|
||||
# --- 6) Prepare parameters for the completion call
|
||||
params = self._prepare_completion_params(messages, tools)
|
||||
|
||||
params = {k: v for k, v in params.items() if v is not None}
|
||||
|
||||
response = litellm.completion(**params)
|
||||
response_message = cast(Choices, cast(ModelResponse, response).choices)[
|
||||
0
|
||||
].message
|
||||
text_response = response_message.content or ""
|
||||
tool_calls = getattr(response_message, "tool_calls", [])
|
||||
|
||||
# --- 2) If no tool calls, return the text response
|
||||
if not tool_calls or not available_functions:
|
||||
return text_response
|
||||
|
||||
# --- 3) Handle the tool call
|
||||
tool_call = tool_calls[0]
|
||||
function_name = tool_call.function.name
|
||||
|
||||
if function_name in available_functions:
|
||||
try:
|
||||
function_args = json.loads(tool_call.function.arguments)
|
||||
except json.JSONDecodeError as e:
|
||||
logging.warning(f"Failed to parse function arguments: {e}")
|
||||
return text_response
|
||||
|
||||
fn = available_functions[function_name]
|
||||
try:
|
||||
# Call the actual tool function
|
||||
result = fn(**function_args)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
f"Error executing function '{function_name}': {e}"
|
||||
)
|
||||
return text_response
|
||||
|
||||
else:
|
||||
logging.warning(
|
||||
f"Tool call requested unknown function '{function_name}'"
|
||||
# --- 7) Make the completion call and handle response
|
||||
if self.stream:
|
||||
return self._handle_streaming_response(
|
||||
params, callbacks, available_functions
|
||||
)
|
||||
else:
|
||||
return self._handle_non_streaming_response(
|
||||
params, callbacks, available_functions
|
||||
)
|
||||
return text_response
|
||||
|
||||
except Exception as e:
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LLMCallFailedEvent(error=str(e)),
|
||||
)
|
||||
if not LLMContextLengthExceededException(
|
||||
str(e)
|
||||
)._is_context_limit_error(str(e)):
|
||||
logging.error(f"LiteLLM call failed: {str(e)}")
|
||||
raise
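A hedged end-to-end sketch of call() with a tool. The model name, schema, and local function are illustrative, but the shape matches what the method expects: an OpenAI-style tool schema plus a matching entry in available_functions.

```python
from crewai import LLM


def get_weather(city: str) -> str:  # illustrative local tool
    return f"It is sunny in {city}."


llm = LLM(model="gpt-4o-mini", stream=False)

result = llm.call(
    messages="What's the weather in Paris?",
    tools=[{
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up the weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }],
    available_functions={"get_weather": get_weather},
)
print(result)  # either the tool's return value or a plain text answer
```

When the response contains no tool call, or no available_functions are supplied, the text response is returned unchanged.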
|
||||
|
||||
def _handle_emit_call_events(self, response: Any, call_type: LLMCallType):
|
||||
"""Handle the events for the LLM call.
|
||||
|
||||
Args:
|
||||
response (str): The response from the LLM call.
|
||||
call_type (str): The type of call, either "tool_call" or "llm_call".
|
||||
"""
|
||||
crewai_event_bus.emit(
|
||||
self,
|
||||
event=LLMCallCompletedEvent(response=response, call_type=call_type),
|
||||
)
|
||||
|
||||
def _format_messages_for_provider(
|
||||
self, messages: List[Dict[str, str]]
|
||||
) -> List[Dict[str, str]]:
|
||||
"""Format messages according to provider requirements.
|
||||
|
||||
Args:
|
||||
messages: List of message dictionaries with 'role' and 'content' keys.
|
||||
Can be empty or None.
|
||||
|
||||
Returns:
|
||||
List of formatted messages according to provider requirements.
|
||||
For Anthropic models, ensures first message has 'user' role.
|
||||
|
||||
Raises:
|
||||
TypeError: If messages is None or contains invalid message format.
|
||||
"""
|
||||
if messages is None:
|
||||
raise TypeError("Messages cannot be None")
|
||||
|
||||
# Validate message format first
|
||||
for msg in messages:
|
||||
if not isinstance(msg, dict) or "role" not in msg or "content" not in msg:
|
||||
raise TypeError(
|
||||
"Invalid message format. Each message must be a dict with 'role' and 'content' keys"
|
||||
)
|
||||
|
||||
# Handle O1 models specially
|
||||
if "o1" in self.model.lower():
|
||||
formatted_messages = []
|
||||
for msg in messages:
|
||||
# Convert system messages to assistant messages
|
||||
if msg["role"] == "system":
|
||||
formatted_messages.append(
|
||||
{"role": "assistant", "content": msg["content"]}
|
||||
)
|
||||
else:
|
||||
formatted_messages.append(msg)
|
||||
return formatted_messages
|
||||
|
||||
# Handle Anthropic models
|
||||
if not self.is_anthropic:
|
||||
return messages
|
||||
|
||||
# Anthropic requires messages to start with 'user' role
|
||||
if not messages or messages[0]["role"] == "system":
|
||||
# If first message is system or empty, add a placeholder user message
|
||||
return [{"role": "user", "content": "."}, *messages]
|
||||
|
||||
return messages
|
||||
|
||||
def _get_custom_llm_provider(self) -> Optional[str]:
|
||||
"""
|
||||
Derives the custom_llm_provider from the model string.
|
||||
- For example, if the model is "openrouter/deepseek/deepseek-chat", returns "openrouter".
|
||||
- If the model is "gemini/gemini-1.5-pro", returns "gemini".
|
||||
- If there is no '/', defaults to "openai".
|
||||
"""
|
||||
if "/" in self.model:
|
||||
return self.model.split("/")[0]
|
||||
return None
|
||||
|
||||
def _validate_call_params(self) -> None:
|
||||
"""
|
||||
Validate parameters before making a call. Currently this only checks if
|
||||
a response_format is provided and whether the model supports it.
|
||||
The custom_llm_provider is dynamically determined from the model:
|
||||
- E.g., "openrouter/deepseek/deepseek-chat" yields "openrouter"
|
||||
- "gemini/gemini-1.5-pro" yields "gemini"
|
||||
- If no slash is present, "openai" is assumed.
|
||||
"""
|
||||
provider = self._get_custom_llm_provider()
|
||||
if self.response_format is not None and not supports_response_schema(
|
||||
model=self.model,
|
||||
custom_llm_provider=provider,
|
||||
):
|
||||
raise ValueError(
|
||||
f"The model {self.model} does not support response_format for provider '{provider}'. "
|
||||
"Please remove response_format or use a supported model."
|
||||
)
|
||||
|
||||
def supports_function_calling(self) -> bool:
|
||||
try:
|
||||
params = get_supported_openai_params(model=self.model)
|
||||
return "response_format" in params
|
||||
provider = self._get_custom_llm_provider()
|
||||
return litellm.utils.supports_function_calling(
|
||||
self.model, custom_llm_provider=provider
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to get supported params: {str(e)}")
|
||||
logging.error(f"Failed to check function calling support: {str(e)}")
|
||||
return False
|
||||
|
||||
def supports_stop_words(self) -> bool:
|
||||
try:
|
||||
params = get_supported_openai_params(model=self.model)
|
||||
return "stop" in params
|
||||
return params is not None and "stop" in params
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to get supported params: {str(e)}")
|
||||
return False
|
||||
@@ -284,10 +852,23 @@ class LLM:
|
||||
"""
|
||||
Returns the context window size, using 75% of the maximum to avoid
|
||||
cutting off messages mid-thread.
|
||||
|
||||
Raises:
|
||||
ValueError: If a model's context window size is outside valid bounds (1024-2097152)
|
||||
"""
|
||||
if self.context_window_size != 0:
|
||||
return self.context_window_size
|
||||
|
||||
MIN_CONTEXT = 1024
|
||||
MAX_CONTEXT = 2097152 # Current max from gemini-1.5-pro
|
||||
|
||||
# Validate all context window sizes
|
||||
for key, value in LLM_CONTEXT_WINDOW_SIZES.items():
|
||||
if value < MIN_CONTEXT or value > MAX_CONTEXT:
|
||||
raise ValueError(
|
||||
f"Context window for {key} must be between {MIN_CONTEXT} and {MAX_CONTEXT}"
|
||||
)
|
||||
|
||||
self.context_window_size = int(
|
||||
DEFAULT_CONTEXT_WINDOW_SIZE * CONTEXT_WINDOW_USAGE_RATIO
|
||||
)
|
||||
|
||||
@@ -1,3 +1,7 @@
from typing import Optional

from pydantic import PrivateAttr

from crewai.memory.entity.entity_memory_item import EntityMemoryItem
from crewai.memory.memory import Memory
from crewai.memory.storage.rag_storage import RAGStorage
@@ -10,13 +14,15 @@ class EntityMemory(Memory):
Inherits from the Memory class.
"""

def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
self.memory_provider = None
_memory_provider: Optional[str] = PrivateAttr()

if self.memory_provider == "mem0":
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
memory_provider = crew.memory_config.get("provider")
else:
memory_provider = None

if memory_provider == "mem0":
try:
from crewai.memory.storage.mem0_storage import Mem0Storage
except ImportError:
@@ -36,11 +42,13 @@ class EntityMemory(Memory):
path=path,
)
)
super().__init__(storage)

super().__init__(storage=storage)
self._memory_provider = memory_provider

def save(self, item: EntityMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
"""Saves an entity item into the SQLite storage."""
if self.memory_provider == "mem0":
if self._memory_provider == "mem0":
data = f"""
Remember details about the following entity:
Name: {item.name}

@@ -17,7 +17,7 @@ class LongTermMemory(Memory):
def __init__(self, storage=None, path=None):
if not storage:
storage = LTMSQLiteStorage(db_path=path) if path else LTMSQLiteStorage()
super().__init__(storage)
super().__init__(storage=storage)

def save(self, item: LongTermMemoryItem) -> None: # type: ignore # BUG?: Signature of "save" incompatible with supertype "Memory"
metadata = item.metadata
@@ -1,15 +1,19 @@
from typing import Any, Dict, List, Optional

from crewai.memory.storage.rag_storage import RAGStorage
from pydantic import BaseModel


class Memory:
class Memory(BaseModel):
"""
Base class for memory, now supporting agent tags and generic metadata.
"""

def __init__(self, storage: RAGStorage):
self.storage = storage
embedder_config: Optional[Dict[str, Any]] = None

storage: Any

def __init__(self, storage: Any, **data: Any):
super().__init__(storage=storage, **data)

def save(
self,
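A rough sketch of the contract implied by the Pydantic-based `Memory` above: `storage` is now a model field passed by keyword, with `embedder_config` optional. `DummyStorage` is a stand-in object, not part of the diff.

```python
# Sketch only: demonstrates the keyword-based construction the new
# Pydantic Memory base class expects (storage: Any, embedder_config optional).
from crewai.memory.memory import Memory


class DummyStorage:
    """Minimal stand-in for a RAG storage backend."""

    def save(self, value, metadata):
        pass


mem = Memory(storage=DummyStorage())
print(mem.embedder_config)  # None by default per the new field definition
```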
@@ -1,5 +1,7 @@
from typing import Any, Dict, Optional

from pydantic import PrivateAttr

from crewai.memory.memory import Memory
from crewai.memory.short_term.short_term_memory_item import ShortTermMemoryItem
from crewai.memory.storage.rag_storage import RAGStorage
@@ -14,13 +16,15 @@ class ShortTermMemory(Memory):
MemoryItem instances.
"""

def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if hasattr(crew, "memory_config") and crew.memory_config is not None:
self.memory_provider = crew.memory_config.get("provider")
else:
self.memory_provider = None
_memory_provider: Optional[str] = PrivateAttr()

if self.memory_provider == "mem0":
def __init__(self, crew=None, embedder_config=None, storage=None, path=None):
if crew and hasattr(crew, "memory_config") and crew.memory_config is not None:
memory_provider = crew.memory_config.get("provider")
else:
memory_provider = None

if memory_provider == "mem0":
try:
from crewai.memory.storage.mem0_storage import Mem0Storage
except ImportError:
@@ -39,7 +43,8 @@ class ShortTermMemory(Memory):
path=path,
)
)
super().__init__(storage)
super().__init__(storage=storage)
self._memory_provider = memory_provider

def save(
self,
@@ -48,7 +53,7 @@ class ShortTermMemory(Memory):
agent: Optional[str] = None,
) -> None:
item = ShortTermMemoryItem(data=value, metadata=metadata, agent=agent)
if self.memory_provider == "mem0":
if self._memory_provider == "mem0":
item.data = f"Remember the following insights from Agent run: {item.data}"

super().save(value=item.data, metadata=item.metadata, agent=item.agent)
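A hedged sketch of the configuration path this constructor reads: with no crew `memory_config` the provider resolves to None and the default RAGStorage backend is used; a crew whose `memory_config` is `{"provider": "mem0"}` switches to Mem0Storage (which needs the optional mem0 dependency). The import path matches the module shown in this diff; saving with the default backend assumes an embedding provider is configured in the environment.

```python
# Illustrative only. Default path: provider -> None, RAGStorage backend.
# A crew with memory_config={"provider": "mem0"} would switch to Mem0Storage
# instead (requires the mem0 extra to be installed and configured).
from crewai.memory.short_term.short_term_memory import ShortTermMemory

stm = ShortTermMemory()  # no crew/memory_config, so _memory_provider is None
# stm.save(value="User prefers concise answers", metadata={"topic": "style"})
# The "Remember the following insights..." prefix is only added when the
# provider is "mem0".
```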
@@ -13,7 +13,7 @@ class BaseRAGStorage(ABC):
self,
type: str,
allow_reset: bool = True,
embedder_config: Optional[Any] = None,
embedder_config: Optional[Dict[str, Any]] = None,
crew: Any = None,
):
self.type = type
@@ -1,12 +1,17 @@
import json
import logging
import sqlite3
from pathlib import Path
from typing import Any, Dict, List, Optional

from crewai.task import Task
from crewai.utilities import Printer
from crewai.utilities.crew_json_encoder import CrewJSONEncoder
from crewai.utilities.errors import DatabaseError, DatabaseOperationError
from crewai.utilities.paths import db_storage_path

logger = logging.getLogger(__name__)


class KickoffTaskOutputsSQLiteStorage:
"""
@@ -14,15 +19,24 @@ class KickoffTaskOutputsSQLiteStorage:
"""

def __init__(
self, db_path: str = f"{db_storage_path()}/latest_kickoff_task_outputs.db"
self, db_path: Optional[str] = None
) -> None:
if db_path is None:
# Get the parent directory of the default db path and create our db file there
db_path = str(Path(db_storage_path()) / "latest_kickoff_task_outputs.db")
self.db_path = db_path
self._printer: Printer = Printer()
self._initialize_db()

def _initialize_db(self):
"""
Initializes the SQLite database and creates LTM table
def _initialize_db(self) -> None:
"""Initialize the SQLite database and create the latest_kickoff_task_outputs table.

This method sets up the database schema for storing task outputs. It creates
a table with columns for task_id, expected_output, output (as JSON),
task_index, inputs (as JSON), was_replayed flag, and timestamp.

Raises:
DatabaseOperationError: If database initialization fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
@@ -43,10 +57,9 @@ class KickoffTaskOutputsSQLiteStorage:

conn.commit()
except sqlite3.Error as e:
self._printer.print(
content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}",
color="red",
)
error_msg = DatabaseError.format_error(DatabaseError.INIT_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)

def add(
self,
@@ -55,9 +68,22 @@ class KickoffTaskOutputsSQLiteStorage:
task_index: int,
was_replayed: bool = False,
inputs: Dict[str, Any] = {},
):
) -> None:
"""Add a new task output record to the database.

Args:
task: The Task object containing task details.
output: Dictionary containing the task's output data.
task_index: Integer index of the task in the sequence.
was_replayed: Boolean indicating if this was a replay execution.
inputs: Dictionary of input parameters used for the task.

Raises:
DatabaseOperationError: If saving the task output fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()
cursor.execute(
"""
@@ -76,21 +102,31 @@ class KickoffTaskOutputsSQLiteStorage:
)
conn.commit()
except sqlite3.Error as e:
self._printer.print(
content=f"SAVING KICKOFF TASK OUTPUTS ERROR: An error occurred during database initialization: {e}",
color="red",
)
error_msg = DatabaseError.format_error(DatabaseError.SAVE_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)

def update(
self,
task_index: int,
**kwargs,
):
"""
Updates an existing row in the latest_kickoff_task_outputs table based on task_index.
**kwargs: Any,
) -> None:
"""Update an existing task output record in the database.

Updates fields of a task output record identified by task_index. The fields
to update are provided as keyword arguments.

Args:
task_index: Integer index of the task to update.
**kwargs: Arbitrary keyword arguments representing fields to update.
Values that are dictionaries will be JSON encoded.

Raises:
DatabaseOperationError: If updating the task output fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()

fields = []
@@ -110,14 +146,23 @@ class KickoffTaskOutputsSQLiteStorage:
conn.commit()

if cursor.rowcount == 0:
self._printer.print(
f"No row found with task_index {task_index}. No update performed.",
color="red",
)
logger.warning(f"No row found with task_index {task_index}. No update performed.")
except sqlite3.Error as e:
self._printer.print(f"UPDATE KICKOFF TASK OUTPUTS ERROR: {e}", color="red")
error_msg = DatabaseError.format_error(DatabaseError.UPDATE_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)

def load(self) -> Optional[List[Dict[str, Any]]]:
def load(self) -> List[Dict[str, Any]]:
"""Load all task output records from the database.

Returns:
List of dictionaries containing task output records, ordered by task_index.
Each dictionary contains: task_id, expected_output, output, task_index,
inputs, was_replayed, and timestamp.

Raises:
DatabaseOperationError: If loading task outputs fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
cursor = conn.cursor()
@@ -144,23 +189,26 @@ class KickoffTaskOutputsSQLiteStorage:
return results

except sqlite3.Error as e:
self._printer.print(
content=f"LOADING KICKOFF TASK OUTPUTS ERROR: An error occurred while querying kickoff task outputs: {e}",
color="red",
)
return None
error_msg = DatabaseError.format_error(DatabaseError.LOAD_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)

def delete_all(self):
"""
Deletes all rows from the latest_kickoff_task_outputs table.
def delete_all(self) -> None:
"""Delete all task output records from the database.

This method removes all records from the latest_kickoff_task_outputs table.
Use with caution as this operation cannot be undone.

Raises:
DatabaseOperationError: If deleting task outputs fails due to SQLite errors.
"""
try:
with sqlite3.connect(self.db_path) as conn:
conn.execute("BEGIN TRANSACTION")
cursor = conn.cursor()
cursor.execute("DELETE FROM latest_kickoff_task_outputs")
conn.commit()
except sqlite3.Error as e:
self._printer.print(
content=f"ERROR: Failed to delete all kickoff task outputs: {e}",
color="red",
)
error_msg = DatabaseError.format_error(DatabaseError.DELETE_ERROR, e)
logger.error(error_msg)
raise DatabaseOperationError(error_msg, e)
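A short sketch of the call-site pattern this storage class now enables: SQLite failures are raised as `DatabaseOperationError` instead of only being printed. Import paths follow the imports shown in the diff; the handling below is illustrative.

```python
# Sketch: the storage now raises DatabaseOperationError on SQLite failures,
# so callers can react to persistence problems explicitly.
from crewai.memory.storage.kickoff_task_outputs_storage import (
    KickoffTaskOutputsSQLiteStorage,
)
from crewai.utilities.errors import DatabaseOperationError

storage = KickoffTaskOutputsSQLiteStorage()  # default path under db_storage_path()
try:
    outputs = storage.load()  # now returns List[Dict[str, Any]] or raises
except DatabaseOperationError as exc:
    # exc wraps the original sqlite3.Error plus a formatted message
    print(f"Could not read kickoff task outputs: {exc}")
```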
@@ -1,5 +1,6 @@
import json
import sqlite3
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

from crewai.utilities import Printer
@@ -12,10 +13,15 @@ class LTMSQLiteStorage:
"""

def __init__(
self, db_path: str = f"{db_storage_path()}/long_term_memory_storage.db"
self, db_path: Optional[str] = None
) -> None:
if db_path is None:
# Get the parent directory of the default db path and create our db file there
db_path = str(Path(db_storage_path()) / "long_term_memory_storage.db")
self.db_path = db_path
self._printer: Printer = Printer()
# Ensure parent directory exists
Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
self._initialize_db()

def _initialize_db(self):
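Both constructor forms implied by the change above, as a small sketch: `db_path` is now optional, and the parent directory of a custom path is created up front. The temporary path below is purely illustrative.

```python
# Sketch: default path vs. explicit path. The constructor now creates the
# parent directory, so a nested custom location works without manual setup.
from pathlib import Path

from crewai.memory.storage.ltm_sqlite_storage import LTMSQLiteStorage

default_ltm = LTMSQLiteStorage()  # <db_storage_path()>/long_term_memory_storage.db
custom_ltm = LTMSQLiteStorage(db_path=str(Path("/tmp/crewai-demo/ltm.db")))
```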
@@ -1,8 +1,10 @@
from functools import wraps
from typing import Callable
from typing import Any, Callable, Optional, Union, cast

from crewai import Crew
from crewai.project.utils import memoize
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput

"""Decorators for defining crew components and their behaviors."""

@@ -21,13 +23,35 @@ def after_kickoff(func):

def task(func):
"""Marks a method as a crew task."""
func.is_task = True
setattr(func, "is_task", True)

@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if not result.name:

# Set the task name if not already set
if hasattr(result, "name") and not result.name:
result.name = func.__name__

# If this is a ConditionalTask, ensure it has a valid condition
if isinstance(result, ConditionalTask):
# If the condition is a boolean, wrap it in a function
if isinstance(result.condition, bool):
bool_value = result.condition
result.condition = lambda _: bool_value

# Get the previous task output if available
self = args[0] if args else None
if self and hasattr(self, "_method_outputs"):
previous_outputs = getattr(self, "_method_outputs", [])
previous_output = previous_outputs[-1] if previous_outputs else None

# If there's a previous output and it's a TaskOutput, check if we should execute
if previous_output and isinstance(previous_output, TaskOutput):
if not result.should_execute(previous_output):
# Return a skipped task output instead of the task
return result.get_skipped_task_output()

return result

return memoize(wrapper)
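A hedged sketch of how the updated `@task` decorator can be used with a boolean-conditioned `ConditionalTask`. It assumes the usual `@CrewBase` project scaffolding (including the `config/agents.yaml` and `config/tasks.yaml` files generated by `crewai create`); the class and task fields below are illustrative only.

```python
# Sketch only: a @task method returning a ConditionalTask with a plain bool
# condition, which the updated decorator wraps into `lambda _: bool_value`.
from crewai.project import CrewBase, task
from crewai.tasks.conditional_task import ConditionalTask


@CrewBase
class DemoCrew:
    @task
    def maybe_summarize(self) -> ConditionalTask:
        return ConditionalTask(
            description="Summarize the findings",
            expected_output="A short summary",
            condition=True,  # bare boolean is now accepted and wrapped
        )
```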
@@ -21,7 +21,6 @@ from typing import (
Union,
)

from opentelemetry.trace import Span
from pydantic import (
UUID4,
BaseModel,
@@ -36,10 +35,15 @@ from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.tasks.guardrail_result import GuardrailResult
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
from crewai.telemetry.telemetry import Telemetry
from crewai.tools.base_tool import BaseTool
from crewai.utilities.config import process_config
from crewai.utilities.converter import Converter, convert_to_model
from crewai.utilities.events import (
TaskCompletedEvent,
TaskFailedEvent,
TaskStartedEvent,
)
from crewai.utilities.events.crewai_event_bus import crewai_event_bus
from crewai.utilities.i18n import I18N
from crewai.utilities.printer import Printer

@@ -183,8 +187,6 @@ class Task(BaseModel):
)
return v

_telemetry: Telemetry = PrivateAttr(default_factory=Telemetry)
_execution_span: Optional[Span] = PrivateAttr(default=None)
_original_description: Optional[str] = PrivateAttr(default=None)
_original_expected_output: Optional[str] = PrivateAttr(default=None)
_original_output_file: Optional[str] = PrivateAttr(default=None)
@@ -348,94 +350,102 @@ class Task(BaseModel):
tools: Optional[List[Any]],
) -> TaskOutput:
"""Run the core execution logic of the task."""
agent = agent or self.agent
self.agent = agent
if not agent:
raise Exception(
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
try:
agent = agent or self.agent
self.agent = agent
if not agent:
raise Exception(
f"The task '{self.description}' has no agent assigned, therefore it can't be executed directly and should be executed in a Crew using a specific process that support that, like hierarchical."
)

self.start_time = datetime.datetime.now()

self.prompt_context = context
tools = tools or self.tools or []

self.processed_by_agents.add(agent.role)
crewai_event_bus.emit(self, TaskStartedEvent(context=context))
result = agent.execute_task(
task=self,
context=context,
tools=tools,
)

self.start_time = datetime.datetime.now()
self._execution_span = self._telemetry.task_started(crew=agent.crew, task=self)
pydantic_output, json_output = self._export_output(result)
task_output = TaskOutput(
name=self.name,
description=self.description,
expected_output=self.expected_output,
raw=result,
pydantic=pydantic_output,
json_dict=json_output,
agent=agent.role,
output_format=self._get_output_format(),
)

self.prompt_context = context
tools = tools or self.tools or []
if self.guardrail:
guardrail_result = GuardrailResult.from_tuple(
self.guardrail(task_output)
)
if not guardrail_result.success:
if self.retry_count >= self.max_retries:
raise Exception(
f"Task failed guardrail validation after {self.max_retries} retries. "
f"Last error: {guardrail_result.error}"
)

self.processed_by_agents.add(agent.role)
self.retry_count += 1
context = self.i18n.errors("validation_error").format(
guardrail_result_error=guardrail_result.error,
task_output=task_output.raw,
)
printer = Printer()
printer.print(
content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
color="yellow",
)
return self._execute_core(agent, context, tools)

result = agent.execute_task(
task=self,
context=context,
tools=tools,
)

pydantic_output, json_output = self._export_output(result)
task_output = TaskOutput(
name=self.name,
description=self.description,
expected_output=self.expected_output,
raw=result,
pydantic=pydantic_output,
json_dict=json_output,
agent=agent.role,
output_format=self._get_output_format(),
)

if self.guardrail:
guardrail_result = GuardrailResult.from_tuple(self.guardrail(task_output))
if not guardrail_result.success:
if self.retry_count >= self.max_retries:
if guardrail_result.result is None:
raise Exception(
f"Task failed guardrail validation after {self.max_retries} retries. "
f"Last error: {guardrail_result.error}"
"Task guardrail returned None as result. This is not allowed."
)

self.retry_count += 1
context = self.i18n.errors("validation_error").format(
guardrail_result_error=guardrail_result.error,
task_output=task_output.raw,
if isinstance(guardrail_result.result, str):
task_output.raw = guardrail_result.result
pydantic_output, json_output = self._export_output(
guardrail_result.result
)
task_output.pydantic = pydantic_output
task_output.json_dict = json_output
elif isinstance(guardrail_result.result, TaskOutput):
task_output = guardrail_result.result

self.output = task_output
self.end_time = datetime.datetime.now()

if self.callback:
self.callback(self.output)

crew = self.agent.crew # type: ignore[union-attr]
if crew and crew.task_callback and crew.task_callback != self.callback:
crew.task_callback(self.output)

if self.output_file:
content = (
json_output
if json_output
else pydantic_output.model_dump_json()
if pydantic_output
else result
)
printer = Printer()
printer.print(
content=f"Guardrail blocked, retrying, due to: {guardrail_result.error}\n",
color="yellow",
)
return self._execute_core(agent, context, tools)

if guardrail_result.result is None:
raise Exception(
"Task guardrail returned None as result. This is not allowed."
)

if isinstance(guardrail_result.result, str):
task_output.raw = guardrail_result.result
pydantic_output, json_output = self._export_output(
guardrail_result.result
)
task_output.pydantic = pydantic_output
task_output.json_dict = json_output
elif isinstance(guardrail_result.result, TaskOutput):
task_output = guardrail_result.result

self.output = task_output
self.end_time = datetime.datetime.now()

if self.callback:
self.callback(self.output)

if self._execution_span:
self._telemetry.task_ended(self._execution_span, self, agent.crew)
self._execution_span = None

if self.output_file:
content = (
json_output
if json_output
else pydantic_output.model_dump_json() if pydantic_output else result
)
self._save_file(content)

return task_output
self._save_file(content)
crewai_event_bus.emit(self, TaskCompletedEvent(output=task_output))
return task_output
except Exception as e:
self.end_time = datetime.datetime.now()
crewai_event_bus.emit(self, TaskFailedEvent(error=str(e)))
raise e # Re-raise the exception after emitting the event

def prompt(self) -> str:
"""Prompt the task.
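A brief sketch of the guardrail contract the reworked `_execute_core` consumes: a callable that receives the `TaskOutput` and returns a `(success, result)` tuple. On failure the task retries up to `max_retries`; on success a string result overwrites the raw output. The agent and task fields below are illustrative, and running the crew would still require an LLM configured in the environment.

```python
# Sketch of a task guardrail matching the (success, result) tuple consumed
# by GuardrailResult.from_tuple in the diff above.
from typing import Any, Tuple

from crewai import Agent, Task
from crewai.tasks.task_output import TaskOutput


def must_mention_price(output: TaskOutput) -> Tuple[bool, Any]:
    if "price" in output.raw.lower():
        return True, output.raw  # a str result replaces task_output.raw
    return False, "Output must mention the price"  # triggers a retry


analyst = Agent(role="Analyst", goal="Report prices", backstory="Markets nerd")
report = Task(
    description="Summarize today's BTC price action",
    expected_output="One paragraph including the price",
    agent=analyst,
    guardrail=must_mention_price,
    max_retries=2,
)
```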
@@ -452,7 +462,7 @@ class Task(BaseModel):
return "\n".join(tasks_slices)

def interpolate_inputs_and_add_conversation_history(
self, inputs: Dict[str, Union[str, int, float]]
self, inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]]
) -> None:
"""Interpolate inputs into the task description, expected output, and output file path.
Add conversation history if present.
@@ -524,7 +534,9 @@ class Task(BaseModel):
)

def interpolate_only(
self, input_string: Optional[str], inputs: Dict[str, Union[str, int, float]]
self,
input_string: Optional[str],
inputs: Dict[str, Union[str, int, float, Dict[str, Any], List[Any]]],
) -> str:
"""Interpolate placeholders (e.g., {key}) in a string while leaving JSON untouched.

@@ -532,17 +544,39 @@ class Task(BaseModel):
input_string: The string containing template variables to interpolate.
Can be None or empty, in which case an empty string is returned.
inputs: Dictionary mapping template variables to their values.
Supported value types are strings, integers, and floats.
If input_string is empty or has no placeholders, inputs can be empty.
Supported value types are strings, integers, floats, and dicts/lists
containing only these types and other nested dicts/lists.

Returns:
The interpolated string with all template variables replaced with their values.
Empty string if input_string is None or empty.

Raises:
ValueError: If a required template variable is missing from inputs.
KeyError: If a template variable is not found in the inputs dictionary.
ValueError: If a value contains unsupported types
"""

# Validation function for recursive type checking
def validate_type(value: Any) -> None:
if value is None:
return
if isinstance(value, (str, int, float, bool)):
return
if isinstance(value, (dict, list)):
for item in value.values() if isinstance(value, dict) else value:
validate_type(item)
return
raise ValueError(
f"Unsupported type {type(value).__name__} in inputs. "
"Only str, int, float, bool, dict, and list are allowed."
)

# Validate all input values
for key, value in inputs.items():
try:
validate_type(value)
except ValueError as e:
raise ValueError(f"Invalid value for key '{key}': {str(e)}") from e

if input_string is None or not input_string:
return ""
if "{" not in input_string and "}" not in input_string:
@@ -551,15 +585,7 @@ class Task(BaseModel):
raise ValueError(
"Inputs dictionary cannot be empty when interpolating variables"
)

try:
# Validate input types
for key, value in inputs.items():
if not isinstance(value, (str, int, float)):
raise ValueError(
f"Value for key '{key}' must be a string, integer, or float, got {type(value).__name__}"
)

escaped_string = input_string.replace("{", "{{").replace("}", "}}")

for key in inputs.keys():
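A rough sketch of what the widened `interpolate_only` signature permits: nested dicts and lists in `inputs`, validated recursively before formatting. The task fields and values below are made up for illustration.

```python
# Sketch: placeholder interpolation with the dict/list input types this diff
# allows. Non-placeholder braces in the template are escaped before formatting.
from crewai import Task

task = Task(
    description="Compare {products} for the {audience} segment",
    expected_output="A comparison table",
)

text = task.interpolate_only(
    input_string=task.description,
    inputs={
        "products": ["CrewAI", "LangGraph"],           # list is now allowed
        "audience": {"name": "startups", "size": 50},  # nested dict is now allowed
    },
)
print(text)
```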
@@ -652,19 +678,32 @@ class Task(BaseModel):
return OutputFormat.PYDANTIC
return OutputFormat.RAW

def _save_file(self, result: Any) -> None:
def _save_file(self, result: Union[Dict, str, Any]) -> None:
"""Save task output to a file.

Note:
For cross-platform file writing, especially on Windows, consider using FileWriterTool
from the crewai_tools package:
pip install 'crewai[tools]'
from crewai_tools import FileWriterTool

Args:
result: The result to save to the file. Can be a dict or any stringifiable object.

Raises:
ValueError: If output_file is not set
RuntimeError: If there is an error writing to the file
RuntimeError: If there is an error writing to the file. For cross-platform
compatibility, especially on Windows, use FileWriterTool from crewai_tools
package.
"""
if self.output_file is None:
raise ValueError("output_file is not set.")

FILEWRITER_RECOMMENDATION = (
"For cross-platform file writing, especially on Windows, "
"use FileWriterTool from crewai_tools package."
)

try:
resolved_path = Path(self.output_file).expanduser().resolve()
directory = resolved_path.parent
@@ -680,7 +719,11 @@ class Task(BaseModel):
else:
file.write(str(result))
except (OSError, IOError) as e:
raise RuntimeError(f"Failed to save output file: {e}")
raise RuntimeError(
"\n".join(
[f"Failed to save output file: {e}", FILEWRITER_RECOMMENDATION]
)
)
return None

def __repr__(self):
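A brief sketch of the alternative the new error message points to: the optional `FileWriterTool` from `crewai_tools` for cross-platform (notably Windows) writes. The parameter names below are indicative of that tool's usual arguments and should be checked against the crewai_tools documentation; the filenames are illustrative.

```python
# Sketch: using FileWriterTool instead of output_file for Windows-friendly
# writes, as recommended by the error message above.
# Requires: pip install 'crewai[tools]'
from crewai_tools import FileWriterTool

writer = FileWriterTool()
# Parameter names (filename, content, directory) are assumptions based on the
# tool's documented usage, not part of this diff.
writer.run(filename="report.md", content="# Findings\n...", directory="output")
```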
@@ -1,4 +1,4 @@
from typing import Any, Callable
from typing import Any, Callable, Union, cast

from pydantic import Field

@@ -14,17 +14,23 @@ class ConditionalTask(Task):
"""

condition: Callable[[TaskOutput], bool] = Field(
default=None,
description="Maximum number of retries for an agent to execute a task when an error occurs.",
default=lambda _: True, # Default to always execute
description="Function that determines whether the task should be executed or a boolean value.",
)

def __init__(
self,
condition: Callable[[Any], bool],
condition: Union[Callable[[Any], bool], bool],
**kwargs,
):
super().__init__(**kwargs)
self.condition = condition

# If condition is a boolean, wrap it in a function that always returns that boolean
if isinstance(condition, bool):
bool_value = condition
self.condition = lambda _: bool_value
else:
self.condition = cast(Callable[[TaskOutput], bool], condition)

def should_execute(self, context: TaskOutput) -> bool:
"""
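A hedged sketch of the two condition forms the updated `ConditionalTask` constructor accepts: a callable evaluated against the previous `TaskOutput`, or a plain boolean that gets wrapped. Task fields are illustrative.

```python
# Sketch: both condition forms supported by the updated constructor.
from crewai.tasks.conditional_task import ConditionalTask
from crewai.tasks.task_output import TaskOutput


def has_findings(previous: TaskOutput) -> bool:
    return bool(previous.raw.strip())


callable_task = ConditionalTask(
    description="Write the report",
    expected_output="A markdown report",
    condition=has_findings,  # evaluated against the previous TaskOutput
)

bool_task = ConditionalTask(
    description="Optional follow-up",
    expected_output="A short note",
    condition=False,  # wrapped into `lambda _: False`
)
```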
Some files were not shown because too many files have changed in this diff.